diff --git a/.cursor/rules/code-comments.mdc b/.cursor/rules/code-comments.mdc new file mode 100644 index 000000000..12b7cffa0 --- /dev/null +++ b/.cursor/rules/code-comments.mdc @@ -0,0 +1,43 @@ +--- +description: Comment and logging conventions for the Hatchet codebase +alwaysApply: true +--- + +# Comments + +- Do NOT add section-separator comments like `# ----------- section name -----------` or `// ===== section =====`. Use whitespace and clear naming instead. +- Do NOT add comments that narrate what the code does (e.g., `// Import the module`, `// Return the result`). Comments explain **why**, not **what**. +- `TODO` and `FIXME` comments must include context: `// TODO: reason` or `// FIXME: description of the issue`. +- `NOTE:` for important non-obvious context: `// NOTE: we break this into a separate query because CTEs can't see modified rows`. + +## Python-specific + +- Docstrings: Google style with `:param:` and `:returns:` for public APIs. +- Type ignores must be specific: `# type: ignore[attr-defined]`, never bare `# type: ignore`. +- Linter suppressions (`# noqa: E501`) are fine in generated code; avoid in hand-written code. + +## Go-specific + +- Exported types and functions must have godoc comments. +- Struct fields use inline comments for `(required)` / `(optional)` annotations. +- Logger field is named `l`, not `logger` or `log`. + +# Logging + +- Avoid logging state at every line. + +## Python + +- Import the shared logger: `from hatchet_sdk.logger import logger` (named `"hatchet"`). +- Do NOT create per-module loggers with `logging.getLogger(__name__)` in SDK code. +- Levels: `debug` for internals, `info` for key events, `warning` for recoverable issues, `error` for failures, `exception` for failures with tracebacks. +- Use `logger.exception(...)` (not `logger.error` + `traceback`) when logging caught exceptions. + +## Go + +- Create loggers via `logger.NewDefaultLogger("service-name")` with kebab-case service names. 
+- Store as `*zerolog.Logger` in a struct field named `l`. +- Chain structured fields before the message: `d.l.Error().Err(err).Str("key", val).Msg("description")`. +- Use `.Err(err)` to attach errors — do not format errors into the message string. +- Use `.Msgf()` only when dynamic values are needed; prefer `.Msg()` with structured fields. +- Log errors at the point of handling, not at every level of the call stack. diff --git a/.cursor/rules/taskfile.mdc b/.cursor/rules/taskfile.mdc new file mode 100644 index 000000000..ee512fd83 --- /dev/null +++ b/.cursor/rules/taskfile.mdc @@ -0,0 +1,17 @@ +--- +description: Prefer Taskfile tasks over raw shell commands +alwaysApply: true +--- + +# Taskfile + +- **Prefer the Taskfile** over raw shell commands when a suitable task exists. Run `task <task-name>` (or `task <task-name> -- <args>`) instead of invoking tools directly. +- Before running build, test, lint, migrate, or codegen commands, check `Taskfile.yaml` for a matching task. Examples: + - SQL codegen → `task generate-sqlc` (not raw `sqlc generate`) + - Full codegen → `task generate` or `task generate-all` + - Go tests → `task test` or `task test-integration` + - Linting → `task lint` or `task lint-go` + - Migrations → `task migrate`, `task goose-migrate`, `task empty-migration` + - Formatting → `task pre` (or `task fmt-go`, `task fmt-app`, `task fmt-docs`) + - Dev setup → `task setup`, `task start-db`, `task start-dev` +- Use raw commands only when no task exists for the operation or when the user explicitly asks for a specific command. diff --git a/.cursor/rules/typescript-e2e-tests.mdc b/.cursor/rules/typescript-e2e-tests.mdc new file mode 100644 index 000000000..c1b64af0a --- /dev/null +++ b/.cursor/rules/typescript-e2e-tests.mdc @@ -0,0 +1,31 @@ +--- +description: Running and writing TypeScript SDK e2e tests +globs: sdks/typescript/**/*.e2e.ts +alwaysApply: false +--- + +# TypeScript SDK E2E Tests + +## Running Tests + +Tests require a running Hatchet engine (e.g. via `task start-dev`). 
Run from `sdks/typescript/`: + +```bash +# All e2e tests +pnpm test:e2e + +# Specific test file +pnpm test:e2e durable.e2e.ts + +# Specific test by name pattern +pnpm test:e2e durable.e2e.ts -t "durable replay reset" +``` + +The e2e harness automatically spawns a shared worker (`src/v1/examples/e2e-worker.ts`) via `jest.e2e-global-setup.ts` and tears it down after tests complete. + +## Test Structure + +- Test files live alongside their workflows in `src/v1/examples//`. +- Workflows are defined in `workflow.ts`, tests in `.e2e.ts`. +- Use `makeE2EClient()` from `src/v1/examples/__e2e__/harness.ts` for the Hatchet client. +- Register new workflows/tasks in `src/v1/examples/e2e-worker.ts` so the shared worker picks them up. diff --git a/.github/workflows/sdk-python.yml b/.github/workflows/sdk-python.yml index d9a26b374..309f997f9 100644 --- a/.github/workflows/sdk-python.yml +++ b/.github/workflows/sdk-python.yml @@ -160,6 +160,216 @@ jobs: name: ${{ env.HATCHET_CLIENT_NAMESPACE }}-opt-${{ matrix.optimistic-scheduling }}-api-logs path: api.log + old-engine-new-sdk: + runs-on: ubicloud-standard-4 + timeout-minutes: 20 + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + fetch-depth: 0 + fetch-tags: true + + - name: Setup Go + uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0 + with: + go-version: "1.25" + + - name: Start Docker dependencies + working-directory: . + run: docker compose up -d + + - name: Determine latest stable release tag + working-directory: . + run: | + LATEST_TAG=$(git tag --sort=-v:refname | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+$' | head -1) + if [ -z "$LATEST_TAG" ]; then + echo "ERROR: No stable release tag found" + exit 1 + fi + echo "Latest stable tag: $LATEST_TAG" + echo "LATEST_TAG=$LATEST_TAG" >> $GITHUB_ENV + + - name: Pull release images + working-directory: . 
+ run: docker compose -f docker-compose.yml -f docker-compose.release.yml pull hatchet-migrate hatchet-admin hatchet-engine hatchet-api + + - name: Run migrations + working-directory: . + run: docker compose -f docker-compose.yml -f docker-compose.release.yml run --rm hatchet-migrate + + - name: Setup config and seed database + working-directory: . + run: | + mkdir -p generated + docker compose -f docker-compose.yml -f docker-compose.release.yml run --rm \ + hatchet-admin /hatchet/hatchet-admin quickstart --skip certs --generated-config-dir /hatchet/generated + + - name: Generate API token + working-directory: . + run: | + TOKEN=$(docker compose -f docker-compose.yml -f docker-compose.release.yml run -T --rm \ + hatchet-admin /hatchet/hatchet-admin token create --config /hatchet/generated --tenant-id 707d0855-80ab-4e1f-a156-f1c4546cbf52) + echo "HATCHET_CLIENT_TOKEN=$TOKEN" >> $GITHUB_ENV + + - name: Start engine and API + working-directory: . + run: | + docker compose -f docker-compose.yml -f docker-compose.release.yml up -d hatchet-engine hatchet-api + sleep 30 + + - name: Set up Python + uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 + with: + python-version: '3.14' + + - name: Install Poetry + uses: snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a # v1.4.1 + with: + version: 1.5.1 + virtualenvs-create: true + virtualenvs-in-project: true + + - name: Install dependencies + run: poetry install --no-interaction --all-extras + + - name: Set HATCHET_CLIENT_NAMESPACE + run: | + SHORT_SHA=$(git rev-parse --short HEAD) + echo "HATCHET_CLIENT_NAMESPACE=old-engine-new-sdk-${SHORT_SHA}" >> $GITHUB_ENV + + - name: Run pytest + env: + HATCHET_CLIENT_TLS_STRATEGY: none + HATCHET_CLIENT_WORKER_HEALTHCHECK_ENABLED: "True" + run: | + echo "Testing current SDK against engine ${{ env.LATEST_TAG }}" + echo "Using HATCHET_CLIENT_NAMESPACE: $HATCHET_CLIENT_NAMESPACE" + poetry run pytest -s -vvv --maxfail=5 --capture=no --retries 3 
--retry-delay 2 -n auto + + - name: Collect engine logs + if: always() + working-directory: . + run: docker compose -f docker-compose.yml -f docker-compose.release.yml logs hatchet-engine > /tmp/engine.log 2>&1 || true + + - name: Upload engine logs + if: always() + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + with: + name: ${{ env.HATCHET_CLIENT_NAMESPACE }}-engine-logs + path: /tmp/engine.log + + - name: Collect API logs + if: always() + working-directory: . + run: docker compose -f docker-compose.yml -f docker-compose.release.yml logs hatchet-api > /tmp/api.log 2>&1 || true + + - name: Upload API logs + if: always() + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + with: + name: ${{ env.HATCHET_CLIENT_NAMESPACE }}-api-logs + path: /tmp/api.log + + - name: Teardown + if: always() + working-directory: . + run: docker compose -f docker-compose.yml -f docker-compose.release.yml down + + new-engine-old-sdk: + runs-on: ubicloud-standard-4 + timeout-minutes: 20 + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + fetch-depth: 0 + fetch-tags: true + + - name: Setup Go + uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0 + with: + go-version: "1.25" + + - name: Start Docker dependencies + working-directory: . + run: docker compose up -d + + - name: Run migrations + working-directory: . + run: | + export DATABASE_URL="postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet" + go run ./cmd/hatchet-migrate + + - name: Setup and start engine + working-directory: . 
+ run: | + export SEED_DEVELOPMENT=true + export SERVER_PORT=8080 + export SERVER_URL=http://localhost:8080 + export SERVER_AUTH_COOKIE_DOMAIN=localhost + export SERVER_AUTH_COOKIE_INSECURE=true + export SERVER_DEFAULT_ENGINE_VERSION=V1 + export SERVER_MSGQUEUE_RABBITMQ_URL="amqp://user:password@localhost:5672/" + + go run ./cmd/hatchet-admin quickstart + + go run ./cmd/hatchet-engine --config ./generated/ > engine.log 2>&1 & + go run ./cmd/hatchet-api --config ./generated/ > api.log 2>&1 & + + sleep 30 + + - name: Generate API token + working-directory: . + run: | + echo "HATCHET_CLIENT_TOKEN=$(go run ./cmd/hatchet-admin token create --config ./generated/ --tenant-id 707d0855-80ab-4e1f-a156-f1c4546cbf52)" >> $GITHUB_ENV + echo "HATCHET_CLIENT_TLS_ROOT_CA_FILE=${{ github.workspace }}/certs/ca.cert" >> $GITHUB_ENV + echo "HATCHET_CLIENT_WORKER_HEALTHCHECK_ENABLED=True" >> $GITHUB_ENV + + - name: Clone main SDK source + run: | + git clone --depth=1 --branch main https://github.com/hatchet-dev/hatchet.git /tmp/old-sdk + + - name: Set up Python + uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 + with: + python-version: '3.14' + + - name: Install Poetry + uses: snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a # v1.4.1 + with: + version: 1.5.1 + virtualenvs-create: true + virtualenvs-in-project: true + + - name: Install SDK and test dependencies + working-directory: /tmp/old-sdk/sdks/python + run: poetry install --no-interaction --all-extras + + - name: Set HATCHET_CLIENT_NAMESPACE + run: | + SHORT_SHA=$(git rev-parse --short HEAD) + echo "HATCHET_CLIENT_NAMESPACE=new-engine-old-sdk-${SHORT_SHA}" >> $GITHUB_ENV + + - name: Run pytest against new engine + working-directory: /tmp/old-sdk/sdks/python + run: | + echo "Testing main SDK against current engine" + echo "Using HATCHET_CLIENT_NAMESPACE: $HATCHET_CLIENT_NAMESPACE" + poetry run pytest -s -vvv --maxfail=5 --capture=no --retries 3 --retry-delay 2 -n auto + + - name: Upload 
engine logs + if: always() + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + with: + name: ${{ env.HATCHET_CLIENT_NAMESPACE }}-engine-logs + path: engine.log + + - name: Upload API logs + if: always() + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + with: + name: ${{ env.HATCHET_CLIENT_NAMESPACE }}-api-logs + path: api.log + publish: runs-on: ubicloud-standard-4 needs: [lint, test] diff --git a/.github/workflows/sdk-ruby.yml b/.github/workflows/sdk-ruby.yml index df8bf0ceb..4b35cebe1 100644 --- a/.github/workflows/sdk-ruby.yml +++ b/.github/workflows/sdk-ruby.yml @@ -224,6 +224,280 @@ jobs: name: ${{ env.HATCHET_CLIENT_NAMESPACE }}-opt-${{ matrix.optimistic-scheduling }}-api-logs path: api.log + old-engine-new-sdk: + runs-on: ubicloud-standard-4 + timeout-minutes: 20 + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + fetch-depth: 0 + fetch-tags: true + + - name: Start Docker dependencies + working-directory: . + run: docker compose up -d + + - name: Determine latest stable release tag + working-directory: . + run: | + LATEST_TAG=$(git tag --sort=-v:refname | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+$' | head -1) + if [ -z "$LATEST_TAG" ]; then + echo "ERROR: No stable release tag found" + exit 1 + fi + echo "Latest stable tag: $LATEST_TAG" + echo "LATEST_TAG=$LATEST_TAG" >> $GITHUB_ENV + + - name: Pull release images + working-directory: . + run: docker compose -f docker-compose.yml -f docker-compose.release.yml pull hatchet-migrate hatchet-admin hatchet-engine hatchet-api + + - name: Run migrations + working-directory: . + run: docker compose -f docker-compose.yml -f docker-compose.release.yml run --rm hatchet-migrate + + - name: Setup config and seed database + working-directory: . 
+ run: | + mkdir -p generated + docker compose -f docker-compose.yml -f docker-compose.release.yml run --rm \ + hatchet-admin /hatchet/hatchet-admin quickstart --skip certs --generated-config-dir /hatchet/generated + + - name: Generate API token + working-directory: . + run: | + TOKEN=$(docker compose -f docker-compose.yml -f docker-compose.release.yml run -T --rm \ + hatchet-admin /hatchet/hatchet-admin token create --config /hatchet/generated --tenant-id 707d0855-80ab-4e1f-a156-f1c4546cbf52) + echo "HATCHET_CLIENT_TOKEN=$TOKEN" >> $GITHUB_ENV + + - name: Start engine and API + working-directory: . + run: | + docker compose -f docker-compose.yml -f docker-compose.release.yml up -d hatchet-engine hatchet-api + sleep 30 + + - name: Set up Ruby + uses: ruby/setup-ruby@09a7688d3b55cf0e976497ff046b70949eeaccfd # v1.288.0 + with: + ruby-version: "3.2" + bundler-cache: true + working-directory: ./sdks/ruby/src + + - name: Set HATCHET_CLIENT_NAMESPACE + run: | + SHORT_SHA=$(git rev-parse --short HEAD) + echo "HATCHET_CLIENT_NAMESPACE=old-engine-new-sdk-rb-${SHORT_SHA}" >> $GITHUB_ENV + + - name: Run unit tests + env: + HATCHET_CLIENT_TLS_STRATEGY: none + HATCHET_CLIENT_WORKER_HEALTHCHECK_ENABLED: "true" + run: | + echo "Testing current SDK against engine ${{ env.LATEST_TAG }}" + echo "Using HATCHET_CLIENT_NAMESPACE: $HATCHET_CLIENT_NAMESPACE" + bundle exec rspec --format documentation --tag ~integration + + - name: Run integration tests + env: + HATCHET_CLIENT_TLS_STRATEGY: none + HATCHET_CLIENT_WORKER_HEALTHCHECK_ENABLED: "true" + run: bundle exec rspec spec/integration/ --format documentation --tag integration + + - name: Set up Ruby for examples + uses: ruby/setup-ruby@09a7688d3b55cf0e976497ff046b70949eeaccfd # v1.288.0 + with: + ruby-version: "3.2" + bundler-cache: true + working-directory: ./sdks/ruby/examples + + - name: Start example worker + working-directory: ./sdks/ruby/examples + env: + HATCHET_CLIENT_TLS_STRATEGY: none + 
HATCHET_CLIENT_WORKER_HEALTHCHECK_ENABLED: "true" + run: bundle exec ruby worker.rb > worker.log 2>&1 & + + - name: Wait for worker health + working-directory: . + run: | + for i in $(seq 1 60); do + if curl -s http://localhost:8001/health > /dev/null 2>&1; then + echo "Worker is healthy after ${i}s" + exit 0 + fi + sleep 1 + done + echo "Worker failed to start within 60s" + cat ./sdks/ruby/examples/worker.log || true + exit 1 + + - name: Run e2e tests + working-directory: ./sdks/ruby/examples + env: + HATCHET_CLIENT_TLS_STRATEGY: none + HATCHET_CLIENT_WORKER_HEALTHCHECK_ENABLED: "true" + run: bundle exec rspec -f d --fail-fast + + - name: Upload worker logs + if: always() + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + with: + name: ${{ env.HATCHET_CLIENT_NAMESPACE }}-worker-logs + path: ./sdks/ruby/examples/worker.log + + - name: Collect engine logs + if: always() + working-directory: . + run: docker compose -f docker-compose.yml -f docker-compose.release.yml logs hatchet-engine > /tmp/engine.log 2>&1 || true + + - name: Upload engine logs + if: always() + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + with: + name: ${{ env.HATCHET_CLIENT_NAMESPACE }}-engine-logs + path: /tmp/engine.log + + - name: Collect API logs + if: always() + working-directory: . + run: docker compose -f docker-compose.yml -f docker-compose.release.yml logs hatchet-api > /tmp/api.log 2>&1 || true + + - name: Upload API logs + if: always() + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + with: + name: ${{ env.HATCHET_CLIENT_NAMESPACE }}-api-logs + path: /tmp/api.log + + - name: Teardown + if: always() + working-directory: . 
+ run: docker compose -f docker-compose.yml -f docker-compose.release.yml down + + new-engine-old-sdk: + runs-on: ubicloud-standard-4 + timeout-minutes: 20 + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Setup Go + uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0 + with: + go-version: "1.25" + + - name: Start Docker dependencies + working-directory: . + run: docker compose up -d + + - name: Run migrations + working-directory: . + run: | + export DATABASE_URL="postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet" + go run ./cmd/hatchet-migrate + + - name: Setup and start engine + working-directory: . + run: | + export SEED_DEVELOPMENT=true + export SERVER_PORT=8080 + export SERVER_URL=http://localhost:8080 + export SERVER_AUTH_COOKIE_DOMAIN=localhost + export SERVER_AUTH_COOKIE_INSECURE=true + export SERVER_DEFAULT_ENGINE_VERSION=V1 + export SERVER_MSGQUEUE_RABBITMQ_URL="amqp://user:password@localhost:5672/" + + go run ./cmd/hatchet-admin quickstart + + go run ./cmd/hatchet-engine --config ./generated/ > engine.log 2>&1 & + go run ./cmd/hatchet-api --config ./generated/ > api.log 2>&1 & + + sleep 30 + + - name: Generate API token + working-directory: . + run: | + echo "HATCHET_CLIENT_TOKEN=$(go run ./cmd/hatchet-admin token create --config ./generated/ --tenant-id 707d0855-80ab-4e1f-a156-f1c4546cbf52)" >> $GITHUB_ENV + echo "HATCHET_CLIENT_TLS_ROOT_CA_FILE=${{ github.workspace }}/certs/ca.cert" >> $GITHUB_ENV + echo "HATCHET_CLIENT_WORKER_HEALTHCHECK_ENABLED=true" >> $GITHUB_ENV + + - name: Clone main SDK source + working-directory: . 
+ run: | + git clone --depth=1 --branch main https://github.com/hatchet-dev/hatchet.git /tmp/old-sdk + + - name: Set up Ruby for old SDK + uses: ruby/setup-ruby@09a7688d3b55cf0e976497ff046b70949eeaccfd # v1.288.0 + with: + ruby-version: "3.2" + bundler-cache: true + working-directory: /tmp/old-sdk/sdks/ruby/src + + - name: Set HATCHET_CLIENT_NAMESPACE + run: | + SHORT_SHA=$(git rev-parse --short HEAD) + echo "HATCHET_CLIENT_NAMESPACE=new-engine-old-sdk-rb-${SHORT_SHA}" >> $GITHUB_ENV + + - name: Run unit tests + working-directory: /tmp/old-sdk/sdks/ruby/src + run: | + echo "Testing main SDK against current engine" + echo "Using HATCHET_CLIENT_NAMESPACE: $HATCHET_CLIENT_NAMESPACE" + bundle exec rspec --format documentation --tag ~integration + + - name: Run integration tests + working-directory: /tmp/old-sdk/sdks/ruby/src + run: bundle exec rspec spec/integration/ --format documentation --tag integration + + - name: Set up Ruby for old examples + uses: ruby/setup-ruby@09a7688d3b55cf0e976497ff046b70949eeaccfd # v1.288.0 + with: + ruby-version: "3.2" + bundler-cache: true + working-directory: /tmp/old-sdk/sdks/ruby/examples + + - name: Start example worker + working-directory: /tmp/old-sdk/sdks/ruby/examples + run: bundle exec ruby worker.rb > worker.log 2>&1 & + + - name: Wait for worker health + working-directory: . 
+ run: | + for i in $(seq 1 60); do + if curl -s http://localhost:8001/health > /dev/null 2>&1; then + echo "Worker is healthy after ${i}s" + exit 0 + fi + sleep 1 + done + echo "Worker failed to start within 60s" + cat /tmp/old-sdk/sdks/ruby/examples/worker.log || true + exit 1 + + - name: Run e2e tests + working-directory: /tmp/old-sdk/sdks/ruby/examples + run: bundle exec rspec -f d --fail-fast + + - name: Upload worker logs + if: always() + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + with: + name: ${{ env.HATCHET_CLIENT_NAMESPACE }}-worker-logs + path: /tmp/old-sdk/sdks/ruby/examples/worker.log + + - name: Upload engine logs + if: always() + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + with: + name: ${{ env.HATCHET_CLIENT_NAMESPACE }}-engine-logs + path: engine.log + + - name: Upload API logs + if: always() + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + with: + name: ${{ env.HATCHET_CLIENT_NAMESPACE }}-api-logs + path: api.log + publish: runs-on: ubicloud-standard-4 needs: [lint, test] diff --git a/.github/workflows/sdk-typescript.yml b/.github/workflows/sdk-typescript.yml index a341b9d26..b2afb1a28 100644 --- a/.github/workflows/sdk-typescript.yml +++ b/.github/workflows/sdk-typescript.yml @@ -185,6 +185,203 @@ jobs: name: sdk-typescript-api-logs path: api.log + old-engine-new-sdk: + runs-on: ubicloud-standard-4 + timeout-minutes: 20 + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + fetch-depth: 0 + fetch-tags: true + + - name: Start Docker dependencies + working-directory: . + run: docker compose up -d + + - name: Determine latest stable release tag + working-directory: . 
+ run: | + LATEST_TAG=$(git tag --sort=-v:refname | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+$' | head -1) + if [ -z "$LATEST_TAG" ]; then + echo "ERROR: No stable release tag found" + exit 1 + fi + echo "Latest stable tag: $LATEST_TAG" + echo "LATEST_TAG=$LATEST_TAG" >> $GITHUB_ENV + + - name: Pull release images + working-directory: . + run: docker compose -f docker-compose.yml -f docker-compose.release.yml pull hatchet-migrate hatchet-admin hatchet-engine hatchet-api + + - name: Run migrations + working-directory: . + run: docker compose -f docker-compose.yml -f docker-compose.release.yml run --rm hatchet-migrate + + - name: Setup config and seed database + working-directory: . + run: | + mkdir -p generated + docker compose -f docker-compose.yml -f docker-compose.release.yml run --rm \ + hatchet-admin /hatchet/hatchet-admin quickstart --skip certs --generated-config-dir /hatchet/generated + + - name: Generate API token + working-directory: . + run: | + TOKEN=$(docker compose -f docker-compose.yml -f docker-compose.release.yml run -T --rm \ + hatchet-admin /hatchet/hatchet-admin token create --config /hatchet/generated --tenant-id 707d0855-80ab-4e1f-a156-f1c4546cbf52) + echo "HATCHET_CLIENT_TOKEN=$TOKEN" >> $GITHUB_ENV + + - name: Start engine and API + working-directory: . 
+ run: | + docker compose -f docker-compose.yml -f docker-compose.release.yml up -d hatchet-engine hatchet-api + sleep 30 + + - name: Install pnpm + run: npm install -g pnpm@10.16.1 + + - name: Get pnpm store directory + shell: bash + run: | + echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV + + - name: Setup pnpm cache + uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 + with: + path: ${{ env.STORE_PATH }} + key: ${{ runner.os }}-pnpm-store-${{ hashFiles('pnpm-lock.yaml') }} + restore-keys: | + ${{ runner.os }}-pnpm-store- + + - name: Install dependencies + run: pnpm install + + - name: Set HATCHET_CLIENT_NAMESPACE + run: | + SHORT_SHA=$(git rev-parse --short HEAD) + echo "HATCHET_CLIENT_NAMESPACE=old-engine-new-sdk-ts-${SHORT_SHA}" >> $GITHUB_ENV + + - name: Run e2e tests + env: + HATCHET_CLIENT_TLS_STRATEGY: none + HATCHET_CLIENT_WORKER_HEALTHCHECK_ENABLED: "true" + NODE_TLS_REJECT_UNAUTHORIZED: "0" + run: | + echo "Testing current SDK against engine ${{ env.LATEST_TAG }}" + echo "Using HATCHET_CLIENT_NAMESPACE: $HATCHET_CLIENT_NAMESPACE" + pnpm test:e2e + + - name: Collect engine logs + if: always() + working-directory: . + run: docker compose -f docker-compose.yml -f docker-compose.release.yml logs hatchet-engine > /tmp/engine.log 2>&1 || true + + - name: Upload engine logs + if: always() + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + with: + name: ${{ env.HATCHET_CLIENT_NAMESPACE }}-engine-logs + path: /tmp/engine.log + + - name: Collect API logs + if: always() + working-directory: . + run: docker compose -f docker-compose.yml -f docker-compose.release.yml logs hatchet-api > /tmp/api.log 2>&1 || true + + - name: Upload API logs + if: always() + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + with: + name: ${{ env.HATCHET_CLIENT_NAMESPACE }}-api-logs + path: /tmp/api.log + + - name: Teardown + if: always() + working-directory: . 
+ run: docker compose -f docker-compose.yml -f docker-compose.release.yml down + + new-engine-old-sdk: + runs-on: ubicloud-standard-4 + timeout-minutes: 20 + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Setup Go + uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0 + with: + go-version: "1.25" + + - name: Start Docker dependencies + working-directory: . + run: docker compose up -d + + - name: Run migrations + working-directory: . + run: | + export DATABASE_URL="postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet" + go run ./cmd/hatchet-migrate + + - name: Setup and start engine + working-directory: . + run: | + export SEED_DEVELOPMENT=true + export SERVER_PORT=8080 + export SERVER_URL=http://localhost:8080 + export SERVER_AUTH_COOKIE_DOMAIN=localhost + export SERVER_AUTH_COOKIE_INSECURE=true + export SERVER_DEFAULT_ENGINE_VERSION=V1 + export SERVER_MSGQUEUE_RABBITMQ_URL="amqp://user:password@localhost:5672/" + + go run ./cmd/hatchet-admin quickstart + + go run ./cmd/hatchet-engine --config ./generated/ > engine.log 2>&1 & + go run ./cmd/hatchet-api --config ./generated/ > api.log 2>&1 & + + sleep 30 + + - name: Generate API token + working-directory: . 
+ run: | + echo "HATCHET_CLIENT_TOKEN=$(go run ./cmd/hatchet-admin token create --config ./generated/ --tenant-id 707d0855-80ab-4e1f-a156-f1c4546cbf52)" >> $GITHUB_ENV + echo "HATCHET_CLIENT_TLS_ROOT_CA_FILE=${{ github.workspace }}/certs/ca.cert" >> $GITHUB_ENV + + - name: Clone main SDK source + run: | + git clone --depth=1 --branch main https://github.com/hatchet-dev/hatchet.git /tmp/old-sdk + + - name: Install pnpm + run: npm install -g pnpm@10.16.1 + + - name: Install old SDK dependencies + working-directory: /tmp/old-sdk/sdks/typescript + run: pnpm install + + - name: Set HATCHET_CLIENT_NAMESPACE + run: | + SHORT_SHA=$(git rev-parse --short HEAD) + echo "HATCHET_CLIENT_NAMESPACE=new-engine-old-sdk-ts-${SHORT_SHA}" >> $GITHUB_ENV + + - name: Run e2e tests against new engine + working-directory: /tmp/old-sdk/sdks/typescript + run: | + echo "Testing main SDK against current engine" + echo "Using HATCHET_CLIENT_NAMESPACE: $HATCHET_CLIENT_NAMESPACE" + pnpm test:e2e + + - name: Upload engine logs + if: always() + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + with: + name: ${{ env.HATCHET_CLIENT_NAMESPACE }}-engine-logs + path: engine.log + + - name: Upload API logs + if: always() + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + with: + name: ${{ env.HATCHET_CLIENT_NAMESPACE }}-api-logs + path: api.log + publish: runs-on: ubicloud-standard-4 needs: [lint, test-unit, test-e2e] diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 6cf0f0a09..d6f54d7c8 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -426,6 +426,16 @@ jobs: -e DATABASE_URL="${{ env.DATABASE_URL }}" \ ghcr.io/hatchet-dev/hatchet/hatchet-migrate:${{ env.LATEST_TAG }} + - name: Capture old migration version + run: | + OLD_VERSION=$(docker run --rm --network host \ + -e PGPASSWORD=hatchet \ + postgres:17-alpine \ + psql -h 127.0.0.1 -p 5431 -U hatchet -d hatchet -t -A \ + -c "SELECT 
max(version_id) FROM goose_db_version WHERE is_applied = true") + echo "Old migration version: $OLD_VERSION" + echo "OLD_MIGRATION_VERSION=$OLD_VERSION" >> $GITHUB_ENV + - name: Setup config and seed database run: | mkdir -p generated @@ -491,6 +501,15 @@ jobs: go run ./cmd/hatchet-migrate echo "New migrations applied successfully" + - name: Down migrate to old version then up again + run: | + echo "Migrating down to old version ${{ env.OLD_MIGRATION_VERSION }}..." + go run ./cmd/hatchet-migrate --down ${{ env.OLD_MIGRATION_VERSION }} + echo "Down migration successful" + echo "Re-applying new migrations..." + go run ./cmd/hatchet-migrate + echo "Re-migration up successful" + - name: Wait for load test to complete run: | echo "Waiting for load test container to finish..." diff --git a/.gitignore b/.gitignore index f67f587f1..acfece0c2 100644 --- a/.gitignore +++ b/.gitignore @@ -102,3 +102,4 @@ frontend/docs/lib/generated/ # Scripts hack/dev/psql-connect.sh +CLAUDE.md diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 349847294..56ee7c337 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -14,7 +14,7 @@ repos: - id: check-yaml exclude: (^examples/|^cmd/hatchet-cli/cli/templates/) - repo: https://github.com/golangci/golangci-lint - rev: v2.7.2 + rev: v2.8.0 hooks: - id: golangci-lint args: ["--config=.golangci.yml", "--allow-parallel-runners"] diff --git a/Taskfile.yaml b/Taskfile.yaml index c3d9d94e0..5c84c2405 100644 --- a/Taskfile.yaml +++ b/Taskfile.yaml @@ -105,6 +105,17 @@ tasks: goose-migrate: cmds: - bash ./hack/dev/migrate.sh + migrate-down: + desc: "Migrate down to a specific version. 
Usage: task migrate-down -- 20260223180620 (for v1.0.82)" + dotenv: [.env] + cmds: + - | + if [ -z "{{.CLI_ARGS}}" ]; then + echo "Usage: task migrate-down -- <goose-version>" + echo "Example: task migrate-down -- 20260223180620 # v1.0.82" + exit 1 + fi + go run ./cmd/hatchet-migrate --down {{.CLI_ARGS}} seed-dev: dotenv: [.env] cmds: diff --git a/api-contracts/dispatcher/dispatcher.proto b/api-contracts/dispatcher/dispatcher.proto index 303d479ee..df6ba42e8 100644 --- a/api-contracts/dispatcher/dispatcher.proto +++ b/api-contracts/dispatcher/dispatcher.proto @@ -32,6 +32,8 @@ service Dispatcher { rpc ReleaseSlot(ReleaseSlotRequest) returns (ReleaseSlotResponse) {} + rpc RestoreEvictedTask(RestoreEvictedTaskRequest) returns (RestoreEvictedTaskResponse) {} + rpc UpsertWorkerLabels(UpsertWorkerLabelsRequest) returns (UpsertWorkerLabelsResponse) {} // GetVersion returns the dispatcher protocol version as a simple integer. @@ -184,6 +186,9 @@ message AssignedAction { // (optional) the workflow version id optional string workflow_version_id = 20; + + // (optional) the invocation count for durable task events (required for durable events, otherwise null) + optional int32 durable_task_invocation_count = 21; } message WorkerListenRequest { @@ -413,6 +418,14 @@ message ReleaseSlotRequest { message ReleaseSlotResponse {} +message RestoreEvictedTaskRequest { + string task_run_external_id = 1; +} + +message RestoreEvictedTaskResponse { + bool requeued = 1; +} + message GetVersionRequest {} message GetVersionResponse { diff --git a/api-contracts/openapi/components/schemas/_index.yaml b/api-contracts/openapi/components/schemas/_index.yaml index 1c39332c7..98b16ac20 100644 --- a/api-contracts/openapi/components/schemas/_index.yaml +++ b/api-contracts/openapi/components/schemas/_index.yaml @@ -352,8 +352,14 @@ V1ReplayedTasks: $ref: "./v1/task.yaml#/V1ReplayedTasks" V1CancelledTasks: $ref: "./v1/task.yaml#/V1CancelledTasks" +V1RestoreTaskResponse: + $ref: "./v1/task.yaml#/V1RestoreTaskResponse" 
V1TaskStatus: $ref: "./v1/task.yaml#/V1TaskStatus" +V1RunningFilter: + $ref: "./v1/task.yaml#/V1RunningFilter" +V1RunningDetailCount: + $ref: "./v1/task.yaml#/V1RunningDetailCount" V1TaskRunMetrics: $ref: "./v1/task.yaml#/V1TaskRunMetrics" V1TaskPointMetric: @@ -374,6 +380,10 @@ V1TaskRunStatus: $ref: "./workflow_run.yaml#/V1TaskRunStatus" V1TriggerWorkflowRunRequest: $ref: "./v1/workflow_run.yaml#/V1TriggerWorkflowRunRequest" +V1BranchDurableTaskRequest: + $ref: "./v1/workflow_run.yaml#/V1BranchDurableTaskRequest" +V1BranchDurableTaskResponse: + $ref: "./v1/workflow_run.yaml#/V1BranchDurableTaskResponse" V1LogLine: $ref: "./v1/logs.yaml#/V1LogLine" V1LogLineLevel: diff --git a/api-contracts/openapi/components/schemas/v1/task.yaml b/api-contracts/openapi/components/schemas/v1/task.yaml index 0d20f5eea..18e21fdb1 100644 --- a/api-contracts/openapi/components/schemas/v1/task.yaml +++ b/api-contracts/openapi/components/schemas/v1/task.yaml @@ -63,6 +63,9 @@ V1TaskSummary: description: The output of the task run (for the latest run) status: $ref: "#/V1TaskStatus" + isEvicted: + type: boolean + description: Whether the task has been evicted from a worker (still counts as RUNNING). startedAt: type: string format: date-time @@ -267,6 +270,13 @@ V1TaskStatus: - CANCELLED - FAILED +V1RunningFilter: + type: string + enum: + - ALL + - EVICTED + - ON_WORKER + V1TaskEventType: type: string enum: @@ -291,12 +301,35 @@ V1TaskEventType: - QUEUED - SKIPPED - COULD_NOT_SEND_TO_WORKER + - DURABLE_EVICTED + - DURABLE_RESTORING + +V1RestoreTaskResponse: + type: object + properties: + requeued: + type: boolean + required: + - requeued V1TaskRunMetrics: type: array items: $ref: "#/V1TaskRunMetric" +V1RunningDetailCount: + type: object + properties: + evicted: + type: integer + description: The number of evicted tasks within the RUNNING status bucket. + onWorker: + type: integer + description: The number of tasks currently on a worker within the RUNNING status bucket. 
+ required: + - evicted + - onWorker + V1TaskRunMetric: type: object properties: @@ -304,6 +337,8 @@ V1TaskRunMetric: $ref: "#/V1TaskStatus" count: type: integer + runningDetailCount: + $ref: "#/V1RunningDetailCount" required: - status - count @@ -393,6 +428,9 @@ V1TaskTiming: description: The depth of the task in the waterfall. status: $ref: "#/V1TaskStatus" + isEvicted: + type: boolean + description: Whether the task has been evicted from a worker (still counts as RUNNING). taskDisplayName: type: string description: The display name of the task run. diff --git a/api-contracts/openapi/components/schemas/v1/workflow_run.yaml b/api-contracts/openapi/components/schemas/v1/workflow_run.yaml index 11e2f652a..c5a511f26 100644 --- a/api-contracts/openapi/components/schemas/v1/workflow_run.yaml +++ b/api-contracts/openapi/components/schemas/v1/workflow_run.yaml @@ -144,3 +144,45 @@ V1TriggerWorkflowRunRequest: required: - workflowName - input + +V1BranchDurableTaskRequest: + properties: + taskExternalId: + type: string + format: uuid + minLength: 36 + maxLength: 36 + description: The external id of the durable task to branch. + nodeId: + type: integer + format: int64 + description: The node id to replay from. + branchId: + type: integer + format: int64 + description: The branch id to replay from. + required: + - taskExternalId + - nodeId + - branchId + +V1BranchDurableTaskResponse: + properties: + taskExternalId: + type: string + format: uuid + minLength: 36 + maxLength: 36 + description: The external id of the durable task. + nodeId: + type: integer + format: int64 + description: The node id of the new entry. + branchId: + type: integer + format: int64 + description: The branch id of the new entry. 
+ required: + - taskExternalId + - nodeId + - branchId diff --git a/api-contracts/openapi/openapi.yaml b/api-contracts/openapi/openapi.yaml index 30df3807f..a814e1144 100644 --- a/api-contracts/openapi/openapi.yaml +++ b/api-contracts/openapi/openapi.yaml @@ -33,6 +33,8 @@ paths: $ref: "./paths/v1/tasks/tasks.yaml#/cancelTasks" /api/v1/stable/tenants/{tenant}/tasks/replay: $ref: "./paths/v1/tasks/tasks.yaml#/replayTasks" + /api/v1/stable/tasks/{task}/restore: + $ref: "./paths/v1/tasks/tasks.yaml#/restoreTask" /api/v1/stable/dags/tasks: $ref: "./paths/v1/tasks/tasks.yaml#/listTasksByDAGIds" /api/v1/stable/tenants/{tenant}/workflow-runs: @@ -43,6 +45,8 @@ paths: $ref: "./paths/v1/workflow-runs/workflow_run.yaml#/listWorkflowRunExternalIds" /api/v1/stable/tenants/{tenant}/workflow-runs/trigger: $ref: "./paths/v1/workflow-runs/workflow_run.yaml#/trigger" + /api/v1/stable/tenants/{tenant}/durable-tasks/branch: + $ref: "./paths/v1/workflow-runs/workflow_run.yaml#/branchDurableTask" /api/v1/stable/workflow-runs/{v1-workflow-run}: $ref: "./paths/v1/workflow-runs/workflow_run.yaml#/getWorkflowRunDetails" /api/v1/stable/workflow-runs/{v1-workflow-run}/status: diff --git a/api-contracts/openapi/paths/v1/tasks/tasks.yaml b/api-contracts/openapi/paths/v1/tasks/tasks.yaml index 9e4ae10c4..194a1b7d1 100644 --- a/api-contracts/openapi/paths/v1/tasks/tasks.yaml +++ b/api-contracts/openapi/paths/v1/tasks/tasks.yaml @@ -441,6 +441,50 @@ replayTasks: tags: - Task +restoreTask: + post: + x-resources: ["tenant", "task"] + description: Restore an evicted durable task + operationId: v1-task:restore + parameters: + - description: The task id + in: path + name: task + required: true + schema: + type: string + format: uuid + minLength: 36 + maxLength: 36 + responses: + "200": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/V1RestoreTaskResponse" + description: Successfully restored the task + "400": + content: + application/json: + schema: + $ref: 
"../../../components/schemas/_index.yaml#/APIErrors" + description: A malformed or bad request + "403": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: The task was not found + summary: Restore a task + tags: + - Task + listLogs: get: x-resources: ["tenant", "task"] diff --git a/api-contracts/openapi/paths/v1/workflow-runs/workflow_run.yaml b/api-contracts/openapi/paths/v1/workflow-runs/workflow_run.yaml index 41a98f986..62b16e641 100644 --- a/api-contracts/openapi/paths/v1/workflow-runs/workflow_run.yaml +++ b/api-contracts/openapi/paths/v1/workflow-runs/workflow_run.yaml @@ -107,6 +107,12 @@ listWorkflowRuns: required: false schema: type: boolean + - description: Filter within the RUNNING status bucket. ALL returns both on-worker and evicted tasks, ON_WORKER returns only tasks running on a worker, EVICTED returns only evicted tasks. Defaults to ALL. + in: query + name: running_filter + required: false + schema: + $ref: "../../../components/schemas/_index.yaml#/V1RunningFilter" responses: "200": content: @@ -247,6 +253,12 @@ listWorkflowRunExternalIds: format: uuid minLength: 36 maxLength: 36 + - description: Filter within the RUNNING status bucket. ALL returns both on-worker and evicted tasks, ON_WORKER returns only tasks running on a worker, EVICTED returns only evicted tasks. Defaults to ALL. + in: query + name: running_filter + required: false + schema: + $ref: "../../../components/schemas/_index.yaml#/V1RunningFilter" responses: "200": content: @@ -473,6 +485,51 @@ trigger: tags: - Workflow Runs +branchDurableTask: + post: + x-resources: ["tenant"] + description: Branch a durable task from a specific node, creating a new branch and re-processing its matches. 
+ operationId: v1-durable-task:branch + parameters: + - description: The tenant id + in: path + name: tenant + required: true + schema: + type: string + format: uuid + minLength: 36 + maxLength: 36 + requestBody: + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/V1BranchDurableTaskRequest" + description: The branch request + required: true + responses: + "200": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/V1BranchDurableTaskResponse" + description: Successfully branch the durable task + "400": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: A malformed or bad request + "403": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Forbidden + summary: Branch durable task + tags: + - Workflow Runs + getTimings: get: x-resources: ["tenant", "v1-workflow-run"] diff --git a/api-contracts/v1/dispatcher.proto b/api-contracts/v1/dispatcher.proto index 6c92f90ad..8b2b24ca8 100644 --- a/api-contracts/v1/dispatcher.proto +++ b/api-contracts/v1/dispatcher.proto @@ -5,15 +5,169 @@ option go_package = "github.com/hatchet-dev/hatchet/internal/services/shared/pro package v1; import "v1/shared/condition.proto"; +import "v1/shared/trigger.proto"; service V1Dispatcher { + rpc DurableTask(stream DurableTaskRequest) returns (stream DurableTaskResponse) {} + + // NOTE: deprecated after DurableEventLog is implemented rpc RegisterDurableEvent(RegisterDurableEventRequest) returns (RegisterDurableEventResponse) {} - rpc ListenForDurableEvent(stream ListenForDurableEventRequest) returns (stream DurableEvent) {} - } +message DurableTaskRequestRegisterWorker { + string worker_id = 1; +} +message DurableTaskResponseRegisterWorker { + string worker_id = 1; +} + +message DurableEventLogEntryRef { + string durable_task_external_id = 1; + int32 invocation_count = 2; + int64 branch_id 
= 3; + int64 node_id = 4; +} + +message DurableTaskRunAckEntry { + int64 node_id = 1; + int64 branch_id = 2; +} + +message DurableTaskEventMemoAckResponse { + DurableEventLogEntryRef ref = 1; + bool memo_already_existed = 2; + optional bytes memo_result_payload = 3; +} + +message DurableTaskEventTriggerRunsAckResponse { + string durable_task_external_id = 1; + int32 invocation_count = 2; + repeated DurableTaskRunAckEntry run_entries = 3; +} + +message DurableTaskEventWaitForAckResponse { + DurableEventLogEntryRef ref = 1; +} + +message DurableTaskEventLogEntryCompletedResponse { + DurableEventLogEntryRef ref = 1; + bytes payload = 2; +} + +message DurableTaskEvictInvocationRequest { + int32 invocation_count = 1; + string durable_task_external_id = 2; + optional string reason = 3; +} + +// Sent by the server after recording eviction for an evict_invocation request. +message DurableTaskEvictionAckResponse { + int32 invocation_count = 1; + string durable_task_external_id = 2; +} + +message DurableTaskAwaitedCompletedEntry { + string durable_task_external_id = 1; + int64 branch_id = 2; + int64 node_id = 3; + int32 invocation_count = 4; +} + +// Sent by the server to notify a worker that its invocation is stale and should be cancelled. +message DurableTaskServerEvictNotice { + string durable_task_external_id = 1; + int32 invocation_count = 2; + string reason = 3; +} + +message DurableTaskWorkerStatusRequest { + string worker_id = 1; + repeated DurableTaskAwaitedCompletedEntry waiting_entries = 2; +} + +message DurableTaskCompleteMemoRequest { + DurableEventLogEntryRef ref = 1; + bytes payload = 2; + bytes memo_key = 3; +} + +message DurableTaskMemoRequest { + // The invocation_count is a monotonically increasing count that uniquely identifies an "attempt" + // at running a durable task. Each time the task is started, it gets a new invocation count (which has) + // incremented by one since the previous invocation. 
This allows the server (and the worker) to have a way of + // differentiating between different attempts of the same task running in different places, to prevent race conditions + // and other problems from duplication. It also allows for older invocations to be evicted cleanly + int32 invocation_count = 1; + string durable_task_external_id = 2; + + bytes key = 3; + + // optional payload because we can send a memo request to check if a memo already exists + optional bytes payload = 4; +} + +message DurableTaskTriggerRunsRequest { + // The invocation_count is a monotonically increasing count that uniquely identifies an "attempt" + // at running a durable task. Each time the task is started, it gets a new invocation count (which has) + // incremented by one since the previous invocation. This allows the server (and the worker) to have a way of + // differentiating between different attempts of the same task running in different places, to prevent race conditions + // and other problems from duplication. It also allows for older invocations to be evicted cleanly + int32 invocation_count = 1; + string durable_task_external_id = 2; + + repeated TriggerWorkflowRequest trigger_opts = 3; +} + +message DurableTaskWaitForRequest { + // The invocation_count is a monotonically increasing count that uniquely identifies an "attempt" + // at running a durable task. Each time the task is started, it gets a new invocation count (which has) + // incremented by one since the previous invocation. This allows the server (and the worker) to have a way of + // differentiating between different attempts of the same task running in different places, to prevent race conditions + // and other problems from duplication. 
It also allows for older invocations to be evicted cleanly + int32 invocation_count = 1; + string durable_task_external_id = 2; + + // Fields for DURABLE_TASK_TRIGGER_KIND_WAIT_FOR + optional DurableEventListenerConditions wait_for_conditions = 3; +} + +message DurableTaskRequest { + oneof message { + DurableTaskRequestRegisterWorker register_worker = 1; + DurableTaskMemoRequest memo = 2; + DurableTaskTriggerRunsRequest trigger_runs = 3; + DurableTaskWaitForRequest wait_for = 4; + DurableTaskEvictInvocationRequest evict_invocation = 5; + DurableTaskWorkerStatusRequest worker_status = 6; + DurableTaskCompleteMemoRequest complete_memo = 7; + } +} + +enum DurableTaskErrorType { + DURABLE_TASK_ERROR_TYPE_UNSPECIFIED = 0; + DURABLE_TASK_ERROR_TYPE_NONDETERMINISM = 1; +} + +message DurableTaskErrorResponse { + DurableEventLogEntryRef ref = 1; + DurableTaskErrorType error_type = 2; + string error_message = 3; +} + +message DurableTaskResponse { + oneof message { + DurableTaskResponseRegisterWorker register_worker = 1; + DurableTaskEventMemoAckResponse memo_ack = 2; + DurableTaskEventTriggerRunsAckResponse trigger_runs_ack = 3; + DurableTaskEventWaitForAckResponse wait_for_ack = 4; + DurableTaskEventLogEntryCompletedResponse entry_completed = 5; + DurableTaskErrorResponse error = 6; + DurableTaskEvictionAckResponse eviction_ack = 7; + DurableTaskServerEvictNotice server_evict = 8; + } +} message RegisterDurableEventRequest { string task_id = 1; // external uuid for the task run diff --git a/api-contracts/v1/shared/trigger.proto b/api-contracts/v1/shared/trigger.proto new file mode 100644 index 000000000..ad8913a43 --- /dev/null +++ b/api-contracts/v1/shared/trigger.proto @@ -0,0 +1,77 @@ +syntax = "proto3"; + +option go_package = "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1"; + +package v1; + +enum WorkerLabelComparator { + EQUAL = 0; + NOT_EQUAL = 1; + GREATER_THAN = 2; + GREATER_THAN_OR_EQUAL = 3; + LESS_THAN = 4; + LESS_THAN_OR_EQUAL = 5; +} + 
+message DesiredWorkerLabels { + // value of the affinity + optional string str_value = 1; + optional int32 int_value = 2; + + /** + * (optional) Specifies whether the affinity setting is required. + * If required, the worker will not accept actions that do not have a truthy affinity setting. + * + * Defaults to false. + */ + optional bool required = 3; + + /** + * (optional) Specifies the comparator for the affinity setting. + * If not set, the default is EQUAL. + */ + optional WorkerLabelComparator comparator = 4; + + /** + * (optional) Specifies the weight of the affinity setting. + * If not set, the default is 100. + */ + optional int32 weight = 5; +} + + +message TriggerWorkflowRequest { + string name = 1; + + // (optional) the input data for the workflow + string input = 2; + + // (optional) the parent workflow run id + optional string parent_id = 3; + + // (optional) the parent task external run id + optional string parent_task_run_external_id = 4; + + // (optional) the index of the child workflow. if this is set, matches on the index or the + // child key will return an existing workflow run if the parent id, parent task run id, and + // child index/key match an existing workflow run. + optional int32 child_index = 5; + + // (optional) the key for the child. if this is set, matches on the index or the + // child key will return an existing workflow run if the parent id, parent task run id, and + // child index/key match an existing workflow run. 
+ optional string child_key = 6; + + // (optional) additional metadata for the workflow + optional string additional_metadata = 7; + + // (optional) desired worker id for the workflow run, + // requires the workflow definition to have a sticky strategy + optional string desired_worker_id = 8; + + // (optional) override for the priority of the workflow tasks, will set all tasks to this priority + optional int32 priority = 9; + + // (optional) the desired worker labels for the workflow run, which will be used to determine which workers can pick up the workflow's tasks. if not set, defaults to an empty set of labels, which means any worker can pick up the tasks. + map desired_worker_labels = 10; +} diff --git a/api-contracts/v1/workflows.proto b/api-contracts/v1/workflows.proto index 159ffd83a..e155664f6 100644 --- a/api-contracts/v1/workflows.proto +++ b/api-contracts/v1/workflows.proto @@ -6,6 +6,7 @@ package v1; import "google/protobuf/timestamp.proto"; import "v1/shared/condition.proto"; +import "v1/shared/trigger.proto"; // AdminService represents a set of RPCs for admin management of tasks, workflows, etc. 
service AdminService { @@ -14,6 +15,7 @@ service AdminService { rpc ReplayTasks(ReplayTasksRequest) returns (ReplayTasksResponse); rpc TriggerWorkflowRun(TriggerWorkflowRunRequest) returns (TriggerWorkflowRunResponse); rpc GetRunDetails(GetRunDetailsRequest) returns (GetRunDetailsResponse); + rpc BranchDurableTask(BranchDurableTaskRequest) returns (BranchDurableTaskResponse); } message CancelTasksRequest { @@ -54,6 +56,18 @@ message TriggerWorkflowRunResponse { string external_id = 1; } +message BranchDurableTaskRequest { + string task_external_id = 1; // (required) the external id (uuid) of the durable task + int64 node_id = 2; // (required) the node id to branch from + int64 branch_id = 3; // (required) the branch id to branch from +} + +message BranchDurableTaskResponse { + string task_external_id = 1; // the external id of the durable task + int64 node_id = 2; // the node id of the new entry + int64 branch_id = 3; // the branch id of the new entry +} + enum StickyStrategy { SOFT = 0; HARD = 1; @@ -75,6 +89,7 @@ enum RunStatus { COMPLETED = 2; FAILED = 3; CANCELLED = 4; + EVICTED = 5; } @@ -119,40 +134,6 @@ message Concurrency { optional ConcurrencyLimitStrategy limit_strategy = 3; // (optional) the strategy to use when the concurrency limit is reached, default CANCEL_IN_PROGRESS } -enum WorkerLabelComparator { - EQUAL = 0; - NOT_EQUAL = 1; - GREATER_THAN = 2; - GREATER_THAN_OR_EQUAL = 3; - LESS_THAN = 4; - LESS_THAN_OR_EQUAL = 5; -} - -message DesiredWorkerLabels { - // value of the affinity - optional string str_value = 1; - optional int32 int_value = 2; - - /** - * (optional) Specifies whether the affinity setting is required. - * If required, the worker will not accept actions that do not have a truthy affinity setting. - * - * Defaults to false. - */ - optional bool required = 3; - - /** - * (optional) Specifies the comparator for the affinity setting. - * If not set, the default is EQUAL. 
- */ - optional WorkerLabelComparator comparator = 4; - - /** - * (optional) Specifies the weight of the affinity setting. - * If not set, the default is 100. - */ - optional int32 weight = 5; -} // CreateTaskOpts represents options to create a task. message CreateTaskOpts { @@ -198,6 +179,7 @@ message TaskRunDetail { optional string error = 3; // (optional) error message from the task run, if any optional bytes output = 4; // (optional) the output payload for the task run string readable_id = 5; // the readable id of the task + bool is_evicted = 6; // whether the task has been evicted from a worker (status will be RUNNING) } message GetRunDetailsResponse { @@ -206,4 +188,5 @@ message GetRunDetailsResponse { map task_runs = 3; // map of task run external ids to their details bool done = 4; // indicates if the workflow run is done bytes additional_metadata = 5; // (optional) additional metadata for the workflow run + bool is_evicted = 6; // whether any task in this run has been evicted } diff --git a/api-contracts/workflows/workflows.proto b/api-contracts/workflows/workflows.proto index b5a160efc..48c3fb902 100644 --- a/api-contracts/workflows/workflows.proto +++ b/api-contracts/workflows/workflows.proto @@ -3,12 +3,13 @@ syntax = "proto3"; option go_package = "github.com/hatchet-dev/hatchet/internal/services/admin/contracts"; import "google/protobuf/timestamp.proto"; +import "v1/shared/trigger.proto"; // WorkflowService represents a set of RPCs for managing workflows. 
service WorkflowService { rpc PutWorkflow(PutWorkflowRequest) returns (WorkflowVersion); rpc ScheduleWorkflow(ScheduleWorkflowRequest) returns (WorkflowVersion); - rpc TriggerWorkflow(TriggerWorkflowRequest) returns (TriggerWorkflowResponse); + rpc TriggerWorkflow(v1.TriggerWorkflowRequest) returns (TriggerWorkflowResponse); rpc BulkTriggerWorkflow(BulkTriggerWorkflowRequest) returns (BulkTriggerWorkflowResponse); rpc PutRateLimit(PutRateLimitRequest) returns (PutRateLimitResponse); } @@ -70,41 +71,6 @@ message CreateWorkflowJobOpts { repeated CreateWorkflowStepOpts steps = 4; // (required) the job tasks } -enum WorkerLabelComparator { - EQUAL = 0; - NOT_EQUAL = 1; - GREATER_THAN = 2; - GREATER_THAN_OR_EQUAL = 3; - LESS_THAN = 4; - LESS_THAN_OR_EQUAL = 5; -} - -message DesiredWorkerLabels { - // value of the affinity - optional string str_value = 1; - optional int32 int_value = 2; - - /** - * (optional) Specifies whether the affinity setting is required. - * If required, the worker will not accept actions that do not have a truthy affinity setting. - * - * Defaults to false. - */ - optional bool required = 3; - - /** - * (optional) Specifies the comparator for the affinity setting. - * If not set, the default is EQUAL. - */ - optional WorkerLabelComparator comparator = 4; - - /** - * (optional) Specifies the weight of the affinity setting. - * If not set, the default is 100. - */ - optional int32 weight = 5; -} - // CreateWorkflowStepOpts represents options to create a workflow task. 
message CreateWorkflowStepOpts { string readable_id = 1; // (required) the task name @@ -115,7 +81,7 @@ message CreateWorkflowStepOpts { string user_data = 6; // (optional) the custom task user data, assuming string representation of JSON int32 retries = 7; // (optional) the number of retries for the task, default 0 repeated CreateStepRateLimit rate_limits = 8; // (optional) the rate limits for the task - map worker_labels = 9; // (optional) the desired worker affinity state for the task + map worker_labels = 9; // (optional) the desired worker affinity state for the task optional float backoff_factor = 10; // (optional) the retry backoff factor for the task optional int32 backoff_max_seconds = 11; // (optional) the maximum backoff time for the task } @@ -192,48 +158,13 @@ message WorkflowTriggerCronRef { } message BulkTriggerWorkflowRequest { - repeated TriggerWorkflowRequest workflows = 1; + repeated v1.TriggerWorkflowRequest workflows = 1; } message BulkTriggerWorkflowResponse { repeated string workflow_run_ids = 1; } -message TriggerWorkflowRequest { - string name = 1; - - // (optional) the input data for the workflow - string input = 2; - - // (optional) the parent workflow run id - optional string parent_id = 3; - - // (optional) the parent task external run id - optional string parent_task_run_external_id = 4; - - // (optional) the index of the child workflow. if this is set, matches on the index or the - // child key will return an existing workflow run if the parent id, parent task run id, and - // child index/key match an existing workflow run. - optional int32 child_index = 5; - - // (optional) the key for the child. if this is set, matches on the index or the - // child key will return an existing workflow run if the parent id, parent task run id, and - // child index/key match an existing workflow run. 
- optional string child_key = 6; - - // (optional) additional metadata for the workflow - optional string additional_metadata = 7; - - // (optional) desired worker id for the workflow run, - // requires the workflow definition to have a sticky strategy - optional string desired_worker_id = 8; - - // (optional) override for the priority of the workflow tasks, will set all tasks to this priority - optional int32 priority = 9; - - map desired_worker_labels = 10; // (optional) override for the desired worker labels for the workflow tasks, used for routing to specific workers (or worker pools) -} - message TriggerWorkflowResponse { string workflow_run_id = 1; } diff --git a/api/v1/server/handlers/v1/tasks/get_metrics.go b/api/v1/server/handlers/v1/tasks/get_metrics.go index ecf0e6b46..b798bbe17 100644 --- a/api/v1/server/handlers/v1/tasks/get_metrics.go +++ b/api/v1/server/handlers/v1/tasks/get_metrics.go @@ -59,7 +59,7 @@ func (t *TasksService) V1TaskListStatusMetrics(ctx echo.Context, request gen.V1T return nil, err } - result := transformers.ToTaskRunMetrics(&metrics) + result := transformers.StatusToTaskRunMetrics(&metrics) // Search for api errors to see how we handle errors in other cases return gen.V1TaskListStatusMetrics200JSONResponse( diff --git a/api/v1/server/handlers/v1/tasks/restore.go b/api/v1/server/handlers/v1/tasks/restore.go new file mode 100644 index 000000000..752c754b5 --- /dev/null +++ b/api/v1/server/handlers/v1/tasks/restore.go @@ -0,0 +1,29 @@ +package tasks + +import ( + "github.com/labstack/echo/v4" + + "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" + "github.com/hatchet-dev/hatchet/internal/msgqueue" + tasktypes "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes/v1" + "github.com/hatchet-dev/hatchet/pkg/analytics" + "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" +) + +func (t *TasksService) V1TaskRestore(ctx echo.Context, request gen.V1TaskRestoreRequestObject) (gen.V1TaskRestoreResponseObject, error) { + 
tenant := ctx.Get("tenant").(*sqlcv1.Tenant) + + msg, err := tasktypes.DurableRestoreTaskMessage(tenant.ID, request.Task, "Restore via REST API") + if err != nil { + return nil, err + } + + err = t.config.MessageQueueV1.SendMessage(ctx.Request().Context(), msgqueue.TASK_PROCESSING_QUEUE, msg) + if err != nil { + return nil, err + } + + t.config.Analytics.Count(ctx.Request().Context(), analytics.DurableTask, analytics.Restore) + + return gen.V1TaskRestore200JSONResponse{Requeued: true}, nil +} diff --git a/api/v1/server/handlers/v1/workflow-runs/branch_durable_task.go b/api/v1/server/handlers/v1/workflow-runs/branch_durable_task.go new file mode 100644 index 000000000..0978e33c1 --- /dev/null +++ b/api/v1/server/handlers/v1/workflow-runs/branch_durable_task.go @@ -0,0 +1,47 @@ +package workflowruns + +import ( + "github.com/labstack/echo/v4" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors" + "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" + contracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1" + "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" +) + +func (t *V1WorkflowRunsService) V1DurableTaskBranch(ctx echo.Context, request gen.V1DurableTaskBranchRequestObject) (gen.V1DurableTaskBranchResponseObject, error) { + tenant := ctx.Get("tenant").(*sqlcv1.Tenant) + + grpcReq := &contracts.BranchDurableTaskRequest{ + TaskExternalId: request.Body.TaskExternalId.String(), + NodeId: request.Body.NodeId, + BranchId: request.Body.BranchId, + } + + resp, err := t.proxyBranchDurableTask.Do( + ctx.Request().Context(), + tenant, + grpcReq, + ) + + if err != nil { + if e, ok := status.FromError(err); ok { + switch e.Code() { + case codes.InvalidArgument: + return gen.V1DurableTaskBranch400JSONResponse( + apierrors.NewAPIErrors(e.Message()), + ), nil + } + } + + return nil, err + } + + return gen.V1DurableTaskBranch200JSONResponse{ + TaskExternalId: 
request.Body.TaskExternalId, + NodeId: resp.NodeId, + BranchId: resp.BranchId, + }, nil +} diff --git a/api/v1/server/handlers/v1/workflow-runs/get-status.go b/api/v1/server/handlers/v1/workflow-runs/get-status.go index b5e1b2072..faa025352 100644 --- a/api/v1/server/handlers/v1/workflow-runs/get-status.go +++ b/api/v1/server/handlers/v1/workflow-runs/get-status.go @@ -5,12 +5,16 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" v1 "github.com/hatchet-dev/hatchet/pkg/repository" + "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" ) func (t *V1WorkflowRunsService) V1WorkflowRunGetStatus(ctx echo.Context, request gen.V1WorkflowRunGetStatusRequestObject) (gen.V1WorkflowRunGetStatusResponseObject, error) { maybeWorkflowRun := ctx.Get("v1-workflow-run").(*v1.V1WorkflowRunPopulator) - return gen.V1WorkflowRunGetStatus200JSONResponse( - gen.V1TaskStatus(maybeWorkflowRun.WorkflowRun.ReadableStatus), - ), nil + status := gen.V1TaskStatus(maybeWorkflowRun.WorkflowRun.ReadableStatus) + if maybeWorkflowRun.WorkflowRun.ReadableStatus == sqlcv1.V1ReadableStatusOlapEVICTED { + status = gen.V1TaskStatusRUNNING + } + + return gen.V1WorkflowRunGetStatus200JSONResponse(status), nil } diff --git a/api/v1/server/handlers/v1/workflow-runs/get.go b/api/v1/server/handlers/v1/workflow-runs/get.go index a43390822..dc01f0631 100644 --- a/api/v1/server/handlers/v1/workflow-runs/get.go +++ b/api/v1/server/handlers/v1/workflow-runs/get.go @@ -30,7 +30,6 @@ func (t *V1WorkflowRunsService) V1WorkflowRunGet(ctx echo.Context, request gen.V return nil, err } - // Search for api errors to see how we handle errors in other cases return gen.V1WorkflowRunGet200JSONResponse( *details, ), nil diff --git a/api/v1/server/handlers/v1/workflow-runs/list.go b/api/v1/server/handlers/v1/workflow-runs/list.go index ce1d171ab..b28384839 100644 --- a/api/v1/server/handlers/v1/workflow-runs/list.go +++ b/api/v1/server/handlers/v1/workflow-runs/list.go @@ -15,29 +15,102 @@ import ( transformers 
"github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers/v1" ) +func allOlapStatuses(runningFilter *gen.V1RunningFilter) []sqlcv1.V1ReadableStatusOlap { + statuses := []sqlcv1.V1ReadableStatusOlap{ + sqlcv1.V1ReadableStatusOlapQUEUED, + sqlcv1.V1ReadableStatusOlapFAILED, + sqlcv1.V1ReadableStatusOlapCOMPLETED, + sqlcv1.V1ReadableStatusOlapCANCELLED, + } + + rf := gen.ALL + if runningFilter != nil { + rf = *runningFilter + } + switch rf { + case gen.EVICTED: + statuses = append(statuses, sqlcv1.V1ReadableStatusOlapEVICTED) + case gen.ONWORKER: + statuses = append(statuses, sqlcv1.V1ReadableStatusOlapRUNNING) + default: + statuses = append(statuses, sqlcv1.V1ReadableStatusOlapRUNNING, sqlcv1.V1ReadableStatusOlapEVICTED) + } + + return statuses +} + +var taskStatusToOlapStatus = map[gen.V1TaskStatus]sqlcv1.V1ReadableStatusOlap{ + gen.V1TaskStatusQUEUED: sqlcv1.V1ReadableStatusOlapQUEUED, + gen.V1TaskStatusRUNNING: sqlcv1.V1ReadableStatusOlapRUNNING, + gen.V1TaskStatusFAILED: sqlcv1.V1ReadableStatusOlapFAILED, + gen.V1TaskStatusCOMPLETED: sqlcv1.V1ReadableStatusOlapCOMPLETED, + gen.V1TaskStatusCANCELLED: sqlcv1.V1ReadableStatusOlapCANCELLED, +} + +func normalizeWorkflowRunStatuses(statuses []gen.V1TaskStatus, runningFilter *gen.V1RunningFilter) []sqlcv1.V1ReadableStatusOlap { + normalized := make([]sqlcv1.V1ReadableStatusOlap, 0, len(statuses)) + seen := make(map[sqlcv1.V1ReadableStatusOlap]struct{}, len(statuses)) + + for _, status := range statuses { + if status == gen.V1TaskStatusRUNNING { + rf := gen.ALL + if runningFilter != nil { + rf = *runningFilter + } + switch rf { + case gen.EVICTED: + if _, exists := seen[sqlcv1.V1ReadableStatusOlapEVICTED]; !exists { + seen[sqlcv1.V1ReadableStatusOlapEVICTED] = struct{}{} + normalized = append(normalized, sqlcv1.V1ReadableStatusOlapEVICTED) + } + case gen.ONWORKER: + if _, exists := seen[sqlcv1.V1ReadableStatusOlapRUNNING]; !exists { + seen[sqlcv1.V1ReadableStatusOlapRUNNING] = struct{}{} + normalized = 
append(normalized, sqlcv1.V1ReadableStatusOlapRUNNING) + } + default: + if _, exists := seen[sqlcv1.V1ReadableStatusOlapRUNNING]; !exists { + seen[sqlcv1.V1ReadableStatusOlapRUNNING] = struct{}{} + normalized = append(normalized, sqlcv1.V1ReadableStatusOlapRUNNING) + } + if _, exists := seen[sqlcv1.V1ReadableStatusOlapEVICTED]; !exists { + seen[sqlcv1.V1ReadableStatusOlapEVICTED] = struct{}{} + normalized = append(normalized, sqlcv1.V1ReadableStatusOlapEVICTED) + } + } + continue + } + + mapped, ok := taskStatusToOlapStatus[status] + if !ok { + continue + } + + if _, exists := seen[mapped]; exists { + continue + } + + seen[mapped] = struct{}{} + normalized = append(normalized, mapped) + } + + return normalized +} + func (t *V1WorkflowRunsService) WithDags(ctx context.Context, request gen.V1WorkflowRunListRequestObject, tenantId uuid.UUID) (gen.V1WorkflowRunListResponseObject, error) { ctx, span := telemetry.NewSpan(ctx, "v1-workflow-runs-list-with-dags-tasks") defer span.End() var ( - statuses = []sqlcv1.V1ReadableStatusOlap{ - sqlcv1.V1ReadableStatusOlapQUEUED, - sqlcv1.V1ReadableStatusOlapRUNNING, - sqlcv1.V1ReadableStatusOlapFAILED, - sqlcv1.V1ReadableStatusOlapCOMPLETED, - sqlcv1.V1ReadableStatusOlapCANCELLED, - } - since = request.Params.Since - limit int64 = 50 - offset int64 + statuses = allOlapStatuses(request.Params.RunningFilter) + since = request.Params.Since + limit int64 = 50 + offset int64 ) if request.Params.Statuses != nil { if len(*request.Params.Statuses) > 0 { - statuses = []sqlcv1.V1ReadableStatusOlap{} - for _, status := range *request.Params.Statuses { - statuses = append(statuses, sqlcv1.V1ReadableStatusOlap(status)) - } + statuses = normalizeWorkflowRunStatuses(*request.Params.Statuses, request.Params.RunningFilter) } } @@ -176,13 +249,7 @@ func (t *V1WorkflowRunsService) OnlyTasks(ctx context.Context, request gen.V1Wor defer span.End() var ( - statuses = []sqlcv1.V1ReadableStatusOlap{ - sqlcv1.V1ReadableStatusOlapQUEUED, - 
sqlcv1.V1ReadableStatusOlapRUNNING, - sqlcv1.V1ReadableStatusOlapFAILED, - sqlcv1.V1ReadableStatusOlapCOMPLETED, - sqlcv1.V1ReadableStatusOlapCANCELLED, - } + statuses = allOlapStatuses(request.Params.RunningFilter) since = request.Params.Since workflowIds = []uuid.UUID{} limit int64 = 50 @@ -191,10 +258,7 @@ func (t *V1WorkflowRunsService) OnlyTasks(ctx context.Context, request gen.V1Wor if request.Params.Statuses != nil { if len(*request.Params.Statuses) > 0 { - statuses = []sqlcv1.V1ReadableStatusOlap{} - for _, status := range *request.Params.Statuses { - statuses = append(statuses, sqlcv1.V1ReadableStatusOlap(status)) - } + statuses = normalizeWorkflowRunStatuses(*request.Params.Statuses, request.Params.RunningFilter) } } @@ -330,23 +394,14 @@ func (t *V1WorkflowRunsService) V1WorkflowRunExternalIdsList(ctx echo.Context, r defer span.End() var ( - statuses = []sqlcv1.V1ReadableStatusOlap{ - sqlcv1.V1ReadableStatusOlapQUEUED, - sqlcv1.V1ReadableStatusOlapRUNNING, - sqlcv1.V1ReadableStatusOlapFAILED, - sqlcv1.V1ReadableStatusOlapCOMPLETED, - sqlcv1.V1ReadableStatusOlapCANCELLED, - } + statuses = allOlapStatuses(request.Params.RunningFilter) since = request.Params.Since workflowIds = []uuid.UUID{} ) if request.Params.Statuses != nil { if len(*request.Params.Statuses) > 0 { - statuses = []sqlcv1.V1ReadableStatusOlap{} - for _, status := range *request.Params.Statuses { - statuses = append(statuses, sqlcv1.V1ReadableStatusOlap(status)) - } + statuses = normalizeWorkflowRunStatuses(*request.Params.Statuses, request.Params.RunningFilter) } } diff --git a/api/v1/server/handlers/v1/workflow-runs/service.go b/api/v1/server/handlers/v1/workflow-runs/service.go index 45715009e..53f330fa6 100644 --- a/api/v1/server/handlers/v1/workflow-runs/service.go +++ b/api/v1/server/handlers/v1/workflow-runs/service.go @@ -11,8 +11,9 @@ import ( ) type V1WorkflowRunsService struct { - config *server.ServerConfig - proxyTrigger *proxy.Proxy[admincontracts.TriggerWorkflowRunRequest, 
admincontracts.TriggerWorkflowRunResponse] + config *server.ServerConfig + proxyTrigger *proxy.Proxy[admincontracts.TriggerWorkflowRunRequest, admincontracts.TriggerWorkflowRunResponse] + proxyBranchDurableTask *proxy.Proxy[admincontracts.BranchDurableTaskRequest, admincontracts.BranchDurableTaskResponse] } func NewV1WorkflowRunsService(config *server.ServerConfig) *V1WorkflowRunsService { @@ -20,8 +21,13 @@ func NewV1WorkflowRunsService(config *server.ServerConfig) *V1WorkflowRunsServic return cli.Admin().TriggerWorkflowRun(ctx, in) }) + proxyBranchDurableTask := proxy.NewProxy(config, func(ctx context.Context, cli *client.GRPCClient, in *admincontracts.BranchDurableTaskRequest) (*admincontracts.BranchDurableTaskResponse, error) { + return cli.Admin().BranchDurableTask(ctx, in) + }) + return &V1WorkflowRunsService{ - config: config, - proxyTrigger: proxyTrigger, + config: config, + proxyTrigger: proxyTrigger, + proxyBranchDurableTask: proxyBranchDurableTask, } } diff --git a/api/v1/server/oas/gen/openapi.gen.go b/api/v1/server/oas/gen/openapi.gen.go index 5f43ce313..4493154ec 100644 --- a/api/v1/server/oas/gen/openapi.gen.go +++ b/api/v1/server/oas/gen/openapi.gen.go @@ -236,6 +236,13 @@ const ( V1LogLineOrderByDirectionDESC V1LogLineOrderByDirection = "DESC" ) +// Defines values for V1RunningFilter. +const ( + ALL V1RunningFilter = "ALL" + EVICTED V1RunningFilter = "EVICTED" + ONWORKER V1RunningFilter = "ON_WORKER" +) + // Defines values for V1TaskEventType. 
const ( V1TaskEventTypeACKNOWLEDGED V1TaskEventType = "ACKNOWLEDGED" @@ -243,6 +250,8 @@ const ( V1TaskEventTypeCANCELLED V1TaskEventType = "CANCELLED" V1TaskEventTypeCOULDNOTSENDTOWORKER V1TaskEventType = "COULD_NOT_SEND_TO_WORKER" V1TaskEventTypeCREATED V1TaskEventType = "CREATED" + V1TaskEventTypeDURABLEEVICTED V1TaskEventType = "DURABLE_EVICTED" + V1TaskEventTypeDURABLERESTORING V1TaskEventType = "DURABLE_RESTORING" V1TaskEventTypeFAILED V1TaskEventType = "FAILED" V1TaskEventTypeFINISHED V1TaskEventType = "FINISHED" V1TaskEventTypeQUEUED V1TaskEventType = "QUEUED" @@ -1389,6 +1398,30 @@ type UserTenantPublic struct { Name *string `json:"name,omitempty"` } +// V1BranchDurableTaskRequest defines model for V1BranchDurableTaskRequest. +type V1BranchDurableTaskRequest struct { + // BranchId The branch id to replay from. + BranchId int64 `json:"branchId"` + + // NodeId The node id to replay from. + NodeId int64 `json:"nodeId"` + + // TaskExternalId The external id of the durable task to branch. + TaskExternalId openapi_types.UUID `json:"taskExternalId"` +} + +// V1BranchDurableTaskResponse defines model for V1BranchDurableTaskResponse. +type V1BranchDurableTaskResponse struct { + // BranchId The branch id of the new entry. + BranchId int64 `json:"branchId"` + + // NodeId The node id of the new entry. + NodeId int64 `json:"nodeId"` + + // TaskExternalId The external id of the durable task. + TaskExternalId openapi_types.UUID `json:"taskExternalId"` +} + // V1CELDebugRequest defines model for V1CELDebugRequest. type V1CELDebugRequest struct { // AdditionalMetadata Additional metadata, which simulates metadata that could be sent with an event or a workflow run @@ -1681,6 +1714,23 @@ type V1ReplayedTasks struct { Ids *[]openapi_types.UUID `json:"ids,omitempty"` } +// V1RestoreTaskResponse defines model for V1RestoreTaskResponse. +type V1RestoreTaskResponse struct { + Requeued bool `json:"requeued"` +} + +// V1RunningDetailCount defines model for V1RunningDetailCount. 
+type V1RunningDetailCount struct { + // Evicted The number of evicted tasks within the RUNNING status bucket. + Evicted int `json:"evicted"` + + // OnWorker The number of tasks currently on a worker within the RUNNING status bucket. + OnWorker int `json:"onWorker"` +} + +// V1RunningFilter defines model for V1RunningFilter. +type V1RunningFilter string + // V1TaskEvent defines model for V1TaskEvent. type V1TaskEvent struct { // Attempt The attempt number of the task. @@ -1731,8 +1781,9 @@ type V1TaskPointMetrics struct { // V1TaskRunMetric defines model for V1TaskRunMetric. type V1TaskRunMetric struct { - Count int `json:"count"` - Status V1TaskStatus `json:"status"` + Count int `json:"count"` + RunningDetailCount *V1RunningDetailCount `json:"runningDetailCount,omitempty"` + Status V1TaskStatus `json:"status"` } // V1TaskRunMetrics defines model for V1TaskRunMetrics. @@ -1771,8 +1822,11 @@ type V1TaskSummary struct { FinishedAt *time.Time `json:"finishedAt,omitempty"` // Input The input of the task run. - Input openapi.NonNullableJSON `json:"input"` - Metadata APIResourceMeta `json:"metadata"` + Input openapi.NonNullableJSON `json:"input"` + + // IsEvicted Whether the task has been evicted from a worker (still counts as RUNNING). + IsEvicted *bool `json:"isEvicted,omitempty"` + Metadata APIResourceMeta `json:"metadata"` // NumSpawnedChildren The number of spawned children tasks NumSpawnedChildren int `json:"numSpawnedChildren"` @@ -1833,8 +1887,11 @@ type V1TaskTiming struct { Depth int `json:"depth"` // FinishedAt The timestamp the task run finished. - FinishedAt *time.Time `json:"finishedAt,omitempty"` - Metadata APIResourceMeta `json:"metadata"` + FinishedAt *time.Time `json:"finishedAt,omitempty"` + + // IsEvicted Whether the task has been evicted from a worker (still counts as RUNNING). + IsEvicted *bool `json:"isEvicted,omitempty"` + Metadata APIResourceMeta `json:"metadata"` // ParentTaskExternalId The external ID of the parent task. 
ParentTaskExternalId *openapi_types.UUID `json:"parentTaskExternalId,omitempty"` @@ -2627,6 +2684,9 @@ type V1WorkflowRunListParams struct { // IncludePayloads A flag for whether or not to include the input and output payloads in the response. Defaults to `true` if unset. IncludePayloads *bool `form:"include_payloads,omitempty" json:"include_payloads,omitempty"` + + // RunningFilter Filter within the RUNNING status bucket. ALL returns both on-worker and evicted tasks, ON_WORKER returns only tasks running on a worker, EVICTED returns only evicted tasks. Defaults to ALL. + RunningFilter *V1RunningFilter `form:"running_filter,omitempty" json:"running_filter,omitempty"` } // V1WorkflowRunDisplayNamesListParams defines parameters for V1WorkflowRunDisplayNamesList. @@ -2651,6 +2711,9 @@ type V1WorkflowRunExternalIdsListParams struct { // WorkflowIds The workflow ids to find runs for WorkflowIds *[]openapi_types.UUID `form:"workflow_ids,omitempty" json:"workflow_ids,omitempty"` + + // RunningFilter Filter within the RUNNING status bucket. ALL returns both on-worker and evicted tasks, ON_WORKER returns only tasks running on a worker, EVICTED returns only evicted tasks. Defaults to ALL. + RunningFilter *V1RunningFilter `form:"running_filter,omitempty" json:"running_filter,omitempty"` } // V1WorkflowRunTaskEventsListParams defines parameters for V1WorkflowRunTaskEventsList. @@ -2941,6 +3004,9 @@ type AlertEmailGroupUpdateJSONRequestBody = UpdateTenantAlertEmailGroupRequest // V1CelDebugJSONRequestBody defines body for V1CelDebug for application/json ContentType. type V1CelDebugJSONRequestBody = V1CELDebugRequest +// V1DurableTaskBranchJSONRequestBody defines body for V1DurableTaskBranch for application/json ContentType. +type V1DurableTaskBranchJSONRequestBody = V1BranchDurableTaskRequest + // V1FilterCreateJSONRequestBody defines body for V1FilterCreate for application/json ContentType. 
type V1FilterCreateJSONRequestBody = V1CreateFilterRequest @@ -3193,12 +3259,18 @@ type ServerInterface interface { // List log lines // (GET /api/v1/stable/tasks/{task}/logs) V1LogLineList(ctx echo.Context, task openapi_types.UUID, params V1LogLineListParams) error + // Restore a task + // (POST /api/v1/stable/tasks/{task}/restore) + V1TaskRestore(ctx echo.Context, task openapi_types.UUID) error // List events for a task // (GET /api/v1/stable/tasks/{task}/task-events) V1TaskEventList(ctx echo.Context, task openapi_types.UUID, params V1TaskEventListParams) error // Debug a CEL expression // (POST /api/v1/stable/tenants/{tenant}/cel/debug) V1CelDebug(ctx echo.Context, tenant openapi_types.UUID) error + // Branch durable task + // (POST /api/v1/stable/tenants/{tenant}/durable-tasks/branch) + V1DurableTaskBranch(ctx echo.Context, tenant openapi_types.UUID) error // List events // (GET /api/v1/stable/tenants/{tenant}/events) V1EventList(ctx echo.Context, tenant openapi_types.UUID, params V1EventListParams) error @@ -3913,6 +3985,26 @@ func (w *ServerInterfaceWrapper) V1LogLineList(ctx echo.Context) error { return err } +// V1TaskRestore converts echo context to params. +func (w *ServerInterfaceWrapper) V1TaskRestore(ctx echo.Context) error { + var err error + // ------------- Path parameter "task" ------------- + var task openapi_types.UUID + + err = runtime.BindStyledParameterWithLocation("simple", false, "task", runtime.ParamLocationPath, ctx.Param("task"), &task) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter task: %s", err)) + } + + ctx.Set(BearerAuthScopes, []string{}) + + ctx.Set(CookieAuthScopes, []string{}) + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.V1TaskRestore(ctx, task) + return err +} + // V1TaskEventList converts echo context to params. 
func (w *ServerInterfaceWrapper) V1TaskEventList(ctx echo.Context) error { var err error @@ -3969,6 +4061,26 @@ func (w *ServerInterfaceWrapper) V1CelDebug(ctx echo.Context) error { return err } +// V1DurableTaskBranch converts echo context to params. +func (w *ServerInterfaceWrapper) V1DurableTaskBranch(ctx echo.Context) error { + var err error + // ------------- Path parameter "tenant" ------------- + var tenant openapi_types.UUID + + err = runtime.BindStyledParameterWithLocation("simple", false, "tenant", runtime.ParamLocationPath, ctx.Param("tenant"), &tenant) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter tenant: %s", err)) + } + + ctx.Set(BearerAuthScopes, []string{}) + + ctx.Set(CookieAuthScopes, []string{}) + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.V1DurableTaskBranch(ctx, tenant) + return err +} + // V1EventList converts echo context to params. func (w *ServerInterfaceWrapper) V1EventList(ctx echo.Context) error { var err error @@ -4682,6 +4794,13 @@ func (w *ServerInterfaceWrapper) V1WorkflowRunList(ctx echo.Context) error { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter include_payloads: %s", err)) } + // ------------- Optional query parameter "running_filter" ------------- + + err = runtime.BindQueryParameter("form", true, false, "running_filter", ctx.QueryParams(), ¶ms.RunningFilter) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter running_filter: %s", err)) + } + // Invoke the callback with all the unmarshaled arguments err = w.Handler.V1WorkflowRunList(ctx, tenant, params) return err @@ -4768,6 +4887,13 @@ func (w *ServerInterfaceWrapper) V1WorkflowRunExternalIdsList(ctx echo.Context) return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter workflow_ids: %s", err)) } + // ------------- Optional query parameter 
"running_filter" ------------- + + err = runtime.BindQueryParameter("form", true, false, "running_filter", ctx.QueryParams(), ¶ms.RunningFilter) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter running_filter: %s", err)) + } + // Invoke the callback with all the unmarshaled arguments err = w.Handler.V1WorkflowRunExternalIdsList(ctx, tenant, params) return err @@ -7293,8 +7419,10 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL router.GET(baseURL+"/api/v1/stable/dags/tasks", wrapper.V1DagListTasks) router.GET(baseURL+"/api/v1/stable/tasks/:task", wrapper.V1TaskGet) router.GET(baseURL+"/api/v1/stable/tasks/:task/logs", wrapper.V1LogLineList) + router.POST(baseURL+"/api/v1/stable/tasks/:task/restore", wrapper.V1TaskRestore) router.GET(baseURL+"/api/v1/stable/tasks/:task/task-events", wrapper.V1TaskEventList) router.POST(baseURL+"/api/v1/stable/tenants/:tenant/cel/debug", wrapper.V1CelDebug) + router.POST(baseURL+"/api/v1/stable/tenants/:tenant/durable-tasks/branch", wrapper.V1DurableTaskBranch) router.GET(baseURL+"/api/v1/stable/tenants/:tenant/events", wrapper.V1EventList) router.GET(baseURL+"/api/v1/stable/tenants/:tenant/events/keys", wrapper.V1EventKeyList) router.GET(baseURL+"/api/v1/stable/tenants/:tenant/events/:v1-event", wrapper.V1EventGet) @@ -7999,6 +8127,50 @@ func (response V1LogLineList403JSONResponse) VisitV1LogLineListResponse(w http.R return json.NewEncoder(w).Encode(response) } +type V1TaskRestoreRequestObject struct { + Task openapi_types.UUID `json:"task"` +} + +type V1TaskRestoreResponseObject interface { + VisitV1TaskRestoreResponse(w http.ResponseWriter) error +} + +type V1TaskRestore200JSONResponse V1RestoreTaskResponse + +func (response V1TaskRestore200JSONResponse) VisitV1TaskRestoreResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + 
+type V1TaskRestore400JSONResponse APIErrors + +func (response V1TaskRestore400JSONResponse) VisitV1TaskRestoreResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskRestore403JSONResponse APIErrors + +func (response V1TaskRestore403JSONResponse) VisitV1TaskRestoreResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskRestore404JSONResponse APIErrors + +func (response V1TaskRestore404JSONResponse) VisitV1TaskRestoreResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + type V1TaskEventListRequestObject struct { Task openapi_types.UUID `json:"task"` Params V1TaskEventListParams @@ -8089,6 +8261,42 @@ func (response V1CelDebug403JSONResponse) VisitV1CelDebugResponse(w http.Respons return json.NewEncoder(w).Encode(response) } +type V1DurableTaskBranchRequestObject struct { + Tenant openapi_types.UUID `json:"tenant"` + Body *V1DurableTaskBranchJSONRequestBody +} + +type V1DurableTaskBranchResponseObject interface { + VisitV1DurableTaskBranchResponse(w http.ResponseWriter) error +} + +type V1DurableTaskBranch200JSONResponse V1BranchDurableTaskResponse + +func (response V1DurableTaskBranch200JSONResponse) VisitV1DurableTaskBranchResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type V1DurableTaskBranch400JSONResponse APIErrors + +func (response V1DurableTaskBranch400JSONResponse) VisitV1DurableTaskBranchResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type V1DurableTaskBranch403JSONResponse 
APIErrors + +func (response V1DurableTaskBranch403JSONResponse) VisitV1DurableTaskBranchResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + type V1EventListRequestObject struct { Tenant openapi_types.UUID `json:"tenant"` Params V1EventListParams @@ -12714,10 +12922,14 @@ type StrictServerInterface interface { V1LogLineList(ctx echo.Context, request V1LogLineListRequestObject) (V1LogLineListResponseObject, error) + V1TaskRestore(ctx echo.Context, request V1TaskRestoreRequestObject) (V1TaskRestoreResponseObject, error) + V1TaskEventList(ctx echo.Context, request V1TaskEventListRequestObject) (V1TaskEventListResponseObject, error) V1CelDebug(ctx echo.Context, request V1CelDebugRequestObject) (V1CelDebugResponseObject, error) + V1DurableTaskBranch(ctx echo.Context, request V1DurableTaskBranchRequestObject) (V1DurableTaskBranchResponseObject, error) + V1EventList(ctx echo.Context, request V1EventListRequestObject) (V1EventListResponseObject, error) V1EventKeyList(ctx echo.Context, request V1EventKeyListRequestObject) (V1EventKeyListResponseObject, error) @@ -13331,6 +13543,28 @@ func (sh *strictHandler) V1LogLineList(ctx echo.Context, task openapi_types.UUID return nil } +// V1TaskRestore operation +func (sh *strictHandler) V1TaskRestore(ctx echo.Context, task openapi_types.UUID) error { + var request V1TaskRestoreRequestObject + + request.Task = task + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.V1TaskRestore(ctx, request.(V1TaskRestoreRequestObject)) + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(V1TaskRestoreResponseObject); ok { + return validResponse.VisitV1TaskRestoreResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("Unexpected response type: %T", response) + } + return nil +} + // V1TaskEventList 
operation func (sh *strictHandler) V1TaskEventList(ctx echo.Context, task openapi_types.UUID, params V1TaskEventListParams) error { var request V1TaskEventListRequestObject @@ -13382,6 +13616,34 @@ func (sh *strictHandler) V1CelDebug(ctx echo.Context, tenant openapi_types.UUID) return nil } +// V1DurableTaskBranch operation +func (sh *strictHandler) V1DurableTaskBranch(ctx echo.Context, tenant openapi_types.UUID) error { + var request V1DurableTaskBranchRequestObject + + request.Tenant = tenant + + var body V1DurableTaskBranchJSONRequestBody + if err := ctx.Bind(&body); err != nil { + return err + } + request.Body = &body + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.V1DurableTaskBranch(ctx, request.(V1DurableTaskBranchRequestObject)) + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(V1DurableTaskBranchResponseObject); ok { + return validResponse.VisitV1DurableTaskBranchResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("Unexpected response type: %T", response) + } + return nil +} + // V1EventList operation func (sh *strictHandler) V1EventList(ctx echo.Context, tenant openapi_types.UUID, params V1EventListParams) error { var request V1EventListRequestObject @@ -16160,326 +16422,336 @@ func (sh *strictHandler) WorkflowVersionGet(ctx echo.Context, workflow openapi_t // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+y9e3PbOLIo/lVY+v2q7kyV5Fcmc+ak6v6h2EqiiWP7SHJy986mvBAJSxhTJJcA7WhT", - "/u638CJBEiBBvSwlrNracUQ8Go3uRqPRj+8dN1xEYQADgjtvvnewO4cLwP7s3wwHcRzG9O8oDiMYEwTZ", - "Fzf0IP2vB7Ebo4igMOi86QDHTTAJF84HQNw5JA6kvR3WuNuB38Ai8mHnzelvJyfdzn0YLwDpvOkkKCC/", - "/9bpdsgygp03HRQQOINx57mbH748m/Jv5z6MHTJHmM+pTtfpZw0foYBpATEGM5jNikmMghmbNHTxnY+C", - "B92U9HeHhA6ZQ8cL3WQBAwI0AHQddO8g4sBvCBOcA2eGyDyZHrnh4njO8dTz4KP8WwfRPYK+V4aGwsA+", - 
"OWQOiDK5g7ADMA5dBAj0nCdE5gweEEU+csHUz21HJwALDSKeu50Y/jtBMfQ6b/7KTf01bRxO/4YuoTBK", - "WsFlYoHp74jABfvj/4/hfedN5/87zmjvWBDecUp1z+k0II7BsgSSGNcAzSdIQBkW4Pvh0/kcBDN4AzB+", - "CmMNYp/mkMxh7ISxE4TESTCMseOCwHFZR7r5KHYi2V/BJYkTmIIzDUMfgoDCw6eNISBwAgMQkCaTsm5O", - "AJ8cwvpi6xmHwSMifOGWkyHWwwnZV/4zo3aEHRRgAgIXWs8+RrMgiRpMjtEscJIoY6VGUyZkbkFalCz6", - "tOlztxOFmMzDmWWvG9Gadlz6YdCPoqGBK2/od8puzvCCrSbBkPWhXE+piDg4iaIwJjlGPD179dvr3//r", - "jx79o/B/9Pf/Pjk90zKqif77Aid5HmDr0lEFBV3ABT2HDoqd8N6hmIUBQS4TdCrEf3WmACO30+3MwnDm", - "Q8qLKY+XxFiJmU1gD+kJEAMp9gvSJKACrIJrBeWkQ1BpKDo5YcAkt0JXZUJi4lCLG/qFIoQPkcFYlu61", - "4lTIXLmYChl2kxFpQZRF6EOIiYECQ0w+hDOnfzN05rSVCuOckAi/OT4W9H8kvlDi1B0/IEIf4bJ+nge4", - "zE0TzR/uMtIFU9eD99bkO4I4TGIX6sU4l4le37B6ghZQORRjMZbzBLAQpzmp3Tk7OTvrnZ71Tl9NTl+/", - "Ofn9zW9/HP3xxx+vXv/RO3n95uSko6grHiCwRyfQoQoZBALyON0owHQdFDi3t1xA0KFVgKbTs9Pf/jj5", - "r97Zb7/D3m+vwOseOHvt9X47/a/fT71T9/7+v+n8C/DtEgYzyuSvfteAk0TeqmjyASaO6L8NXBX4AdFJ", - "sl1VQTfwxiR8gDrx8C1CMcS6JX+ZQ87+lFgJ7e6I1kfWG7yABHiAk2TNmZGjYKNcmRTkSgrbUX5/z16/", - "rsNhCls3FS8pMrRIdF0YEa4jjOC/E8iFSR6fXCHgmF2POhcoMBNrt/OtF4II9ehlYQaDHvxGYtAjYMag", - "eAQ+ovvSeZOuuJskyOs8lwiJw6tb79vEf+A62OARBsS4ZPgo70JW+qpmyFrNlc/w9bnbOafnkG8B0NDL", - "g9R4O7ILV8K4rcn2WC2IQsiWFAZuEscwcJeXaIHImMSAwNmSn97JgnY471+dDy7vhld3N6Pr96PBeNzp", - "di5G1zd3V4Mvg/Gk0+38z+3gdpD98/3o+vbmbnR9e3VxN7p+O7xS9jiDUpl77IYRVOf8cj36+O7y+kun", - "25n0xx9r+0NC6K86ERNDjLWXUsrObjaGk7XtUiXQo9r0DAaQYsQB9Mh07uNw4RCAHxwURAnBXUcycteB", - "xD3SiSG/iNdKAjXtxzMjglESYP1CFuAbWiQLJ0gWU6qv32dLI85TGD/c++GTEydBXoCigLw6097nsdwS", - "S3D5FtKOBEYjCDyqLemUbgptLL6nhy10aDeK8ac5cuf8kFM3B/Md5vdifgrUSFiBreIGdFWakMvUiSB1", - "bQSQOtoq7fsDXHLdz/MQXTrwb3Ld1T0w2FRKMPEfvtvoZVzUycPXLK/4sTM08IeXxJnJRG4NFCcywg4T", - "9uXNsD8iwgUiAfK7ciK2GP3x2+eHL79xrnX6svG/WiANR2GAYRlrRCo0ZYzlwKoGg49ihuM8DoMvgnUn", - "MZrNYGzcx4zKPilqT2lgNw6DQTXd0iZXYgPKSjMVe9qRoxiFMSLLImkz8SKkU+fNK3Z48b9PyyRfUhDo", - "bF3d4hQ4S6v6mmKw+qzW46xAdGmbVNSnFMhOUmWbM2Tox2IMZTfAg+4SR/uzU8jQPdsmdTPKY8ivUvSm", - 
"4zQ5FsrDsk8MODagc498AilE9ZzAr6MMa9nmja/GinXBuIskjJDbj03suAD/CQNHKvgOpRjnl/7o6le5", - "+vHV2GFjrCPGUk13gYL/fdpdgG//++z172WVNwXWzPXc6Nj3YUwGC4D893GYRGb5TZtgnbD0ESZ0jbyF", - "NG3F9ES0tPussHwPPcIum7G8dgFq3cprLjl8cO1es09yW+laqT7BLxkb2Vu5rm4nDv1a3Yiv5hOk+tiI", - "ttfioyMGq8OKGR/BDAXwM4ylQK+HSTZ+7nZg8IjiMFhAbuau7ztQOlhflLktfBN7wJAYBtMQxB4KZhdC", - "zup1LG5+NsrzbBgulUnoYBLGkD3C6OHO9gb7ycwgBv1ktvmFd8WbEzvxng0mSgaUnpIyTQLbHoRVSNUq", - "FlqJotiAy/bbVJ1oNNcahp0FJPPQqzcTKOj6xLsoxF553K6s+3Q7nFqGnnYOeYer+WzU3GQDwfzaYcxG", - "qhQ03UCF2XOwCsrI6CDdg1o6vUQ6eReBGQrS94aqXbxJW6aKPBPdT03sRSrfWL2L6GhHMWxcDN71by8n", - "HWYX1Zs11AGuYw/Gb5fv5KuyHCaQii8sWV6zkZj2u0u1d02tdQ2+JulLbf0RVmS1MrjDi7wAL77Qi/d7", - "40Ik/Y+SYJwsFiCutfuwrfpS7lbBklxnThfyVW64PBPzm97kRuL88uf4+sqZLgnEv9Yr76nazqb/uB4N", - "yDH2gPnT5ZT5XgK6L1BWgCgkyAWKoStBklIEYLfDFSSz/DBJIAvRM4Ygdufa08hE7+XXQ2Zz1z4iMy0z", - "M3fKhlojp8HAdg+QxdC8VZNxIxh4wh5dNbBo1mTkfycwqYeYt2oybpwEgQXEolmTkXHiuhB69UCnDe1H", - "T6kcVz0NaW6K7NuRehVegcfWOLHMYl15b/oznGoEeZWfHZPniqedOMX+DqdHW3ohLY2JCYzspdeYwEiH", - "2EpVmKAFDBOiX774WLf0x3XV4EdF/ZXXL7Z0nV77ZzgdJUGFdONv4Hbv2mmn1OHT3GQEATZczO5RgPC8", - "2dR/c4qs2lFKtLylYffWILoY4sTXm58xATFpthhMAEmwxXro+cTbyuct8QxnTeJ085tTufsA42oWaLJc", - "RSmtA1k5mAs917828kEkgaS7YOaacbpNUvW4GVxdDK/ed7qd0e3VFf9rfHt+PhhcDC463c67/vCS/cFf", - "rvnfb/vnH6/fvdNqK1SN0/uz2XrBFrtqNltMwl6WsPlpaafKY+qbo9UfKcR5Izx+YXjz0NS6OiiwiYl0", - "ZMaW6QP34QuczsPw4cUXqcCyqSWGs0sUwEbOecw9gn6migSVLPJI9cOZ46MANvHE4h782jnocKJBrZJi", - "6s1baGwSBWypXmtZWEE6w9cMVZfwEfp5w83bWypohlfvrjvdzpf+6KrT7QxGo+uRXqYo46SXJ6v9z0Gg", - "EyTi+8vfPSVZ6aUH/7jG/TM/QsMbqOhccQfVIED11freEe4zdxGj3bNuJ4Df5L9edTtBsmD/wJ03pyfM", - "CpzjrFxnnUun9M6JOBWmE59ZXasUWLT+z/BbeeRXdiNn69J6ooYE+OolljZllh0fYcJfN7L4oRObW5xG", - "Yv0PvcF+giRGrkYeB8nixu6KzehYXrSPTOv9H6tbNR8LccdUdsU2Djiyu07zEcWl+qhT6xCRgZqbpasi", - "RCf/R4BA5k9WRqWVzZa5wDE/Kr2LG8BkBO+Rb3iYZQ7KwoNZHYx5L8esI2ReRFtw82YTfQZ+Am0d52L+", - "1IodFhkjTL5i159Q4IVP+m3fhE25BtGP5nVIaaJZxwJ40HYR/Jt+Cv6NLYPuJQoUj7AMzTyG4z6MXejZ", - 
"en4o9wRlv+R6U6hylPZVpes9OAwzHtMeh+nnNQ7E4hilI5FjU2JNQaV2NOjCgIyV+2zhnYiBZ6Jn/tXR", - "ef+pBogmN9RVLBJrWBO2ZjIQKM1sBqULdNG/u5pH0o3oqndrAUtxdK34hzOECYyhJ2/2muABwz6nvsPI", - "c+J0HB6dhTD7DOMjjc96CXl2viKZr3LVZBYRKkYnzxGkf/08sRQjGPlg+UOFLfAlKWYqbFxZjjtedn1K", - "89cnJzXrLcBtWrXJjKR0tz/CCnY/W/gkdDGVeUz0VbCV3n9Y6/hLRy1YfDQDziAmt7FB87wdXTK/Lhh4", - "zNFTXPqxQ8LtuCCYjsskQP+mupEHA4LuEYxT3VqogyK2j/ujqiGxU+iHwUxCXCtlt+gOa2forXRxHbtz", - "6CU+VChtXZf2LbukdzuEu97b6wlNvNizwb8q6PE2Z/dmoVn0j/H5h8HFLf1RpwymM2/XTXBPHf7Kq8+8", - "/nbh3NeYxDbnDzhKgnPVCNz4MYkDsOuzVAHAZoljK8X9S6nDSzpOZkRR6TNZpt23if9wAX1I4DsWgrGi", - "C2AaQZB6AD7ApcMul04EEE83woM8nOkyn2viAS5P37Cmp9xV7Yz/66xJ2oluJwJxdkXVX50a0g0f8Uvd", - "hWxFatzAYM8Nt9h4fN6ne99M8pWoh0X9FFpptOn1leMhH4rpxgsUiH+e2njdVmPIpCR77Lu34ZUUibhh", - "TiX9UuzSLCkL6lblXKqaQ58TSh8NuBFyL+WJagDyLUs9QSnFZNFYdzPXVf7ykrzpyozczXNurEtVCvqa", - "sqC6SAlM89WZOHObPJMmLNkq32tRtApn7oFpW3M5eF5NKq8SEFEexWT+VjWm6tfhMVyAaB7GcOyHZMO2", - "75xdWe+gyM2Z2A/5E5joYe9QsaIdGquKlCZyjcDIiRO5sHpTg+qEVr9Q5PvSO9N+paWLRoWF2hr0Am9m", - "aOmqtvaCXZ1SjeqZU/almYMggL4JTPHZQZ7+6Q/TwZ0nPrr+UYWPcGW0o8spmD19xUnWsoCBhWn19Nsa", - "S6fdzetmg6+z6L2w3dlZ1yQiUnTn6aKrkKH2fCEwMok7vSvxHPleDPPekLU6L8IXScyyl+pSe4ncf0Li", - "IMzSjEx91Z1CCRLdiisxvwfiZquKV0troz8l/FA+x1SmianJtNuhB1j21C8JkyH2FzrHHR3A6f0zOTl5", - "xUiZ5GK6lNwym3K5NyzZTN4KWnO0Ll2EBXVyn6cKut6Ci32fDKIw5z+mbMSGHPEZh30xvdfUEmWuOz4P", - "k4DowTXf41Z5eM/6VGCoaJvPRRJYOKKLuIm0/eblQJgQE4grigjmGNa/F7YXO2RuPLCBd6nYmTU0SNuY", - "HtrWJE4sZE2TFaddKlbMnQdWN9+mFJiurDJ4QaCuH7tz9AgPUi41fxbYKxET0luivlMF18eQxMsKKbo1", - "flSuZrthiYpbkIIEiUf9jdpE7/tgtMgzoNYpT7QxJEpwzVRgfo329B2UEAgNyUketFiP8ONhPSjdwEco", - "Xydte49lHyu6e4diTMaQ3wDsae8SNO3VMMyMX6FyABZmTjGroEmN++D7W0HM+xLjnyPTWkLORLq0i40G", - "3Avg7ur67sv16ONg1OlmP476k8Hd5fDTcJJ5CQyv3t9Nhp8GF3fXt8w2Nx4P319xP4JJfzRhf/XPP15d", - "f7kcXLzn7gfDq+H4Q94TYTSYjP7BPRVUpwQ69PXt5G40eDcaiD6jgTKJOvf48pq2vBz0x+mYw8HF3dt/", - "3N2O2VJktti70e3VHU8++3HwjzvVN8LQRACqNRHqOEZBqhIIJBY4Gk6G5/3LqtGqnDrEX3ccDZ8GVwXE", - 
"N3D6EH/z1lWRjxOAH/TZTbNEA5UZVUT/BLNR8okEmnTUmY9lm8r7sc0knarRBQQa6Z/mf7XPF1TIGau5", - "IIS+Jx507KQi24fNJ5INCfCtOmtRl2bbKVZsgbHIeDcw5CVMrT+hw1pLE9qC9cJ6CxAIgL8kyMXXEblO", - "SLVNSQw4B9gJIwI9R5gm0kH0c6ybCW/r2eZNueTWTkaXZUNomD6wNqc9gysb/auRlArZLneb5nJLeTzM", - "2S61a94DNUO/F7qsoLOwx4m2M2KPgc/5VaFgJjKy490JCZ5gbvAtQnSXWVg7A6Z6fN6LT4OdJ1Z2gkXo", - "OyCGDoiiOATuHAUzXn+CIbhqfpmtkxMJC9ZZEQq+ZFnoowwPi+6pxIViU3wHkJ/E0AIU5iqtApJLHs9y", - "Ienn9AHmSzU/fWZxgCAQO8ueP4vph6sjfsA3SWTvmLVNHNDa0D7nXjZxAJHhaoKqNvv8ZZYEWoDNcmGQ", - "P4iknuiHLvBZcNgj9MOIfWYxx17iFiq9KeqdklF3e6l0n9PqJZUPwbJ2jahbtst6Lqvl6617FxQsanrV", - "lJ/NWOMtqt412Qi5tPfGU7zmKJKJhrO9UpMHGqmR087eHE6ClJudSXxPy/C/GEHZ56mkrFfX+hbDmPe4", - "SaY+cqtIgY1XkXJahXlvNl3s3yqbPhL7JKXo9ZcrZjHoX3waXnW6nU+DT28HowrZWZ2HoP5y1uQuVoWJ", - "HByKsWzVq3FxvGI8VooASfnF6jyp4WUwuhtfXk863c7gM7dZTPrjj3ej2ytmE7m+UmJPWP6U8+tPw6v3", - "d18Gbz9cX3+swH1Oi9IpkiBeVET2s+/CX10roHkOAhI6TyBmufJK6hXvrY+Ub5b0QJ/vYDMpDPjY5iXq", - "4V8vD1tKE/Xsm1KQXQKDug1rnrdgAQmMZfYCeY7ysZxf0BE8ck4dDyy7zqnzBOED/e8iDMj81xV9dFL0", - "aLMZmMWuRNRN6CNXkwuVa/xVl+C0LCBvqlEaGojdPPvVObgK4MyrExZQW4FqFEhKqQMpjz6fdLqdz6d6", - "UcJ9QncQcGiMYeXOzk2q+VQkzX9OBxxrgjLMVVLW9GOvdmHnAP2MlUvUldekFNhI0RCj5qYCIvq/PCBm", - "VjtoS3FraHpJQ9MWDUBbqVvXwJC/sh3ewIVfmM+TORsDvgEJ1iU+U9mEO045CDsRa+2AwHNcEAQhcQCr", - "S8sK3suk3aUDSwcd1t3Ha+1RwPNiiLFql8pp0dLQUTZP0Q8fAJ7rjps5wHN1yP+FC9OJA4grorxe/JiX", - "XnfO56watH7CzzBG96gOvcy6RmXQo2hOf0VxHgY9J8wBvgEYP4Wx7RzAiUQHB0Ni4K9tvGR5CEc+WOYY", - "Qe5fY0NWHrtfDQR2PgfBDEoEGZkggE9mJDLehU8Z1qRGrYd9Bb1DjszWHVUCkgJRib/1YCillxVfujk8", - "mVB+Gc5QsHrpttX4e61KbnuHcbnGqA7XMqnXQaHb7oQ0CIY93C1ZvN1201S1Gs9RhA/VyFoyOu/wNN/G", - "KcMn023b59PzweUFnCazTReS7Qp9FKNF4gMCcZZpg72WuWHie84UsgdSrn2AQJRoCmMH5DRmXUhPXfXz", - "88GlUvWc3Q8egZ9Q6te6Y/sExjdg6YfAwIEiG0jE25TXB+Qnqn04YUB/iOEjChPcE+7FYoxOVfKg8sTs", - "U3k+UgoPFbmYqk03uQrhwo5TQxmVgewGLqCfZEYyB8lavGwDWMltXpxIsxOZ+7ouBg0nfhqLVdjhbPQu", - "nZBV3MH4PvG1iqBdjEgZCzJcpORgbgyWMI5hiFOm33JLTNfFKg1yqyDzkhyPK7O0fz49Z5EQE4AfKmqk", - 
"ExgHwBf5AozmKtHMGV5gSYouCJwY3ovLN+IKOcAPlH9zhKl2Vu1cG85FYpcU5vMpxYdM//Ks3zAZQUKb", - "Yl32DWx6reDoYmhIl408zIXeE4xhVsxra6h45otgMocvtKpEfqUUVfhL3g6KMkzVYCrEpxSOpmGUqLC6", - "Mtza9xM+3pFzS2/xdBKcTDF31KIo95jiI1phBxBVGtnlIavMH7tu1i9DBkoeicUQkjvyDIKGbbkI4lf2", - "PAzg9X3nzV+1wk7T/y3AyO0nZN557q7Sv38z5HUGV+n84VP/vPP81bg4MTgzuvrrLBEyAAuaD110rTQR", - "Q3FIBJ5Y18nSRMUsajm8d2grGBDkCioMmSVGMoiI6VeEfv9mePdx8A+NsC8mVZbTc0g01GJGKUOGPofu", - "R7gcNNa61CVx9e4BLo+cCfOWwg4zupGQV2OB+VbOfRwuVFxIIXK0RgrmFKtlT2PKZustkA1RXpxQHVlh", - "jDCOoUsy0UFCR7w/6d2fmQ1KulFZkeI46yIUHeRWara8SSqh9XRYXhU3chd7pwXGyzkpDBqpdKjOwO7q", - "6M1e5GUiaw8Egyo/tyQX3vbHw/PtSgUmiPcAmxSO7SKTrXRjuLwAs3MlyUgxqY4m/Ui97prWTy6rwB6Y", - "2Sbh1/DST1RT20pVDe8zANRLzxQ6IFg6f46vr3oYxgj46D/s6ZGv7GglpbZissIxEsaOCwichTH6j1ru", - "tXx2QBhUJbDCBCwi8VCanrvcaR0G9i5c+1WfXBymLM20qUKuch+Vk7F32eySlo7iTJeFGS05lTHTRAFG", - "WySTf0fBTMi3qyZKjPA7T0HN4GQWEBBFPnILuYfWKeQuFrVWKXftvF8z8bMHNmMpCA336vLGGtLT1lN4", - "qhcWtpGRo2YPa7PWWWSUU4g/ZbR06jgJjrZ0kzVXcDGT1Q9SMb2ta16RmiNO67z9W1Z/y2ZP90TJ6iKk", - "hSn9dwOLVpUdScddCF9A1wcxICLrjdkpQXA2wo6XdXF+IXECf6UHeBSHsxgsFuzq9Ms98DH8ddMOC0Yd", - "R1HWpKrDFLYyPg7DTLcJhaJi2xtYAevG3qRgrU7EbzIcZmShstFenLpZSvb67LufT41lfwEhcBEZ1F7x", - "UZFgxaq/mpRTO6kj7MuivNVIKhbQfbnyw8V0UrrnOhIvHZaJxgbTzesZF9CxRkXjbKR94ITK2sPp56pi", - "i/3xeafbuRiMzw3L5fW22qfBpk+DHG/beRmMxdhbfhikoJtMPc1lJ12QXm4yH4BPFbnB2FVVWvDqN2aQ", - "Nl8xF5l1ZrxKbRqSGEFcv3z65YL77Bir+NA2VgY7nv2LGWyaJR2T19Bm+Zl5Ew6cOrW6Zxmu9Ze6dMv2", - "QqRmRF/HFZIgt5xgrGFGMTlWLpNYMXuYPvVYMaPYeHA1uZuoi0nXcMdPyFL6s/PRoD8plFz7OLy54R+v", - "by8pdiZ348HVhTKy/uRRZKylqdk+1w1GAY/cbFJqADaloyxlbKn4RkCQv0qds+piHc3KcXAkmJnyJkQB", - "4VGK5R0QtKiVrVlqNn3wN1rAVSPweCNN7jerZWgOYu4q1nRnVdRY3kOYCpUEJny6ldlWrVzQVJLTu51V", - "pXssQNgUI9nSNOSeg00RmamQyNL6nV9/urkcTErZ/CqSFOZfu1YrY6Jc/vMHdTbNus9bTKMThtMS9jeq", - "UKnvhWYNU7ZiA2H7B4uap8WaW3D2npTi5Alg4dbRIB+Al9eY7NygNVugjJhk9XU1w4mvxaG6DgqcBfJ9", - "hKEbBh6203HrPGELszi/pCH7gEBM6G+/1pePt0I/HV52s8d/nR9yBcoF1QuvevljBAMQoaOrMLhKfB9M", - 
"ffjnmOXNSFv10CIKYzapcMUvN44AveJ0ZojMk+mRGy6O54C4c0h6HnyUfx+DCB0/nh5jGD/C+DgE7Iz+", - "1gvEWJ03zNC6ZhhYshhH4CmA3nklOyo2ct68zJhVubvLA/JvDSnogPaElyRganhqeLB+wuKdU9lZq0Bt", - "4b5nURpLw6FbKo9VVFSzegWG0ljlg3Jdy8NqG7nB2S0eBCov78MAw7j5kYdEt6YOFLbvF0dqNdodVSQm", - "VkaaNAOIsNHI6815GNyjmTbfSHVx2bUqP69AfIWYI2twcuWTyzOJyHfNROsUzlLt46rW1OXWGxkNpDmv", - "0nOmm10gCuyqWn/yrJCv2MUNQblHJ/0WfC0q9Nu1ClUbYDelFZcCilPgBSTmC9kELcTT/RYtsB6MyNyg", - "99JPOWUCcS+wJ0BgfA98Xz/kzhTRteufbUeTaCg4uU9DQ2TRU4R3tEfXz6bQaKzrG7grtkrLD6S0rOYM", - "p+oAaxW25MK3cMRe5A7qVQ7dr4Uj5CXPUUpNLBF6o+NUHH0bO013lgSv24liFMpaKRq/cfHVREr6um2q", - "Plvj9ita10f858btKun6Pp/y5EltWOjK/mb6ZwCRk6oUenlwYXT7GQW3l0FsGjIwFtBWg5XsAkplh+fu", - "IZDN1kvEtIGbL0/zm3G1NRcrt/JwtQwXVeIDv6qsqQRql5k0Qh9NIWn9myHjCgXJ+dBCHRHMIfBgbHe6", - "87bFTRTT1uJKmakr1/G1SkT1FYGUDyTtpoHmXVM0pDJOLti2qIRaZbSiS53SURhCdWhMMFWRTUiUX2sH", - "KqAsHbUmt1U++NSfUR1vvlDxNv7QP+106X/OXv/O/3h9etbpdj5dvK7GXhrPqskiq0xkHxub9mIJTN3Q", - "s6hXlxthIDsxd5pZAEgSww9r0zEd2knH0wpMNAtYcSU3hobLK2bfGBumsgzNAqsJigG8KaIUPOlXXASt", - "lkYGCt7TsOLB/2HlCscDFhTD/7gdXVaTx164zkmdxtIhJtWBTWmj3DnwfRhUOYU2CNGrdICXz+6FI9GJ", - "JXCW2n35gFa29v3gajBicvP9cPLh9i1z8xsNbwbMQ69//rHT7VwOrwZ95nz3efh/THue3WA3H4Rd6aXS", - "3LdDmilb/47Wv+PH8u9oXTDKDydrGmL3+yHhYOzYDd/Iax6lNRZv8U69ltWbtc5M3tm1Lf9EnXsxTl+j", - "VTulchpeQCLLKBScfJPA3itBpGDAc1BvhVFj0Wn7d2GsgUc+GLEMHDZhP6xhpozkvQ3WD2Tg4ODNpZOp", - "deAox3J3cjiR6JaQlbc2rw7kt9eriZ7ZQj1LdcoqYF/q1UXVjho8uxgwvqknmC86lw+JIvNidhT8V3BM", - "UqNE++9FSTetSi60fl5oYqMlFhuZPEWRCq3ym8SGNNSybxL7jQxtwiBCx9XtdQ4lPMmXufjAphaJ7UwC", - "VK6KDOf0FHOG904QEieKw0fkQa/rACcGgRcuZKcn5PvOFDozGMBYXmNU6jrbGsabo9nbTwJcbW92Tcop", - "nLXIplLLbLrYqeUlL36srC+5LkbGFJf2O2DYN/YkCgIvq/AY86FWu/IvIJmHXqPVCtA/8Z6pbn8eegaq", - "/TCZ3Mjc2W7opRQsDT32+QbuAE84wGbOTfzVEuHVJCRQWXPOZ4Yq3to68ZiWAlamnU/p1mXGrkmn27m5", - "HrP/3E6YlmQ6IXkQFq6K0MLiTYjXYXJB4EQwpnR1ZF8RT5iV2G1Xm3WLUgJK8/kBjNEsgJ6TdWLWoNvb", - "4YUjSHr3tzwfTKGPq8uHsjaMzHM+IVw025EHF3J0HB0afYDJBwhiMoWAVN3Xc7vGqsGyOg7Amcve+Zvy", - 
"2cnZWe/0rHf6anL6+s3J729+++Pojz/+ePX6j97J6zcnJ/ZpUgBnMHpkDzABU58ZwPYQ0u2fzuZTOYYu", - "TKuSYlN2FtqGR3/wqnRhvApJjfJzaagqFtV8smqeuDaTEnayXk4YqLvYALLivFrokoBu4TC4D+24Z6R0", - "oEeTH5LsgrxKvWo+7Dgbh7luq8ih3xzwCJAPpshHZMmO51wh3IzIf6EQ3bH8t71/Jicnr6DzXXb2YVeU", - "VH7+VZ+n1A9NZxOGCxDNwxg6tJEQQysSzViONWbz6UL5rYtkZFOn2W/OJ8PPvCB3+udN/3ZsCDS2iW7h", - "e5RGtvCz0pgRTJze/DwpAFlvwOO9b+v04dvRpWb4puoxa69VbZSjonSyV2bulbmeaNdNOwtVFNnmxbVr", - "Jq9OVFqBh5d/iTVeBFIgR3lRVqiwDYJZIp6xrIXc+OIj5scu76wUqC6n19GrakK+Dr6RGGgbYO/BPGxp", - "cQwiVSG9vuyzDAM3/5h8YI8ik3/cDMbno+ENy5dy+/YfeuNOUeiWaKpW6AIuCOnQlNIKuq8UuHXxGGlD", - "Jwly4jw3uKZGPytdbqoNixbJQpmkydC64ucVnFE0qo0Hl+8+XI95qodP/as+TyHzZfD2w/X1R+NesOO5", - "bAJW16aPY0p/sXCT7jYoC8sVEVkYVl9O9O9wajii6BcdQFac/mc41R2JO9EojZiTRQQ1ajaYrb7W1DYL", - "tBe76uc54WaY3e0qVyDet5pJXOUpTSKz0mauOWHT0A0DD4knFn7LczWpTmaQKN9Z6XmN90cgk57wHHwz", - "SLDwXE27OjPaN9UaFLu9FmGM9cckBgTOarODKxBe5vrxstbmi0hZWqUQk3zh7GJ+4Vdn9eJLTl1cTVeL", - "1aotGl7o0iCmAA4vtDiUvT+iIGdIeXd7dT4ZsgPr4nbUf3tJldSL/vtKAUkHkZpIIwpms2vYS37Xqzdr", - "xWDuWDPSX++eK/bTmDuKMclHWBVOSUICfB3Fpjz2AJcGnyI5PCVLu4hNeTsHDo6gi+6Rm03i/BIBjKHn", - "PCIg/NV/1XOFERENHM7011sSJ1Azft37req5lRpgTk9OToyeWNph8r5TDd2gGi3o73AqxZjtOW4o/LB2", - "dDM/EXdtpORzC1vPy4CQcybapGOQ6vOh9Q4ylxp5u2ww+ETpVXbXaaiSGB1+1skdng2kuvIoYH+tFiZ7", - "cldWnH7sD4VREqyRV7k8yjsE/dy5r6btyGg5J8UUyVgzyVg6M7Wyu5Xdrex+KdltmOMHFO0V3pAriGY2", - "2pDAhdm/0nBfqe9srLY3ZqnQqhPurulxlmVb23gStQ0MaJDpxZS8xdwUYlHdEiKVUeuop5Qp9mZwdcET", - "xGapYjVZgPM5Y9P0sm/75x+v372rPSXZtCvdm/MCxUyMk7w4KfrbhMGNIvlLsNIGY3cOvcSvCIoydF77", - "OPpSzJNiKWBqNhvzKupGL6RcepYtsmNVPTJcuwijkYBlXG5CR3Koc96xTgstNC/NnzGENrl0VR5vyXTa", - "j4K5tN8kjzbPDl612AmY6dDrc5VxfZN/sOHkKsKsyyGsoh8hFM5jepG518sFLUtzvrxDBm6sm5A532tn", - "ZHLkTjzebnparF9hc82ggDeN5IVpyMUqA6f42axyz9UtPfoyDexOvEI0RzNPMWOUp5t82aoCQ9Fmiyyb", - "e8Kw2RD11YM5vdyDxCc3lVmWRCNjtiWrRwJxi/wT84N3YSiB9ef4+srhQJfDdtgIWica+Sz4Qo99Yexx", - "b0wLNGChdkzQAoaG4jiYIPdhaXLFod8cLJ5V7F4SFXnRgG2ZDvZ4Wngps8Kx0mfMkz/pUP6YUbY5iavN", - 
"Ap+U92zbd4vGyXKtr4FyWZIwcgN9red0RlabfBtqQp97sSe7Qjh3qMgehQpVhWMIub+KsaTIAnyrafHU", - "TNk31RXhkR8Jlb9MfnIIpxDEMJb5TBhG2bHCfs42ZU5IxK49YfiAoGyO6K7yn+Tb+ZuOCGHO+orUNrR3", - "gkm4sJzsmUl87haliR7gszj9myErd0WYTSz/a0qIndOjk6MTRsc8iLvzpvPq6PToRMRjM0ywmGtflImd", - "6QJk3svnedoqgBg7qT2GbjqQxU06l+L7e4YGGdDAZjk7OSkP/AECn8wZil7z724YEJFUQ9STpk2P/8ac", - "r3B6ANbw8SCOQyqFn0v+qVchSdeRI47Om7++djtY1nChq84aSp+SvwTM7hy6D52vtD/DXwyBt6xHIG2G", - "qjA4kg32HYVswQ4JHeC6MCIOicH9PXJrMZpioBalj6fHwKciJZj14AIgv8cekvHxd/az+tszx4sPieb2", - "dMF+xw5IM33R7g7rzt+mS7vQpy0GtAFzteAjMJ6JwQISpg/8VeHkU5rBEXnOO294HoRUaJSW0lGFGn8f", - "yHZsvZq8X0v09JvGkzBxXYjxfeL7S4ej1MulSSsh77nb+W1XlNd3FsCnWICewzJoeTLsiIPxauNg6KB4", - "F8ZT5HmQ3z4y+uZ0UkVmkuInrAk9rL71YqFysA+8b6erIYyv7NpLXE2WdH7dWofE+Qg/Bokzengbcnm8", - "EWLg2OGbVkBcGrdWJpNKbJHQSSTO89h41ov9jSxEuwQd7DkxwAFtxYClGODUsj0xoB6QEeqR8AEG9FSU", - "f7PTMAp1KQ1G8DF8gA4IWLJG1lp4a6UzFsREhCa0lTTo0O42UiId3iATJKx7ddzFbHmCzhl0PzZR4yZU", - "LUiHbuxE7Jwk4+y3KkpOtzxHwa4fJt6xekM3a9ClTHHy2sMGcVCACQhcWCLic/pZupeYFevt45YB4iRB", - "FnCxLwRWo7VzBKvv9WLrPykvbN96coheGHFnF3GiKfvNzeHH39l/n6v2m0op1uqotKHMKs43slYS8STR", - "JuWEp3DcpRDa3GaL1Eo1hzevofIoxBrHBtuxVrblSFzBTEbeHMUVUo3Tz1czhR/XiTW2LalUq6H5i1SA", - "/ex0f8FIuKX9/aL9BVz5DDee3rs7uEXGtSY0lR6JB3KQb+IIp2McMzs93yVs3PFLhOkFyHdyrU0bTFsP", - "8w23ttt0LrHjypQNN19mwMmtbp8IId16thGFTSjvf26TwwCRkErz4++c45+PozicQvPlUr59OiBXeYLZ", - "dXnlilwuBDPDp1PfhJiMkuCGzWtvmzIdeqnk2vGpV0FQ8Bt0E2lbYfg92umpcBUSVoEgjNF/eJZ6kdOI", - "B1/zKM2SmZMA5EPP4XZ7h22P807I82G2rfqDI0dm2Afuw/F39h8LK74zpg2VAit5ymFfRXIoe6N9bkwj", - "8TAQ99I6n8fJPqk2p7sB4zbISJhP/Ho3E/OcYyx1I/D98IlOr3sRKFKtFL3s9yoVixNdnmMCfPwdB9iK", - "W67GqtQv80uAG7BJfjAzo4iTe+/YpICMllH2kFFKBJuyytW4klECrGETqbgo1ia96kLnlVfiEos0fht7", - "Mf2jazYE8MJMK1kCFBjOXr/OAXG6CR0oikP6D+i1Z9gesabpEsnqNzggiiS1l4813qbAjwRMfXjsgRk+", - "TlO/Gy+NmN0aWTuHzAFxptAPg5maVSBNMw5m5Svl59MLwMrNTkQJ9XpzmUzwnSVo4Sm3Gcv8O4HxMuMZ", - "D8zukFd9zG0rQsRK7hTgfamLjzX1bqwG/gWYnYuYL332sQo5RKeUr39s1p/bStjtvN6V8KO3ULSIfLiA", - 
"ASnpBsx4IekgfToH+EErYVjD4+/0PzXPS7zSxXTJ+aYoQOgElqZ2No7x0KeA7vjIB4TARUREXhaDUBCN", - "OiospViobdrxCzU9GpneGFZ/dv78jd99tj/rRK2/TzWF+zDhSZr2RERk/FwSEeY7A7ERIcd+OKvTVfxw", - "5vgogDLzkYCjKFEuw9klCng9lkOUKiLLEwlFWt7p0iBZeBpGLTQoIKyoZDno0pA8NyYiNXbozCChqGZY", - "NsyMEbc8amauSN1guDelVQWspk4CgvwNTN13qLzrEfiNOBiC2J07bCalxnPF+lkHnUivXiujYPgI/V/w", - "r3QiFLh+4kHT/tKWuKPVdqsFvmQBOoCtcuvJ5DYUMBalYqY89vluurxLO+WgtAKulFPH6pC12p49OHJV", - "IdRAIRZRrO27eV4rTSW/cuxchrP1Tx36/70sdNj8uqoUajMePGkdth/g6MEPKDIx//09hhs5d7Z60m1f", - "pc72egUHmfba26rVORmnkzDrq9ishWKid6F/7MFpMjMb6QePwE9YvSfnfHDpwG9RDDELqgUzgAKc1U8T", - "9YE9QMCRRh6eQ/+CTXUoLgWbj2j5fHo+uGRIqAlgYZjEVBSyesFUTOiRv9M4FhV8mXaxRtRBQT2eZg2t", - "XqO+xE2TWYnFFJ4/H1yaWd6K1y30Gv4AkBc9aVXjIj8302328Y3uR9JvNDdaacx/gEusXJSM09J2za+X", - "jAxExH3dxfI8DDCiV0lBYuyRKXRZ5g3PAfcEivIT4tq+TWNDNSxTeB/GsBaYTZkf3vGtIWEOGhCzWnSh", - "i5gEfUJkrr7FFctDa+DL0koYdnbLz2T268ql8XcWgLhzxJ4eXRgTgIIsdL9qnWk2PriSoaRQ+N16cemW", - "iFVOl/S4Q7HDnyt1EIuEfS+6LdOlk2XIzXzEWTG19F5isKmUEwhrF6Ip5iCneYDLHq/IFAEUY+cXDzLB", - "R7lv6QDnX2/+9WtRbFU6QdgZtrAbRtBKHvKWtutirdeDd7t3VPv7aWuBqrNApbxhGbbRQEE7ZsewpZbG", - "z3YrTe0jXB6Ksrb1MCaJi6aMwNDdMoOOGRyhPW6BIb4/nvYaBK4y3wKC9f4FTWJY99iv0ASTxNSBMqfY", - "n/aA2khoIW4SVphSjhVnch3H5pgSLWvPKK6StuaEfTUnlKovWyjQtbfPyilKV0R2GedzHq1faqLZXQEn", - "UwyJ44LAQyzPjKTrjd4eqlbs3GLoMTbisBB6PS7DA4i0ubK3e0PRjJ1ePBTWbiDYpYhpJXte25J4yWQ7", - "x2+VrtU1vO2cs1JDDnAC+CQGNopm3vbnfrxhKODosHnAYe83KSk7rLATt+rv8s1GkEcd64myUwrA7ZP0", - "rp6kr7JX6BzDp/yZ8qY9z9trceyCxf+2CW8EdZKiceLO/VLjBLciFpftybXoL1spJg7ztmUpGmQsZysW", - "XlIs2LJ+VyFMevRXhGKkCrzZYMJnO2SLScrPPzkXz0LSHu5Gi8kKZ2yR0SrTBNcfmwce8Jw7NtMkuy/J", - "cNu4AvBNWvkK8ALJh63lg8w33MqHwzvlLZR95tu+yIrVVagFQjLKQGAnTgJH9KzOW8w9KC4RJtyLQtbG", - "O1SZVo6EUtBQ459kAejawVH10GzKQalom2XW38Dj3jrm6dOaZuiFPF0o3LxqHSPl/4XVlAMGoEWVO9r+", - "Tra+Y623SmxZCgT+xsdcpdLKu1n8rSHZAG+Igtkdr+G3I8j7Ggeih96j8OmxeCTIPInuFpWuRC9rxKaC", - "bZQEUqI1j5tWpWib42B/ApjZ3izSg8ouxsL+xI1CFBDLc3eBgoRAeh2Xf8UQPHjhU5AexQ2O4feQ3NDJ", - 
"D/0QZgee9A1WQneEwbrTVarUn52cnfZO6P8mJydv2P/+r0HuiO79e34T2cQBySBNPYdVUEMK3xrA3qMA", - "4Tn03rLBm4O7fdmYI7UVpCPjk1Y+7ql8zO/OxqUkPnZZKXBzFBovFZ7mo9HJO97k536gZChgqkpNgSSe", - "4yt0XIm0nUaRsUl96PE8YbUvk7J5mySqjZYtyaiCZNi4ZIph5INlVXEn+r1SMvEmP7Vk4ihoIpliibRd", - "SiYOpq1gikXrVi61cqkklwpyYYNySaT+tPG+lenV67xvRfb21v12n91vObk4dFi7+DXW/oo2XyUYUtDE", - "OB3F1t4qic4aUNGhAtLqSV7cw1VlnwYurikjt2/xeR/XFDGZ3BQoXtvL1VTEIt3E1s9V+LkKfDR55ZZM", - "+UKerpJGmri67mPy85/b17Wc2dyC9xuoTczdVfzDzt+1VmYcuMcrnVy+PUoWrvd9zbBiBna3dmhb/pf+", - "rC3v74WrSy17d1Vyq3FplfQrfFqFemjg20N2ay0owD8aj0pv1ZZHDe6qNcckDOgp2IsBgT12A6WbK/be", - "ksvq/Flrj8UD92jdLodtzzv1x1XcpYtqKxj2SHHXyIPVT3b9Df4mxCy/BwrccIGCWUqvC4gxmFWc8CPo", - "QvTYyqAmMihIfL9E+cHSicDSD4HnoMABwdIRq+12CPxGjiMfoAKlFafciQyxyEyaw9M98DFslQtD3THO", - "eBp2W5XDbe7pwme4FydB3RtHPmtg7StHliWwfenY/7ylWGRytHrr2FnWR+aHD2IfQcxyXUMr8LYYFOAD", - "0gSUjVVM2RvHb8tcNQcSrUCBSOPobDLswHjLLv5f5pDMuQAQFWqci/57TE+vMPCX6u9p3UCdQAr85Z1s", - "UKuoTMPQhyCwiOnIFZG0wNkLhXdoSl0a4zwsMvu+WLyHc++DGTtqnwRdhDFzwFDJIL1fgsBzwoTQP4X6", - "iKn+SBtIXfDIuYD3IPF5vvt/UXr4l4PunSTAkB3juuWLme7koJ1KEtpZPb2mL8Ct09C+1d3IaZSqoit/", - "H9Hf13yJUjXcYw/hyAfLHnOXqNF3RVs6rHCvCO8rlOBqHfiCD8bcLg5aH1ZEK07fsXJIEfGSAn0CdWZF", - "QJGlL1JueMsmeC0JtKKrFV1NRZfkkx7lk2rJleNRpj3oE/5n6e0qJNdADDb0Dldwtffc9p77k9xzd3ac", - "ZXKhPc1+pNMsd3rs5GQT12tz2M+EN5BepfkLe8XR1bqXngrUKUipearOkQIJhf/mrt+oFa0ZEoB83MzP", - "VKWQ9r2p6PZZYKANMHien5nPp/JLTSmJPMmBwGPOZOn5T8L0KimKJf2z4zGi+GfHiQwP0hn9WLqd5WDg", - "ts0Z62l4BVaWd7C5DFfgsvYU3+NTvBj+ZsnQ3RJBr8Dix6JkXBWnE57liyTMcJTn+6NaLh7LmnQr8rI6", - "vaKu/5isrV4/W5beUyev8zDxPR5PSy+SOs1lj3KT5LgqLRD5IrKGJXuyKLHLwnJ5kDu31NtfHdIi89ZG", - "r5+nIk0mVrUGkB9Xoq5U1bEVqq2eVJRdBC1QMKvXlkS7xtLrPSQTMcXB3n20MsiDEZnzjCU8q5njzpHv", - "xdDkusE6NJR+2xckfHNaSXLwkqSKPzctXmAkZIr88/kYxO4cPcI6LUi0EmDS7loRMiYwEu66fTmwhfiQ", - "4xmtpxLe1nV3dY1smzJJ7LvYcyuplE8q2dYF3X0+ppTrCjmZykIqx/4K80v5RLefyqYq0ZSycL1MsrmX", - "idL99vJoIGusttLoJ5FG9netVhYdjixSGH/7ksgPZ3WeUn44c3wUlHSjsjn6MpxdogDaWoNaMfSy8Uw+", - 
"fIS+lcsQb5mbuYoZJB3QXu8Q9D1jBjlID16HzabAUVHMhHVoCsiY99KGkgAWKBDGXtX62ee3S76WhpNf", - "q30NeODTeyiGroh2r4DiQmm2CiRZ/+0eUqo0aAvor5uCLpXCyllwGc6aHwPC0agitTnzgMDCk8jguD9h", - "P5+rji+bdszhg/OJ6pL0ctekl3HF4RA2cr4RSP2xaXwFr5uU2NLstMKfpkjkOopOXedqTcbcNUa8sFcS", - "eNOETGlgh5jB+OSzG2+5l6V4mTKppfbd3jY4MXoh5BcN+I2fwKVCGrbMlstoWp2DKeCzoWBWzVeHk4lp", - "S16nHAFNDrcopogkiMdlvEDhzvacW/+cE3yyAutVnHfHwKeEEcx6cAGQ35vFYRJVPpxS5U7eAgV5sTEc", - "NoAjBiiybp82GdAW72mDQ4l02v5JqENMw5JTxk1oeSf/mlhBrY3OMeurT3muOsb46UMq1JtbATd2Z10J", - "5Y2udqfbZe8VTkANDbV8rb37ablts6fkMYaE1LkWYbZ7sosju1RnM1DIBQWzsehzIEl9d3RMKohZ44xU", - "96RlJc21ToOmjfFRhHokfIA1yfCc/s3Q4e2quaYfoQlt1uqT+Jj5Fd0MGT6wRepIHZ9I/6jWhl5UHilF", - "ctQqzJD+uE4plyCjdjtib3VEhgBJ64pauE0TRnHSlr82HDabMVNDBqs6cCy8pXh1uZzLlCntauY006Zb", - "3Wv3hAe4tHJOoO2ap59hZPARLm3ymmQwpe7Lwwtsmw+Ty4rGAEqX6OHFiiBmMWhrpPKxgXCUBDyOUhi+", - "XsTVg+3nyzh6sKn3wM1DhUN18qggliyDEFw6j8BPoD6PEPwGFpEPqch+gMvTN6zpaadL/3XG/3VGxXt1", - "vqFPm003lC2DJy5NMw5V0zlrPDz8TEMrRdq13jWB2edSUVoYctc3IbNxDTpIewVgCGC4qDELi8TEL+Le", - "wymhic0X8h4/u3f12X/vZtaR4E+hnsJvLoQeNJRz5HvTgM/rLybH08R/MLvTvU18UccI4kwm4EqhQPv8", - "xIKBLr+hcMAvKR1wc/HQRl/smXxgbKoKCbxhKeGCwIV+hdst+84NGUri7JyKa5Ia3K2Ej/AzKxQMAfYK", - "hbgwxDDywXLjYiNz2KL/esouy0OenHhbRTzkD+H0b+haaC4MaTDLUdIKqb0VUiNGqduRT8yMZmlj5bY5", - "CzvrR7hsn/UyY+NKt3WG7PbGrruxO8L2u0k+EKeB8ZzmPIibHc0jecT8rEczR8C+HM2bMatx4Fqt/ic9", - "ML+z//aeEJn35Cdm3a4NPwIE8MMzqDQQXgAC3kPyBZH5RLJ9rfyQ7KMXHyWQd/12+cOf8nTTVknHwKii", - "PeXzvmwKZqx5t6sh8mp+RsEjIrBpwITspXcCHbKvre4rfT8VfKzk9Smx3fp66sIhMlrcUgwEn6CS1tvn", - "LCXqgaPELtiB4/ZFIxw4uKsENgjC+Nlje8/OdqT1AmL3zlXkW51cgAGY+rAXAwJ7bEzKHoLXVtGLhRSS", - "P/T4v5+5iPEhgWVhc8F+x6kZyUbQ8D4H672X5/pq2HopOg795K+VLZxC9lm25NiME2FGriZdNL+PtRH0", - "zTjhcKLoD4UTthvov5pW8GKh/pacy+E7GM4VIfiNObfq5FvAxZQxX6MbpOylZ/FP7Gt7g5TUqOBjpRuk", - "xHZ7g9TdIDNa3EyQoBjv+Dv/w0IJdIAAwrmPw0VdkC2nhh9DFRTLNsHGP++Ud3/bCu+uogP+HFy7R7lq", - "rwypaVMmzW1MA3nRlYRskUaqNIlZBPwYOvBeiIDtKr98u+yUX4GOPUl5ZSm9NHqw2LdWeL2w8DLKlRWE", - 
"V5XWE8XhApI5THBvQXVQt758UdbFEV1SH7y6zJQ3addPYrIf4qJA4DdyHPkAFaiiOFKTO0AZyy1TvjRT", - "Ug7Q7MumbiD/TmACrdmQtW7Mgf9Dex0Q8x12ZPMhBatu3x6So73VMlg4jzDGKAxambhPMjHdnbJElJyz", - "qkzMnvpsXL3j9LGxztd7BAi8pA3bvBr7XJ12EzkYajG5zUwLKZ3tQbaFIiy7KquR57UGwQQKO7d+hgUr", - "uIqbTNwyb4tL/uuqElf06EWhj9xlfcpJ2cHhHWwSTkpX6BvWo003eaxDy2qPRoXdaB+Pdp61FfvAfahO", - "NDmmTZwnOJ2H4UP5OZV9/sK/ts+pPMekipMmt4cCqveJHXZU8fg2AAmZhzH6D/T4xK93M/EnSOahxyp6", - "AN8Pn/TVlvkGMT2Qs4B6nrGPazHiMSYgJkZ2HNOv/By77idk7rDLSpEhb7F8tmEAXVOEsp6HyJmvTs40", - "eFC5h6FMHCs5rMwh8ITXiB9ygqmxeLINh24SI7Jk+HHD8AFBOigrivRVpQeG0vyMkhDoDqxMB3V5f8dX", - "4yIBFgRygFs5LOTw1XiooqqBJC5iuZXFeyeLy4yQSuKr8RrphgsD6xisjcZgCMjzV2WW4c3RbH5S66iK", - "4q62DL1HDG3kPEuOrjxRRZ3O3i6erETp8EN7udq+uUCHmGY2g7SedW5n2keVfXhUSfdm08/Muqrqlayb", - "FVB3pkvOUIXTmxPigdjxuvta2X2bEkNs0YryoZUIOyuFqtLiE+D1UOtEhHqo05/oRq9aZbtaTtTmBOwT", - "AheRSG7J2iriwyQ4Di0ZYCtBqlziEWa+0kKEcCLw9++C8MKPeHWMsiuGjiHtWJE7jCVZtOVh1rxl4X3M", - "ZhYngdiqGo92FEQJ84fgj7u65T7vhabS5jKrkC9sw19CoGRrqrQF8GbCWaBOuLyHZMyHbUXLy2kHzbL0", - "GiwNYrj2QrHPFwq5S1uRGgTghx4mgNQYDAF+YNWghKWwxko4AfhhzAa1FxHDix/RNpgiogGHanHd8uge", - "mAFNbLCL9EjCa6b3FMYPVckiMgdso0tT682UBZNwVHxhSKUIqarqSZGRBrzwjo7cjva5bd/ezxXyXz2J", - "oRjExEI//Tt5jn84NnZUjFczs9coBaHc2pZz9++hXGW8lQ5LRhXVD2n0hOTCu9pLPjsbfvrDMsNEW/N6", - "IxmqpfaQj9Fb3btSIpobgprXolCr/2pKUigle9vCFEphCgUvuMagm6uv/HJlKnRwW5ezV2y9OYJpL6l7", - "Wb4iv0flcOBqU1ITgfNd/WedH0uOE2pPYEGmh+zWUmB9PWgqBg9YTRDbtWpmgdbNxRzXn39Bqo/p7+Zp", - "anV+PmaPkbWPSfzJkjO0CvRRDV8P2egtc788c2dZTG6UIpQcxnXenfI4YtvdmrV3ZNb+ouI+sMkfkm1S", - "U5VhcxIHz0EEt6RHjNnYrbw5GGWCb1irUfxAGkUauyJ8hiojQ0Wldsbivp++j2ONrlHF+ixwkruyDGRh", - "v1YGbBzAS4CJM7xgCevn0PGB3EFTmiKAydAz5il6dabLU7QDH9smBT1LZflak8j++dasIEvsHW/sZCG2", - "eplgLe00mp8ycZoH70Hik86bk25OVOwihVo69+tVJh/zTGrTpcMm0E8qPpnzOexC7Wofezavb20yJWM6", - "Zm0w0LmMa5gC4s5Ljz1VGtPhBANty8tBeSfhyLB12xfRJOWnkk0/9kSKpeZ7qvSNkmDo4Vzq2bUQXM63", - "29AgJCKQ2tejmvRonGx28XKDj904DOo1EtrK+TucZkCRGM1mte4T53EY/NRqysHkd003Fnl02hkkqUp8", - 
"VJPG23Rx28Jdl87cFLyrOlVKOyWj+CbT0Q7NpzrMDOUVOXOnS+de5OXdWOpeVYpg+/S90+X2MvgqSsGO", - "c/jmkLGGht4euxotvXTObUldp4fu8Xf6n5781a7MXfkgtn74oIRz4EXv0tWbwMphdPdl7yzr02k3sc0P", - "XKwXp0dTs7eKPEF8fe5WPSauyVyH7J60x5y1paOzPTYPwbDf6LDeiHyoKy/JZk1ntBYOB15rcr/kw7aq", - "TaoCYsINHFa2PkoFvISjjW2vTlVQi0G2qkK1HBBsuQ1RYKfKs+PA9kFPfWWsd1NqDWb7bDBjj8gNrGWs", - "/Q5NZftox4tATJFmcF0pgMUbf1EfM3YEnyZFjBY24SSyXbj62vgsloggwdCq3qJsu4p1a8z6CjuTDXAP", - "KPCsoGING4P0EQVePTQHb0wlaAEdcE8BLTlPPwEsY5nVJXTOTs5Oeyf0f5OTkzfsf//XaKxm3ft0Aj3x", - "0mO1R6Ho2FYjpxBP4X0Yw22C/JbNsEmYK7B8jwKE56vDLPvvFM+bAnqjmN7e40DZEv/TPg0UdcfWwrEV", - "d+ntvAkwD2mb/P3AEaDRgy7P/mpCf8tAiEOuQN2q4a0avns1vNUtW93yRUKg8JoV25kAaiuL1J/vW6ie", - "np3zFFQv8enxWGM1TFuuYj8cy86tFXGfrYjbuxelBHBQnlOtMtUqUwejTGXLyET1RmyzKUhWDJ5aaTUw", - "bzVGsiRhWqvDZrUSgwawXb3keJr4D73ME1EfUfQ28R+EU9uGFBU64uH4J27JD6HMUxlabMOOpvVbs9s6", - "IpVrMieeU0ksTtu1EkJKiLdW+7x1ScHdVWokBW/k/BJD2fvXDYqNw3Gu2qnYkGk6G4gNsU/7KzbkmmrE", - "hlhHKzYMYqN2n7cpNr6nf/ZKOSNrIyD0IDcUGgceB6HBgbGakRbVexsaod/d1uGxGBthwFMzj0cDbdRE", - "SWyEAQ+6QvFBcd82D+T2rn/oMRTbliPV0RS568CGJMuBB1rsvXDZVuxFSbo0qI+akVE57+PLXllqJaQa", - "7PFTKj8HUP3ttuqytClZaXeJSlNoPmeZW6rKWDnACeCTOX+LffoWEQ91OEWv6jOJVOfMrARtR6KRY3vV", - "sDRROdq4+TuVjc2Cb9VaXWb4W8m4e8m4d4VOhKCrovLtpM5SZHHOqUcvj6VuICSyvYarU4xaKbxLKSx3", - "YAXNtEKt23PFVJXArWLail+T+BUKSZ1OvHGRy6vn9dwwCUhNvARrI3ORy7KP4BEgH0x9yKSvIm709oX3", - "kPDqfPiczXjworcuZfyBl4zIbdaKZkpOKpx82hdEg8N0DkmrFZLIs3+CYYyP3SSOYTVnY3474A0d2q3E", - "vbcYxu8hOReDbZHu6EwN6YxB3BYgfvkCxNBNYkSWTIy7YfiAYD+hsuuvr1RUFZIO5clNkjvbfg0ZzxCZ", - "J9NjF/j+FLgPRnI+DxeRDwnkNH1N53e05xGdiNuj3rOhrykuz+XwBQJ/dXJW8/bqinm98rxzCDx2uH3v", - "+CHfjPw+FMX6cwGZOdzJBebnsEQfJiA2i4Ix/boa4ljX5lhj8GwfZwy6hggLw5kPt0NvbOgfnN44+jZM", - "bxnifjh6Q8EjIrC6dhNm0UxSG+YdmNJtdXzTESas71DMtcVTXJ3IypndR1huTH6Brb5ofayymjwF7GWU", - "N9HcEHO0dwxcF0bEbHnrs+84tbCJSUrUpm4+79PZjj2JD84nUgxJBgNQBfXxlevor/WYSsmLY7u09/b0", - "FUNW3aKikj793oy+eJ/OturS08E3QF985S19VdIXx/YK9OWHMxSYyeoynGEHBQ5gZ+NRhYJxyQbaknMG", - 
"PYLp+PWEtLt7tB/OZtBzUNBen1/4+tzt/HZ2tqt1R3FIaYAZbQcBQWTp9JxH4COPTUY3RTRBwcyBciSz", - "wssIW3+V73a+9WBAp+rFgMAes4FTHZq/1eiYOUxIDTeHCbFj5zB5eWOVYLJwzwp1t0aqGm2aUY+tfWoB", - "F1MY4zmKGtzhlE529zh+Bn7KuomkFFslcP2kzS90KoraS90qlzoVg/UkGQGMn8K4wpUizcVOOziyfZVI", - "vZFjbk9JOp+DYJZOtE/akssg81JEteK8VZqaKU3VrM4pP8+Ma+tTMZxRSRxXXbt5C1ypUqWeUtviewnG", - "PnG8RF770Ngy/WZuSpLKN3NZwj5wH7bySDWmI+/xG1WNJG34aPUIYyxAMLo/0TWIdtIFCsP4UaOlD4P7", - "8D0kn8WgG61JrECaZWg8PTo5OtHlgFQ8j/5Ku361KDc8qVhswduygti/QCeGJImDHPIKNx0qZpMgoPyT", - "TvGtJ4fshRFPOVVmgSc4nYfhQ084oh1/Fz9YhL/To060Ljuq8d/tI9vFQGZHsHSiHfuBWYaKS/jag+3l", - "jRPF8HSVTI3eX6LFVyvmOBZ4tjFTyKbCr76GY4Tihm0TZe4t32zGf5JDz90nBWooZqoyrlCspHVABHbS", - "7WrZc4/Yk1llSlvUlEdT3mR/PNd4X/NWWsdq5pxpxXPcybTKZ1lzxh+Ox3Jj31Gx4tYeWXJKLgV8yQuK", - "2QeZqdX1lR8rCdk+7cBe0PK2ovhz54bprBAYSCTKdhcHZclralB+y2mGmovrMFvhNCkG91glAmtWg7XB", - "vWgvI2SaJNFKAWwD9F44c4QgVoViVoyP6dZpWPac0EDl+hkCxVYMDmt566V5S41CW4exbNQ+e+5qpgfu", - "BYNtXhfMI8M2Vl7kJM1x2a6VQyuJUFQPW3lgVBDXY84aNdGqXB7dpHxdvJTxHtOXDuNJ2aA83j7ws6ZE", - "BS8wsYH6watXD9YDNovDJGJ1PzIQ5EYZQWGdPsJlpzYNyJaFxJq1uOSjUluOaw+1iZXqfzUSXDI1kdG5", - "RWbVaJosaKUcQXspuSYadjlyhvfMuo0TSh3Q6zKu8gGBmKQ8hbBzD4k7h56pOlQm+PdckRJksGLioRdL", - "N6TA2yjPUJtdqM0utIXsQo1Es5AN2OJVK3eSW4ll4VtzQCaYH0Eub1nKSYep9VTBVt7tlQqYkeKqKmDR", - "8W8KQQzj1PGvq3UFZJ5kXB4ksd950+k8f33+fwEAAP//y98GQS44AwA=", + "H4sIAAAAAAAC/+y9e2/buLYo/lUE/37AnQHsvDqdM6fA/cNN3NbTNMmxnfbuOyfIoSXa5kSWPCKV1LvI", + "d7/gS6IkUqL8it0I2NiTWnwsLq61uLi4Hj9abjhfhAEMCG69+9HC7gzOAfuze9PvRVEY0b8XUbiAEUGQ", + "fXFDD9L/ehC7EVoQFAatdy3guDEm4dz5BIg7g8SBtLfDGrdb8DuYL3zYenf628lJuzUJozkgrXetGAXk", + "999a7RZZLmDrXQsFBE5h1HpuZ4cvzqb825mEkUNmCPM51ela3bThIxQwzSHGYArTWTGJUDBlk4YuvvdR", + "8KCbkv7ukNAhM+h4oRvPYUCABoC2gyYOIg78jjDBGXCmiMzi8ZEbzo9nHE8dDz7Kv3UQTRD0vSI0FAb2", + "ySEzQJTJHYQdgHHoIkCg5zwhMmPwgMXCRy4Y+5ntaAVgrkHEc7sVwX9iFEGv9e6vzNR3SeNw/Dd0CYVR", + "0gouEgtMfkcEztkf/38EJ613rf/vOKW9Y0F4xwnVPSfTgCgCywJIYlwDNF8gAUVYgO+HT+czEEzhDcD4", + 
"KYw0iH2aQTKDkRNGThASJ8Ywwo4LAsdlHenmo8hZyP4KLkkUwwSccRj6EAQUHj5tBAGBIxiAgNSZlHVz", + "AvjkENYXW8/YDx4R4Qu3nAyxHk7IvvKfGbUj7KAAExC40Hr2IZoG8aLG5BhNAydepKxUa8qYzCxIi5JF", + "lzZ9brcWISazcGrZ60a0ph2Xfhh0F4u+gStv6HfKbk7/gq0mxpD1oVxPqYg4OF4swohkGPH07M1vb3//", + "jz869I/c/9Hf//Pk9EzLqCb67wqcZHmArUtHFRR0ARf0HDoodsKJQzELA4JcJuhUiP9qjQFGbqvdmobh", + "1IeUFxMeL4ixAjObwO7TEyACUuznpElABVgJ1wrKSYag0lB0csKASW6FroqExMShFjf0C0UIHyKFsSjd", + "K8WpkLlyMSUy7CYl0pwoW6BPISYGCgwx+RROne5N35nRViqMM0IW+N3xsaD/I/GFEqfu+AEL9Bkuq+d5", + "gMvMNIvZw31KumDsenBiTb4DiMM4cqFejHOZ6HUNqydoDpVDMRJjOU8AC3Gakdqts5Ozs87pWef0zej0", + "7buT39/99sfRH3/88ebtH52Tt+9OTlqKuuIBAjt0Ah2qkEEgII/TjQJM20GBc3vLBQQdWgVoPD47/e2P", + "k//onP32O+z89ga87YCzt17nt9P/+P3UO3Unk/+k88/B90sYTCmTv/ldA0688FZFkw8wcUT/beAqxw+I", + "TpLuqgq6gTdG4QPUiYfvCxRBrFvytxnk7E+JldDujmh9ZL3Bc0iABzhJVpwZGQo2ypVRTq4ksB1l9/fs", + "7dsqHCawtRPxkiBDi0TXhQvCdYQB/CeGXJhk8ckVAo7Z9ahzjgIzsbZb3zshWKAOvSxMYdCB30kEOgRM", + "GRSPwEd0X1rvkhW34xh5recCIXF4det9H/sPXAfrPcKAGJcMH+VdyEpf1QxZqbnyGe6e261zeg75FgD1", + "vSxItbcjvXDFjNvqbI/VgiiEbElh4MZRBAN3eYnmiAxJBAicLvnpHc9ph/Pu1Xnv8r5/dX8zuP446A2H", + "rXbrYnB9c3/V+9Ybjlrt1n/d9m576T8/Dq5vb+4H17dXF/eD6/f9K2WPUyiVuYduuIDqnN+uB58/XF5/", + "a7Vbo+7wc2V/SAj9VSdiIoix9lJK2dlNx3DStm2qBHpUm57CAFKMOIAemc4kCucOAfjBQcEiJrjtSEZu", + "O5C4Rzox5OfxWkqgpv14ZkQwiAOsX8gcfEfzeO4E8XxM9fVJujTiPIXRw8QPn5woDrICFAXkzZn2Po/l", + "lliCy7eQdiRwMYDAo9qSTumm0Ebie3LYQod2oxh/miF3xg85dXMw32F+L+anQIWEFdjKb0BbpQm5TJ0I", + "UtdGAKmircK+P8Al1/08D9GlA/8m013dA4NNpQAT/+GHjV7GRZ08fM3yih87fQN/eHGUmkzk1kBxIiPs", + "MGFf3Az7IyKcIxIgvy0nYovRH79dfvjyG+dapy8b/84CaXgRBhgWsUakQlPEWAascjD4KGY4zqMw+CZY", + "dxSh6RRGxn1MqeyLovYUBnajMOiV0y1tciU2oKg0U7GnHXkRoTBCZJknbSZehHRqvXvDDi/+92mR5AsK", + "Ap2trVucAmdhVXcJBsvPaj3OckSXtElEfUKB7CRVtjlFhn4sxlB2AzzoLnG0PzuFDN3TbVI3oziG/CpF", + "bzJOnWOhOCz7xIBjAzoT5BNIIarmBH4dZVhLN294NVSsC8ZdJOECud3IxI5z8O8wcKSC71CKcX7pDq5+", + "lasfXg0dNsY6YizRdOco+N+n7Tn4/r/P3v5eVHkTYM1cz42OXR9GpDcHyP8YhfHCLL9pE6wTlj7ChK6R", + 
"t5CmrYieiJZ2nxWW76FH2GYzFtcuQK1aecUlhw+u3Wv2SW4rXSvVJ/glYyN7K9fVbkWhX6kb8dV8gVQf", + "G9D2Wny0xGBVWDHjI5iiAH6FkRTo1TDJxs/tFgweURQGc8jN3NV9e0oH64syt4VvYg8YEsNgHILIQ8H0", + "QshZvY7Fzc9GeZ4Ow6UyCR1MwgiyRxg93OneYD+eGsSgH083v/C2eHNiJ96zwUTJgNJTUqpJYNuDsAyp", + "WsVCK1EUG3DRfpuoE7XmWsOwM4dkFnrVZgIFXV94F4XYS4/blXWfdotTS9/TziHvcBWfjZqbbCCYXzuM", + "2UiVgKYbKDd7BlZBGSkdJHtQSaeXSCfvFmCKguS9oWwXb5KWiSLPRPdTHXuRyjdW7yI62lEMGxe9D93b", + "y1GL2UX1Zg11gOvIg9H75Qf5qiyHCaTiCwuW13Qkpv3uUu1dU2tdg69J8lJbfYTlWa0Ibv8iK8DzL/Ti", + "/d64EEn/gzgYxvM5iCrtPmyrvhW7lbAk15mThdzJDZdnYnbT69xInF/+HF5fOeMlgfjXauU9UdvZ9J/X", + "owE5xh4wf7KcIt9LQPcFyhIQhQS5QBF0JUhSigDstriCZJYfJglkIXqGEETuTHsamei9+HrIbO7aR2Sm", + "ZabmTtlQa+Q0GNgmAFkMzVvVGXcBA0/Yo8sGFs3qjPxPDONqiHmrOuNGcRBYQCya1RkZx64LoVcNdNLQ", + "fvSEynHZ05Dmpsi+HalX4RV4bI0TyyzWlfemP8OxRpCX+dkxea542olT7O9wfLSlF9LCmJjAhb30GhK4", + "0CG2VBUmaA7DmOiXLz5WLf1xXTX4UVF/5fWLLV2n1/4ZjgdxUCLd+Bu43bt20ilx+DQ3GUCADRezCQoQ", + "ntWb+m9OkWU7SomWtzTs3hpEF0Ec+3rzMyYgIvUWgwkgMbZYDz2feFv5vCWe4axJnG5+fSp3H2BUzgJ1", + "lqsopVUgKwdzruf610Y+iCSQZBfMXDNMtkmqHje9q4v+1cdWuzW4vbrifw1vz897vYveRavd+tDtX7I/", + "+Ms1//t99/zz9YcPWm2FqnF6fzZbL9h8V81mi0nYyxI2Py3tVHlMfHO0+iOFOGuExy8MbxaaSlcHBTYx", + "kY7M2DJ94D58g+NZGD68+CIVWDa1xHB6iQJYyzmPuUfQz1SRoJJFHql+OHV8FMA6nljcg187Bx1ONKhU", + "Uky9eQuNTSKHLdVrLQ0rSGa4S1F1CR+hnzXcvL+lgqZ/9eG61W596w6uWu1WbzC4HuhlijJOcnmy2v8M", + "BDpBIr6//N1TkpVeevCPa9w/syPUvIGKziV3UA0CVF+tHy3hPnO/YLR71m4F8Lv815t2K4jn7B+49e70", + "hFmBM5yV6axz6ZTeOQtOhcnEZ1bXKgUWrf8z/F4c+Y3dyOm6tJ6oIQG+eomlTZllx0eY8NeNNH7oxOYW", + "p5FY/0VvsF8giZCrkcdBPL+xu2IzOpYX7SPTev/L6lbNx0LcMZVdsY0DDuyu03xEcak+alU6RKSgZmZp", + "qwjRyf8BIJD5kxVRaWWzZS5wzI9K7+IGMBnACfIND7PMQVl4MKuDMe/liHWEzItoC27ebKKvwI+hreNc", + "xJ9ascMiY4TJV+z6Ewq88Em/7ZuwKVcg+tG8DilNNOuYAw/aLoJ/00/Bv7Fl0L1EgeIRlqKZx3BMwsiF", + "nq3nh3JPUPZLrjeBKkNpdypd78FhmPKY9jhMPq9xIObHKByJHJsSawoqtaNBFwZkqNxnc+9EDDwTPfOv", + "js77TzVA1LmhrmKRWMOasDWTgUBpajMoXKDz/t3lPJJsRFu9WwtY8qNrxT+cIkxgBD15s9cEDxj2OfEd", + 
"Rp4TJePw6CyE2WcYHWl81gvIs/MVSX2VyyaziFAxOnkOIP3r9cRSDODCB8ufKmyBL0kxU2HjyjLc8bLr", + "U5q/PTmpWG8ObtOqTWYkpbv9EZaz+9nCJ6GLqMxjoq+ErfT+w1rHXzpqzuKjGXAKMbmNDJrn7eCS+XXB", + "wGOOnuLSjx0SbscFwXRcxgH6h+pGHgwImiAYJbq1UAdFbB/3R1VDYsfQD4OphLhSym7RHdbO0Fvq4jp0", + "Z9CLfahQ2rou7Vt2SW+3CHe9t9cT6nixp4PfKejxNmf3ZqFZ9I/h+afexS39UacMJjNv101wTx3+iqtP", + "vf524dxXm8Q25w84iINz1Qhc+zGJA7Drs1QBwGaJQyvF/Vuhw0s6TqZEUeozWaTd97H/cAF9SOAHFoKx", + "ogtgEkGQeAA+wKXDLpfOAiCeboQHeTjjZTbXxANcnr5jTU+5q9oZ/9dZnbQT7dYCROkVVX91qkk3fMRv", + "VReyFalxA4M919xi4/E5Sfa+nuQrUA+L+sm10mjT6yvHfT4U043nKBD/PLXxui3HkElJ9th3b8MryRNx", + "zZxK+qXYpVlSFtQuy7lUNoc+J5Q+GnAj5F7IE1UD5FuWeoJSismise5mrqv8ZSV53ZUZuZvn3FiXqhT0", + "1WVBdZESmPqrM3HmNnkmSViyVb7XomgVztwD07bmcvC8mlReJSCiOIrJ/K1qTOWvw0M4B4tZGMGhH5IN", + "274zdmW9gyI3Z2I/5E9gooe9Q8WKdmisKlKayDUCF04Uy4VVmxpUJ7TqhSLfl96Z9istXDRKLNTWoOd4", + "M0VLW7W15+zqlGpUz5yiL80MBAH0TWCKzw7y9E9/mA7uPPHR9Y8qfIQrox1dTsHs6StOspYFDMxNq6ff", + "1lg67W5eNxt8nUXvhe3OzromEZGgO0sXbYUMtecLgQuTuNO7Es+Q70Uw6w1ZqfMifBFHLHupLrWXyP0n", + "JA7CLM3I2FfdKZQg0a24EvN7IK63qmi1tDb6U8IP5XNMaZqYiky7LXqApU/9kjAZYn+hc9zTAZzOf8cn", + "J28YKZNMTJeSW2ZTLveGJZvJW0Frhtali7CgTu7zVELXW3Cx75LeIsz4jykbsSFHfMZh30zvNZVEmemO", + "z8M4IHpwzfe4VR7e0z4lGMrb5jORBBaO6CJuImm/eTkQxsQE4ooigjmGdSfC9mKHzI0HNvAuJTuzhgZp", + "G9ND25rEiYWsqbPipEvJirnzwOrm24QCk5WVBi8I1HUjd4Ye4UHKpfrPAnslYkJ6S9R3KuH6CJJoWSJF", + "t8aPytVsNyxRcgtSkCDxqL9Rm+h9H4wWWQbUOuWJNoZECa6ZCsyv0Z6+gxICoSE5yYMW6xF+PKwHpRv4", + "COXrpG3voexjRXcfUITJEPIbgD3tXYK6vWqGmfErVAbA3MwJZhU0qXEffH9LiHlfYvwzZFpJyKlIl3ax", + "QY97AdxfXd9/ux587g1a7fTHQXfUu7/sf+mPUi+B/tXH+1H/S+/i/vqW2eaGw/7HK+5HMOoORuyv7vnn", + "q+tvl72Lj9z9oH/VH37KeiIMeqPBv7inguqUQIe+vh3dD3ofBj3RZ9BTJlHnHl5e05aXve4wGbPfu7h/", + "/6/72yFbiswWez+4vbrnyWc/9/51r/pGGJoIQLUmQh3HKEhVAoHEAgf9Uf+8e1k2WplTh/jrnqPhS+8q", + "h/gaTh/ib966LPJxBPCDPrtpmmigNKOK6B9jNko2kUCdjjrzsWxTej+2maRVNrqAQCP9k/yv9vmCcjlj", + "NReE0PfEg46dVGT7sPlEsiEBvlVnLeqSbDv5ii0wEhnveoa8hIn1J3RYa2lCm7NeWG8BAgHwlwS5+HpB", + 
"rmNSblMSA84AdsIFgZ4jTBPJIPo51s2Et/Vs86Zccmsno0uzIdRMH1iZ057BlY5+ZySlXLbL3aa53FIe", + "D3O2S+2a90DN0O+FLivoNOxwom0N2GPgc3ZVKJiKjOx4d0KCJ5jrfV8gusssrJ0BUz4+78Wnwc4TKzvB", + "IvQdEEEHLBZRCNwZCqa8/gRDcNn8MlsnJxIWrLMiFHzJstBHER4W3VOKC8Wm+AEgP46gBSjMVVoFJJM8", + "nuVC0s/pA8yXan76TOMAQSB2lj1/5tMPl0f8gO+SyD4wa5s4oLWhfc5ENnEAkeFqgqo2+/xllgRagM1y", + "oZc9iKSe6Icu8Flw2CP0wwX7zGKOvdjNVXpT1Dslo+72Uuk+J9VLSh+CZe0aUbdsl/VcVsvXW/UuKFjU", + "9KopP5uxxluUvWuyETJp742neMVRJBMNp3ulJg80UiOnnb05nAQp1zuT+J4W4X8xgrLPU0lZr6r1LYYR", + "73ETj33klpECG68k5bQK895suti/VTZ9IPZJStHrb1fMYtC9+NK/arVbX3pf3vcGJbKzPA9B9eWszl2s", + "DBMZOBRj2apX4/x4+XisBAGS8vPVeRLDS29wP7y8HrXard5XbrMYdYef7we3V8wmcn2lxJ6w/Cnn11/6", + "Vx/vv/Xef7q+/lyC+4wWpVMkQTQviexn34W/ulZA8xwEJHSeQMRy5RXUK95bHylfL+mBPt/BZlIY8LHN", + "S9TDv14etoQmqtk3oSC7BAZVG1Y/b8EcEhjJ7AXyHOVjOb+gI3jknDoeWLadU+cJwgf633kYkNmvK/ro", + "JOjRZjMwi12JqJvQR64mFyrX+MsuwUlZQN5UozTUELtZ9qtycBXAmVcnLKC2AtUokJRSB1IefT1ptVtf", + "T/WihPuE7iDg0BjDyp2d61TzKUma/5wMONQEZZirpKzpx17uws4Beo2VS9SVV6QU2EjREKPmpgIi+r88", + "IGZWO2hLcWNoeklD0xYNQFupW1fDkL+yHd7Ahd+Yz5M5GwO+ATHWJT5T2YQ7TjkIOwvW2gGB57ggCELi", + "AFaXlhW8l0m7CweWDjqsu49X2qOA50UQY9UuldGipaGjaJ6iHz4BPNMdNzOAZ+qQ/wvnphMHEFdEeb34", + "IS+97pzPWDVo/YRfYYQmqAq9zLpGZdCjaE5/RVEWBj0nzAC+ARg/hZHtHMBZiA4OhsTAX9t4yfIQXvhg", + "mWEEuX+1DVlZ7N4ZCOx8BoIplAgyMkEAn8xIZLwLn1KsSY1aD/sKeoccma17UQpIAkQp/taDoZBeVnxp", + "Z/BkQvllOEXB6qXbVuPvtSq57R3G5RoXVbiWSb0OCt12J6RBMOzhbsni7babpqrVeIYW+FCNrAWj8w5P", + "822cMnwy3bZ9PX0fgcCdiYCmEcAPRpYbs5am9yL+1UGssnjEknmxiuKWhVSC0DNGHtFvKw9MAH7ofScw", + "CoAxehGK70pokwja4vXQSSiWd7Rubpe84SELXIKGdorsO9MumaLLbbdJLJQqADAg0XKDG7Xi0BvYqhfb", + "oPPe5QUcx9NN12Nui2sdRvPYBwTiNGENe3R2w9j3nDFkfgZciQeBqHQWRg7IXDx1kXHZQu9FhJ/3Lp20", + "DbtmPwI/poeINqrBJzC6AUs/BIYdFEl1FrxNcX1AfqJKvBMG9IcIPqIwxh3hpS/GaJXl4CpOzD4V5yOF", + "KGuR0qzcApoptC/MoVoBm1JGaT4IA7nTTzKxn4NkSWu2AaxyPa/xpdmJNApEF8qJYz8JacztcDp6m07I", + "CldhPIl97X3KLtSqiAUZdVWI0zDGHBnHMIT702+ZJSbrYgU7uXGdORsPh6XFDr6enrOAotLDESZCotzq", + 
"K8VX/wJLUnRB4ERwImxYiN9r6bETRlnCVDur5uINp/Sxy6309ZTiQ2ZRetZvmAzEok2xLokNNj36cXQx", + "NCgiH3Oh9wQjmNbE2xoqnvkimMzhCy3Z/nIpqvCXvGTnZZh6ESgRn1I4moZRgiurqtlrnyH5eEfOLYZ8", + "EhyPMfd3pCj32P1BtMIOIKo0skvnV5qGecPndybnHEdI5sgzCBq25SIXhrLnYQCvJ613f1UKO03/9wAj", + "txuTWeu5vUr/7k2fl+tcpfOnL93z1vOdcXFicPZ24a+zRMgAzGk+dNGV0kQMxSEReGJdR0sTFbPg/3Di", + "0FYwIMgVVBgyg6ZkEJEaQxH63Zv+/efevzTCPp+bXE7PIdFQixmlDBn6VNSf4bJXW+tSl8TVuwe4PHJG", + "zOkQO8x2TUJe1AhmW7HrkooLKUSO1shknmC16LBP2Wy9BbIhiosTqiOrLxNGEXRJKjpI6IhnXH0UATPl", + "Sm9EK1Icpl2EooPcUs2WN0kktJ4Oi6vib0X53kmd/mJqF4NGKuMSUrDbOnqzF3mpyNoDwaDKzy3Jhffd", + "Yf98u1KBCeI9wCaFY7vIZCvdGC4vwPRcydWTz02lyeJTrbsmZciLKrAHpra1LDS89IpK01upquEkBUC9", + "9IyhA4Kl8+fw+qqDYYSAj/7NLDl8ZUcrKbUlk+WOkTByXEDgNIzQv9WqycWzA8KgLA8cJmC+EP4GybnL", + "Yz9gYO8JuV9l/sVhyrK14wpbXP8iuWkz94b0kpaM4oyXuRktOZUx00gBRltrln9HwVTIt6s6SowI30hA", + "TeFkFhCwWPjIzaXw0ifiTgu9WyxKUyG+5Hm4UEtKM+9dKn724OlFCkLDvbq4sYYsz9UUnuiFuW1k5KjZ", + "w8rkjxaJGRXiTxgtmTqKg6Mt3WTNhZDMZGXOz1NVwy9pWKcovjBKVgzNW9UZ9x+rQoa8VZ1xI7t6hqJZ", + "nZGTqv9VYycN7UfPuywm5RL/kUUU09mTPVGSIwlpYcqiX8OiVWZH0nEXwhfQ9UEEiEgeZfbtEZyNsOOl", + "XZxfSBTDX+kBvojCaQTmc3Z1+mUCfAx/3bTfj1HHUZQ1qeowha2Ij8Mw021CoSjZ9hpWwKqxNylYy+tZ", + "mAyHKVmobLQXp25a2aA6ifXXU2P1bEAInC8Maq/4qEiwfPFsTea2nZTj9mVt63Ik5etQv1wV73xWNt1z", + "HYmWDkvoZIPp+mXBc+hYozB4OtI+cEJpCe/kc1nN0u7wvNVuXfSG54bl8rJ1zdNg3adBjrftvAxGYuwt", + "PwwOICZhVOGNI1Km6qOdchqcbKpX7kU97AtIAPITaZF/XkAuqdY2RTOGRKwWTxZZuOTr+Th2H6AhJpHH", + "e8Goai4+h4h/9JfcoUN43a8yc6FyJl+xAlAp+lJ1N2Hxy0sW3ts/54nfrq9kGjk9x9P9Nln46h+Z0mnJ", + "kNH4S0lmRWahkIbban7sJc1XzORonVe09BIFSYQgrl4+/XLBPR6NNdBoGys7Lc+dyOx09VI2SutDvez2", + "vAkHTp1a3bMU13p6TbZsL07SlOgNwjBLYdtPz1gzH6McK5OHMZ97UZ+4MZ+Pcdi7Gt2P1MUka7jnilEh", + "eeT5oNcd5QpWfu7f3PCP17eXFDuj+2Hv6iIz8sXtoPv+snefCif5y6A3HF0P6FrNQqpefTz7fGIYBTw6", + "vk45F1iX2tK03IUCRwFB/iq1JMsLItUrecSRYGbdmxAFhEeCF3dAUKxWAqfpL/UJNtAcrhrlzBtp8mta", + "LQPrlBsc+6TuzqqosbykMh0rDkz4LMllHGm1pnIgNZpWDU9HlXj13o1lyXlza62L2xRJGsbJwKaI6EQo", + 
"pUlYz6+/3Fz2RoXcqyUpZbOPqqsVnVJsTFnFIJ1m3VdUdnEQ9vkC9jeqwKnP0uaLjGzFFWX7d7GKF+wK", + "Y0v6bJng5Alg4T1UI3uLl9XQ7IJWNFugjBin1dA1w4mv+aHaDgqcOfJ9hKEbBh6206mrHK5zszi/JAlW", + "AIGY0N9+LU/ub41+OrzsZo//Knf3EpQLqhcxUPLHBQzAAh1dhcFV7Ptg7MM/hyzLUdKqg+aLMGKTisCp", + "YuMFoDfp1hSRWTw+csP58QwQdwZJx4OP8u9jsEDHj6fHGEaPMDoOATvtv3cCMVbrHbPnsweDnulim0l5", + "QBc5A9gZQxgkl1zm/JZcOX/BBPk+N6ZhB2B58fx18+HC8Xy4AE8B9M5LBYHyCMSbF0VCWY2H4oD8W03a", + "PSBq4KVrRnWChdJzhXc2xgrt4mZrUUJRIxu2VEYxr2yndW0MJRSLR/S6prXVNnKDs1u8eJWaKfoBhlH9", + "wxaJbnU9hGwf6I7UquU7qlxPrMxRSaYoYY2SV7TzMJigqTYvVXkRcuOlz6Z8/grElwuqswYnU2a/OJPI", + "kKKZaJ0Ci+oDkKqvtbmdSoa7ac6r5Jxpp1eXQkRkaufKskK2siM3eWVeVfVbcJe/SmzX/lX+wrApfbyQ", + "eCIBXkBivgqO0Fz4pmzR1uzBBZkZNG76KaNMCIv9EyAwmgDf1w+5OxX4EJXDbeowNUU2f3SquU30/OId", + "7TfqtalSmheMDdyPG3XpJ1KXVvMzVbWPtUovc7GfO9wvMirCKsf9Xe7weskTnFITK9VR6yAXh+7GzvGd", + "pWlttxYRCmU1L01IhvhqIiV9ZVFVk67wqBetq3PSZMZtKwllv57y9H5NxPXKrpz6RxSRNbEQ1XxwEar7", + "GWC6l/GhGjIQBKARPUocoF2stuzw3D4Estl6EbMmJvrlaX4zXuzF8Ws5j1tGYiuht3cqayo5EIpMukCf", + "TdGe3Zs+4woFydmoXR0RzCDwYGR3uvO2+U0U01biSpmpLddxVyaiuopAysZot5McDm1ToLEyTiaOPa+E", + "WuVcpEsd01EYQnVojDFVkU1IlF8rB8qhLBm1IvtiNq7bn1IdbzZX8Tb81D1ttel/zt7+zv94e3rWare+", + "XLwtx14SKq7Jc65MZB92nvRiKbbd0LOoqJoZoSc7MWekaQBIHMFPa9MxHdpJxtMKTDQNWPk/N4KGyytm", + "3xgbJrIMTQOrCfKx8QmiFDzpV5wHrZJGegrek4j93v9hBXWHPRZvxv+4HVyWk8deuCdKncbSnSjRgU1e", + "2+4M+D4Myhxva0S/lsaWSFeD3JHoRBI4S+2+eEArW/uxd9UbMLn5sT/6dPueuVIO+jc95gXZPf/carcu", + "+1e9LnNw/Nr/P6Y9T2+wm89vUOqZU9+fRZopG5+WxqdlD70Y1riNNM4fxYeTNQ2x+/2QcDB27Jqv8xXP", + "4RqLt3ghX8vqzVqnJu/02pZ9HM+8VSfv4KqdUjkNua+wzkU6Duz9IUR2EzwD1VYYNc0Dbf8hjDTwyAcj", + "ltzGJqKONUyVkayfw/rBIhwcvLlMTZWuI8U0Ca0MTiS6JWTFrc2qA9nt9SoilLZQcVmdsgzYl3p1UbWj", + "Gs8uBoxv6gnmm87ZRKLIvJgdxdXmXKLUAOzuR1F0VKuSC60/DcXcWBHgWiZP4UChVX7jyFAoQfaNI7+W", + "oU0YROi4ur3OoITnzzOXx9nUIrGdSYDKVVGDg55iTn/iBCFxFlH4iDzotR3gRCDwwrns9IR83xlDZwoD", + "GMlrjEpdZ1vDeH00e/tJgKvtza5JOYGzEtlUaplNFzu1vGTFj5X1JdPFyJji0n4PDPvGnkRB4KU1iCM+", + 
"1GpX/jkks9CrtVoB+hfeM9Htz0PPQLWfRqMbGd7uhl5CwdLQY5/K4x7wXB5s5szEd5YILychgcqKcz41", + "VPHW1jn9tBSwMu18SbYuNXaNWu3WzfWQ/ed2xLQk0wnJA89wWVQaFm9C3EfQBYGzgBGlqyP7mq3CrMRu", + "u9qEdpQS0pImAGM0DaDnpJ2YNej2tn/hCJLe/S3PB2Po4/IC16wNI/OMTwgXzXbkwYUcHUeHRh9g8gmC", + "iIwhIGX39cyusXrlrEQKcGayd/amfHZydtY5Peucvhmdvn138vu73/44+uOPP968/aNz8vbdyYl9BiLA", + "GYwe2T1MwNhnBrA9hHT7p7P5VI6gC5O62diU+Ii24XEnvG5qGK1CUoPsXBqqikS9ubTeNK5MUoadtJcT", + "Buou1oAsP68WujigW9gPJqEd9wyUDvRo8kOSXpBTO/1NRhRWDztMx2FO4ypy6DcHPALkgzHyEVmy4zlT", + "qj0l8l8oRPcstXTnv+OTkzfQ+SE7+7Ativ4//6pPAeyHprMJwzlYzMIIOrSREEMrEs1QjjVk8+kSIVjX", + "n0mnTrLOnI/6X3sss1by5033dmgIrraJq+F7lMTU8LPSmGxPnN78PMkBWW3A471vq/Th28GlZvi66jFr", + "r1VtlKOicLKXJsWWadRo1007Cz0CPzbIRvapavLyHMAleHj5l1jjRSABcpAVZVlYfRBMY/GMZS3khhef", + "MT92eeevaZnpYgojvaom5GvvO4mAtgH2HszDFhbHIFIV0uvLLsuqcPOv0Sf2KDL6101veD7o37CcNLfv", + "/6U37uSFboGmKoUu4IKQDk0pLaf7SoFbFY+RNHTiICPOM4MXnyYZIMbq5Wgez5VJ6gydYxE+j5kz8ka1", + "Ye/yw6frIU9v8aV71eVper713n+6vv5s3At2PBdNwOra9BFUyS8WbtLtGoXLuSIiS5frg5T+DseGI4p+", + "0QFkxel/hmPdkbgTjdKIOVnmVqNmg+nqa01ss0B7sSt/nhNuhundrnQF4n2rnsRVntIkMktt5poTNgnd", + "MPCQeGLhtzxXk95lCony/WMUxguN90cgE73w9JZTSLDwXE26OlPaN9EaFLu9FmGM9YckAgROKxPvKxBe", + "Zvo9s7uw+SJSlFYJxCTzSFZI3f3mrFp8yanzq2lrsVq2Rf0LXYbRBMD+hRaHsvdnFGQMKR9ur85HfXZg", + "ibxj9K/ux1IBSQeRmkgtCmaza9hLfterN2vFYO5YM9Jf755L9tOYeYsxyWdYFk5JQgJ8HcUmPPYAlwaf", + "Ijk8JUu7iE15OwcOXkAXTZCbTuL8sgAYQ895RED4q/+q5wojImo4nOmvtySKoWb8qvdb1XMrMcCcnpyc", + "GD2xtMNkfadqukHVWtDf4ViKMdtz3FBTZe3oZn4i7tpIyecWtp6XASHjTLRJxyDV50PrHWSu4vN+WWPw", + "kdKr6K5TUyUxOvysk5Y/HUh15VHAvisXJntyV1acfuwPhUEcrJGyvDjKBwT9zLmvJgxJaTkjxRTJWDHJ", + "UDozNbK7kd2N7H4p2W2Y4ycU7SXekCuIZjZan8C52b/ScF+p7mwsZDlkSdjK0xWv6XGW5nnbePq2DQxo", + "kOn5hMb53BRiUe0CIpVRq6inkB33pnd1wZPipulxNTmUs3lyk5S677vnn68/fKg8Jdm0K92bswLFTIyj", + "rDjJ+9uEwY0i+Quw0gZDdwa92C8JijJ0Xvs4+pbPk2IpYCo2G5+zYmhGL6RMepYtsmNZqT9cuQijkYDn", + "sapBR3Koc96xSgvNNS/MnzKENjV3WRZ0yXTaj4K5tN8kj9bPrV622BGY6tDrh9FmTP7BhpOrCLMuh7CM", + 
"foRQOI/oRWailwtaluZ8eY8M3Fg1IXO+187I5Mi9eLzd9LRYv8L6mkEObxrJC5OQi1UGTvCzWeWeq1t6", + "9KUa2L14haiPZp5ixihPN/myVQaGos3mWTbzhGGzIeqrB3N6mYDYJzelWZZEI2O2JatHAnGL/BPzg3du", + "qC735/D6yuFAF8N22AhaJxr5LPhCj31h5HFvTAs0YKF2jNAchoYCRJgg92FpcsWh3xwsnlXsXhIVeVGD", + "bZkO9niaeymzwrHSZ8iTP+lQ/phStjl9rM0Cn5T3bNt3i9ppeq2vgXJZkjAyA91Vczojq02+DdWhz73Y", + "k10hnDtUYEP9uUkEIfdXMRZkmYPvFS2e6in7ploqPPIjpvKXyU8O4RiCCEYynwnDKDtW2M/ppswIWbBr", + "Txg+ICibI7qr/Cf5dv6uJUKY074itQ3tHWMSzi0ne2YSn7tFaaIH+CxO96bPSooRZhPL/poQYuv06OTo", + "hNExD+JuvWu9OTo9OhHx2AwTLObaFxWYp7oAmY/yeZ62CiDGTmKPoZsOZEGX1qX4/pGhQQY0sFnOTk6K", + "A3+CwCczhqK3/LsbBkQk1RCl2mnT478x5yucHIAVfNyLopBK4eeCf+pVSJJ1ZIij9e6vu3YLy7o1dNVp", + "Q+lT8peA2Z1B96F1R/sz/EUQeMtqBNJmqAyDA9lg31HIFuyQ0AGuCxfEIRGYTJBbidEEA5UofTw9Bj4V", + "KcG0A+cA+R32kIyPf7Cf1d+eOV58SDS3pwv2O3ZAkumLdndYd/42XdiFLm3Row2YqwUfgfFMBOaQMH3g", + "rxInn8IMjsiw3nrH8yAkQqOwlJYq1Pj7QLpj65W7vivQ028aT8LYdSHGk9j3lw5HqZdJk1ZA3nO79duu", + "KK/rzIFPsQA9h2XQ8mTYEQfjzcbB0EHxIYzGyPMgv32k9M3ppIzMJMWPWBN6WH3vRELlYB9431ZbQxh3", + "7NpLXE1+dn7dWofE+Qg/B4kzengfcnm8EWLg2OGblkNcErdWJJNSbJHQiSXOs9h41ov9jSxEuwQd7Bkx", + "wAFtxIClGODUsj0xoB6QC9Qh4QMM6Kko/2an4SLUpTQYwMfwATogYMkaWWvhrZXMmBMTCzSiraRBh3a3", + "kRLJ8AaZIGHdq+MuYssTdM6g+7mJGtehakE6dGNHYuckGae/lVFysuUZCnb9MPaO1Ru6WYMuZIqT1x42", + "iIMCTEDgwgIRn9PP0r3ErFhvH7cMECcO0oCLfSGwCq2dI1h9rxdb/0V5YfvekUN0wgV3dhEnmrLf3Bx+", + "/IP997lsv6mUYq2OChvKrOJ8IyslEU8SbVJOeArHXQqhzW22SK1UcXjzGiqPQqxxbLAda2RbhsQVzKTk", + "zVFcItU4/dyZKfy4SqyxbUmkWgXNXyQC7LXT/QUj4Yb294v253DlM9x4eu/u4BYZ1+rQVHIkHshBvokj", + "nI5xzOz0fJewcccvEaYXIN/JtDZtMG3dzzbc2m7TucSOK1PW3HyZASezun0ihGTr2UbkNqG4/5lNDgNE", + "QirNj39wjn8+XkThGJovl/LtU9TRk5UnmF2XV67I5EIwM3wy9U2IySAObti89rYp06GXSK4dn3olBAW/", + "QzeWthWG36OdngpXIWEVCMII/ZtnqRc5jXjwNY/SLJg5CUA+9Bxut3fY9jgfhDzvp9uqPzgyZIZ94D4c", + "/2D/sbDiO0PaUCmwkqUc9lUkh7I32mfGNBIPA3EvrfNZnOyTanO6GzBug5SE+cRvdzMxzznGUjcC3w+f", + "6PS6F4E81UrRy34vU7E40WU5JsDHP3CArbjlaqhK/SK/BLgGm2QHMzOKOLn3jk1yyGgYZQ8ZpUCwCatc", + 
"DUsZJcAaNpGKi2Jt0qsudF55JS6wSO23sRfTP9pmQwAvzLSSJUCB4ezt2wwQp5vQgRZRSP8BveYM2yPW", + "NF0iWf0GBywWktqLxxpvk+NHAsY+PPbAFB8nqd+Nl0bMbo2snUNmgDhj6IfBVM0qkKQZB9PilfLr6QVg", + "5WZHonh7tblMJvhOE7TwlNuMZf6JYbRMecYD03vklR9z24oQsZI7OXhf6uJjTb0bq75/AabnIuZLn32s", + "RA7RKeXrH5v1dVsJ2623uxJ+9BaK5gsfzmFACroBM15IOkiezgF+0EoY1vD4B/1PxfMSr3QxXnK+yQsQ", + "OoGlqZ2NYzz0KaA7PvIBIXC+ICIvi0EoiEYtFZZCLNQ27fi5mh61TG8Mq6+dP3/jd5/tzzpS6+9TTWES", + "xjxJ056IiJSfCyLCfGcgNiLk2A+nVbqKH04dHwVQZj4ScOQlymU4vUQBr8dyiFJFZHkioUjLO14aJAtP", + "w6iFBgWEFZUsBl0akudGRKTGDp0pJBTVDMuGmTHilkfNzCWpGwz3pqSqgNXUcUCQv4Gpuw6Vdx0CvxMH", + "QxC5M4fNpNR4Llk/66AT6eVrZRQMH6H/C/6VToQC1489aNpf2hK3tNpuucCXLEAHsFVuPZnchgLGolTM", + "lMc+34+X90mnDJRWwBVy6lgdslbbswdHriqEaijEIoq1eTfPaqWJ5FeOnctwuv6pE0FMwgiWeXKyBtxh", + "BLl0n7w4Yrl5DccPPQ5Fr30/frbLAgIJAh8iaVel9sn6NMrn/iifOe9UwQ7bUQLp/3fSSH6zs4NSNxGU", + "MSJzrvkJNEH8gBams3gywXAjauBWFc/t33DTvV7BX62xQjW33IzKoZMw6ws71kJ5MXOhf+zBcTw1ayC9", + "R+DHrPyac967dOD3RQQxi3EHU4ACnJYzFOW6PUDAkUYenkP/gk11KB4+mw8w+3p63rtkSKiIJ2OYxFQU", + "svLdVEzokb/TsDIVfEuFCgrq8TRraK4Z6sP4OJ4WWEzh+fPepZnlrXhd3Bs6XOMZRyDgMaZ6tn/Pvjsg", + "c91wJlE4V1/ngtCDbV5/EbF3uwA+OWPRNaAo7ohnX/oZEezMmR8w1gmICz4TlW589lctKTgKFJxUiAyB", + "dUnUu5ULGmAtBYQAm0VHqBfbRjYkskGwYu7eLwWDzNTisFzC64kIi6sPf7LPaiest46j611/9tGr5me6", + "Amls0PL5/QEusWLaNE5L29U3CDMyEDlyqkzB52GAkQcjSWLMLSR0Wa4szwETAkXBKGFo3+bzQDksYzgJ", + "I1gJzKYeDD7wrSFhBhoQseqxoYuYkvWEyEw9n9VaNQb40kRQhp3dsmOL/boyhXe4IsGVDhdGBKAgTbZT", + "ts4kfy5c6WmDvWUb8u+WLS7ZErHK8ZKeeShyuIORDmKRYvdFt2W8dNKc9mlUFyt/mpguDK8gxZT/2oVo", + "yi/JaR7gssNrKC4AirDziweZ4KPct3SA8z/v/ufXvNgqdVu0e4rCbriAVvKQt7RdF2u9HrzbNWPZm7Ca", + "N6OqN6OENywDLWsoaMfsGLbU0vjZbqWpfYbLQ1HWth54LHFRlxEYuhtm0DGDI7THLTDEj8fTTo1UE8wb", + "kGC9R2CdrBN7HAlggkli6mDfde3yYTQHlE0yAFwnEUBCOVacyXUcm2NKtKw8o7hK2pgT9tWcMMqkIves", + "FOjK22fpFIUrIruM8zmP1i8OVe+ugOMxhsRxQeAhlhlO0vVGbw9lK3ZuMfQYG3FYmE2+CA8g8lmGedsZ", + "ylzt9OKhsHYNwS5FTCPZs9qWxEsq2zl+y3SttuEd6JwVBxQvO3xgo2jmbV/3+y5DAUeHzRsve+JNSFm8", + 
"pIXBjp9vBHlUsZ4oFKkA3Hit7Mpr5crgEZfwZ8Kb9jxvr8WxCxb/2yYhAaiSFLVTbe+XGie4FbFMKp5c", + "i/6ylWDiMG9blqJBZl9oxMJLigVb1m8rhEmP/pLgyUSBNxtM+GyHbDFJ+PmVc/E0JM3hbrSYrHDG5hmt", + "NLF/9bF54ClKMsdmkhb/JRluG1cAvkkrXwFeoFyAtXyQFQIa+XB4p7yFss/CX+ZpedkStUBIRpm6w4ni", + "wBE9yysNcA+KS4QJ96KQ1WwPVaYVY5cVNFT4J1kAunY4czU0m3JQyttmmfU38Li3jnn6pAopeiFPFwo3", + "rzPLSPl/YTVJkAFoUZeWtr+Xre9Z660SW5q0iL/xMVeppFZ+GrRoSA/EG6Jges+r7u4I8q7Ggeih8yh8", + "eiweCVJPovt5qSvRyxqxmd9zHEiJVj/TiSpFm6xE+5NyhO3NPDmo7MKw7E/cRYgCYnnuzlEQE0iv4/Kv", + "CIIHL3wKkqO4xjH8EZIbOvmhH8LswJO+wUp0nzBYt9ot+B3QLW69a52dnJ12Tuj/Ricn79j//q9B7oju", + "3Qm/iWzigGSQJp7DKqghhW8NYCcoQHgGvfds8Prgbl82ZkhtBenI+KSRj3sqH7O7s3EpiY9dELjQN0es", + "nbPvSQY5nbzjTV73AyVDgUU8mcjKGTquRNpOA03ZpD70eGbPypdJ2bxJ69gE1BdkVE4ybFwyRXDhg2VZ", + "Eh/6vVQy8SavWjJxFNSRTJFE2i4lEwfTVjBFonUjlxq5BIvZjDJyYYNySSTrtvG+lQVRqrxvRb2Vxv12", + "n91vObk4dFi7+DXW/oo2XyUYUtDEMBnF1t4qic4aUNGhBNLySV7cw1VlnxourgkjN2/xWR/XBDFKJgRR", + "oWBdL1dT2alkExs/V+HnKvBR55VbMuULebpKGqnj6rqP5Upet69rsRaJBe/XUJuYu6v4h52/a6XMOHCP", + "Vzq5fHuULFzt+5pixQzsbu3Qtvwv/Vkb3t8LV5dK9m6r5Fbh0irpV/i0CvXQwLeH7NaaU4B/Nh6V3qoN", + "jxrcVSuOSRiwfIARILDDbqB0c8XeW3JZlT9r5bF44B6t2+Ww7Xmn/ryKu3RRbQTDHinuGnmw+smuv8Hf", + "hJjl90CBG85RME3odQ4xBtOSE34AXYgeGxlURwYFse8XKD9YOguw9EPgOShwQLB0xGrbLQK/k+OFD1CO", + "0vJT7kSGWOQmzeBpAnwMG+XCUCmUM56G3VblcJt7uvAZ7kRxUPXGkc0aWPnKkWYJbF469j9vKRaZHK3e", + "OnaW9ZH54YPIRxCzdPjQCrwtBgX4gNQBZWM1zvbG8dsyV82BRCtQIJI4OpsMOzDasov/txkkMy4ARE05", + "56L7EdPTKwz8pfp7UulXJ5ACf3kvG1QqKuMw9CEILGI6MmWfLXD2QuEdmuLUxjgPi8y+Lxbv4Ux8MGVH", + "7ZOgizBiDhgqGST3SxB4ThgT+qdQHzHVH2kDqQseORdwAmKfl8T4H0oP/+OgiRMHGLJjXLd8MdO9HLRV", + "j4REAtwnRGYCmsHt1VX/6qM4dJxx7D5AcuR0Ly+dCJI4CrAzDsnMCYOO4FC6NFm2jZF127m+uv92Pfjc", + "GyR9OIMwn6YoDgJ6dwkDqlGxMdpO72v/fNS7yLbPjJpFT/fy0oQSMf59Elxr7fTEOyZhojurF1z3vbxx", + "sdq3QkYZ/XtLdQsy94FjD+GFD5Yd5lxScTsQbVlhBe6MEk5KrgzlN4YLPhhzUjno24NyEOHk1S+DFBFd", + "KtAnUGdWm5STp/xo35IateUHCy0JNKKrEV11RZfkkw7lk3LJleFRpmvpyyOkyQBLJFdPDNb3DldwNVaB", + 
"xirwWq0CzWXlxS4rWinanP0/09mfOWt3ogcI0405pGzEG0iP5awxqOSgb1yXTwXqFKRUuEFkSIGEwjd4", + "1/4Pyh0DEoB8XM+HWaWQ5i0z71KcY6ANMHiWn5k/sfJLRZmSLMnRg5mVTpXaEgmTi7coxPXfLY8RxX+3", + "nIXB2SGlH0uXxgwM3G4+ZT0NHgbK8g42T+YKXNac4nt8iudDKy0Zul0g6BVY/FiUIyzjdMIzyFENPZzk", + "+P6okouHst7hirysTq9cbn5O1lYv6w1L76kD4XkY+x6P1abXbp3mskd5bzJclRQffRFZwxKJWZRvZiHf", + "/P7O3zXsrw6UgViNKGsT4eupdpSKVa256OeVqCtVDG2EaqMn5WUXQXMUTKu1JdGutvT6CMlITHGwdx+t", + "DPLggsx4NhyeMc9xZ8j3ImhyC2Idakq/7QsSvjmNJDl4SVLGn5sWL3AhZIr88/kYRO4MPcIqLUi0EmDS", + "7loRMiRwIVzBu3JgC/EhxzNaTyW8jVv46hrZNmWS2Hex51ZSKZuwtKk5u/tcXwnX5fJ9FYVUhv0V5pfy", + "iW4/lU1loilh4WqZZHMv421qyKOerN/bSKNXIo3s71qNLDocWaQw/vYlkR9Oq/zK/HDq+Cgo6EZFc/Rl", + "OL1EAbS1BjVi6GVj5Xz4CH0rByve0trjRtIB7fUBQd8zZieE9OB12GwKHCWFcliHuoAMeS9tmBJgQShh", + "5JWtn31+v+RrqTn5tdrXgAc+vYci6IpMCiVQXCjNVoEk7b/dQ0qVBjVu8M3hpE1vmEhh5Sy4DKf1jwHh", + "aFSSNp95QGDhSWQIcxixn89Vx5dNO+bwwflEVQmguWvSy7jicAhrOd8IpP7cNL6C101CbEnmY+FPkydy", + "HUUnrnOVJmPuGiNe2EsJvG6yryQMRsxgfPLZjbfcy1K8TMfVUPtubxucGL0Q8osG/M5P4EKRFltmy2TL", + "Lc/vFfDZUDAt56vDyfK1Ja9TjoA6h9sioogkiEexvEBR2OacW/+cE3yyAuuVnHfHwKeEEUw7cA6Q35lG", + "YbwofTilyp28BQryYmM4bABHDJBn3S5t0qMtPtIGhxIXtv2TUIeYmuXMjJvQ8E72NbGEWmudY9ZXn+Jc", + "VYzx6kMq1JtbDjd2Z10B5bWudqfbZe8VTkANDTV8rb37ablts6fkMYaEVLkWYbZ7sosju5TnflDIBQXT", + "oehzIAmjd3RMKohZ44xU96RhJc21ToOmjfHRAnVI+AArEi063Zu+w9uVc013gUa0WaNP4mPmV3TTZ/jA", + "FmlJdXwi/aMaG3peeaQUyVGrMEPy4zplgoKU2u2IvdERGQIkrStq4TZNGPlJG/7acNhsykw1GazswLHw", + "luKVCzMuU6aUvqnTTJPKd6/dEx7g0so5gbarn6yHkcFnuLRJppLClLgv9y+wba5VLitqAyhdovsXK4KY", + "xqCtkfjIBsJBHPA4SmH4ehFXD7afL+PowabeAzcPFQ7VyaOEWNJ8S3DpPAI/hvqsS/A7mC98SEX2A1ye", + "vmNNT1tt+q8z/q8zKt7LszN92WxypnQZPClukp+pnM5Z4/5u8jJt866wUqRd410TmH0uFaWFIXd9EzIb", + "16CDNFcAhgCGiwqzsEh6/SLuPZwS6th8Ie/x2r2rz/5zN7MOBH8K9RR+dyH0oKFUKN+bGnxefTE5Hsf+", + "g9md7n3sixpZEKcyAZcKBdrnFQsGuvyawgG/pHTA9cVDE32xZ/KBsakqJPCGpYQLAhf6JW637Ds3ZChp", + "xjMqrklqcLcSPsJrVigYAuwVCnFhiODCB8uNi43UYYv+6ym9LPd5KudtpYKVP4Tjv6FrobkwpME0R0kj", + 
"pPZWSA0YpW5HPjEzmqWNldvmLOysn+GyedZLjY0r3dYZspsbu+7G7gjb7yb5QJwGxnOa8yCudzQP5BHz", + "Wo9mjoB9OZo3Y1bjwDVa/Ss9MH+w/3aeEJl15Cdm3a4MPwIE8MMzKDUQXgACPkLyDZHZSLJ9pfyQ7KMX", + "HwWQd/12+dOf8nTTVknHwKiiOeWzvmwKZqx5t60h8nJ+RsEjIrBuwITspXcC7bOvje4rfT8VfKzk9Smx", + "3fh66sIhUlrcUgwEn6CU1pvnLCXqgaPELtiB4/ZFIxw4uKsENgjCeO2xvWdnO9J6AbF758rzrU4uwACM", + "fdiJAIEdNiZlD8Frq+jFQgrJHzr8389cxPiQwKKwuWC/48SMZCNoeJ+D9d7Lcn05bJ0EHYd+8lfKFk4h", + "+yxbMmzGiTAlV5Mumt3Hygj6epxwOFH0h8IJ2w30X00reLFQf0vO5fAdDOeKEPzanFt28s3hfMyYr9YN", + "UvbSs/gX9rW5QUpqVPCx0g1SYru5QepukCktbiZIUIx3/IP/YaEEOkAA4UyicF4VZMup4edQBcWyTbDx", + "zzvl3d+2wrur6ICvg2v3KFftlSE1bcKkmY2pIS/akpAt0kgVJjGLgJ9DB94LEbBd5Zdvl53yK9CxJymv", + "LKWXRg8W+9YIrxcWXka5soLwKtN6FlE4h2QGY9yZUx3UrS5flHZxRJfEB68qM+VN0vWLmOynuCgQ+J0c", + "L3yAclSRH6nOHaCI5YYpX5opKQdo9mVTN5B/YhhDazZkrWtz4H/RXgfEfIcd2XxIwarbt4dkaG+1DBbO", + "I4wwCoNGJu6TTEx2pygRJeesKhPTpz4bV+8oeWys8vUeAAIvacMmr8Y+V6fdRA6GSkxuM9NCQmd7kG0h", + "D8uuympkea1GMIHCzo2fYc4KruImFbfM2+KS/7qqxBU9OovQR+6yOuWk7ODwDjYJJ6Ur9A3r0aSbPNah", + "ZbVHo9xuNI9HO8/ain3gPpQnmhzSJs4THM/C8KH4nMo+f+Nfm+dUnmNSxUmd20MO1fvEDjuqeHwbgJjM", + "wgj9G3p84re7mfgLJLPQYxU9gO+HT/pqy3yDmB7IWUA9z9jHtRjxGBMQESM7DulXfo5dd2Myc9hlJc+Q", + "t1g+2zCArilCWc9D5Mw3J2caPKjcw1AmjpUMVmYQeMJrxA85wVRYPNmGQzeOEFky/Lhh+IAgHZQVRbpT", + "6YGhNDujJAS6AyvTQVXe3+HVME+AOYEc4EYOCzl8NeyrqKohifNYbmTx3sniIiMkkvhquEa64dzAOgZr", + "ojEYArL8VZpleHM0m53UOqoiv6sNQ+8RQxs5z5KjS09UUaezs4snK1E6/NBerrZvLtAhpp7NIKlnndmZ", + "5lFlHx5Vkr3Z9DOzrqp6KeumBdSd8ZIzVO705oR4IHa89r5Wdt+mxBBbtKJ8aCTCzkqhqrT4BHg91CoR", + "oR7q9Ce60atW2S6XE5U5AbuEwPlCJLdkbRXxYRIch5YMsJEgZS7xCDNfaSFCOBH4+3dBeOFHvCpG2RVD", + "R5B2LMkdxpIs2vIwa96w8D5mM4viQGxVhUc7ChYx84fgj7u65T7vhabS5DIrkS9sw19CoKRrKrUF8GbC", + "WaBKuHyEZMiHbUTLy2kH9bL0GiwNYrjmQrHPFwq5S1uRGgTghw4mgFQYDAF+YNWghKWwwko4AvhhyAa1", + "FxH9i5/RNpggogaHanHd8OgemAFNbLCL9EjCa6bzFEYPZckiUgdso0tT482UBpNwVHxjSKUIKavqSZGR", + "BLzwjo7cjua5bd/ezxXyXz2JoRjExEKv/p08wz8cGzsqxquZ2auVglBubcO5+/dQrjLeSoclo4ryhzR6", + 
"QnLhXe4ln54Nr/6wTDHR1LzeSIZqqT1kY/RW966UiOaGoPq1KNTqv5qSFErJ3qYwhVKYQsELrjDoZuor", + "v1yZCh3c1uXsFVtvhmCaS+pelq/I7lExHLjclFRH4PxQ/1nlx5LhhMoTWJDpIbu15FhfD5qKwQNWE8R2", + "rZpZoHFzMcf1Z1+QqmP621maWp2fj9ljZOVjEn+y5AytAn1Uwdd9NnrD3C/P3GkWkxulCCWHcZ13pyyO", + "2HY3Zu0dmbW/qbgPbPKHpJtUV2XYnMTBM7CAW9IjhmzsRt4cjDLBN6zRKH4ijSKJXRE+Q6WRoaJSO2Nx", + "30/ex7FG1yhjfRY4yV1ZerKwXyMDNg7gJcDE6V+whPUz6PhA7qApTRHApO8Z8xS9OdPlKdqBj22dgp6F", + "snyNSWT/fGtWkCX2jjd2shBbvUywlnYazatMnObBCYh90np30s6Iil2kUEvmfrvK5EOeSW28dNgE+knF", + "J3M+h12oXc1jz+b1rU2mZEzGrAwGOpdxDWNA3FnhsadMYzqcYKBteTko7yQcGbZu+yKapPhUsunHnoVi", + "qfmRKH2DOOh7OJN6di0EF/Pt1jQIiQik5vWoIj0aJ5tdvNzgYzcKg2qNhLZy/g7HKVAkQtNppfvEeRQG", + "r1pNOZj8rsnGIo9OO4UkUYmPKtJ4my5uW7jr0pnrgndVpUppp2QUX2c62qH+VIeZobwkZ+546UxEXt6N", + "pe5VpQi2T987Xm4vg6+iFOw4h28GGWto6M2xq9HSC+fcltR1euge/6D/6chf7crcFQ9i64cPSjgHXvQu", + "Wb0JrAxGd1/2zrI+nXYTm/zA+XpxejTVe6vIEsTdc7vsMXFN5jpk96Q95qwtHZ3NsXkIhv1ah/VG5ENV", + "eUk2azKjtXA48FqT+yUftlVtUhUQI27gsLL1USrgJRxtbHtVqoJaDLJRFcrlgGDLbYgCO1WeHQe2D3rq", + "K2O1m1JjMNtngxl7RK5hLWPtd2gq20c73gJEFGkG15UcWLzxN/UxY0fwaVLEaGETTiLbhaurjc9iiQhi", + "DK3qLcq2q1i3hqyvsDPZAPeAAs8KKtawNkifUeBVQ3PwxlSC5tABEwpowXn6CWAZy6wuoXV2cnbaOaH/", + "G52cvGP/+79GYzXr3qUT6ImXHqsdCkXLtho5hXgMJ2EEtwnyezbDJmEuwfIEBQjPVodZ9t8pnjcF9EYx", + "vb3HgaIl/tU+DeR1x8bCsRV36e28CTAPaZv8/cARoNGDLsv+akJ/y0CIQ65A3ajhjRq+ezW80S0b3fJF", + "QqDwmhXbmQBqKotUn+9bqJ6envMUVC/26fFYYTVMWq5iPxzKzo0VcZ+tiNu7FyUEcFCeU40y1ShTB6NM", + "pctIRfVGbLMJSFYMnlhpNTBvNUayIGEaq8NmtRKDBrBdveR4HPsPndQTUR9R9D72H4RT24YUFTri4fgn", + "bskPochTKVpsw47G1Vuz2zoipWsyJ55TSSxK2jUSQkqI91b7vHVJwd1VKiQFb+T8EkHZ+9cNio3Dca7a", + "qdiQaTpriA2xT/srNuSaKsSGWEcjNgxio3Kftyk2fiR/dgo5IysjIPQg1xQaBx4HocGBsZqRFtV7Gxqh", + "393G4TEfG2HAUz2PRwNtVERJbIQBD7pC8UFx3zYP5Oauf+gxFNuWI+XRFJnrwIYky4EHWuy9cNlW7EVB", + "utSoj5qSUTHv48teWSolpBrs8SqVnwOo/nZbdlnalKy0u0QlKTSf08wtZWWsHOAE8Mmcv8U+fYuIhzqc", + "olfVmUTKc2aWgrYj0cixvWpYmqgcbdz8ncrGesG3aq0uM/yNZNy9ZNy7QidC0JVR+XZSZymyOOPUo5fH", + 
"UjcQEtlew9UpRo0U3qUUljuwgmZaotbtuWKqSuBGMW3Er0n8CoWkSifeuMjl1fM6bhgHpCJegrWRuchl", + "2UfwCJAPxj5k0lcRN3r7wkdIeHU+fM5mPHjRW5Uy/sBLRmQ2a0UzJScVTj7NC6LBYTqDpNUKSWTZP8Yw", + "wsduHEWwnLMxvx3whg7tVuDeWwyjj5Cci8G2SHd0ppp0xiBuChC/fAFi6MYRIksmxt0wfECwG1PZ9dcd", + "FVW5pENZcpPkzrZfQ8ZTRGbx+NgFvj8G7oORnM/D+cKHBHKavqbzO9rziE7E7VEf2dDXFJfncvgcgb85", + "Oat4e3XFvF5x3hkEHjvcfrT8kG9Gdh/yYv05h8wM7uQCs3NYog8TEJlFwZB+XQ1xrGt9rDF4to8zBl1N", + "hIXh1IfboTc29E9Obxx9G6a3FHE/Hb2h4BERWF67CbNoJqkN8w5M6bY6vukII9a3L+ba4imuTmTlzO4j", + "LDcmu8BGX7Q+VllNnhz2UsobaW6IGdo7Bq4LF8Rseeuy7zixsIlJCtSmbj7v09qOPYkPzidSDEkGA1AJ", + "9fGV6+iv8ZhKyItju7D39vQVQVbdoqSSPv1ej754n9a26tLTwTdAX3zlDX2V0hfH9gr05YdTFJjJ6jKc", + "YgcFDmBn41GJgnHJBtqScwY9gun41YS0u3u0H06n0HNQ0FyfX/j63G79dna2q3UvopDSADPa9gKCyNLp", + "OI/ARx6bjG6KaIKCqQPlSGaFlxG2/irfbn3vwIBO1YkAgR1mA6c6NH+r0TFzGJMKbg5jYsfOYfzyxirB", + "ZOGeFepujFQV2jSjHlv71BzOxzDCM7SocYdTOtnd4/gZ+CXtJpJSbJXA9ZPWv9CpKGoudatc6lQMVpPk", + "AmD8FEYlrhRJLnbawZHty0TqjRxze0rS+QwE02SifdKWXAaZlyCqEeeN0lRPaSpndU75WWZcW5+K4JRK", + "4qjs2s1b4FKVKvGU2hbfSzD2ieMl8pqHxobpN3NTklS+mcsS9oH7sJVHqiEdeY/fqCokac1Hq0cYYQGC", + "0f2JrkG0ky5QGEaPGi29H0zCj5B8FYNutCaxAmmaofH06OToRJcDUvE8+ivpemdRbnhUstict2UJsX+D", + "TgRJHAUZ5OVuOlTMxkFA+SeZ4ntHDtkJFzzlVJEFnuB4FoYPHeGIdvxD/GAR/k6POtG66KjGf7ePbBcD", + "mR3Bkol27AdmGSou4WsOtpc3TuTD01UyNXp/iRZ3VsxxLPBsY6aQTYVffQXHCMUN2ybK3Fu+2Yz/JIee", + "u08K1FDMlGVcoVhJ6oAI7CTb1bDnHrEns8oUtqgujya8yf54rvC+5q20jtXMOdOK57iTaZnPsuaMPxyP", + "5dq+o2LFjT2y4JRcCPiSFxSzDzJTq6srP5YSsn3agb2g5W1F8WfODdNZITAQS5TtLg7KktfUoPyG0ww1", + "F9dhttxpkg/usUoEVq8Ga4170V5GyNRJopUA2ATovXDmCEGsCsWsGB/TrtKw7Dmhhsr1GgLFVgwOa3jr", + "pXlLjUJbh7Fs1D577qqnB+4Fg21eF8wiwzZWXuQkzXDZrpVDK4mQVw8beWBUENdjzgo10apcHt2kbF28", + "hPEek5cO40lZozzePvCzpkQFLzCxgfrBq1cP1gM2jcJ4wep+pCDIjTKCwjp9hstWZRqQLQuJNWtxyUel", + "phzXHmoTK9X/qiW4ZGoio3OLzKpRN1nQSjmC9lJyjTTscuT0J8y6jWNKHdBrM67yAYGYJDyFsDOBxJ1B", + "z1QdKhX8e65ICTJYMfHQi6UbUuCtlWeoyS7UZBfaQnahWqJZyAZs8aqVOcmtxLLwrTkgE8zPIJe3LOWk", + 
"w9R6qmAj7/ZKBUxJcVUVMO/4N4YgglHi+NfWugIyTzIuD+LIb71rtZ7vnv9fAAAA///ndIaI3kgDAA==", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/api/v1/server/oas/transformers/v1/tasks.go b/api/v1/server/oas/transformers/v1/tasks.go index e440e1218..c04cf45ac 100644 --- a/api/v1/server/oas/transformers/v1/tasks.go +++ b/api/v1/server/oas/transformers/v1/tasks.go @@ -19,6 +19,13 @@ func jsonToMap(jsonBytes []byte) map[string]interface{} { return result } +func mapOlapStatus(olapStatus string) (gen.V1TaskStatus, bool) { + if olapStatus == "EVICTED" { + return gen.V1TaskStatusRUNNING, true + } + return gen.V1TaskStatus(olapStatus), false +} + func ToTaskSummary(task *v1.TaskWithPayloads) gen.V1TaskSummary { workflowVersionID := task.WorkflowVersionID additionalMetadata := jsonToMap(task.AdditionalMetadata) @@ -47,7 +54,10 @@ func ToTaskSummary(task *v1.TaskWithPayloads) gen.V1TaskSummary { retryCount := int(task.RetryCount) attempt := retryCount + 1 - return gen.V1TaskSummary{ + + status, isEvicted := mapOlapStatus(string(task.Status)) + + summary := gen.V1TaskSummary{ Metadata: gen.APIResourceMeta{ Id: task.ExternalID.String(), CreatedAt: task.InsertedAt.Time, @@ -62,7 +72,7 @@ func ToTaskSummary(task *v1.TaskWithPayloads) gen.V1TaskSummary { FinishedAt: finishedAt, AdditionalMetadata: &additionalMetadata, ErrorMessage: &task.ErrorMessage.String, - Status: gen.V1TaskStatus(task.Status), + Status: status, TenantId: task.TenantID, WorkflowId: task.WorkflowID, TaskId: int(task.ID), @@ -75,7 +85,10 @@ func ToTaskSummary(task *v1.TaskWithPayloads) gen.V1TaskSummary { RetryCount: &retryCount, Attempt: &attempt, ParentTaskExternalId: task.ParentTaskExternalID, + IsEvicted: &isEvicted, } + + return summary } func ToTaskSummaryRows( @@ -197,7 +210,7 @@ func ToWorkflowRunTaskRunEventsMany( } } -func ToTaskRunMetrics(metrics *[]v1.TaskRunMetric) gen.V1TaskRunMetrics { +func StatusToTaskRunMetrics(metrics *[]v1.TaskRunMetric) gen.V1TaskRunMetrics { 
statuses := []gen.V1TaskStatus{ gen.V1TaskStatusCANCELLED, gen.V1TaskStatusCOMPLETED, @@ -206,22 +219,29 @@ func ToTaskRunMetrics(metrics *[]v1.TaskRunMetric) gen.V1TaskRunMetrics { gen.V1TaskStatusRUNNING, } + metricsMap := make(map[gen.V1TaskStatus]v1.TaskRunMetric) + for _, m := range *metrics { + metricsMap[gen.V1TaskStatus(m.Status)] = m + } + toReturn := make([]gen.V1TaskRunMetric, len(statuses)) for i, status := range statuses { - metric := v1.TaskRunMetric{Count: 0} - - for _, m := range *metrics { - if m.Status == string(status) { - metric = m - break - } - } + metric := metricsMap[status] toReturn[i] = gen.V1TaskRunMetric{ Count: int(metric.Count), // nolint: gosec Status: status, } + + if status == gen.V1TaskStatusRUNNING { + evicted := int(metric.EvictedCount) // nolint: gosec + onWorker := int(metric.OnWorkerCount) // nolint: gosec + toReturn[i].RunningDetailCount = &gen.V1RunningDetailCount{ + Evicted: evicted, + OnWorker: onWorker, + } + } } return toReturn @@ -289,7 +309,9 @@ func ToTask(taskWithData *v1.TaskWithPayloads, workflowRunExternalId uuid.UUID, } } - return gen.V1TaskSummary{ + taskStatus, isEvicted := mapOlapStatus(string(taskWithData.Status)) + + summary := gen.V1TaskSummary{ Metadata: gen.APIResourceMeta{ Id: taskWithData.ExternalID.String(), CreatedAt: taskWithData.InsertedAt.Time, @@ -303,7 +325,7 @@ func ToTask(taskWithData *v1.TaskWithPayloads, workflowRunExternalId uuid.UUID, StartedAt: startedAt, FinishedAt: finishedAt, Output: output, - Status: gen.V1TaskStatus(taskWithData.Status), + Status: taskStatus, Input: input, TenantId: taskWithData.TenantID, WorkflowId: taskWithData.WorkflowID, @@ -320,6 +342,12 @@ func ToTask(taskWithData *v1.TaskWithPayloads, workflowRunExternalId uuid.UUID, WorkflowConfig: &workflowConfig, ParentTaskExternalId: parentTaskExternalId, } + + if isEvicted { + summary.IsEvicted = &isEvicted + } + + return summary } func ToWorkflowRunDetails( @@ -342,6 +370,8 @@ func ToWorkflowRunDetails( additionalMetadata 
:= jsonToMap(workflowRun.AdditionalMetadata) + wrStatus, _ := mapOlapStatus(string(workflowRun.ReadableStatus)) + parsedWorkflowRun := gen.V1WorkflowRun{ AdditionalMetadata: &additionalMetadata, CreatedAt: &workflowRun.CreatedAt.Time, @@ -356,7 +386,7 @@ func ToWorkflowRunDetails( UpdatedAt: workflowRun.InsertedAt.Time, }, StartedAt: &workflowRun.StartedAt.Time, - Status: gen.V1TaskStatus(workflowRun.ReadableStatus), + Status: wrStatus, TenantId: workflowRun.TenantID, WorkflowId: workflowRun.WorkflowID, WorkflowVersionId: &workflowVersionId, @@ -431,13 +461,15 @@ func ToTaskTimings( retryCount := int(timing.RetryCount) attempt := retryCount + 1 + timingStatus, timingIsEvicted := mapOlapStatus(string(timing.Status)) + toReturn[i] = gen.V1TaskTiming{ Metadata: gen.APIResourceMeta{ Id: timing.ExternalID.String(), CreatedAt: timing.InsertedAt.Time, UpdatedAt: timing.InsertedAt.Time, }, - Status: gen.V1TaskStatus(timing.Status), + Status: timingStatus, TaskDisplayName: timing.DisplayName, TaskId: int(timing.ID), TaskInsertedAt: timing.InsertedAt.Time, @@ -450,6 +482,10 @@ func ToTaskTimings( ParentTaskExternalId: timing.ParentTaskExternalID, } + if timingIsEvicted { + toReturn[i].IsEvicted = &timingIsEvicted + } + if timing.QueuedAt.Valid { toReturn[i].QueuedAt = &timing.QueuedAt.Time } diff --git a/api/v1/server/oas/transformers/v1/workflow_runs.go b/api/v1/server/oas/transformers/v1/workflow_runs.go index 36024781a..2ffc3440b 100644 --- a/api/v1/server/oas/transformers/v1/workflow_runs.go +++ b/api/v1/server/oas/transformers/v1/workflow_runs.go @@ -73,7 +73,9 @@ func WorkflowRunDataToV1TaskSummary(task *v1.WorkflowRunData, workflowIdsToNames parentTaskExternalId = &parentTaskExternalIdValue } - return gen.V1TaskSummary{ + status, isEvicted := mapOlapStatus(string(task.ReadableStatus)) + + summary := gen.V1TaskSummary{ Metadata: gen.APIResourceMeta{ Id: task.ExternalID.String(), CreatedAt: task.InsertedAt.Time, @@ -88,7 +90,7 @@ func WorkflowRunDataToV1TaskSummary(task 
*v1.WorkflowRunData, workflowIdsToNames Output: output, AdditionalMetadata: &additionalMetadata, ErrorMessage: &task.ErrorMessage, - Status: gen.V1TaskStatus(task.ReadableStatus), + Status: status, TenantId: task.TenantID, WorkflowId: task.WorkflowID, WorkflowVersionId: &workflowVersionId, @@ -104,6 +106,12 @@ func WorkflowRunDataToV1TaskSummary(task *v1.WorkflowRunData, workflowIdsToNames Attempt: &attempt, ParentTaskExternalId: parentTaskExternalId, } + + if isEvicted { + summary.IsEvicted = &isEvicted + } + + return summary } func ToWorkflowRunMany( @@ -182,7 +190,9 @@ func PopulateTaskRunDataRowToV1TaskSummary(task *v1.TaskWithPayloads, workflowNa retryCount := int(task.RetryCount) attempt := retryCount + 1 - return gen.V1TaskSummary{ + taskStatus, isEvicted := mapOlapStatus(string(task.Status)) + + summary := gen.V1TaskSummary{ Metadata: gen.APIResourceMeta{ Id: task.ExternalID.String(), CreatedAt: task.InsertedAt.Time, @@ -197,7 +207,7 @@ func PopulateTaskRunDataRowToV1TaskSummary(task *v1.TaskWithPayloads, workflowNa Output: output, AdditionalMetadata: &additionalMetadata, ErrorMessage: &task.ErrorMessage.String, - Status: gen.V1TaskStatus(task.Status), + Status: taskStatus, TenantId: task.TenantID, WorkflowId: task.WorkflowID, WorkflowVersionId: &workflowVersionID, @@ -214,6 +224,12 @@ func PopulateTaskRunDataRowToV1TaskSummary(task *v1.TaskWithPayloads, workflowNa WorkflowRunExternalId: task.WorkflowRunID, ParentTaskExternalId: task.ParentTaskExternalID, } + + if isEvicted { + summary.IsEvicted = &isEvicted + } + + return summary } func TaskRunDataRowToWorkflowRunsMany( diff --git a/api/v1/server/rbac/rbac.yaml b/api/v1/server/rbac/rbac.yaml index 7b129dff0..4ed201d7d 100644 --- a/api/v1/server/rbac/rbac.yaml +++ b/api/v1/server/rbac/rbac.yaml @@ -91,6 +91,7 @@ roles: - EventUpdateReplay - StepRunGetSchema - V1DagListTasks + - V1DurableTaskBranch - MonitoringPostRunProbe - LivenessGet - UserUpdateGithubOauthStart @@ -122,6 +123,7 @@ roles: - WorkflowDelete 
- UserGetCurrent - V1TaskReplay + - V1TaskRestore - WorkflowRunCancel - V1EventList - EventKeyList diff --git a/cmd/hatchet-engine/engine/run.go b/cmd/hatchet-engine/engine/run.go index 5aa4ae45c..1dc6d1632 100644 --- a/cmd/hatchet-engine/engine/run.go +++ b/cmd/hatchet-engine/engine/run.go @@ -380,6 +380,7 @@ func runV0Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro dispatcherv1.WithRepository(sc.V1), dispatcherv1.WithMessageQueue(sc.MessageQueueV1), dispatcherv1.WithLogger(sc.Logger), + dispatcherv1.WithDispatcherId(d.DispatcherId()), dispatcherv1.WithAnalytics(sc.Analytics), ) @@ -387,6 +388,8 @@ func runV0Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro return nil, fmt.Errorf("could not create dispatcher (v1): %w", err) } + d.SetDurableCallbackHandler(dv1.DeliverDurableEventLogEntryCompletion) + // create the event ingestor ei, err := ingestor.NewIngestor( ingestor.WithMessageQueueV1(sc.MessageQueueV1), @@ -824,6 +827,7 @@ func runV1Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro dispatcherv1.WithRepository(sc.V1), dispatcherv1.WithMessageQueue(sc.MessageQueueV1), dispatcherv1.WithLogger(sc.Logger), + dispatcherv1.WithDispatcherId(d.DispatcherId()), dispatcherv1.WithAnalytics(sc.Analytics), ) @@ -831,6 +835,8 @@ func runV1Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro return nil, fmt.Errorf("could not create dispatcher (v1): %w", err) } + d.SetDurableCallbackHandler(dv1.DeliverDurableEventLogEntryCompletion) + // create the event ingestor ei, err := ingestor.NewIngestor( ingestor.WithMessageQueueV1(sc.MessageQueueV1), diff --git a/cmd/hatchet-engine/main.go b/cmd/hatchet-engine/main.go index 45bc0c4e1..bce0a621e 100644 --- a/cmd/hatchet-engine/main.go +++ b/cmd/hatchet-engine/main.go @@ -47,7 +47,7 @@ var rootCmd = &cobra.Command{ // Version will be linked by an ldflag during build // FIXME: automate this version update on tag, we use it to version the engine for sdks 
-var Version = "v0.78.23" +var Version = "v0.80.0" func main() { rootCmd.PersistentFlags().BoolVar( diff --git a/cmd/hatchet-migrate/migrate/migrations/20260313020631_v1_0_85_durable_event_log.sql b/cmd/hatchet-migrate/migrate/migrations/20260313020631_v1_0_85_durable_event_log.sql new file mode 100644 index 000000000..e639297a8 --- /dev/null +++ b/cmd/hatchet-migrate/migrate/migrations/20260313020631_v1_0_85_durable_event_log.sql @@ -0,0 +1,204 @@ +-- +goose Up +-- +goose StatementBegin +DROP FUNCTION IF EXISTS create_v1_range_partition(text, date); +CREATE OR REPLACE FUNCTION create_v1_range_partition( + targetTableName text, + targetDate date, + fillfactor integer DEFAULT 100 +) RETURNS integer + LANGUAGE plpgsql AS +$$ +DECLARE + targetDateStr varchar; + targetDatePlusOneDayStr varchar; + newTableName varchar; +BEGIN + SELECT to_char(targetDate, 'YYYYMMDD') INTO targetDateStr; + SELECT to_char(targetDate + INTERVAL '1 day', 'YYYYMMDD') INTO targetDatePlusOneDayStr; + SELECT lower(format('%s_%s', targetTableName, targetDateStr)) INTO newTableName; + -- exit if the table exists + IF EXISTS (SELECT 1 FROM pg_tables WHERE tablename = newTableName) THEN + RETURN 0; + END IF; + + EXECUTE + format('CREATE TABLE %s (LIKE %s INCLUDING INDEXES INCLUDING CONSTRAINTS)', newTableName, targetTableName); + EXECUTE format('ALTER TABLE %I SET ( + fillfactor = %s, + autovacuum_vacuum_scale_factor = ''0.1'', + autovacuum_analyze_scale_factor=''0.05'', + autovacuum_vacuum_threshold=''25'', + autovacuum_analyze_threshold=''25'', + autovacuum_vacuum_cost_delay=''10'', + autovacuum_vacuum_cost_limit=''1000'' + )', newTableName, fillfactor); + EXECUTE + format('ALTER TABLE %s ATTACH PARTITION %s FOR VALUES FROM (''%s'') TO (''%s'')', targetTableName, newTableName, targetDateStr, targetDatePlusOneDayStr); + RETURN 1; +END; +$$; + +-- v1_durable_event_log represents the log file for the durable event history +-- of a durable task. 
This table stores metadata like sequence values for entries. +-- +-- Important: writers to v1_durable_event_log_entry should lock this row to increment the sequence value. +CREATE TABLE v1_durable_event_log_file ( + tenant_id UUID NOT NULL, + -- The id and inserted_at of the durable task which created this entry + durable_task_id BIGINT NOT NULL, + durable_task_inserted_at TIMESTAMPTZ NOT NULL, + + latest_inserted_at TIMESTAMPTZ NOT NULL, + + latest_invocation_count INTEGER NOT NULL, + + -- A monotonically increasing node id for this durable event log scoped to the durable task. + -- Starts at 0 and increments by 1 for each new entry. + latest_node_id BIGINT NOT NULL, + -- The latest branch id. Branches represent different execution paths on a replay. + latest_branch_id BIGINT NOT NULL, + + CONSTRAINT v1_durable_event_log_file_pkey PRIMARY KEY (durable_task_id, durable_task_inserted_at) +) PARTITION BY RANGE(durable_task_inserted_at); + +SELECT create_v1_range_partition('v1_durable_event_log_file', NOW()::DATE); +SELECT create_v1_range_partition('v1_durable_event_log_file', (NOW() + INTERVAL '1 day')::DATE); + +CREATE TYPE v1_durable_event_log_kind AS ENUM ( + 'RUN', + 'WAIT_FOR', + 'MEMO' +); + +CREATE TABLE v1_durable_event_log_entry ( + tenant_id UUID NOT NULL, + + -- need an external id for consistency with the payload store logic (unfortunately) + external_id UUID NOT NULL, + -- The id and inserted_at of the durable task which created this entry + -- The inserted_at time of this event from a DB clock perspective. + -- Important: for consistency, this should always be auto-generated via the CURRENT_TIMESTAMP! + inserted_at TIMESTAMPTZ NOT NULL, + id BIGINT NOT NULL GENERATED ALWAYS AS IDENTITY, + + durable_task_id BIGINT NOT NULL, + durable_task_inserted_at TIMESTAMPTZ NOT NULL, + + kind v1_durable_event_log_kind NOT NULL, + -- The node number in the durable event log. 
This represents a monotonically increasing + -- sequence value generated from v1_durable_event_log_file.latest_node_id + node_id BIGINT NOT NULL, + -- The branch id when this event was first seen. A durable event log can be a part of many branches. + branch_id BIGINT NOT NULL, + -- An idempotency key generated from the incoming data (using the type of event + wait for conditions or the trigger event payload + options) + -- to determine whether or not there's been a non-determinism error + idempotency_key BYTEA NOT NULL, + -- Access patterns: + -- Definite: we'll query directly for the node_id when a durable task is replaying its log + -- Possible: we may want to query a range of node_ids for a durable task + -- Possible: we may want to query a range of inserted_ats for a durable task + + -- Whether this callback has been seen by the engine or not. Note that is_satisfied _may_ change multiple + -- times through the lifecycle of a callback, and readers should not assume that once it's true it will always be true. 
+ is_satisfied BOOLEAN NOT NULL DEFAULT FALSE, + + CONSTRAINT v1_durable_event_log_entry_pkey PRIMARY KEY (durable_task_id, durable_task_inserted_at, branch_id, node_id) +) PARTITION BY RANGE(durable_task_inserted_at); + + +SELECT create_v1_range_partition('v1_durable_event_log_entry', NOW()::DATE, 80); +SELECT create_v1_range_partition('v1_durable_event_log_entry', (NOW() + INTERVAL '1 day')::DATE, 80); + +CREATE TABLE v1_durable_event_log_branch_point ( + tenant_id UUID NOT NULL, + + id BIGINT NOT NULL GENERATED ALWAYS AS IDENTITY, + + inserted_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP, + + durable_task_id BIGINT NOT NULL, + + durable_task_inserted_at TIMESTAMPTZ NOT NULL, + + first_node_id_in_new_branch BIGINT NOT NULL, + + parent_branch_id BIGINT NOT NULL, + + next_branch_id BIGINT NOT NULL, + + CONSTRAINT v1_durable_event_log_branch_point_pkey PRIMARY KEY (durable_task_id, durable_task_inserted_at, parent_branch_id, first_node_id_in_new_branch, next_branch_id) +) PARTITION BY RANGE(durable_task_inserted_at); + +SELECT create_v1_range_partition('v1_durable_event_log_branch_point', NOW()::DATE); +SELECT create_v1_range_partition('v1_durable_event_log_branch_point', (NOW() + INTERVAL '1 day')::DATE); + + +ALTER TABLE v1_match + ADD COLUMN signal_task_external_id UUID, + ADD COLUMN durable_event_log_entry_node_id BIGINT, + ADD COLUMN durable_event_log_entry_branch_id BIGINT +; + +-- needs to be nullable so we don't have to backfill +ALTER TABLE v1_task ADD COLUMN is_durable BOOLEAN; + +ALTER TYPE v1_payload_type ADD VALUE IF NOT EXISTS 'DURABLE_EVENT_LOG_ENTRY_DATA'; +ALTER TYPE v1_payload_type ADD VALUE IF NOT EXISTS 'DURABLE_EVENT_LOG_ENTRY_RESULT_DATA'; + +ALTER TABLE "Worker" ADD COLUMN "durableTaskDispatcherId" UUID; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP TABLE v1_durable_event_log_entry; +DROP TABLE v1_durable_event_log_file; +DROP TABLE v1_durable_event_log_branch_point; +DROP TYPE v1_durable_event_log_kind; + 
+ALTER TABLE v1_match + DROP COLUMN signal_task_external_id, + DROP COLUMN durable_event_log_entry_node_id, + DROP COLUMN durable_event_log_entry_branch_id +; + +ALTER TABLE v1_task DROP COLUMN is_durable; + +ALTER TABLE "Worker" DROP COLUMN "durableTaskDispatcherId"; + +DROP FUNCTION IF EXISTS create_v1_range_partition(text, date, integer); +CREATE OR REPLACE FUNCTION create_v1_range_partition( + targetTableName text, + targetDate date +) RETURNS integer + LANGUAGE plpgsql AS +$$ +DECLARE + targetDateStr varchar; + targetDatePlusOneDayStr varchar; + newTableName varchar; +BEGIN + SELECT to_char(targetDate, 'YYYYMMDD') INTO targetDateStr; + SELECT to_char(targetDate + INTERVAL '1 day', 'YYYYMMDD') INTO targetDatePlusOneDayStr; + SELECT lower(format('%s_%s', targetTableName, targetDateStr)) INTO newTableName; + -- exit if the table exists + IF EXISTS (SELECT 1 FROM pg_tables WHERE tablename = newTableName) THEN + RETURN 0; + END IF; + + EXECUTE + format('CREATE TABLE %s (LIKE %s INCLUDING INDEXES INCLUDING CONSTRAINTS)', newTableName, targetTableName); + EXECUTE + format('ALTER TABLE %s SET ( + autovacuum_vacuum_scale_factor = ''0.1'', + autovacuum_analyze_scale_factor=''0.05'', + autovacuum_vacuum_threshold=''25'', + autovacuum_analyze_threshold=''25'', + autovacuum_vacuum_cost_delay=''10'', + autovacuum_vacuum_cost_limit=''1000'' + )', newTableName); + EXECUTE + format('ALTER TABLE %s ATTACH PARTITION %s FOR VALUES FROM (''%s'') TO (''%s'')', targetTableName, newTableName, targetDateStr, targetDatePlusOneDayStr); + RETURN 1; +END; +$$; +-- +goose StatementEnd diff --git a/cmd/hatchet-migrate/migrate/migrations/20260313020632_v1_0_86_durable_eviction.sql b/cmd/hatchet-migrate/migrate/migrations/20260313020632_v1_0_86_durable_eviction.sql new file mode 100644 index 000000000..0d669c1d1 --- /dev/null +++ b/cmd/hatchet-migrate/migrate/migrations/20260313020632_v1_0_86_durable_eviction.sql @@ -0,0 +1,15 @@ +-- +goose Up + +ALTER TABLE v1_task_runtime ADD COLUMN IF NOT 
EXISTS evicted_at TIMESTAMPTZ DEFAULT NULL; + +CREATE INDEX IF NOT EXISTS v1_task_runtime_tenant_worker_not_evicted_idx + ON v1_task_runtime (tenant_id, worker_id) WHERE evicted_at IS NULL; + +ALTER TYPE v1_event_type_olap ADD VALUE IF NOT EXISTS 'DURABLE_EVICTED'; +ALTER TYPE v1_event_type_olap ADD VALUE IF NOT EXISTS 'DURABLE_RESTORING'; + + +-- +goose Down +ALTER TABLE v1_task_runtime DROP COLUMN IF EXISTS evicted_at; +-- NOTE: Postgres does not support removing enum values. +-- The 'DURABLE_EVICTED' and 'DURABLE_RESTORING' values in v1_event_type_olap cannot be reverted. diff --git a/cmd/hatchet-migrate/migrate/migrations/20260313020633_v1_0_87_readable_status_enum.sql b/cmd/hatchet-migrate/migrate/migrations/20260313020633_v1_0_87_readable_status_enum.sql new file mode 100644 index 000000000..eb36ec204 --- /dev/null +++ b/cmd/hatchet-migrate/migrate/migrations/20260313020633_v1_0_87_readable_status_enum.sql @@ -0,0 +1,7 @@ +-- +goose Up +ALTER TYPE v1_readable_status_olap ADD VALUE IF NOT EXISTS 'EVICTED'; + +-- +goose Down +-- NOTE: Postgres does not support removing enum values. +-- The 'EVICTED' value in v1_readable_status_olap cannot be reverted. +-- Any EVICTED partitions created by this migration would need to be merged/dropped separately. 
diff --git a/cmd/hatchet-migrate/migrate/migrations/20260313020634_v1_0_88_status_priority_functions.sql b/cmd/hatchet-migrate/migrate/migrations/20260313020634_v1_0_88_status_priority_functions.sql new file mode 100644 index 000000000..4b09334ab --- /dev/null +++ b/cmd/hatchet-migrate/migrate/migrations/20260313020634_v1_0_88_status_priority_functions.sql @@ -0,0 +1,30 @@ +-- +goose Up +-- +goose StatementBegin +CREATE OR REPLACE FUNCTION v1_status_to_priority(s v1_readable_status_olap) +RETURNS int IMMUTABLE LANGUAGE sql AS $$ + SELECT CASE s + WHEN 'QUEUED' THEN 1 + WHEN 'RUNNING' THEN 2 + WHEN 'EVICTED' THEN 3 + WHEN 'CANCELLED' THEN 4 + WHEN 'FAILED' THEN 5 + WHEN 'COMPLETED' THEN 6 + END; +$$; + +CREATE OR REPLACE FUNCTION v1_status_from_priority(p int) +RETURNS v1_readable_status_olap IMMUTABLE LANGUAGE sql AS $$ + SELECT CASE p + WHEN 1 THEN 'QUEUED' + WHEN 2 THEN 'RUNNING' + WHEN 3 THEN 'EVICTED' + WHEN 4 THEN 'CANCELLED' + WHEN 5 THEN 'FAILED' + WHEN 6 THEN 'COMPLETED' + END::v1_readable_status_olap; +$$; +-- +goose StatementEnd + +-- +goose Down +DROP FUNCTION v1_status_from_priority(int); +DROP FUNCTION v1_status_to_priority(v1_readable_status_olap); diff --git a/cmd/hatchet-migrate/migrate/migrations/20260313020635_v1_0_89_olap_evicted_status.sql b/cmd/hatchet-migrate/migrate/migrations/20260313020635_v1_0_89_olap_evicted_status.sql new file mode 100644 index 000000000..f191e56d3 --- /dev/null +++ b/cmd/hatchet-migrate/migrate/migrations/20260313020635_v1_0_89_olap_evicted_status.sql @@ -0,0 +1,93 @@ +-- +goose Up +-- +goose StatementBegin +CREATE OR REPLACE FUNCTION create_v1_olap_partition_with_date_and_status( + targetTableName text, + targetDate date +) RETURNS integer + LANGUAGE plpgsql AS +$$ +DECLARE + targetDateStr varchar; + targetDatePlusOneDayStr varchar; + newTableName varchar; +BEGIN + SELECT to_char(targetDate, 'YYYYMMDD') INTO targetDateStr; + SELECT to_char(targetDate + INTERVAL '1 day', 'YYYYMMDD') INTO targetDatePlusOneDayStr; + 
SELECT format('%s_%s', targetTableName, targetDateStr) INTO newTableName; + IF NOT EXISTS (SELECT 1 FROM pg_tables WHERE tablename = newTableName) THEN + EXECUTE format('CREATE TABLE %s (LIKE %s INCLUDING INDEXES) PARTITION BY LIST (readable_status)', newTableName, targetTableName); + END IF; + + PERFORM create_v1_partition_with_status(newTableName, 'QUEUED'); + PERFORM create_v1_partition_with_status(newTableName, 'RUNNING'); + PERFORM create_v1_partition_with_status(newTableName, 'COMPLETED'); + PERFORM create_v1_partition_with_status(newTableName, 'CANCELLED'); + PERFORM create_v1_partition_with_status(newTableName, 'FAILED'); + PERFORM create_v1_partition_with_status(newTableName, 'EVICTED'); + + -- If it's not already attached, attach the partition + IF NOT EXISTS (SELECT 1 FROM pg_inherits WHERE inhrelid = newTableName::regclass) THEN + EXECUTE format('ALTER TABLE %s ATTACH PARTITION %s FOR VALUES FROM (''%s'') TO (''%s'')', targetTableName, newTableName, targetDateStr, targetDatePlusOneDayStr); + END IF; + + RETURN 1; +END; +$$; + +WITH partitions AS ( + SELECT inhrelid::regclass::text AS partition_name + FROM pg_inherits + WHERE inhparent IN ( + 'v1_tasks_olap'::regclass, + 'v1_dags_olap'::regclass, + 'v1_runs_olap'::regclass + ) +) +SELECT create_v1_partition_with_status(partition_name, 'EVICTED') +FROM partitions; + +ALTER TABLE v1_task_events_olap ADD COLUMN IF NOT EXISTS durable_invocation_count INT NOT NULL DEFAULT 0; + +ANALYZE v1_tasks_olap; +ANALYZE v1_dags_olap; +ANALYZE v1_runs_olap; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +ALTER TABLE v1_task_events_olap DROP COLUMN IF EXISTS durable_invocation_count; +-- +goose StatementEnd + +-- +goose StatementBegin +CREATE OR REPLACE FUNCTION create_v1_olap_partition_with_date_and_status( + targetTableName text, + targetDate date +) RETURNS integer + LANGUAGE plpgsql AS +$$ +DECLARE + targetDateStr varchar; + targetDatePlusOneDayStr varchar; + newTableName varchar; +BEGIN + SELECT 
to_char(targetDate, 'YYYYMMDD') INTO targetDateStr; + SELECT to_char(targetDate + INTERVAL '1 day', 'YYYYMMDD') INTO targetDatePlusOneDayStr; + SELECT format('%s_%s', targetTableName, targetDateStr) INTO newTableName; + IF NOT EXISTS (SELECT 1 FROM pg_tables WHERE tablename = newTableName) THEN + EXECUTE format('CREATE TABLE %s (LIKE %s INCLUDING INDEXES) PARTITION BY LIST (readable_status)', newTableName, targetTableName); + END IF; + + PERFORM create_v1_partition_with_status(newTableName, 'QUEUED'); + PERFORM create_v1_partition_with_status(newTableName, 'RUNNING'); + PERFORM create_v1_partition_with_status(newTableName, 'COMPLETED'); + PERFORM create_v1_partition_with_status(newTableName, 'CANCELLED'); + PERFORM create_v1_partition_with_status(newTableName, 'FAILED'); + + IF NOT EXISTS (SELECT 1 FROM pg_inherits WHERE inhrelid = newTableName::regclass) THEN + EXECUTE format('ALTER TABLE %s ATTACH PARTITION %s FOR VALUES FROM (''%s'') TO (''%s'')', targetTableName, newTableName, targetDateStr, targetDatePlusOneDayStr); + END IF; + + RETURN 1; +END; +$$; +-- +goose StatementEnd diff --git a/docker-compose.release.yml b/docker-compose.release.yml new file mode 100644 index 000000000..7c9ded0db --- /dev/null +++ b/docker-compose.release.yml @@ -0,0 +1,69 @@ +services: + hatchet-migrate: + image: ghcr.io/hatchet-dev/hatchet/hatchet-migrate:${LATEST_TAG} + environment: + DATABASE_URL: "postgresql://hatchet:hatchet@postgres:5432/hatchet?sslmode=disable" + + hatchet-admin: + image: ghcr.io/hatchet-dev/hatchet/hatchet-admin:${LATEST_TAG} + environment: + DATABASE_URL: "postgresql://hatchet:hatchet@postgres:5432/hatchet?sslmode=disable" + SEED_DEVELOPMENT: "true" + SERVER_PORT: "8080" + SERVER_URL: "http://localhost:8080" + SERVER_AUTH_COOKIE_DOMAIN: localhost + SERVER_AUTH_COOKIE_INSECURE: "true" + SERVER_GRPC_PORT: "7077" + SERVER_GRPC_BROADCAST_ADDRESS: "localhost:7077" + SERVER_GRPC_INSECURE: "true" + SERVER_DEFAULT_ENGINE_VERSION: V1 + SERVER_MSGQUEUE_KIND: postgres 
+ volumes: + - ./generated:/hatchet/generated + + hatchet-engine: + image: ghcr.io/hatchet-dev/hatchet/hatchet-engine:${LATEST_TAG} + command: /hatchet/hatchet-engine --config /hatchet/generated + restart: on-failure + ports: + - "7077:7077" + environment: + DATABASE_URL: "postgresql://hatchet:hatchet@postgres:5432/hatchet?sslmode=disable" + SERVER_GRPC_PORT: "7077" + SERVER_GRPC_BROADCAST_ADDRESS: "localhost:7077" + SERVER_GRPC_BIND_ADDRESS: "0.0.0.0" + SERVER_GRPC_INSECURE: "true" + SERVER_PORT: "8080" + SERVER_URL: "http://localhost:8080" + SERVER_AUTH_COOKIE_DOMAIN: localhost + SERVER_AUTH_COOKIE_INSECURE: "true" + SERVER_DEFAULT_ENGINE_VERSION: V1 + SERVER_MSGQUEUE_KIND: postgres + SERVER_LOGGER_LEVEL: warn + SERVER_LOGGER_FORMAT: console + DATABASE_LOGGER_LEVEL: warn + DATABASE_LOGGER_FORMAT: console + volumes: + - ./generated:/hatchet/generated + + hatchet-api: + image: ghcr.io/hatchet-dev/hatchet/hatchet-api:${LATEST_TAG} + command: /hatchet/hatchet-api --config /hatchet/generated + restart: on-failure + ports: + - "8080:8080" + environment: + DATABASE_URL: "postgresql://hatchet:hatchet@postgres:5432/hatchet?sslmode=disable" + SERVER_PORT: "8080" + SERVER_URL: "http://localhost:8080" + SERVER_AUTH_COOKIE_DOMAIN: localhost + SERVER_AUTH_COOKIE_INSECURE: "true" + SERVER_GRPC_PORT: "7077" + SERVER_GRPC_BROADCAST_ADDRESS: "localhost:7077" + SERVER_GRPC_INSECURE: "true" + SERVER_INTERNAL_CLIENT_INTERNAL_GRPC_BROADCAST_ADDRESS: "hatchet-engine:7077" + SERVER_MSGQUEUE_KIND: postgres + DATABASE_LOGGER_LEVEL: warn + DATABASE_LOGGER_FORMAT: console + volumes: + - ./generated:/hatchet/generated diff --git a/examples/python/dependency_injection/test_dependency_injection.py b/examples/python/dependency_injection/test_dependency_injection.py index 8c991d6f7..f067ad245 100644 --- a/examples/python/dependency_injection/test_dependency_injection.py +++ b/examples/python/dependency_injection/test_dependency_injection.py @@ -4,12 +4,9 @@ from 
examples.dependency_injection.worker import ( ASYNC_DEPENDENCY_VALUE, SYNC_DEPENDENCY_VALUE, Output, - async_dep, async_task_with_dependencies, di_workflow, durable_async_task_with_dependencies, - durable_sync_task_with_dependencies, - sync_dep, sync_task_with_dependencies, ) from hatchet_sdk import EmptyModel @@ -22,7 +19,6 @@ from hatchet_sdk.runnables.workflow import Standalone async_task_with_dependencies, sync_task_with_dependencies, durable_async_task_with_dependencies, - durable_sync_task_with_dependencies, ], ) @pytest.mark.asyncio(loop_scope="session") @@ -40,7 +36,7 @@ async def test_di_standalones( async def test_di_workflows() -> None: result = await di_workflow.aio_run() - assert len(result) == 4 + assert len(result) == 3 for output in result.values(): parsed = Output.model_validate(output) diff --git a/examples/python/dependency_injection/worker.py b/examples/python/dependency_injection/worker.py index cb2d639e1..c3d9c7e24 100644 --- a/examples/python/dependency_injection/worker.py +++ b/examples/python/dependency_injection/worker.py @@ -194,27 +194,6 @@ async def durable_async_task_with_dependencies( ) -@hatchet.durable_task() -def durable_sync_task_with_dependencies( - _i: EmptyModel, - ctx: DurableContext, - async_dep: Annotated[str, Depends(async_dep)], - sync_dep: Annotated[str, Depends(sync_dep)], - async_cm_dep: Annotated[str, Depends(async_cm_dep)], - sync_cm_dep: Annotated[str, Depends(sync_cm_dep)], - chained_dep: Annotated[str, Depends(chained_dep)], - chained_async_dep: Annotated[str, Depends(chained_async_dep)], -) -> Output: - return Output( - sync_dep=sync_dep, - async_dep=async_dep, - async_cm_dep=async_cm_dep, - sync_cm_dep=sync_cm_dep, - chained_dep=chained_dep, - chained_async_dep=chained_async_dep, - ) - - di_workflow = hatchet.workflow( name="dependency-injection-workflow", ) @@ -283,27 +262,6 @@ async def wf_durable_async_task_with_dependencies( ) -@di_workflow.durable_task() -def wf_durable_sync_task_with_dependencies( - _i: 
EmptyModel, - ctx: DurableContext, - async_dep: Annotated[str, Depends(async_dep)], - sync_dep: Annotated[str, Depends(sync_dep)], - async_cm_dep: Annotated[str, Depends(async_cm_dep)], - sync_cm_dep: Annotated[str, Depends(sync_cm_dep)], - chained_dep: Annotated[str, Depends(chained_dep)], - chained_async_dep: Annotated[str, Depends(chained_async_dep)], -) -> Output: - return Output( - sync_dep=sync_dep, - async_dep=async_dep, - async_cm_dep=async_cm_dep, - sync_cm_dep=sync_cm_dep, - chained_dep=chained_dep, - chained_async_dep=chained_async_dep, - ) - - def main() -> None: worker = hatchet.worker( "dependency-injection-worker", @@ -311,7 +269,6 @@ def main() -> None: async_task_with_dependencies, sync_task_with_dependencies, durable_async_task_with_dependencies, - durable_sync_task_with_dependencies, di_workflow, task_with_type_aliases, ], diff --git a/examples/python/durable/trigger.py b/examples/python/durable/trigger.py index a124984bf..70b747d5e 100644 --- a/examples/python/durable/trigger.py +++ b/examples/python/durable/trigger.py @@ -6,6 +6,7 @@ from examples.durable.worker import ( durable_workflow, ephemeral_workflow, hatchet, + AwaitedEvent, ) durable_workflow.run_no_wait() @@ -15,4 +16,4 @@ print("Sleeping") time.sleep(SLEEP_TIME + 2) print("Pushing event") -hatchet.event.push(EVENT_KEY, {}) +hatchet.event.push(EVENT_KEY, AwaitedEvent(id="123").model_dump(mode="json")) diff --git a/examples/python/durable/worker.py b/examples/python/durable/worker.py index eb334b066..3542e4c87 100644 --- a/examples/python/durable/worker.py +++ b/examples/python/durable/worker.py @@ -1,8 +1,11 @@ import asyncio import time from datetime import timedelta +from typing import Any from uuid import uuid4 +from pydantic import BaseModel + from hatchet_sdk import ( Context, DurableContext, @@ -12,9 +15,47 @@ from hatchet_sdk import ( UserEventCondition, or_, ) +from hatchet_sdk.exceptions import NonDeterminismError hatchet = Hatchet(debug=True) + +dag_child_workflow = 
hatchet.workflow(name="dag-child-workflow") + + +@dag_child_workflow.task() +async def dag_child_1(input: EmptyModel, ctx: Context) -> dict[str, str]: + await asyncio.sleep(1) + return {"result": "child1"} + + +@dag_child_workflow.task(parents=[dag_child_1]) +async def dag_child_2(input: EmptyModel, ctx: Context) -> dict[str, str]: + await asyncio.sleep(5) + return {"result": "child2"} + + +@hatchet.durable_task(execution_timeout=timedelta(seconds=10)) +async def durable_spawn_dag(input: EmptyModel, ctx: DurableContext) -> dict[str, Any]: + # NOTE: typically its not safe to use time.time() in a durable task, but + # this test assumes that the task is not replayed or evicted and it is + # used to ensure that the waits are accurate relative to the single invocation. + sleep_start = time.time() + sleep_result = await ctx.aio_sleep_for(timedelta(seconds=1)) + sleep_duration = time.time() - sleep_start + + spawn_start = time.time() + spawn_result = await dag_child_workflow.aio_run() + spawn_duration = time.time() - spawn_start + + return { + "sleep_duration": sleep_duration, + "sleep_result": sleep_result, + "spawn_duration": spawn_duration, + "spawn_result": spawn_result, + } + + # > Create a durable workflow durable_workflow = hatchet.workflow(name="DurableWorkflow") @@ -25,6 +66,7 @@ ephemeral_workflow = hatchet.workflow(name="EphemeralWorkflow") # > Add durable task EVENT_KEY = "durable-example:event" SLEEP_TIME = 5 +REPLAY_RESET_SLEEP_TIME = 3 @durable_workflow.task() @@ -32,21 +74,26 @@ async def ephemeral_task(input: EmptyModel, ctx: Context) -> None: print("Running non-durable task") +class AwaitedEvent(BaseModel): + id: str + + @durable_workflow.durable_task() -async def durable_task(input: EmptyModel, ctx: DurableContext) -> dict[str, str]: +async def durable_task(input: EmptyModel, ctx: DurableContext) -> dict[str, str | int]: print("Waiting for sleep") - await ctx.aio_sleep_for(duration=timedelta(seconds=SLEEP_TIME)) + sleep = await 
ctx.aio_sleep_for(duration=timedelta(seconds=SLEEP_TIME)) print("Sleep finished") print("Waiting for event") - await ctx.aio_wait_for( - "event", - UserEventCondition(event_key=EVENT_KEY, expression="true"), + event = await ctx.aio_wait_for_event( + EVENT_KEY, "true", payload_validator=AwaitedEvent ) print("Event received") return { "status": "success", + "event_id": event.id, + "sleep_duration_seconds": sleep.duration.seconds, } @@ -58,7 +105,7 @@ async def durable_task(input: EmptyModel, ctx: DurableContext) -> dict[str, str] @durable_workflow.durable_task() async def wait_for_or_group_1( _i: EmptyModel, ctx: DurableContext -) -> dict[str, str | int]: +) -> dict[str, str | int | float]: start = time.time() wait_result = await ctx.aio_wait_for( uuid4().hex, @@ -72,7 +119,7 @@ async def wait_for_or_group_1( event_id = list(wait_result[key].keys())[0] return { - "runtime": int(time.time() - start), + "runtime": time.time() - start, "key": key, "event_id": event_id, } @@ -83,7 +130,7 @@ async def wait_for_or_group_1( @durable_workflow.durable_task() async def wait_for_or_group_2( _i: EmptyModel, ctx: DurableContext -) -> dict[str, str | int]: +) -> dict[str, str | int | float]: start = time.time() wait_result = await ctx.aio_wait_for( uuid4().hex, @@ -97,7 +144,7 @@ async def wait_for_or_group_2( event_id = list(wait_result[key].keys())[0] return { - "runtime": int(time.time() - start), + "runtime": time.time() - start, "key": key, "event_id": event_id, } @@ -106,7 +153,7 @@ async def wait_for_or_group_2( @durable_workflow.durable_task() async def wait_for_multi_sleep( _i: EmptyModel, ctx: DurableContext -) -> dict[str, str | int]: +) -> dict[str, str | float]: start = time.time() for _ in range(3): @@ -115,7 +162,7 @@ async def wait_for_multi_sleep( ) return { - "runtime": int(time.time() - start), + "runtime": time.time() - start, } @@ -124,10 +171,18 @@ def ephemeral_task_2(input: EmptyModel, ctx: Context) -> None: print("Running non-durable task") 
+@hatchet.durable_task() +async def memo_now_caching(_i: EmptyModel, ctx: DurableContext) -> dict[str, str]: + now = await ctx.aio_now() + return { + "start_time": now.isoformat(), + } + + @hatchet.durable_task() async def wait_for_sleep_twice( input: EmptyModel, ctx: DurableContext -) -> dict[str, int]: +) -> dict[str, float]: try: start = time.time() @@ -136,16 +191,163 @@ async def wait_for_sleep_twice( ) return { - "runtime": int(time.time() - start), + "runtime": time.time() - start, } except asyncio.CancelledError: - return {"runtime": -1} + return {"runtime": -1.0} + + +class DurableBulkSpawnInput(BaseModel): + n: int = 1 + + +@hatchet.task(input_validator=DurableBulkSpawnInput) +def spawn_child_task(input: DurableBulkSpawnInput, ctx: Context) -> dict[str, str]: + return {"message": "hello from child " + str(input.n)} + + +@hatchet.durable_task(execution_timeout=timedelta(seconds=10)) +async def durable_with_spawn(input: EmptyModel, ctx: DurableContext) -> dict[str, Any]: + child_result = await spawn_child_task.aio_run() + return {"child_output": child_result} + + +@hatchet.durable_task(input_validator=DurableBulkSpawnInput) +async def durable_with_bulk_spawn( + input: DurableBulkSpawnInput, ctx: DurableContext +) -> dict[str, Any]: + child_results = await spawn_child_task.aio_run_many( + [ + spawn_child_task.create_bulk_run_item( + input=DurableBulkSpawnInput(n=i), + ) + for i in range(input.n) + ] + ) + return {"child_outputs": child_results} + + +@hatchet.durable_task() +async def durable_sleep_event_spawn( + input: EmptyModel, ctx: DurableContext +) -> dict[str, Any]: + start = time.time() + + await ctx.aio_sleep_for(timedelta(seconds=SLEEP_TIME)) + + await ctx.aio_wait_for_event( + EVENT_KEY, + "true", + ) + + child_result = await spawn_child_task.aio_run() + + return { + "runtime": time.time() - start, + "child_output": child_result, + } + + +class NonDeterminismOutput(BaseModel): + attempt_number: int + sleep_time: int + + non_determinism_detected: 
bool = False + node_id: int | None = None + + +@hatchet.durable_task(execution_timeout=timedelta(seconds=10)) +async def durable_non_determinism( + input: EmptyModel, ctx: DurableContext +) -> NonDeterminismOutput: + sleep_time = ctx.attempt_number * 2 + + try: + await ctx.aio_sleep_for(timedelta(seconds=sleep_time)) + except NonDeterminismError as e: + return NonDeterminismOutput( + attempt_number=ctx.attempt_number, + sleep_time=sleep_time, + non_determinism_detected=True, + node_id=e.node_id, + ) + + return NonDeterminismOutput( + attempt_number=ctx.attempt_number, + sleep_time=sleep_time, + ) + + +class ReplayResetResponse(BaseModel): + sleep_1_duration: float + sleep_2_duration: float + sleep_3_duration: float + + +@hatchet.durable_task(execution_timeout=timedelta(seconds=20)) +async def durable_replay_reset( + input: EmptyModel, ctx: DurableContext +) -> ReplayResetResponse: + start = time.time() + await ctx.aio_sleep_for(timedelta(seconds=REPLAY_RESET_SLEEP_TIME)) + sleep_1_duration = time.time() - start + + start = time.time() + await ctx.aio_sleep_for(timedelta(seconds=REPLAY_RESET_SLEEP_TIME)) + sleep_2_duration = time.time() - start + + start = time.time() + await ctx.aio_sleep_for(timedelta(seconds=REPLAY_RESET_SLEEP_TIME)) + sleep_3_duration = time.time() - start + + return ReplayResetResponse( + sleep_1_duration=sleep_1_duration, + sleep_2_duration=sleep_2_duration, + sleep_3_duration=sleep_3_duration, + ) + + +class SleepResult(BaseModel): + message: str + duration: float + + +class MemoInput(BaseModel): + message: str + + +async def expensive_computation(message: str) -> SleepResult: + await asyncio.sleep(SLEEP_TIME) + + return SleepResult(message=message, duration=SLEEP_TIME) + + +@hatchet.durable_task(input_validator=MemoInput) +async def memo_task(input: MemoInput, ctx: DurableContext) -> SleepResult: + start = time.time() + res = await ctx._aio_memo( + expensive_computation, + SleepResult, + input.message, + ) + + return 
SleepResult(message=res.message, duration=time.time() - start) def main() -> None: worker = hatchet.worker( "durable-worker", - workflows=[durable_workflow, ephemeral_workflow, wait_for_sleep_twice], + workflows=[ + durable_workflow, + ephemeral_workflow, + wait_for_sleep_twice, + spawn_child_task, + durable_with_spawn, + durable_with_bulk_spawn, + durable_sleep_event_spawn, + durable_non_determinism, + durable_replay_reset, + ], ) worker.start() diff --git a/examples/python/durable_event/worker.py b/examples/python/durable_event/worker.py index 900783c7b..85924d8d6 100644 --- a/examples/python/durable_event/worker.py +++ b/examples/python/durable_event/worker.py @@ -8,9 +8,8 @@ EVENT_KEY = "user:update" # > Durable Event @hatchet.durable_task(name="DurableEventTask") async def durable_event_task(input: EmptyModel, ctx: DurableContext) -> None: - res = await ctx.aio_wait_for( - "event", - UserEventCondition(event_key="user:update"), + res = await ctx.aio_wait_for_event( + "user:update", ) print("got event", res) @@ -23,12 +22,7 @@ async def durable_event_task_with_filter( input: EmptyModel, ctx: DurableContext ) -> None: # > Durable Event With Filter - res = await ctx.aio_wait_for( - "event", - UserEventCondition( - event_key="user:update", expression="input.user_id == '1234'" - ), - ) + res = await ctx.aio_wait_for_event("user:update", "input.user_id == '1234'") print("got event", res) diff --git a/examples/python/durable_eviction/capacity_worker.py b/examples/python/durable_eviction/capacity_worker.py new file mode 100644 index 000000000..9e49e8ce8 --- /dev/null +++ b/examples/python/durable_eviction/capacity_worker.py @@ -0,0 +1,23 @@ +""" +Dedicated worker for capacity-eviction e2e tests. + +Runs with durable_slots=1 so that a single waiting durable task triggers +capacity pressure and gets evicted (even with ttl=None). 
+""" + +from __future__ import annotations + +from examples.durable_eviction.worker import capacity_evictable_sleep, hatchet + + +def main() -> None: + worker = hatchet.worker( + "capacity-eviction-worker", + durable_slots=1, + workflows=[capacity_evictable_sleep], + ) + worker.start() + + +if __name__ == "__main__": + main() diff --git a/examples/python/durable_eviction/trigger.py b/examples/python/durable_eviction/trigger.py new file mode 100644 index 000000000..8b99fcf2e --- /dev/null +++ b/examples/python/durable_eviction/trigger.py @@ -0,0 +1,4 @@ +from examples.durable_eviction.worker import evictable_sleep + +ref = evictable_sleep.run_no_wait() +print(f"Triggered evictable_sleep: workflow_run_id={ref.workflow_run_id}") diff --git a/examples/python/durable_eviction/worker.py b/examples/python/durable_eviction/worker.py new file mode 100644 index 000000000..4c80d2f56 --- /dev/null +++ b/examples/python/durable_eviction/worker.py @@ -0,0 +1,169 @@ +from __future__ import annotations + +import asyncio +from datetime import timedelta +from typing import Any + +from hatchet_sdk import Context, DurableContext, EmptyModel, Hatchet, UserEventCondition +from hatchet_sdk.runnables.eviction import EvictionPolicy +from pydantic import BaseModel + +hatchet = Hatchet(debug=True) + + +EVICTION_TTL_SECONDS = 5 +LONG_SLEEP_SECONDS = 15 +EVENT_KEY = "durable-eviction:event" + +EVICTION_POLICY = EvictionPolicy( + ttl=timedelta(seconds=EVICTION_TTL_SECONDS), + allow_capacity_eviction=True, + priority=0, +) + + +@hatchet.task() +async def child_task(input: EmptyModel, ctx: Context) -> dict[str, Any]: + """Simple child that sleeps long enough for the parent's TTL to fire.""" + await asyncio.sleep(LONG_SLEEP_SECONDS) + return {"child_status": "completed"} + + +@hatchet.durable_task( + execution_timeout=timedelta(minutes=5), + eviction_policy=EVICTION_POLICY, +) +async def evictable_sleep(input: EmptyModel, ctx: DurableContext) -> dict[str, Any]: + """Sleeps long enough for the 
TTL-based eviction to kick in.""" + await ctx.aio_sleep_for(timedelta(seconds=LONG_SLEEP_SECONDS)) + return {"status": "completed"} + + +@hatchet.durable_task( + execution_timeout=timedelta(minutes=5), + eviction_policy=EVICTION_POLICY, +) +async def evictable_wait_for_event( + input: EmptyModel, ctx: DurableContext +) -> dict[str, Any]: + """Waits for a user event -- long enough for TTL eviction to fire.""" + await ctx.aio_wait_for_event( + EVENT_KEY, + "true", + ) + return {"status": "completed"} + + +@hatchet.durable_task( + execution_timeout=timedelta(minutes=5), + eviction_policy=EVICTION_POLICY, +) +async def evictable_child_spawn( + input: EmptyModel, ctx: DurableContext +) -> dict[str, Any]: + """Spawns a child workflow whose runtime exceeds the eviction TTL.""" + child_result = await child_task.aio_run() + return {"child": child_result, "status": "completed"} + + +class BulkChildTaskInput(BaseModel): + sleep_for: timedelta + + +@hatchet.task( + input_validator=BulkChildTaskInput, +) +async def bulk_child_task( + input: BulkChildTaskInput, ctx: Context +) -> dict[str, str | int]: + """Simple child that sleeps long enough for the parent's TTL to fire.""" + await asyncio.sleep(input.sleep_for.total_seconds()) + return {"sleep_for": int(input.sleep_for.total_seconds()), "status": "completed"} + + +@hatchet.durable_task( + execution_timeout=timedelta(minutes=5), + eviction_policy=EVICTION_POLICY, +) +async def evictable_child_bulk_spawn( + input: EmptyModel, ctx: DurableContext +) -> dict[str, Any]: + child_results = await child_task.aio_run_many( + [ + bulk_child_task.create_bulk_run_item( + input=BulkChildTaskInput( + sleep_for=timedelta(seconds=(EVICTION_TTL_SECONDS + 5) * (i + 1)) + ), + key=f"child{i}", + ) + for i in range(3) + ] + ) + return {"child_results": child_results} + + +@hatchet.durable_task( + execution_timeout=timedelta(minutes=5), + eviction_policy=EVICTION_POLICY, +) +async def multiple_eviction(input: EmptyModel, ctx: DurableContext) -> 
dict[str, Any]: + """Sleeps twice, expecting eviction+restore after each sleep.""" + await ctx.aio_sleep_for(timedelta(seconds=LONG_SLEEP_SECONDS)) + await ctx.aio_sleep_for(timedelta(seconds=LONG_SLEEP_SECONDS)) + return {"status": "completed"} + + +CAPACITY_EVICTION_POLICY = EvictionPolicy( + ttl=None, + allow_capacity_eviction=True, + priority=0, +) + +CAPACITY_SLEEP_SECONDS = 20 + + +@hatchet.durable_task( + execution_timeout=timedelta(minutes=5), + eviction_policy=CAPACITY_EVICTION_POLICY, +) +async def capacity_evictable_sleep( + input: EmptyModel, ctx: DurableContext +) -> dict[str, Any]: + """No TTL -- only evictable via capacity pressure (durable_slots=1).""" + await ctx.aio_sleep_for(timedelta(seconds=CAPACITY_SLEEP_SECONDS)) + return {"status": "completed"} + + +@hatchet.durable_task( + execution_timeout=timedelta(minutes=5), + eviction_policy=EvictionPolicy( + ttl=None, + allow_capacity_eviction=False, + priority=0, + ), +) +async def non_evictable_sleep(input: EmptyModel, ctx: DurableContext) -> dict[str, Any]: + """Has eviction disabled -- should never be evicted.""" + await ctx.aio_sleep_for(timedelta(seconds=10)) + return {"status": "completed"} + + +def main() -> None: + worker = hatchet.worker( + "eviction-worker", + workflows=[ + evictable_sleep, + evictable_wait_for_event, + evictable_child_spawn, + evictable_child_bulk_spawn, + multiple_eviction, + non_evictable_sleep, + child_task, + bulk_child_task, + ], + ) + worker.start() + + +if __name__ == "__main__": + main() diff --git a/examples/python/guides/human_in_the_loop/worker.py b/examples/python/guides/human_in_the_loop/worker.py index 55bf6ea42..74cdfa8ab 100644 --- a/examples/python/guides/human_in_the_loop/worker.py +++ b/examples/python/guides/human_in_the_loop/worker.py @@ -1,4 +1,4 @@ -from hatchet_sdk import DurableContext, EmptyModel, Hatchet, UserEventCondition +from hatchet_sdk import DurableContext, EmptyModel, Hatchet hatchet = Hatchet(debug=True) @@ -8,16 +8,15 @@ 
APPROVAL_EVENT_KEY = "approval:decision" # > Step 02 Wait For Event async def wait_for_approval(ctx: DurableContext) -> dict: run_id = ctx.workflow_run_id - approval = await ctx.aio_wait_for( - "approval", - UserEventCondition( - event_key=APPROVAL_EVENT_KEY, - expression=f"input.runId == '{run_id}'", - ), + approval = await ctx.aio_wait_for_event( + APPROVAL_EVENT_KEY, + f"input.runId == '{run_id}'", ) return approval + + # > Step 01 Define Approval Task @hatchet.durable_task(name="ApprovalTask") async def approval_task(input: EmptyModel, ctx: DurableContext) -> dict: @@ -28,6 +27,8 @@ async def approval_task(input: EmptyModel, ctx: DurableContext) -> dict: return {"status": "rejected", "reason": approval.get("reason", "")} + + def main() -> None: # > Step 04 Run Worker worker = hatchet.worker( diff --git a/examples/python/simple/chaos_test.py b/examples/python/simple/chaos_test.py index b2a41c28a..f52445275 100644 --- a/examples/python/simple/chaos_test.py +++ b/examples/python/simple/chaos_test.py @@ -49,7 +49,7 @@ def simple(input: EmptyModel, ctx: Context) -> dict[str, str]: @hatchet.durable_task() -def simple_durable(input: EmptyModel, ctx: Context) -> dict[str, str]: +async def simple_durable(input: EmptyModel, ctx: Context) -> dict[str, str]: print("Executing durable task!") return {"result": "Hello from durable!"} diff --git a/examples/python/simple/chaos_worker.py b/examples/python/simple/chaos_worker.py index 3abe078a4..0b7ee84f3 100644 --- a/examples/python/simple/chaos_worker.py +++ b/examples/python/simple/chaos_worker.py @@ -1,4 +1,4 @@ -# This is a worker script that will introduce chaos to test +# This is a worker script that will introduce chaos to test # complex deployments and migrations. 
import argparse import asyncio @@ -48,7 +48,7 @@ def simple(input: EmptyModel, ctx: Context) -> dict[str, str]: @hatchet.durable_task() -def simple_durable(input: EmptyModel, ctx: Context) -> dict[str, str]: +async def simple_durable(input: EmptyModel, ctx: Context) -> dict[str, str]: print("Executing durable task!") return {"result": "Hello from durable!"} @@ -149,6 +149,5 @@ def main() -> None: print("Bye!") - if __name__ == "__main__": main() diff --git a/examples/python/simple/worker.py b/examples/python/simple/worker.py index 85bb98a8c..5787f3c15 100644 --- a/examples/python/simple/worker.py +++ b/examples/python/simple/worker.py @@ -1,5 +1,5 @@ # > Simple -from hatchet_sdk import Context, EmptyModel, Hatchet +from hatchet_sdk import Context, DurableContext, EmptyModel, Hatchet hatchet = Hatchet(debug=True) @@ -10,7 +10,8 @@ def simple(input: EmptyModel, ctx: Context) -> dict[str, str]: @hatchet.durable_task() -def simple_durable(input: EmptyModel, ctx: Context) -> dict[str, str]: +async def simple_durable(input: EmptyModel, ctx: DurableContext) -> dict[str, str]: + # durable tasks should be async return {"result": "Hello, world!"} diff --git a/examples/python/unit_testing/test_unit.py b/examples/python/unit_testing/test_unit.py index cebc84f17..2045251f6 100644 --- a/examples/python/unit_testing/test_unit.py +++ b/examples/python/unit_testing/test_unit.py @@ -10,9 +10,6 @@ from examples.unit_testing.workflows import ( durable_async_complex_workflow, durable_async_simple_workflow, durable_async_standalone, - durable_sync_complex_workflow, - durable_sync_simple_workflow, - durable_sync_standalone, start, sync_complex_workflow, sync_simple_workflow, @@ -25,11 +22,8 @@ from hatchet_sdk import Task "func", [ sync_standalone, - durable_sync_standalone, sync_simple_workflow, - durable_sync_simple_workflow, sync_complex_workflow, - durable_sync_complex_workflow, ], ) def test_simple_unit_sync(func: Task[UnitTestInput, UnitTestOutput]) -> None: diff --git 
a/examples/python/unit_testing/workflows.py b/examples/python/unit_testing/workflows.py index ae42e61c9..594e69a73 100644 --- a/examples/python/unit_testing/workflows.py +++ b/examples/python/unit_testing/workflows.py @@ -44,19 +44,6 @@ async def async_standalone(input: UnitTestInput, ctx: Context) -> UnitTestOutput ) -@hatchet.durable_task(input_validator=UnitTestInput) -def durable_sync_standalone( - input: UnitTestInput, ctx: DurableContext -) -> UnitTestOutput: - return UnitTestOutput( - key=input.key, - number=input.number, - additional_metadata=ctx.additional_metadata, - retry_count=ctx.retry_count, - mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url, - ) - - @hatchet.durable_task(input_validator=UnitTestInput) async def durable_async_standalone( input: UnitTestInput, ctx: DurableContext @@ -97,19 +84,6 @@ async def async_simple_workflow(input: UnitTestInput, ctx: Context) -> UnitTestO ) -@simple_workflow.durable_task() -def durable_sync_simple_workflow( - input: UnitTestInput, ctx: DurableContext -) -> UnitTestOutput: - return UnitTestOutput( - key=input.key, - number=input.number, - additional_metadata=ctx.additional_metadata, - retry_count=ctx.retry_count, - mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url, - ) - - @simple_workflow.durable_task() async def durable_async_simple_workflow( input: UnitTestInput, ctx: DurableContext @@ -153,15 +127,6 @@ async def async_complex_workflow(input: UnitTestInput, ctx: Context) -> UnitTest return ctx.task_output(start) -@complex_workflow.durable_task( - parents=[start], -) -def durable_sync_complex_workflow( - input: UnitTestInput, ctx: DurableContext -) -> UnitTestOutput: - return ctx.task_output(start) - - @complex_workflow.durable_task( parents=[start], ) diff --git a/examples/python/worker.py b/examples/python/worker.py index 0ecaaab41..742affe65 100644 --- a/examples/python/worker.py +++ b/examples/python/worker.py @@ -27,12 +27,38 @@ from examples.dependency_injection.worker import ( 
async_task_with_dependencies, di_workflow, durable_async_task_with_dependencies, - durable_sync_task_with_dependencies, sync_task_with_dependencies, task_with_type_aliases, ) from examples.dict_input.worker import say_hello_unsafely -from examples.durable.worker import durable_workflow, wait_for_sleep_twice +from examples.durable.worker import ( + durable_sleep_event_spawn, + durable_with_bulk_spawn, + durable_with_spawn, + durable_workflow, + spawn_child_task, + wait_for_sleep_twice, + dag_child_workflow, + durable_spawn_dag, + durable_non_determinism, + durable_replay_reset, + memo_task, + memo_now_caching, +) +from examples.durable_event.worker import ( + durable_event_task, + durable_event_task_with_filter, +) +from examples.durable_eviction.worker import ( + bulk_child_task as eviction_bulk_child_task, + child_task as eviction_child_task, + evictable_child_bulk_spawn, + evictable_child_spawn, + evictable_sleep, + evictable_wait_for_event, + multiple_eviction, + non_evictable_sleep, +) from examples.events.worker import event_workflow from examples.fanout.worker import child_wf, parent_wf from examples.fanout_sync.worker import sync_fanout_child, sync_fanout_parent @@ -104,14 +130,33 @@ def main() -> None: return_exceptions_task, exception_parsing_workflow, wait_for_sleep_twice, + spawn_child_task, + durable_with_spawn, + durable_with_bulk_spawn, + durable_sleep_event_spawn, + durable_event_task, + durable_event_task_with_filter, async_task_with_dependencies, sync_task_with_dependencies, durable_async_task_with_dependencies, - durable_sync_task_with_dependencies, task_with_type_aliases, say_hello, say_hello_unsafely, serde_workflow, + durable_spawn_dag, + dag_child_workflow, + durable_non_determinism, + durable_replay_reset, + memo_task, + evictable_sleep, + evictable_wait_for_event, + evictable_child_spawn, + evictable_child_bulk_spawn, + multiple_eviction, + non_evictable_sleep, + eviction_child_task, + eviction_bulk_child_task, + memo_now_caching, ], 
lifespan=lifespan, ) diff --git a/examples/typescript/__e2e__/harness.ts b/examples/typescript/__e2e__/harness.ts index 2f355137f..3e62eb342 100644 --- a/examples/typescript/__e2e__/harness.ts +++ b/examples/typescript/__e2e__/harness.ts @@ -3,6 +3,8 @@ import { randomUUID } from 'crypto'; import { HatchetClient } from '@hatchet-dev/typescript-sdk/v1'; import type { BaseWorkflowDeclaration } from '@hatchet-dev/typescript-sdk/v1'; import { Worker } from '../../client/worker/worker'; +import { supportsEviction } from '../../client/worker/engine-version'; +import { fetchEngineVersion } from '../../client/worker/deprecated/legacy-worker'; export function requireEnv(name: string): string { const value = process.env[name]; @@ -50,6 +52,15 @@ export async function stopWorker(worker: Worker | undefined) { await sleep(300); } +/** + * Checks whether the connected engine supports durable eviction. + * Call from beforeAll / beforeEach and skip tests when false. + */ +export async function checkDurableEvictionSupport(client: HatchetClient): Promise { + const version = await fetchEngineVersion(client).catch(() => undefined); + return supportsEviction(version); +} + export async function poll( fn: () => Promise, { diff --git a/examples/typescript/concurrency_workflow_level/workflow.ts b/examples/typescript/concurrency_workflow_level/workflow.ts index 4215283a5..794dee512 100644 --- a/examples/typescript/concurrency_workflow_level/workflow.ts +++ b/examples/typescript/concurrency_workflow_level/workflow.ts @@ -7,7 +7,7 @@ const sleep = (ms: number) => setTimeout(resolve, ms); }); -export const SLEEP_TIME_MS = 500; +export const SLEEP_TIME_MS = 2000; export const DIGIT_MAX_RUNS = 8; export const NAME_MAX_RUNS = 3; diff --git a/examples/typescript/durable-event/workflow.ts b/examples/typescript/durable-event/workflow.ts index 72360d8c5..0719ec202 100644 --- a/examples/typescript/durable-event/workflow.ts +++ b/examples/typescript/durable-event/workflow.ts @@ -6,9 +6,7 @@ export 
const durableEvent = hatchet.durableTask({ name: 'durable-event', executionTimeout: '10m', fn: async (_, ctx) => { - const res = ctx.waitFor({ - eventKey: 'user:update', - }); + const res = await ctx.waitForEvent('user:update'); console.log('res', res); @@ -23,10 +21,7 @@ export const durableEventWithFilter = hatchet.durableTask({ executionTimeout: '10m', fn: async (_, ctx) => { // > Durable Event With Filter - const res = ctx.waitFor({ - eventKey: 'user:update', - expression: "input.userId == '1234'", - }); + const res = await ctx.waitForEvent('user:update', "input.userId == '1234'"); console.log('res', res); diff --git a/examples/typescript/durable/workflow.ts b/examples/typescript/durable/workflow.ts index dad4c423b..31f9e3c91 100644 --- a/examples/typescript/durable/workflow.ts +++ b/examples/typescript/durable/workflow.ts @@ -1,4 +1,6 @@ import { Or, SleepCondition, UserEventCondition } from '@hatchet-dev/typescript-sdk/v1/conditions'; +import { NonDeterminismError } from '@hatchet-dev/typescript-sdk/util/errors/non-determinism-error'; +import sleep from '@hatchet-dev/typescript-sdk/util/sleep'; import { hatchet } from '../hatchet-client'; export const EVENT_KEY = 'durable-example:event'; @@ -22,14 +24,18 @@ durableWorkflow.durableTask({ executionTimeout: '10m', fn: async (_input, ctx) => { console.log('Waiting for sleep'); - await ctx.sleepFor(SLEEP_TIME); + const sleepResult = await ctx.sleepFor(SLEEP_TIME); console.log('Sleep finished'); console.log('Waiting for event'); - await ctx.waitFor({ eventKey: EVENT_KEY }); + const event = await ctx.waitForEvent(EVENT_KEY, 'true'); console.log('Event received'); - return { status: 'success' }; + return { + status: 'success', + event: event, + sleep_duration_ms: sleepResult.durationMs, + }; }, }); @@ -40,10 +46,10 @@ function extractKeyAndEventId(waitResult: unknown): { key: string; eventId: stri if (obj && typeof obj === 'object') { const [key] = Object.keys(obj); const inner = obj[key]; - if (inner && typeof inner 
=== 'object') { + if (inner && typeof inner === 'object' && !Array.isArray(inner)) { const [eventId] = Object.keys(inner); if (eventId) { - return { key: 'CREATE', eventId }; + return { key, eventId }; } } if (key) { @@ -114,8 +120,180 @@ export const waitForSleepTwice = hatchet.durableTask({ await ctx.sleepFor(SLEEP_TIME); return { runtime: Math.round((Date.now() - start) / 1000) }; } catch (e) { - // treat cancellation as a successful completion for parity with Python sample return { runtime: -1 }; } }, }); + +// --- Spawn child from durable task --- + +export const spawnChildTask = hatchet.task({ + name: 'spawn-child-task', + fn: async (input: { n?: number }) => { + return { message: `hello from child ${input.n ?? 1}` }; + }, +}); + +export const durableWithSpawn = hatchet.durableTask({ + name: 'durable-with-spawn', + executionTimeout: '10s', + fn: async (_input, ctx) => { + const childResult = await spawnChildTask.run({}); + return { child_output: childResult }; + }, +}); + +export const durableWithBulkSpawn = hatchet.durableTask({ + name: 'durable-with-bulk-spawn', + executionTimeout: '10m', + fn: async (input: { n?: number }, ctx) => { + const n = input.n ?? 
10; + const inputs = Array.from({ length: n }, (_, i) => ({ n: i })); + const childResults = await spawnChildTask.run(inputs); + return { child_outputs: childResults }; + }, +}); + +export const durableSleepEventSpawn = hatchet.durableTask({ + name: 'durable-sleep-event-spawn', + executionTimeout: '10m', + fn: async (_input, ctx) => { + const start = Date.now(); + + await ctx.sleepFor(SLEEP_TIME); + + await ctx.waitForEvent(EVENT_KEY, 'true'); + + const childResult = await spawnChildTask.run({}); + + return { + runtime: (Date.now() - start) / 1000, + child_output: childResult, + }; + }, +}); + +// --- Spawn child using explicit ctx.spawnChild --- + +export const durableWithExplicitSpawn = hatchet.durableTask({ + name: 'durable-with-explicit-spawn', + executionTimeout: '10m', + fn: async (_input, ctx) => { + const childResult = await ctx.spawnChild(spawnChildTask, {}); + return { child_output: childResult }; + }, +}); + +// --- Non-determinism detection --- + +export const durableNonDeterminism = hatchet.durableTask({ + name: 'durable-non-determinism', + executionTimeout: '10s', + fn: async (_input, ctx) => { + const sleepTime = ctx.invocationCount * 2; + + try { + await ctx.sleepFor(`${sleepTime}s`); + } catch (e) { + if (e instanceof NonDeterminismError) { + return { + attempt_number: ctx.invocationCount, + sleep_time: sleepTime, + non_determinism_detected: true, + node_id: e.nodeId, + }; + } + throw e; + } + + return { + attempt_number: ctx.invocationCount, + sleep_time: sleepTime, + non_determinism_detected: false, + }; + }, +}); + +// --- Replay reset --- + +export const REPLAY_RESET_SLEEP_SECONDS = 3; +/** Max duration (seconds) for a replayed/memoized step; above this we treat it as a real sleep. 
*/ +export const REPLAY_RESET_MEMOIZED_MAX_SECONDS = 5; +const REPLAY_RESET_SLEEP = `${REPLAY_RESET_SLEEP_SECONDS}s` as const; + +export const durableReplayReset = hatchet.durableTask({ + name: 'durable-replay-reset', + executionTimeout: '20s', + fn: async (_input, ctx) => { + let start = Date.now(); + await ctx.sleepFor(REPLAY_RESET_SLEEP); + const sleep1Duration = (Date.now() - start) / 1000; + + start = Date.now(); + await ctx.sleepFor(REPLAY_RESET_SLEEP); + const sleep2Duration = (Date.now() - start) / 1000; + + start = Date.now(); + await ctx.sleepFor(REPLAY_RESET_SLEEP); + const sleep3Duration = (Date.now() - start) / 1000; + + return { + sleep_1_duration: sleep1Duration, + sleep_2_duration: sleep2Duration, + sleep_3_duration: sleep3Duration, + }; + }, +}); + +export const memoNowCaching = hatchet.durableTask({ + name: 'memo-now-caching', + executionTimeout: '10m', + fn: async (_input, ctx) => { + const now = await ctx.now(); + return { start_time: now.toISOString() }; + }, +}); + +// --- Spawn DAG from durable task --- + +export const dagChildWorkflow = hatchet.workflow({ + name: 'dag-child-workflow-ts', +}); + +const dagChild1 = dagChildWorkflow.task({ + name: 'dag-child-1', + fn: async () => { + await sleep(1000); + return { result: 'child1' }; + }, +}); + +dagChildWorkflow.task({ + name: 'dag-child-2', + parents: [dagChild1], + fn: async () => { + await sleep(2000); + return { result: 'child2' }; + }, +}); + +export const durableSpawnDag = hatchet.durableTask({ + name: 'durable-spawn-dag', + executionTimeout: '10s', + fn: async (_input, ctx) => { + const sleepStart = Date.now(); + const sleepResult = await ctx.sleepFor(SLEEP_TIME); + const sleepDuration = (Date.now() - sleepStart) / 1000; + + const spawnStart = Date.now(); + const spawnResult = await dagChildWorkflow.run({}); + const spawnDuration = (Date.now() - spawnStart) / 1000; + + return { + sleep_duration: sleepDuration, + sleep_duration_ms: sleepResult.durationMs, + spawn_duration: spawnDuration, 
+ spawn_result: spawnResult, + }; + }, +}); diff --git a/examples/typescript/durable_event/workflow.ts b/examples/typescript/durable_event/workflow.ts index 72360d8c5..a442d21b3 100644 --- a/examples/typescript/durable_event/workflow.ts +++ b/examples/typescript/durable_event/workflow.ts @@ -1,14 +1,13 @@ -// import sleep from '@hatchet-dev/typescript-sdk/util/sleep'; import { hatchet } from '../hatchet-client'; +export const EVENT_KEY = 'user:update'; + // > Durable Event export const durableEvent = hatchet.durableTask({ name: 'durable-event', executionTimeout: '10m', fn: async (_, ctx) => { - const res = ctx.waitFor({ - eventKey: 'user:update', - }); + const res = await ctx.waitForEvent(EVENT_KEY); console.log('res', res); @@ -23,10 +22,7 @@ export const durableEventWithFilter = hatchet.durableTask({ executionTimeout: '10m', fn: async (_, ctx) => { // > Durable Event With Filter - const res = ctx.waitFor({ - eventKey: 'user:update', - expression: "input.userId == '1234'", - }); + const res = await ctx.waitForEvent(EVENT_KEY, "input.userId == '1234'"); console.log('res', res); diff --git a/examples/typescript/durable_eviction/capacity-worker.ts b/examples/typescript/durable_eviction/capacity-worker.ts new file mode 100644 index 000000000..04360ab1d --- /dev/null +++ b/examples/typescript/durable_eviction/capacity-worker.ts @@ -0,0 +1,21 @@ +/** + * Dedicated worker for capacity-eviction e2e tests. + * + * Runs with durableSlots=1 so that a single waiting durable task triggers + * capacity pressure and gets evicted (even with ttl=undefined). 
+ */ +import { hatchet } from '../hatchet-client'; +import { capacityEvictableSleep } from './workflow'; + +async function main() { + const worker = await hatchet.worker('capacity-eviction-worker', { + durableSlots: 1, + workflows: [capacityEvictableSleep], + }); + + await worker.start(); +} + +if (require.main === module) { + main(); +} diff --git a/examples/typescript/durable_eviction/worker.ts b/examples/typescript/durable_eviction/worker.ts new file mode 100644 index 000000000..bd13f1170 --- /dev/null +++ b/examples/typescript/durable_eviction/worker.ts @@ -0,0 +1,34 @@ +import { hatchet } from '../hatchet-client'; +import { + evictableSleep, + evictableWaitForEvent, + evictableChildSpawn, + evictableChildBulkSpawn, + multipleEviction, + nonEvictableSleep, + childTask, + bulkChildTask, + evictableSleepForGracefulTermination, +} from './workflow'; + +async function main() { + const worker = await hatchet.worker('eviction-worker', { + workflows: [ + evictableSleep, + evictableWaitForEvent, + evictableChildSpawn, + evictableChildBulkSpawn, + multipleEviction, + nonEvictableSleep, + childTask, + bulkChildTask, + evictableSleepForGracefulTermination, + ], + }); + + await worker.start(); +} + +if (require.main === module) { + main(); +} diff --git a/examples/typescript/durable_eviction/workflow.ts b/examples/typescript/durable_eviction/workflow.ts new file mode 100644 index 000000000..21f4f755f --- /dev/null +++ b/examples/typescript/durable_eviction/workflow.ts @@ -0,0 +1,128 @@ +import sleep from '@hatchet-dev/typescript-sdk/util/sleep'; +import { EvictionPolicy } from '@hatchet-dev/typescript-sdk/v1'; +import { hatchet } from '../hatchet-client'; + +export const EVICTION_TTL_SECONDS = 5; +export const LONG_SLEEP_SECONDS = 15; +export const EVENT_KEY = 'durable-eviction:event'; + +const EVICTION_POLICY: EvictionPolicy = { + ttl: `${EVICTION_TTL_SECONDS}s`, + allowCapacityEviction: true, + priority: 0, +}; + +export const childTask = hatchet.task({ + name: 
'eviction-child-task', + fn: async () => { + await sleep(LONG_SLEEP_SECONDS * 1000); + return { child_status: 'completed' }; + }, +}); + +export const evictableSleep = hatchet.durableTask({ + name: 'evictable-sleep', + executionTimeout: '5m', + evictionPolicy: EVICTION_POLICY, + fn: async (_input, ctx) => { + await ctx.sleepFor(`${LONG_SLEEP_SECONDS}s`); + return { status: 'completed' }; + }, +}); + +// NOTE: DO NOT REGISTER ON E2E TEST WORKER +export const evictableSleepForGracefulTermination = hatchet.durableTask({ + name: 'evictable-sleep-for-graceful-termination', + executionTimeout: '5m', + evictionPolicy: { + ttl: `30m`, + allowCapacityEviction: true, + priority: 0, + }, + fn: async (_input, ctx) => { + await ctx.sleepFor(`5m`); + return { status: 'completed' }; + }, +}); + +export const evictableWaitForEvent = hatchet.durableTask({ + name: 'evictable-wait-for-event', + executionTimeout: '5m', + evictionPolicy: EVICTION_POLICY, + fn: async (_input, ctx) => { + await ctx.waitForEvent(EVENT_KEY, 'true'); + return { status: 'completed' }; + }, +}); + +export const evictableChildSpawn = hatchet.durableTask({ + name: 'evictable-child-spawn', + executionTimeout: '5m', + evictionPolicy: EVICTION_POLICY, + fn: async (_input, ctx) => { + const childResult = await childTask.run({}); + return { child: childResult, status: 'completed' }; + }, +}); + +export const multipleEviction = hatchet.durableTask({ + name: 'multiple-eviction', + executionTimeout: '5m', + evictionPolicy: EVICTION_POLICY, + fn: async (_input, ctx) => { + await ctx.sleepFor(`${LONG_SLEEP_SECONDS}s`); + await ctx.sleepFor(`${LONG_SLEEP_SECONDS}s`); + return { status: 'completed' }; + }, +}); + +export const bulkChildTask = hatchet.task({ + name: 'eviction-bulk-child-task', + fn: async (input: { sleepSeconds: number }) => { + await sleep(input.sleepSeconds * 1000); + return { sleepSeconds: input.sleepSeconds, status: 'completed' }; + }, +}); + +export const evictableChildBulkSpawn = hatchet.durableTask({ 
+ name: 'evictable-child-bulk-spawn', + executionTimeout: '5m', + evictionPolicy: EVICTION_POLICY, + fn: async (_input, ctx) => { + const inputs = Array.from({ length: 3 }, (_, i) => ({ + sleepSeconds: (EVICTION_TTL_SECONDS + 5) * (i + 1), + })); + const childResults = await bulkChildTask.run(inputs); + return { child_results: childResults, status: 'completed' }; + }, +}); + +export const CAPACITY_SLEEP_SECONDS = 20; + +export const capacityEvictableSleep = hatchet.durableTask({ + name: 'capacity-evictable-sleep', + executionTimeout: '5m', + evictionPolicy: { + ttl: undefined, + allowCapacityEviction: true, + priority: 0, + }, + fn: async (_input, ctx) => { + await ctx.sleepFor(`${CAPACITY_SLEEP_SECONDS}s`); + return { status: 'completed' }; + }, +}); + +export const nonEvictableSleep = hatchet.durableTask({ + name: 'non-evictable-sleep', + executionTimeout: '5m', + evictionPolicy: { + ttl: undefined, + allowCapacityEviction: false, + priority: 0, + }, + fn: async (_input, ctx) => { + await ctx.sleepFor('10s'); + return { status: 'completed' }; + }, +}); diff --git a/examples/typescript/e2e-worker.ts b/examples/typescript/e2e-worker.ts index bcd1872e7..c7548a449 100644 --- a/examples/typescript/e2e-worker.ts +++ b/examples/typescript/e2e-worker.ts @@ -14,7 +14,31 @@ import { concurrencyCancelNewestWorkflow } from './concurrency_cancel_newest/wor import { concurrencyMultipleKeysWorkflow } from './concurrency_multiple_keys/workflow'; import { concurrencyWorkflowLevelWorkflow } from './concurrency_workflow_level/workflow'; import { dag } from './dag/workflow'; -import { durableWorkflow, waitForSleepTwice } from './durable/workflow'; +import { + durableWorkflow, + waitForSleepTwice, + spawnChildTask, + durableWithSpawn, + durableWithBulkSpawn, + durableSleepEventSpawn, + durableWithExplicitSpawn, + durableNonDeterminism, + durableReplayReset, + dagChildWorkflow, + durableSpawnDag, +} from './durable/workflow'; +import { durableEvent, durableEventWithFilter } from 
'./durable_event/workflow'; +import { + evictableSleep, + evictableWaitForEvent, + evictableChildSpawn, + multipleEviction, + nonEvictableSleep, + childTask as evictionChildTask, + bulkChildTask, + evictableChildBulkSpawn, +} from './durable_eviction/workflow'; +import { durableSleep } from './durable_sleep/workflow'; import { createLoggingWorkflow } from './logger/workflow'; import { nonRetryableWorkflow } from './non_retryable/workflow'; import { failureWorkflow } from './on_failure/workflow'; @@ -41,6 +65,26 @@ const workflows = [ dag, durableWorkflow, waitForSleepTwice, + spawnChildTask, + durableWithSpawn, + durableWithBulkSpawn, + durableSleepEventSpawn, + durableWithExplicitSpawn, + durableNonDeterminism, + durableReplayReset, + dagChildWorkflow, + durableSpawnDag, + durableEvent, + durableEventWithFilter, + durableSleep, + evictableSleep, + evictableWaitForEvent, + evictableChildSpawn, + multipleEviction, + nonEvictableSleep, + evictionChildTask, + bulkChildTask, + evictableChildBulkSpawn, createLoggingWorkflow(hatchet), nonRetryableWorkflow, failureWorkflow, diff --git a/frontend/app/src/components/v1/molecules/data-table/data-table-options.tsx b/frontend/app/src/components/v1/molecules/data-table/data-table-options.tsx index 9225d6818..acdd6079b 100644 --- a/frontend/app/src/components/v1/molecules/data-table/data-table-options.tsx +++ b/frontend/app/src/components/v1/molecules/data-table/data-table-options.tsx @@ -28,6 +28,7 @@ import { flattenDAGsKey, createdAfterKey, finishedBeforeKey, + runningFilterKey, statusKey, isCustomTimeRangeKey, timeWindowKey, @@ -40,6 +41,7 @@ import { Column } from '@tanstack/react-table'; import * as React from 'react'; interface FilterControlProps { + table: Table; column?: Column; filter: { columnId: string; @@ -50,7 +52,11 @@ interface FilterControlProps { }; } -function FilterControl({ column, filter }: FilterControlProps) { +function FilterControl({ + table, + column, + filter, +}: FilterControlProps) { const value = 
column?.getFilterValue(); const [searchTerm, setSearchTerm] = React.useState(''); const keyInputRef = React.useRef(null); @@ -365,36 +371,135 @@ function FilterControl({ column, filter }: FilterControlProps) { )}
{filteredOptions.length > 0 ? ( - filteredOptions.map((option) => ( -
- { - let newValue; - if (checked) { - newValue = [...selectedValues, option.value]; - } else { - newValue = selectedValues.filter( - (v) => v !== option.value, + filteredOptions.map((option) => { + const isChecked = selectedValues.includes(option.value); + const subColumn = option.subFilterColumnId + ? table.getColumn(option.subFilterColumnId) + : undefined; + const subValue = subColumn?.getFilterValue() as + | string + | undefined; + const allSubValues = + option.subOptions?.map((s) => s.value) || []; + + return ( + +
+ { + let newValue; + if (checked) { + newValue = [...selectedValues, option.value]; + if (subColumn && option.subOptions) { + subColumn.setFilterValue(undefined); + } + } else { + newValue = selectedValues.filter( + (v) => v !== option.value, + ); + if (subColumn) { + subColumn.setFilterValue(undefined); + } + } + column?.setFilterValue( + newValue.length > 0 ? newValue : undefined, + ); + }} + /> + +
+ {option.subOptions && + option.subFilterColumnId && + option.subOptions.map((sub) => { + const otherSub = allSubValues.find( + (v) => v !== sub.value, ); - } - column?.setFilterValue( - newValue.length > 0 ? newValue : undefined, - ); - }} - /> - -
- )) + const subIsChecked = + isChecked && + (subValue === undefined || subValue === sub.value); + + return ( +
+ { + const subColumnId = option.subFilterColumnId!; + table.setColumnFilters((prev) => { + const next = prev.filter( + (f) => + f.id !== filter.columnId && + f.id !== subColumnId, + ); + + if (checked) { + const newStatuses = isChecked + ? selectedValues + : [...selectedValues, option.value]; + next.push({ + id: filter.columnId, + value: newStatuses, + }); + if (!isChecked) { + // Running was off → turn on with just this sub + next.push({ + id: subColumnId, + value: sub.value, + }); + } else if (subValue === otherSub) { + // other was the only one, now both → ALL (no sub-filter) + } else if (subValue === undefined) { + // both already checked → stay ALL + } + } else if (subValue === undefined) { + // both checked, uncheck this → keep other + next.push({ + id: filter.columnId, + value: selectedValues, + }); + next.push({ + id: subColumnId, + value: otherSub, + }); + } else { + // only this was checked, uncheck → remove Running + const without = selectedValues.filter( + (v) => v !== option.value, + ); + if (without.length > 0) { + next.push({ + id: filter.columnId, + value: without, + }); + } + } + + return next; + }); + }} + /> + +
+ ); + })} + + ); + }) ) : (
No options found @@ -475,6 +580,10 @@ export function DataTableOptions({ return false; } + if (f.id === runningFilterKey) { + return false; + } + if (hiddenFilters.includes(f.id)) { return false; } @@ -622,6 +731,7 @@ function FiltersContent({ )}
diff --git a/frontend/app/src/components/v1/molecules/data-table/data-table-toolbar.tsx b/frontend/app/src/components/v1/molecules/data-table/data-table-toolbar.tsx index b6091fb2b..488e799f2 100644 --- a/frontend/app/src/components/v1/molecules/data-table/data-table-toolbar.tsx +++ b/frontend/app/src/components/v1/molecules/data-table/data-table-toolbar.tsx @@ -13,6 +13,8 @@ export interface FilterOption { label: string; value: string; icon?: React.ComponentType<{ className?: string }>; + subFilterColumnId?: string; + subOptions?: FilterOption[]; } export enum ToolbarType { diff --git a/frontend/app/src/components/v1/ui/badge.tsx b/frontend/app/src/components/v1/ui/badge.tsx index c16748b92..473816fc6 100644 --- a/frontend/app/src/components/v1/ui/badge.tsx +++ b/frontend/app/src/components/v1/ui/badge.tsx @@ -25,6 +25,8 @@ const badgeVariants = cva( 'border-transparent rounded-sm font-normal text-slate-800 dark:text-slate-300 bg-slate-500/20 ring-slate-500/30', cancelled: 'border-transparent rounded-sm font-normal text-orange-800 dark:text-orange-300 bg-orange-500/20 ring-orange-500/30', + evicted: + 'border-transparent rounded-sm font-normal text-indigo-800 dark:text-indigo-300 bg-indigo-500/20 ring-indigo-500/30', }, }, defaultVariants: { diff --git a/frontend/app/src/lib/api/generated/Api.ts b/frontend/app/src/lib/api/generated/Api.ts index 640e71940..e12d87f40 100644 --- a/frontend/app/src/lib/api/generated/Api.ts +++ b/frontend/app/src/lib/api/generated/Api.ts @@ -92,6 +92,8 @@ import { UserLoginRequest, UserRegisterRequest, UserTenantMembershipsList, + V1BranchDurableTaskRequest, + V1BranchDurableTaskResponse, V1CELDebugRequest, V1CELDebugResponse, V1CancelTaskRequest, @@ -108,6 +110,8 @@ import { V1LogLineOrderByDirection, V1ReplayTaskRequest, V1ReplayedTasks, + V1RestoreTaskResponse, + V1RunningFilter, V1TaskEventList, V1TaskPointMetrics, V1TaskRunMetrics, @@ -304,6 +308,23 @@ export class Api< format: "json", ...params, }); + /** + * @description Restore 
an evicted durable task + * + * @tags Task + * @name V1TaskRestore + * @summary Restore a task + * @request POST:/api/v1/stable/tasks/{task}/restore + * @secure + */ + v1TaskRestore = (task: string, params: RequestParams = {}) => + this.request({ + path: `/api/v1/stable/tasks/${task}/restore`, + method: "POST", + secure: true, + format: "json", + ...params, + }); /** * @description Lists all tasks that belong a specific list of dags * @@ -398,6 +419,8 @@ export class Api< triggering_event_external_id?: string; /** A flag for whether or not to include the input and output payloads in the response. Defaults to `true` if unset. */ include_payloads?: boolean; + /** Filter within the RUNNING status bucket. ALL returns both on-worker and evicted tasks, ON_WORKER returns only tasks running on a worker, EVICTED returns only evicted tasks. Defaults to ALL. */ + running_filter?: V1RunningFilter; }, params: RequestParams = {}, ) => @@ -462,6 +485,8 @@ export class Api< additional_metadata?: string[]; /** The workflow ids to find runs for */ workflow_ids?: string[]; + /** Filter within the RUNNING status bucket. ALL returns both on-worker and evicted tasks, ON_WORKER returns only tasks running on a worker, EVICTED returns only evicted tasks. Defaults to ALL. */ + running_filter?: V1RunningFilter; }, params: RequestParams = {}, ) => @@ -496,6 +521,29 @@ export class Api< format: "json", ...params, }); + /** + * @description Branch a durable task from a specific node, creating a new branch and re-processing its matches. 
+ * + * @tags Workflow Runs + * @name V1DurableTaskBranch + * @summary Branch durable task + * @request POST:/api/v1/stable/tenants/{tenant}/durable-tasks/branch + * @secure + */ + v1DurableTaskBranch = ( + tenant: string, + data: V1BranchDurableTaskRequest, + params: RequestParams = {}, + ) => + this.request({ + path: `/api/v1/stable/tenants/${tenant}/durable-tasks/branch`, + method: "POST", + body: data, + secure: true, + type: ContentType.Json, + format: "json", + ...params, + }); /** * @description Get a workflow run and its metadata to display on the "detail" page * diff --git a/frontend/app/src/lib/api/generated/data-contracts.ts b/frontend/app/src/lib/api/generated/data-contracts.ts index ef9bff902..998b32bcb 100644 --- a/frontend/app/src/lib/api/generated/data-contracts.ts +++ b/frontend/app/src/lib/api/generated/data-contracts.ts @@ -256,6 +256,12 @@ export enum TenantVersion { V1 = "V1", } +export enum V1RunningFilter { + ALL = "ALL", + EVICTED = "EVICTED", + ON_WORKER = "ON_WORKER", +} + export enum V1LogLineOrderByDirection { ASC = "ASC", DESC = "DESC", @@ -290,6 +296,8 @@ export enum V1TaskEventType { QUEUED = "QUEUED", SKIPPED = "SKIPPED", COULD_NOT_SEND_TO_WORKER = "COULD_NOT_SEND_TO_WORKER", + DURABLE_EVICTED = "DURABLE_EVICTED", + DURABLE_RESTORING = "DURABLE_RESTORING", } export enum V1WorkflowType { @@ -362,6 +370,8 @@ export interface V1TaskSummary { /** The output of the task run (for the latest run) */ output: object; status: V1TaskStatus; + /** Whether the task has been evicted from a worker (still counts as RUNNING). */ + isEvicted?: boolean; /** * The timestamp the task run started. 
* @format date-time @@ -548,6 +558,10 @@ export interface V1ReplayedTasks { ids?: string[]; } +export interface V1RestoreTaskResponse { + requeued: boolean; +} + export interface V1DagChildren { /** @format uuid */ dagId?: string; @@ -665,11 +679,53 @@ export interface V1WorkflowRunDetails { workflowConfig?: object; } +export interface V1BranchDurableTaskRequest { + /** + * The external id of the durable task to branch. + * @format uuid + * @minLength 36 + * @maxLength 36 + */ + taskExternalId: string; + /** + * The node id to replay from. + * @format int64 + */ + nodeId: number; + /** + * The branch id to replay from. + * @format int64 + */ + branchId: number; +} + +export interface V1BranchDurableTaskResponse { + /** + * The external id of the durable task. + * @format uuid + * @minLength 36 + * @maxLength 36 + */ + taskExternalId: string; + /** + * The node id of the new entry. + * @format int64 + */ + nodeId: number; + /** + * The branch id of the new entry. + * @format int64 + */ + branchId: number; +} + export interface V1TaskTiming { metadata: APIResourceMeta; /** The depth of the task in the waterfall. */ depth: number; status: V1TaskStatus; + /** Whether the task has been evicted from a worker (still counts as RUNNING). */ + isEvicted?: boolean; /** The display name of the task run. */ taskDisplayName: string; /** @@ -733,9 +789,17 @@ export interface V1TaskTimingList { rows: V1TaskTiming[]; } +export interface V1RunningDetailCount { + /** The number of evicted tasks within the RUNNING status bucket. */ + evicted: number; + /** The number of tasks currently on a worker within the RUNNING status bucket. 
*/ + onWorker: number; +} + export interface V1TaskRunMetric { status: V1TaskStatus; count: number; + runningDetailCount?: V1RunningDetailCount; } export type V1TaskRunMetrics = V1TaskRunMetric[]; diff --git a/frontend/app/src/pages/main/v1/workflow-runs-v1/$run/index.tsx b/frontend/app/src/pages/main/v1/workflow-runs-v1/$run/index.tsx index 10b78a643..86bd771d8 100644 --- a/frontend/app/src/pages/main/v1/workflow-runs-v1/$run/index.tsx +++ b/frontend/app/src/pages/main/v1/workflow-runs-v1/$run/index.tsx @@ -60,8 +60,12 @@ function statusToBadgeVariant(status: V1TaskStatus) { return 'cancelled'; case V1TaskStatus.QUEUED: return 'queued'; - default: + case V1TaskStatus.RUNNING: return 'inProgress'; + default: { + const exhaustivenessCheck: never = status; + throw new Error(`Unknown status: ${exhaustivenessCheck}`); + } } } diff --git a/frontend/app/src/pages/main/v1/workflow-runs-v1/$run/v2components/events-columns.tsx b/frontend/app/src/pages/main/v1/workflow-runs-v1/$run/v2components/events-columns.tsx index 8df7db685..8c760e36b 100644 --- a/frontend/app/src/pages/main/v1/workflow-runs-v1/$run/v2components/events-columns.tsx +++ b/frontend/app/src/pages/main/v1/workflow-runs-v1/$run/v2components/events-columns.tsx @@ -17,9 +17,11 @@ import { } from '@heroicons/react/24/outline'; import { Link } from '@tanstack/react-router'; +type EventSeverity = StepRunEventSeverity | 'EVICTION'; + function eventTypeToSeverity( eventType: V1TaskEventType | undefined, -): StepRunEventSeverity { +): EventSeverity { switch (eventType) { case V1TaskEventType.FAILED: case V1TaskEventType.RATE_LIMIT_ERROR: @@ -32,7 +34,10 @@ function eventTypeToSeverity( case V1TaskEventType.REQUEUED_RATE_LIMIT: case V1TaskEventType.RETRIED_BY_USER: case V1TaskEventType.RETRYING: + case V1TaskEventType.DURABLE_RESTORING: return StepRunEventSeverity.WARNING; + case V1TaskEventType.DURABLE_EVICTED: + return 'EVICTION'; default: return StepRunEventSeverity.INFO; } @@ -176,6 +181,10 @@ function 
mapEventTypeToTitle(eventType: V1TaskEventType | undefined): string { return 'Skipped'; case V1TaskEventType.COULD_NOT_SEND_TO_WORKER: return 'Could not send to worker'; + case V1TaskEventType.DURABLE_EVICTED: + return 'Durable task evicted'; + case V1TaskEventType.DURABLE_RESTORING: + return 'Durable task restoring'; case undefined: return 'Unknown'; default: @@ -184,13 +193,14 @@ function mapEventTypeToTitle(eventType: V1TaskEventType | undefined): string { } } -const RUN_STATUS_VARIANTS: Record = { +const RUN_STATUS_VARIANTS: Record = { INFO: 'border-transparent rounded-full bg-green-500', CRITICAL: 'border-transparent rounded-full bg-red-500', WARNING: 'border-transparent rounded-full bg-yellow-500', + EVICTION: 'border-transparent rounded-full bg-indigo-500', }; -function EventIndicator({ severity }: { severity: StepRunEventSeverity }) { +function EventIndicator({ severity }: { severity: EventSeverity }) { return (
), enableSorting: false, @@ -201,6 +204,18 @@ export const columns: ( enableSorting: false, enableHiding: false, }, + { + accessorKey: runningFilterKey, + header: ({ column }) => ( + + ), + cell: () => null, + enableSorting: false, + enableHiding: false, + }, { accessorKey: createdAtKey, header: ({ column }) => ( diff --git a/frontend/app/src/pages/main/v1/workflow-runs-v1/hooks/runs-provider.tsx b/frontend/app/src/pages/main/v1/workflow-runs-v1/hooks/runs-provider.tsx index 715c42743..e50946724 100644 --- a/frontend/app/src/pages/main/v1/workflow-runs-v1/hooks/runs-provider.tsx +++ b/frontend/app/src/pages/main/v1/workflow-runs-v1/hooks/runs-provider.tsx @@ -98,6 +98,7 @@ export const RunsProvider = ({ ...initColumnVisibility, parentTaskExternalId: false, // Always hidden, used for filtering only flattenDAGs: false, // Always hidden, used for filtering only + runningFilter: false, // Always hidden, used for filtering only }); const { @@ -147,6 +148,7 @@ export const RunsProvider = ({ createdAfter: filters.apiFilters.since, finishedBefore: filters.apiFilters.until, statuses: filters.apiFilters.statuses, + runningFilter: filters.apiFilters.runningFilter, additionalMetadata: filters.apiFilters.additionalMetadata, workerId, workflowIds: diff --git a/frontend/app/src/pages/main/v1/workflow-runs-v1/hooks/use-runs-table-filters.tsx b/frontend/app/src/pages/main/v1/workflow-runs-v1/hooks/use-runs-table-filters.tsx index e8afe1b22..76803600c 100644 --- a/frontend/app/src/pages/main/v1/workflow-runs-v1/hooks/use-runs-table-filters.tsx +++ b/frontend/app/src/pages/main/v1/workflow-runs-v1/hooks/use-runs-table-filters.tsx @@ -7,9 +7,10 @@ import { finishedBeforeKey, isCustomTimeRangeKey, timeWindowKey, + runningFilterKey, } from '../components/v1/task-runs-columns'; import { useZodColumnFilters } from '@/hooks/use-zod-column-filters'; -import { V1TaskStatus } from '@/lib/api'; +import { V1RunningFilter, V1TaskStatus } from '@/lib/api'; import { useSearchParams } from 
'@/lib/router-helpers'; import { ColumnFiltersState } from '@tanstack/react-table'; import { useCallback, useMemo } from 'react'; @@ -46,6 +47,7 @@ type APIFilters = { workflowIds?: string[]; additionalMetadata?: string[]; flattenDAGs: boolean; + runningFilter?: V1RunningFilter; }; export type FilterActions = { @@ -78,6 +80,7 @@ const createApiFilterSchema = (initialValues?: { workflowIds?: string[] }) => ), m: z.array(z.string()).optional(), // additional metadata f: z.boolean().default(false), // flatten dags + rf: z.nativeEnum(V1RunningFilter).optional(), // running sub-filter (undefined = ALL) }); export const useRunsTableFilters = ( @@ -104,6 +107,7 @@ export const useRunsTableFilters = ( w: workflowKey, m: additionalMetadataKey, f: flattenDAGsKey, + rf: runningFilterKey, }); const { @@ -122,6 +126,7 @@ export const useRunsTableFilters = ( w: selectedWorkflowIds, m: selectedAdditionalMetadata, f: selectedFlattenDAGs, + rf: selectedRunningFilter, } = zodState; const createdAfter = useMemo(() => { @@ -219,6 +224,7 @@ export const useRunsTableFilters = ( workflowIds: selectedWorkflowIds, additionalMetadata: selectedAdditionalMetadata, flattenDAGs: selectedFlattenDAGs || false, + runningFilter: selectedRunningFilter, }), [ createdAfter, @@ -227,6 +233,7 @@ export const useRunsTableFilters = ( selectedWorkflowIds, selectedAdditionalMetadata, selectedFlattenDAGs, + selectedRunningFilter, ], ); diff --git a/frontend/app/src/pages/main/v1/workflow-runs-v1/hooks/use-runs.tsx b/frontend/app/src/pages/main/v1/workflow-runs-v1/hooks/use-runs.tsx index aef7606d2..72ffd9675 100644 --- a/frontend/app/src/pages/main/v1/workflow-runs-v1/hooks/use-runs.tsx +++ b/frontend/app/src/pages/main/v1/workflow-runs-v1/hooks/use-runs.tsx @@ -1,7 +1,12 @@ import { useRefetchInterval } from '@/contexts/refetch-interval-context'; import { usePagination } from '@/hooks/use-pagination'; import { useCurrentTenantId } from '@/hooks/use-tenant'; -import { queries, V1TaskSummary, V1TaskStatus } 
from '@/lib/api'; +import { + queries, + V1RunningFilter, + V1TaskSummary, + V1TaskStatus, +} from '@/lib/api'; import { useQuery } from '@tanstack/react-query'; import { RowSelectionState } from '@tanstack/react-table'; import { useCallback, useMemo, useState } from 'react'; @@ -12,6 +17,7 @@ type UseRunsProps = { createdAfter?: string; finishedBefore?: string; statuses?: V1TaskStatus[]; + runningFilter?: V1RunningFilter; additionalMetadata?: string[]; workerId: string | undefined; workflowIds?: string[]; @@ -27,6 +33,7 @@ export const useRuns = ({ createdAfter, finishedBefore, statuses, + runningFilter, additionalMetadata, workerId, workflowIds, @@ -68,6 +75,7 @@ export const useRuns = ({ only_tasks: onlyTasks, triggering_event_external_id: triggeringEventExternalId, include_payloads: false, + running_filter: runningFilter, }), placeholderData: (prev) => prev, refetchInterval: diff --git a/frontend/app/src/pages/main/v1/workflow-runs-v1/hooks/use-toolbar-filters.tsx b/frontend/app/src/pages/main/v1/workflow-runs-v1/hooks/use-toolbar-filters.tsx index 3f3abda62..44950a786 100644 --- a/frontend/app/src/pages/main/v1/workflow-runs-v1/hooks/use-toolbar-filters.tsx +++ b/frontend/app/src/pages/main/v1/workflow-runs-v1/hooks/use-toolbar-filters.tsx @@ -2,6 +2,7 @@ import { additionalMetadataKey, createdAtKey, flattenDAGsKey, + runningFilterKey, statusKey, workflowKey, } from '../components/v1/task-runs-columns'; @@ -13,10 +14,10 @@ import { ToolbarType, TimeRangeConfig, } from '@/components/v1/molecules/data-table/data-table-toolbar'; -import { V1TaskStatus } from '@/lib/api'; +import { V1RunningFilter, V1TaskStatus } from '@/lib/api'; import { useMemo } from 'react'; -export const workflowRunStatusFilters = [ +export const workflowRunStatusFilters: FilterOption[] = [ { value: V1TaskStatus.COMPLETED, label: 'Succeeded', @@ -28,6 +29,11 @@ export const workflowRunStatusFilters = [ { value: V1TaskStatus.RUNNING, label: 'Running', + subFilterColumnId: runningFilterKey, + 
subOptions: [ + { value: V1RunningFilter.ON_WORKER, label: 'On Worker' }, + { value: V1RunningFilter.EVICTED, label: 'Evicted' }, + ], }, { value: V1TaskStatus.QUEUED, diff --git a/frontend/app/src/pages/main/v1/workflow-runs/components/run-statuses.tsx b/frontend/app/src/pages/main/v1/workflow-runs/components/run-statuses.tsx index 41725eb97..7613649d5 100644 --- a/frontend/app/src/pages/main/v1/workflow-runs/components/run-statuses.tsx +++ b/frontend/app/src/pages/main/v1/workflow-runs/components/run-statuses.tsx @@ -62,8 +62,10 @@ function createV1RunStatusVariant(status: V1TaskStatus): RunStatusVariant { return { text: 'Running', variant: 'inProgress' }; case V1TaskStatus.QUEUED: return { text: 'Queued', variant: 'queued' }; - default: - return { text: 'Unknown', variant: 'outline' }; + default: { + const exhaustivenessCheck: never = status; + throw new Error(`Unknown status: ${exhaustivenessCheck}`); + } } } @@ -127,17 +129,34 @@ export function RunStatus({ export function V1RunStatus({ status, errorMessage, + isEvicted, className, }: { status: V1TaskStatus; errorMessage?: string; + isEvicted?: boolean; className?: string; }) { const { text, variant } = createV1RunStatusVariant(status); const StatusBadge = () => ( - {capitalize(text)} + + {capitalize(isEvicted ? 
'Running' : text)} + {isEvicted && ( + + + + + + + This task was evicted from a worker and is waiting to be + restored + + + + )} + ); diff --git a/hack/proto/proto.sh b/hack/proto/proto.sh index f55e6faa1..c849d8b34 100644 --- a/hack/proto/proto.sh +++ b/hack/proto/proto.sh @@ -12,7 +12,8 @@ protoc --proto_path=api-contracts \ --go_opt=module=github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1 \ --go-grpc_out=./internal/services/shared/proto/v1 \ --go-grpc_opt=module=github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1 \ - v1/shared/condition.proto + v1/shared/condition.proto \ + v1/shared/trigger.proto protoc --proto_path=api-contracts \ --go_out=./internal/services/shared/proto/v1 \ @@ -36,6 +37,7 @@ protoc --proto_path=api-contracts/events --go_out=./internal/services/ingestor/c --go-grpc_out=./internal/services/ingestor/contracts --go-grpc_opt=paths=source_relative \ events.proto -protoc --proto_path=api-contracts/workflows --go_out=./internal/services/admin/contracts --go_opt=paths=source_relative \ +protoc --proto_path=api-contracts/workflows --proto_path=api-contracts \ + --go_out=./internal/services/admin/contracts --go_opt=paths=source_relative \ --go-grpc_out=./internal/services/admin/contracts --go-grpc_opt=paths=source_relative \ workflows.proto diff --git a/internal/msgqueue/message_ids.go b/internal/msgqueue/message_ids.go index 13f7a51b2..cb787210d 100644 --- a/internal/msgqueue/message_ids.go +++ b/internal/msgqueue/message_ids.go @@ -3,6 +3,8 @@ package msgqueue // Message ID constants for tenant messages const ( MsgIDCancelTasks = "cancel-tasks" + MsgIDDurableCallbackCompleted = "durable-callback-completed" + MsgIDDurableRestoreTask = "durable-restore-task" MsgIDCELEvaluationFailure = "cel-evaluation-failure" MsgIDCheckTenantQueue = "check-tenant-queue" MsgIDNewWorker = "new-worker" diff --git a/internal/services/admin/contracts/compat.go b/internal/services/admin/contracts/compat.go new file mode 100644 index 
000000000..bfead0ef9 --- /dev/null +++ b/internal/services/admin/contracts/compat.go @@ -0,0 +1,9 @@ +package contracts + +import v1 "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1" + +// Type aliases for backwards compatibility after these types moved to v1/shared/trigger.proto. +// Go type aliases are transparent — contracts.X IS v1.X, no conversion needed. +type TriggerWorkflowRequest = v1.TriggerWorkflowRequest +type DesiredWorkerLabels = v1.DesiredWorkerLabels +type WorkerLabelComparator = v1.WorkerLabelComparator diff --git a/internal/services/admin/contracts/workflows.pb.go b/internal/services/admin/contracts/workflows.pb.go index b01c1b873..d94631440 100644 --- a/internal/services/admin/contracts/workflows.pb.go +++ b/internal/services/admin/contracts/workflows.pb.go @@ -7,6 +7,7 @@ package contracts import ( + v1 "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" timestamppb "google.golang.org/protobuf/types/known/timestamppb" @@ -171,64 +172,6 @@ func (ConcurrencyLimitStrategy) EnumDescriptor() ([]byte, []int) { return file_workflows_proto_rawDescGZIP(), []int{2} } -type WorkerLabelComparator int32 - -const ( - WorkerLabelComparator_EQUAL WorkerLabelComparator = 0 - WorkerLabelComparator_NOT_EQUAL WorkerLabelComparator = 1 - WorkerLabelComparator_GREATER_THAN WorkerLabelComparator = 2 - WorkerLabelComparator_GREATER_THAN_OR_EQUAL WorkerLabelComparator = 3 - WorkerLabelComparator_LESS_THAN WorkerLabelComparator = 4 - WorkerLabelComparator_LESS_THAN_OR_EQUAL WorkerLabelComparator = 5 -) - -// Enum value maps for WorkerLabelComparator. 
-var ( - WorkerLabelComparator_name = map[int32]string{ - 0: "EQUAL", - 1: "NOT_EQUAL", - 2: "GREATER_THAN", - 3: "GREATER_THAN_OR_EQUAL", - 4: "LESS_THAN", - 5: "LESS_THAN_OR_EQUAL", - } - WorkerLabelComparator_value = map[string]int32{ - "EQUAL": 0, - "NOT_EQUAL": 1, - "GREATER_THAN": 2, - "GREATER_THAN_OR_EQUAL": 3, - "LESS_THAN": 4, - "LESS_THAN_OR_EQUAL": 5, - } -) - -func (x WorkerLabelComparator) Enum() *WorkerLabelComparator { - p := new(WorkerLabelComparator) - *p = x - return p -} - -func (x WorkerLabelComparator) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (WorkerLabelComparator) Descriptor() protoreflect.EnumDescriptor { - return file_workflows_proto_enumTypes[3].Descriptor() -} - -func (WorkerLabelComparator) Type() protoreflect.EnumType { - return &file_workflows_proto_enumTypes[3] -} - -func (x WorkerLabelComparator) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use WorkerLabelComparator.Descriptor instead. -func (WorkerLabelComparator) EnumDescriptor() ([]byte, []int) { - return file_workflows_proto_rawDescGZIP(), []int{3} -} - type RateLimitDuration int32 const ( @@ -274,11 +217,11 @@ func (x RateLimitDuration) String() string { } func (RateLimitDuration) Descriptor() protoreflect.EnumDescriptor { - return file_workflows_proto_enumTypes[4].Descriptor() + return file_workflows_proto_enumTypes[3].Descriptor() } func (RateLimitDuration) Type() protoreflect.EnumType { - return &file_workflows_proto_enumTypes[4] + return &file_workflows_proto_enumTypes[3] } func (x RateLimitDuration) Number() protoreflect.EnumNumber { @@ -287,7 +230,7 @@ func (x RateLimitDuration) Number() protoreflect.EnumNumber { // Deprecated: Use RateLimitDuration.Descriptor instead. 
func (RateLimitDuration) EnumDescriptor() ([]byte, []int) { - return file_workflows_proto_rawDescGZIP(), []int{4} + return file_workflows_proto_rawDescGZIP(), []int{3} } type PutWorkflowRequest struct { @@ -624,120 +567,29 @@ func (x *CreateWorkflowJobOpts) GetSteps() []*CreateWorkflowStepOpts { return nil } -type DesiredWorkerLabels struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // value of the affinity - StrValue *string `protobuf:"bytes,1,opt,name=str_value,json=strValue,proto3,oneof" json:"str_value,omitempty"` - IntValue *int32 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof" json:"int_value,omitempty"` - // * - // (optional) Specifies whether the affinity setting is required. - // If required, the worker will not accept actions that do not have a truthy affinity setting. - // - // Defaults to false. - Required *bool `protobuf:"varint,3,opt,name=required,proto3,oneof" json:"required,omitempty"` - // * - // (optional) Specifies the comparator for the affinity setting. - // If not set, the default is EQUAL. - Comparator *WorkerLabelComparator `protobuf:"varint,4,opt,name=comparator,proto3,enum=WorkerLabelComparator,oneof" json:"comparator,omitempty"` - // * - // (optional) Specifies the weight of the affinity setting. - // If not set, the default is 100. 
- Weight *int32 `protobuf:"varint,5,opt,name=weight,proto3,oneof" json:"weight,omitempty"` -} - -func (x *DesiredWorkerLabels) Reset() { - *x = DesiredWorkerLabels{} - if protoimpl.UnsafeEnabled { - mi := &file_workflows_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DesiredWorkerLabels) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DesiredWorkerLabels) ProtoMessage() {} - -func (x *DesiredWorkerLabels) ProtoReflect() protoreflect.Message { - mi := &file_workflows_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DesiredWorkerLabels.ProtoReflect.Descriptor instead. -func (*DesiredWorkerLabels) Descriptor() ([]byte, []int) { - return file_workflows_proto_rawDescGZIP(), []int{4} -} - -func (x *DesiredWorkerLabels) GetStrValue() string { - if x != nil && x.StrValue != nil { - return *x.StrValue - } - return "" -} - -func (x *DesiredWorkerLabels) GetIntValue() int32 { - if x != nil && x.IntValue != nil { - return *x.IntValue - } - return 0 -} - -func (x *DesiredWorkerLabels) GetRequired() bool { - if x != nil && x.Required != nil { - return *x.Required - } - return false -} - -func (x *DesiredWorkerLabels) GetComparator() WorkerLabelComparator { - if x != nil && x.Comparator != nil { - return *x.Comparator - } - return WorkerLabelComparator_EQUAL -} - -func (x *DesiredWorkerLabels) GetWeight() int32 { - if x != nil && x.Weight != nil { - return *x.Weight - } - return 0 -} - // CreateWorkflowStepOpts represents options to create a workflow task. 
type CreateWorkflowStepOpts struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ReadableId string `protobuf:"bytes,1,opt,name=readable_id,json=readableId,proto3" json:"readable_id,omitempty"` // (required) the task name - Action string `protobuf:"bytes,2,opt,name=action,proto3" json:"action,omitempty"` // (required) the task action id - Timeout string `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"` // (optional) the task timeout - Inputs string `protobuf:"bytes,4,opt,name=inputs,proto3" json:"inputs,omitempty"` // (optional) the task inputs, assuming string representation of JSON - Parents []string `protobuf:"bytes,5,rep,name=parents,proto3" json:"parents,omitempty"` // (optional) the task parents. if none are passed in, this is a root task - UserData string `protobuf:"bytes,6,opt,name=user_data,json=userData,proto3" json:"user_data,omitempty"` // (optional) the custom task user data, assuming string representation of JSON - Retries int32 `protobuf:"varint,7,opt,name=retries,proto3" json:"retries,omitempty"` // (optional) the number of retries for the task, default 0 - RateLimits []*CreateStepRateLimit `protobuf:"bytes,8,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits,omitempty"` // (optional) the rate limits for the task - WorkerLabels map[string]*DesiredWorkerLabels `protobuf:"bytes,9,rep,name=worker_labels,json=workerLabels,proto3" json:"worker_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // (optional) the desired worker affinity state for the task - BackoffFactor *float32 `protobuf:"fixed32,10,opt,name=backoff_factor,json=backoffFactor,proto3,oneof" json:"backoff_factor,omitempty"` // (optional) the retry backoff factor for the task - BackoffMaxSeconds *int32 `protobuf:"varint,11,opt,name=backoff_max_seconds,json=backoffMaxSeconds,proto3,oneof" json:"backoff_max_seconds,omitempty"` // (optional) the maximum 
backoff time for the task + ReadableId string `protobuf:"bytes,1,opt,name=readable_id,json=readableId,proto3" json:"readable_id,omitempty"` // (required) the task name + Action string `protobuf:"bytes,2,opt,name=action,proto3" json:"action,omitempty"` // (required) the task action id + Timeout string `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"` // (optional) the task timeout + Inputs string `protobuf:"bytes,4,opt,name=inputs,proto3" json:"inputs,omitempty"` // (optional) the task inputs, assuming string representation of JSON + Parents []string `protobuf:"bytes,5,rep,name=parents,proto3" json:"parents,omitempty"` // (optional) the task parents. if none are passed in, this is a root task + UserData string `protobuf:"bytes,6,opt,name=user_data,json=userData,proto3" json:"user_data,omitempty"` // (optional) the custom task user data, assuming string representation of JSON + Retries int32 `protobuf:"varint,7,opt,name=retries,proto3" json:"retries,omitempty"` // (optional) the number of retries for the task, default 0 + RateLimits []*CreateStepRateLimit `protobuf:"bytes,8,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits,omitempty"` // (optional) the rate limits for the task + WorkerLabels map[string]*v1.DesiredWorkerLabels `protobuf:"bytes,9,rep,name=worker_labels,json=workerLabels,proto3" json:"worker_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // (optional) the desired worker affinity state for the task + BackoffFactor *float32 `protobuf:"fixed32,10,opt,name=backoff_factor,json=backoffFactor,proto3,oneof" json:"backoff_factor,omitempty"` // (optional) the retry backoff factor for the task + BackoffMaxSeconds *int32 `protobuf:"varint,11,opt,name=backoff_max_seconds,json=backoffMaxSeconds,proto3,oneof" json:"backoff_max_seconds,omitempty"` // (optional) the maximum backoff time for the task } func (x *CreateWorkflowStepOpts) Reset() { *x = CreateWorkflowStepOpts{} if 
protoimpl.UnsafeEnabled { - mi := &file_workflows_proto_msgTypes[5] + mi := &file_workflows_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -750,7 +602,7 @@ func (x *CreateWorkflowStepOpts) String() string { func (*CreateWorkflowStepOpts) ProtoMessage() {} func (x *CreateWorkflowStepOpts) ProtoReflect() protoreflect.Message { - mi := &file_workflows_proto_msgTypes[5] + mi := &file_workflows_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -763,7 +615,7 @@ func (x *CreateWorkflowStepOpts) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateWorkflowStepOpts.ProtoReflect.Descriptor instead. func (*CreateWorkflowStepOpts) Descriptor() ([]byte, []int) { - return file_workflows_proto_rawDescGZIP(), []int{5} + return file_workflows_proto_rawDescGZIP(), []int{4} } func (x *CreateWorkflowStepOpts) GetReadableId() string { @@ -822,7 +674,7 @@ func (x *CreateWorkflowStepOpts) GetRateLimits() []*CreateStepRateLimit { return nil } -func (x *CreateWorkflowStepOpts) GetWorkerLabels() map[string]*DesiredWorkerLabels { +func (x *CreateWorkflowStepOpts) GetWorkerLabels() map[string]*v1.DesiredWorkerLabels { if x != nil { return x.WorkerLabels } @@ -859,7 +711,7 @@ type CreateStepRateLimit struct { func (x *CreateStepRateLimit) Reset() { *x = CreateStepRateLimit{} if protoimpl.UnsafeEnabled { - mi := &file_workflows_proto_msgTypes[6] + mi := &file_workflows_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -872,7 +724,7 @@ func (x *CreateStepRateLimit) String() string { func (*CreateStepRateLimit) ProtoMessage() {} func (x *CreateStepRateLimit) ProtoReflect() protoreflect.Message { - mi := &file_workflows_proto_msgTypes[6] + mi := &file_workflows_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if 
ms.LoadMessageInfo() == nil { @@ -885,7 +737,7 @@ func (x *CreateStepRateLimit) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateStepRateLimit.ProtoReflect.Descriptor instead. func (*CreateStepRateLimit) Descriptor() ([]byte, []int) { - return file_workflows_proto_rawDescGZIP(), []int{6} + return file_workflows_proto_rawDescGZIP(), []int{5} } func (x *CreateStepRateLimit) GetKey() string { @@ -940,7 +792,7 @@ type ListWorkflowsRequest struct { func (x *ListWorkflowsRequest) Reset() { *x = ListWorkflowsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_workflows_proto_msgTypes[7] + mi := &file_workflows_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -953,7 +805,7 @@ func (x *ListWorkflowsRequest) String() string { func (*ListWorkflowsRequest) ProtoMessage() {} func (x *ListWorkflowsRequest) ProtoReflect() protoreflect.Message { - mi := &file_workflows_proto_msgTypes[7] + mi := &file_workflows_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -966,7 +818,7 @@ func (x *ListWorkflowsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListWorkflowsRequest.ProtoReflect.Descriptor instead. 
func (*ListWorkflowsRequest) Descriptor() ([]byte, []int) { - return file_workflows_proto_rawDescGZIP(), []int{7} + return file_workflows_proto_rawDescGZIP(), []int{6} } type ScheduleWorkflowRequest struct { @@ -997,7 +849,7 @@ type ScheduleWorkflowRequest struct { func (x *ScheduleWorkflowRequest) Reset() { *x = ScheduleWorkflowRequest{} if protoimpl.UnsafeEnabled { - mi := &file_workflows_proto_msgTypes[8] + mi := &file_workflows_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1010,7 +862,7 @@ func (x *ScheduleWorkflowRequest) String() string { func (*ScheduleWorkflowRequest) ProtoMessage() {} func (x *ScheduleWorkflowRequest) ProtoReflect() protoreflect.Message { - mi := &file_workflows_proto_msgTypes[8] + mi := &file_workflows_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1023,7 +875,7 @@ func (x *ScheduleWorkflowRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ScheduleWorkflowRequest.ProtoReflect.Descriptor instead. 
func (*ScheduleWorkflowRequest) Descriptor() ([]byte, []int) { - return file_workflows_proto_rawDescGZIP(), []int{8} + return file_workflows_proto_rawDescGZIP(), []int{7} } func (x *ScheduleWorkflowRequest) GetName() string { @@ -1102,7 +954,7 @@ type ScheduledWorkflow struct { func (x *ScheduledWorkflow) Reset() { *x = ScheduledWorkflow{} if protoimpl.UnsafeEnabled { - mi := &file_workflows_proto_msgTypes[9] + mi := &file_workflows_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1115,7 +967,7 @@ func (x *ScheduledWorkflow) String() string { func (*ScheduledWorkflow) ProtoMessage() {} func (x *ScheduledWorkflow) ProtoReflect() protoreflect.Message { - mi := &file_workflows_proto_msgTypes[9] + mi := &file_workflows_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1128,7 +980,7 @@ func (x *ScheduledWorkflow) ProtoReflect() protoreflect.Message { // Deprecated: Use ScheduledWorkflow.ProtoReflect.Descriptor instead. 
func (*ScheduledWorkflow) Descriptor() ([]byte, []int) { - return file_workflows_proto_rawDescGZIP(), []int{9} + return file_workflows_proto_rawDescGZIP(), []int{8} } func (x *ScheduledWorkflow) GetId() string { @@ -1163,7 +1015,7 @@ type WorkflowVersion struct { func (x *WorkflowVersion) Reset() { *x = WorkflowVersion{} if protoimpl.UnsafeEnabled { - mi := &file_workflows_proto_msgTypes[10] + mi := &file_workflows_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1176,7 +1028,7 @@ func (x *WorkflowVersion) String() string { func (*WorkflowVersion) ProtoMessage() {} func (x *WorkflowVersion) ProtoReflect() protoreflect.Message { - mi := &file_workflows_proto_msgTypes[10] + mi := &file_workflows_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1189,7 +1041,7 @@ func (x *WorkflowVersion) ProtoReflect() protoreflect.Message { // Deprecated: Use WorkflowVersion.ProtoReflect.Descriptor instead. 
func (*WorkflowVersion) Descriptor() ([]byte, []int) { - return file_workflows_proto_rawDescGZIP(), []int{10} + return file_workflows_proto_rawDescGZIP(), []int{9} } func (x *WorkflowVersion) GetId() string { @@ -1254,7 +1106,7 @@ type WorkflowTriggerEventRef struct { func (x *WorkflowTriggerEventRef) Reset() { *x = WorkflowTriggerEventRef{} if protoimpl.UnsafeEnabled { - mi := &file_workflows_proto_msgTypes[11] + mi := &file_workflows_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1267,7 +1119,7 @@ func (x *WorkflowTriggerEventRef) String() string { func (*WorkflowTriggerEventRef) ProtoMessage() {} func (x *WorkflowTriggerEventRef) ProtoReflect() protoreflect.Message { - mi := &file_workflows_proto_msgTypes[11] + mi := &file_workflows_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1280,7 +1132,7 @@ func (x *WorkflowTriggerEventRef) ProtoReflect() protoreflect.Message { // Deprecated: Use WorkflowTriggerEventRef.ProtoReflect.Descriptor instead. 
func (*WorkflowTriggerEventRef) Descriptor() ([]byte, []int) { - return file_workflows_proto_rawDescGZIP(), []int{11} + return file_workflows_proto_rawDescGZIP(), []int{10} } func (x *WorkflowTriggerEventRef) GetParentId() string { @@ -1310,7 +1162,7 @@ type WorkflowTriggerCronRef struct { func (x *WorkflowTriggerCronRef) Reset() { *x = WorkflowTriggerCronRef{} if protoimpl.UnsafeEnabled { - mi := &file_workflows_proto_msgTypes[12] + mi := &file_workflows_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1323,7 +1175,7 @@ func (x *WorkflowTriggerCronRef) String() string { func (*WorkflowTriggerCronRef) ProtoMessage() {} func (x *WorkflowTriggerCronRef) ProtoReflect() protoreflect.Message { - mi := &file_workflows_proto_msgTypes[12] + mi := &file_workflows_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1336,7 +1188,7 @@ func (x *WorkflowTriggerCronRef) ProtoReflect() protoreflect.Message { // Deprecated: Use WorkflowTriggerCronRef.ProtoReflect.Descriptor instead. 
func (*WorkflowTriggerCronRef) Descriptor() ([]byte, []int) { - return file_workflows_proto_rawDescGZIP(), []int{12} + return file_workflows_proto_rawDescGZIP(), []int{11} } func (x *WorkflowTriggerCronRef) GetParentId() string { @@ -1358,13 +1210,13 @@ type BulkTriggerWorkflowRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Workflows []*TriggerWorkflowRequest `protobuf:"bytes,1,rep,name=workflows,proto3" json:"workflows,omitempty"` + Workflows []*v1.TriggerWorkflowRequest `protobuf:"bytes,1,rep,name=workflows,proto3" json:"workflows,omitempty"` } func (x *BulkTriggerWorkflowRequest) Reset() { *x = BulkTriggerWorkflowRequest{} if protoimpl.UnsafeEnabled { - mi := &file_workflows_proto_msgTypes[13] + mi := &file_workflows_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1377,7 +1229,7 @@ func (x *BulkTriggerWorkflowRequest) String() string { func (*BulkTriggerWorkflowRequest) ProtoMessage() {} func (x *BulkTriggerWorkflowRequest) ProtoReflect() protoreflect.Message { - mi := &file_workflows_proto_msgTypes[13] + mi := &file_workflows_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1390,10 +1242,10 @@ func (x *BulkTriggerWorkflowRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BulkTriggerWorkflowRequest.ProtoReflect.Descriptor instead. 
func (*BulkTriggerWorkflowRequest) Descriptor() ([]byte, []int) { - return file_workflows_proto_rawDescGZIP(), []int{13} + return file_workflows_proto_rawDescGZIP(), []int{12} } -func (x *BulkTriggerWorkflowRequest) GetWorkflows() []*TriggerWorkflowRequest { +func (x *BulkTriggerWorkflowRequest) GetWorkflows() []*v1.TriggerWorkflowRequest { if x != nil { return x.Workflows } @@ -1411,7 +1263,7 @@ type BulkTriggerWorkflowResponse struct { func (x *BulkTriggerWorkflowResponse) Reset() { *x = BulkTriggerWorkflowResponse{} if protoimpl.UnsafeEnabled { - mi := &file_workflows_proto_msgTypes[14] + mi := &file_workflows_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1424,7 +1276,7 @@ func (x *BulkTriggerWorkflowResponse) String() string { func (*BulkTriggerWorkflowResponse) ProtoMessage() {} func (x *BulkTriggerWorkflowResponse) ProtoReflect() protoreflect.Message { - mi := &file_workflows_proto_msgTypes[14] + mi := &file_workflows_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1437,7 +1289,7 @@ func (x *BulkTriggerWorkflowResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use BulkTriggerWorkflowResponse.ProtoReflect.Descriptor instead. 
func (*BulkTriggerWorkflowResponse) Descriptor() ([]byte, []int) { - return file_workflows_proto_rawDescGZIP(), []int{14} + return file_workflows_proto_rawDescGZIP(), []int{13} } func (x *BulkTriggerWorkflowResponse) GetWorkflowRunIds() []string { @@ -1447,138 +1299,6 @@ func (x *BulkTriggerWorkflowResponse) GetWorkflowRunIds() []string { return nil } -type TriggerWorkflowRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // (optional) the input data for the workflow - Input string `protobuf:"bytes,2,opt,name=input,proto3" json:"input,omitempty"` - // (optional) the parent workflow run id - ParentId *string `protobuf:"bytes,3,opt,name=parent_id,json=parentId,proto3,oneof" json:"parent_id,omitempty"` - // (optional) the parent task external run id - ParentTaskRunExternalId *string `protobuf:"bytes,4,opt,name=parent_task_run_external_id,json=parentTaskRunExternalId,proto3,oneof" json:"parent_task_run_external_id,omitempty"` - // (optional) the index of the child workflow. if this is set, matches on the index or the - // child key will return an existing workflow run if the parent id, parent task run id, and - // child index/key match an existing workflow run. - ChildIndex *int32 `protobuf:"varint,5,opt,name=child_index,json=childIndex,proto3,oneof" json:"child_index,omitempty"` - // (optional) the key for the child. if this is set, matches on the index or the - // child key will return an existing workflow run if the parent id, parent task run id, and - // child index/key match an existing workflow run. 
- ChildKey *string `protobuf:"bytes,6,opt,name=child_key,json=childKey,proto3,oneof" json:"child_key,omitempty"` - // (optional) additional metadata for the workflow - AdditionalMetadata *string `protobuf:"bytes,7,opt,name=additional_metadata,json=additionalMetadata,proto3,oneof" json:"additional_metadata,omitempty"` - // (optional) desired worker id for the workflow run, - // requires the workflow definition to have a sticky strategy - DesiredWorkerId *string `protobuf:"bytes,8,opt,name=desired_worker_id,json=desiredWorkerId,proto3,oneof" json:"desired_worker_id,omitempty"` - // (optional) override for the priority of the workflow tasks, will set all tasks to this priority - Priority *int32 `protobuf:"varint,9,opt,name=priority,proto3,oneof" json:"priority,omitempty"` - DesiredWorkerLabels map[string]*DesiredWorkerLabels `protobuf:"bytes,10,rep,name=desired_worker_labels,json=desiredWorkerLabels,proto3" json:"desired_worker_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // (optional) override for the desired worker labels for the workflow tasks, used for routing to specific workers (or worker pools) -} - -func (x *TriggerWorkflowRequest) Reset() { - *x = TriggerWorkflowRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_workflows_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TriggerWorkflowRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TriggerWorkflowRequest) ProtoMessage() {} - -func (x *TriggerWorkflowRequest) ProtoReflect() protoreflect.Message { - mi := &file_workflows_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TriggerWorkflowRequest.ProtoReflect.Descriptor instead. 
-func (*TriggerWorkflowRequest) Descriptor() ([]byte, []int) { - return file_workflows_proto_rawDescGZIP(), []int{15} -} - -func (x *TriggerWorkflowRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *TriggerWorkflowRequest) GetInput() string { - if x != nil { - return x.Input - } - return "" -} - -func (x *TriggerWorkflowRequest) GetParentId() string { - if x != nil && x.ParentId != nil { - return *x.ParentId - } - return "" -} - -func (x *TriggerWorkflowRequest) GetParentTaskRunExternalId() string { - if x != nil && x.ParentTaskRunExternalId != nil { - return *x.ParentTaskRunExternalId - } - return "" -} - -func (x *TriggerWorkflowRequest) GetChildIndex() int32 { - if x != nil && x.ChildIndex != nil { - return *x.ChildIndex - } - return 0 -} - -func (x *TriggerWorkflowRequest) GetChildKey() string { - if x != nil && x.ChildKey != nil { - return *x.ChildKey - } - return "" -} - -func (x *TriggerWorkflowRequest) GetAdditionalMetadata() string { - if x != nil && x.AdditionalMetadata != nil { - return *x.AdditionalMetadata - } - return "" -} - -func (x *TriggerWorkflowRequest) GetDesiredWorkerId() string { - if x != nil && x.DesiredWorkerId != nil { - return *x.DesiredWorkerId - } - return "" -} - -func (x *TriggerWorkflowRequest) GetPriority() int32 { - if x != nil && x.Priority != nil { - return *x.Priority - } - return 0 -} - -func (x *TriggerWorkflowRequest) GetDesiredWorkerLabels() map[string]*DesiredWorkerLabels { - if x != nil { - return x.DesiredWorkerLabels - } - return nil -} - type TriggerWorkflowResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1590,7 +1310,7 @@ type TriggerWorkflowResponse struct { func (x *TriggerWorkflowResponse) Reset() { *x = TriggerWorkflowResponse{} if protoimpl.UnsafeEnabled { - mi := &file_workflows_proto_msgTypes[16] + mi := &file_workflows_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1603,7 +1323,7 @@ 
func (x *TriggerWorkflowResponse) String() string { func (*TriggerWorkflowResponse) ProtoMessage() {} func (x *TriggerWorkflowResponse) ProtoReflect() protoreflect.Message { - mi := &file_workflows_proto_msgTypes[16] + mi := &file_workflows_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1616,7 +1336,7 @@ func (x *TriggerWorkflowResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use TriggerWorkflowResponse.ProtoReflect.Descriptor instead. func (*TriggerWorkflowResponse) Descriptor() ([]byte, []int) { - return file_workflows_proto_rawDescGZIP(), []int{16} + return file_workflows_proto_rawDescGZIP(), []int{14} } func (x *TriggerWorkflowResponse) GetWorkflowRunId() string { @@ -1642,7 +1362,7 @@ type PutRateLimitRequest struct { func (x *PutRateLimitRequest) Reset() { *x = PutRateLimitRequest{} if protoimpl.UnsafeEnabled { - mi := &file_workflows_proto_msgTypes[17] + mi := &file_workflows_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1655,7 +1375,7 @@ func (x *PutRateLimitRequest) String() string { func (*PutRateLimitRequest) ProtoMessage() {} func (x *PutRateLimitRequest) ProtoReflect() protoreflect.Message { - mi := &file_workflows_proto_msgTypes[17] + mi := &file_workflows_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1668,7 +1388,7 @@ func (x *PutRateLimitRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PutRateLimitRequest.ProtoReflect.Descriptor instead. 
func (*PutRateLimitRequest) Descriptor() ([]byte, []int) { - return file_workflows_proto_rawDescGZIP(), []int{17} + return file_workflows_proto_rawDescGZIP(), []int{15} } func (x *PutRateLimitRequest) GetKey() string { @@ -1701,7 +1421,7 @@ type PutRateLimitResponse struct { func (x *PutRateLimitResponse) Reset() { *x = PutRateLimitResponse{} if protoimpl.UnsafeEnabled { - mi := &file_workflows_proto_msgTypes[18] + mi := &file_workflows_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1714,7 +1434,7 @@ func (x *PutRateLimitResponse) String() string { func (*PutRateLimitResponse) ProtoMessage() {} func (x *PutRateLimitResponse) ProtoReflect() protoreflect.Message { - mi := &file_workflows_proto_msgTypes[18] + mi := &file_workflows_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1727,7 +1447,7 @@ func (x *PutRateLimitResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PutRateLimitResponse.ProtoReflect.Descriptor instead. 
func (*PutRateLimitResponse) Descriptor() ([]byte, []int) { - return file_workflows_proto_rawDescGZIP(), []int{18} + return file_workflows_proto_rawDescGZIP(), []int{16} } var File_workflows_proto protoreflect.FileDescriptor @@ -1736,277 +1456,217 @@ var file_workflows_proto_rawDesc = []byte{ 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0x44, 0x0a, 0x12, 0x50, 0x75, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x6f, 0x70, 0x74, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4f, 0x70, - 0x74, 0x73, 0x52, 0x04, 0x6f, 0x70, 0x74, 0x73, 0x22, 0xe7, 0x05, 0x0a, 0x19, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, - 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, 0x12, 0x23, 0x0a, - 0x0d, 0x63, 0x72, 0x6f, 0x6e, 0x5f, 0x74, 
0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x72, 0x6f, 0x6e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, - 0x72, 0x73, 0x12, 0x49, 0x0a, 0x12, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, - 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x11, 0x73, 0x63, 0x68, 0x65, - 0x64, 0x75, 0x6c, 0x65, 0x64, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, 0x12, 0x2a, 0x0a, - 0x04, 0x6a, 0x6f, 0x62, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x4f, - 0x70, 0x74, 0x73, 0x52, 0x04, 0x6a, 0x6f, 0x62, 0x73, 0x12, 0x3a, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, - 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, - 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, - 0x65, 0x6e, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x73, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, - 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2e, 0x0a, 0x10, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, - 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, - 0x00, 0x52, 0x0f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, - 0x75, 0x74, 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 0x0a, 0x63, 0x72, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, - 0x70, 0x75, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x09, 0x63, 0x72, 0x6f, - 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x88, 0x01, 0x01, 0x12, 0x41, 0x0a, 0x0e, 0x6f, 0x6e, 0x5f, - 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x6a, 0x6f, 0x62, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x16, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 
0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x4f, 0x70, 0x74, 0x73, 0x48, 0x02, 0x52, 0x0c, 0x6f, 0x6e, 0x46, - 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x4a, 0x6f, 0x62, 0x88, 0x01, 0x01, 0x12, 0x2c, 0x0a, 0x06, - 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x53, - 0x74, 0x69, 0x63, 0x6b, 0x79, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x48, 0x03, 0x52, - 0x06, 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x88, 0x01, 0x01, 0x12, 0x26, 0x0a, 0x04, 0x6b, 0x69, - 0x6e, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x4b, 0x69, 0x6e, 0x64, 0x48, 0x04, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x88, - 0x01, 0x01, 0x12, 0x2e, 0x0a, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x70, 0x72, - 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x05, 0x48, 0x05, 0x52, 0x0f, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x88, - 0x01, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x63, 0x72, 0x6f, 0x6e, - 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x6f, 0x6e, 0x5f, 0x66, 0x61, - 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x6a, 0x6f, 0x62, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x73, 0x74, - 0x69, 0x63, 0x6b, 0x79, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x42, 0x13, 0x0a, - 0x11, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, - 0x74, 0x79, 0x22, 0xfc, 0x01, 0x0a, 0x17, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, - 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x73, 0x12, 0x1b, - 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, - 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x1e, 0x0a, 0x08, 0x6d, - 0x61, 0x78, 0x5f, 
0x72, 0x75, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x48, 0x01, 0x52, - 0x07, 0x6d, 0x61, 0x78, 0x52, 0x75, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x45, 0x0a, 0x0e, 0x6c, - 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, - 0x79, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x48, 0x02, - 0x52, 0x0d, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x88, - 0x01, 0x01, 0x12, 0x23, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x75, 0x6e, 0x73, 0x42, - 0x11, 0x0a, 0x0f, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, - 0x67, 0x79, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x22, 0x82, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x4f, 0x70, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x53, 0x74, 0x65, 0x70, 0x4f, 0x70, 0x74, 0x73, 0x52, 0x05, 0x73, 0x74, 0x65, 0x70, 0x73, - 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0x97, 0x02, 
0x0a, 0x13, 0x44, 0x65, 0x73, 0x69, 0x72, - 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x20, - 0x0a, 0x09, 0x73, 0x74, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x74, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x88, 0x01, 0x01, - 0x12, 0x20, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x05, 0x48, 0x01, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x88, - 0x01, 0x01, 0x12, 0x1f, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x08, 0x48, 0x02, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, - 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, - 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x48, - 0x03, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x88, 0x01, 0x01, - 0x12, 0x1b, 0x0a, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, - 0x48, 0x04, 0x52, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, - 0x0a, 0x5f, 0x73, 0x74, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, - 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, - 0x72, 0x61, 0x74, 0x6f, 0x72, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, - 0x22, 0xbe, 0x04, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x65, 0x70, 0x4f, 0x70, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, - 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 
0x09, - 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x16, - 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, - 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x12, 0x18, 0x0a, - 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, - 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x0b, 0x72, 0x61, 0x74, 0x65, 0x5f, - 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x65, 0x70, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, - 0x69, 0x74, 0x52, 0x0a, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x4e, - 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, - 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x65, 0x70, 0x4f, 0x70, 0x74, 0x73, 0x2e, 0x57, - 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x2a, - 0x0a, 0x0e, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, - 0x18, 0x0a, 0x20, 0x01, 0x28, 
0x02, 0x48, 0x00, 0x52, 0x0d, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, - 0x66, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x62, 0x61, - 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, - 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x48, 0x01, 0x52, 0x11, 0x62, 0x61, 0x63, 0x6b, 0x6f, - 0x66, 0x66, 0x4d, 0x61, 0x78, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x88, 0x01, 0x01, 0x1a, - 0x55, 0x0a, 0x11, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x57, - 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, - 0x66, 0x66, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x62, 0x61, - 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, - 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x65, 0x70, - 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x19, 0x0a, 0x05, 0x75, - 0x6e, 0x69, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x05, 0x75, 0x6e, - 0x69, 0x74, 0x73, 0x88, 0x01, 0x01, 0x12, 0x1e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x65, 0x78, - 0x70, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x45, - 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 0x0a, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x5f, - 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 
0x02, 0x52, 0x09, 0x75, 0x6e, - 0x69, 0x74, 0x73, 0x45, 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x11, 0x6c, 0x69, - 0x6d, 0x69, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, 0x0f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x73, 0x45, 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x08, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x12, 0x2e, - 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x48, 0x04, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, - 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x6b, - 0x65, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x75, 0x6e, 0x69, 0x74, - 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0b, 0x0a, 0x09, - 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x16, 0x0a, 0x14, 0x4c, 0x69, 0x73, - 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x22, 0xf2, 0x03, 0x0a, 0x17, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x38, 0x0a, 0x09, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x69, - 0x6e, 
0x70, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, - 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, - 0x88, 0x01, 0x01, 0x12, 0x41, 0x0a, 0x1b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x61, - 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, - 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x17, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, - 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x48, 0x02, 0x52, 0x0a, 0x63, - 0x68, 0x69, 0x6c, 0x64, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x88, 0x01, 0x01, 0x12, 0x20, 0x0a, 0x09, - 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, - 0x03, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x88, 0x01, 0x01, 0x12, 0x34, - 0x0a, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x48, 0x04, 0x52, 0x12, 0x61, - 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x88, 0x01, 0x01, 0x12, 0x1f, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x48, 0x05, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, - 0x74, 0x79, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x5f, 0x69, 0x64, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, - 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x5f, 0x69, 0x64, 0x42, 0x0e, 0x0a, 0x0c, 
0x5f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x6e, - 0x64, 0x65, 0x78, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x6b, 0x65, - 0x79, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x70, 0x72, - 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x22, 0x5e, 0x0a, 0x11, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, - 0x6c, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x0e, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x39, 0x0a, 0x0a, 0x74, - 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x72, 0x69, - 0x67, 0x67, 0x65, 0x72, 0x41, 0x74, 0x22, 0xad, 0x02, 0x0a, 0x0f, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, - 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, - 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 
0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x72, - 0x64, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6f, 0x72, 0x64, 0x65, 0x72, - 0x12, 0x1f, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, - 0x64, 0x12, 0x43, 0x0a, 0x13, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, - 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x52, 0x12, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x22, 0x53, 0x0a, 0x17, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, - 0x66, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1b, - 0x0a, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x22, 0x49, 0x0a, 0x16, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x43, 0x72, - 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x72, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x63, 0x72, 0x6f, 0x6e, 0x22, 0x53, 0x0a, 0x1a, 0x42, 0x75, 0x6c, 0x6b, 0x54, 0x72, - 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 
0x74, 0x12, 0x35, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, - 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x22, 0x47, 0x0a, 0x1b, 0x42, - 0x75, 0x6c, 0x6b, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x77, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, - 0x6e, 0x49, 0x64, 0x73, 0x22, 0xc2, 0x05, 0x0a, 0x16, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x31, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2f, 0x74, 0x72, + 0x69, 0x67, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x44, 0x0a, 0x12, 0x50, + 0x75, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x6f, 0x70, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x73, 0x52, 0x04, 0x6f, 0x70, 0x74, + 0x73, 0x22, 0xe7, 0x05, 0x0a, 0x19, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x73, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 
0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x25, 0x0a, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x72, + 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x72, 0x6f, 0x6e, 0x5f, 0x74, + 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x63, + 0x72, 0x6f, 0x6e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, 0x12, 0x49, 0x0a, 0x12, 0x73, + 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, + 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x11, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x54, 0x72, + 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, 0x12, 0x2a, 0x0a, 0x04, 0x6a, 0x6f, 0x62, 0x73, 0x18, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x4f, 0x70, 0x74, 0x73, 0x52, 0x04, 0x6a, 0x6f, + 0x62, 0x73, 0x12, 0x3a, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, + 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4f, 0x70, 0x74, + 0x73, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2e, + 0x0a, 0x10, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0f, 0x73, 0x63, 0x68, 0x65, + 0x64, 0x75, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x88, 0x01, 0x01, 0x12, 
0x22, + 0x0a, 0x0a, 0x63, 0x72, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x01, 0x52, 0x09, 0x63, 0x72, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x88, + 0x01, 0x01, 0x12, 0x41, 0x0a, 0x0e, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, + 0x5f, 0x6a, 0x6f, 0x62, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x4f, 0x70, + 0x74, 0x73, 0x48, 0x02, 0x52, 0x0c, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x4a, + 0x6f, 0x62, 0x88, 0x01, 0x01, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x53, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x53, 0x74, + 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x48, 0x03, 0x52, 0x06, 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, + 0x88, 0x01, 0x01, 0x12, 0x26, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x0d, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4b, 0x69, 0x6e, 0x64, + 0x48, 0x04, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x88, 0x01, 0x01, 0x12, 0x2e, 0x0a, 0x10, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x05, 0x48, 0x05, 0x52, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x88, 0x01, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, + 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x63, 0x72, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x42, + 0x11, 0x0a, 0x0f, 0x5f, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x6a, + 0x6f, 0x62, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x42, 0x07, 0x0a, + 0x05, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x5f, 0x70, 0x72, 
0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x22, 0xfc, 0x01, 0x0a, 0x17, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x88, 0x01, 0x01, 0x12, 0x1e, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x75, 0x6e, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x48, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x75, 0x6e, + 0x73, 0x88, 0x01, 0x01, 0x12, 0x45, 0x0a, 0x0e, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x74, + 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x43, + 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, + 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x48, 0x02, 0x52, 0x0d, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x88, 0x01, 0x01, 0x12, 0x23, 0x0a, 0x0a, 0x65, + 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x03, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, + 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x0a, 0x09, 0x5f, + 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x75, 0x6e, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, + 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x82, 0x01, 0x0a, 0x15, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, + 0x4f, 0x70, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, + 0x65, 0x70, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x65, 0x70, 0x4f, 0x70, + 0x74, 0x73, 0x52, 0x05, 0x73, 0x74, 0x65, 0x70, 0x73, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, + 0xc1, 0x04, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x53, 0x74, 0x65, 0x70, 0x4f, 0x70, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, + 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x72, 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x16, 0x0a, + 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x69, + 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x12, + 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x12, 0x18, 0x0a, 0x07, + 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x72, + 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x0b, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x65, 0x70, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 
0x52, 0x0a, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x4e, 0x0a, + 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x09, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x65, 0x70, 0x4f, 0x70, 0x74, 0x73, 0x2e, 0x57, 0x6f, + 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x2a, 0x0a, + 0x0e, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x02, 0x48, 0x00, 0x52, 0x0d, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, + 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x62, 0x61, 0x63, + 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x48, 0x01, 0x52, 0x11, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, + 0x66, 0x4d, 0x61, 0x78, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x88, 0x01, 0x01, 0x1a, 0x58, + 0x0a, 0x11, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, + 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x62, 0x61, 0x63, + 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x42, 0x16, 0x0a, 0x14, 0x5f, + 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x22, 0xb5, 0x02, 0x0a, 
0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, + 0x65, 0x70, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x19, 0x0a, + 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x05, + 0x75, 0x6e, 0x69, 0x74, 0x73, 0x88, 0x01, 0x01, 0x12, 0x1e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, + 0x65, 0x78, 0x70, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x07, 0x6b, 0x65, + 0x79, 0x45, 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 0x0a, 0x75, 0x6e, 0x69, 0x74, + 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, 0x52, 0x09, + 0x75, 0x6e, 0x69, 0x74, 0x73, 0x45, 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x11, + 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x5f, 0x65, 0x78, 0x70, + 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, 0x0f, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x45, 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, + 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x12, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x48, 0x04, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, + 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x42, 0x0b, 0x0a, 0x09, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x75, 0x6e, + 0x69, 0x74, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0b, + 0x0a, 0x09, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x16, 0x0a, 0x14, 0x4c, + 0x69, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 
0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0xf2, 0x03, 0x0a, 0x17, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x41, 0x0a, 0x1b, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, - 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x01, 0x52, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, - 0x6e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x24, - 0x0a, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x05, 0x48, 0x02, 0x52, 0x0a, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x49, 0x6e, 0x64, 0x65, - 0x78, 0x88, 0x01, 0x01, 0x12, 0x20, 0x0a, 0x09, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x6b, 0x65, - 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, - 0x4b, 0x65, 0x79, 0x88, 0x01, 0x01, 0x12, 0x34, 0x0a, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x04, 0x52, 0x12, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, - 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x88, 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x11, - 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x18, 0x08, 0x20, 
0x01, 0x28, 0x09, 0x48, 0x05, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x69, 0x72, - 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x1f, 0x0a, - 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x48, - 0x06, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x88, 0x01, 0x01, 0x12, 0x64, - 0x0a, 0x15, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, - 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, + 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, + 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x69, 0x6e, + 0x70, 0x75, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x41, 0x0a, 0x1b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, + 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x17, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x63, 0x68, 0x69, 0x6c, + 0x64, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x48, 0x02, 0x52, + 0x0a, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x88, 0x01, 0x01, 0x12, 0x20, + 0x0a, 0x09, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x6b, 0x65, 
0x79, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x48, 0x03, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x88, 0x01, 0x01, + 0x12, 0x34, 0x0a, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x48, 0x04, 0x52, + 0x12, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x88, 0x01, 0x01, 0x12, 0x1f, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, + 0x74, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x48, 0x05, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, + 0x72, 0x69, 0x74, 0x79, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, + 0x6b, 0x65, 0x79, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x0b, 0x0a, 0x09, 0x5f, + 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x22, 0x5e, 0x0a, 0x11, 0x53, 0x63, 0x68, 0x65, + 0x64, 0x75, 0x6c, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x39, 0x0a, + 0x0a, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, + 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x41, 0x74, 0x22, 0xad, 0x02, 0x0a, 0x0f, 0x57, 0x6f, 0x72, + 
0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x39, 0x0a, 0x0a, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, + 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, + 0x6f, 0x72, 0x64, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6f, 0x72, 0x64, + 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, + 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x49, 0x64, 0x12, 0x43, 0x0a, 0x13, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, + 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x12, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x22, 0x53, 0x0a, 0x17, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x52, 0x65, 0x66, 0x12, 0x1b, 0x0a, 
0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, + 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x22, 0x49, 0x0a, + 0x16, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, + 0x43, 0x72, 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x72, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x63, 0x72, 0x6f, 0x6e, 0x22, 0x56, 0x0a, 0x1a, 0x42, 0x75, 0x6c, 0x6b, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x57, 0x6f, - 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x13, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, - 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x5c, 0x0a, 0x18, 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x57, - 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x65, - 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, - 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 
0x61, 0x73, 0x6b, - 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, - 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, - 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x42, 0x16, - 0x0a, 0x14, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x64, 0x65, 0x73, 0x69, 0x72, - 0x65, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x42, 0x0b, 0x0a, 0x09, - 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x22, 0x41, 0x0a, 0x17, 0x54, 0x72, 0x69, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x31, 0x2e, 0x54, + 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, + 0x22, 0x47, 0x0a, 0x1b, 0x42, 0x75, 0x6c, 0x6b, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x28, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, + 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x73, 0x22, 0x41, 0x0a, 0x17, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x77, @@ -2032,48 +1692,40 @@ var file_workflows_proto_rawDesc = []byte{ 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x51, 0x55, 0x45, 
0x55, 0x45, 0x5f, 0x4e, 0x45, 0x57, 0x45, 0x53, 0x54, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x52, 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x52, 0x4f, 0x42, 0x49, 0x4e, 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x43, - 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x5f, 0x4e, 0x45, 0x57, 0x45, 0x53, 0x54, 0x10, 0x04, 0x2a, 0x85, - 0x01, 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x43, 0x6f, - 0x6d, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x51, 0x55, 0x41, - 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, - 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x47, 0x52, 0x45, 0x41, 0x54, 0x45, 0x52, 0x5f, 0x54, 0x48, - 0x41, 0x4e, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, 0x47, 0x52, 0x45, 0x41, 0x54, 0x45, 0x52, 0x5f, - 0x54, 0x48, 0x41, 0x4e, 0x5f, 0x4f, 0x52, 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x03, 0x12, - 0x0d, 0x0a, 0x09, 0x4c, 0x45, 0x53, 0x53, 0x5f, 0x54, 0x48, 0x41, 0x4e, 0x10, 0x04, 0x12, 0x16, - 0x0a, 0x12, 0x4c, 0x45, 0x53, 0x53, 0x5f, 0x54, 0x48, 0x41, 0x4e, 0x5f, 0x4f, 0x52, 0x5f, 0x45, - 0x51, 0x55, 0x41, 0x4c, 0x10, 0x05, 0x2a, 0x5d, 0x0a, 0x11, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, - 0x6d, 0x69, 0x74, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x53, - 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x49, 0x4e, 0x55, 0x54, - 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x4f, 0x55, 0x52, 0x10, 0x02, 0x12, 0x07, 0x0a, - 0x03, 0x44, 0x41, 0x59, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x57, 0x45, 0x45, 0x4b, 0x10, 0x04, - 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x4f, 0x4e, 0x54, 0x48, 0x10, 0x05, 0x12, 0x08, 0x0a, 0x04, 0x59, - 0x45, 0x41, 0x52, 0x10, 0x06, 0x32, 0xdc, 0x02, 0x0a, 0x0f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x0b, 0x50, 0x75, 0x74, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x13, 0x2e, 0x50, 0x75, 0x74, 0x57, 0x6f, - 
0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x3e, 0x0a, 0x10, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x12, 0x18, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x44, 0x0a, 0x0f, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x12, 0x17, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x54, 0x72, - 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x13, 0x42, 0x75, 0x6c, 0x6b, 0x54, 0x72, 0x69, - 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x1b, 0x2e, 0x42, - 0x75, 0x6c, 0x6b, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x42, 0x75, 0x6c, 0x6b, + 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x5f, 0x4e, 0x45, 0x57, 0x45, 0x53, 0x54, 0x10, 0x04, 0x2a, 0x5d, + 0x0a, 0x11, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x10, 0x00, 0x12, + 0x0a, 0x0a, 0x06, 0x4d, 0x49, 0x4e, 0x55, 0x54, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, + 0x4f, 0x55, 0x52, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x41, 0x59, 0x10, 0x03, 0x12, 0x08, + 0x0a, 0x04, 0x57, 0x45, 0x45, 0x4b, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x4f, 0x4e, 0x54, + 0x48, 0x10, 0x05, 0x12, 0x08, 0x0a, 
0x04, 0x59, 0x45, 0x41, 0x52, 0x10, 0x06, 0x32, 0xdf, 0x02, + 0x0a, 0x0f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x34, 0x0a, 0x0b, 0x50, 0x75, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x12, 0x13, 0x2e, 0x50, 0x75, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, 0x10, 0x53, 0x63, 0x68, 0x65, 0x64, + 0x75, 0x6c, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x18, 0x2e, 0x53, 0x63, + 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x47, 0x0a, 0x0f, 0x54, 0x72, 0x69, 0x67, 0x67, + 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x1a, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x0c, 0x50, 0x75, 0x74, 0x52, 0x61, - 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x14, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x61, 0x74, - 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, - 0x50, 0x75, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x68, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 
0x74, 0x6f, 0x33, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x50, 0x0a, 0x13, 0x42, 0x75, 0x6c, 0x6b, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x1b, 0x2e, 0x42, 0x75, 0x6c, 0x6b, 0x54, 0x72, + 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x42, 0x75, 0x6c, 0x6b, 0x54, 0x72, 0x69, 0x67, 0x67, + 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x0c, 0x50, 0x75, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x12, 0x14, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, + 0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x74, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x74, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, + 0x63, 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2088,76 +1740,71 @@ func file_workflows_proto_rawDescGZIP() []byte { return file_workflows_proto_rawDescData } -var file_workflows_proto_enumTypes = make([]protoimpl.EnumInfo, 5) -var file_workflows_proto_msgTypes = make([]protoimpl.MessageInfo, 21) +var file_workflows_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_workflows_proto_msgTypes = make([]protoimpl.MessageInfo, 18) var file_workflows_proto_goTypes = 
[]interface{}{ (StickyStrategy)(0), // 0: StickyStrategy (WorkflowKind)(0), // 1: WorkflowKind (ConcurrencyLimitStrategy)(0), // 2: ConcurrencyLimitStrategy - (WorkerLabelComparator)(0), // 3: WorkerLabelComparator - (RateLimitDuration)(0), // 4: RateLimitDuration - (*PutWorkflowRequest)(nil), // 5: PutWorkflowRequest - (*CreateWorkflowVersionOpts)(nil), // 6: CreateWorkflowVersionOpts - (*WorkflowConcurrencyOpts)(nil), // 7: WorkflowConcurrencyOpts - (*CreateWorkflowJobOpts)(nil), // 8: CreateWorkflowJobOpts - (*DesiredWorkerLabels)(nil), // 9: DesiredWorkerLabels - (*CreateWorkflowStepOpts)(nil), // 10: CreateWorkflowStepOpts - (*CreateStepRateLimit)(nil), // 11: CreateStepRateLimit - (*ListWorkflowsRequest)(nil), // 12: ListWorkflowsRequest - (*ScheduleWorkflowRequest)(nil), // 13: ScheduleWorkflowRequest - (*ScheduledWorkflow)(nil), // 14: ScheduledWorkflow - (*WorkflowVersion)(nil), // 15: WorkflowVersion - (*WorkflowTriggerEventRef)(nil), // 16: WorkflowTriggerEventRef - (*WorkflowTriggerCronRef)(nil), // 17: WorkflowTriggerCronRef - (*BulkTriggerWorkflowRequest)(nil), // 18: BulkTriggerWorkflowRequest - (*BulkTriggerWorkflowResponse)(nil), // 19: BulkTriggerWorkflowResponse - (*TriggerWorkflowRequest)(nil), // 20: TriggerWorkflowRequest - (*TriggerWorkflowResponse)(nil), // 21: TriggerWorkflowResponse - (*PutRateLimitRequest)(nil), // 22: PutRateLimitRequest - (*PutRateLimitResponse)(nil), // 23: PutRateLimitResponse - nil, // 24: CreateWorkflowStepOpts.WorkerLabelsEntry - nil, // 25: TriggerWorkflowRequest.DesiredWorkerLabelsEntry - (*timestamppb.Timestamp)(nil), // 26: google.protobuf.Timestamp + (RateLimitDuration)(0), // 3: RateLimitDuration + (*PutWorkflowRequest)(nil), // 4: PutWorkflowRequest + (*CreateWorkflowVersionOpts)(nil), // 5: CreateWorkflowVersionOpts + (*WorkflowConcurrencyOpts)(nil), // 6: WorkflowConcurrencyOpts + (*CreateWorkflowJobOpts)(nil), // 7: CreateWorkflowJobOpts + (*CreateWorkflowStepOpts)(nil), // 8: CreateWorkflowStepOpts + 
(*CreateStepRateLimit)(nil), // 9: CreateStepRateLimit + (*ListWorkflowsRequest)(nil), // 10: ListWorkflowsRequest + (*ScheduleWorkflowRequest)(nil), // 11: ScheduleWorkflowRequest + (*ScheduledWorkflow)(nil), // 12: ScheduledWorkflow + (*WorkflowVersion)(nil), // 13: WorkflowVersion + (*WorkflowTriggerEventRef)(nil), // 14: WorkflowTriggerEventRef + (*WorkflowTriggerCronRef)(nil), // 15: WorkflowTriggerCronRef + (*BulkTriggerWorkflowRequest)(nil), // 16: BulkTriggerWorkflowRequest + (*BulkTriggerWorkflowResponse)(nil), // 17: BulkTriggerWorkflowResponse + (*TriggerWorkflowResponse)(nil), // 18: TriggerWorkflowResponse + (*PutRateLimitRequest)(nil), // 19: PutRateLimitRequest + (*PutRateLimitResponse)(nil), // 20: PutRateLimitResponse + nil, // 21: CreateWorkflowStepOpts.WorkerLabelsEntry + (*timestamppb.Timestamp)(nil), // 22: google.protobuf.Timestamp + (*v1.TriggerWorkflowRequest)(nil), // 23: v1.TriggerWorkflowRequest + (*v1.DesiredWorkerLabels)(nil), // 24: v1.DesiredWorkerLabels } var file_workflows_proto_depIdxs = []int32{ - 6, // 0: PutWorkflowRequest.opts:type_name -> CreateWorkflowVersionOpts - 26, // 1: CreateWorkflowVersionOpts.scheduled_triggers:type_name -> google.protobuf.Timestamp - 8, // 2: CreateWorkflowVersionOpts.jobs:type_name -> CreateWorkflowJobOpts - 7, // 3: CreateWorkflowVersionOpts.concurrency:type_name -> WorkflowConcurrencyOpts - 8, // 4: CreateWorkflowVersionOpts.on_failure_job:type_name -> CreateWorkflowJobOpts + 5, // 0: PutWorkflowRequest.opts:type_name -> CreateWorkflowVersionOpts + 22, // 1: CreateWorkflowVersionOpts.scheduled_triggers:type_name -> google.protobuf.Timestamp + 7, // 2: CreateWorkflowVersionOpts.jobs:type_name -> CreateWorkflowJobOpts + 6, // 3: CreateWorkflowVersionOpts.concurrency:type_name -> WorkflowConcurrencyOpts + 7, // 4: CreateWorkflowVersionOpts.on_failure_job:type_name -> CreateWorkflowJobOpts 0, // 5: CreateWorkflowVersionOpts.sticky:type_name -> StickyStrategy 1, // 6: 
CreateWorkflowVersionOpts.kind:type_name -> WorkflowKind 2, // 7: WorkflowConcurrencyOpts.limit_strategy:type_name -> ConcurrencyLimitStrategy - 10, // 8: CreateWorkflowJobOpts.steps:type_name -> CreateWorkflowStepOpts - 3, // 9: DesiredWorkerLabels.comparator:type_name -> WorkerLabelComparator - 11, // 10: CreateWorkflowStepOpts.rate_limits:type_name -> CreateStepRateLimit - 24, // 11: CreateWorkflowStepOpts.worker_labels:type_name -> CreateWorkflowStepOpts.WorkerLabelsEntry - 4, // 12: CreateStepRateLimit.duration:type_name -> RateLimitDuration - 26, // 13: ScheduleWorkflowRequest.schedules:type_name -> google.protobuf.Timestamp - 26, // 14: ScheduledWorkflow.trigger_at:type_name -> google.protobuf.Timestamp - 26, // 15: WorkflowVersion.created_at:type_name -> google.protobuf.Timestamp - 26, // 16: WorkflowVersion.updated_at:type_name -> google.protobuf.Timestamp - 14, // 17: WorkflowVersion.scheduled_workflows:type_name -> ScheduledWorkflow - 20, // 18: BulkTriggerWorkflowRequest.workflows:type_name -> TriggerWorkflowRequest - 25, // 19: TriggerWorkflowRequest.desired_worker_labels:type_name -> TriggerWorkflowRequest.DesiredWorkerLabelsEntry - 4, // 20: PutRateLimitRequest.duration:type_name -> RateLimitDuration - 9, // 21: CreateWorkflowStepOpts.WorkerLabelsEntry.value:type_name -> DesiredWorkerLabels - 9, // 22: TriggerWorkflowRequest.DesiredWorkerLabelsEntry.value:type_name -> DesiredWorkerLabels - 5, // 23: WorkflowService.PutWorkflow:input_type -> PutWorkflowRequest - 13, // 24: WorkflowService.ScheduleWorkflow:input_type -> ScheduleWorkflowRequest - 20, // 25: WorkflowService.TriggerWorkflow:input_type -> TriggerWorkflowRequest - 18, // 26: WorkflowService.BulkTriggerWorkflow:input_type -> BulkTriggerWorkflowRequest - 22, // 27: WorkflowService.PutRateLimit:input_type -> PutRateLimitRequest - 15, // 28: WorkflowService.PutWorkflow:output_type -> WorkflowVersion - 15, // 29: WorkflowService.ScheduleWorkflow:output_type -> WorkflowVersion - 21, // 30: 
WorkflowService.TriggerWorkflow:output_type -> TriggerWorkflowResponse - 19, // 31: WorkflowService.BulkTriggerWorkflow:output_type -> BulkTriggerWorkflowResponse - 23, // 32: WorkflowService.PutRateLimit:output_type -> PutRateLimitResponse - 28, // [28:33] is the sub-list for method output_type - 23, // [23:28] is the sub-list for method input_type - 23, // [23:23] is the sub-list for extension type_name - 23, // [23:23] is the sub-list for extension extendee - 0, // [0:23] is the sub-list for field type_name + 8, // 8: CreateWorkflowJobOpts.steps:type_name -> CreateWorkflowStepOpts + 9, // 9: CreateWorkflowStepOpts.rate_limits:type_name -> CreateStepRateLimit + 21, // 10: CreateWorkflowStepOpts.worker_labels:type_name -> CreateWorkflowStepOpts.WorkerLabelsEntry + 3, // 11: CreateStepRateLimit.duration:type_name -> RateLimitDuration + 22, // 12: ScheduleWorkflowRequest.schedules:type_name -> google.protobuf.Timestamp + 22, // 13: ScheduledWorkflow.trigger_at:type_name -> google.protobuf.Timestamp + 22, // 14: WorkflowVersion.created_at:type_name -> google.protobuf.Timestamp + 22, // 15: WorkflowVersion.updated_at:type_name -> google.protobuf.Timestamp + 12, // 16: WorkflowVersion.scheduled_workflows:type_name -> ScheduledWorkflow + 23, // 17: BulkTriggerWorkflowRequest.workflows:type_name -> v1.TriggerWorkflowRequest + 3, // 18: PutRateLimitRequest.duration:type_name -> RateLimitDuration + 24, // 19: CreateWorkflowStepOpts.WorkerLabelsEntry.value:type_name -> v1.DesiredWorkerLabels + 4, // 20: WorkflowService.PutWorkflow:input_type -> PutWorkflowRequest + 11, // 21: WorkflowService.ScheduleWorkflow:input_type -> ScheduleWorkflowRequest + 23, // 22: WorkflowService.TriggerWorkflow:input_type -> v1.TriggerWorkflowRequest + 16, // 23: WorkflowService.BulkTriggerWorkflow:input_type -> BulkTriggerWorkflowRequest + 19, // 24: WorkflowService.PutRateLimit:input_type -> PutRateLimitRequest + 13, // 25: WorkflowService.PutWorkflow:output_type -> WorkflowVersion + 13, // 
26: WorkflowService.ScheduleWorkflow:output_type -> WorkflowVersion + 18, // 27: WorkflowService.TriggerWorkflow:output_type -> TriggerWorkflowResponse + 17, // 28: WorkflowService.BulkTriggerWorkflow:output_type -> BulkTriggerWorkflowResponse + 20, // 29: WorkflowService.PutRateLimit:output_type -> PutRateLimitResponse + 25, // [25:30] is the sub-list for method output_type + 20, // [20:25] is the sub-list for method input_type + 20, // [20:20] is the sub-list for extension type_name + 20, // [20:20] is the sub-list for extension extendee + 0, // [0:20] is the sub-list for field type_name } func init() { file_workflows_proto_init() } @@ -2215,18 +1862,6 @@ func file_workflows_proto_init() { } } file_workflows_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DesiredWorkerLabels); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_workflows_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CreateWorkflowStepOpts); i { case 0: return &v.state @@ -2238,7 +1873,7 @@ func file_workflows_proto_init() { return nil } } - file_workflows_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_workflows_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CreateStepRateLimit); i { case 0: return &v.state @@ -2250,7 +1885,7 @@ func file_workflows_proto_init() { return nil } } - file_workflows_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_workflows_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListWorkflowsRequest); i { case 0: return &v.state @@ -2262,7 +1897,7 @@ func file_workflows_proto_init() { return nil } } - file_workflows_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_workflows_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v 
:= v.(*ScheduleWorkflowRequest); i { case 0: return &v.state @@ -2274,7 +1909,7 @@ func file_workflows_proto_init() { return nil } } - file_workflows_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_workflows_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ScheduledWorkflow); i { case 0: return &v.state @@ -2286,7 +1921,7 @@ func file_workflows_proto_init() { return nil } } - file_workflows_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_workflows_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WorkflowVersion); i { case 0: return &v.state @@ -2298,7 +1933,7 @@ func file_workflows_proto_init() { return nil } } - file_workflows_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_workflows_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WorkflowTriggerEventRef); i { case 0: return &v.state @@ -2310,7 +1945,7 @@ func file_workflows_proto_init() { return nil } } - file_workflows_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_workflows_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WorkflowTriggerCronRef); i { case 0: return &v.state @@ -2322,7 +1957,7 @@ func file_workflows_proto_init() { return nil } } - file_workflows_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_workflows_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BulkTriggerWorkflowRequest); i { case 0: return &v.state @@ -2334,7 +1969,7 @@ func file_workflows_proto_init() { return nil } } - file_workflows_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_workflows_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BulkTriggerWorkflowResponse); i { case 0: return &v.state @@ -2346,19 +1981,7 @@ func 
file_workflows_proto_init() { return nil } } - file_workflows_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TriggerWorkflowRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_workflows_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_workflows_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*TriggerWorkflowResponse); i { case 0: return &v.state @@ -2370,7 +1993,7 @@ func file_workflows_proto_init() { return nil } } - file_workflows_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_workflows_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PutRateLimitRequest); i { case 0: return &v.state @@ -2382,7 +2005,7 @@ func file_workflows_proto_init() { return nil } } - file_workflows_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_workflows_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PutRateLimitResponse); i { case 0: return &v.state @@ -2399,16 +2022,14 @@ func file_workflows_proto_init() { file_workflows_proto_msgTypes[2].OneofWrappers = []interface{}{} file_workflows_proto_msgTypes[4].OneofWrappers = []interface{}{} file_workflows_proto_msgTypes[5].OneofWrappers = []interface{}{} - file_workflows_proto_msgTypes[6].OneofWrappers = []interface{}{} - file_workflows_proto_msgTypes[8].OneofWrappers = []interface{}{} - file_workflows_proto_msgTypes[15].OneofWrappers = []interface{}{} + file_workflows_proto_msgTypes[7].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_workflows_proto_rawDesc, - NumEnums: 5, - NumMessages: 21, + NumEnums: 4, + NumMessages: 18, NumExtensions: 0, NumServices: 1, }, diff --git 
a/internal/services/admin/contracts/workflows/workflows.pb.go b/internal/services/admin/contracts/workflows/workflows.pb.go new file mode 100644 index 000000000..2f92f3bb6 --- /dev/null +++ b/internal/services/admin/contracts/workflows/workflows.pb.go @@ -0,0 +1,2236 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v5.29.3 +// source: workflows/workflows.proto + +package contracts + +import ( + v1 "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type StickyStrategy int32 + +const ( + StickyStrategy_SOFT StickyStrategy = 0 + StickyStrategy_HARD StickyStrategy = 1 +) + +// Enum value maps for StickyStrategy. +var ( + StickyStrategy_name = map[int32]string{ + 0: "SOFT", + 1: "HARD", + } + StickyStrategy_value = map[string]int32{ + "SOFT": 0, + "HARD": 1, + } +) + +func (x StickyStrategy) Enum() *StickyStrategy { + p := new(StickyStrategy) + *p = x + return p +} + +func (x StickyStrategy) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (StickyStrategy) Descriptor() protoreflect.EnumDescriptor { + return file_workflows_workflows_proto_enumTypes[0].Descriptor() +} + +func (StickyStrategy) Type() protoreflect.EnumType { + return &file_workflows_workflows_proto_enumTypes[0] +} + +func (x StickyStrategy) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use StickyStrategy.Descriptor instead. 
+func (StickyStrategy) EnumDescriptor() ([]byte, []int) { + return file_workflows_workflows_proto_rawDescGZIP(), []int{0} +} + +type WorkflowKind int32 + +const ( + WorkflowKind_FUNCTION WorkflowKind = 0 + WorkflowKind_DURABLE WorkflowKind = 1 + WorkflowKind_DAG WorkflowKind = 2 +) + +// Enum value maps for WorkflowKind. +var ( + WorkflowKind_name = map[int32]string{ + 0: "FUNCTION", + 1: "DURABLE", + 2: "DAG", + } + WorkflowKind_value = map[string]int32{ + "FUNCTION": 0, + "DURABLE": 1, + "DAG": 2, + } +) + +func (x WorkflowKind) Enum() *WorkflowKind { + p := new(WorkflowKind) + *p = x + return p +} + +func (x WorkflowKind) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (WorkflowKind) Descriptor() protoreflect.EnumDescriptor { + return file_workflows_workflows_proto_enumTypes[1].Descriptor() +} + +func (WorkflowKind) Type() protoreflect.EnumType { + return &file_workflows_workflows_proto_enumTypes[1] +} + +func (x WorkflowKind) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use WorkflowKind.Descriptor instead. +func (WorkflowKind) EnumDescriptor() ([]byte, []int) { + return file_workflows_workflows_proto_rawDescGZIP(), []int{1} +} + +type ConcurrencyLimitStrategy int32 + +const ( + ConcurrencyLimitStrategy_CANCEL_IN_PROGRESS ConcurrencyLimitStrategy = 0 + ConcurrencyLimitStrategy_DROP_NEWEST ConcurrencyLimitStrategy = 1 // deprecated + ConcurrencyLimitStrategy_QUEUE_NEWEST ConcurrencyLimitStrategy = 2 // deprecated + ConcurrencyLimitStrategy_GROUP_ROUND_ROBIN ConcurrencyLimitStrategy = 3 + ConcurrencyLimitStrategy_CANCEL_NEWEST ConcurrencyLimitStrategy = 4 +) + +// Enum value maps for ConcurrencyLimitStrategy. 
+var ( + ConcurrencyLimitStrategy_name = map[int32]string{ + 0: "CANCEL_IN_PROGRESS", + 1: "DROP_NEWEST", + 2: "QUEUE_NEWEST", + 3: "GROUP_ROUND_ROBIN", + 4: "CANCEL_NEWEST", + } + ConcurrencyLimitStrategy_value = map[string]int32{ + "CANCEL_IN_PROGRESS": 0, + "DROP_NEWEST": 1, + "QUEUE_NEWEST": 2, + "GROUP_ROUND_ROBIN": 3, + "CANCEL_NEWEST": 4, + } +) + +func (x ConcurrencyLimitStrategy) Enum() *ConcurrencyLimitStrategy { + p := new(ConcurrencyLimitStrategy) + *p = x + return p +} + +func (x ConcurrencyLimitStrategy) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ConcurrencyLimitStrategy) Descriptor() protoreflect.EnumDescriptor { + return file_workflows_workflows_proto_enumTypes[2].Descriptor() +} + +func (ConcurrencyLimitStrategy) Type() protoreflect.EnumType { + return &file_workflows_workflows_proto_enumTypes[2] +} + +func (x ConcurrencyLimitStrategy) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ConcurrencyLimitStrategy.Descriptor instead. +func (ConcurrencyLimitStrategy) EnumDescriptor() ([]byte, []int) { + return file_workflows_workflows_proto_rawDescGZIP(), []int{2} +} + +type WorkerLabelComparator int32 + +const ( + WorkerLabelComparator_EQUAL WorkerLabelComparator = 0 + WorkerLabelComparator_NOT_EQUAL WorkerLabelComparator = 1 + WorkerLabelComparator_GREATER_THAN WorkerLabelComparator = 2 + WorkerLabelComparator_GREATER_THAN_OR_EQUAL WorkerLabelComparator = 3 + WorkerLabelComparator_LESS_THAN WorkerLabelComparator = 4 + WorkerLabelComparator_LESS_THAN_OR_EQUAL WorkerLabelComparator = 5 +) + +// Enum value maps for WorkerLabelComparator. 
+var ( + WorkerLabelComparator_name = map[int32]string{ + 0: "EQUAL", + 1: "NOT_EQUAL", + 2: "GREATER_THAN", + 3: "GREATER_THAN_OR_EQUAL", + 4: "LESS_THAN", + 5: "LESS_THAN_OR_EQUAL", + } + WorkerLabelComparator_value = map[string]int32{ + "EQUAL": 0, + "NOT_EQUAL": 1, + "GREATER_THAN": 2, + "GREATER_THAN_OR_EQUAL": 3, + "LESS_THAN": 4, + "LESS_THAN_OR_EQUAL": 5, + } +) + +func (x WorkerLabelComparator) Enum() *WorkerLabelComparator { + p := new(WorkerLabelComparator) + *p = x + return p +} + +func (x WorkerLabelComparator) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (WorkerLabelComparator) Descriptor() protoreflect.EnumDescriptor { + return file_workflows_workflows_proto_enumTypes[3].Descriptor() +} + +func (WorkerLabelComparator) Type() protoreflect.EnumType { + return &file_workflows_workflows_proto_enumTypes[3] +} + +func (x WorkerLabelComparator) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use WorkerLabelComparator.Descriptor instead. +func (WorkerLabelComparator) EnumDescriptor() ([]byte, []int) { + return file_workflows_workflows_proto_rawDescGZIP(), []int{3} +} + +type RateLimitDuration int32 + +const ( + RateLimitDuration_SECOND RateLimitDuration = 0 + RateLimitDuration_MINUTE RateLimitDuration = 1 + RateLimitDuration_HOUR RateLimitDuration = 2 + RateLimitDuration_DAY RateLimitDuration = 3 + RateLimitDuration_WEEK RateLimitDuration = 4 + RateLimitDuration_MONTH RateLimitDuration = 5 + RateLimitDuration_YEAR RateLimitDuration = 6 +) + +// Enum value maps for RateLimitDuration. 
+var ( + RateLimitDuration_name = map[int32]string{ + 0: "SECOND", + 1: "MINUTE", + 2: "HOUR", + 3: "DAY", + 4: "WEEK", + 5: "MONTH", + 6: "YEAR", + } + RateLimitDuration_value = map[string]int32{ + "SECOND": 0, + "MINUTE": 1, + "HOUR": 2, + "DAY": 3, + "WEEK": 4, + "MONTH": 5, + "YEAR": 6, + } +) + +func (x RateLimitDuration) Enum() *RateLimitDuration { + p := new(RateLimitDuration) + *p = x + return p +} + +func (x RateLimitDuration) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RateLimitDuration) Descriptor() protoreflect.EnumDescriptor { + return file_workflows_workflows_proto_enumTypes[4].Descriptor() +} + +func (RateLimitDuration) Type() protoreflect.EnumType { + return &file_workflows_workflows_proto_enumTypes[4] +} + +func (x RateLimitDuration) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RateLimitDuration.Descriptor instead. +func (RateLimitDuration) EnumDescriptor() ([]byte, []int) { + return file_workflows_workflows_proto_rawDescGZIP(), []int{4} +} + +type PutWorkflowRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Opts *CreateWorkflowVersionOpts `protobuf:"bytes,1,opt,name=opts,proto3" json:"opts,omitempty"` +} + +func (x *PutWorkflowRequest) Reset() { + *x = PutWorkflowRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_workflows_workflows_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PutWorkflowRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PutWorkflowRequest) ProtoMessage() {} + +func (x *PutWorkflowRequest) ProtoReflect() protoreflect.Message { + mi := &file_workflows_workflows_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } 
+ return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PutWorkflowRequest.ProtoReflect.Descriptor instead. +func (*PutWorkflowRequest) Descriptor() ([]byte, []int) { + return file_workflows_workflows_proto_rawDescGZIP(), []int{0} +} + +func (x *PutWorkflowRequest) GetOpts() *CreateWorkflowVersionOpts { + if x != nil { + return x.Opts + } + return nil +} + +// CreateWorkflowVersionOpts represents options to create a workflow version. +type CreateWorkflowVersionOpts struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // (required) the workflow name + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` // (optional) the workflow description + Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` // (required) the workflow version + EventTriggers []string `protobuf:"bytes,4,rep,name=event_triggers,json=eventTriggers,proto3" json:"event_triggers,omitempty"` // (optional) event triggers for the workflow + CronTriggers []string `protobuf:"bytes,5,rep,name=cron_triggers,json=cronTriggers,proto3" json:"cron_triggers,omitempty"` // (optional) cron triggers for the workflow + ScheduledTriggers []*timestamppb.Timestamp `protobuf:"bytes,6,rep,name=scheduled_triggers,json=scheduledTriggers,proto3" json:"scheduled_triggers,omitempty"` // (optional) scheduled triggers for the workflow + Jobs []*CreateWorkflowJobOpts `protobuf:"bytes,7,rep,name=jobs,proto3" json:"jobs,omitempty"` // (required) the workflow jobs + Concurrency *WorkflowConcurrencyOpts `protobuf:"bytes,8,opt,name=concurrency,proto3" json:"concurrency,omitempty"` // (optional) the workflow concurrency options + ScheduleTimeout *string `protobuf:"bytes,9,opt,name=schedule_timeout,json=scheduleTimeout,proto3,oneof" json:"schedule_timeout,omitempty"` // (optional) the timeout for the schedule + 
CronInput *string `protobuf:"bytes,10,opt,name=cron_input,json=cronInput,proto3,oneof" json:"cron_input,omitempty"` // (optional) the input for the cron trigger + OnFailureJob *CreateWorkflowJobOpts `protobuf:"bytes,11,opt,name=on_failure_job,json=onFailureJob,proto3,oneof" json:"on_failure_job,omitempty"` // (optional) the job to run on failure + Sticky *StickyStrategy `protobuf:"varint,12,opt,name=sticky,proto3,enum=StickyStrategy,oneof" json:"sticky,omitempty"` // (optional) the sticky strategy for assigning tasks to workers + Kind *WorkflowKind `protobuf:"varint,13,opt,name=kind,proto3,enum=WorkflowKind,oneof" json:"kind,omitempty"` // (optional) the kind of workflow + DefaultPriority *int32 `protobuf:"varint,14,opt,name=default_priority,json=defaultPriority,proto3,oneof" json:"default_priority,omitempty"` // (optional) the priority of the workflow +} + +func (x *CreateWorkflowVersionOpts) Reset() { + *x = CreateWorkflowVersionOpts{} + if protoimpl.UnsafeEnabled { + mi := &file_workflows_workflows_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateWorkflowVersionOpts) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateWorkflowVersionOpts) ProtoMessage() {} + +func (x *CreateWorkflowVersionOpts) ProtoReflect() protoreflect.Message { + mi := &file_workflows_workflows_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateWorkflowVersionOpts.ProtoReflect.Descriptor instead. 
+func (*CreateWorkflowVersionOpts) Descriptor() ([]byte, []int) { + return file_workflows_workflows_proto_rawDescGZIP(), []int{1} +} + +func (x *CreateWorkflowVersionOpts) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CreateWorkflowVersionOpts) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *CreateWorkflowVersionOpts) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *CreateWorkflowVersionOpts) GetEventTriggers() []string { + if x != nil { + return x.EventTriggers + } + return nil +} + +func (x *CreateWorkflowVersionOpts) GetCronTriggers() []string { + if x != nil { + return x.CronTriggers + } + return nil +} + +func (x *CreateWorkflowVersionOpts) GetScheduledTriggers() []*timestamppb.Timestamp { + if x != nil { + return x.ScheduledTriggers + } + return nil +} + +func (x *CreateWorkflowVersionOpts) GetJobs() []*CreateWorkflowJobOpts { + if x != nil { + return x.Jobs + } + return nil +} + +func (x *CreateWorkflowVersionOpts) GetConcurrency() *WorkflowConcurrencyOpts { + if x != nil { + return x.Concurrency + } + return nil +} + +func (x *CreateWorkflowVersionOpts) GetScheduleTimeout() string { + if x != nil && x.ScheduleTimeout != nil { + return *x.ScheduleTimeout + } + return "" +} + +func (x *CreateWorkflowVersionOpts) GetCronInput() string { + if x != nil && x.CronInput != nil { + return *x.CronInput + } + return "" +} + +func (x *CreateWorkflowVersionOpts) GetOnFailureJob() *CreateWorkflowJobOpts { + if x != nil { + return x.OnFailureJob + } + return nil +} + +func (x *CreateWorkflowVersionOpts) GetSticky() StickyStrategy { + if x != nil && x.Sticky != nil { + return *x.Sticky + } + return StickyStrategy_SOFT +} + +func (x *CreateWorkflowVersionOpts) GetKind() WorkflowKind { + if x != nil && x.Kind != nil { + return *x.Kind + } + return WorkflowKind_FUNCTION +} + +func (x *CreateWorkflowVersionOpts) GetDefaultPriority() int32 { + if x != nil 
&& x.DefaultPriority != nil { + return *x.DefaultPriority + } + return 0 +} + +type WorkflowConcurrencyOpts struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Action *string `protobuf:"bytes,1,opt,name=action,proto3,oneof" json:"action,omitempty"` // (optional) the action id for getting the concurrency group + MaxRuns *int32 `protobuf:"varint,2,opt,name=max_runs,json=maxRuns,proto3,oneof" json:"max_runs,omitempty"` // (optional) the maximum number of concurrent workflow runs, default 1 + LimitStrategy *ConcurrencyLimitStrategy `protobuf:"varint,3,opt,name=limit_strategy,json=limitStrategy,proto3,enum=ConcurrencyLimitStrategy,oneof" json:"limit_strategy,omitempty"` // (optional) the strategy to use when the concurrency limit is reached, default CANCEL_IN_PROGRESS + Expression *string `protobuf:"bytes,4,opt,name=expression,proto3,oneof" json:"expression,omitempty"` // (optional) the expression to use for concurrency +} + +func (x *WorkflowConcurrencyOpts) Reset() { + *x = WorkflowConcurrencyOpts{} + if protoimpl.UnsafeEnabled { + mi := &file_workflows_workflows_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowConcurrencyOpts) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowConcurrencyOpts) ProtoMessage() {} + +func (x *WorkflowConcurrencyOpts) ProtoReflect() protoreflect.Message { + mi := &file_workflows_workflows_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowConcurrencyOpts.ProtoReflect.Descriptor instead. 
+func (*WorkflowConcurrencyOpts) Descriptor() ([]byte, []int) { + return file_workflows_workflows_proto_rawDescGZIP(), []int{2} +} + +func (x *WorkflowConcurrencyOpts) GetAction() string { + if x != nil && x.Action != nil { + return *x.Action + } + return "" +} + +func (x *WorkflowConcurrencyOpts) GetMaxRuns() int32 { + if x != nil && x.MaxRuns != nil { + return *x.MaxRuns + } + return 0 +} + +func (x *WorkflowConcurrencyOpts) GetLimitStrategy() ConcurrencyLimitStrategy { + if x != nil && x.LimitStrategy != nil { + return *x.LimitStrategy + } + return ConcurrencyLimitStrategy_CANCEL_IN_PROGRESS +} + +func (x *WorkflowConcurrencyOpts) GetExpression() string { + if x != nil && x.Expression != nil { + return *x.Expression + } + return "" +} + +// CreateWorkflowJobOpts represents options to create a workflow job. +type CreateWorkflowJobOpts struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // (required) the job name + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` // (optional) the job description + Steps []*CreateWorkflowStepOpts `protobuf:"bytes,4,rep,name=steps,proto3" json:"steps,omitempty"` // (required) the job tasks +} + +func (x *CreateWorkflowJobOpts) Reset() { + *x = CreateWorkflowJobOpts{} + if protoimpl.UnsafeEnabled { + mi := &file_workflows_workflows_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateWorkflowJobOpts) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateWorkflowJobOpts) ProtoMessage() {} + +func (x *CreateWorkflowJobOpts) ProtoReflect() protoreflect.Message { + mi := &file_workflows_workflows_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + 
ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateWorkflowJobOpts.ProtoReflect.Descriptor instead. +func (*CreateWorkflowJobOpts) Descriptor() ([]byte, []int) { + return file_workflows_workflows_proto_rawDescGZIP(), []int{3} +} + +func (x *CreateWorkflowJobOpts) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CreateWorkflowJobOpts) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *CreateWorkflowJobOpts) GetSteps() []*CreateWorkflowStepOpts { + if x != nil { + return x.Steps + } + return nil +} + +type DesiredWorkerLabels struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // value of the affinity + StrValue *string `protobuf:"bytes,1,opt,name=str_value,json=strValue,proto3,oneof" json:"str_value,omitempty"` + IntValue *int32 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof" json:"int_value,omitempty"` + // * + // (optional) Specifies whether the affinity setting is required. + // If required, the worker will not accept actions that do not have a truthy affinity setting. + // + // Defaults to false. + Required *bool `protobuf:"varint,3,opt,name=required,proto3,oneof" json:"required,omitempty"` + // * + // (optional) Specifies the comparator for the affinity setting. + // If not set, the default is EQUAL. + Comparator *WorkerLabelComparator `protobuf:"varint,4,opt,name=comparator,proto3,enum=WorkerLabelComparator,oneof" json:"comparator,omitempty"` + // * + // (optional) Specifies the weight of the affinity setting. + // If not set, the default is 100. 
+ Weight *int32 `protobuf:"varint,5,opt,name=weight,proto3,oneof" json:"weight,omitempty"` +} + +func (x *DesiredWorkerLabels) Reset() { + *x = DesiredWorkerLabels{} + if protoimpl.UnsafeEnabled { + mi := &file_workflows_workflows_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DesiredWorkerLabels) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DesiredWorkerLabels) ProtoMessage() {} + +func (x *DesiredWorkerLabels) ProtoReflect() protoreflect.Message { + mi := &file_workflows_workflows_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DesiredWorkerLabels.ProtoReflect.Descriptor instead. +func (*DesiredWorkerLabels) Descriptor() ([]byte, []int) { + return file_workflows_workflows_proto_rawDescGZIP(), []int{4} +} + +func (x *DesiredWorkerLabels) GetStrValue() string { + if x != nil && x.StrValue != nil { + return *x.StrValue + } + return "" +} + +func (x *DesiredWorkerLabels) GetIntValue() int32 { + if x != nil && x.IntValue != nil { + return *x.IntValue + } + return 0 +} + +func (x *DesiredWorkerLabels) GetRequired() bool { + if x != nil && x.Required != nil { + return *x.Required + } + return false +} + +func (x *DesiredWorkerLabels) GetComparator() WorkerLabelComparator { + if x != nil && x.Comparator != nil { + return *x.Comparator + } + return WorkerLabelComparator_EQUAL +} + +func (x *DesiredWorkerLabels) GetWeight() int32 { + if x != nil && x.Weight != nil { + return *x.Weight + } + return 0 +} + +// CreateWorkflowStepOpts represents options to create a workflow task. 
+type CreateWorkflowStepOpts struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ReadableId string `protobuf:"bytes,1,opt,name=readable_id,json=readableId,proto3" json:"readable_id,omitempty"` // (required) the task name + Action string `protobuf:"bytes,2,opt,name=action,proto3" json:"action,omitempty"` // (required) the task action id + Timeout string `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"` // (optional) the task timeout + Inputs string `protobuf:"bytes,4,opt,name=inputs,proto3" json:"inputs,omitempty"` // (optional) the task inputs, assuming string representation of JSON + Parents []string `protobuf:"bytes,5,rep,name=parents,proto3" json:"parents,omitempty"` // (optional) the task parents. if none are passed in, this is a root task + UserData string `protobuf:"bytes,6,opt,name=user_data,json=userData,proto3" json:"user_data,omitempty"` // (optional) the custom task user data, assuming string representation of JSON + Retries int32 `protobuf:"varint,7,opt,name=retries,proto3" json:"retries,omitempty"` // (optional) the number of retries for the task, default 0 + RateLimits []*CreateStepRateLimit `protobuf:"bytes,8,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits,omitempty"` // (optional) the rate limits for the task + WorkerLabels map[string]*DesiredWorkerLabels `protobuf:"bytes,9,rep,name=worker_labels,json=workerLabels,proto3" json:"worker_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // (optional) the desired worker affinity state for the task + BackoffFactor *float32 `protobuf:"fixed32,10,opt,name=backoff_factor,json=backoffFactor,proto3,oneof" json:"backoff_factor,omitempty"` // (optional) the retry backoff factor for the task + BackoffMaxSeconds *int32 `protobuf:"varint,11,opt,name=backoff_max_seconds,json=backoffMaxSeconds,proto3,oneof" json:"backoff_max_seconds,omitempty"` // (optional) the 
maximum backoff time for the task +} + +func (x *CreateWorkflowStepOpts) Reset() { + *x = CreateWorkflowStepOpts{} + if protoimpl.UnsafeEnabled { + mi := &file_workflows_workflows_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateWorkflowStepOpts) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateWorkflowStepOpts) ProtoMessage() {} + +func (x *CreateWorkflowStepOpts) ProtoReflect() protoreflect.Message { + mi := &file_workflows_workflows_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateWorkflowStepOpts.ProtoReflect.Descriptor instead. +func (*CreateWorkflowStepOpts) Descriptor() ([]byte, []int) { + return file_workflows_workflows_proto_rawDescGZIP(), []int{5} +} + +func (x *CreateWorkflowStepOpts) GetReadableId() string { + if x != nil { + return x.ReadableId + } + return "" +} + +func (x *CreateWorkflowStepOpts) GetAction() string { + if x != nil { + return x.Action + } + return "" +} + +func (x *CreateWorkflowStepOpts) GetTimeout() string { + if x != nil { + return x.Timeout + } + return "" +} + +func (x *CreateWorkflowStepOpts) GetInputs() string { + if x != nil { + return x.Inputs + } + return "" +} + +func (x *CreateWorkflowStepOpts) GetParents() []string { + if x != nil { + return x.Parents + } + return nil +} + +func (x *CreateWorkflowStepOpts) GetUserData() string { + if x != nil { + return x.UserData + } + return "" +} + +func (x *CreateWorkflowStepOpts) GetRetries() int32 { + if x != nil { + return x.Retries + } + return 0 +} + +func (x *CreateWorkflowStepOpts) GetRateLimits() []*CreateStepRateLimit { + if x != nil { + return x.RateLimits + } + return nil +} + +func (x *CreateWorkflowStepOpts) GetWorkerLabels() map[string]*DesiredWorkerLabels { + if x 
!= nil { + return x.WorkerLabels + } + return nil +} + +func (x *CreateWorkflowStepOpts) GetBackoffFactor() float32 { + if x != nil && x.BackoffFactor != nil { + return *x.BackoffFactor + } + return 0 +} + +func (x *CreateWorkflowStepOpts) GetBackoffMaxSeconds() int32 { + if x != nil && x.BackoffMaxSeconds != nil { + return *x.BackoffMaxSeconds + } + return 0 +} + +type CreateStepRateLimit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // (required) the key for the rate limit + Units *int32 `protobuf:"varint,2,opt,name=units,proto3,oneof" json:"units,omitempty"` // (optional) the number of units this task consumes + KeyExpr *string `protobuf:"bytes,3,opt,name=key_expr,json=keyExpr,proto3,oneof" json:"key_expr,omitempty"` // (optional) a CEL expression for determining the rate limit key + UnitsExpr *string `protobuf:"bytes,4,opt,name=units_expr,json=unitsExpr,proto3,oneof" json:"units_expr,omitempty"` // (optional) a CEL expression for determining the number of units consumed + LimitValuesExpr *string `protobuf:"bytes,5,opt,name=limit_values_expr,json=limitValuesExpr,proto3,oneof" json:"limit_values_expr,omitempty"` // (optional) a CEL expression for determining the total amount of rate limit units + Duration *RateLimitDuration `protobuf:"varint,6,opt,name=duration,proto3,enum=RateLimitDuration,oneof" json:"duration,omitempty"` // (optional) the default rate limit window to use for dynamic rate limits +} + +func (x *CreateStepRateLimit) Reset() { + *x = CreateStepRateLimit{} + if protoimpl.UnsafeEnabled { + mi := &file_workflows_workflows_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateStepRateLimit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateStepRateLimit) ProtoMessage() {} + +func (x *CreateStepRateLimit) 
ProtoReflect() protoreflect.Message { + mi := &file_workflows_workflows_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateStepRateLimit.ProtoReflect.Descriptor instead. +func (*CreateStepRateLimit) Descriptor() ([]byte, []int) { + return file_workflows_workflows_proto_rawDescGZIP(), []int{6} +} + +func (x *CreateStepRateLimit) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *CreateStepRateLimit) GetUnits() int32 { + if x != nil && x.Units != nil { + return *x.Units + } + return 0 +} + +func (x *CreateStepRateLimit) GetKeyExpr() string { + if x != nil && x.KeyExpr != nil { + return *x.KeyExpr + } + return "" +} + +func (x *CreateStepRateLimit) GetUnitsExpr() string { + if x != nil && x.UnitsExpr != nil { + return *x.UnitsExpr + } + return "" +} + +func (x *CreateStepRateLimit) GetLimitValuesExpr() string { + if x != nil && x.LimitValuesExpr != nil { + return *x.LimitValuesExpr + } + return "" +} + +func (x *CreateStepRateLimit) GetDuration() RateLimitDuration { + if x != nil && x.Duration != nil { + return *x.Duration + } + return RateLimitDuration_SECOND +} + +// ListWorkflowsRequest is the request for ListWorkflows. 
+type ListWorkflowsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ListWorkflowsRequest) Reset() { + *x = ListWorkflowsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_workflows_workflows_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListWorkflowsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListWorkflowsRequest) ProtoMessage() {} + +func (x *ListWorkflowsRequest) ProtoReflect() protoreflect.Message { + mi := &file_workflows_workflows_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListWorkflowsRequest.ProtoReflect.Descriptor instead. +func (*ListWorkflowsRequest) Descriptor() ([]byte, []int) { + return file_workflows_workflows_proto_rawDescGZIP(), []int{7} +} + +type ScheduleWorkflowRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Schedules []*timestamppb.Timestamp `protobuf:"bytes,2,rep,name=schedules,proto3" json:"schedules,omitempty"` + // (optional) the input data for the workflow + Input string `protobuf:"bytes,3,opt,name=input,proto3" json:"input,omitempty"` + // (optional) the parent workflow run id + ParentId *string `protobuf:"bytes,4,opt,name=parent_id,json=parentId,proto3,oneof" json:"parent_id,omitempty"` + // (optional) the parent task external run id + ParentTaskRunExternalId *string `protobuf:"bytes,5,opt,name=parent_task_run_external_id,json=parentTaskRunExternalId,proto3,oneof" json:"parent_task_run_external_id,omitempty"` + // (optional) the index of the child workflow. 
if this is set, matches on the index or the + // child key will be a no-op, even if the schedule has changed. + ChildIndex *int32 `protobuf:"varint,6,opt,name=child_index,json=childIndex,proto3,oneof" json:"child_index,omitempty"` + // (optional) the key for the child. if this is set, matches on the index or the + // child key will be a no-op, even if the schedule has changed. + ChildKey *string `protobuf:"bytes,7,opt,name=child_key,json=childKey,proto3,oneof" json:"child_key,omitempty"` + // (optional) the additional metadata for the workflow + AdditionalMetadata *string `protobuf:"bytes,8,opt,name=additional_metadata,json=additionalMetadata,proto3,oneof" json:"additional_metadata,omitempty"` + // (optional) the priority of the workflow + Priority *int32 `protobuf:"varint,9,opt,name=priority,proto3,oneof" json:"priority,omitempty"` +} + +func (x *ScheduleWorkflowRequest) Reset() { + *x = ScheduleWorkflowRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_workflows_workflows_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScheduleWorkflowRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScheduleWorkflowRequest) ProtoMessage() {} + +func (x *ScheduleWorkflowRequest) ProtoReflect() protoreflect.Message { + mi := &file_workflows_workflows_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScheduleWorkflowRequest.ProtoReflect.Descriptor instead. 
+func (*ScheduleWorkflowRequest) Descriptor() ([]byte, []int) { + return file_workflows_workflows_proto_rawDescGZIP(), []int{8} +} + +func (x *ScheduleWorkflowRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ScheduleWorkflowRequest) GetSchedules() []*timestamppb.Timestamp { + if x != nil { + return x.Schedules + } + return nil +} + +func (x *ScheduleWorkflowRequest) GetInput() string { + if x != nil { + return x.Input + } + return "" +} + +func (x *ScheduleWorkflowRequest) GetParentId() string { + if x != nil && x.ParentId != nil { + return *x.ParentId + } + return "" +} + +func (x *ScheduleWorkflowRequest) GetParentTaskRunExternalId() string { + if x != nil && x.ParentTaskRunExternalId != nil { + return *x.ParentTaskRunExternalId + } + return "" +} + +func (x *ScheduleWorkflowRequest) GetChildIndex() int32 { + if x != nil && x.ChildIndex != nil { + return *x.ChildIndex + } + return 0 +} + +func (x *ScheduleWorkflowRequest) GetChildKey() string { + if x != nil && x.ChildKey != nil { + return *x.ChildKey + } + return "" +} + +func (x *ScheduleWorkflowRequest) GetAdditionalMetadata() string { + if x != nil && x.AdditionalMetadata != nil { + return *x.AdditionalMetadata + } + return "" +} + +func (x *ScheduleWorkflowRequest) GetPriority() int32 { + if x != nil && x.Priority != nil { + return *x.Priority + } + return 0 +} + +// ScheduledWorkflow represents a scheduled workflow. 
+type ScheduledWorkflow struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + TriggerAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=trigger_at,json=triggerAt,proto3" json:"trigger_at,omitempty"` +} + +func (x *ScheduledWorkflow) Reset() { + *x = ScheduledWorkflow{} + if protoimpl.UnsafeEnabled { + mi := &file_workflows_workflows_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScheduledWorkflow) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScheduledWorkflow) ProtoMessage() {} + +func (x *ScheduledWorkflow) ProtoReflect() protoreflect.Message { + mi := &file_workflows_workflows_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScheduledWorkflow.ProtoReflect.Descriptor instead. +func (*ScheduledWorkflow) Descriptor() ([]byte, []int) { + return file_workflows_workflows_proto_rawDescGZIP(), []int{9} +} + +func (x *ScheduledWorkflow) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *ScheduledWorkflow) GetTriggerAt() *timestamppb.Timestamp { + if x != nil { + return x.TriggerAt + } + return nil +} + +// WorkflowVersion represents the WorkflowVersion model. 
+type WorkflowVersion struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` + Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"` + Order int64 `protobuf:"varint,6,opt,name=order,proto3" json:"order,omitempty"` + WorkflowId string `protobuf:"bytes,7,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + ScheduledWorkflows []*ScheduledWorkflow `protobuf:"bytes,8,rep,name=scheduled_workflows,json=scheduledWorkflows,proto3" json:"scheduled_workflows,omitempty"` +} + +func (x *WorkflowVersion) Reset() { + *x = WorkflowVersion{} + if protoimpl.UnsafeEnabled { + mi := &file_workflows_workflows_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowVersion) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowVersion) ProtoMessage() {} + +func (x *WorkflowVersion) ProtoReflect() protoreflect.Message { + mi := &file_workflows_workflows_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowVersion.ProtoReflect.Descriptor instead. 
+func (*WorkflowVersion) Descriptor() ([]byte, []int) { + return file_workflows_workflows_proto_rawDescGZIP(), []int{10} +} + +func (x *WorkflowVersion) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *WorkflowVersion) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *WorkflowVersion) GetUpdatedAt() *timestamppb.Timestamp { + if x != nil { + return x.UpdatedAt + } + return nil +} + +func (x *WorkflowVersion) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *WorkflowVersion) GetOrder() int64 { + if x != nil { + return x.Order + } + return 0 +} + +func (x *WorkflowVersion) GetWorkflowId() string { + if x != nil { + return x.WorkflowId + } + return "" +} + +func (x *WorkflowVersion) GetScheduledWorkflows() []*ScheduledWorkflow { + if x != nil { + return x.ScheduledWorkflows + } + return nil +} + +// WorkflowTriggerEventRef represents the WorkflowTriggerEventRef model. 
+type WorkflowTriggerEventRef struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ParentId string `protobuf:"bytes,1,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"` + EventKey string `protobuf:"bytes,2,opt,name=event_key,json=eventKey,proto3" json:"event_key,omitempty"` +} + +func (x *WorkflowTriggerEventRef) Reset() { + *x = WorkflowTriggerEventRef{} + if protoimpl.UnsafeEnabled { + mi := &file_workflows_workflows_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowTriggerEventRef) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowTriggerEventRef) ProtoMessage() {} + +func (x *WorkflowTriggerEventRef) ProtoReflect() protoreflect.Message { + mi := &file_workflows_workflows_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowTriggerEventRef.ProtoReflect.Descriptor instead. +func (*WorkflowTriggerEventRef) Descriptor() ([]byte, []int) { + return file_workflows_workflows_proto_rawDescGZIP(), []int{11} +} + +func (x *WorkflowTriggerEventRef) GetParentId() string { + if x != nil { + return x.ParentId + } + return "" +} + +func (x *WorkflowTriggerEventRef) GetEventKey() string { + if x != nil { + return x.EventKey + } + return "" +} + +// WorkflowTriggerCronRef represents the WorkflowTriggerCronRef model. 
+type WorkflowTriggerCronRef struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ParentId string `protobuf:"bytes,1,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"` + Cron string `protobuf:"bytes,2,opt,name=cron,proto3" json:"cron,omitempty"` +} + +func (x *WorkflowTriggerCronRef) Reset() { + *x = WorkflowTriggerCronRef{} + if protoimpl.UnsafeEnabled { + mi := &file_workflows_workflows_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowTriggerCronRef) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowTriggerCronRef) ProtoMessage() {} + +func (x *WorkflowTriggerCronRef) ProtoReflect() protoreflect.Message { + mi := &file_workflows_workflows_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowTriggerCronRef.ProtoReflect.Descriptor instead. 
+func (*WorkflowTriggerCronRef) Descriptor() ([]byte, []int) { + return file_workflows_workflows_proto_rawDescGZIP(), []int{12} +} + +func (x *WorkflowTriggerCronRef) GetParentId() string { + if x != nil { + return x.ParentId + } + return "" +} + +func (x *WorkflowTriggerCronRef) GetCron() string { + if x != nil { + return x.Cron + } + return "" +} + +type BulkTriggerWorkflowRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Workflows []*v1.TriggerWorkflowRequest `protobuf:"bytes,1,rep,name=workflows,proto3" json:"workflows,omitempty"` +} + +func (x *BulkTriggerWorkflowRequest) Reset() { + *x = BulkTriggerWorkflowRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_workflows_workflows_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BulkTriggerWorkflowRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BulkTriggerWorkflowRequest) ProtoMessage() {} + +func (x *BulkTriggerWorkflowRequest) ProtoReflect() protoreflect.Message { + mi := &file_workflows_workflows_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BulkTriggerWorkflowRequest.ProtoReflect.Descriptor instead. 
+func (*BulkTriggerWorkflowRequest) Descriptor() ([]byte, []int) { + return file_workflows_workflows_proto_rawDescGZIP(), []int{13} +} + +func (x *BulkTriggerWorkflowRequest) GetWorkflows() []*v1.TriggerWorkflowRequest { + if x != nil { + return x.Workflows + } + return nil +} + +type BulkTriggerWorkflowResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WorkflowRunIds []string `protobuf:"bytes,1,rep,name=workflow_run_ids,json=workflowRunIds,proto3" json:"workflow_run_ids,omitempty"` +} + +func (x *BulkTriggerWorkflowResponse) Reset() { + *x = BulkTriggerWorkflowResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_workflows_workflows_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BulkTriggerWorkflowResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BulkTriggerWorkflowResponse) ProtoMessage() {} + +func (x *BulkTriggerWorkflowResponse) ProtoReflect() protoreflect.Message { + mi := &file_workflows_workflows_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BulkTriggerWorkflowResponse.ProtoReflect.Descriptor instead. 
+func (*BulkTriggerWorkflowResponse) Descriptor() ([]byte, []int) { + return file_workflows_workflows_proto_rawDescGZIP(), []int{14} +} + +func (x *BulkTriggerWorkflowResponse) GetWorkflowRunIds() []string { + if x != nil { + return x.WorkflowRunIds + } + return nil +} + +type TriggerWorkflowResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WorkflowRunId string `protobuf:"bytes,1,opt,name=workflow_run_id,json=workflowRunId,proto3" json:"workflow_run_id,omitempty"` +} + +func (x *TriggerWorkflowResponse) Reset() { + *x = TriggerWorkflowResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_workflows_workflows_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TriggerWorkflowResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TriggerWorkflowResponse) ProtoMessage() {} + +func (x *TriggerWorkflowResponse) ProtoReflect() protoreflect.Message { + mi := &file_workflows_workflows_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TriggerWorkflowResponse.ProtoReflect.Descriptor instead. 
+func (*TriggerWorkflowResponse) Descriptor() ([]byte, []int) { + return file_workflows_workflows_proto_rawDescGZIP(), []int{15} +} + +func (x *TriggerWorkflowResponse) GetWorkflowRunId() string { + if x != nil { + return x.WorkflowRunId + } + return "" +} + +type PutRateLimitRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // (required) the global key for the rate limit + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // (required) the max limit for the rate limit (per unit of time) + Limit int32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` + // (required) the duration of time for the rate limit (second|minute|hour) + Duration RateLimitDuration `protobuf:"varint,3,opt,name=duration,proto3,enum=RateLimitDuration" json:"duration,omitempty"` +} + +func (x *PutRateLimitRequest) Reset() { + *x = PutRateLimitRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_workflows_workflows_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PutRateLimitRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PutRateLimitRequest) ProtoMessage() {} + +func (x *PutRateLimitRequest) ProtoReflect() protoreflect.Message { + mi := &file_workflows_workflows_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PutRateLimitRequest.ProtoReflect.Descriptor instead. 
+func (*PutRateLimitRequest) Descriptor() ([]byte, []int) { + return file_workflows_workflows_proto_rawDescGZIP(), []int{16} +} + +func (x *PutRateLimitRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *PutRateLimitRequest) GetLimit() int32 { + if x != nil { + return x.Limit + } + return 0 +} + +func (x *PutRateLimitRequest) GetDuration() RateLimitDuration { + if x != nil { + return x.Duration + } + return RateLimitDuration_SECOND +} + +type PutRateLimitResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PutRateLimitResponse) Reset() { + *x = PutRateLimitResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_workflows_workflows_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PutRateLimitResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PutRateLimitResponse) ProtoMessage() {} + +func (x *PutRateLimitResponse) ProtoReflect() protoreflect.Message { + mi := &file_workflows_workflows_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PutRateLimitResponse.ProtoReflect.Descriptor instead. 
+func (*PutRateLimitResponse) Descriptor() ([]byte, []int) { + return file_workflows_workflows_proto_rawDescGZIP(), []int{17} +} + +var File_workflows_workflows_proto protoreflect.FileDescriptor + +var file_workflows_workflows_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x2f, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x31, + 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2f, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x44, 0x0a, 0x12, 0x50, 0x75, 0x74, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x6f, + 0x70, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x4f, 0x70, 0x74, 0x73, 0x52, 0x04, 0x6f, 0x70, 0x74, 0x73, 0x22, 0xe7, 0x05, 0x0a, 0x19, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x5f, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, 0x18, 0x04, 
0x20, 0x03, 0x28, + 0x09, 0x52, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, + 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x72, 0x6f, 0x6e, 0x5f, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x72, 0x6f, 0x6e, 0x54, 0x72, 0x69, + 0x67, 0x67, 0x65, 0x72, 0x73, 0x12, 0x49, 0x0a, 0x12, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, + 0x65, 0x64, 0x5f, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x11, 0x73, + 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, + 0x12, 0x2a, 0x0a, 0x04, 0x6a, 0x6f, 0x62, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, + 0x6f, 0x62, 0x4f, 0x70, 0x74, 0x73, 0x52, 0x04, 0x6a, 0x6f, 0x62, 0x73, 0x12, 0x3a, 0x0a, 0x0b, + 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x73, 0x52, 0x0b, 0x63, 0x6f, 0x6e, + 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2e, 0x0a, 0x10, 0x73, 0x63, 0x68, 0x65, + 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x0f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 0x0a, 0x63, 0x72, 0x6f, 0x6e, + 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x09, + 0x63, 0x72, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x88, 0x01, 0x01, 0x12, 0x41, 0x0a, 0x0e, + 0x6f, 0x6e, 0x5f, 
0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x6a, 0x6f, 0x62, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x4f, 0x70, 0x74, 0x73, 0x48, 0x02, 0x52, 0x0c, + 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x4a, 0x6f, 0x62, 0x88, 0x01, 0x01, 0x12, + 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x0f, 0x2e, 0x53, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, + 0x48, 0x03, 0x52, 0x06, 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x88, 0x01, 0x01, 0x12, 0x26, 0x0a, + 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4b, 0x69, 0x6e, 0x64, 0x48, 0x04, 0x52, 0x04, 0x6b, 0x69, + 0x6e, 0x64, 0x88, 0x01, 0x01, 0x12, 0x2e, 0x0a, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x05, 0x48, + 0x05, 0x52, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, + 0x74, 0x79, 0x88, 0x01, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, + 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x63, + 0x72, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x6f, 0x6e, + 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x6a, 0x6f, 0x62, 0x42, 0x09, 0x0a, 0x07, + 0x5f, 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x6b, 0x69, 0x6e, 0x64, + 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x70, 0x72, 0x69, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x22, 0xfc, 0x01, 0x0a, 0x17, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4f, 0x70, 0x74, + 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 
0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x48, 0x00, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x1e, + 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x75, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x48, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x75, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x45, + 0x0a, 0x0e, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, + 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, + 0x79, 0x48, 0x02, 0x52, 0x0d, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, + 0x67, 0x79, 0x88, 0x01, 0x01, 0x12, 0x23, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, 0x0a, 0x65, 0x78, 0x70, + 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x75, + 0x6e, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x74, 0x72, + 0x61, 0x74, 0x65, 0x67, 0x79, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x82, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x4f, 0x70, 0x74, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 
0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x65, 0x70, 0x4f, 0x70, 0x74, 0x73, 0x52, 0x05, 0x73, 0x74, + 0x65, 0x70, 0x73, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x44, 0x65, + 0x73, 0x69, 0x72, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x12, 0x20, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x74, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x88, 0x01, 0x01, 0x12, 0x20, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x48, 0x01, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x88, 0x01, 0x01, 0x12, 0x1f, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x02, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, + 0x72, 0x65, 0x64, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, + 0x61, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x57, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x61, 0x74, + 0x6f, 0x72, 0x48, 0x03, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, + 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x05, 0x48, 0x04, 0x52, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x88, 0x01, 0x01, + 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x73, 0x74, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0c, + 0x0a, 0x0a, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x0a, 0x09, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x63, 0x6f, + 0x6d, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x77, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x22, 0xbe, 0x04, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 
0x77, 0x53, 0x74, 0x65, 0x70, 0x4f, 0x70, 0x74, 0x73, 0x12, 0x1f, + 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x64, 0x12, + 0x16, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, + 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x0b, 0x72, 0x61, + 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x65, 0x70, 0x52, 0x61, 0x74, 0x65, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x0a, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, + 0x73, 0x12, 0x4e, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x65, 0x70, 0x4f, 0x70, 0x74, + 0x73, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 
0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x12, 0x2a, 0x0a, 0x0e, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x66, 0x61, 0x63, + 0x74, 0x6f, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x02, 0x48, 0x00, 0x52, 0x0d, 0x62, 0x61, 0x63, + 0x6b, 0x6f, 0x66, 0x66, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, + 0x13, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x48, 0x01, 0x52, 0x11, 0x62, 0x61, + 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x4d, 0x61, 0x78, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x88, + 0x01, 0x01, 0x1a, 0x55, 0x0a, 0x11, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x44, 0x65, 0x73, 0x69, 0x72, + 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x62, 0x61, + 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x42, 0x16, 0x0a, 0x14, + 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, + 0x74, 0x65, 0x70, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x19, + 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, + 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x88, 0x01, 0x01, 0x12, 0x1e, 0x0a, 0x08, 0x6b, 0x65, 0x79, + 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x07, 0x6b, + 0x65, 
0x79, 0x45, 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 0x0a, 0x75, 0x6e, 0x69, + 0x74, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, 0x52, + 0x09, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x45, 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12, 0x2f, 0x0a, + 0x11, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x5f, 0x65, 0x78, + 0x70, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, 0x0f, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x45, 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12, 0x33, + 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x12, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x04, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x42, 0x0b, 0x0a, + 0x09, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x75, + 0x6e, 0x69, 0x74, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42, + 0x0b, 0x0a, 0x09, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x16, 0x0a, 0x14, + 0x4c, 0x69, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x22, 0xf2, 0x03, 0x0a, 0x17, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, + 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 
0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, + 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x69, + 0x6e, 0x70, 0x75, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x41, 0x0a, 0x1b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x17, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x45, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x63, 0x68, 0x69, + 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x48, 0x02, + 0x52, 0x0a, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x88, 0x01, 0x01, 0x12, + 0x20, 0x0a, 0x09, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x03, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x88, 0x01, + 0x01, 0x12, 0x34, 0x0a, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x48, 0x04, + 0x52, 0x12, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x88, 0x01, 0x01, 0x12, 0x1f, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, + 0x69, 0x74, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x48, 0x05, 0x52, 0x08, 0x70, 0x72, 0x69, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x70, 0x61, 
0x72, 0x65, 0x6e, + 0x74, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x63, 0x68, 0x69, 0x6c, 0x64, + 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x63, 0x68, 0x69, 0x6c, 0x64, + 0x5f, 0x6b, 0x65, 0x79, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x0b, 0x0a, 0x09, + 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x22, 0x5e, 0x0a, 0x11, 0x53, 0x63, 0x68, + 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x39, + 0x0a, 0x0a, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, + 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x41, 0x74, 0x22, 0xad, 0x02, 0x0a, 0x0f, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x39, 0x0a, + 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 
0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x64, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, + 0x05, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6f, 0x72, + 0x64, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, + 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x49, 0x64, 0x12, 0x43, 0x0a, 0x13, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, + 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x12, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x22, 0x53, 0x0a, 0x17, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x52, 0x65, 0x66, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, + 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x22, 0x49, + 0x0a, 0x16, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, + 0x72, 0x43, 0x72, 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x72, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x72, 0x6f, 0x6e, 
0x22, 0x56, 0x0a, 0x1a, 0x42, 0x75, 0x6c, + 0x6b, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x31, 0x2e, + 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x73, 0x22, 0x47, 0x0a, 0x1b, 0x42, 0x75, 0x6c, 0x6b, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x28, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, + 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x73, 0x22, 0x41, 0x0a, 0x17, 0x54, 0x72, + 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x22, 0x6d, 0x0a, + 0x13, 0x50, 0x75, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x2e, 0x0a, 0x08, + 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x12, + 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x44, 0x75, 0x72, 0x61, 0x74, 
0x69, + 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x16, 0x0a, 0x14, + 0x50, 0x75, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2a, 0x24, 0x0a, 0x0e, 0x53, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x53, 0x74, + 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x4f, 0x46, 0x54, 0x10, 0x00, + 0x12, 0x08, 0x0a, 0x04, 0x48, 0x41, 0x52, 0x44, 0x10, 0x01, 0x2a, 0x32, 0x0a, 0x0c, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x0c, 0x0a, 0x08, 0x46, 0x55, + 0x4e, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x55, 0x52, 0x41, + 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x41, 0x47, 0x10, 0x02, 0x2a, 0x7f, + 0x0a, 0x18, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x16, 0x0a, 0x12, 0x43, 0x41, + 0x4e, 0x43, 0x45, 0x4c, 0x5f, 0x49, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x47, 0x52, 0x45, 0x53, 0x53, + 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x4e, 0x45, 0x57, 0x45, 0x53, + 0x54, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x51, 0x55, 0x45, 0x55, 0x45, 0x5f, 0x4e, 0x45, 0x57, + 0x45, 0x53, 0x54, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x52, + 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x52, 0x4f, 0x42, 0x49, 0x4e, 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, + 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x5f, 0x4e, 0x45, 0x57, 0x45, 0x53, 0x54, 0x10, 0x04, 0x2a, + 0x85, 0x01, 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x43, + 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x51, 0x55, + 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x45, 0x51, 0x55, 0x41, + 0x4c, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x47, 0x52, 0x45, 0x41, 0x54, 0x45, 0x52, 0x5f, 0x54, + 0x48, 0x41, 0x4e, 0x10, 0x02, 
0x12, 0x19, 0x0a, 0x15, 0x47, 0x52, 0x45, 0x41, 0x54, 0x45, 0x52, + 0x5f, 0x54, 0x48, 0x41, 0x4e, 0x5f, 0x4f, 0x52, 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x03, + 0x12, 0x0d, 0x0a, 0x09, 0x4c, 0x45, 0x53, 0x53, 0x5f, 0x54, 0x48, 0x41, 0x4e, 0x10, 0x04, 0x12, + 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x53, 0x53, 0x5f, 0x54, 0x48, 0x41, 0x4e, 0x5f, 0x4f, 0x52, 0x5f, + 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x05, 0x2a, 0x5d, 0x0a, 0x11, 0x52, 0x61, 0x74, 0x65, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, + 0x53, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x49, 0x4e, 0x55, + 0x54, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x4f, 0x55, 0x52, 0x10, 0x02, 0x12, 0x07, + 0x0a, 0x03, 0x44, 0x41, 0x59, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x57, 0x45, 0x45, 0x4b, 0x10, + 0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x4f, 0x4e, 0x54, 0x48, 0x10, 0x05, 0x12, 0x08, 0x0a, 0x04, + 0x59, 0x45, 0x41, 0x52, 0x10, 0x06, 0x32, 0xdf, 0x02, 0x0a, 0x0f, 0x57, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x0b, 0x50, 0x75, + 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x13, 0x2e, 0x50, 0x75, 0x74, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, + 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x3e, 0x0a, 0x10, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x18, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, + 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x47, 0x0a, 0x0f, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x12, 0x1a, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 
0x69, 0x67, 0x67, 0x65, 0x72, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x18, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x13, 0x42, 0x75, 0x6c, + 0x6b, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x12, 0x1b, 0x2e, 0x42, 0x75, 0x6c, 0x6b, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, + 0x42, 0x75, 0x6c, 0x6b, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x0c, 0x50, + 0x75, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x14, 0x2e, 0x50, 0x75, + 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x15, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2d, 0x64, + 0x65, 0x76, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_workflows_workflows_proto_rawDescOnce sync.Once + file_workflows_workflows_proto_rawDescData = file_workflows_workflows_proto_rawDesc +) + +func file_workflows_workflows_proto_rawDescGZIP() []byte { + file_workflows_workflows_proto_rawDescOnce.Do(func() { + file_workflows_workflows_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_workflows_workflows_proto_rawDescData) + }) + return file_workflows_workflows_proto_rawDescData +} + +var file_workflows_workflows_proto_enumTypes = make([]protoimpl.EnumInfo, 5) +var file_workflows_workflows_proto_msgTypes = make([]protoimpl.MessageInfo, 19) +var file_workflows_workflows_proto_goTypes = []interface{}{ + (StickyStrategy)(0), // 0: StickyStrategy + (WorkflowKind)(0), // 1: WorkflowKind + (ConcurrencyLimitStrategy)(0), // 2: ConcurrencyLimitStrategy + (WorkerLabelComparator)(0), // 3: WorkerLabelComparator + (RateLimitDuration)(0), // 4: RateLimitDuration + (*PutWorkflowRequest)(nil), // 5: PutWorkflowRequest + (*CreateWorkflowVersionOpts)(nil), // 6: CreateWorkflowVersionOpts + (*WorkflowConcurrencyOpts)(nil), // 7: WorkflowConcurrencyOpts + (*CreateWorkflowJobOpts)(nil), // 8: CreateWorkflowJobOpts + (*DesiredWorkerLabels)(nil), // 9: DesiredWorkerLabels + (*CreateWorkflowStepOpts)(nil), // 10: CreateWorkflowStepOpts + (*CreateStepRateLimit)(nil), // 11: CreateStepRateLimit + (*ListWorkflowsRequest)(nil), // 12: ListWorkflowsRequest + (*ScheduleWorkflowRequest)(nil), // 13: ScheduleWorkflowRequest + (*ScheduledWorkflow)(nil), // 14: ScheduledWorkflow + (*WorkflowVersion)(nil), // 15: WorkflowVersion + (*WorkflowTriggerEventRef)(nil), // 16: WorkflowTriggerEventRef + (*WorkflowTriggerCronRef)(nil), // 17: WorkflowTriggerCronRef + (*BulkTriggerWorkflowRequest)(nil), // 18: BulkTriggerWorkflowRequest + (*BulkTriggerWorkflowResponse)(nil), // 19: BulkTriggerWorkflowResponse + (*TriggerWorkflowResponse)(nil), // 20: TriggerWorkflowResponse + (*PutRateLimitRequest)(nil), // 21: PutRateLimitRequest + (*PutRateLimitResponse)(nil), // 22: PutRateLimitResponse + nil, // 23: CreateWorkflowStepOpts.WorkerLabelsEntry + (*timestamppb.Timestamp)(nil), // 24: google.protobuf.Timestamp + (*v1.TriggerWorkflowRequest)(nil), // 25: v1.TriggerWorkflowRequest +} +var file_workflows_workflows_proto_depIdxs = []int32{ + 6, // 0: 
PutWorkflowRequest.opts:type_name -> CreateWorkflowVersionOpts + 24, // 1: CreateWorkflowVersionOpts.scheduled_triggers:type_name -> google.protobuf.Timestamp + 8, // 2: CreateWorkflowVersionOpts.jobs:type_name -> CreateWorkflowJobOpts + 7, // 3: CreateWorkflowVersionOpts.concurrency:type_name -> WorkflowConcurrencyOpts + 8, // 4: CreateWorkflowVersionOpts.on_failure_job:type_name -> CreateWorkflowJobOpts + 0, // 5: CreateWorkflowVersionOpts.sticky:type_name -> StickyStrategy + 1, // 6: CreateWorkflowVersionOpts.kind:type_name -> WorkflowKind + 2, // 7: WorkflowConcurrencyOpts.limit_strategy:type_name -> ConcurrencyLimitStrategy + 10, // 8: CreateWorkflowJobOpts.steps:type_name -> CreateWorkflowStepOpts + 3, // 9: DesiredWorkerLabels.comparator:type_name -> WorkerLabelComparator + 11, // 10: CreateWorkflowStepOpts.rate_limits:type_name -> CreateStepRateLimit + 23, // 11: CreateWorkflowStepOpts.worker_labels:type_name -> CreateWorkflowStepOpts.WorkerLabelsEntry + 4, // 12: CreateStepRateLimit.duration:type_name -> RateLimitDuration + 24, // 13: ScheduleWorkflowRequest.schedules:type_name -> google.protobuf.Timestamp + 24, // 14: ScheduledWorkflow.trigger_at:type_name -> google.protobuf.Timestamp + 24, // 15: WorkflowVersion.created_at:type_name -> google.protobuf.Timestamp + 24, // 16: WorkflowVersion.updated_at:type_name -> google.protobuf.Timestamp + 14, // 17: WorkflowVersion.scheduled_workflows:type_name -> ScheduledWorkflow + 25, // 18: BulkTriggerWorkflowRequest.workflows:type_name -> v1.TriggerWorkflowRequest + 4, // 19: PutRateLimitRequest.duration:type_name -> RateLimitDuration + 9, // 20: CreateWorkflowStepOpts.WorkerLabelsEntry.value:type_name -> DesiredWorkerLabels + 5, // 21: WorkflowService.PutWorkflow:input_type -> PutWorkflowRequest + 13, // 22: WorkflowService.ScheduleWorkflow:input_type -> ScheduleWorkflowRequest + 25, // 23: WorkflowService.TriggerWorkflow:input_type -> v1.TriggerWorkflowRequest + 18, // 24: 
WorkflowService.BulkTriggerWorkflow:input_type -> BulkTriggerWorkflowRequest + 21, // 25: WorkflowService.PutRateLimit:input_type -> PutRateLimitRequest + 15, // 26: WorkflowService.PutWorkflow:output_type -> WorkflowVersion + 15, // 27: WorkflowService.ScheduleWorkflow:output_type -> WorkflowVersion + 20, // 28: WorkflowService.TriggerWorkflow:output_type -> TriggerWorkflowResponse + 19, // 29: WorkflowService.BulkTriggerWorkflow:output_type -> BulkTriggerWorkflowResponse + 22, // 30: WorkflowService.PutRateLimit:output_type -> PutRateLimitResponse + 26, // [26:31] is the sub-list for method output_type + 21, // [21:26] is the sub-list for method input_type + 21, // [21:21] is the sub-list for extension type_name + 21, // [21:21] is the sub-list for extension extendee + 0, // [0:21] is the sub-list for field type_name +} + +func init() { file_workflows_workflows_proto_init() } +func file_workflows_workflows_proto_init() { + if File_workflows_workflows_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_workflows_workflows_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutWorkflowRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workflows_workflows_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateWorkflowVersionOpts); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workflows_workflows_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowConcurrencyOpts); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workflows_workflows_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateWorkflowJobOpts); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workflows_workflows_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DesiredWorkerLabels); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workflows_workflows_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateWorkflowStepOpts); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workflows_workflows_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateStepRateLimit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workflows_workflows_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListWorkflowsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workflows_workflows_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScheduleWorkflowRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workflows_workflows_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScheduledWorkflow); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workflows_workflows_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowVersion); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + 
} + file_workflows_workflows_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowTriggerEventRef); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workflows_workflows_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowTriggerCronRef); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workflows_workflows_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BulkTriggerWorkflowRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workflows_workflows_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BulkTriggerWorkflowResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workflows_workflows_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TriggerWorkflowResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workflows_workflows_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutRateLimitRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workflows_workflows_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutRateLimitResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_workflows_workflows_proto_msgTypes[1].OneofWrappers = []interface{}{} 
+ file_workflows_workflows_proto_msgTypes[2].OneofWrappers = []interface{}{} + file_workflows_workflows_proto_msgTypes[4].OneofWrappers = []interface{}{} + file_workflows_workflows_proto_msgTypes[5].OneofWrappers = []interface{}{} + file_workflows_workflows_proto_msgTypes[6].OneofWrappers = []interface{}{} + file_workflows_workflows_proto_msgTypes[8].OneofWrappers = []interface{}{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_workflows_workflows_proto_rawDesc, + NumEnums: 5, + NumMessages: 19, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_workflows_workflows_proto_goTypes, + DependencyIndexes: file_workflows_workflows_proto_depIdxs, + EnumInfos: file_workflows_workflows_proto_enumTypes, + MessageInfos: file_workflows_workflows_proto_msgTypes, + }.Build() + File_workflows_workflows_proto = out.File + file_workflows_workflows_proto_rawDesc = nil + file_workflows_workflows_proto_goTypes = nil + file_workflows_workflows_proto_depIdxs = nil +} diff --git a/internal/services/admin/contracts/workflows/workflows_grpc.pb.go b/internal/services/admin/contracts/workflows/workflows_grpc.pb.go new file mode 100644 index 000000000..33dd1d223 --- /dev/null +++ b/internal/services/admin/contracts/workflows/workflows_grpc.pb.go @@ -0,0 +1,250 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v5.29.3 +// source: workflows/workflows.proto + +package contracts + +import ( + context "context" + v1 "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. 
+const _ = grpc.SupportPackageIsVersion7 + +// WorkflowServiceClient is the client API for WorkflowService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type WorkflowServiceClient interface { + PutWorkflow(ctx context.Context, in *PutWorkflowRequest, opts ...grpc.CallOption) (*WorkflowVersion, error) + ScheduleWorkflow(ctx context.Context, in *ScheduleWorkflowRequest, opts ...grpc.CallOption) (*WorkflowVersion, error) + TriggerWorkflow(ctx context.Context, in *v1.TriggerWorkflowRequest, opts ...grpc.CallOption) (*TriggerWorkflowResponse, error) + BulkTriggerWorkflow(ctx context.Context, in *BulkTriggerWorkflowRequest, opts ...grpc.CallOption) (*BulkTriggerWorkflowResponse, error) + PutRateLimit(ctx context.Context, in *PutRateLimitRequest, opts ...grpc.CallOption) (*PutRateLimitResponse, error) +} + +type workflowServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewWorkflowServiceClient(cc grpc.ClientConnInterface) WorkflowServiceClient { + return &workflowServiceClient{cc} +} + +func (c *workflowServiceClient) PutWorkflow(ctx context.Context, in *PutWorkflowRequest, opts ...grpc.CallOption) (*WorkflowVersion, error) { + out := new(WorkflowVersion) + err := c.cc.Invoke(ctx, "/WorkflowService/PutWorkflow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) ScheduleWorkflow(ctx context.Context, in *ScheduleWorkflowRequest, opts ...grpc.CallOption) (*WorkflowVersion, error) { + out := new(WorkflowVersion) + err := c.cc.Invoke(ctx, "/WorkflowService/ScheduleWorkflow", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) TriggerWorkflow(ctx context.Context, in *v1.TriggerWorkflowRequest, opts ...grpc.CallOption) (*TriggerWorkflowResponse, error) { + out := new(TriggerWorkflowResponse) + err := c.cc.Invoke(ctx, "/WorkflowService/TriggerWorkflow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) BulkTriggerWorkflow(ctx context.Context, in *BulkTriggerWorkflowRequest, opts ...grpc.CallOption) (*BulkTriggerWorkflowResponse, error) { + out := new(BulkTriggerWorkflowResponse) + err := c.cc.Invoke(ctx, "/WorkflowService/BulkTriggerWorkflow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) PutRateLimit(ctx context.Context, in *PutRateLimitRequest, opts ...grpc.CallOption) (*PutRateLimitResponse, error) { + out := new(PutRateLimitResponse) + err := c.cc.Invoke(ctx, "/WorkflowService/PutRateLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// WorkflowServiceServer is the server API for WorkflowService service. +// All implementations must embed UnimplementedWorkflowServiceServer +// for forward compatibility +type WorkflowServiceServer interface { + PutWorkflow(context.Context, *PutWorkflowRequest) (*WorkflowVersion, error) + ScheduleWorkflow(context.Context, *ScheduleWorkflowRequest) (*WorkflowVersion, error) + TriggerWorkflow(context.Context, *v1.TriggerWorkflowRequest) (*TriggerWorkflowResponse, error) + BulkTriggerWorkflow(context.Context, *BulkTriggerWorkflowRequest) (*BulkTriggerWorkflowResponse, error) + PutRateLimit(context.Context, *PutRateLimitRequest) (*PutRateLimitResponse, error) + mustEmbedUnimplementedWorkflowServiceServer() +} + +// UnimplementedWorkflowServiceServer must be embedded to have forward compatible implementations. 
+type UnimplementedWorkflowServiceServer struct { +} + +func (UnimplementedWorkflowServiceServer) PutWorkflow(context.Context, *PutWorkflowRequest) (*WorkflowVersion, error) { + return nil, status.Errorf(codes.Unimplemented, "method PutWorkflow not implemented") +} +func (UnimplementedWorkflowServiceServer) ScheduleWorkflow(context.Context, *ScheduleWorkflowRequest) (*WorkflowVersion, error) { + return nil, status.Errorf(codes.Unimplemented, "method ScheduleWorkflow not implemented") +} +func (UnimplementedWorkflowServiceServer) TriggerWorkflow(context.Context, *v1.TriggerWorkflowRequest) (*TriggerWorkflowResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TriggerWorkflow not implemented") +} +func (UnimplementedWorkflowServiceServer) BulkTriggerWorkflow(context.Context, *BulkTriggerWorkflowRequest) (*BulkTriggerWorkflowResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BulkTriggerWorkflow not implemented") +} +func (UnimplementedWorkflowServiceServer) PutRateLimit(context.Context, *PutRateLimitRequest) (*PutRateLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PutRateLimit not implemented") +} +func (UnimplementedWorkflowServiceServer) mustEmbedUnimplementedWorkflowServiceServer() {} + +// UnsafeWorkflowServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to WorkflowServiceServer will +// result in compilation errors. 
+type UnsafeWorkflowServiceServer interface { + mustEmbedUnimplementedWorkflowServiceServer() +} + +func RegisterWorkflowServiceServer(s grpc.ServiceRegistrar, srv WorkflowServiceServer) { + s.RegisterService(&WorkflowService_ServiceDesc, srv) +} + +func _WorkflowService_PutWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PutWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).PutWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/WorkflowService/PutWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).PutWorkflow(ctx, req.(*PutWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowService_ScheduleWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ScheduleWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).ScheduleWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/WorkflowService/ScheduleWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).ScheduleWorkflow(ctx, req.(*ScheduleWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowService_TriggerWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1.TriggerWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).TriggerWorkflow(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/WorkflowService/TriggerWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).TriggerWorkflow(ctx, req.(*v1.TriggerWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowService_BulkTriggerWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BulkTriggerWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).BulkTriggerWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/WorkflowService/BulkTriggerWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).BulkTriggerWorkflow(ctx, req.(*BulkTriggerWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowService_PutRateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PutRateLimitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).PutRateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/WorkflowService/PutRateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).PutRateLimit(ctx, req.(*PutRateLimitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// WorkflowService_ServiceDesc is the grpc.ServiceDesc for WorkflowService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var WorkflowService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "WorkflowService", + HandlerType: (*WorkflowServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "PutWorkflow", + Handler: _WorkflowService_PutWorkflow_Handler, + }, + { + MethodName: "ScheduleWorkflow", + Handler: _WorkflowService_ScheduleWorkflow_Handler, + }, + { + MethodName: "TriggerWorkflow", + Handler: _WorkflowService_TriggerWorkflow_Handler, + }, + { + MethodName: "BulkTriggerWorkflow", + Handler: _WorkflowService_BulkTriggerWorkflow_Handler, + }, + { + MethodName: "PutRateLimit", + Handler: _WorkflowService_PutRateLimit_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "workflows/workflows.proto", +} diff --git a/internal/services/admin/contracts/workflows_grpc.pb.go b/internal/services/admin/contracts/workflows_grpc.pb.go index 932e7ec85..e0a8aa6f1 100644 --- a/internal/services/admin/contracts/workflows_grpc.pb.go +++ b/internal/services/admin/contracts/workflows_grpc.pb.go @@ -8,6 +8,7 @@ package contracts import ( context "context" + v1 "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" @@ -24,7 +25,7 @@ const _ = grpc.SupportPackageIsVersion7 type WorkflowServiceClient interface { PutWorkflow(ctx context.Context, in *PutWorkflowRequest, opts ...grpc.CallOption) (*WorkflowVersion, error) ScheduleWorkflow(ctx context.Context, in *ScheduleWorkflowRequest, opts ...grpc.CallOption) (*WorkflowVersion, error) - TriggerWorkflow(ctx context.Context, in *TriggerWorkflowRequest, opts ...grpc.CallOption) (*TriggerWorkflowResponse, error) + TriggerWorkflow(ctx context.Context, in *v1.TriggerWorkflowRequest, opts ...grpc.CallOption) (*TriggerWorkflowResponse, error) BulkTriggerWorkflow(ctx context.Context, in 
*BulkTriggerWorkflowRequest, opts ...grpc.CallOption) (*BulkTriggerWorkflowResponse, error) PutRateLimit(ctx context.Context, in *PutRateLimitRequest, opts ...grpc.CallOption) (*PutRateLimitResponse, error) } @@ -55,7 +56,7 @@ func (c *workflowServiceClient) ScheduleWorkflow(ctx context.Context, in *Schedu return out, nil } -func (c *workflowServiceClient) TriggerWorkflow(ctx context.Context, in *TriggerWorkflowRequest, opts ...grpc.CallOption) (*TriggerWorkflowResponse, error) { +func (c *workflowServiceClient) TriggerWorkflow(ctx context.Context, in *v1.TriggerWorkflowRequest, opts ...grpc.CallOption) (*TriggerWorkflowResponse, error) { out := new(TriggerWorkflowResponse) err := c.cc.Invoke(ctx, "/WorkflowService/TriggerWorkflow", in, out, opts...) if err != nil { @@ -88,7 +89,7 @@ func (c *workflowServiceClient) PutRateLimit(ctx context.Context, in *PutRateLim type WorkflowServiceServer interface { PutWorkflow(context.Context, *PutWorkflowRequest) (*WorkflowVersion, error) ScheduleWorkflow(context.Context, *ScheduleWorkflowRequest) (*WorkflowVersion, error) - TriggerWorkflow(context.Context, *TriggerWorkflowRequest) (*TriggerWorkflowResponse, error) + TriggerWorkflow(context.Context, *v1.TriggerWorkflowRequest) (*TriggerWorkflowResponse, error) BulkTriggerWorkflow(context.Context, *BulkTriggerWorkflowRequest) (*BulkTriggerWorkflowResponse, error) PutRateLimit(context.Context, *PutRateLimitRequest) (*PutRateLimitResponse, error) mustEmbedUnimplementedWorkflowServiceServer() @@ -104,7 +105,7 @@ func (UnimplementedWorkflowServiceServer) PutWorkflow(context.Context, *PutWorkf func (UnimplementedWorkflowServiceServer) ScheduleWorkflow(context.Context, *ScheduleWorkflowRequest) (*WorkflowVersion, error) { return nil, status.Errorf(codes.Unimplemented, "method ScheduleWorkflow not implemented") } -func (UnimplementedWorkflowServiceServer) TriggerWorkflow(context.Context, *TriggerWorkflowRequest) (*TriggerWorkflowResponse, error) { +func 
(UnimplementedWorkflowServiceServer) TriggerWorkflow(context.Context, *v1.TriggerWorkflowRequest) (*TriggerWorkflowResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method TriggerWorkflow not implemented") } func (UnimplementedWorkflowServiceServer) BulkTriggerWorkflow(context.Context, *BulkTriggerWorkflowRequest) (*BulkTriggerWorkflowResponse, error) { @@ -163,7 +164,7 @@ func _WorkflowService_ScheduleWorkflow_Handler(srv interface{}, ctx context.Cont } func _WorkflowService_TriggerWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(TriggerWorkflowRequest) + in := new(v1.TriggerWorkflowRequest) if err := dec(in); err != nil { return nil, err } @@ -175,7 +176,7 @@ func _WorkflowService_TriggerWorkflow_Handler(srv interface{}, ctx context.Conte FullMethod: "/WorkflowService/TriggerWorkflow", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WorkflowServiceServer).TriggerWorkflow(ctx, req.(*TriggerWorkflowRequest)) + return srv.(WorkflowServiceServer).TriggerWorkflow(ctx, req.(*v1.TriggerWorkflowRequest)) } return interceptor(ctx, in, info, handler) } diff --git a/internal/services/admin/server.go b/internal/services/admin/server.go index 16f6f5b3a..41becb0e9 100644 --- a/internal/services/admin/server.go +++ b/internal/services/admin/server.go @@ -11,17 +11,18 @@ import ( "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/timestamppb" + "github.com/hatchet-dev/hatchet/internal/services/admin/contracts" + v1contracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1" tasktypes "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes/v1" "github.com/hatchet-dev/hatchet/internal/msgqueue" - "github.com/hatchet-dev/hatchet/internal/services/admin/contracts" "github.com/hatchet-dev/hatchet/pkg/analytics" "github.com/hatchet-dev/hatchet/pkg/client/types" v1 
"github.com/hatchet-dev/hatchet/pkg/repository" "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" ) -func (a *AdminServiceImpl) TriggerWorkflow(ctx context.Context, req *contracts.TriggerWorkflowRequest) (*contracts.TriggerWorkflowResponse, error) { +func (a *AdminServiceImpl) TriggerWorkflow(ctx context.Context, req *v1contracts.TriggerWorkflowRequest) (*contracts.TriggerWorkflowResponse, error) { a.analytics.Count(ctx, analytics.WorkflowRun, analytics.Create, analytics.Props( "has_priority", req.Priority != nil, "is_child", req.ParentId != nil, diff --git a/internal/services/admin/server_v1.go b/internal/services/admin/server_v1.go index e23ff8385..c8de5ca2c 100644 --- a/internal/services/admin/server_v1.go +++ b/internal/services/admin/server_v1.go @@ -5,15 +5,16 @@ import ( "errors" "fmt" - "go.opentelemetry.io/otel/attribute" "golang.org/x/sync/errgroup" "github.com/google/uuid" + "go.opentelemetry.io/otel/attribute" "github.com/hatchet-dev/hatchet/internal/datautils" "github.com/hatchet-dev/hatchet/internal/msgqueue" "github.com/hatchet-dev/hatchet/internal/services/admin/contracts" "github.com/hatchet-dev/hatchet/internal/services/controllers/task/trigger" + v1contracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1" tasktypes "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes/v1" "github.com/hatchet-dev/hatchet/pkg/constants" grpcmiddleware "github.com/hatchet-dev/hatchet/pkg/grpc/middleware" @@ -27,7 +28,7 @@ import ( schedulingv1 "github.com/hatchet-dev/hatchet/pkg/scheduling/v1" ) -func (a *AdminServiceImpl) triggerWorkflowV1(ctx context.Context, req *contracts.TriggerWorkflowRequest) (*contracts.TriggerWorkflowResponse, error) { +func (a *AdminServiceImpl) triggerWorkflowV1(ctx context.Context, req *v1contracts.TriggerWorkflowRequest) (*contracts.TriggerWorkflowResponse, error) { tenant := ctx.Value("tenant").(*sqlcv1.Tenant) tenantId := tenant.ID @@ -53,8 +54,14 @@ func (a *AdminServiceImpl) triggerWorkflowV1(ctx 
context.Context, req *contracts opt, err := a.newTriggerOpt(ctx, tenantId, req) + re, isInvalidArgument := err.(*v1.TriggerOptInvalidArgumentError) + if err != nil { - return nil, fmt.Errorf("could not create trigger opt: %w", err) + if isInvalidArgument { + return nil, status.Errorf(codes.InvalidArgument, "Invalid request: %s", re.Err) + } else { + return nil, fmt.Errorf("could not create trigger opt: %w", err) + } } if err := v1.ValidateJSONB(opt.Data, "payload"); err != nil { @@ -111,8 +118,14 @@ func (a *AdminServiceImpl) bulkTriggerWorkflowV1(ctx context.Context, req *contr for i, workflow := range req.Workflows { opt, err := a.newTriggerOpt(ctx, tenantId, workflow) + re, isInvalidArgument := err.(*v1.TriggerOptInvalidArgumentError) + if err != nil { - return nil, fmt.Errorf("could not create trigger opt: %w", err) + if isInvalidArgument { + return nil, status.Errorf(codes.InvalidArgument, "Invalid request: %s", re.Err) + } else { + return nil, fmt.Errorf("could not create trigger opt: %w", err) + } } if err := v1.ValidateJSONB(opt.Data, "payload"); err != nil { @@ -170,7 +183,7 @@ func (a *AdminServiceImpl) bulkTriggerWorkflowV1(ctx context.Context, req *contr func (i *AdminServiceImpl) newTriggerOpt( ctx context.Context, tenantId uuid.UUID, - req *contracts.TriggerWorkflowRequest, + req *v1contracts.TriggerWorkflowRequest, ) (*v1.WorkflowNameTriggerOpts, error) { ctx, span := telemetry.NewSpan(ctx, "admin_service.new_trigger_opt") defer span.End() @@ -181,55 +194,7 @@ func (i *AdminServiceImpl) newTriggerOpt( attribute.Bool("admin_service.new_trigger_opt.is_child_workflow", req.ParentTaskRunExternalId != nil), ) - additionalMeta := "" - - if req.AdditionalMetadata != nil { - additionalMeta = *req.AdditionalMetadata - } - - var desiredWorkerId *uuid.UUID - if req.DesiredWorkerId != nil { - workerId, err := uuid.Parse(*req.DesiredWorkerId) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "desiredWorkerId must be a valid UUID: %s", err) - } 
- desiredWorkerId = &workerId - } - - t := &v1.TriggerTaskData{ - WorkflowName: req.Name, - Data: []byte(req.Input), - AdditionalMetadata: []byte(additionalMeta), - DesiredWorkerId: desiredWorkerId, - Priority: req.Priority, - } - - if len(req.DesiredWorkerLabels) > 0 { - labels := make([]*sqlcv1.GetDesiredLabelsRow, 0, len(req.DesiredWorkerLabels)) - for key, label := range req.DesiredWorkerLabels { - var comparator *string - if label.Comparator != nil { - c := label.Comparator.String() - comparator = &c - } - labels = append(labels, v1.ProtoToDesiredWorkerLabel( - key, - label.StrValue, - label.IntValue, - label.Required, - label.Weight, - comparator, - )) - } - t.DesiredWorkerLabels = labels - } - - if req.Priority != nil { - if *req.Priority < 1 || *req.Priority > 3 { - return nil, status.Errorf(codes.InvalidArgument, "priority must be between 1 and 3, got %d", *req.Priority) - } - t.Priority = req.Priority - } + var parentTask *sqlcv1.FlattenExternalIdsRow if req.ParentTaskRunExternalId != nil { parentTaskExternalId, err := uuid.Parse(*req.ParentTaskRunExternalId) @@ -238,8 +203,7 @@ func (i *AdminServiceImpl) newTriggerOpt( return nil, status.Errorf(codes.InvalidArgument, "parentStepRunId must be a valid UUID: %s", err) } - // lookup the parent external id - parentTask, err := i.repov1.Tasks().GetTaskByExternalId( + maybeParentTask, err := i.repov1.Tasks().GetTaskByExternalId( ctx, tenantId, parentTaskExternalId, @@ -250,14 +214,19 @@ func (i *AdminServiceImpl) newTriggerOpt( return nil, fmt.Errorf("could not find parent task: %w", err) } - parentExternalId := parentTask.ExternalID - childIndex := int64(*req.ChildIndex) + parentTask = maybeParentTask + } - t.ParentExternalId = &parentExternalId - t.ParentTaskId = &parentTask.ID - t.ParentTaskInsertedAt = &parentTask.InsertedAt.Time - t.ChildIndex = &childIndex - t.ChildKey = req.ChildKey + t, err := i.repov1.Triggers().NewTriggerTaskData(ctx, tenantId, req, parentTask) + + if err != nil { + re, 
isInvalidArgument := err.(*v1.TriggerOptInvalidArgumentError) + + if isInvalidArgument { + return nil, re + } else { + return nil, fmt.Errorf("could not create trigger opt: %w", err) + } } return &v1.WorkflowNameTriggerOpts{ diff --git a/internal/services/admin/v1/server.go b/internal/services/admin/v1/server.go index d6bf7e740..bde7cf2e5 100644 --- a/internal/services/admin/v1/server.go +++ b/internal/services/admin/v1/server.go @@ -407,8 +407,14 @@ func (a *AdminServiceImpl) TriggerWorkflowRun(ctx context.Context, req *contract opt, err := a.newTriggerOpt(ctx, tenantId, req) + re, isInvalidArgument := err.(*v1.TriggerOptInvalidArgumentError) + if err != nil { - return nil, fmt.Errorf("could not create trigger opt: %w", err) + if isInvalidArgument { + return nil, status.Errorf(codes.InvalidArgument, "Invalid request: %s", re.Err) + } else { + return nil, fmt.Errorf("could not create trigger opt: %w", err) + } } err = a.generateExternalIds(ctx, tenantId, []*v1.WorkflowNameTriggerOpts{opt}) @@ -432,6 +438,56 @@ func (a *AdminServiceImpl) TriggerWorkflowRun(ctx context.Context, req *contract }, nil } +func (a *AdminServiceImpl) BranchDurableTask(ctx context.Context, req *contracts.BranchDurableTaskRequest) (*contracts.BranchDurableTaskResponse, error) { + tenant := ctx.Value("tenant").(*sqlcv1.Tenant) + tenantId := tenant.ID + + taskExternalId, err := uuid.Parse(req.TaskExternalId) + if err != nil { + return nil, status.Error(codes.InvalidArgument, "invalid task_external_id") + } + + a.analytics.Count(ctx, analytics.DurableTask, analytics.Branch) + + task, err := a.repo.Tasks().GetTaskByExternalId(ctx, tenantId, taskExternalId, true) + if err != nil { + return nil, status.Errorf(codes.NotFound, "task not found: %v", err) + } + + result, err := a.repo.DurableEvents().HandleBranch(ctx, tenantId, req.NodeId, req.BranchId, task) + + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to branch durable task: %v", err) + } + + replayPayload := 
tasktypes.ReplayTasksPayload{ + Tasks: []tasktypes.TaskIdInsertedAtRetryCountWithExternalId{{ + TaskIdInsertedAtRetryCount: v1.TaskIdInsertedAtRetryCount{ + Id: task.ID, + InsertedAt: task.InsertedAt, + RetryCount: task.RetryCount, + }, + WorkflowRunExternalId: task.WorkflowRunExternalID, + TaskExternalId: task.ExternalID, + }}, + } + + msg, err := msgqueue.NewTenantMessage(tenantId, msgqueue.MsgIDReplayTasks, false, true, replayPayload) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to create replay message: %v", err) + } + + if err := a.mq.SendMessage(ctx, msgqueue.TASK_PROCESSING_QUEUE, msg); err != nil { + return nil, status.Errorf(codes.Internal, "failed to send replay message: %v", err) + } + + return &contracts.BranchDurableTaskResponse{ + TaskExternalId: taskExternalId.String(), + NodeId: result.NodeId, + BranchId: result.EventLogFile.LatestBranchID, + }, nil +} + func (a *AdminServiceImpl) GetRunDetails(ctx context.Context, req *contracts.GetRunDetailsRequest) (*contracts.GetRunDetailsResponse, error) { tenant := ctx.Value("tenant").(*sqlcv1.Tenant) tenantId := tenant.ID @@ -472,10 +528,14 @@ func (a *AdminServiceImpl) GetRunDetails(ctx context.Context, req *contracts.Get Output: details.OutputPayload, ReadableId: string(readableId), ExternalId: details.ExternalId.String(), + IsEvicted: details.Status.IsEvicted(), } } - done := !listutils.Any(statuses, "QUEUED") && !listutils.Any(statuses, "RUNNING") + done := !listutils.Any(statuses, "QUEUED") && !listutils.Any(statuses, "RUNNING") && !listutils.Any(statuses, "EVICTED") + + anyEvicted := listutils.Any(statuses, "EVICTED") + derivedWorkflowRunStatus, err := statusutils.DeriveWorkflowRunStatus(ctx, statuses) if err != nil { @@ -494,10 +554,11 @@ func (a *AdminServiceImpl) GetRunDetails(ctx context.Context, req *contracts.Get TaskRuns: taskRunDetails, Status: *derivedStatusPtr, Done: done, + IsEvicted: anyEvicted, }, nil } -func (i *AdminServiceImpl) newTriggerOpt( +func (a 
*AdminServiceImpl) newTriggerOpt( ctx context.Context, tenantId uuid.UUID, req *contracts.TriggerWorkflowRunRequest, @@ -524,11 +585,11 @@ func (i *AdminServiceImpl) newTriggerOpt( }, nil } -func (i *AdminServiceImpl) generateExternalIds(ctx context.Context, tenantId uuid.UUID, opts []*v1.WorkflowNameTriggerOpts) error { - return i.repo.Triggers().PopulateExternalIdsForWorkflow(ctx, tenantId, opts) +func (a *AdminServiceImpl) generateExternalIds(ctx context.Context, tenantId uuid.UUID, opts []*v1.WorkflowNameTriggerOpts) error { + return a.repo.Triggers().PopulateExternalIdsForWorkflow(ctx, tenantId, opts) } -func (i *AdminServiceImpl) ingest(ctx context.Context, tenantId uuid.UUID, opts ...*v1.WorkflowNameTriggerOpts) error { +func (a *AdminServiceImpl) ingest(ctx context.Context, tenantId uuid.UUID, opts ...*v1.WorkflowNameTriggerOpts) error { optsToSend := make([]*v1.WorkflowNameTriggerOpts, 0) for _, opt := range opts { @@ -543,28 +604,28 @@ func (i *AdminServiceImpl) ingest(ctx context.Context, tenantId uuid.UUID, opts return nil } - if i.localScheduler != nil { + if a.localScheduler != nil { localWorkerIds := map[uuid.UUID]struct{}{} - if i.localDispatcher != nil { - localWorkerIds = i.localDispatcher.GetLocalWorkerIds() + if a.localDispatcher != nil { + localWorkerIds = a.localDispatcher.GetLocalWorkerIds() } - localAssigned, schedulingErr := i.localScheduler.RunOptimisticScheduling(ctx, tenantId, opts, localWorkerIds) + localAssigned, schedulingErr := a.localScheduler.RunOptimisticScheduling(ctx, tenantId, opts, localWorkerIds) // if we have a scheduling error, we'll fall back to normal ingestion if schedulingErr != nil { if !errors.Is(schedulingErr, schedulingv1.ErrTenantNotFound) && !errors.Is(schedulingErr, schedulingv1.ErrNoOptimisticSlots) { - i.l.Error().Err(schedulingErr).Msg("could not run optimistic scheduling") + a.l.Error().Err(schedulingErr).Msg("could not run optimistic scheduling") } } - if i.localDispatcher != nil && len(localAssigned) > 0 { 
+ if a.localDispatcher != nil && len(localAssigned) > 0 { eg := errgroup.Group{} for workerId, assignedItems := range localAssigned { eg.Go(func() error { - err := i.localDispatcher.HandleLocalAssignments(ctx, tenantId, workerId, assignedItems) + err := a.localDispatcher.HandleLocalAssignments(ctx, tenantId, workerId, assignedItems) if err != nil { return fmt.Errorf("could not dispatch assigned items: %w", err) @@ -577,7 +638,7 @@ func (i *AdminServiceImpl) ingest(ctx context.Context, tenantId uuid.UUID, opts dispatcherErr := eg.Wait() if dispatcherErr != nil { - i.l.Error().Err(dispatcherErr).Msg("could not handle local assignments") + a.l.Error().Err(dispatcherErr).Msg("could not handle local assignments") } // we return nil because the failed assignments would have been requeued by the local dispatcher, @@ -589,18 +650,18 @@ func (i *AdminServiceImpl) ingest(ctx context.Context, tenantId uuid.UUID, opts if schedulingErr == nil { return nil } - } else if i.tw != nil { - triggerErr := i.tw.TriggerFromWorkflowNames(ctx, tenantId, optsToSend) + } else if a.tw != nil { + triggerErr := a.tw.TriggerFromWorkflowNames(ctx, tenantId, optsToSend) // if we fail to trigger via gRPC, we fall back to normal ingestion if triggerErr != nil && !errors.Is(triggerErr, trigger.ErrNoTriggerSlots) { - i.l.Error().Err(triggerErr).Msg("could not trigger workflow runs via gRPC") + a.l.Error().Err(triggerErr).Msg("could not trigger workflow runs via gRPC") } else if triggerErr == nil { return nil } } - verifyErr := i.repo.Triggers().PreflightVerifyWorkflowNameOpts(ctx, tenantId, optsToSend) + verifyErr := a.repo.Triggers().PreflightVerifyWorkflowNameOpts(ctx, tenantId, optsToSend) if verifyErr != nil { namesNotFound := &v1.ErrNamesNotFound{} @@ -624,7 +685,7 @@ func (i *AdminServiceImpl) ingest(ctx context.Context, tenantId uuid.UUID, opts return fmt.Errorf("could not create event task: %w", err) } - err = i.mq.SendMessage(ctx, msgqueue.TASK_PROCESSING_QUEUE, msg) + err = 
a.mq.SendMessage(ctx, msgqueue.TASK_PROCESSING_QUEUE, msg) if err != nil { return fmt.Errorf("could not add event to task queue: %w", err) diff --git a/internal/services/controllers/olap/controller.go b/internal/services/controllers/olap/controller.go index 1944555c3..e03927557 100644 --- a/internal/services/controllers/olap/controller.go +++ b/internal/services/controllers/olap/controller.go @@ -635,6 +635,7 @@ func (tc *OLAPControllerImpl) handleCreateMonitoringEvent(ctx context.Context, t taskIds := make([]int64, 0) taskInsertedAts := make([]pgtype.Timestamptz, 0) retryCounts := make([]int32, 0) + durableInvocationCounts := make([]int32, 0) workerIds := make([]uuid.UUID, 0) workflowIds := make([]uuid.UUID, 0) eventTypes := make([]sqlcv1.V1EventTypeOlap, 0) @@ -661,6 +662,7 @@ func (tc *OLAPControllerImpl) handleCreateMonitoringEvent(ctx context.Context, t taskInsertedAts = append(taskInsertedAts, taskMeta.InsertedAt) workflowIds = append(workflowIds, taskMeta.WorkflowID) retryCounts = append(retryCounts, msg.RetryCount) + durableInvocationCounts = append(durableInvocationCounts, msg.DurableInvocationCount) eventTypes = append(eventTypes, msg.EventType) eventPayloads = append(eventPayloads, msg.EventPayload) eventMessages = append(eventMessages, msg.EventMessage) @@ -717,6 +719,10 @@ func (tc *OLAPControllerImpl) handleCreateMonitoringEvent(ctx context.Context, t readableStatuses = append(readableStatuses, sqlcv1.V1ReadableStatusOlapCOMPLETED) case sqlcv1.V1EventTypeOlapCOULDNOTSENDTOWORKER: readableStatuses = append(readableStatuses, sqlcv1.V1ReadableStatusOlapFAILED) + case sqlcv1.V1EventTypeOlapDURABLEEVICTED: + readableStatuses = append(readableStatuses, sqlcv1.V1ReadableStatusOlapEVICTED) + case sqlcv1.V1EventTypeOlapDURABLERESTORING: + readableStatuses = append(readableStatuses, sqlcv1.V1ReadableStatusOlapRUNNING) } } @@ -738,6 +744,7 @@ func (tc *OLAPControllerImpl) handleCreateMonitoringEvent(ctx context.Context, t EventTimestamp: timestamps[i], 
ReadableStatus: readableStatuses[i], RetryCount: retryCounts[i], + DurableInvocationCount: durableInvocationCounts[i], WorkerID: workerId, AdditionalEventMessage: sqlchelpers.TextFromStr(eventMessages[i]), ExternalID: eventExternalIds[i], diff --git a/internal/services/controllers/olap/signal/signal.go b/internal/services/controllers/olap/signal/signal.go index 71eb991eb..eee553517 100644 --- a/internal/services/controllers/olap/signal/signal.go +++ b/internal/services/controllers/olap/signal/signal.go @@ -34,6 +34,24 @@ func NewOLAPSignaler(mq msgqueue.MessageQueue, repo v1.Repository, l *zerolog.Lo } } +func (s *OLAPSignaler) SignalCreated(ctx context.Context, tenantId uuid.UUID, tasks []*v1.V1TaskWithPayload, dags []*v1.DAGWithData) error { + eg := &errgroup.Group{} + + if len(tasks) > 0 { + eg.Go(func() error { + return s.SignalTasksCreated(ctx, tenantId, tasks) + }) + } + + if len(dags) > 0 { + eg.Go(func() error { + return s.SignalDAGsCreated(ctx, tenantId, dags) + }) + } + + return eg.Wait() +} + func (s *OLAPSignaler) SignalDAGsCreated(ctx context.Context, tenantId uuid.UUID, dags []*v1.DAGWithData) error { // notify that tasks have been created // TODO: make this transactionally safe? 
diff --git a/internal/services/controllers/task/controller.go b/internal/services/controllers/task/controller.go index 0b76f7b96..64a92a138 100644 --- a/internal/services/controllers/task/controller.go +++ b/internal/services/controllers/task/controller.go @@ -437,6 +437,8 @@ func (tc *TasksControllerImpl) handleBufferedMsgs(tenantId uuid.UUID, msgId stri return tc.handleProcessInternalEvents(context.Background(), tenantId, payloads) case msgqueue.MsgIDTaskTrigger: return tc.handleProcessTaskTrigger(context.Background(), tenantId, payloads) + case msgqueue.MsgIDDurableRestoreTask: + return tc.handleDurableRestoreTask(context.Background(), tenantId, payloads) } return fmt.Errorf("unknown message id: %s", msgId) @@ -1097,6 +1099,12 @@ func (tc *TasksControllerImpl) processUserEventMatches(ctx context.Context, tena } } + if len(matchResult.SatisfiedDurableEventLogEntries) > 0 { + if err := tc.processSatisfiedEventLogEntry(ctx, tenantId, matchResult.SatisfiedDurableEventLogEntries); err != nil { + tc.l.Error().Err(err).Msg("could not process satisfied entries") + } + } + return nil } @@ -1137,6 +1145,12 @@ func (tc *TasksControllerImpl) processInternalEvents(ctx context.Context, tenant } } + if len(matchResult.SatisfiedDurableEventLogEntries) > 0 { + if err := tc.processSatisfiedEventLogEntry(ctx, tenantId, matchResult.SatisfiedDurableEventLogEntries); err != nil { + tc.l.Error().Err(err).Msg("could not process satisfied entries") + } + } + return nil } diff --git a/internal/services/controllers/task/durable_callbacks.go b/internal/services/controllers/task/durable_callbacks.go new file mode 100644 index 000000000..6ca246560 --- /dev/null +++ b/internal/services/controllers/task/durable_callbacks.go @@ -0,0 +1,229 @@ +package task + +import ( + "context" + "fmt" + "time" + + "github.com/google/uuid" + + "github.com/hatchet-dev/hatchet/internal/msgqueue" + tasktypes "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes/v1" + v1 
"github.com/hatchet-dev/hatchet/pkg/repository" + "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" +) + +func (tc *TasksControllerImpl) processSatisfiedEventLogEntry(ctx context.Context, tenantId uuid.UUID, callbacks []v1.SatisfiedEntry) error { + if len(callbacks) == 0 { + return nil + } + + idInsertedAtTuples := make([]v1.IdInsertedAt, 0) + + for _, cb := range callbacks { + idInsertedAtTuples = append(idInsertedAtTuples, v1.IdInsertedAt{ + ID: cb.DurableTaskId, + InsertedAt: cb.DurableTaskInsertedAt, + }) + } + + idInsertedAtToDispatcherId, err := tc.repov1.Workers().GetDurableDispatcherIdsForTasks(ctx, tenantId, idInsertedAtTuples) + + if err != nil { + return fmt.Errorf("could not list dispatcher ids for tasks: %w", err) + } + + dispatcherToMsgs := make(map[uuid.UUID][]*msgqueue.Message) + + for _, cb := range callbacks { + key := v1.IdInsertedAt{ + ID: cb.DurableTaskId, + InsertedAt: cb.DurableTaskInsertedAt, + } + + dispatcherLookup, ok := idInsertedAtToDispatcherId[key] + + if !ok { + tc.l.Warn().Msgf("no runtime/dispatcher lookup row for task %d, skipping callback delivery", cb.DurableTaskId) + continue + } + + if dispatcherLookup.IsEvicted { + tc.l.Debug().Msgf("task %d is evicted, publishing restore message", cb.DurableTaskId) + + restoreMsg, err := tasktypes.DurableRestoreTaskMessage(tenantId, cb.DurableTaskExternalId, "callback satisfied while task evicted") + if err != nil { + tc.l.Error().Err(err).Msgf("failed to create restore message for task %s", cb.DurableTaskExternalId) + continue + } + + if err := tc.mq.SendMessage(ctx, msgqueue.TASK_PROCESSING_QUEUE, restoreMsg); err != nil { + tc.l.Error().Err(err).Msgf("failed to publish restore message for task %s", cb.DurableTaskExternalId) + } + continue + } + + dispatcherId := dispatcherLookup.DispatcherId + if dispatcherId == nil { + tc.l.Warn().Msgf("task %d has runtime but no durable dispatcher id, skipping callback delivery", cb.DurableTaskId) + continue + } + + msg, err := 
tasktypes.DurableCallbackCompletedMessage( + tenantId, + cb.DurableTaskExternalId, + cb.InvocationCount, + cb.BranchId, + cb.NodeId, + cb.Data, + ) + if err != nil { + tc.l.Error().Err(err).Msgf("failed to create callback completed message for task %s node %d", cb.DurableTaskExternalId, cb.NodeId) + continue + } + + dispatcherToMsgs[*dispatcherId] = append(dispatcherToMsgs[*dispatcherId], msg) + } + + for dispatcherId, msgs := range dispatcherToMsgs { + for _, m := range msgs { + if err := tc.mq.SendMessage(ctx, msgqueue.QueueTypeFromDispatcherID(dispatcherId), m); err != nil { + tc.l.Error().Err(err).Msgf("failed to send callback completed message to dispatcher %s", dispatcherId) + } + } + } + + return nil +} + +func (tc *TasksControllerImpl) handleDurableRestoreTask(ctx context.Context, tenantId uuid.UUID, payloads [][]byte) error { + msgs := msgqueue.JSONConvert[tasktypes.DurableRestoreTaskPayload](payloads) + + externalIds := make([]uuid.UUID, 0, len(msgs)) + reasonByExternalId := make(map[uuid.UUID]string, len(msgs)) + for _, msg := range msgs { + externalIds = append(externalIds, msg.TaskExternalId) + reasonByExternalId[msg.TaskExternalId] = msg.Reason + } + + flatTasks, err := tc.repov1.Tasks().FlattenExternalIds(ctx, tenantId, externalIds) + if err != nil { + return fmt.Errorf("failed to batch-lookup tasks for restore: %w", err) + } + + if len(flatTasks) == 0 { + return nil + } + + tasksToRestore := make([]v1.TaskIdInsertedAtRetryCount, 0, len(flatTasks)) + for _, t := range flatTasks { + tasksToRestore = append(tasksToRestore, v1.TaskIdInsertedAtRetryCount{ + Id: t.ID, + InsertedAt: t.InsertedAt, + RetryCount: t.RetryCount, + }) + } + + restoredRows, err := tc.repov1.Tasks().RestoreEvictedTasks(ctx, tenantId, tasksToRestore) + if err != nil { + return fmt.Errorf("failed to batch-restore evicted tasks: %w", err) + } + + restoredByTaskId := make(map[int64]*sqlcv1.RestoreEvictedTasksRow, len(restoredRows)) + for _, r := range restoredRows { + 
restoredByTaskId[r.TaskID] = r + } + + invCountOpts := make([]v1.IdInsertedAt, 0, len(flatTasks)) + for _, t := range flatTasks { + invCountOpts = append(invCountOpts, v1.IdInsertedAt{ID: t.ID, InsertedAt: t.InsertedAt}) + } + + invocationCounts, err := tc.repov1.DurableEvents().GetDurableTaskInvocationCounts(ctx, tenantId, invCountOpts) + if err != nil { + return fmt.Errorf("failed to get durable task invocation counts for restoring tasks: %w", err) + } + + queues := make(map[string]struct{}) + + for _, t := range flatTasks { + restored, ok := restoredByTaskId[t.ID] + if !ok || !restored.Queued { + tc.l.Warn().Msgf("task %s was not requeued (not evicted or already queued)", t.ExternalID) + continue + } + + var durableInvCount int32 + if count, ok := invocationCounts[v1.IdInsertedAt{ID: t.ID, InsertedAt: t.InsertedAt}]; ok && count != nil { + durableInvCount = *count + } + + reason := reasonByExternalId[t.ExternalID] + + olapMsg, err := tasktypes.MonitoringEventMessageFromInternal( + tenantId, + tasktypes.CreateMonitoringEventPayload{ + TaskId: t.ID, + RetryCount: t.RetryCount, + DurableInvocationCount: durableInvCount, + EventTimestamp: time.Now(), + EventType: sqlcv1.V1EventTypeOlapDURABLERESTORING, + EventMessage: fmt.Sprintf("Restoring evicted task: %s", reason), + }, + ) + if err == nil { + if pubErr := tc.pubBuffer.Pub(ctx, msgqueue.OLAP_QUEUE, olapMsg, false); pubErr != nil { + tc.l.Warn().Err(pubErr).Msg("failed to publish DURABLE_RESTORING to OLAP") + } + } + + if restored.Queue != "" { + queues[restored.Queue] = struct{}{} + } else { + tc.l.Warn().Str("task_id", t.ExternalID.String()).Msg("restored task has empty queue, skipping scheduler notification") + } + } + + if len(queues) > 0 { + if err := tc.notifySchedulerQueues(ctx, tenantId, queues); err != nil { + tc.l.Error().Err(err).Msg("failed to notify scheduler queues") + } + } + + return nil +} + +func (tc *TasksControllerImpl) notifySchedulerQueues(ctx context.Context, tenantId uuid.UUID, queues 
map[string]struct{}) error { + tenant, err := tc.repov1.Tenant().GetTenantByID(ctx, tenantId) + if err != nil { + return fmt.Errorf("could not get tenant for scheduler notification: %w", err) + } + + if !tenant.SchedulerPartitionId.Valid { + return nil + } + + queueNames := make([]string, 0, len(queues)) + for q := range queues { + queueNames = append(queueNames, q) + } + + msg, err := msgqueue.NewTenantMessage( + tenantId, + msgqueue.MsgIDCheckTenantQueue, + true, + false, + tasktypes.CheckTenantQueuesPayload{ + QueueNames: queueNames, + }, + ) + if err != nil { + return fmt.Errorf("failed to build check-tenant-queue message for queues %v: %w", queueNames, err) + } + + if err := tc.mq.SendMessage(ctx, msgqueue.QueueTypeFromPartitionIDAndController(tenant.SchedulerPartitionId.String, msgqueue.Scheduler), msg); err != nil { + return fmt.Errorf("failed to notify scheduler for queues %v: %w", queueNames, err) + } + + return nil +} diff --git a/internal/services/controllers/task/process_sleeps.go b/internal/services/controllers/task/process_sleeps.go index de7c0d6fd..814ed8082 100644 --- a/internal/services/controllers/task/process_sleeps.go +++ b/internal/services/controllers/task/process_sleeps.go @@ -34,5 +34,11 @@ func (tc *TasksControllerImpl) processSleeps(ctx context.Context, tenantId strin } } + if len(matchResult.SatisfiedDurableEventLogEntries) > 0 { + if err := tc.processSatisfiedEventLogEntry(ctx, tenantIdUUID, matchResult.SatisfiedDurableEventLogEntries); err != nil { + tc.l.Error().Err(err).Msg("could not process satisfied entries from sleep") + } + } + return shouldContinue, nil } diff --git a/internal/services/controllers/task/trigger/trigger.go b/internal/services/controllers/task/trigger/trigger.go index 527ccd835..b90a1b6f5 100644 --- a/internal/services/controllers/task/trigger/trigger.go +++ b/internal/services/controllers/task/trigger/trigger.go @@ -137,22 +137,20 @@ func (tw *TriggerWriter) TriggerFromWorkflowNames(ctx context.Context, tenantId 
return fmt.Errorf("could not trigger workflows from names: %w", err) } - eg := &errgroup.Group{} - - eg.Go(func() error { - return tw.signaler.SignalTasksCreated(ctx, tenantId, tasks) - }) - - eg.Go(func() error { - return tw.signaler.SignalDAGsCreated(ctx, tenantId, dags) - }) - // signaling errors do not result in a failure, since we have already written the tasks to the database, but // we log the error // FIXME: we need a mechanism to DLQ these failed signals - if err := eg.Wait(); err != nil { + if err := tw.signaler.SignalCreated(ctx, tenantId, tasks, dags); err != nil { tw.l.Error().Err(err).Msg("failed to signal created tasks and DAGs in TriggerFromWorkflowNames") } return nil } + +func (tw *TriggerWriter) SignalCreated(ctx context.Context, tenantId uuid.UUID, tasks []*v1.V1TaskWithPayload, dags []*v1.DAGWithData) error { + if err := tw.signaler.SignalCreated(ctx, tenantId, tasks, dags); err != nil { + tw.l.Error().Err(err).Msg("failed to signal created tasks and DAGs in SignalCreated") + } + + return nil +} diff --git a/internal/services/dispatcher/contracts/dispatcher.pb.go b/internal/services/dispatcher/contracts/dispatcher.pb.go index 21722b3b1..30dd5cd88 100644 --- a/internal/services/dispatcher/contracts/dispatcher.pb.go +++ b/internal/services/dispatcher/contracts/dispatcher.pb.go @@ -857,6 +857,8 @@ type AssignedAction struct { WorkflowId *string `protobuf:"bytes,19,opt,name=workflow_id,json=workflowId,proto3,oneof" json:"workflow_id,omitempty"` // (optional) the workflow version id WorkflowVersionId *string `protobuf:"bytes,20,opt,name=workflow_version_id,json=workflowVersionId,proto3,oneof" json:"workflow_version_id,omitempty"` + // (optional) the invocation count for durable task events (required for durable events, otherwise null) + DurableTaskInvocationCount *int32 `protobuf:"varint,21,opt,name=durable_task_invocation_count,json=durableTaskInvocationCount,proto3,oneof" json:"durable_task_invocation_count,omitempty"` } func (x *AssignedAction) 
Reset() { @@ -1031,6 +1033,13 @@ func (x *AssignedAction) GetWorkflowVersionId() string { return "" } +func (x *AssignedAction) GetDurableTaskInvocationCount() int32 { + if x != nil && x.DurableTaskInvocationCount != nil { + return *x.DurableTaskInvocationCount + } + return 0 +} + type WorkerListenRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2265,6 +2274,100 @@ func (*ReleaseSlotResponse) Descriptor() ([]byte, []int) { return file_dispatcher_proto_rawDescGZIP(), []int{25} } +type RestoreEvictedTaskRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TaskRunExternalId string `protobuf:"bytes,1,opt,name=task_run_external_id,json=taskRunExternalId,proto3" json:"task_run_external_id,omitempty"` +} + +func (x *RestoreEvictedTaskRequest) Reset() { + *x = RestoreEvictedTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_dispatcher_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RestoreEvictedTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RestoreEvictedTaskRequest) ProtoMessage() {} + +func (x *RestoreEvictedTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_dispatcher_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RestoreEvictedTaskRequest.ProtoReflect.Descriptor instead. 
+func (*RestoreEvictedTaskRequest) Descriptor() ([]byte, []int) { + return file_dispatcher_proto_rawDescGZIP(), []int{26} +} + +func (x *RestoreEvictedTaskRequest) GetTaskRunExternalId() string { + if x != nil { + return x.TaskRunExternalId + } + return "" +} + +type RestoreEvictedTaskResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Requeued bool `protobuf:"varint,1,opt,name=requeued,proto3" json:"requeued,omitempty"` +} + +func (x *RestoreEvictedTaskResponse) Reset() { + *x = RestoreEvictedTaskResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_dispatcher_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RestoreEvictedTaskResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RestoreEvictedTaskResponse) ProtoMessage() {} + +func (x *RestoreEvictedTaskResponse) ProtoReflect() protoreflect.Message { + mi := &file_dispatcher_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RestoreEvictedTaskResponse.ProtoReflect.Descriptor instead. 
+func (*RestoreEvictedTaskResponse) Descriptor() ([]byte, []int) { + return file_dispatcher_proto_rawDescGZIP(), []int{27} +} + +func (x *RestoreEvictedTaskResponse) GetRequeued() bool { + if x != nil { + return x.Requeued + } + return false +} + type GetVersionRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2274,7 +2377,7 @@ type GetVersionRequest struct { func (x *GetVersionRequest) Reset() { *x = GetVersionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_dispatcher_proto_msgTypes[26] + mi := &file_dispatcher_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2287,7 +2390,7 @@ func (x *GetVersionRequest) String() string { func (*GetVersionRequest) ProtoMessage() {} func (x *GetVersionRequest) ProtoReflect() protoreflect.Message { - mi := &file_dispatcher_proto_msgTypes[26] + mi := &file_dispatcher_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2300,7 +2403,7 @@ func (x *GetVersionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetVersionRequest.ProtoReflect.Descriptor instead. 
func (*GetVersionRequest) Descriptor() ([]byte, []int) { - return file_dispatcher_proto_rawDescGZIP(), []int{26} + return file_dispatcher_proto_rawDescGZIP(), []int{28} } type GetVersionResponse struct { @@ -2314,7 +2417,7 @@ type GetVersionResponse struct { func (x *GetVersionResponse) Reset() { *x = GetVersionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_dispatcher_proto_msgTypes[27] + mi := &file_dispatcher_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2327,7 +2430,7 @@ func (x *GetVersionResponse) String() string { func (*GetVersionResponse) ProtoMessage() {} func (x *GetVersionResponse) ProtoReflect() protoreflect.Message { - mi := &file_dispatcher_proto_msgTypes[27] + mi := &file_dispatcher_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2340,7 +2443,7 @@ func (x *GetVersionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetVersionResponse.ProtoReflect.Descriptor instead. 
func (*GetVersionResponse) Descriptor() ([]byte, []int) { - return file_dispatcher_proto_rawDescGZIP(), []int{27} + return file_dispatcher_proto_rawDescGZIP(), []int{29} } func (x *GetVersionResponse) GetVersion() string { @@ -2438,7 +2541,7 @@ var file_dispatcher_proto_rawDesc = []byte{ 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, - 0x65, 0x72, 0x49, 0x64, 0x22, 0xab, 0x07, 0x0a, 0x0e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, + 0x65, 0x72, 0x49, 0x64, 0x22, 0x95, 0x08, 0x0a, 0x0e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, @@ -2488,326 +2591,346 @@ var file_dispatcher_proto_rawDesc = []byte{ 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x48, 0x05, 0x52, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x49, 0x64, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x61, 0x64, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, - 0x17, 0x0a, 0x15, 0x5f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x63, 0x68, 0x69, - 0x6c, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x42, - 0x19, 0x0a, 0x17, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x77, 0x6f, 0x72, 
0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, - 0x69, 0x64, 0x22, 0x32, 0x0a, 0x13, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, - 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, - 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, - 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x22, 0x37, 0x0a, 0x18, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, - 0x55, 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x22, - 0x55, 0x0a, 0x19, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x55, 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, - 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, - 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, - 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, - 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x22, 0xca, 0x02, 0x0a, 0x13, 0x47, 0x72, 0x6f, 0x75, 0x70, - 0x4b, 0x65, 0x79, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1b, - 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x0f, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 
0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, - 0x6e, 0x49, 0x64, 0x12, 0x2e, 0x0a, 0x14, 0x67, 0x65, 0x74, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, - 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x10, 0x67, 0x65, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x52, 0x75, - 0x6e, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, - 0x12, 0x43, 0x0a, 0x0f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x37, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x47, 0x72, 0x6f, 0x75, - 0x70, 0x4b, 0x65, 0x79, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, - 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x22, 0xe3, 0x03, 0x0a, 0x0f, 0x53, 0x74, 0x65, 0x70, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, - 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, - 0x65, 0x72, 0x49, 0x64, 0x12, 0x15, 0x0a, 0x06, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, 0x49, 
0x64, 0x12, 0x1c, 0x0a, 0x0a, 0x6a, - 0x6f, 0x62, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x6a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, - 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, - 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x14, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, - 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x11, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, - 0x12, 0x43, 0x0a, 0x0f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x33, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x53, 0x74, 0x65, 0x70, - 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, - 0x24, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0a, - 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x75, - 
0x6e, 0x74, 0x88, 0x01, 0x01, 0x12, 0x2d, 0x0a, 0x10, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x5f, - 0x6e, 0x6f, 0x74, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x48, - 0x01, 0x52, 0x0e, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x4e, 0x6f, 0x74, 0x52, 0x65, 0x74, 0x72, - 0x79, 0x88, 0x01, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x5f, - 0x6e, 0x6f, 0x74, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x22, 0x4f, 0x0a, 0x13, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, - 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x22, 0x83, 0x02, 0x0a, 0x20, 0x53, - 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x2b, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, - 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x5f, - 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x11, 0x61, 0x64, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x88, 0x01, - 0x01, 0x12, 0x37, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, - 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x76, 
0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x02, 0x52, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, - 0x74, 0x61, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x88, 0x01, 0x01, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x42, 0x16, - 0x0a, 0x14, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x42, 0x18, 0x0a, 0x16, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x22, 0x48, 0x0a, 0x1e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x6f, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, - 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x22, 0xe6, 0x03, 0x0a, 0x0d, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, - 0x75, 0x6e, 0x49, 0x64, 0x12, 0x32, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x31, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 
0x54, 0x79, 0x70, 0x65, - 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x12, 0x43, 0x0a, 0x0f, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x50, - 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x61, 0x6e, 0x67, 0x75, 0x70, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x68, 0x61, 0x6e, 0x67, 0x75, 0x70, 0x12, 0x26, - 0x0a, 0x0c, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x74, 0x72, - 0x69, 0x65, 0x73, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x48, 0x01, 0x52, 0x0a, 0x72, - 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x03, 0x48, 0x02, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x88, - 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x65, 0x74, 0x72, - 0x69, 0x65, 0x73, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, - 0x75, 0x6e, 
0x74, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, - 0x64, 0x65, 0x78, 0x22, 0xdf, 0x01, 0x0a, 0x10, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x52, 0x75, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, - 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, - 0x75, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x43, 0x0a, 0x0f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x28, 0x0a, 0x07, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x53, - 0x74, 0x65, 0x70, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0xc8, 0x01, 0x0a, 0x0d, 0x53, 0x74, 0x65, 0x70, 0x52, 0x75, - 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2f, 0x0a, 0x14, 0x74, 0x61, 0x73, 0x6b, 0x5f, - 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x45, 0x78, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x61, 0x73, - 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x0a, 0x6a, 0x6f, 0x62, 0x5f, 0x72, 0x75, 0x6e, - 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x6f, 0x62, 0x52, 0x75, - 0x6e, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x12, 0x1b, - 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, - 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, - 0x22, 0x93, 0x01, 0x0a, 0x0d, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, - 0x74, 0x61, 0x12, 0x2f, 0x0a, 0x14, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, - 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x11, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, - 0x0f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x46, 0x69, - 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x17, 0x0a, 0x15, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, - 0x64, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x6e, 0x0a, 0x10, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x6f, 0x6e, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x46, 0x0a, 0x1d, 0x64, 0x75, 0x72, 
0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x15, 0x20, 0x01, 0x28, 0x05, 0x48, 0x06, + 0x52, 0x1a, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x76, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x88, 0x01, 0x01, 0x42, + 0x16, 0x0a, 0x14, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x17, 0x0a, 0x15, 0x5f, 0x63, 0x68, 0x69, 0x6c, + 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, + 0x69, 0x64, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, + 0x69, 0x64, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x42, 0x20, 0x0a, 0x1e, 0x5f, 0x64, + 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x6e, 0x76, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x32, 0x0a, 0x13, + 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, - 0x12, 0x3d, 0x0a, 0x0c, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x61, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x41, 0x74, 0x22, - 0x13, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7a, 0x0a, 0x15, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, - 0x14, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x74, 0x61, 0x73, - 0x6b, 0x52, 0x75, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x30, - 0x0a, 0x14, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, - 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x79, - 0x22, 0x53, 0x0a, 0x16, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x6f, - 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x74, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x41, 0x74, 0x22, 0x45, 0x0a, 0x12, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, - 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x14, 0x74, - 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x74, 0x61, 0x73, 0x6b, 0x52, - 0x75, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 
0x64, 0x22, 0x15, 0x0a, 0x13, - 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x13, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x2e, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, - 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2a, 0x41, 0x0a, 0x04, 0x53, 0x44, 0x4b, 0x53, - 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x06, 0x0a, - 0x02, 0x47, 0x4f, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x59, 0x54, 0x48, 0x4f, 0x4e, 0x10, - 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x53, 0x43, 0x52, 0x49, 0x50, 0x54, 0x10, - 0x03, 0x12, 0x08, 0x0a, 0x04, 0x52, 0x55, 0x42, 0x59, 0x10, 0x04, 0x2a, 0x4e, 0x0a, 0x0a, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, - 0x52, 0x54, 0x5f, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x52, 0x55, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, - 0x0f, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x5f, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x52, 0x55, 0x4e, - 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x54, 0x41, 0x52, 0x54, 0x5f, 0x47, 0x45, 0x54, 0x5f, - 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x02, 0x2a, 0xa2, 0x01, 0x0a, 0x17, - 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x1c, 0x47, 0x52, 0x4f, 0x55, 0x50, - 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x20, 0x0a, 0x1c, 0x47, 0x52, 0x4f, - 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 
0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x22, 0x0a, 0x1e, 0x47, - 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, - 0x1f, 0x0a, 0x1b, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x45, 0x56, 0x45, - 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, - 0x2a, 0xac, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x65, 0x70, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54, 0x45, 0x50, - 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x45, 0x56, - 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, - 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, - 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x12, 0x20, 0x0a, - 0x1c, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x41, 0x43, 0x4b, 0x4e, 0x4f, 0x57, 0x4c, 0x45, 0x44, 0x47, 0x45, 0x44, 0x10, 0x04, 0x2a, - 0x65, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x19, 0x0a, 0x15, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x52, 0x45, - 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x45, 0x50, - 0x5f, 0x52, 0x55, 0x4e, 0x10, 0x01, 0x12, 
0x1e, 0x0a, 0x1a, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, - 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, - 0x5f, 0x52, 0x55, 0x4e, 0x10, 0x02, 0x2a, 0xfe, 0x01, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, - 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1f, 0x0a, - 0x1b, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x21, - 0x0a, 0x1d, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, - 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, - 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, - 0x03, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, - 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, - 0x45, 0x44, 0x10, 0x04, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, - 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x54, 0x49, 0x4d, 0x45, - 0x44, 0x5f, 0x4f, 0x55, 0x54, 0x10, 0x05, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x45, 0x53, 0x4f, 0x55, - 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, - 0x54, 0x52, 0x45, 0x41, 0x4d, 0x10, 0x06, 0x2a, 0x3c, 0x0a, 0x14, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x24, 0x0a, 0x20, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x52, 
0x55, 0x4e, 0x5f, - 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4e, 0x49, 0x53, - 0x48, 0x45, 0x44, 0x10, 0x00, 0x32, 0xb1, 0x07, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x08, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, - 0x12, 0x16, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, - 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, - 0x72, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, 0x06, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x12, 0x14, 0x2e, - 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x30, 0x01, 0x12, 0x35, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, - 0x65, 0x6e, 0x56, 0x32, 0x12, 0x14, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x41, 0x73, 0x73, - 0x69, 0x67, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x30, 0x01, 0x12, - 0x34, 0x0a, 0x09, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x12, 0x11, 0x2e, 0x48, - 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x12, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x19, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x22, 0x37, 0x0a, 0x18, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x55, 0x6e, 0x73, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x77, 0x6f, 
0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x22, 0x55, 0x0a, 0x19, 0x57, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x55, 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, + 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, + 0x22, 0xca, 0x02, 0x0a, 0x13, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x12, 0x2e, 0x0a, + 0x14, 0x67, 0x65, 0x74, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x72, + 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x67, 0x65, 0x74, + 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x12, 0x1b, 0x0a, + 0x09, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x43, 0x0a, 0x0f, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x37, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xe3, 0x03, + 0x0a, 0x0f, 0x53, 0x74, 0x65, 0x70, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x12, 0x15, + 0x0a, 0x06, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x0a, 0x6a, 0x6f, 0x62, 0x5f, 0x72, 0x75, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x6f, 0x62, 0x52, 0x75, + 0x6e, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x14, + 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x74, 0x61, 0x73, 0x6b, + 0x52, 0x75, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x1b, 0x0a, + 0x09, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x43, 0x0a, 0x0f, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x07, 
0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x33, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x53, 0x74, 0x65, 0x70, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x24, 0x0a, 0x0b, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, + 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x88, 0x01, 0x01, 0x12, + 0x2d, 0x0a, 0x10, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x72, 0x65, + 0x74, 0x72, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x0e, 0x73, 0x68, 0x6f, + 0x75, 0x6c, 0x64, 0x4e, 0x6f, 0x74, 0x52, 0x65, 0x74, 0x72, 0x79, 0x88, 0x01, 0x01, 0x42, 0x0e, + 0x0a, 0x0c, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x13, + 0x0a, 0x11, 0x5f, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x72, 0x65, + 0x74, 0x72, 0x79, 0x22, 0x4f, 0x0a, 0x13, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x65, + 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, + 0x65, 0x72, 0x49, 0x64, 0x22, 0x83, 0x02, 0x0a, 0x20, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x73, 0x12, 0x21, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x6f, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x30, 0x01, 0x12, 0x53, 0x0a, 0x17, 0x53, 0x75, 0x62, - 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x52, 0x75, 0x6e, 0x73, 0x12, 0x1f, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, - 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x52, 0x75, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x3f, - 0x0a, 0x13, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x65, 0x70, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x10, 0x2e, 0x53, 0x74, 0x65, 0x70, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x1a, 0x14, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x47, 0x0a, 0x17, 0x53, 0x65, 0x6e, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x2e, 0x47, 0x72, 0x6f, - 0x75, 0x70, 0x4b, 0x65, 0x79, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x1a, 0x14, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x10, 
0x50, 0x75, 0x74, 0x4f, - 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12, 0x0e, 0x2e, 0x4f, - 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x1a, 0x16, 0x2e, 0x4f, - 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x0b, 0x55, 0x6e, 0x73, 0x75, 0x62, 0x73, - 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x19, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x55, 0x6e, - 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1a, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x55, 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, - 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x43, - 0x0a, 0x0e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x12, 0x16, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, - 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x3a, 0x0a, 0x0b, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x53, 0x6c, - 0x6f, 0x74, 0x12, 0x13, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x53, 0x6c, 0x6f, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, - 0x65, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x4f, 0x0a, 0x12, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x1a, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x57, 0x6f, - 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1b, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, - 0x4c, 0x61, 
0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x37, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, - 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x47, 0x5a, 0x45, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2d, - 0x64, 0x65, 0x76, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2f, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x64, 0x69, - 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, - 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x0f, 0x77, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, + 0x6e, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x11, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x88, 0x01, 0x01, 0x12, 0x37, 0x0a, 0x15, 0x61, + 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, 0x52, 0x13, 0x61, 0x64, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x88, 0x01, 0x01, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 
0x6c, 0x6f, + 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x61, 0x64, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x6b, 0x65, 0x79, + 0x42, 0x18, 0x0a, 0x16, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x48, 0x0a, 0x1e, 0x53, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x52, 0x75, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0f, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, + 0x75, 0x6e, 0x49, 0x64, 0x22, 0xe6, 0x03, 0x0a, 0x0d, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x12, 0x32, + 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x31, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 
0x49, 0x64, 0x12, 0x43, 0x0a, 0x0f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x23, 0x0a, 0x0d, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x12, 0x16, 0x0a, 0x06, 0x68, 0x61, 0x6e, 0x67, 0x75, 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x06, 0x68, 0x61, 0x6e, 0x67, 0x75, 0x70, 0x12, 0x26, 0x0a, 0x0c, 0x74, 0x61, 0x73, 0x6b, + 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, + 0x52, 0x0b, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x88, 0x01, 0x01, + 0x12, 0x24, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x05, 0x48, 0x01, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x0a, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, + 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x42, 0x0e, 0x0a, + 0x0c, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x0e, 0x0a, + 0x0c, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x22, 0xdf, 0x01, + 0x0a, 0x10, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 
0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, + 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x77, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, + 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x43, 0x0a, 0x0f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x28, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x53, 0x74, 0x65, 0x70, 0x52, 0x75, 0x6e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, + 0xc8, 0x01, 0x0a, 0x0d, 0x53, 0x74, 0x65, 0x70, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x12, 0x2f, 0x0a, 0x14, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x11, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x1c, 0x0a, 0x0a, 0x6a, 0x6f, 0x62, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x12, 0x19, 0x0a, + 
0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x42, + 0x09, 0x0a, 0x07, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, 0x93, 0x01, 0x0a, 0x0d, 0x4f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2f, 0x0a, 0x14, + 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x74, 0x61, 0x73, 0x6b, + 0x52, 0x75, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x12, 0x0a, + 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x61, 0x6c, 0x6c, 0x65, + 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, + 0x22, 0x17, 0x0a, 0x15, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, 0x74, + 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6e, 0x0a, 0x10, 0x48, 0x65, 0x61, + 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, + 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x0c, 0x68, 0x65, + 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x68, 0x65, + 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x41, 0x74, 0x22, 0x13, 0x0a, 0x11, 0x48, 0x65, 0x61, + 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7a, + 0x0a, 0x15, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x14, 0x74, 0x61, 0x73, 0x6b, 0x5f, + 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x45, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x6e, 0x63, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x22, 0x53, 0x0a, 0x16, 0x52, 0x65, + 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, + 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x41, 0x74, 0x22, + 0x45, 0x0a, 0x12, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x14, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, + 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x11, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 
0x45, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, + 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x45, 0x76, 0x69, 0x63, 0x74, 0x65, 0x64, 0x54, + 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x14, 0x74, 0x61, + 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x75, + 0x6e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x22, 0x38, 0x0a, 0x1a, 0x52, + 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x45, 0x76, 0x69, 0x63, 0x74, 0x65, 0x64, 0x54, 0x61, 0x73, + 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x75, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x75, 0x65, 0x64, 0x22, 0x13, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x2e, 0x0a, 0x12, 0x47, 0x65, + 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2a, 0x41, 0x0a, 0x04, 0x53, 0x44, + 0x4b, 0x53, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x06, 0x0a, 0x02, 0x47, 0x4f, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x59, 0x54, 0x48, 0x4f, + 0x4e, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x53, 0x43, 0x52, 0x49, 0x50, + 0x54, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x52, 0x55, 0x42, 0x59, 0x10, 0x04, 0x2a, 0x4e, 0x0a, + 0x0a, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x0e, 0x53, + 0x54, 0x41, 
0x52, 0x54, 0x5f, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x52, 0x55, 0x4e, 0x10, 0x00, 0x12, + 0x13, 0x0a, 0x0f, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x5f, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x52, + 0x55, 0x4e, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x54, 0x41, 0x52, 0x54, 0x5f, 0x47, 0x45, + 0x54, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x02, 0x2a, 0xa2, 0x01, + 0x0a, 0x17, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x1c, 0x47, 0x52, 0x4f, + 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x20, 0x0a, 0x1c, 0x47, + 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x22, 0x0a, + 0x1e, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, + 0x02, 0x12, 0x1f, 0x0a, 0x1b, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x45, + 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, + 0x10, 0x03, 0x2a, 0xac, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x65, 0x70, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54, + 0x45, 0x50, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54, 0x45, 0x50, 0x5f, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, + 0x45, 0x44, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x45, 0x56, 0x45, + 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 
0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, + 0x44, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x45, 0x56, 0x45, 0x4e, + 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x12, + 0x20, 0x0a, 0x1c, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x41, 0x43, 0x4b, 0x4e, 0x4f, 0x57, 0x4c, 0x45, 0x44, 0x47, 0x45, 0x44, 0x10, + 0x04, 0x2a, 0x65, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x19, 0x0a, 0x15, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, + 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, + 0x45, 0x50, 0x5f, 0x52, 0x55, 0x4e, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x45, 0x53, 0x4f, + 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, + 0x4f, 0x57, 0x5f, 0x52, 0x55, 0x4e, 0x10, 0x02, 0x2a, 0xfe, 0x01, 0x0a, 0x11, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, + 0x0a, 0x1b, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x1f, 0x0a, 0x1b, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, + 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, 0x01, + 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, + 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, + 0x44, 0x10, 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x49, 
0x4c, 0x45, + 0x44, 0x10, 0x03, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, + 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, + 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x54, 0x49, + 0x4d, 0x45, 0x44, 0x5f, 0x4f, 0x55, 0x54, 0x10, 0x05, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x45, 0x53, + 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x10, 0x06, 0x2a, 0x3c, 0x0a, 0x14, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x24, 0x0a, 0x20, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x52, 0x55, + 0x4e, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4e, + 0x49, 0x53, 0x48, 0x45, 0x44, 0x10, 0x00, 0x32, 0x82, 0x08, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x70, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x08, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x65, 0x72, 0x12, 0x16, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x57, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, 0x06, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x12, + 0x14, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x30, 0x01, 0x12, 0x35, 0x0a, 0x08, 0x4c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x56, 0x32, 0x12, 0x14, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, + 0x69, 0x73, 0x74, 0x65, 
0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x41, + 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x30, + 0x01, 0x12, 0x34, 0x0a, 0x09, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x12, 0x11, + 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x12, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x19, 0x53, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x62, 0x65, 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x21, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, + 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x30, 0x01, 0x12, 0x53, 0x0a, 0x17, 0x53, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x73, 0x12, 0x1f, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x62, 0x65, 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, + 0x12, 0x3f, 0x0a, 0x13, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x65, 0x70, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x10, 0x2e, 0x53, 0x74, 0x65, 0x70, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x1a, 0x14, 0x2e, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x47, 0x0a, 0x17, 0x53, 0x65, 0x6e, 0x64, 0x47, 
0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, + 0x79, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x2e, 0x47, + 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x1a, 0x14, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x10, 0x50, 0x75, + 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12, 0x0e, + 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x1a, 0x16, + 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x0b, 0x55, 0x6e, 0x73, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x19, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, + 0x55, 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x55, 0x6e, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x43, 0x0a, 0x0e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x12, 0x16, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x52, 0x65, 0x66, + 0x72, 0x65, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3a, 0x0a, 0x0b, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, + 0x53, 0x6c, 0x6f, 0x74, 0x12, 0x13, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x53, 0x6c, + 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x52, 0x65, 0x6c, 0x65, + 0x61, 0x73, 0x65, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 
0x00, 0x12, 0x4f, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x45, 0x76, 0x69, 0x63, + 0x74, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x1a, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x45, 0x76, 0x69, 0x63, 0x74, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x45, 0x76, 0x69, + 0x63, 0x74, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x12, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, + 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x1a, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, + 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x57, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x37, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x12, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x47, 0x5a, 0x45, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x74, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2f, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2f, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x61, 0x63, 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2823,7 +2946,7 @@ func file_dispatcher_proto_rawDescGZIP() []byte { } var file_dispatcher_proto_enumTypes = make([]protoimpl.EnumInfo, 7) -var 
file_dispatcher_proto_msgTypes = make([]protoimpl.MessageInfo, 31) +var file_dispatcher_proto_msgTypes = make([]protoimpl.MessageInfo, 33) var file_dispatcher_proto_goTypes = []interface{}{ (SDKS)(0), // 0: SDKS (ActionType)(0), // 1: ActionType @@ -2858,32 +2981,34 @@ var file_dispatcher_proto_goTypes = []interface{}{ (*RefreshTimeoutResponse)(nil), // 30: RefreshTimeoutResponse (*ReleaseSlotRequest)(nil), // 31: ReleaseSlotRequest (*ReleaseSlotResponse)(nil), // 32: ReleaseSlotResponse - (*GetVersionRequest)(nil), // 33: GetVersionRequest - (*GetVersionResponse)(nil), // 34: GetVersionResponse - nil, // 35: WorkerRegisterRequest.LabelsEntry - nil, // 36: WorkerRegisterRequest.SlotConfigEntry - nil, // 37: UpsertWorkerLabelsRequest.LabelsEntry - (*timestamppb.Timestamp)(nil), // 38: google.protobuf.Timestamp + (*RestoreEvictedTaskRequest)(nil), // 33: RestoreEvictedTaskRequest + (*RestoreEvictedTaskResponse)(nil), // 34: RestoreEvictedTaskResponse + (*GetVersionRequest)(nil), // 35: GetVersionRequest + (*GetVersionResponse)(nil), // 36: GetVersionResponse + nil, // 37: WorkerRegisterRequest.LabelsEntry + nil, // 38: WorkerRegisterRequest.SlotConfigEntry + nil, // 39: UpsertWorkerLabelsRequest.LabelsEntry + (*timestamppb.Timestamp)(nil), // 40: google.protobuf.Timestamp } var file_dispatcher_proto_depIdxs = []int32{ 0, // 0: RuntimeInfo.language:type_name -> SDKS - 35, // 1: WorkerRegisterRequest.labels:type_name -> WorkerRegisterRequest.LabelsEntry + 37, // 1: WorkerRegisterRequest.labels:type_name -> WorkerRegisterRequest.LabelsEntry 8, // 2: WorkerRegisterRequest.runtime_info:type_name -> RuntimeInfo - 36, // 3: WorkerRegisterRequest.slot_config:type_name -> WorkerRegisterRequest.SlotConfigEntry - 37, // 4: UpsertWorkerLabelsRequest.labels:type_name -> UpsertWorkerLabelsRequest.LabelsEntry + 38, // 3: WorkerRegisterRequest.slot_config:type_name -> WorkerRegisterRequest.SlotConfigEntry + 39, // 4: UpsertWorkerLabelsRequest.labels:type_name -> 
UpsertWorkerLabelsRequest.LabelsEntry 1, // 5: AssignedAction.action_type:type_name -> ActionType - 38, // 6: GroupKeyActionEvent.event_timestamp:type_name -> google.protobuf.Timestamp + 40, // 6: GroupKeyActionEvent.event_timestamp:type_name -> google.protobuf.Timestamp 2, // 7: GroupKeyActionEvent.event_type:type_name -> GroupKeyActionEventType - 38, // 8: StepActionEvent.event_timestamp:type_name -> google.protobuf.Timestamp + 40, // 8: StepActionEvent.event_timestamp:type_name -> google.protobuf.Timestamp 3, // 9: StepActionEvent.event_type:type_name -> StepActionEventType 4, // 10: WorkflowEvent.resource_type:type_name -> ResourceType 5, // 11: WorkflowEvent.event_type:type_name -> ResourceEventType - 38, // 12: WorkflowEvent.event_timestamp:type_name -> google.protobuf.Timestamp + 40, // 12: WorkflowEvent.event_timestamp:type_name -> google.protobuf.Timestamp 6, // 13: WorkflowRunEvent.event_type:type_name -> WorkflowRunEventType - 38, // 14: WorkflowRunEvent.event_timestamp:type_name -> google.protobuf.Timestamp + 40, // 14: WorkflowRunEvent.event_timestamp:type_name -> google.protobuf.Timestamp 24, // 15: WorkflowRunEvent.results:type_name -> StepRunResult - 38, // 16: HeartbeatRequest.heartbeat_at:type_name -> google.protobuf.Timestamp - 38, // 17: RefreshTimeoutResponse.timeout_at:type_name -> google.protobuf.Timestamp + 40, // 16: HeartbeatRequest.heartbeat_at:type_name -> google.protobuf.Timestamp + 40, // 17: RefreshTimeoutResponse.timeout_at:type_name -> google.protobuf.Timestamp 7, // 18: WorkerRegisterRequest.LabelsEntry.value:type_name -> WorkerLabels 7, // 19: UpsertWorkerLabelsRequest.LabelsEntry.value:type_name -> WorkerLabels 9, // 20: Dispatcher.Register:input_type -> WorkerRegisterRequest @@ -2898,24 +3023,26 @@ var file_dispatcher_proto_depIdxs = []int32{ 15, // 29: Dispatcher.Unsubscribe:input_type -> WorkerUnsubscribeRequest 29, // 30: Dispatcher.RefreshTimeout:input_type -> RefreshTimeoutRequest 31, // 31: 
Dispatcher.ReleaseSlot:input_type -> ReleaseSlotRequest - 11, // 32: Dispatcher.UpsertWorkerLabels:input_type -> UpsertWorkerLabelsRequest - 33, // 33: Dispatcher.GetVersion:input_type -> GetVersionRequest - 10, // 34: Dispatcher.Register:output_type -> WorkerRegisterResponse - 13, // 35: Dispatcher.Listen:output_type -> AssignedAction - 13, // 36: Dispatcher.ListenV2:output_type -> AssignedAction - 28, // 37: Dispatcher.Heartbeat:output_type -> HeartbeatResponse - 22, // 38: Dispatcher.SubscribeToWorkflowEvents:output_type -> WorkflowEvent - 23, // 39: Dispatcher.SubscribeToWorkflowRuns:output_type -> WorkflowRunEvent - 19, // 40: Dispatcher.SendStepActionEvent:output_type -> ActionEventResponse - 19, // 41: Dispatcher.SendGroupKeyActionEvent:output_type -> ActionEventResponse - 26, // 42: Dispatcher.PutOverridesData:output_type -> OverridesDataResponse - 16, // 43: Dispatcher.Unsubscribe:output_type -> WorkerUnsubscribeResponse - 30, // 44: Dispatcher.RefreshTimeout:output_type -> RefreshTimeoutResponse - 32, // 45: Dispatcher.ReleaseSlot:output_type -> ReleaseSlotResponse - 12, // 46: Dispatcher.UpsertWorkerLabels:output_type -> UpsertWorkerLabelsResponse - 34, // 47: Dispatcher.GetVersion:output_type -> GetVersionResponse - 34, // [34:48] is the sub-list for method output_type - 20, // [20:34] is the sub-list for method input_type + 33, // 32: Dispatcher.RestoreEvictedTask:input_type -> RestoreEvictedTaskRequest + 11, // 33: Dispatcher.UpsertWorkerLabels:input_type -> UpsertWorkerLabelsRequest + 35, // 34: Dispatcher.GetVersion:input_type -> GetVersionRequest + 10, // 35: Dispatcher.Register:output_type -> WorkerRegisterResponse + 13, // 36: Dispatcher.Listen:output_type -> AssignedAction + 13, // 37: Dispatcher.ListenV2:output_type -> AssignedAction + 28, // 38: Dispatcher.Heartbeat:output_type -> HeartbeatResponse + 22, // 39: Dispatcher.SubscribeToWorkflowEvents:output_type -> WorkflowEvent + 23, // 40: Dispatcher.SubscribeToWorkflowRuns:output_type -> 
WorkflowRunEvent + 19, // 41: Dispatcher.SendStepActionEvent:output_type -> ActionEventResponse + 19, // 42: Dispatcher.SendGroupKeyActionEvent:output_type -> ActionEventResponse + 26, // 43: Dispatcher.PutOverridesData:output_type -> OverridesDataResponse + 16, // 44: Dispatcher.Unsubscribe:output_type -> WorkerUnsubscribeResponse + 30, // 45: Dispatcher.RefreshTimeout:output_type -> RefreshTimeoutResponse + 32, // 46: Dispatcher.ReleaseSlot:output_type -> ReleaseSlotResponse + 34, // 47: Dispatcher.RestoreEvictedTask:output_type -> RestoreEvictedTaskResponse + 12, // 48: Dispatcher.UpsertWorkerLabels:output_type -> UpsertWorkerLabelsResponse + 36, // 49: Dispatcher.GetVersion:output_type -> GetVersionResponse + 35, // [35:50] is the sub-list for method output_type + 20, // [20:35] is the sub-list for method input_type 20, // [20:20] is the sub-list for extension type_name 20, // [20:20] is the sub-list for extension extendee 0, // [0:20] is the sub-list for field type_name @@ -3240,7 +3367,7 @@ func file_dispatcher_proto_init() { } } file_dispatcher_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetVersionRequest); i { + switch v := v.(*RestoreEvictedTaskRequest); i { case 0: return &v.state case 1: @@ -3252,6 +3379,30 @@ func file_dispatcher_proto_init() { } } file_dispatcher_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RestoreEvictedTaskResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dispatcher_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetVersionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dispatcher_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetVersionResponse); i { case 0: return 
&v.state @@ -3278,7 +3429,7 @@ func file_dispatcher_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_dispatcher_proto_rawDesc, NumEnums: 7, - NumMessages: 31, + NumMessages: 33, NumExtensions: 0, NumServices: 1, }, diff --git a/internal/services/dispatcher/contracts/dispatcher_grpc.pb.go b/internal/services/dispatcher/contracts/dispatcher_grpc.pb.go index 7fc5c8e92..3f5c21515 100644 --- a/internal/services/dispatcher/contracts/dispatcher_grpc.pb.go +++ b/internal/services/dispatcher/contracts/dispatcher_grpc.pb.go @@ -37,6 +37,7 @@ type DispatcherClient interface { Unsubscribe(ctx context.Context, in *WorkerUnsubscribeRequest, opts ...grpc.CallOption) (*WorkerUnsubscribeResponse, error) RefreshTimeout(ctx context.Context, in *RefreshTimeoutRequest, opts ...grpc.CallOption) (*RefreshTimeoutResponse, error) ReleaseSlot(ctx context.Context, in *ReleaseSlotRequest, opts ...grpc.CallOption) (*ReleaseSlotResponse, error) + RestoreEvictedTask(ctx context.Context, in *RestoreEvictedTaskRequest, opts ...grpc.CallOption) (*RestoreEvictedTaskResponse, error) UpsertWorkerLabels(ctx context.Context, in *UpsertWorkerLabelsRequest, opts ...grpc.CallOption) (*UpsertWorkerLabelsResponse, error) // GetVersion returns the dispatcher protocol version as a simple integer. // SDKs use this to determine feature support (e.g. slot_config registration). @@ -251,6 +252,15 @@ func (c *dispatcherClient) ReleaseSlot(ctx context.Context, in *ReleaseSlotReque return out, nil } +func (c *dispatcherClient) RestoreEvictedTask(ctx context.Context, in *RestoreEvictedTaskRequest, opts ...grpc.CallOption) (*RestoreEvictedTaskResponse, error) { + out := new(RestoreEvictedTaskResponse) + err := c.cc.Invoke(ctx, "/Dispatcher/RestoreEvictedTask", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + func (c *dispatcherClient) UpsertWorkerLabels(ctx context.Context, in *UpsertWorkerLabelsRequest, opts ...grpc.CallOption) (*UpsertWorkerLabelsResponse, error) { out := new(UpsertWorkerLabelsResponse) err := c.cc.Invoke(ctx, "/Dispatcher/UpsertWorkerLabels", in, out, opts...) @@ -288,6 +298,7 @@ type DispatcherServer interface { Unsubscribe(context.Context, *WorkerUnsubscribeRequest) (*WorkerUnsubscribeResponse, error) RefreshTimeout(context.Context, *RefreshTimeoutRequest) (*RefreshTimeoutResponse, error) ReleaseSlot(context.Context, *ReleaseSlotRequest) (*ReleaseSlotResponse, error) + RestoreEvictedTask(context.Context, *RestoreEvictedTaskRequest) (*RestoreEvictedTaskResponse, error) UpsertWorkerLabels(context.Context, *UpsertWorkerLabelsRequest) (*UpsertWorkerLabelsResponse, error) // GetVersion returns the dispatcher protocol version as a simple integer. // SDKs use this to determine feature support (e.g. slot_config registration). 
@@ -336,6 +347,9 @@ func (UnimplementedDispatcherServer) RefreshTimeout(context.Context, *RefreshTim func (UnimplementedDispatcherServer) ReleaseSlot(context.Context, *ReleaseSlotRequest) (*ReleaseSlotResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ReleaseSlot not implemented") } +func (UnimplementedDispatcherServer) RestoreEvictedTask(context.Context, *RestoreEvictedTaskRequest) (*RestoreEvictedTaskResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RestoreEvictedTask not implemented") +} func (UnimplementedDispatcherServer) UpsertWorkerLabels(context.Context, *UpsertWorkerLabelsRequest) (*UpsertWorkerLabelsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method UpsertWorkerLabels not implemented") } @@ -588,6 +602,24 @@ func _Dispatcher_ReleaseSlot_Handler(srv interface{}, ctx context.Context, dec f return interceptor(ctx, in, info, handler) } +func _Dispatcher_RestoreEvictedTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RestoreEvictedTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DispatcherServer).RestoreEvictedTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/Dispatcher/RestoreEvictedTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DispatcherServer).RestoreEvictedTask(ctx, req.(*RestoreEvictedTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Dispatcher_UpsertWorkerLabels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UpsertWorkerLabelsRequest) if err := dec(in); err != nil { @@ -663,6 +695,10 @@ var Dispatcher_ServiceDesc = grpc.ServiceDesc{ MethodName: "ReleaseSlot", Handler: _Dispatcher_ReleaseSlot_Handler, }, + { + 
MethodName: "RestoreEvictedTask", + Handler: _Dispatcher_RestoreEvictedTask_Handler, + }, { MethodName: "UpsertWorkerLabels", Handler: _Dispatcher_UpsertWorkerLabels_Handler, diff --git a/internal/services/dispatcher/dispatcher.go b/internal/services/dispatcher/dispatcher.go index 97f383e8f..b94761a27 100644 --- a/internal/services/dispatcher/dispatcher.go +++ b/internal/services/dispatcher/dispatcher.go @@ -14,6 +14,7 @@ import ( "github.com/hatchet-dev/hatchet/internal/msgqueue" "github.com/hatchet-dev/hatchet/internal/services/dispatcher/contracts" "github.com/hatchet-dev/hatchet/internal/services/shared/recoveryutils" + tasktypes "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes/v1" "github.com/hatchet-dev/hatchet/internal/syncx" "github.com/hatchet-dev/hatchet/pkg/analytics" "github.com/hatchet-dev/hatchet/pkg/logger" @@ -50,8 +51,10 @@ type DispatcherImpl struct { dispatcherId uuid.UUID workers *workers a *hatcheterrors.Wrapped - analytics analytics.Analytics - version string + + durableCallbackFn func(taskExternalId uuid.UUID, invocationCount int32, branchId, nodeId int64, payload []byte) error + analytics analytics.Analytics + version string } var ErrWorkerNotFound = fmt.Errorf("worker not found") @@ -402,6 +405,8 @@ func (d *DispatcherImpl) handleV1Task(ctx context.Context, task *msgqueue.Messag err = d.a.WrapErr(d.handleTaskBulkAssignedTask(ctx, task), map[string]interface{}{}) case "task-cancelled": err = d.a.WrapErr(d.handleTaskCancelled(ctx, task), map[string]interface{}{}) + case msgqueue.MsgIDDurableCallbackCompleted: + err = d.a.WrapErr(d.handleDurableCallbackCompleted(ctx, task), map[string]interface{}{}) default: err = fmt.Errorf("unknown task: %s", task.ID) } @@ -409,6 +414,38 @@ func (d *DispatcherImpl) handleV1Task(ctx context.Context, task *msgqueue.Messag return err } +func (d *DispatcherImpl) DispatcherId() uuid.UUID { + return d.dispatcherId +} + +func (d *DispatcherImpl) SetDurableCallbackHandler(fn func(uuid.UUID, int32, 
int64, int64, []byte) error) { + d.durableCallbackFn = fn +} + +func (d *DispatcherImpl) handleDurableCallbackCompleted(ctx context.Context, task *msgqueue.Message) error { + if d.durableCallbackFn == nil { + return nil + } + + payloads := msgqueue.JSONConvert[tasktypes.DurableCallbackCompletedPayload](task.Payloads) + + for _, payload := range payloads { + err := d.durableCallbackFn( + payload.TaskExternalId, + payload.InvocationCount, + payload.BranchId, + payload.NodeId, + payload.Payload, + ) + + if err != nil { + d.l.Warn().Err(err).Msgf("failed to deliver callback completion for task %s (worker may still be reconnecting; polling path will catch up)", payload.TaskExternalId) + } + } + + return nil +} + func (d *DispatcherImpl) runUpdateHeartbeat(ctx context.Context) func() { return func() { d.l.Debug().Msgf("dispatcher: updating heartbeat") diff --git a/internal/services/dispatcher/dispatcher_v1.go b/internal/services/dispatcher/dispatcher_v1.go index 8089ba9e3..9c7530816 100644 --- a/internal/services/dispatcher/dispatcher_v1.go +++ b/internal/services/dispatcher/dispatcher_v1.go @@ -119,12 +119,41 @@ func (d *DispatcherImpl) HandleLocalAssignments(ctx context.Context, tenantId, w } // we already have payloads; no lookups necessary. 
we can just send them to the worker - taskIdToData := make(map[int64]*v1.V1TaskWithPayload) + taskIdToData := make(map[int64]*V1TaskWithPayloadAndInvocationCount) taskIds := make([]int64, 0, len(tasks)) + getDurableInvocationCountOpts := make([]v1.IdInsertedAt, 0) + for _, assigned := range tasks { - taskIdToData[assigned.Task.ID] = assigned.Task + taskIdToData[assigned.Task.ID] = &V1TaskWithPayloadAndInvocationCount{ + V1TaskWithPayload: assigned.Task, + } taskIds = append(taskIds, assigned.Task.ID) + + if assigned.Task.IsDurable.Valid && assigned.Task.IsDurable.Bool { + getDurableInvocationCountOpts = append(getDurableInvocationCountOpts, v1.IdInsertedAt{ + ID: assigned.Task.ID, + InsertedAt: assigned.Task.InsertedAt, + }) + } + } + + if len(getDurableInvocationCountOpts) > 0 { + invocationCounts, err := d.repov1.DurableEvents().GetDurableTaskInvocationCounts(ctx, tenantId, getDurableInvocationCountOpts) + + if err != nil { + d.l.Error().Err(err).Msgf("could not get durable task invocation counts for %d tasks", len(getDurableInvocationCountOpts)) + } else { + for _, assigned := range tasks { + if assigned.Task.IsDurable.Valid && assigned.Task.IsDurable.Bool { + count := invocationCounts[v1.IdInsertedAt{ + ID: assigned.Task.ID, + InsertedAt: assigned.Task.InsertedAt, + }] + taskIdToData[assigned.Task.ID].InvocationCount = count + } + } + } } // this is one of the core differences from handleTaskBulkAssignedTask: we run this synchronously @@ -139,12 +168,17 @@ func (d *DispatcherImpl) HandleLocalAssignments(ctx context.Context, tenantId, w return err } +type V1TaskWithPayloadAndInvocationCount struct { + *v1.V1TaskWithPayload + InvocationCount *int32 // only used for durable tasks +} + func (d *DispatcherImpl) populateTaskData( ctx context.Context, requeue func(task *sqlcv1.V1Task), tenantId uuid.UUID, taskIds []int64, -) (map[int64]*v1.V1TaskWithPayload, error) { +) (map[int64]*V1TaskWithPayloadAndInvocationCount, error) { bulkDatas, err := 
d.repov1.Tasks().ListTasks(ctx, tenantId, taskIds) if err != nil { @@ -156,6 +190,32 @@ func (d *DispatcherImpl) populateTaskData( return nil, err } + getInvocationCountOpts := make([]v1.IdInsertedAt, 0) + + for _, task := range bulkDatas { + if task.IsDurable.Valid && task.IsDurable.Bool { + getInvocationCountOpts = append(getInvocationCountOpts, v1.IdInsertedAt{ + ID: task.ID, + InsertedAt: task.InsertedAt, + }) + } + } + + invocationCounts := make(map[v1.IdInsertedAt]*int32) + + if len(getInvocationCountOpts) > 0 { + invocationCounts, err = d.repov1.DurableEvents().GetDurableTaskInvocationCounts(ctx, tenantId, getInvocationCountOpts) + + if err != nil { + for _, task := range bulkDatas { + requeue(task) + } + + d.l.Error().Err(err).Msgf("could not get durable task invocation counts for %d tasks", len(getInvocationCountOpts)) + return nil, err + } + } + parentDataMap, err := d.repov1.Tasks().ListTaskParentOutputs(ctx, tenantId, bulkDatas) if err != nil { @@ -252,7 +312,7 @@ func (d *DispatcherImpl) populateTaskData( } } - taskIdToData := make(map[int64]*v1.V1TaskWithPayload) + taskIdToData := make(map[int64]*V1TaskWithPayloadAndInvocationCount) for _, task := range bulkDatas { input, ok := inputs[v1.RetrievePayloadOpts{ @@ -268,9 +328,17 @@ func (d *DispatcherImpl) populateTaskData( input = task.Input } - taskIdToData[task.ID] = &v1.V1TaskWithPayload{ - V1Task: task, - Payload: input, + invocationCount := invocationCounts[v1.IdInsertedAt{ + ID: task.ID, + InsertedAt: task.InsertedAt, + }] + + taskIdToData[task.ID] = &V1TaskWithPayloadAndInvocationCount{ + &v1.V1TaskWithPayload{ + V1Task: task, + Payload: input, + }, + invocationCount, } } @@ -282,7 +350,7 @@ func (d *DispatcherImpl) sendTasksToWorker( requeue func(task *sqlcv1.V1Task), tenantId, workerId uuid.UUID, taskIds []int64, - tasks map[int64]*v1.V1TaskWithPayload, + tasks map[int64]*V1TaskWithPayloadAndInvocationCount, ) error { // get the worker for this task workers, err := d.workers.Get(workerId) @@ 
-312,7 +380,7 @@ func (d *DispatcherImpl) sendTasksToWorker( var success bool for i, w := range workers { - err := w.StartTaskFromBulk(ctx, tenantId, task) + err := w.StartTaskFromBulk(ctx, tenantId, task.V1TaskWithPayload, task.InvocationCount) if err != nil { multiErr = multierror.Append( @@ -326,15 +394,21 @@ func (d *DispatcherImpl) sendTasksToWorker( } if success { + var durableInvCount int32 + if task.InvocationCount != nil { + durableInvCount = *task.InvocationCount + } + msg, err := tasktypesv1.MonitoringEventMessageFromInternal( task.TenantID, tasktypesv1.CreateMonitoringEventPayload{ - TaskId: task.ID, - RetryCount: task.RetryCount, - WorkerId: &workerId, - EventType: sqlcv1.V1EventTypeOlapSENTTOWORKER, - EventTimestamp: time.Now().UTC(), - EventMessage: "Sent task run to the assigned worker", + TaskId: task.ID, + RetryCount: task.RetryCount, + DurableInvocationCount: durableInvCount, + WorkerId: &workerId, + EventType: sqlcv1.V1EventTypeOlapSENTTOWORKER, + EventTimestamp: time.Now().UTC(), + EventMessage: "Sent task run to the assigned worker", }, ) diff --git a/internal/services/dispatcher/server.go b/internal/services/dispatcher/server.go index 03795f85a..c9f175c68 100644 --- a/internal/services/dispatcher/server.go +++ b/internal/services/dispatcher/server.go @@ -477,7 +477,14 @@ func (s *DispatcherImpl) Heartbeat(ctx context.Context, req *contracts.Heartbeat func (s *DispatcherImpl) ReleaseSlot(ctx context.Context, req *contracts.ReleaseSlotRequest) (*contracts.ReleaseSlotResponse, error) { tenant := ctx.Value("tenant").(*sqlcv1.Tenant) s.analytics.Count(ctx, analytics.Worker, analytics.Release) - return s.releaseSlotV1(ctx, tenant, req) + return s.releaseSlot(ctx, tenant, req) +} + +func (s *DispatcherImpl) RestoreEvictedTask(ctx context.Context, req *contracts.RestoreEvictedTaskRequest) (*contracts.RestoreEvictedTaskResponse, error) { + tenant := ctx.Value("tenant").(*sqlcv1.Tenant) + s.analytics.Count(ctx, analytics.DurableTask, analytics.Restore) 
+ + return s.restoreEvictedTask(ctx, tenant, req) } func (s *DispatcherImpl) SubscribeToWorkflowEvents(request *contracts.SubscribeToWorkflowEventsRequest, stream contracts.Dispatcher_SubscribeToWorkflowEventsServer) error { diff --git a/internal/services/dispatcher/server_v1.go b/internal/services/dispatcher/server_v1.go index 785011cef..3200f983c 100644 --- a/internal/services/dispatcher/server_v1.go +++ b/internal/services/dispatcher/server_v1.go @@ -581,9 +581,19 @@ func (s *DispatcherImpl) sendStepActionEventV1(ctx context.Context, request *con } } + var durableInvCount int32 + invocationCounts, err := s.repov1.DurableEvents().GetDurableTaskInvocationCounts(ctx, tenant.ID, []v1.IdInsertedAt{ + {ID: task.ID, InsertedAt: task.InsertedAt}, + }) + if err == nil { + if count, ok := invocationCounts[v1.IdInsertedAt{ID: task.ID, InsertedAt: task.InsertedAt}]; ok && count != nil { + durableInvCount = *count + } + } + switch request.EventType { case contracts.StepActionEventType_STEP_EVENT_TYPE_STARTED: - return s.handleTaskStarted(ctx, task, retryCount, request) + return s.handleTaskStarted(ctx, task, retryCount, durableInvCount, request) case contracts.StepActionEventType_STEP_EVENT_TYPE_ACKNOWLEDGED: // TODO: IMPLEMENT tenant := ctx.Value("tenant").(*sqlcv1.Tenant) @@ -592,15 +602,15 @@ func (s *DispatcherImpl) sendStepActionEventV1(ctx context.Context, request *con WorkerId: request.WorkerId, }, nil case contracts.StepActionEventType_STEP_EVENT_TYPE_COMPLETED: - return s.handleTaskCompleted(ctx, task, retryCount, request) + return s.handleTaskCompleted(ctx, task, retryCount, durableInvCount, request) case contracts.StepActionEventType_STEP_EVENT_TYPE_FAILED: - return s.handleTaskFailed(ctx, task, retryCount, request) + return s.handleTaskFailed(ctx, task, retryCount, durableInvCount, request) } return nil, status.Errorf(codes.InvalidArgument, "invalid task external run id %s", request.TaskRunExternalId) } -func (s *DispatcherImpl) handleTaskStarted(inputCtx 
context.Context, task *sqlcv1.FlattenExternalIdsRow, retryCount int32, request *contracts.StepActionEvent) (*contracts.ActionEventResponse, error) { +func (s *DispatcherImpl) handleTaskStarted(inputCtx context.Context, task *sqlcv1.FlattenExternalIdsRow, retryCount, durableInvocationCount int32, request *contracts.StepActionEvent) (*contracts.ActionEventResponse, error) { tenant := inputCtx.Value("tenant").(*sqlcv1.Tenant) tenantId := tenant.ID @@ -608,6 +618,7 @@ func (s *DispatcherImpl) handleTaskStarted(inputCtx context.Context, task *sqlcv tenantId, task.ID, retryCount, + durableInvocationCount, request, ) @@ -627,7 +638,7 @@ func (s *DispatcherImpl) handleTaskStarted(inputCtx context.Context, task *sqlcv }, nil } -func (s *DispatcherImpl) handleTaskCompleted(inputCtx context.Context, task *sqlcv1.FlattenExternalIdsRow, retryCount int32, request *contracts.StepActionEvent) (*contracts.ActionEventResponse, error) { +func (s *DispatcherImpl) handleTaskCompleted(inputCtx context.Context, task *sqlcv1.FlattenExternalIdsRow, retryCount int32, durableInvocationCount int32, request *contracts.StepActionEvent) (*contracts.ActionEventResponse, error) { tenant := inputCtx.Value("tenant").(*sqlcv1.Tenant) tenantId := tenant.ID @@ -664,6 +675,7 @@ func (s *DispatcherImpl) handleTaskCompleted(inputCtx context.Context, task *sql tenantId, task.ID, retryCount, + durableInvocationCount, request, ) @@ -682,7 +694,7 @@ func (s *DispatcherImpl) handleTaskCompleted(inputCtx context.Context, task *sql return resp, nil } -func (s *DispatcherImpl) handleTaskFailed(inputCtx context.Context, task *sqlcv1.FlattenExternalIdsRow, retryCount int32, request *contracts.StepActionEvent) (*contracts.ActionEventResponse, error) { +func (s *DispatcherImpl) handleTaskFailed(inputCtx context.Context, task *sqlcv1.FlattenExternalIdsRow, retryCount int32, durableInvocationCount int32, request *contracts.StepActionEvent) (*contracts.ActionEventResponse, error) { tenant := 
inputCtx.Value("tenant").(*sqlcv1.Tenant) tenantId := tenant.ID @@ -778,7 +790,7 @@ func (d *DispatcherImpl) refreshTimeoutV1(ctx context.Context, tenant *sqlcv1.Te }, nil } -func (d *DispatcherImpl) releaseSlotV1(ctx context.Context, tenant *sqlcv1.Tenant, request *contracts.ReleaseSlotRequest) (*contracts.ReleaseSlotResponse, error) { +func (d *DispatcherImpl) releaseSlot(ctx context.Context, tenant *sqlcv1.Tenant, request *contracts.ReleaseSlotRequest) (*contracts.ReleaseSlotResponse, error) { tenantId := tenant.ID stepRunId, err := uuid.Parse(request.TaskRunExternalId) if err != nil { @@ -818,6 +830,26 @@ func (d *DispatcherImpl) releaseSlotV1(ctx context.Context, tenant *sqlcv1.Tenan return &contracts.ReleaseSlotResponse{}, nil } +func (d *DispatcherImpl) restoreEvictedTask(ctx context.Context, tenant *sqlcv1.Tenant, request *contracts.RestoreEvictedTaskRequest) (*contracts.RestoreEvictedTaskResponse, error) { + tenantId := tenant.ID + taskExternalId, err := uuid.Parse(request.TaskRunExternalId) + if err != nil { + return nil, fmt.Errorf("invalid task_run_external_id: %w", err) + } + + msg, err := tasktypes.DurableRestoreTaskMessage(tenantId, taskExternalId, "Restore via dispatcher RPC") + if err != nil { + return nil, err + } + + err = d.mqv1.SendMessage(ctx, msgqueue.TASK_PROCESSING_QUEUE, msg) + if err != nil { + return nil, err + } + + return &contracts.RestoreEvictedTaskResponse{Requeued: true}, nil +} + func (s *DispatcherImpl) subscribeToWorkflowEventsV1(request *contracts.SubscribeToWorkflowEventsRequest, stream contracts.Dispatcher_SubscribeToWorkflowEventsServer) error { if request.WorkflowRunId != nil { workflowRunId, err := uuid.Parse(*request.WorkflowRunId) diff --git a/internal/services/dispatcher/subscribed_worker_v1.go b/internal/services/dispatcher/subscribed_worker_v1.go index cd35fc393..b73c192eb 100644 --- a/internal/services/dispatcher/subscribed_worker_v1.go +++ b/internal/services/dispatcher/subscribed_worker_v1.go @@ -23,6 +23,7 @@ func 
(worker *subscribedWorker) StartTaskFromBulk( ctx context.Context, tenantId uuid.UUID, task *v1.V1TaskWithPayload, + durableInvocationCount *int32, ) error { if ctx.Err() != nil { return fmt.Errorf("context done before starting task: %w", ctx.Err()) @@ -37,7 +38,7 @@ func (worker *subscribedWorker) StartTaskFromBulk( inputBytes = task.Payload } - action := populateAssignedAction(tenantId, task.V1Task, task.RetryCount) + action := populateAssignedAction(tenantId, task.V1Task, task.RetryCount, durableInvocationCount) action.ActionType = contracts.ActionType_START_STEP_RUN action.ActionPayload = string(inputBytes) @@ -189,7 +190,7 @@ func (worker *subscribedWorker) CancelTask( ctx, span := telemetry.NewSpan(ctx, "cancel-task") // nolint:ineffassign defer span.End() - action := populateAssignedAction(tenantId, task, retryCount) + action := populateAssignedAction(tenantId, task, retryCount, nil) action.ActionType = contracts.ActionType_CANCEL_STEP_RUN @@ -243,24 +244,25 @@ func (worker *subscribedWorker) CancelTask( return nil } -func populateAssignedAction(tenantID uuid.UUID, task *sqlcv1.V1Task, retryCount int32) *contracts.AssignedAction { +func populateAssignedAction(tenantID uuid.UUID, task *sqlcv1.V1Task, retryCount int32, invocationCount *int32) *contracts.AssignedAction { workflowId := task.WorkflowID.String() workflowVersionId := task.WorkflowVersionID.String() action := &contracts.AssignedAction{ - TenantId: tenantID.String(), - JobId: task.StepID.String(), // FIXME - JobName: task.StepReadableID, - JobRunId: task.ExternalID.String(), // FIXME - TaskId: task.StepID.String(), - TaskRunExternalId: task.ExternalID.String(), - ActionId: task.ActionID, - TaskName: task.StepReadableID, - WorkflowRunId: task.WorkflowRunID.String(), - RetryCount: retryCount, - Priority: task.Priority.Int32, - WorkflowId: &workflowId, - WorkflowVersionId: &workflowVersionId, + TenantId: tenantID.String(), + JobId: task.StepID.String(), // FIXME + JobName: task.StepReadableID, + 
JobRunId: task.ExternalID.String(), // FIXME + TaskId: task.StepID.String(), + TaskRunExternalId: task.ExternalID.String(), + ActionId: task.ActionID, + TaskName: task.StepReadableID, + WorkflowRunId: task.WorkflowRunID.String(), + RetryCount: retryCount, + Priority: task.Priority.Int32, + WorkflowId: &workflowId, + WorkflowVersionId: &workflowVersionId, + DurableTaskInvocationCount: invocationCount, } if task.AdditionalMetadata != nil { diff --git a/internal/services/dispatcher/v1/dispatcher.go b/internal/services/dispatcher/v1/dispatcher.go index 7c63e1d32..c52209dfd 100644 --- a/internal/services/dispatcher/v1/dispatcher.go +++ b/internal/services/dispatcher/v1/dispatcher.go @@ -3,10 +3,13 @@ package v1 import ( "fmt" + "github.com/google/uuid" "github.com/rs/zerolog" "github.com/hatchet-dev/hatchet/internal/msgqueue" + "github.com/hatchet-dev/hatchet/internal/services/controllers/task/trigger" contracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1" + "github.com/hatchet-dev/hatchet/internal/syncx" "github.com/hatchet-dev/hatchet/pkg/analytics" "github.com/hatchet-dev/hatchet/pkg/logger" v1 "github.com/hatchet-dev/hatchet/pkg/repository" @@ -15,26 +18,34 @@ import ( type DispatcherService interface { contracts.V1DispatcherServer + DeliverDurableEventLogEntryCompletion(taskExternalId uuid.UUID, invocationCount int32, branchId, nodeId int64, payload []byte) error } type DispatcherServiceImpl struct { contracts.UnimplementedV1DispatcherServer - repo v1.Repository - mq msgqueue.MessageQueue - v validator.Validator - analytics analytics.Analytics - l *zerolog.Logger + triggerWriter *trigger.TriggerWriter + pubBuffer *msgqueue.MQPubBuffer + dispatcherId uuid.UUID + + durableInvocations syncx.Map[uuid.UUID, *durableTaskInvocation] + workerInvocations syncx.Map[uuid.UUID, *durableTaskInvocation] + repo v1.Repository + mq msgqueue.MessageQueue + v validator.Validator + analytics analytics.Analytics + l *zerolog.Logger } type DispatcherServiceOpt 
func(*DispatcherServiceOpts) type DispatcherServiceOpts struct { - repo v1.Repository - mq msgqueue.MessageQueue - v validator.Validator - analytics analytics.Analytics - l *zerolog.Logger + dispatcherId uuid.UUID + repo v1.Repository + mq msgqueue.MessageQueue + v validator.Validator + analytics analytics.Analytics + l *zerolog.Logger } func defaultDispatcherServiceOpts() *DispatcherServiceOpts { @@ -72,6 +83,12 @@ func WithLogger(l *zerolog.Logger) DispatcherServiceOpt { } } +func WithDispatcherId(id uuid.UUID) DispatcherServiceOpt { + return func(opts *DispatcherServiceOpts) { + opts.dispatcherId = id + } +} + func WithAnalytics(a analytics.Analytics) DispatcherServiceOpt { return func(opts *DispatcherServiceOpts) { opts.analytics = a @@ -93,11 +110,17 @@ func NewDispatcherService(fs ...DispatcherServiceOpt) (DispatcherService, error) return nil, fmt.Errorf("task queue is required. use WithMessageQueue") } + pubBuffer := msgqueue.NewMQPubBuffer(opts.mq) + tw := trigger.NewTriggerWriter(opts.mq, opts.repo, opts.l, pubBuffer, 0) + return &DispatcherServiceImpl{ - repo: opts.repo, - mq: opts.mq, - v: opts.v, - analytics: opts.analytics, - l: opts.l, + repo: opts.repo, + mq: opts.mq, + v: opts.v, + l: opts.l, + triggerWriter: tw, + pubBuffer: pubBuffer, + dispatcherId: opts.dispatcherId, + analytics: opts.analytics, }, nil } diff --git a/internal/services/dispatcher/v1/server.go b/internal/services/dispatcher/v1/server.go index 4b860fc4a..cd8a0eefa 100644 --- a/internal/services/dispatcher/v1/server.go +++ b/internal/services/dispatcher/v1/server.go @@ -14,7 +14,9 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "github.com/hatchet-dev/hatchet/internal/msgqueue" contracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1" + tasktypes "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes/v1" "github.com/hatchet-dev/hatchet/pkg/analytics" v1 "github.com/hatchet-dev/hatchet/pkg/repository" 
"github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" @@ -79,6 +81,7 @@ func (d *DispatcherServiceImpl) RegisterDurableEvent(ctx context.Context, req *c SignalTaskId: task.ID, SignalTaskInsertedAt: task.InsertedAt, SignalExternalId: task.ExternalID, + SignalTaskExternalId: task.ExternalID, SignalKey: req.SignalKey, }) @@ -321,3 +324,720 @@ func waitFor(wg *sync.WaitGroup, timeout time.Duration, l *zerolog.Logger) { l.Error().Msg("timed out waiting for wait group") } } + +type durableTaskInvocation struct { + server contracts.V1Dispatcher_DurableTaskServer + tenantId uuid.UUID + workerId uuid.UUID + l *zerolog.Logger + + sendMu sync.Mutex +} + +func (s *durableTaskInvocation) send(resp *contracts.DurableTaskResponse) error { + s.sendMu.Lock() + defer s.sendMu.Unlock() + return s.server.Send(resp) +} + +func (d *DispatcherServiceImpl) DurableTask(server contracts.V1Dispatcher_DurableTaskServer) error { + tenant := server.Context().Value("tenant").(*sqlcv1.Tenant) + tenantId := tenant.ID + + ctx, cancel := context.WithCancel(server.Context()) + defer cancel() + + invocation := &durableTaskInvocation{ + server: server, + tenantId: tenantId, + l: d.l, + } + + registeredTasks := make(map[uuid.UUID]struct{}) + defer func() { + for taskId := range registeredTasks { + d.durableInvocations.Delete(taskId) + } + d.workerInvocations.Delete(invocation.workerId) + }() + + registerTask := func(externalIdStr string) { + taskExtId, err := uuid.Parse(externalIdStr) + if err != nil { + return + } + if _, exists := registeredTasks[taskExtId]; !exists { + d.durableInvocations.Store(taskExtId, invocation) + registeredTasks[taskExtId] = struct{}{} + } + } + + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + req, err := server.Recv() + if err != nil { + if errors.Is(err, io.EOF) || status.Code(err) == codes.Canceled { + return nil + } + d.l.Error().Err(err).Msg("error receiving durable task request") + return err + } + + switch msg := req.GetMessage().(type) { + 
case *contracts.DurableTaskRequest_Memo: + registerTask(msg.Memo.DurableTaskExternalId) + case *contracts.DurableTaskRequest_TriggerRuns: + registerTask(msg.TriggerRuns.DurableTaskExternalId) + case *contracts.DurableTaskRequest_WaitFor: + registerTask(msg.WaitFor.DurableTaskExternalId) + } + + if err := d.handleDurableTaskRequest(ctx, invocation, req); err != nil { + d.l.Error().Err(err).Msg("error handling durable task request") + } + } +} + +func (d *DispatcherServiceImpl) handleDurableTaskRequest( + ctx context.Context, + invocation *durableTaskInvocation, + req *contracts.DurableTaskRequest, +) error { + switch msg := req.GetMessage().(type) { + case *contracts.DurableTaskRequest_RegisterWorker: + return d.handleRegisterWorker(ctx, invocation, msg.RegisterWorker) + case *contracts.DurableTaskRequest_Memo: + return d.handleMemo(ctx, invocation, msg.Memo) + case *contracts.DurableTaskRequest_TriggerRuns: + return d.handleTriggerRuns(ctx, invocation, msg.TriggerRuns) + case *contracts.DurableTaskRequest_WaitFor: + return d.handleWaitFor(ctx, invocation, msg.WaitFor) + case *contracts.DurableTaskRequest_EvictInvocation: + return d.handleEvictInvocation(ctx, invocation, msg.EvictInvocation) + case *contracts.DurableTaskRequest_WorkerStatus: + return d.handleWorkerStatus(ctx, invocation, msg.WorkerStatus) + case *contracts.DurableTaskRequest_CompleteMemo: + return d.handleCompleteMemo(ctx, invocation, msg.CompleteMemo) + default: + return status.Errorf(codes.InvalidArgument, "unknown message type: %T", msg) + } +} + +func (d *DispatcherServiceImpl) handleRegisterWorker( + ctx context.Context, + invocation *durableTaskInvocation, + req *contracts.DurableTaskRequestRegisterWorker, +) error { + workerId, err := uuid.Parse(req.WorkerId) + if err != nil { + return status.Errorf(codes.InvalidArgument, "invalid worker id: %v", err) + } + + d.analytics.Count(ctx, analytics.DurableTask, analytics.Register) + + invocation.workerId = workerId + 
d.workerInvocations.Store(workerId, invocation) + + err = d.repo.Workers().UpdateWorkerDurableTaskDispatcherId(ctx, invocation.tenantId, workerId, d.dispatcherId) + if err != nil { + return status.Errorf(codes.Internal, "failed to update worker durable task dispatcher id: %v", err) + } + + return invocation.send(&contracts.DurableTaskResponse{ + Message: &contracts.DurableTaskResponse_RegisterWorker{ + RegisterWorker: &contracts.DurableTaskResponseRegisterWorker{ + WorkerId: req.WorkerId, + }, + }, + }) +} + +func newEntryRef(taskExternalId string, invocationCount int32, nodeAndBranch v1.NodeIdBranchIdTuple) *contracts.DurableEventLogEntryRef { + return &contracts.DurableEventLogEntryRef{ + DurableTaskExternalId: taskExternalId, + InvocationCount: invocationCount, + BranchId: nodeAndBranch.BranchId, + NodeId: nodeAndBranch.NodeId, + } +} + +func (d *DispatcherServiceImpl) sendNonDeterminismError(invocation *durableTaskInvocation, nde *v1.NonDeterminismError, invocationCount int32) error { + return invocation.send(&contracts.DurableTaskResponse{ + Message: &contracts.DurableTaskResponse_Error{ + Error: &contracts.DurableTaskErrorResponse{ + Ref: &contracts.DurableEventLogEntryRef{ + DurableTaskExternalId: nde.TaskExternalId.String(), + InvocationCount: invocationCount, + BranchId: nde.BranchId, + NodeId: nde.NodeId, + }, + ErrorType: contracts.DurableTaskErrorType_DURABLE_TASK_ERROR_TYPE_NONDETERMINISM, + ErrorMessage: nde.Error(), + }, + }, + }) +} + +func (d *DispatcherServiceImpl) sendStaleInvocationEviction(invocation *durableTaskInvocation, sie *v1.StaleInvocationError) error { + return invocation.send(&contracts.DurableTaskResponse{ + Message: &contracts.DurableTaskResponse_ServerEvict{ + ServerEvict: &contracts.DurableTaskServerEvictNotice{ + DurableTaskExternalId: sie.TaskExternalId.String(), + InvocationCount: sie.ActualInvocationCount, + Reason: sie.Error(), + }, + }, + }) +} + +func (d *DispatcherServiceImpl) deliverSatisfiedEntries(taskExternalId string, 
result *v1.IngestDurableTaskEventResult) error { + switch result.Kind { + case sqlcv1.V1DurableEventLogKindRUN: + for _, entry := range result.TriggerRunsResult.Entries { + if entry.IsSatisfied { + taskExtId, _ := uuid.Parse(taskExternalId) + if err := d.DeliverDurableEventLogEntryCompletion( + taskExtId, + result.TriggerRunsResult.InvocationCount, + entry.BranchId, + entry.NodeId, + entry.ResultPayload, + ); err != nil { + return fmt.Errorf("failed to deliver callback completion for node %d: %w", entry.NodeId, err) + } + } + } + case sqlcv1.V1DurableEventLogKindMEMO: + if result.MemoResult.IsSatisfied { + taskExtId, _ := uuid.Parse(taskExternalId) + if err := d.DeliverDurableEventLogEntryCompletion( + taskExtId, + result.MemoResult.InvocationCount, + result.MemoResult.BranchId, + result.MemoResult.NodeId, + result.MemoResult.ResultPayload, + ); err != nil { + return fmt.Errorf("failed to deliver callback completion for node %d: %w", result.MemoResult.NodeId, err) + } + } + case sqlcv1.V1DurableEventLogKindWAITFOR: + if result.WaitForResult.IsSatisfied { + taskExtId, _ := uuid.Parse(taskExternalId) + if err := d.DeliverDurableEventLogEntryCompletion( + taskExtId, + result.WaitForResult.InvocationCount, + result.WaitForResult.BranchId, + result.WaitForResult.NodeId, + result.WaitForResult.ResultPayload, + ); err != nil { + return fmt.Errorf("failed to deliver callback completion for node %d: %w", result.WaitForResult.NodeId, err) + } + } + default: + return fmt.Errorf("unknown durable event log kind: %s", result.Kind) + } + return nil +} + +func (d *DispatcherServiceImpl) handleMemo( + ctx context.Context, + invocation *durableTaskInvocation, + req *contracts.DurableTaskMemoRequest, +) error { + taskExternalId, err := uuid.Parse(req.DurableTaskExternalId) + if err != nil { + return status.Errorf(codes.InvalidArgument, "invalid durable_task_external_id: %v", err) + } + + d.analytics.Count(ctx, analytics.DurableTask, analytics.Memo) + + task, err := 
d.repo.Tasks().GetTaskByExternalId(ctx, invocation.tenantId, taskExternalId, false) + if err != nil { + return status.Errorf(codes.NotFound, "task not found: %v", err) + } + + ingestionResult, err := d.repo.DurableEvents().IngestDurableTaskEvent(ctx, v1.IngestDurableTaskEventOpts{ + BaseIngestEventOpts: &v1.BaseIngestEventOpts{ + TenantId: invocation.tenantId, + Task: task, + Kind: sqlcv1.V1DurableEventLogKindMEMO, + InvocationCount: req.InvocationCount, + }, + Memo: &v1.IngestMemoOpts{ + Payload: req.Payload, + MemoKey: req.Key, + }, + }) + + var nde *v1.NonDeterminismError + var sie *v1.StaleInvocationError + if err != nil && errors.As(err, &nde) { + return d.sendNonDeterminismError(invocation, nde, req.InvocationCount) + } else if err != nil && errors.As(err, &sie) { + return d.sendStaleInvocationEviction(invocation, sie) + } else if err != nil { + return status.Errorf(codes.Internal, "failed to ingest memo event: %v", err) + } + + err = invocation.send(&contracts.DurableTaskResponse{ + Message: &contracts.DurableTaskResponse_MemoAck{ + MemoAck: &contracts.DurableTaskEventMemoAckResponse{ + Ref: newEntryRef(req.DurableTaskExternalId, req.InvocationCount, v1.NodeIdBranchIdTuple{ + NodeId: ingestionResult.MemoResult.NodeId, + BranchId: ingestionResult.MemoResult.BranchId, + }), + MemoAlreadyExisted: ingestionResult.MemoResult.AlreadyExisted, + MemoResultPayload: ingestionResult.MemoResult.ResultPayload, + }, + }, + }) + if err != nil { + return status.Errorf(codes.Internal, "failed to send memo ack: %v", err) + } + + return d.deliverSatisfiedEntries(req.DurableTaskExternalId, ingestionResult) +} + +func (d *DispatcherServiceImpl) handleTriggerRuns( + ctx context.Context, + invocation *durableTaskInvocation, + req *contracts.DurableTaskTriggerRunsRequest, +) error { + taskExternalId, err := uuid.Parse(req.DurableTaskExternalId) + if err != nil { + return status.Errorf(codes.InvalidArgument, "invalid durable_task_external_id: %v", err) + } + + for _, w := range 
req.TriggerOpts { + d.analytics.Count(ctx, analytics.WorkflowRun, analytics.Create, analytics.Props( + "parent_is_durable_task", w.ParentTaskRunExternalId != nil, + "has_priority", w.Priority != nil, + "is_child", w.ParentId != nil, + "has_additional_meta", w.AdditionalMetadata != nil, + "has_desired_worker_id", w.DesiredWorkerId != nil, + "has_desired_worker_labels", len(w.DesiredWorkerLabels) > 0, + )) + } + + task, err := d.repo.Tasks().GetTaskByExternalId(ctx, invocation.tenantId, taskExternalId, false) + if err != nil { + return status.Errorf(codes.NotFound, "task not found: %v", err) + } + + triggerOpts := make([]*v1.WorkflowNameTriggerOpts, 0, len(req.TriggerOpts)) + for _, triggerReq := range req.TriggerOpts { + triggerTaskData, triggerErr := d.repo.Triggers().NewTriggerTaskData(ctx, invocation.tenantId, triggerReq, task) + if triggerErr != nil { + return status.Errorf(codes.Internal, "failed to create trigger options: %v", triggerErr) + } + triggerOpts = append(triggerOpts, &v1.WorkflowNameTriggerOpts{ + TriggerTaskData: triggerTaskData, + }) + } + + if populateErr := d.repo.Triggers().PopulateExternalIdsForWorkflow(ctx, invocation.tenantId, triggerOpts); populateErr != nil { + return status.Errorf(codes.Internal, "failed to populate external ids for workflow: %v", populateErr) + } + + ingestionResult, err := d.repo.DurableEvents().IngestDurableTaskEvent(ctx, v1.IngestDurableTaskEventOpts{ + BaseIngestEventOpts: &v1.BaseIngestEventOpts{ + TenantId: invocation.tenantId, + Task: task, + Kind: sqlcv1.V1DurableEventLogKindRUN, + InvocationCount: req.InvocationCount, + }, + TriggerRuns: &v1.IngestTriggerRunsOpts{ + TriggerOpts: triggerOpts, + }, + }) + + var nde *v1.NonDeterminismError + var sie *v1.StaleInvocationError + if err != nil && errors.As(err, &nde) { + return d.sendNonDeterminismError(invocation, nde, req.InvocationCount) + } else if err != nil && errors.As(err, &sie) { + return d.sendStaleInvocationEviction(invocation, sie) + } else if err != nil { 
+ return status.Errorf(codes.Internal, "failed to ingest trigger runs event: %v", err) + } + + ackResp := &contracts.DurableTaskEventTriggerRunsAckResponse{ + DurableTaskExternalId: req.DurableTaskExternalId, + InvocationCount: req.InvocationCount, + } + + for _, entry := range ingestionResult.TriggerRunsResult.Entries { + ackResp.RunEntries = append(ackResp.RunEntries, &contracts.DurableTaskRunAckEntry{ + NodeId: entry.NodeId, + BranchId: entry.BranchId, + }) + } + + dags := ingestionResult.TriggerRunsResult.CreatedDAGs + tasks := ingestionResult.TriggerRunsResult.CreatedTasks + + if len(dags) > 0 || len(tasks) > 0 { + if sigErr := d.triggerWriter.SignalCreated(ctx, invocation.tenantId, tasks, dags); sigErr != nil { + d.l.Error().Err(sigErr).Msg("failed to signal created tasks/DAGs for durable run trigger") + } + } + + err = invocation.send(&contracts.DurableTaskResponse{ + Message: &contracts.DurableTaskResponse_TriggerRunsAck{ + TriggerRunsAck: ackResp, + }, + }) + if err != nil { + return status.Errorf(codes.Internal, "failed to send trigger runs ack: %v", err) + } + + return d.deliverSatisfiedEntries(req.DurableTaskExternalId, ingestionResult) +} + +func (d *DispatcherServiceImpl) handleWaitFor( + ctx context.Context, + invocation *durableTaskInvocation, + req *contracts.DurableTaskWaitForRequest, +) error { + taskExternalId, err := uuid.Parse(req.DurableTaskExternalId) + if err != nil { + return status.Errorf(codes.InvalidArgument, "invalid durable_task_external_id: %v", err) + } + + var hasSleep, hasUserEvent bool + if req.WaitForConditions != nil { + hasSleep = len(req.WaitForConditions.SleepConditions) > 0 + hasUserEvent = len(req.WaitForConditions.UserEventConditions) > 0 + } + d.analytics.Count(ctx, analytics.DurableTask, analytics.WaitFor, analytics.Props( + "has_sleep", hasSleep, + "has_user_event", hasUserEvent, + )) + + task, err := d.repo.Tasks().GetTaskByExternalId(ctx, invocation.tenantId, taskExternalId, false) + if err != nil { + return 
status.Errorf(codes.NotFound, "task not found: %v", err) + } + + var createConditionOpts []v1.CreateExternalSignalConditionOpt + + if req.WaitForConditions != nil { + for _, condition := range req.WaitForConditions.SleepConditions { + orGroupId, err := uuid.Parse(condition.Base.OrGroupId) + if err != nil { + return status.Errorf(codes.InvalidArgument, "or group id is not a valid uuid: %v", err) + } + createConditionOpts = append(createConditionOpts, v1.CreateExternalSignalConditionOpt{ + Kind: v1.CreateExternalSignalConditionKindSLEEP, + ReadableDataKey: condition.Base.ReadableDataKey, + OrGroupId: orGroupId, + SleepFor: &condition.SleepFor, + }) + } + + for _, condition := range req.WaitForConditions.UserEventConditions { + orGroupId, err := uuid.Parse(condition.Base.OrGroupId) + if err != nil { + return status.Errorf(codes.InvalidArgument, "or group id is not a valid uuid: %v", err) + } + createConditionOpts = append(createConditionOpts, v1.CreateExternalSignalConditionOpt{ + Kind: v1.CreateExternalSignalConditionKindUSEREVENT, + ReadableDataKey: condition.Base.ReadableDataKey, + OrGroupId: orGroupId, + UserEventKey: &condition.UserEventKey, + Expression: condition.Base.Expression, + }) + } + } + + ingestionResult, err := d.repo.DurableEvents().IngestDurableTaskEvent(ctx, v1.IngestDurableTaskEventOpts{ + BaseIngestEventOpts: &v1.BaseIngestEventOpts{ + TenantId: invocation.tenantId, + Task: task, + Kind: sqlcv1.V1DurableEventLogKindWAITFOR, + InvocationCount: req.InvocationCount, + }, + WaitFor: &v1.IngestWaitForOpts{ + WaitForConditions: createConditionOpts, + }, + }) + + var nde *v1.NonDeterminismError + var sie *v1.StaleInvocationError + if err != nil && errors.As(err, &nde) { + return d.sendNonDeterminismError(invocation, nde, req.InvocationCount) + } else if err != nil && errors.As(err, &sie) { + return d.sendStaleInvocationEviction(invocation, sie) + } else if err != nil { + return status.Errorf(codes.Internal, "failed to ingest wait_for event: %v", err) + } 
+ + err = invocation.send(&contracts.DurableTaskResponse{ + Message: &contracts.DurableTaskResponse_WaitForAck{ + WaitForAck: &contracts.DurableTaskEventWaitForAckResponse{ + Ref: newEntryRef(req.DurableTaskExternalId, req.InvocationCount, v1.NodeIdBranchIdTuple{ + NodeId: ingestionResult.WaitForResult.NodeId, + BranchId: ingestionResult.WaitForResult.BranchId, + }), + }, + }, + }) + if err != nil { + return status.Errorf(codes.Internal, "failed to send wait_for ack: %v", err) + } + + return d.deliverSatisfiedEntries(req.DurableTaskExternalId, ingestionResult) +} + +func (d *DispatcherServiceImpl) handleCompleteMemo( + ctx context.Context, + invocation *durableTaskInvocation, + req *contracts.DurableTaskCompleteMemoRequest, +) error { + if req.Ref == nil { + return status.Errorf(codes.InvalidArgument, "ref is required") + } + + taskExternalId, err := uuid.Parse(req.Ref.DurableTaskExternalId) + if err != nil { + return status.Errorf(codes.InvalidArgument, "invalid durable_task_external_id: %v", err) + } + + d.analytics.Count(ctx, analytics.DurableTask, analytics.Memo) + + err = d.repo.DurableEvents().CompleteMemoEntry(ctx, v1.CompleteMemoEntryOpts{ + TenantId: invocation.tenantId, + TaskExternalId: taskExternalId, + InvocationCount: req.Ref.InvocationCount, + BranchId: req.Ref.BranchId, + NodeId: req.Ref.NodeId, + MemoKey: req.MemoKey, + Payload: req.Payload, + }) + if err != nil { + return status.Errorf(codes.Internal, "failed to complete memo entry: %v", err) + } + + return nil +} + +func (d *DispatcherServiceImpl) sendEvictionError(invocation *durableTaskInvocation, req *contracts.DurableTaskEvictInvocationRequest, errMsg string) error { + return invocation.send(&contracts.DurableTaskResponse{ + Message: &contracts.DurableTaskResponse_Error{ + Error: &contracts.DurableTaskErrorResponse{ + Ref: &contracts.DurableEventLogEntryRef{ + DurableTaskExternalId: req.DurableTaskExternalId, + InvocationCount: req.InvocationCount, + }, + ErrorType: 
contracts.DurableTaskErrorType_DURABLE_TASK_ERROR_TYPE_UNSPECIFIED, + ErrorMessage: errMsg, + }, + }, + }) +} + +func (d *DispatcherServiceImpl) handleEvictInvocation( + ctx context.Context, + invocation *durableTaskInvocation, + req *contracts.DurableTaskEvictInvocationRequest, +) error { + ctx, cancel := context.WithTimeout(ctx, 20*time.Second) + defer cancel() + + taskExternalId, err := uuid.Parse(req.DurableTaskExternalId) + if err != nil { + return d.sendEvictionError(invocation, req, fmt.Sprintf("invalid durable_task_external_id: %v", err)) + } + + d.analytics.Count(ctx, analytics.DurableTask, analytics.Evict) + + task, err := d.repo.Tasks().GetTaskByExternalId(ctx, invocation.tenantId, taskExternalId, false) + if err != nil { + return d.sendEvictionError(invocation, req, fmt.Sprintf("task not found: %v", err)) + } + + wasEvicted, err := d.repo.Tasks().EvictTask(ctx, invocation.tenantId, v1.TaskIdInsertedAtRetryCount{ + Id: task.ID, + InsertedAt: task.InsertedAt, + RetryCount: task.RetryCount, + }) + if err != nil { + return d.sendEvictionError(invocation, req, fmt.Sprintf("failed to evict task: %v", err)) + } + + if wasEvicted { + msg, err := tasktypes.MonitoringEventMessageFromInternal( + invocation.tenantId, + tasktypes.CreateMonitoringEventPayload{ + TaskId: task.ID, + RetryCount: task.RetryCount, + DurableInvocationCount: req.InvocationCount, + EventTimestamp: time.Now(), + EventType: sqlcv1.V1EventTypeOlapDURABLEEVICTED, + EventMessage: durableEvictionMessage(req), + }, + ) + if err != nil { + d.l.Warn().Err(err).Msg("failed to build DURABLE_EVICTED monitoring message") + } else if err := d.pubBuffer.Pub(ctx, msgqueue.OLAP_QUEUE, msg, false); err != nil { + d.l.Warn().Err(err).Msg("failed to publish DURABLE_EVICTED to OLAP") + } + } else { + d.l.Debug().Str("task_external_id", req.DurableTaskExternalId).Msg("eviction skipped, task likely already timed out") + } + + return invocation.send(&contracts.DurableTaskResponse{ + Message: 
&contracts.DurableTaskResponse_EvictionAck{ + EvictionAck: &contracts.DurableTaskEvictionAckResponse{ + InvocationCount: req.InvocationCount, + DurableTaskExternalId: req.DurableTaskExternalId, + }, + }, + }) +} + +func durableEvictionMessage(req *contracts.DurableTaskEvictInvocationRequest) string { + if reason := req.GetReason(); reason != "" { + return reason + } + return "Task paused and evicted from worker" +} + +func (d *DispatcherServiceImpl) handleWorkerStatus( + ctx context.Context, + invocation *durableTaskInvocation, + req *contracts.DurableTaskWorkerStatusRequest, +) error { + if len(req.WaitingEntries) == 0 { + return nil + } + + uniqueExternalIds := make(map[uuid.UUID]int32) + waiting := make([]v1.TaskExternalIdNodeIdBranchId, 0, len(req.WaitingEntries)) + + for _, cb := range req.WaitingEntries { + taskExternalId, err := uuid.Parse(cb.DurableTaskExternalId) + if err != nil { + d.l.Warn().Err(err).Msgf("invalid durable_task_external_id in worker_status: %s", cb.DurableTaskExternalId) + continue + } + + uniqueExternalIds[taskExternalId] = cb.InvocationCount + + waiting = append(waiting, v1.TaskExternalIdNodeIdBranchId{ + TaskExternalId: taskExternalId, + NodeId: cb.NodeId, + BranchId: cb.BranchId, + }) + } + + if len(waiting) == 0 { + return nil + } + + if len(uniqueExternalIds) > 0 { + externalIds := make([]uuid.UUID, 0, len(uniqueExternalIds)) + for extId := range uniqueExternalIds { + externalIds = append(externalIds, extId) + } + + tasks, err := d.repo.Tasks().FlattenExternalIds(ctx, invocation.tenantId, externalIds) + if err != nil { + return fmt.Errorf("failed to look up tasks for invocation count check in worker_status: %w", err) + } + if len(tasks) > 0 { + idInsertedAts := make([]v1.IdInsertedAt, 0, len(tasks)) + taskIdToExternalId := make(map[v1.IdInsertedAt]uuid.UUID, len(tasks)) + + for _, t := range tasks { + key := v1.IdInsertedAt{ID: t.ID, InsertedAt: t.InsertedAt} + idInsertedAts = append(idInsertedAts, key) + taskIdToExternalId[key] = 
t.ExternalID + } + + idInsertedAtToInvocationCount, err := d.repo.DurableEvents().GetDurableTaskInvocationCounts(ctx, invocation.tenantId, idInsertedAts) + if err != nil { + return fmt.Errorf("failed to get invocation counts in worker_status: %w", err) + } + for key, currentCount := range idInsertedAtToInvocationCount { + extId, ok := taskIdToExternalId[key] + if !ok || currentCount == nil { + continue + } + workerInvocationCount, has := uniqueExternalIds[extId] + if !has { + continue + } + if workerInvocationCount < *currentCount { + err = invocation.send(&contracts.DurableTaskResponse{ + Message: &contracts.DurableTaskResponse_ServerEvict{ + ServerEvict: &contracts.DurableTaskServerEvictNotice{ + DurableTaskExternalId: extId.String(), + InvocationCount: workerInvocationCount, + Reason: fmt.Sprintf("stale invocation: server has %d, worker sent %d", *currentCount, workerInvocationCount), + }, + }, + }) + if err != nil { + d.l.Error().Err(err).Msgf("failed to send server eviction notification for task %s", extId.String()) + } + } + } + } + } + + callbacks, err := d.repo.DurableEvents().GetSatisfiedDurableEvents(ctx, invocation.tenantId, waiting) + if err != nil { + return fmt.Errorf("failed to get satisfied callbacks: %w", err) + } + + for _, cb := range callbacks { + if err := d.deliverEntryCompleted(invocation, cb); err != nil { + d.l.Error().Err(err).Msgf("failed to send event_log_entry for task %s node %d", cb.TaskExternalId, cb.NodeID) + } + } + + return nil +} + +func (d *DispatcherServiceImpl) deliverEntryCompleted(invocation *durableTaskInvocation, cb *v1.SatisfiedEventWithPayload) error { + return invocation.send(&contracts.DurableTaskResponse{ + Message: &contracts.DurableTaskResponse_EntryCompleted{ + EntryCompleted: &contracts.DurableTaskEventLogEntryCompletedResponse{ + Ref: &contracts.DurableEventLogEntryRef{ + DurableTaskExternalId: cb.TaskExternalId.String(), + InvocationCount: cb.InvocationCount, + BranchId: cb.BranchID, + NodeId: cb.NodeID, + }, + 
Payload: cb.Result, + }, + }, + }) +} + +func (d *DispatcherServiceImpl) DeliverDurableEventLogEntryCompletion(taskExternalId uuid.UUID, invocationCount int32, branchId, nodeId int64, payload []byte) error { + inv, ok := d.durableInvocations.Load(taskExternalId) + if !ok { + return fmt.Errorf("no active invocation found for task %s", taskExternalId) + } + + return inv.send(&contracts.DurableTaskResponse{ + Message: &contracts.DurableTaskResponse_EntryCompleted{ + EntryCompleted: &contracts.DurableTaskEventLogEntryCompletedResponse{ + Ref: &contracts.DurableEventLogEntryRef{ + DurableTaskExternalId: taskExternalId.String(), + InvocationCount: invocationCount, + BranchId: branchId, + NodeId: nodeId, + }, + Payload: payload, + }, + }, + }) +} diff --git a/internal/services/scheduler/v1/optimistic.go b/internal/services/scheduler/v1/optimistic.go index b62177190..65fccb837 100644 --- a/internal/services/scheduler/v1/optimistic.go +++ b/internal/services/scheduler/v1/optimistic.go @@ -22,20 +22,8 @@ func (s *Scheduler) RunOptimisticScheduling(ctx context.Context, tenantId uuid.U ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - eg := &errgroup.Group{} - - eg.Go(func() error { - return s.signaler.SignalTasksCreated(ctx, tenantId, tasks) - }) - - eg.Go(func() error { - return s.signaler.SignalDAGsCreated(ctx, tenantId, dags) - }) - - innerErr := eg.Wait() - - if innerErr != nil { - s.l.Error().Err(innerErr).Msgf("failed to signal optimistic scheduling results for tenant %s", tenantId) + if err := s.signaler.SignalCreated(ctx, tenantId, tasks, dags); err != nil { + s.l.Error().Err(err).Msgf("failed to signal optimistic scheduling results for tenant %s", tenantId) } }() @@ -70,11 +58,7 @@ func (s *Scheduler) RunOptimisticSchedulingFromEvents(ctx context.Context, tenan }) eg.Go(func() error { - return s.signaler.SignalTasksCreated(ctx, tenantId, eventRes.Tasks) - }) - - eg.Go(func() error { - return s.signaler.SignalDAGsCreated(ctx, 
tenantId, eventRes.Dags) + return s.signaler.SignalCreated(ctx, tenantId, eventRes.Tasks, eventRes.Dags) }) innerErr := eg.Wait() diff --git a/internal/services/scheduler/v1/scheduler.go b/internal/services/scheduler/v1/scheduler.go index 26c42f9e5..b27220d36 100644 --- a/internal/services/scheduler/v1/scheduler.go +++ b/internal/services/scheduler/v1/scheduler.go @@ -430,6 +430,19 @@ func (s *Scheduler) scheduleStepRuns(ctx context.Context, tenantId uuid.UUID, re assignedMsgs := make([]*msgqueue.Message, 0) + invCountOpts := make([]repov1.IdInsertedAt, 0, len(res.Assigned)) + for _, a := range res.Assigned { + invCountOpts = append(invCountOpts, repov1.IdInsertedAt{ + ID: a.QueueItem.TaskID, + InsertedAt: a.QueueItem.TaskInsertedAt, + }) + } + + invocationCounts, invCountErr := s.repov1.DurableEvents().GetDurableTaskInvocationCounts(ctx, tenantId, invCountOpts) + if invCountErr != nil { + return fmt.Errorf("could not get durable task invocation counts for assigned tasks: %w", invCountErr) + } + for _, bulkAssigned := range res.Assigned { _, hasNoDispatcher := workersWithoutDispatchers[bulkAssigned.WorkerId] dispatcherId, ok := workerIdToDispatcherId[bulkAssigned.WorkerId] @@ -458,14 +471,20 @@ func (s *Scheduler) scheduleStepRuns(ctx context.Context, tenantId uuid.UUID, re taskId := bulkAssigned.QueueItem.TaskID + var durableInvCount int32 + if count, ok := invocationCounts[repov1.IdInsertedAt{ID: taskId, InsertedAt: bulkAssigned.QueueItem.TaskInsertedAt}]; ok && count != nil { + durableInvCount = *count + } + assignedMsg, err := tasktypes.MonitoringEventMessageFromInternal( tenantId, tasktypes.CreateMonitoringEventPayload{ - TaskId: taskId, - RetryCount: bulkAssigned.QueueItem.RetryCount, - WorkerId: &workerId, - EventType: sqlcv1.V1EventTypeOlapASSIGNED, - EventTimestamp: time.Now(), + TaskId: taskId, + RetryCount: bulkAssigned.QueueItem.RetryCount, + DurableInvocationCount: durableInvCount, + WorkerId: &workerId, + EventType: sqlcv1.V1EventTypeOlapASSIGNED, + 
EventTimestamp: time.Now(), }, ) diff --git a/internal/services/shared/proto/v1/dispatcher.pb.go b/internal/services/shared/proto/v1/dispatcher.pb.go index d8acd18a2..37ade6cc1 100644 --- a/internal/services/shared/proto/v1/dispatcher.pb.go +++ b/internal/services/shared/proto/v1/dispatcher.pb.go @@ -20,6 +20,1465 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type DurableTaskErrorType int32 + +const ( + DurableTaskErrorType_DURABLE_TASK_ERROR_TYPE_UNSPECIFIED DurableTaskErrorType = 0 + DurableTaskErrorType_DURABLE_TASK_ERROR_TYPE_NONDETERMINISM DurableTaskErrorType = 1 +) + +// Enum value maps for DurableTaskErrorType. +var ( + DurableTaskErrorType_name = map[int32]string{ + 0: "DURABLE_TASK_ERROR_TYPE_UNSPECIFIED", + 1: "DURABLE_TASK_ERROR_TYPE_NONDETERMINISM", + } + DurableTaskErrorType_value = map[string]int32{ + "DURABLE_TASK_ERROR_TYPE_UNSPECIFIED": 0, + "DURABLE_TASK_ERROR_TYPE_NONDETERMINISM": 1, + } +) + +func (x DurableTaskErrorType) Enum() *DurableTaskErrorType { + p := new(DurableTaskErrorType) + *p = x + return p +} + +func (x DurableTaskErrorType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (DurableTaskErrorType) Descriptor() protoreflect.EnumDescriptor { + return file_v1_dispatcher_proto_enumTypes[0].Descriptor() +} + +func (DurableTaskErrorType) Type() protoreflect.EnumType { + return &file_v1_dispatcher_proto_enumTypes[0] +} + +func (x DurableTaskErrorType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use DurableTaskErrorType.Descriptor instead. 
+func (DurableTaskErrorType) EnumDescriptor() ([]byte, []int) { + return file_v1_dispatcher_proto_rawDescGZIP(), []int{0} +} + +type DurableTaskRequestRegisterWorker struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WorkerId string `protobuf:"bytes,1,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"` +} + +func (x *DurableTaskRequestRegisterWorker) Reset() { + *x = DurableTaskRequestRegisterWorker{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_dispatcher_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DurableTaskRequestRegisterWorker) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurableTaskRequestRegisterWorker) ProtoMessage() {} + +func (x *DurableTaskRequestRegisterWorker) ProtoReflect() protoreflect.Message { + mi := &file_v1_dispatcher_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DurableTaskRequestRegisterWorker.ProtoReflect.Descriptor instead. 
+func (*DurableTaskRequestRegisterWorker) Descriptor() ([]byte, []int) { + return file_v1_dispatcher_proto_rawDescGZIP(), []int{0} +} + +func (x *DurableTaskRequestRegisterWorker) GetWorkerId() string { + if x != nil { + return x.WorkerId + } + return "" +} + +type DurableTaskResponseRegisterWorker struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WorkerId string `protobuf:"bytes,1,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"` +} + +func (x *DurableTaskResponseRegisterWorker) Reset() { + *x = DurableTaskResponseRegisterWorker{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_dispatcher_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DurableTaskResponseRegisterWorker) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurableTaskResponseRegisterWorker) ProtoMessage() {} + +func (x *DurableTaskResponseRegisterWorker) ProtoReflect() protoreflect.Message { + mi := &file_v1_dispatcher_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DurableTaskResponseRegisterWorker.ProtoReflect.Descriptor instead. 
+func (*DurableTaskResponseRegisterWorker) Descriptor() ([]byte, []int) { + return file_v1_dispatcher_proto_rawDescGZIP(), []int{1} +} + +func (x *DurableTaskResponseRegisterWorker) GetWorkerId() string { + if x != nil { + return x.WorkerId + } + return "" +} + +type DurableEventLogEntryRef struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DurableTaskExternalId string `protobuf:"bytes,1,opt,name=durable_task_external_id,json=durableTaskExternalId,proto3" json:"durable_task_external_id,omitempty"` + InvocationCount int32 `protobuf:"varint,2,opt,name=invocation_count,json=invocationCount,proto3" json:"invocation_count,omitempty"` + BranchId int64 `protobuf:"varint,3,opt,name=branch_id,json=branchId,proto3" json:"branch_id,omitempty"` + NodeId int64 `protobuf:"varint,4,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` +} + +func (x *DurableEventLogEntryRef) Reset() { + *x = DurableEventLogEntryRef{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_dispatcher_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DurableEventLogEntryRef) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurableEventLogEntryRef) ProtoMessage() {} + +func (x *DurableEventLogEntryRef) ProtoReflect() protoreflect.Message { + mi := &file_v1_dispatcher_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DurableEventLogEntryRef.ProtoReflect.Descriptor instead. 
+func (*DurableEventLogEntryRef) Descriptor() ([]byte, []int) { + return file_v1_dispatcher_proto_rawDescGZIP(), []int{2} +} + +func (x *DurableEventLogEntryRef) GetDurableTaskExternalId() string { + if x != nil { + return x.DurableTaskExternalId + } + return "" +} + +func (x *DurableEventLogEntryRef) GetInvocationCount() int32 { + if x != nil { + return x.InvocationCount + } + return 0 +} + +func (x *DurableEventLogEntryRef) GetBranchId() int64 { + if x != nil { + return x.BranchId + } + return 0 +} + +func (x *DurableEventLogEntryRef) GetNodeId() int64 { + if x != nil { + return x.NodeId + } + return 0 +} + +type DurableTaskRunAckEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NodeId int64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + BranchId int64 `protobuf:"varint,2,opt,name=branch_id,json=branchId,proto3" json:"branch_id,omitempty"` +} + +func (x *DurableTaskRunAckEntry) Reset() { + *x = DurableTaskRunAckEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_dispatcher_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DurableTaskRunAckEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurableTaskRunAckEntry) ProtoMessage() {} + +func (x *DurableTaskRunAckEntry) ProtoReflect() protoreflect.Message { + mi := &file_v1_dispatcher_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DurableTaskRunAckEntry.ProtoReflect.Descriptor instead. 
+func (*DurableTaskRunAckEntry) Descriptor() ([]byte, []int) { + return file_v1_dispatcher_proto_rawDescGZIP(), []int{3} +} + +func (x *DurableTaskRunAckEntry) GetNodeId() int64 { + if x != nil { + return x.NodeId + } + return 0 +} + +func (x *DurableTaskRunAckEntry) GetBranchId() int64 { + if x != nil { + return x.BranchId + } + return 0 +} + +type DurableTaskEventMemoAckResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ref *DurableEventLogEntryRef `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` + MemoAlreadyExisted bool `protobuf:"varint,2,opt,name=memo_already_existed,json=memoAlreadyExisted,proto3" json:"memo_already_existed,omitempty"` + MemoResultPayload []byte `protobuf:"bytes,3,opt,name=memo_result_payload,json=memoResultPayload,proto3,oneof" json:"memo_result_payload,omitempty"` +} + +func (x *DurableTaskEventMemoAckResponse) Reset() { + *x = DurableTaskEventMemoAckResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_dispatcher_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DurableTaskEventMemoAckResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurableTaskEventMemoAckResponse) ProtoMessage() {} + +func (x *DurableTaskEventMemoAckResponse) ProtoReflect() protoreflect.Message { + mi := &file_v1_dispatcher_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DurableTaskEventMemoAckResponse.ProtoReflect.Descriptor instead. 
+func (*DurableTaskEventMemoAckResponse) Descriptor() ([]byte, []int) { + return file_v1_dispatcher_proto_rawDescGZIP(), []int{4} +} + +func (x *DurableTaskEventMemoAckResponse) GetRef() *DurableEventLogEntryRef { + if x != nil { + return x.Ref + } + return nil +} + +func (x *DurableTaskEventMemoAckResponse) GetMemoAlreadyExisted() bool { + if x != nil { + return x.MemoAlreadyExisted + } + return false +} + +func (x *DurableTaskEventMemoAckResponse) GetMemoResultPayload() []byte { + if x != nil { + return x.MemoResultPayload + } + return nil +} + +type DurableTaskEventTriggerRunsAckResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DurableTaskExternalId string `protobuf:"bytes,1,opt,name=durable_task_external_id,json=durableTaskExternalId,proto3" json:"durable_task_external_id,omitempty"` + InvocationCount int32 `protobuf:"varint,2,opt,name=invocation_count,json=invocationCount,proto3" json:"invocation_count,omitempty"` + RunEntries []*DurableTaskRunAckEntry `protobuf:"bytes,3,rep,name=run_entries,json=runEntries,proto3" json:"run_entries,omitempty"` +} + +func (x *DurableTaskEventTriggerRunsAckResponse) Reset() { + *x = DurableTaskEventTriggerRunsAckResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_dispatcher_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DurableTaskEventTriggerRunsAckResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurableTaskEventTriggerRunsAckResponse) ProtoMessage() {} + +func (x *DurableTaskEventTriggerRunsAckResponse) ProtoReflect() protoreflect.Message { + mi := &file_v1_dispatcher_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
DurableTaskEventTriggerRunsAckResponse.ProtoReflect.Descriptor instead. +func (*DurableTaskEventTriggerRunsAckResponse) Descriptor() ([]byte, []int) { + return file_v1_dispatcher_proto_rawDescGZIP(), []int{5} +} + +func (x *DurableTaskEventTriggerRunsAckResponse) GetDurableTaskExternalId() string { + if x != nil { + return x.DurableTaskExternalId + } + return "" +} + +func (x *DurableTaskEventTriggerRunsAckResponse) GetInvocationCount() int32 { + if x != nil { + return x.InvocationCount + } + return 0 +} + +func (x *DurableTaskEventTriggerRunsAckResponse) GetRunEntries() []*DurableTaskRunAckEntry { + if x != nil { + return x.RunEntries + } + return nil +} + +type DurableTaskEventWaitForAckResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ref *DurableEventLogEntryRef `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` +} + +func (x *DurableTaskEventWaitForAckResponse) Reset() { + *x = DurableTaskEventWaitForAckResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_dispatcher_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DurableTaskEventWaitForAckResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurableTaskEventWaitForAckResponse) ProtoMessage() {} + +func (x *DurableTaskEventWaitForAckResponse) ProtoReflect() protoreflect.Message { + mi := &file_v1_dispatcher_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DurableTaskEventWaitForAckResponse.ProtoReflect.Descriptor instead. 
+func (*DurableTaskEventWaitForAckResponse) Descriptor() ([]byte, []int) { + return file_v1_dispatcher_proto_rawDescGZIP(), []int{6} +} + +func (x *DurableTaskEventWaitForAckResponse) GetRef() *DurableEventLogEntryRef { + if x != nil { + return x.Ref + } + return nil +} + +type DurableTaskEventLogEntryCompletedResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ref *DurableEventLogEntryRef `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` + Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` +} + +func (x *DurableTaskEventLogEntryCompletedResponse) Reset() { + *x = DurableTaskEventLogEntryCompletedResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_dispatcher_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DurableTaskEventLogEntryCompletedResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurableTaskEventLogEntryCompletedResponse) ProtoMessage() {} + +func (x *DurableTaskEventLogEntryCompletedResponse) ProtoReflect() protoreflect.Message { + mi := &file_v1_dispatcher_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DurableTaskEventLogEntryCompletedResponse.ProtoReflect.Descriptor instead. 
+func (*DurableTaskEventLogEntryCompletedResponse) Descriptor() ([]byte, []int) { + return file_v1_dispatcher_proto_rawDescGZIP(), []int{7} +} + +func (x *DurableTaskEventLogEntryCompletedResponse) GetRef() *DurableEventLogEntryRef { + if x != nil { + return x.Ref + } + return nil +} + +func (x *DurableTaskEventLogEntryCompletedResponse) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +type DurableTaskEvictInvocationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + InvocationCount int32 `protobuf:"varint,1,opt,name=invocation_count,json=invocationCount,proto3" json:"invocation_count,omitempty"` + DurableTaskExternalId string `protobuf:"bytes,2,opt,name=durable_task_external_id,json=durableTaskExternalId,proto3" json:"durable_task_external_id,omitempty"` + Reason *string `protobuf:"bytes,3,opt,name=reason,proto3,oneof" json:"reason,omitempty"` +} + +func (x *DurableTaskEvictInvocationRequest) Reset() { + *x = DurableTaskEvictInvocationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_dispatcher_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DurableTaskEvictInvocationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurableTaskEvictInvocationRequest) ProtoMessage() {} + +func (x *DurableTaskEvictInvocationRequest) ProtoReflect() protoreflect.Message { + mi := &file_v1_dispatcher_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DurableTaskEvictInvocationRequest.ProtoReflect.Descriptor instead. 
+func (*DurableTaskEvictInvocationRequest) Descriptor() ([]byte, []int) { + return file_v1_dispatcher_proto_rawDescGZIP(), []int{8} +} + +func (x *DurableTaskEvictInvocationRequest) GetInvocationCount() int32 { + if x != nil { + return x.InvocationCount + } + return 0 +} + +func (x *DurableTaskEvictInvocationRequest) GetDurableTaskExternalId() string { + if x != nil { + return x.DurableTaskExternalId + } + return "" +} + +func (x *DurableTaskEvictInvocationRequest) GetReason() string { + if x != nil && x.Reason != nil { + return *x.Reason + } + return "" +} + +// Sent by the server after recording eviction for an evict_invocation request. +type DurableTaskEvictionAckResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + InvocationCount int32 `protobuf:"varint,1,opt,name=invocation_count,json=invocationCount,proto3" json:"invocation_count,omitempty"` + DurableTaskExternalId string `protobuf:"bytes,2,opt,name=durable_task_external_id,json=durableTaskExternalId,proto3" json:"durable_task_external_id,omitempty"` +} + +func (x *DurableTaskEvictionAckResponse) Reset() { + *x = DurableTaskEvictionAckResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_dispatcher_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DurableTaskEvictionAckResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurableTaskEvictionAckResponse) ProtoMessage() {} + +func (x *DurableTaskEvictionAckResponse) ProtoReflect() protoreflect.Message { + mi := &file_v1_dispatcher_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DurableTaskEvictionAckResponse.ProtoReflect.Descriptor instead. 
+func (*DurableTaskEvictionAckResponse) Descriptor() ([]byte, []int) { + return file_v1_dispatcher_proto_rawDescGZIP(), []int{9} +} + +func (x *DurableTaskEvictionAckResponse) GetInvocationCount() int32 { + if x != nil { + return x.InvocationCount + } + return 0 +} + +func (x *DurableTaskEvictionAckResponse) GetDurableTaskExternalId() string { + if x != nil { + return x.DurableTaskExternalId + } + return "" +} + +type DurableTaskAwaitedCompletedEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DurableTaskExternalId string `protobuf:"bytes,1,opt,name=durable_task_external_id,json=durableTaskExternalId,proto3" json:"durable_task_external_id,omitempty"` + BranchId int64 `protobuf:"varint,2,opt,name=branch_id,json=branchId,proto3" json:"branch_id,omitempty"` + NodeId int64 `protobuf:"varint,3,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + InvocationCount int32 `protobuf:"varint,4,opt,name=invocation_count,json=invocationCount,proto3" json:"invocation_count,omitempty"` +} + +func (x *DurableTaskAwaitedCompletedEntry) Reset() { + *x = DurableTaskAwaitedCompletedEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_dispatcher_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DurableTaskAwaitedCompletedEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurableTaskAwaitedCompletedEntry) ProtoMessage() {} + +func (x *DurableTaskAwaitedCompletedEntry) ProtoReflect() protoreflect.Message { + mi := &file_v1_dispatcher_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DurableTaskAwaitedCompletedEntry.ProtoReflect.Descriptor instead. 
+func (*DurableTaskAwaitedCompletedEntry) Descriptor() ([]byte, []int) { + return file_v1_dispatcher_proto_rawDescGZIP(), []int{10} +} + +func (x *DurableTaskAwaitedCompletedEntry) GetDurableTaskExternalId() string { + if x != nil { + return x.DurableTaskExternalId + } + return "" +} + +func (x *DurableTaskAwaitedCompletedEntry) GetBranchId() int64 { + if x != nil { + return x.BranchId + } + return 0 +} + +func (x *DurableTaskAwaitedCompletedEntry) GetNodeId() int64 { + if x != nil { + return x.NodeId + } + return 0 +} + +func (x *DurableTaskAwaitedCompletedEntry) GetInvocationCount() int32 { + if x != nil { + return x.InvocationCount + } + return 0 +} + +// Sent by the server to notify a worker that its invocation is stale and should be cancelled. +type DurableTaskServerEvictNotice struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DurableTaskExternalId string `protobuf:"bytes,1,opt,name=durable_task_external_id,json=durableTaskExternalId,proto3" json:"durable_task_external_id,omitempty"` + InvocationCount int32 `protobuf:"varint,2,opt,name=invocation_count,json=invocationCount,proto3" json:"invocation_count,omitempty"` + Reason string `protobuf:"bytes,3,opt,name=reason,proto3" json:"reason,omitempty"` +} + +func (x *DurableTaskServerEvictNotice) Reset() { + *x = DurableTaskServerEvictNotice{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_dispatcher_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DurableTaskServerEvictNotice) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurableTaskServerEvictNotice) ProtoMessage() {} + +func (x *DurableTaskServerEvictNotice) ProtoReflect() protoreflect.Message { + mi := &file_v1_dispatcher_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + 
} + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DurableTaskServerEvictNotice.ProtoReflect.Descriptor instead. +func (*DurableTaskServerEvictNotice) Descriptor() ([]byte, []int) { + return file_v1_dispatcher_proto_rawDescGZIP(), []int{11} +} + +func (x *DurableTaskServerEvictNotice) GetDurableTaskExternalId() string { + if x != nil { + return x.DurableTaskExternalId + } + return "" +} + +func (x *DurableTaskServerEvictNotice) GetInvocationCount() int32 { + if x != nil { + return x.InvocationCount + } + return 0 +} + +func (x *DurableTaskServerEvictNotice) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +type DurableTaskWorkerStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WorkerId string `protobuf:"bytes,1,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"` + WaitingEntries []*DurableTaskAwaitedCompletedEntry `protobuf:"bytes,2,rep,name=waiting_entries,json=waitingEntries,proto3" json:"waiting_entries,omitempty"` +} + +func (x *DurableTaskWorkerStatusRequest) Reset() { + *x = DurableTaskWorkerStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_dispatcher_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DurableTaskWorkerStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurableTaskWorkerStatusRequest) ProtoMessage() {} + +func (x *DurableTaskWorkerStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_v1_dispatcher_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DurableTaskWorkerStatusRequest.ProtoReflect.Descriptor instead. 
+func (*DurableTaskWorkerStatusRequest) Descriptor() ([]byte, []int) { + return file_v1_dispatcher_proto_rawDescGZIP(), []int{12} +} + +func (x *DurableTaskWorkerStatusRequest) GetWorkerId() string { + if x != nil { + return x.WorkerId + } + return "" +} + +func (x *DurableTaskWorkerStatusRequest) GetWaitingEntries() []*DurableTaskAwaitedCompletedEntry { + if x != nil { + return x.WaitingEntries + } + return nil +} + +type DurableTaskCompleteMemoRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ref *DurableEventLogEntryRef `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` + Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` + MemoKey []byte `protobuf:"bytes,3,opt,name=memo_key,json=memoKey,proto3" json:"memo_key,omitempty"` +} + +func (x *DurableTaskCompleteMemoRequest) Reset() { + *x = DurableTaskCompleteMemoRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_dispatcher_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DurableTaskCompleteMemoRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurableTaskCompleteMemoRequest) ProtoMessage() {} + +func (x *DurableTaskCompleteMemoRequest) ProtoReflect() protoreflect.Message { + mi := &file_v1_dispatcher_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DurableTaskCompleteMemoRequest.ProtoReflect.Descriptor instead. 
+func (*DurableTaskCompleteMemoRequest) Descriptor() ([]byte, []int) { + return file_v1_dispatcher_proto_rawDescGZIP(), []int{13} +} + +func (x *DurableTaskCompleteMemoRequest) GetRef() *DurableEventLogEntryRef { + if x != nil { + return x.Ref + } + return nil +} + +func (x *DurableTaskCompleteMemoRequest) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +func (x *DurableTaskCompleteMemoRequest) GetMemoKey() []byte { + if x != nil { + return x.MemoKey + } + return nil +} + +type DurableTaskMemoRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The invocation_count is a monotonically increasing count that uniquely identifies an "attempt" + // at running a durable task. Each time the task is started, it gets a new invocation count (which has) + // incremented by one since the previous invocation. This allows the server (and the worker) to have a way of + // differentiating between different attempts of the same task running in different places, to prevent race conditions + // and other problems from duplication. 
It also allows for older invocations to be evicted cleanly + InvocationCount int32 `protobuf:"varint,1,opt,name=invocation_count,json=invocationCount,proto3" json:"invocation_count,omitempty"` + DurableTaskExternalId string `protobuf:"bytes,2,opt,name=durable_task_external_id,json=durableTaskExternalId,proto3" json:"durable_task_external_id,omitempty"` + Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // optional payload because we can send a memo request to check if a memo already exists + Payload []byte `protobuf:"bytes,4,opt,name=payload,proto3,oneof" json:"payload,omitempty"` +} + +func (x *DurableTaskMemoRequest) Reset() { + *x = DurableTaskMemoRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_dispatcher_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DurableTaskMemoRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurableTaskMemoRequest) ProtoMessage() {} + +func (x *DurableTaskMemoRequest) ProtoReflect() protoreflect.Message { + mi := &file_v1_dispatcher_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DurableTaskMemoRequest.ProtoReflect.Descriptor instead. 
+func (*DurableTaskMemoRequest) Descriptor() ([]byte, []int) { + return file_v1_dispatcher_proto_rawDescGZIP(), []int{14} +} + +func (x *DurableTaskMemoRequest) GetInvocationCount() int32 { + if x != nil { + return x.InvocationCount + } + return 0 +} + +func (x *DurableTaskMemoRequest) GetDurableTaskExternalId() string { + if x != nil { + return x.DurableTaskExternalId + } + return "" +} + +func (x *DurableTaskMemoRequest) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +func (x *DurableTaskMemoRequest) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +type DurableTaskTriggerRunsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The invocation_count is a monotonically increasing count that uniquely identifies an "attempt" + // at running a durable task. Each time the task is started, it gets a new invocation count (which has) + // incremented by one since the previous invocation. This allows the server (and the worker) to have a way of + // differentiating between different attempts of the same task running in different places, to prevent race conditions + // and other problems from duplication. 
It also allows for older invocations to be evicted cleanly + InvocationCount int32 `protobuf:"varint,1,opt,name=invocation_count,json=invocationCount,proto3" json:"invocation_count,omitempty"` + DurableTaskExternalId string `protobuf:"bytes,2,opt,name=durable_task_external_id,json=durableTaskExternalId,proto3" json:"durable_task_external_id,omitempty"` + TriggerOpts []*TriggerWorkflowRequest `protobuf:"bytes,3,rep,name=trigger_opts,json=triggerOpts,proto3" json:"trigger_opts,omitempty"` +} + +func (x *DurableTaskTriggerRunsRequest) Reset() { + *x = DurableTaskTriggerRunsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_dispatcher_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DurableTaskTriggerRunsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurableTaskTriggerRunsRequest) ProtoMessage() {} + +func (x *DurableTaskTriggerRunsRequest) ProtoReflect() protoreflect.Message { + mi := &file_v1_dispatcher_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DurableTaskTriggerRunsRequest.ProtoReflect.Descriptor instead. 
+func (*DurableTaskTriggerRunsRequest) Descriptor() ([]byte, []int) { + return file_v1_dispatcher_proto_rawDescGZIP(), []int{15} +} + +func (x *DurableTaskTriggerRunsRequest) GetInvocationCount() int32 { + if x != nil { + return x.InvocationCount + } + return 0 +} + +func (x *DurableTaskTriggerRunsRequest) GetDurableTaskExternalId() string { + if x != nil { + return x.DurableTaskExternalId + } + return "" +} + +func (x *DurableTaskTriggerRunsRequest) GetTriggerOpts() []*TriggerWorkflowRequest { + if x != nil { + return x.TriggerOpts + } + return nil +} + +type DurableTaskWaitForRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The invocation_count is a monotonically increasing count that uniquely identifies an "attempt" + // at running a durable task. Each time the task is started, it gets a new invocation count (which has) + // incremented by one since the previous invocation. This allows the server (and the worker) to have a way of + // differentiating between different attempts of the same task running in different places, to prevent race conditions + // and other problems from duplication. 
It also allows for older invocations to be evicted cleanly + InvocationCount int32 `protobuf:"varint,1,opt,name=invocation_count,json=invocationCount,proto3" json:"invocation_count,omitempty"` + DurableTaskExternalId string `protobuf:"bytes,2,opt,name=durable_task_external_id,json=durableTaskExternalId,proto3" json:"durable_task_external_id,omitempty"` + // Fields for DURABLE_TASK_TRIGGER_KIND_WAIT_FOR + WaitForConditions *DurableEventListenerConditions `protobuf:"bytes,3,opt,name=wait_for_conditions,json=waitForConditions,proto3,oneof" json:"wait_for_conditions,omitempty"` +} + +func (x *DurableTaskWaitForRequest) Reset() { + *x = DurableTaskWaitForRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_dispatcher_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DurableTaskWaitForRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurableTaskWaitForRequest) ProtoMessage() {} + +func (x *DurableTaskWaitForRequest) ProtoReflect() protoreflect.Message { + mi := &file_v1_dispatcher_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DurableTaskWaitForRequest.ProtoReflect.Descriptor instead. 
+func (*DurableTaskWaitForRequest) Descriptor() ([]byte, []int) { + return file_v1_dispatcher_proto_rawDescGZIP(), []int{16} +} + +func (x *DurableTaskWaitForRequest) GetInvocationCount() int32 { + if x != nil { + return x.InvocationCount + } + return 0 +} + +func (x *DurableTaskWaitForRequest) GetDurableTaskExternalId() string { + if x != nil { + return x.DurableTaskExternalId + } + return "" +} + +func (x *DurableTaskWaitForRequest) GetWaitForConditions() *DurableEventListenerConditions { + if x != nil { + return x.WaitForConditions + } + return nil +} + +type DurableTaskRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Message: + // + // *DurableTaskRequest_RegisterWorker + // *DurableTaskRequest_Memo + // *DurableTaskRequest_TriggerRuns + // *DurableTaskRequest_WaitFor + // *DurableTaskRequest_EvictInvocation + // *DurableTaskRequest_WorkerStatus + // *DurableTaskRequest_CompleteMemo + Message isDurableTaskRequest_Message `protobuf_oneof:"message"` +} + +func (x *DurableTaskRequest) Reset() { + *x = DurableTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_dispatcher_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DurableTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurableTaskRequest) ProtoMessage() {} + +func (x *DurableTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_v1_dispatcher_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DurableTaskRequest.ProtoReflect.Descriptor instead. 
+func (*DurableTaskRequest) Descriptor() ([]byte, []int) { + return file_v1_dispatcher_proto_rawDescGZIP(), []int{17} +} + +func (m *DurableTaskRequest) GetMessage() isDurableTaskRequest_Message { + if m != nil { + return m.Message + } + return nil +} + +func (x *DurableTaskRequest) GetRegisterWorker() *DurableTaskRequestRegisterWorker { + if x, ok := x.GetMessage().(*DurableTaskRequest_RegisterWorker); ok { + return x.RegisterWorker + } + return nil +} + +func (x *DurableTaskRequest) GetMemo() *DurableTaskMemoRequest { + if x, ok := x.GetMessage().(*DurableTaskRequest_Memo); ok { + return x.Memo + } + return nil +} + +func (x *DurableTaskRequest) GetTriggerRuns() *DurableTaskTriggerRunsRequest { + if x, ok := x.GetMessage().(*DurableTaskRequest_TriggerRuns); ok { + return x.TriggerRuns + } + return nil +} + +func (x *DurableTaskRequest) GetWaitFor() *DurableTaskWaitForRequest { + if x, ok := x.GetMessage().(*DurableTaskRequest_WaitFor); ok { + return x.WaitFor + } + return nil +} + +func (x *DurableTaskRequest) GetEvictInvocation() *DurableTaskEvictInvocationRequest { + if x, ok := x.GetMessage().(*DurableTaskRequest_EvictInvocation); ok { + return x.EvictInvocation + } + return nil +} + +func (x *DurableTaskRequest) GetWorkerStatus() *DurableTaskWorkerStatusRequest { + if x, ok := x.GetMessage().(*DurableTaskRequest_WorkerStatus); ok { + return x.WorkerStatus + } + return nil +} + +func (x *DurableTaskRequest) GetCompleteMemo() *DurableTaskCompleteMemoRequest { + if x, ok := x.GetMessage().(*DurableTaskRequest_CompleteMemo); ok { + return x.CompleteMemo + } + return nil +} + +type isDurableTaskRequest_Message interface { + isDurableTaskRequest_Message() +} + +type DurableTaskRequest_RegisterWorker struct { + RegisterWorker *DurableTaskRequestRegisterWorker `protobuf:"bytes,1,opt,name=register_worker,json=registerWorker,proto3,oneof"` +} + +type DurableTaskRequest_Memo struct { + Memo *DurableTaskMemoRequest `protobuf:"bytes,2,opt,name=memo,proto3,oneof"` +} + 
+type DurableTaskRequest_TriggerRuns struct { + TriggerRuns *DurableTaskTriggerRunsRequest `protobuf:"bytes,3,opt,name=trigger_runs,json=triggerRuns,proto3,oneof"` +} + +type DurableTaskRequest_WaitFor struct { + WaitFor *DurableTaskWaitForRequest `protobuf:"bytes,4,opt,name=wait_for,json=waitFor,proto3,oneof"` +} + +type DurableTaskRequest_EvictInvocation struct { + EvictInvocation *DurableTaskEvictInvocationRequest `protobuf:"bytes,5,opt,name=evict_invocation,json=evictInvocation,proto3,oneof"` +} + +type DurableTaskRequest_WorkerStatus struct { + WorkerStatus *DurableTaskWorkerStatusRequest `protobuf:"bytes,6,opt,name=worker_status,json=workerStatus,proto3,oneof"` +} + +type DurableTaskRequest_CompleteMemo struct { + CompleteMemo *DurableTaskCompleteMemoRequest `protobuf:"bytes,7,opt,name=complete_memo,json=completeMemo,proto3,oneof"` +} + +func (*DurableTaskRequest_RegisterWorker) isDurableTaskRequest_Message() {} + +func (*DurableTaskRequest_Memo) isDurableTaskRequest_Message() {} + +func (*DurableTaskRequest_TriggerRuns) isDurableTaskRequest_Message() {} + +func (*DurableTaskRequest_WaitFor) isDurableTaskRequest_Message() {} + +func (*DurableTaskRequest_EvictInvocation) isDurableTaskRequest_Message() {} + +func (*DurableTaskRequest_WorkerStatus) isDurableTaskRequest_Message() {} + +func (*DurableTaskRequest_CompleteMemo) isDurableTaskRequest_Message() {} + +type DurableTaskErrorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ref *DurableEventLogEntryRef `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` + ErrorType DurableTaskErrorType `protobuf:"varint,2,opt,name=error_type,json=errorType,proto3,enum=v1.DurableTaskErrorType" json:"error_type,omitempty"` + ErrorMessage string `protobuf:"bytes,3,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *DurableTaskErrorResponse) Reset() { + *x = DurableTaskErrorResponse{} + if 
protoimpl.UnsafeEnabled { + mi := &file_v1_dispatcher_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DurableTaskErrorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurableTaskErrorResponse) ProtoMessage() {} + +func (x *DurableTaskErrorResponse) ProtoReflect() protoreflect.Message { + mi := &file_v1_dispatcher_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DurableTaskErrorResponse.ProtoReflect.Descriptor instead. +func (*DurableTaskErrorResponse) Descriptor() ([]byte, []int) { + return file_v1_dispatcher_proto_rawDescGZIP(), []int{18} +} + +func (x *DurableTaskErrorResponse) GetRef() *DurableEventLogEntryRef { + if x != nil { + return x.Ref + } + return nil +} + +func (x *DurableTaskErrorResponse) GetErrorType() DurableTaskErrorType { + if x != nil { + return x.ErrorType + } + return DurableTaskErrorType_DURABLE_TASK_ERROR_TYPE_UNSPECIFIED +} + +func (x *DurableTaskErrorResponse) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +type DurableTaskResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Message: + // + // *DurableTaskResponse_RegisterWorker + // *DurableTaskResponse_MemoAck + // *DurableTaskResponse_TriggerRunsAck + // *DurableTaskResponse_WaitForAck + // *DurableTaskResponse_EntryCompleted + // *DurableTaskResponse_Error + // *DurableTaskResponse_EvictionAck + // *DurableTaskResponse_ServerEvict + Message isDurableTaskResponse_Message `protobuf_oneof:"message"` +} + +func (x *DurableTaskResponse) Reset() { + *x = DurableTaskResponse{} + if protoimpl.UnsafeEnabled { + mi := 
&file_v1_dispatcher_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DurableTaskResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurableTaskResponse) ProtoMessage() {} + +func (x *DurableTaskResponse) ProtoReflect() protoreflect.Message { + mi := &file_v1_dispatcher_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DurableTaskResponse.ProtoReflect.Descriptor instead. +func (*DurableTaskResponse) Descriptor() ([]byte, []int) { + return file_v1_dispatcher_proto_rawDescGZIP(), []int{19} +} + +func (m *DurableTaskResponse) GetMessage() isDurableTaskResponse_Message { + if m != nil { + return m.Message + } + return nil +} + +func (x *DurableTaskResponse) GetRegisterWorker() *DurableTaskResponseRegisterWorker { + if x, ok := x.GetMessage().(*DurableTaskResponse_RegisterWorker); ok { + return x.RegisterWorker + } + return nil +} + +func (x *DurableTaskResponse) GetMemoAck() *DurableTaskEventMemoAckResponse { + if x, ok := x.GetMessage().(*DurableTaskResponse_MemoAck); ok { + return x.MemoAck + } + return nil +} + +func (x *DurableTaskResponse) GetTriggerRunsAck() *DurableTaskEventTriggerRunsAckResponse { + if x, ok := x.GetMessage().(*DurableTaskResponse_TriggerRunsAck); ok { + return x.TriggerRunsAck + } + return nil +} + +func (x *DurableTaskResponse) GetWaitForAck() *DurableTaskEventWaitForAckResponse { + if x, ok := x.GetMessage().(*DurableTaskResponse_WaitForAck); ok { + return x.WaitForAck + } + return nil +} + +func (x *DurableTaskResponse) GetEntryCompleted() *DurableTaskEventLogEntryCompletedResponse { + if x, ok := x.GetMessage().(*DurableTaskResponse_EntryCompleted); ok { + return x.EntryCompleted + } + return nil +} + +func (x *DurableTaskResponse) GetError() 
*DurableTaskErrorResponse { + if x, ok := x.GetMessage().(*DurableTaskResponse_Error); ok { + return x.Error + } + return nil +} + +func (x *DurableTaskResponse) GetEvictionAck() *DurableTaskEvictionAckResponse { + if x, ok := x.GetMessage().(*DurableTaskResponse_EvictionAck); ok { + return x.EvictionAck + } + return nil +} + +func (x *DurableTaskResponse) GetServerEvict() *DurableTaskServerEvictNotice { + if x, ok := x.GetMessage().(*DurableTaskResponse_ServerEvict); ok { + return x.ServerEvict + } + return nil +} + +type isDurableTaskResponse_Message interface { + isDurableTaskResponse_Message() +} + +type DurableTaskResponse_RegisterWorker struct { + RegisterWorker *DurableTaskResponseRegisterWorker `protobuf:"bytes,1,opt,name=register_worker,json=registerWorker,proto3,oneof"` +} + +type DurableTaskResponse_MemoAck struct { + MemoAck *DurableTaskEventMemoAckResponse `protobuf:"bytes,2,opt,name=memo_ack,json=memoAck,proto3,oneof"` +} + +type DurableTaskResponse_TriggerRunsAck struct { + TriggerRunsAck *DurableTaskEventTriggerRunsAckResponse `protobuf:"bytes,3,opt,name=trigger_runs_ack,json=triggerRunsAck,proto3,oneof"` +} + +type DurableTaskResponse_WaitForAck struct { + WaitForAck *DurableTaskEventWaitForAckResponse `protobuf:"bytes,4,opt,name=wait_for_ack,json=waitForAck,proto3,oneof"` +} + +type DurableTaskResponse_EntryCompleted struct { + EntryCompleted *DurableTaskEventLogEntryCompletedResponse `protobuf:"bytes,5,opt,name=entry_completed,json=entryCompleted,proto3,oneof"` +} + +type DurableTaskResponse_Error struct { + Error *DurableTaskErrorResponse `protobuf:"bytes,6,opt,name=error,proto3,oneof"` +} + +type DurableTaskResponse_EvictionAck struct { + EvictionAck *DurableTaskEvictionAckResponse `protobuf:"bytes,7,opt,name=eviction_ack,json=evictionAck,proto3,oneof"` +} + +type DurableTaskResponse_ServerEvict struct { + ServerEvict *DurableTaskServerEvictNotice `protobuf:"bytes,8,opt,name=server_evict,json=serverEvict,proto3,oneof"` +} + +func 
(*DurableTaskResponse_RegisterWorker) isDurableTaskResponse_Message() {} + +func (*DurableTaskResponse_MemoAck) isDurableTaskResponse_Message() {} + +func (*DurableTaskResponse_TriggerRunsAck) isDurableTaskResponse_Message() {} + +func (*DurableTaskResponse_WaitForAck) isDurableTaskResponse_Message() {} + +func (*DurableTaskResponse_EntryCompleted) isDurableTaskResponse_Message() {} + +func (*DurableTaskResponse_Error) isDurableTaskResponse_Message() {} + +func (*DurableTaskResponse_EvictionAck) isDurableTaskResponse_Message() {} + +func (*DurableTaskResponse_ServerEvict) isDurableTaskResponse_Message() {} + type RegisterDurableEventRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -33,7 +1492,7 @@ type RegisterDurableEventRequest struct { func (x *RegisterDurableEventRequest) Reset() { *x = RegisterDurableEventRequest{} if protoimpl.UnsafeEnabled { - mi := &file_v1_dispatcher_proto_msgTypes[0] + mi := &file_v1_dispatcher_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -46,7 +1505,7 @@ func (x *RegisterDurableEventRequest) String() string { func (*RegisterDurableEventRequest) ProtoMessage() {} func (x *RegisterDurableEventRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_dispatcher_proto_msgTypes[0] + mi := &file_v1_dispatcher_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -59,7 +1518,7 @@ func (x *RegisterDurableEventRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RegisterDurableEventRequest.ProtoReflect.Descriptor instead. 
func (*RegisterDurableEventRequest) Descriptor() ([]byte, []int) { - return file_v1_dispatcher_proto_rawDescGZIP(), []int{0} + return file_v1_dispatcher_proto_rawDescGZIP(), []int{20} } func (x *RegisterDurableEventRequest) GetTaskId() string { @@ -92,7 +1551,7 @@ type RegisterDurableEventResponse struct { func (x *RegisterDurableEventResponse) Reset() { *x = RegisterDurableEventResponse{} if protoimpl.UnsafeEnabled { - mi := &file_v1_dispatcher_proto_msgTypes[1] + mi := &file_v1_dispatcher_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -105,7 +1564,7 @@ func (x *RegisterDurableEventResponse) String() string { func (*RegisterDurableEventResponse) ProtoMessage() {} func (x *RegisterDurableEventResponse) ProtoReflect() protoreflect.Message { - mi := &file_v1_dispatcher_proto_msgTypes[1] + mi := &file_v1_dispatcher_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -118,7 +1577,7 @@ func (x *RegisterDurableEventResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RegisterDurableEventResponse.ProtoReflect.Descriptor instead. 
func (*RegisterDurableEventResponse) Descriptor() ([]byte, []int) { - return file_v1_dispatcher_proto_rawDescGZIP(), []int{1} + return file_v1_dispatcher_proto_rawDescGZIP(), []int{21} } type ListenForDurableEventRequest struct { @@ -133,7 +1592,7 @@ type ListenForDurableEventRequest struct { func (x *ListenForDurableEventRequest) Reset() { *x = ListenForDurableEventRequest{} if protoimpl.UnsafeEnabled { - mi := &file_v1_dispatcher_proto_msgTypes[2] + mi := &file_v1_dispatcher_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -146,7 +1605,7 @@ func (x *ListenForDurableEventRequest) String() string { func (*ListenForDurableEventRequest) ProtoMessage() {} func (x *ListenForDurableEventRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_dispatcher_proto_msgTypes[2] + mi := &file_v1_dispatcher_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -159,7 +1618,7 @@ func (x *ListenForDurableEventRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListenForDurableEventRequest.ProtoReflect.Descriptor instead. 
func (*ListenForDurableEventRequest) Descriptor() ([]byte, []int) { - return file_v1_dispatcher_proto_rawDescGZIP(), []int{2} + return file_v1_dispatcher_proto_rawDescGZIP(), []int{22} } func (x *ListenForDurableEventRequest) GetTaskId() string { @@ -189,7 +1648,7 @@ type DurableEvent struct { func (x *DurableEvent) Reset() { *x = DurableEvent{} if protoimpl.UnsafeEnabled { - mi := &file_v1_dispatcher_proto_msgTypes[3] + mi := &file_v1_dispatcher_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -202,7 +1661,7 @@ func (x *DurableEvent) String() string { func (*DurableEvent) ProtoMessage() {} func (x *DurableEvent) ProtoReflect() protoreflect.Message { - mi := &file_v1_dispatcher_proto_msgTypes[3] + mi := &file_v1_dispatcher_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -215,7 +1674,7 @@ func (x *DurableEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use DurableEvent.ProtoReflect.Descriptor instead. 
func (*DurableEvent) Descriptor() ([]byte, []int) { - return file_v1_dispatcher_proto_rawDescGZIP(), []int{3} + return file_v1_dispatcher_proto_rawDescGZIP(), []int{23} } func (x *DurableEvent) GetTaskId() string { @@ -245,47 +1704,304 @@ var file_v1_dispatcher_proto_rawDesc = []byte{ 0x0a, 0x13, 0x76, 0x31, 0x2f, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x76, 0x31, 0x1a, 0x19, 0x76, 0x31, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x99, 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, - 0x72, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x1d, 0x0a, - 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x12, 0x42, 0x0a, 0x0a, - 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x22, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x22, 0x1e, 0x0a, 0x1c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x44, 0x75, 0x72, 0x61, - 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x56, 0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x46, 0x6f, 0x72, 0x44, 0x75, 0x72, - 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 
0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x6c, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, - 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x22, 0x5a, 0x0a, 0x0c, 0x44, 0x75, 0x72, 0x61, - 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, - 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x5f, 0x6b, 0x65, 0x79, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x4b, 0x65, 0x79, - 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, - 0x64, 0x61, 0x74, 0x61, 0x32, 0xbe, 0x01, 0x0a, 0x0c, 0x56, 0x31, 0x44, 0x69, 0x73, 0x70, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x14, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, - 0x72, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1f, 0x2e, - 0x76, 0x31, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x44, 0x75, 0x72, 0x61, 0x62, - 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, - 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x44, 0x75, 0x72, 0x61, - 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x51, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x46, 0x6f, 0x72, 0x44, - 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x20, 0x2e, 0x76, 0x31, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x46, 0x6f, 0x72, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, - 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, - 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 
0x74, 0x22, - 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2d, 0x64, 0x65, 0x76, 0x2f, - 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x31, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2f, + 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x3f, 0x0a, + 0x20, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x65, + 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x22, 0x40, + 0x0a, 0x21, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x57, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, + 0x22, 0xb3, 0x01, 0x0a, 0x17, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x66, 0x12, 0x37, 0x0a, 0x18, + 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x65, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, + 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 
0x49, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0f, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x08, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x49, 0x64, 0x12, 0x17, 0x0a, + 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, + 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x22, 0x4e, 0x0a, 0x16, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, + 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x41, 0x63, 0x6b, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x72, 0x61, + 0x6e, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x62, 0x72, + 0x61, 0x6e, 0x63, 0x68, 0x49, 0x64, 0x22, 0xcf, 0x01, 0x0a, 0x1f, 0x44, 0x75, 0x72, 0x61, 0x62, + 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x6d, 0x6f, 0x41, + 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x03, 0x72, 0x65, + 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x65, 0x66, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12, 0x30, 0x0a, 0x14, 0x6d, 0x65, 0x6d, + 0x6f, 0x5f, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x65, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x6d, 0x65, 0x6d, 0x6f, 0x41, 0x6c, 0x72, + 0x65, 0x61, 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x65, 0x64, 0x12, 0x33, 0x0a, 0x13, 0x6d, + 0x65, 0x6d, 0x6f, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 
0x74, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x11, 0x6d, 0x65, 0x6d, 0x6f, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x88, 0x01, 0x01, + 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xc9, 0x01, 0x0a, 0x26, 0x44, 0x75, 0x72, + 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x72, 0x69, + 0x67, 0x67, 0x65, 0x72, 0x52, 0x75, 0x6e, 0x73, 0x41, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, + 0x61, 0x73, 0x6b, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, + 0x73, 0x6b, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x10, + 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x5f, 0x65, + 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, + 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, + 0x41, 0x63, 0x6b, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x45, 0x6e, 0x74, + 0x72, 0x69, 0x65, 0x73, 0x22, 0x53, 0x0a, 0x22, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, + 0x61, 0x73, 0x6b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x41, + 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x03, 0x72, 0x65, + 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 
0x72, + 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x65, 0x66, 0x52, 0x03, 0x72, 0x65, 0x66, 0x22, 0x74, 0x0a, 0x29, 0x44, 0x75, 0x72, + 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x66, + 0x52, 0x03, 0x72, 0x65, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, + 0xaf, 0x01, 0x0a, 0x21, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, + 0x76, 0x69, 0x63, 0x74, 0x49, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0f, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x37, 0x0a, 0x18, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x61, 0x73, 0x6b, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x15, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x06, 0x72, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x72, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x22, 0x84, 0x01, 0x0a, 
0x1e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, + 0x6b, 0x45, 0x76, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, + 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, + 0x37, 0x0a, 0x18, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, + 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x15, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x22, 0xbc, 0x01, 0x0a, 0x20, 0x44, 0x75, 0x72, + 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x41, 0x77, 0x61, 0x69, 0x74, 0x65, 0x64, 0x43, + 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x37, 0x0a, + 0x18, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x15, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x62, 0x72, 0x61, 0x6e, 0x63, + 0x68, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x10, + 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x9a, 0x01, 0x0a, 
0x1c, 0x44, 0x75, 0x72, 0x61, + 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x45, 0x76, 0x69, + 0x63, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x63, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x75, 0x72, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x64, 0x75, 0x72, 0x61, + 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, + 0x64, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x69, 0x6e, 0x76, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, + 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x8c, 0x01, 0x0a, 0x1e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, + 0x54, 0x61, 0x73, 0x6b, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, + 0x65, 0x72, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x0f, 0x77, 0x61, 0x69, 0x74, 0x69, 0x6e, 0x67, 0x5f, + 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x41, 0x77, + 0x61, 0x69, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0e, 0x77, 0x61, 0x69, 0x74, 0x69, 0x6e, 0x67, 0x45, 0x6e, 0x74, 0x72, + 0x69, 0x65, 0x73, 0x22, 0x84, 0x01, 0x0a, 0x1e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, + 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, 0x6d, 0x6f, 0x52, + 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x66, + 0x52, 0x03, 0x72, 0x65, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, + 0x19, 0x0a, 0x08, 0x6d, 0x65, 0x6d, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x6d, 0x65, 0x6d, 0x6f, 0x4b, 0x65, 0x79, 0x22, 0xb9, 0x01, 0x0a, 0x16, 0x44, + 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x6d, 0x6f, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0f, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x37, 0x0a, 0x18, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x61, 0x73, 0x6b, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x15, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x1d, 0x0a, 0x07, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x07, + 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xc2, 0x01, 0x0a, 0x1d, 0x44, 0x75, 0x72, 0x61, 0x62, + 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x75, 0x6e, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x6e, 0x76, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0f, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, + 0x61, 0x73, 0x6b, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, + 0x73, 0x6b, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x0c, + 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x5f, 0x6f, 0x70, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0b, + 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x73, 0x22, 0xf0, 0x01, 0x0a, 0x19, + 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x57, 0x61, 0x69, 0x74, 0x46, + 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x6e, 0x76, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0f, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, + 0x61, 0x73, 0x6b, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x57, 0x0a, + 0x13, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 
0x76, 0x31, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x48, 0x00, + 0x52, 0x11, 0x77, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x5f, + 0x66, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x90, + 0x04, 0x0a, 0x12, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4f, 0x0a, 0x0f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x57, 0x6f, + 0x72, 0x6b, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, + 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x04, 0x6d, 0x65, 0x6d, 0x6f, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, + 0x65, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x6d, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x48, 0x00, 0x52, 0x04, 0x6d, 0x65, 0x6d, 0x6f, 0x12, 0x46, 0x0a, 0x0c, 0x74, 0x72, 0x69, 0x67, + 0x67, 0x65, 0x72, 0x5f, 0x72, 0x75, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, + 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x54, + 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x75, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x75, 0x6e, 0x73, + 0x12, 0x3a, 0x0a, 0x08, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 
0x1d, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, + 0x61, 0x73, 0x6b, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x48, 0x00, 0x52, 0x07, 0x77, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x12, 0x52, 0x0a, 0x10, + 0x65, 0x76, 0x69, 0x63, 0x74, 0x5f, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x76, 0x69, 0x63, 0x74, 0x49, 0x6e, 0x76, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, + 0x0f, 0x65, 0x76, 0x69, 0x63, 0x74, 0x49, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x49, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0c, 0x77, + 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x49, 0x0a, 0x0d, 0x63, + 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, + 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, 0x6d, 0x6f, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x65, 0x4d, 0x65, 0x6d, 0x6f, 0x42, 0x09, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x22, 0xa7, 0x01, 0x0a, 0x18, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, + 0x6b, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, + 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x76, 0x31, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x66, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12, 0x37, 0x0a, + 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x18, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, + 0x73, 0x6b, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xf8, 0x04, 0x0a, 0x13, + 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x0f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, + 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x57, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x57, + 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x12, 0x40, 0x0a, 0x08, 0x6d, 0x65, 0x6d, 0x6f, 0x5f, 0x61, 0x63, + 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x6d, + 0x6f, 0x41, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x07, + 0x6d, 0x65, 0x6d, 0x6f, 0x41, 0x63, 0x6b, 0x12, 0x56, 0x0a, 0x10, 0x74, 0x72, 0x69, 0x67, 0x67, + 0x65, 0x72, 0x5f, 0x72, 0x75, 0x6e, 0x73, 0x5f, 0x61, 0x63, 0x6b, 0x18, 0x03, 0x20, 0x01, 
0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, + 0x73, 0x6b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x75, + 0x6e, 0x73, 0x41, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, + 0x0e, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x75, 0x6e, 0x73, 0x41, 0x63, 0x6b, 0x12, + 0x4a, 0x0a, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x6b, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, + 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x57, 0x61, 0x69, 0x74, 0x46, + 0x6f, 0x72, 0x41, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, + 0x0a, 0x77, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x41, 0x63, 0x6b, 0x12, 0x58, 0x0a, 0x0f, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, + 0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x6d, 0x70, + 0x6c, 0x65, 0x74, 0x65, 0x64, 0x12, 0x34, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, + 0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x47, 0x0a, 0x0c, 0x65, + 0x76, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x63, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, + 0x73, 0x6b, 0x45, 0x76, 0x69, 
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x63, 0x6b, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x65, 0x76, 0x69, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x41, 0x63, 0x6b, 0x12, 0x45, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x65, + 0x76, 0x69, 0x63, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x31, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x45, 0x76, 0x69, 0x63, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0b, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x45, 0x76, 0x69, 0x63, 0x74, 0x42, 0x09, 0x0a, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x99, 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x65, 0x72, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, + 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x12, 0x42, + 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x22, 0x1e, 0x0a, 0x1c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x44, 0x75, + 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x56, 0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x46, 0x6f, 0x72, 0x44, + 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 
0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x22, 0x5a, 0x0a, 0x0c, 0x44, 0x75, + 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, + 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, + 0x6b, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x4b, + 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x2a, 0x6b, 0x0a, 0x14, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, + 0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x27, + 0x0a, 0x23, 0x44, 0x55, 0x52, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x45, + 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x2a, 0x0a, 0x26, 0x44, 0x55, 0x52, 0x41, 0x42, + 0x4c, 0x45, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x4e, 0x4f, 0x4e, 0x44, 0x45, 0x54, 0x45, 0x52, 0x4d, 0x49, 0x4e, 0x49, 0x53, + 0x4d, 0x10, 0x01, 0x32, 0x84, 0x02, 0x0a, 0x0c, 0x56, 0x31, 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0b, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, + 0x61, 0x73, 0x6b, 0x12, 0x16, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, + 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x76, 0x31, + 0x2e, 
0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x5b, 0x0a, 0x14, 0x52, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x12, 0x1f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, + 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, + 0x72, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x46, 0x6f, 0x72, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x12, 0x20, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x46, 0x6f, 0x72, 0x44, + 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, + 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2f, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x73, + 0x68, 0x61, 0x72, 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -300,25 +2016,75 @@ func file_v1_dispatcher_proto_rawDescGZIP() []byte { return file_v1_dispatcher_proto_rawDescData } -var file_v1_dispatcher_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_v1_dispatcher_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var 
file_v1_dispatcher_proto_msgTypes = make([]protoimpl.MessageInfo, 24) var file_v1_dispatcher_proto_goTypes = []interface{}{ - (*RegisterDurableEventRequest)(nil), // 0: v1.RegisterDurableEventRequest - (*RegisterDurableEventResponse)(nil), // 1: v1.RegisterDurableEventResponse - (*ListenForDurableEventRequest)(nil), // 2: v1.ListenForDurableEventRequest - (*DurableEvent)(nil), // 3: v1.DurableEvent - (*DurableEventListenerConditions)(nil), // 4: v1.DurableEventListenerConditions + (DurableTaskErrorType)(0), // 0: v1.DurableTaskErrorType + (*DurableTaskRequestRegisterWorker)(nil), // 1: v1.DurableTaskRequestRegisterWorker + (*DurableTaskResponseRegisterWorker)(nil), // 2: v1.DurableTaskResponseRegisterWorker + (*DurableEventLogEntryRef)(nil), // 3: v1.DurableEventLogEntryRef + (*DurableTaskRunAckEntry)(nil), // 4: v1.DurableTaskRunAckEntry + (*DurableTaskEventMemoAckResponse)(nil), // 5: v1.DurableTaskEventMemoAckResponse + (*DurableTaskEventTriggerRunsAckResponse)(nil), // 6: v1.DurableTaskEventTriggerRunsAckResponse + (*DurableTaskEventWaitForAckResponse)(nil), // 7: v1.DurableTaskEventWaitForAckResponse + (*DurableTaskEventLogEntryCompletedResponse)(nil), // 8: v1.DurableTaskEventLogEntryCompletedResponse + (*DurableTaskEvictInvocationRequest)(nil), // 9: v1.DurableTaskEvictInvocationRequest + (*DurableTaskEvictionAckResponse)(nil), // 10: v1.DurableTaskEvictionAckResponse + (*DurableTaskAwaitedCompletedEntry)(nil), // 11: v1.DurableTaskAwaitedCompletedEntry + (*DurableTaskServerEvictNotice)(nil), // 12: v1.DurableTaskServerEvictNotice + (*DurableTaskWorkerStatusRequest)(nil), // 13: v1.DurableTaskWorkerStatusRequest + (*DurableTaskCompleteMemoRequest)(nil), // 14: v1.DurableTaskCompleteMemoRequest + (*DurableTaskMemoRequest)(nil), // 15: v1.DurableTaskMemoRequest + (*DurableTaskTriggerRunsRequest)(nil), // 16: v1.DurableTaskTriggerRunsRequest + (*DurableTaskWaitForRequest)(nil), // 17: v1.DurableTaskWaitForRequest + (*DurableTaskRequest)(nil), // 18: 
v1.DurableTaskRequest + (*DurableTaskErrorResponse)(nil), // 19: v1.DurableTaskErrorResponse + (*DurableTaskResponse)(nil), // 20: v1.DurableTaskResponse + (*RegisterDurableEventRequest)(nil), // 21: v1.RegisterDurableEventRequest + (*RegisterDurableEventResponse)(nil), // 22: v1.RegisterDurableEventResponse + (*ListenForDurableEventRequest)(nil), // 23: v1.ListenForDurableEventRequest + (*DurableEvent)(nil), // 24: v1.DurableEvent + (*TriggerWorkflowRequest)(nil), // 25: v1.TriggerWorkflowRequest + (*DurableEventListenerConditions)(nil), // 26: v1.DurableEventListenerConditions } var file_v1_dispatcher_proto_depIdxs = []int32{ - 4, // 0: v1.RegisterDurableEventRequest.conditions:type_name -> v1.DurableEventListenerConditions - 0, // 1: v1.V1Dispatcher.RegisterDurableEvent:input_type -> v1.RegisterDurableEventRequest - 2, // 2: v1.V1Dispatcher.ListenForDurableEvent:input_type -> v1.ListenForDurableEventRequest - 1, // 3: v1.V1Dispatcher.RegisterDurableEvent:output_type -> v1.RegisterDurableEventResponse - 3, // 4: v1.V1Dispatcher.ListenForDurableEvent:output_type -> v1.DurableEvent - 3, // [3:5] is the sub-list for method output_type - 1, // [1:3] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 3, // 0: v1.DurableTaskEventMemoAckResponse.ref:type_name -> v1.DurableEventLogEntryRef + 4, // 1: v1.DurableTaskEventTriggerRunsAckResponse.run_entries:type_name -> v1.DurableTaskRunAckEntry + 3, // 2: v1.DurableTaskEventWaitForAckResponse.ref:type_name -> v1.DurableEventLogEntryRef + 3, // 3: v1.DurableTaskEventLogEntryCompletedResponse.ref:type_name -> v1.DurableEventLogEntryRef + 11, // 4: v1.DurableTaskWorkerStatusRequest.waiting_entries:type_name -> v1.DurableTaskAwaitedCompletedEntry + 3, // 5: v1.DurableTaskCompleteMemoRequest.ref:type_name -> v1.DurableEventLogEntryRef + 25, // 6: 
v1.DurableTaskTriggerRunsRequest.trigger_opts:type_name -> v1.TriggerWorkflowRequest + 26, // 7: v1.DurableTaskWaitForRequest.wait_for_conditions:type_name -> v1.DurableEventListenerConditions + 1, // 8: v1.DurableTaskRequest.register_worker:type_name -> v1.DurableTaskRequestRegisterWorker + 15, // 9: v1.DurableTaskRequest.memo:type_name -> v1.DurableTaskMemoRequest + 16, // 10: v1.DurableTaskRequest.trigger_runs:type_name -> v1.DurableTaskTriggerRunsRequest + 17, // 11: v1.DurableTaskRequest.wait_for:type_name -> v1.DurableTaskWaitForRequest + 9, // 12: v1.DurableTaskRequest.evict_invocation:type_name -> v1.DurableTaskEvictInvocationRequest + 13, // 13: v1.DurableTaskRequest.worker_status:type_name -> v1.DurableTaskWorkerStatusRequest + 14, // 14: v1.DurableTaskRequest.complete_memo:type_name -> v1.DurableTaskCompleteMemoRequest + 3, // 15: v1.DurableTaskErrorResponse.ref:type_name -> v1.DurableEventLogEntryRef + 0, // 16: v1.DurableTaskErrorResponse.error_type:type_name -> v1.DurableTaskErrorType + 2, // 17: v1.DurableTaskResponse.register_worker:type_name -> v1.DurableTaskResponseRegisterWorker + 5, // 18: v1.DurableTaskResponse.memo_ack:type_name -> v1.DurableTaskEventMemoAckResponse + 6, // 19: v1.DurableTaskResponse.trigger_runs_ack:type_name -> v1.DurableTaskEventTriggerRunsAckResponse + 7, // 20: v1.DurableTaskResponse.wait_for_ack:type_name -> v1.DurableTaskEventWaitForAckResponse + 8, // 21: v1.DurableTaskResponse.entry_completed:type_name -> v1.DurableTaskEventLogEntryCompletedResponse + 19, // 22: v1.DurableTaskResponse.error:type_name -> v1.DurableTaskErrorResponse + 10, // 23: v1.DurableTaskResponse.eviction_ack:type_name -> v1.DurableTaskEvictionAckResponse + 12, // 24: v1.DurableTaskResponse.server_evict:type_name -> v1.DurableTaskServerEvictNotice + 26, // 25: v1.RegisterDurableEventRequest.conditions:type_name -> v1.DurableEventListenerConditions + 18, // 26: v1.V1Dispatcher.DurableTask:input_type -> v1.DurableTaskRequest + 21, // 27: 
v1.V1Dispatcher.RegisterDurableEvent:input_type -> v1.RegisterDurableEventRequest + 23, // 28: v1.V1Dispatcher.ListenForDurableEvent:input_type -> v1.ListenForDurableEventRequest + 20, // 29: v1.V1Dispatcher.DurableTask:output_type -> v1.DurableTaskResponse + 22, // 30: v1.V1Dispatcher.RegisterDurableEvent:output_type -> v1.RegisterDurableEventResponse + 24, // 31: v1.V1Dispatcher.ListenForDurableEvent:output_type -> v1.DurableEvent + 29, // [29:32] is the sub-list for method output_type + 26, // [26:29] is the sub-list for method input_type + 26, // [26:26] is the sub-list for extension type_name + 26, // [26:26] is the sub-list for extension extendee + 0, // [0:26] is the sub-list for field type_name } func init() { file_v1_dispatcher_proto_init() } @@ -327,9 +2093,10 @@ func file_v1_dispatcher_proto_init() { return } file_v1_shared_condition_proto_init() + file_v1_shared_trigger_proto_init() if !protoimpl.UnsafeEnabled { file_v1_dispatcher_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RegisterDurableEventRequest); i { + switch v := v.(*DurableTaskRequestRegisterWorker); i { case 0: return &v.state case 1: @@ -341,7 +2108,7 @@ func file_v1_dispatcher_proto_init() { } } file_v1_dispatcher_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RegisterDurableEventResponse); i { + switch v := v.(*DurableTaskResponseRegisterWorker); i { case 0: return &v.state case 1: @@ -353,7 +2120,7 @@ func file_v1_dispatcher_proto_init() { } } file_v1_dispatcher_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListenForDurableEventRequest); i { + switch v := v.(*DurableEventLogEntryRef); i { case 0: return &v.state case 1: @@ -365,6 +2132,246 @@ func file_v1_dispatcher_proto_init() { } } file_v1_dispatcher_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DurableTaskRunAckEntry); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_dispatcher_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DurableTaskEventMemoAckResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_dispatcher_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DurableTaskEventTriggerRunsAckResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_dispatcher_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DurableTaskEventWaitForAckResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_dispatcher_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DurableTaskEventLogEntryCompletedResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_dispatcher_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DurableTaskEvictInvocationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_dispatcher_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DurableTaskEvictionAckResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_dispatcher_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DurableTaskAwaitedCompletedEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_v1_dispatcher_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DurableTaskServerEvictNotice); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_dispatcher_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DurableTaskWorkerStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_dispatcher_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DurableTaskCompleteMemoRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_dispatcher_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DurableTaskMemoRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_dispatcher_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DurableTaskTriggerRunsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_dispatcher_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DurableTaskWaitForRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_dispatcher_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DurableTaskRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_dispatcher_proto_msgTypes[18].Exporter = func(v interface{}, i 
int) interface{} { + switch v := v.(*DurableTaskErrorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_dispatcher_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DurableTaskResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_dispatcher_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RegisterDurableEventRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_dispatcher_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RegisterDurableEventResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_dispatcher_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenForDurableEventRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_dispatcher_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DurableEvent); i { case 0: return &v.state @@ -377,18 +2384,42 @@ func file_v1_dispatcher_proto_init() { } } } + file_v1_dispatcher_proto_msgTypes[4].OneofWrappers = []interface{}{} + file_v1_dispatcher_proto_msgTypes[8].OneofWrappers = []interface{}{} + file_v1_dispatcher_proto_msgTypes[14].OneofWrappers = []interface{}{} + file_v1_dispatcher_proto_msgTypes[16].OneofWrappers = []interface{}{} + file_v1_dispatcher_proto_msgTypes[17].OneofWrappers = []interface{}{ + (*DurableTaskRequest_RegisterWorker)(nil), + (*DurableTaskRequest_Memo)(nil), + (*DurableTaskRequest_TriggerRuns)(nil), + 
(*DurableTaskRequest_WaitFor)(nil), + (*DurableTaskRequest_EvictInvocation)(nil), + (*DurableTaskRequest_WorkerStatus)(nil), + (*DurableTaskRequest_CompleteMemo)(nil), + } + file_v1_dispatcher_proto_msgTypes[19].OneofWrappers = []interface{}{ + (*DurableTaskResponse_RegisterWorker)(nil), + (*DurableTaskResponse_MemoAck)(nil), + (*DurableTaskResponse_TriggerRunsAck)(nil), + (*DurableTaskResponse_WaitForAck)(nil), + (*DurableTaskResponse_EntryCompleted)(nil), + (*DurableTaskResponse_Error)(nil), + (*DurableTaskResponse_EvictionAck)(nil), + (*DurableTaskResponse_ServerEvict)(nil), + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_v1_dispatcher_proto_rawDesc, - NumEnums: 0, - NumMessages: 4, + NumEnums: 1, + NumMessages: 24, NumExtensions: 0, NumServices: 1, }, GoTypes: file_v1_dispatcher_proto_goTypes, DependencyIndexes: file_v1_dispatcher_proto_depIdxs, + EnumInfos: file_v1_dispatcher_proto_enumTypes, MessageInfos: file_v1_dispatcher_proto_msgTypes, }.Build() File_v1_dispatcher_proto = out.File diff --git a/internal/services/shared/proto/v1/dispatcher_grpc.pb.go b/internal/services/shared/proto/v1/dispatcher_grpc.pb.go index 676e5ce92..953cce76b 100644 --- a/internal/services/shared/proto/v1/dispatcher_grpc.pb.go +++ b/internal/services/shared/proto/v1/dispatcher_grpc.pb.go @@ -22,6 +22,8 @@ const _ = grpc.SupportPackageIsVersion7 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
type V1DispatcherClient interface { + DurableTask(ctx context.Context, opts ...grpc.CallOption) (V1Dispatcher_DurableTaskClient, error) + // NOTE: deprecated after DurableEventLog is implemented RegisterDurableEvent(ctx context.Context, in *RegisterDurableEventRequest, opts ...grpc.CallOption) (*RegisterDurableEventResponse, error) ListenForDurableEvent(ctx context.Context, opts ...grpc.CallOption) (V1Dispatcher_ListenForDurableEventClient, error) } @@ -34,6 +36,37 @@ func NewV1DispatcherClient(cc grpc.ClientConnInterface) V1DispatcherClient { return &v1DispatcherClient{cc} } +func (c *v1DispatcherClient) DurableTask(ctx context.Context, opts ...grpc.CallOption) (V1Dispatcher_DurableTaskClient, error) { + stream, err := c.cc.NewStream(ctx, &V1Dispatcher_ServiceDesc.Streams[0], "/v1.V1Dispatcher/DurableTask", opts...) + if err != nil { + return nil, err + } + x := &v1DispatcherDurableTaskClient{stream} + return x, nil +} + +type V1Dispatcher_DurableTaskClient interface { + Send(*DurableTaskRequest) error + Recv() (*DurableTaskResponse, error) + grpc.ClientStream +} + +type v1DispatcherDurableTaskClient struct { + grpc.ClientStream +} + +func (x *v1DispatcherDurableTaskClient) Send(m *DurableTaskRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *v1DispatcherDurableTaskClient) Recv() (*DurableTaskResponse, error) { + m := new(DurableTaskResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + func (c *v1DispatcherClient) RegisterDurableEvent(ctx context.Context, in *RegisterDurableEventRequest, opts ...grpc.CallOption) (*RegisterDurableEventResponse, error) { out := new(RegisterDurableEventResponse) err := c.cc.Invoke(ctx, "/v1.V1Dispatcher/RegisterDurableEvent", in, out, opts...) 
@@ -44,7 +77,7 @@ func (c *v1DispatcherClient) RegisterDurableEvent(ctx context.Context, in *Regis } func (c *v1DispatcherClient) ListenForDurableEvent(ctx context.Context, opts ...grpc.CallOption) (V1Dispatcher_ListenForDurableEventClient, error) { - stream, err := c.cc.NewStream(ctx, &V1Dispatcher_ServiceDesc.Streams[0], "/v1.V1Dispatcher/ListenForDurableEvent", opts...) + stream, err := c.cc.NewStream(ctx, &V1Dispatcher_ServiceDesc.Streams[1], "/v1.V1Dispatcher/ListenForDurableEvent", opts...) if err != nil { return nil, err } @@ -78,6 +111,8 @@ func (x *v1DispatcherListenForDurableEventClient) Recv() (*DurableEvent, error) // All implementations must embed UnimplementedV1DispatcherServer // for forward compatibility type V1DispatcherServer interface { + DurableTask(V1Dispatcher_DurableTaskServer) error + // NOTE: deprecated after DurableEventLog is implemented RegisterDurableEvent(context.Context, *RegisterDurableEventRequest) (*RegisterDurableEventResponse, error) ListenForDurableEvent(V1Dispatcher_ListenForDurableEventServer) error mustEmbedUnimplementedV1DispatcherServer() @@ -87,6 +122,9 @@ type V1DispatcherServer interface { type UnimplementedV1DispatcherServer struct { } +func (UnimplementedV1DispatcherServer) DurableTask(V1Dispatcher_DurableTaskServer) error { + return status.Errorf(codes.Unimplemented, "method DurableTask not implemented") +} func (UnimplementedV1DispatcherServer) RegisterDurableEvent(context.Context, *RegisterDurableEventRequest) (*RegisterDurableEventResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RegisterDurableEvent not implemented") } @@ -106,6 +144,32 @@ func RegisterV1DispatcherServer(s grpc.ServiceRegistrar, srv V1DispatcherServer) s.RegisterService(&V1Dispatcher_ServiceDesc, srv) } +func _V1Dispatcher_DurableTask_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(V1DispatcherServer).DurableTask(&v1DispatcherDurableTaskServer{stream}) +} + +type V1Dispatcher_DurableTaskServer 
interface { + Send(*DurableTaskResponse) error + Recv() (*DurableTaskRequest, error) + grpc.ServerStream +} + +type v1DispatcherDurableTaskServer struct { + grpc.ServerStream +} + +func (x *v1DispatcherDurableTaskServer) Send(m *DurableTaskResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *v1DispatcherDurableTaskServer) Recv() (*DurableTaskRequest, error) { + m := new(DurableTaskRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + func _V1Dispatcher_RegisterDurableEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RegisterDurableEventRequest) if err := dec(in); err != nil { @@ -163,6 +227,12 @@ var V1Dispatcher_ServiceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{ + { + StreamName: "DurableTask", + Handler: _V1Dispatcher_DurableTask_Handler, + ServerStreams: true, + ClientStreams: true, + }, { StreamName: "ListenForDurableEvent", Handler: _V1Dispatcher_ListenForDurableEvent_Handler, diff --git a/internal/services/shared/proto/v1/trigger.pb.go b/internal/services/shared/proto/v1/trigger.pb.go new file mode 100644 index 000000000..9769ffbe8 --- /dev/null +++ b/internal/services/shared/proto/v1/trigger.pb.go @@ -0,0 +1,471 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v5.29.3 +// source: v1/shared/trigger.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type WorkerLabelComparator int32 + +const ( + WorkerLabelComparator_EQUAL WorkerLabelComparator = 0 + WorkerLabelComparator_NOT_EQUAL WorkerLabelComparator = 1 + WorkerLabelComparator_GREATER_THAN WorkerLabelComparator = 2 + WorkerLabelComparator_GREATER_THAN_OR_EQUAL WorkerLabelComparator = 3 + WorkerLabelComparator_LESS_THAN WorkerLabelComparator = 4 + WorkerLabelComparator_LESS_THAN_OR_EQUAL WorkerLabelComparator = 5 +) + +// Enum value maps for WorkerLabelComparator. +var ( + WorkerLabelComparator_name = map[int32]string{ + 0: "EQUAL", + 1: "NOT_EQUAL", + 2: "GREATER_THAN", + 3: "GREATER_THAN_OR_EQUAL", + 4: "LESS_THAN", + 5: "LESS_THAN_OR_EQUAL", + } + WorkerLabelComparator_value = map[string]int32{ + "EQUAL": 0, + "NOT_EQUAL": 1, + "GREATER_THAN": 2, + "GREATER_THAN_OR_EQUAL": 3, + "LESS_THAN": 4, + "LESS_THAN_OR_EQUAL": 5, + } +) + +func (x WorkerLabelComparator) Enum() *WorkerLabelComparator { + p := new(WorkerLabelComparator) + *p = x + return p +} + +func (x WorkerLabelComparator) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (WorkerLabelComparator) Descriptor() protoreflect.EnumDescriptor { + return file_v1_shared_trigger_proto_enumTypes[0].Descriptor() +} + +func (WorkerLabelComparator) Type() protoreflect.EnumType { + return &file_v1_shared_trigger_proto_enumTypes[0] +} + +func (x WorkerLabelComparator) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use WorkerLabelComparator.Descriptor instead. 
+func (WorkerLabelComparator) EnumDescriptor() ([]byte, []int) { + return file_v1_shared_trigger_proto_rawDescGZIP(), []int{0} +} + +type DesiredWorkerLabels struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // value of the affinity + StrValue *string `protobuf:"bytes,1,opt,name=str_value,json=strValue,proto3,oneof" json:"str_value,omitempty"` + IntValue *int32 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof" json:"int_value,omitempty"` + // * + // (optional) Specifies whether the affinity setting is required. + // If required, the worker will not accept actions that do not have a truthy affinity setting. + // + // Defaults to false. + Required *bool `protobuf:"varint,3,opt,name=required,proto3,oneof" json:"required,omitempty"` + // * + // (optional) Specifies the comparator for the affinity setting. + // If not set, the default is EQUAL. + Comparator *WorkerLabelComparator `protobuf:"varint,4,opt,name=comparator,proto3,enum=v1.WorkerLabelComparator,oneof" json:"comparator,omitempty"` + // * + // (optional) Specifies the weight of the affinity setting. + // If not set, the default is 100. 
+ Weight *int32 `protobuf:"varint,5,opt,name=weight,proto3,oneof" json:"weight,omitempty"` +} + +func (x *DesiredWorkerLabels) Reset() { + *x = DesiredWorkerLabels{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_shared_trigger_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DesiredWorkerLabels) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DesiredWorkerLabels) ProtoMessage() {} + +func (x *DesiredWorkerLabels) ProtoReflect() protoreflect.Message { + mi := &file_v1_shared_trigger_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DesiredWorkerLabels.ProtoReflect.Descriptor instead. +func (*DesiredWorkerLabels) Descriptor() ([]byte, []int) { + return file_v1_shared_trigger_proto_rawDescGZIP(), []int{0} +} + +func (x *DesiredWorkerLabels) GetStrValue() string { + if x != nil && x.StrValue != nil { + return *x.StrValue + } + return "" +} + +func (x *DesiredWorkerLabels) GetIntValue() int32 { + if x != nil && x.IntValue != nil { + return *x.IntValue + } + return 0 +} + +func (x *DesiredWorkerLabels) GetRequired() bool { + if x != nil && x.Required != nil { + return *x.Required + } + return false +} + +func (x *DesiredWorkerLabels) GetComparator() WorkerLabelComparator { + if x != nil && x.Comparator != nil { + return *x.Comparator + } + return WorkerLabelComparator_EQUAL +} + +func (x *DesiredWorkerLabels) GetWeight() int32 { + if x != nil && x.Weight != nil { + return *x.Weight + } + return 0 +} + +type TriggerWorkflowRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // (optional) the input data for the workflow + Input string 
`protobuf:"bytes,2,opt,name=input,proto3" json:"input,omitempty"` + // (optional) the parent workflow run id + ParentId *string `protobuf:"bytes,3,opt,name=parent_id,json=parentId,proto3,oneof" json:"parent_id,omitempty"` + // (optional) the parent task external run id + ParentTaskRunExternalId *string `protobuf:"bytes,4,opt,name=parent_task_run_external_id,json=parentTaskRunExternalId,proto3,oneof" json:"parent_task_run_external_id,omitempty"` + // (optional) the index of the child workflow. if this is set, matches on the index or the + // child key will return an existing workflow run if the parent id, parent task run id, and + // child index/key match an existing workflow run. + ChildIndex *int32 `protobuf:"varint,5,opt,name=child_index,json=childIndex,proto3,oneof" json:"child_index,omitempty"` + // (optional) the key for the child. if this is set, matches on the index or the + // child key will return an existing workflow run if the parent id, parent task run id, and + // child index/key match an existing workflow run. + ChildKey *string `protobuf:"bytes,6,opt,name=child_key,json=childKey,proto3,oneof" json:"child_key,omitempty"` + // (optional) additional metadata for the workflow + AdditionalMetadata *string `protobuf:"bytes,7,opt,name=additional_metadata,json=additionalMetadata,proto3,oneof" json:"additional_metadata,omitempty"` + // (optional) desired worker id for the workflow run, + // requires the workflow definition to have a sticky strategy + DesiredWorkerId *string `protobuf:"bytes,8,opt,name=desired_worker_id,json=desiredWorkerId,proto3,oneof" json:"desired_worker_id,omitempty"` + // (optional) override for the priority of the workflow tasks, will set all tasks to this priority + Priority *int32 `protobuf:"varint,9,opt,name=priority,proto3,oneof" json:"priority,omitempty"` + // (optional) the desired worker labels for the workflow run, which will be used to determine which workers can pick up the workflow's tasks. 
if not set, defaults to an empty set of labels, which means any worker can pick up the tasks. + DesiredWorkerLabels map[string]*DesiredWorkerLabels `protobuf:"bytes,10,rep,name=desired_worker_labels,json=desiredWorkerLabels,proto3" json:"desired_worker_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *TriggerWorkflowRequest) Reset() { + *x = TriggerWorkflowRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_shared_trigger_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TriggerWorkflowRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TriggerWorkflowRequest) ProtoMessage() {} + +func (x *TriggerWorkflowRequest) ProtoReflect() protoreflect.Message { + mi := &file_v1_shared_trigger_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TriggerWorkflowRequest.ProtoReflect.Descriptor instead. 
+func (*TriggerWorkflowRequest) Descriptor() ([]byte, []int) { + return file_v1_shared_trigger_proto_rawDescGZIP(), []int{1} +} + +func (x *TriggerWorkflowRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *TriggerWorkflowRequest) GetInput() string { + if x != nil { + return x.Input + } + return "" +} + +func (x *TriggerWorkflowRequest) GetParentId() string { + if x != nil && x.ParentId != nil { + return *x.ParentId + } + return "" +} + +func (x *TriggerWorkflowRequest) GetParentTaskRunExternalId() string { + if x != nil && x.ParentTaskRunExternalId != nil { + return *x.ParentTaskRunExternalId + } + return "" +} + +func (x *TriggerWorkflowRequest) GetChildIndex() int32 { + if x != nil && x.ChildIndex != nil { + return *x.ChildIndex + } + return 0 +} + +func (x *TriggerWorkflowRequest) GetChildKey() string { + if x != nil && x.ChildKey != nil { + return *x.ChildKey + } + return "" +} + +func (x *TriggerWorkflowRequest) GetAdditionalMetadata() string { + if x != nil && x.AdditionalMetadata != nil { + return *x.AdditionalMetadata + } + return "" +} + +func (x *TriggerWorkflowRequest) GetDesiredWorkerId() string { + if x != nil && x.DesiredWorkerId != nil { + return *x.DesiredWorkerId + } + return "" +} + +func (x *TriggerWorkflowRequest) GetPriority() int32 { + if x != nil && x.Priority != nil { + return *x.Priority + } + return 0 +} + +func (x *TriggerWorkflowRequest) GetDesiredWorkerLabels() map[string]*DesiredWorkerLabels { + if x != nil { + return x.DesiredWorkerLabels + } + return nil +} + +var File_v1_shared_trigger_proto protoreflect.FileDescriptor + +var file_v1_shared_trigger_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x76, 0x31, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2f, 0x74, 0x72, 0x69, 0x67, + 0x67, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x76, 0x31, 0x22, 0x9a, 0x02, + 0x0a, 0x13, 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 
0x12, 0x20, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x74, 0x72, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x88, 0x01, 0x01, 0x12, 0x20, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x48, 0x01, 0x52, 0x08, 0x69, 0x6e, + 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x88, 0x01, 0x01, 0x12, 0x1f, 0x0a, 0x08, 0x72, 0x65, 0x71, + 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x02, 0x52, 0x08, 0x72, + 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x0a, 0x63, 0x6f, + 0x6d, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, + 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x43, + 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x48, 0x03, 0x52, 0x0a, 0x63, 0x6f, 0x6d, + 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, 0x77, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x48, 0x04, 0x52, 0x06, 0x77, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x73, 0x74, 0x72, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, + 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x42, + 0x09, 0x0a, 0x07, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0xc8, 0x05, 0x0a, 0x16, 0x54, + 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, + 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x69, 
0x6e, 0x70, 0x75, 0x74, 0x12, + 0x20, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x88, 0x01, + 0x01, 0x12, 0x41, 0x0a, 0x1b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x61, 0x73, 0x6b, + 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, + 0x64, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x48, 0x02, 0x52, 0x0a, 0x63, 0x68, 0x69, + 0x6c, 0x64, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x88, 0x01, 0x01, 0x12, 0x20, 0x0a, 0x09, 0x63, 0x68, + 0x69, 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, + 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x88, 0x01, 0x01, 0x12, 0x34, 0x0a, 0x13, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x04, 0x52, 0x12, 0x61, 0x64, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x88, + 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x11, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x77, 0x6f, + 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x48, 0x05, 0x52, + 0x0f, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, + 0x88, 0x01, 0x01, 0x12, 0x1f, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x05, 0x48, 0x06, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, + 0x79, 0x88, 0x01, 0x01, 0x12, 0x67, 0x0a, 0x15, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, + 0x77, 
0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0a, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, + 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, + 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x5f, 0x0a, + 0x18, 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x31, 0x2e, + 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x0c, + 0x0a, 0x0a, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x42, 0x1e, 0x0a, 0x1c, + 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x42, 0x0e, 0x0a, 0x0c, + 0x5f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x42, 0x0c, 0x0a, 0x0a, + 0x5f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x61, + 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x77, + 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x70, 0x72, 0x69, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x2a, 0x85, 
0x01, 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, + 0x09, 0x0a, 0x05, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, + 0x54, 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x47, 0x52, 0x45, + 0x41, 0x54, 0x45, 0x52, 0x5f, 0x54, 0x48, 0x41, 0x4e, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, 0x47, + 0x52, 0x45, 0x41, 0x54, 0x45, 0x52, 0x5f, 0x54, 0x48, 0x41, 0x4e, 0x5f, 0x4f, 0x52, 0x5f, 0x45, + 0x51, 0x55, 0x41, 0x4c, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x4c, 0x45, 0x53, 0x53, 0x5f, 0x54, + 0x48, 0x41, 0x4e, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x53, 0x53, 0x5f, 0x54, 0x48, + 0x41, 0x4e, 0x5f, 0x4f, 0x52, 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x05, 0x42, 0x42, 0x5a, + 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x74, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2f, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, + 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_v1_shared_trigger_proto_rawDescOnce sync.Once + file_v1_shared_trigger_proto_rawDescData = file_v1_shared_trigger_proto_rawDesc +) + +func file_v1_shared_trigger_proto_rawDescGZIP() []byte { + file_v1_shared_trigger_proto_rawDescOnce.Do(func() { + file_v1_shared_trigger_proto_rawDescData = protoimpl.X.CompressGZIP(file_v1_shared_trigger_proto_rawDescData) + }) + return file_v1_shared_trigger_proto_rawDescData +} + +var file_v1_shared_trigger_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_v1_shared_trigger_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_v1_shared_trigger_proto_goTypes = []interface{}{ + (WorkerLabelComparator)(0), // 0: v1.WorkerLabelComparator + 
(*DesiredWorkerLabels)(nil), // 1: v1.DesiredWorkerLabels + (*TriggerWorkflowRequest)(nil), // 2: v1.TriggerWorkflowRequest + nil, // 3: v1.TriggerWorkflowRequest.DesiredWorkerLabelsEntry +} +var file_v1_shared_trigger_proto_depIdxs = []int32{ + 0, // 0: v1.DesiredWorkerLabels.comparator:type_name -> v1.WorkerLabelComparator + 3, // 1: v1.TriggerWorkflowRequest.desired_worker_labels:type_name -> v1.TriggerWorkflowRequest.DesiredWorkerLabelsEntry + 1, // 2: v1.TriggerWorkflowRequest.DesiredWorkerLabelsEntry.value:type_name -> v1.DesiredWorkerLabels + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_v1_shared_trigger_proto_init() } +func file_v1_shared_trigger_proto_init() { + if File_v1_shared_trigger_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_v1_shared_trigger_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DesiredWorkerLabels); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_shared_trigger_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TriggerWorkflowRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_v1_shared_trigger_proto_msgTypes[0].OneofWrappers = []interface{}{} + file_v1_shared_trigger_proto_msgTypes[1].OneofWrappers = []interface{}{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_v1_shared_trigger_proto_rawDesc, + NumEnums: 1, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: 
file_v1_shared_trigger_proto_goTypes, + DependencyIndexes: file_v1_shared_trigger_proto_depIdxs, + EnumInfos: file_v1_shared_trigger_proto_enumTypes, + MessageInfos: file_v1_shared_trigger_proto_msgTypes, + }.Build() + File_v1_shared_trigger_proto = out.File + file_v1_shared_trigger_proto_rawDesc = nil + file_v1_shared_trigger_proto_goTypes = nil + file_v1_shared_trigger_proto_depIdxs = nil +} diff --git a/internal/services/shared/proto/v1/workflows.pb.go b/internal/services/shared/proto/v1/workflows.pb.go index 4182b44a5..bf3d29cb9 100644 --- a/internal/services/shared/proto/v1/workflows.pb.go +++ b/internal/services/shared/proto/v1/workflows.pb.go @@ -136,6 +136,7 @@ const ( RunStatus_COMPLETED RunStatus = 2 RunStatus_FAILED RunStatus = 3 RunStatus_CANCELLED RunStatus = 4 + RunStatus_EVICTED RunStatus = 5 ) // Enum value maps for RunStatus. @@ -146,6 +147,7 @@ var ( 2: "COMPLETED", 3: "FAILED", 4: "CANCELLED", + 5: "EVICTED", } RunStatus_value = map[string]int32{ "QUEUED": 0, @@ -153,6 +155,7 @@ var ( "COMPLETED": 2, "FAILED": 3, "CANCELLED": 4, + "EVICTED": 5, } ) @@ -238,64 +241,6 @@ func (ConcurrencyLimitStrategy) EnumDescriptor() ([]byte, []int) { return file_v1_workflows_proto_rawDescGZIP(), []int{3} } -type WorkerLabelComparator int32 - -const ( - WorkerLabelComparator_EQUAL WorkerLabelComparator = 0 - WorkerLabelComparator_NOT_EQUAL WorkerLabelComparator = 1 - WorkerLabelComparator_GREATER_THAN WorkerLabelComparator = 2 - WorkerLabelComparator_GREATER_THAN_OR_EQUAL WorkerLabelComparator = 3 - WorkerLabelComparator_LESS_THAN WorkerLabelComparator = 4 - WorkerLabelComparator_LESS_THAN_OR_EQUAL WorkerLabelComparator = 5 -) - -// Enum value maps for WorkerLabelComparator. 
-var ( - WorkerLabelComparator_name = map[int32]string{ - 0: "EQUAL", - 1: "NOT_EQUAL", - 2: "GREATER_THAN", - 3: "GREATER_THAN_OR_EQUAL", - 4: "LESS_THAN", - 5: "LESS_THAN_OR_EQUAL", - } - WorkerLabelComparator_value = map[string]int32{ - "EQUAL": 0, - "NOT_EQUAL": 1, - "GREATER_THAN": 2, - "GREATER_THAN_OR_EQUAL": 3, - "LESS_THAN": 4, - "LESS_THAN_OR_EQUAL": 5, - } -) - -func (x WorkerLabelComparator) Enum() *WorkerLabelComparator { - p := new(WorkerLabelComparator) - *p = x - return p -} - -func (x WorkerLabelComparator) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (WorkerLabelComparator) Descriptor() protoreflect.EnumDescriptor { - return file_v1_workflows_proto_enumTypes[4].Descriptor() -} - -func (WorkerLabelComparator) Type() protoreflect.EnumType { - return &file_v1_workflows_proto_enumTypes[4] -} - -func (x WorkerLabelComparator) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use WorkerLabelComparator.Descriptor instead. 
-func (WorkerLabelComparator) EnumDescriptor() ([]byte, []int) { - return file_v1_workflows_proto_rawDescGZIP(), []int{4} -} - type CancelTasksRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -705,6 +650,132 @@ func (x *TriggerWorkflowRunResponse) GetExternalId() string { return "" } +type BranchDurableTaskRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TaskExternalId string `protobuf:"bytes,1,opt,name=task_external_id,json=taskExternalId,proto3" json:"task_external_id,omitempty"` // (required) the external id (uuid) of the durable task + NodeId int64 `protobuf:"varint,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` // (required) the node id to branch from + BranchId int64 `protobuf:"varint,3,opt,name=branch_id,json=branchId,proto3" json:"branch_id,omitempty"` // (required) the branch id to branch from +} + +func (x *BranchDurableTaskRequest) Reset() { + *x = BranchDurableTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_workflows_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BranchDurableTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BranchDurableTaskRequest) ProtoMessage() {} + +func (x *BranchDurableTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_v1_workflows_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BranchDurableTaskRequest.ProtoReflect.Descriptor instead. 
+func (*BranchDurableTaskRequest) Descriptor() ([]byte, []int) { + return file_v1_workflows_proto_rawDescGZIP(), []int{7} +} + +func (x *BranchDurableTaskRequest) GetTaskExternalId() string { + if x != nil { + return x.TaskExternalId + } + return "" +} + +func (x *BranchDurableTaskRequest) GetNodeId() int64 { + if x != nil { + return x.NodeId + } + return 0 +} + +func (x *BranchDurableTaskRequest) GetBranchId() int64 { + if x != nil { + return x.BranchId + } + return 0 +} + +type BranchDurableTaskResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TaskExternalId string `protobuf:"bytes,1,opt,name=task_external_id,json=taskExternalId,proto3" json:"task_external_id,omitempty"` // the external id of the durable task + NodeId int64 `protobuf:"varint,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` // the node id of the new entry + BranchId int64 `protobuf:"varint,3,opt,name=branch_id,json=branchId,proto3" json:"branch_id,omitempty"` // the branch id of the new entry +} + +func (x *BranchDurableTaskResponse) Reset() { + *x = BranchDurableTaskResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_workflows_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BranchDurableTaskResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BranchDurableTaskResponse) ProtoMessage() {} + +func (x *BranchDurableTaskResponse) ProtoReflect() protoreflect.Message { + mi := &file_v1_workflows_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BranchDurableTaskResponse.ProtoReflect.Descriptor instead. 
+func (*BranchDurableTaskResponse) Descriptor() ([]byte, []int) { + return file_v1_workflows_proto_rawDescGZIP(), []int{8} +} + +func (x *BranchDurableTaskResponse) GetTaskExternalId() string { + if x != nil { + return x.TaskExternalId + } + return "" +} + +func (x *BranchDurableTaskResponse) GetNodeId() int64 { + if x != nil { + return x.NodeId + } + return 0 +} + +func (x *BranchDurableTaskResponse) GetBranchId() int64 { + if x != nil { + return x.BranchId + } + return 0 +} + // CreateWorkflowVersionRequest represents options to create a workflow version. type CreateWorkflowVersionRequest struct { state protoimpl.MessageState @@ -731,7 +802,7 @@ type CreateWorkflowVersionRequest struct { func (x *CreateWorkflowVersionRequest) Reset() { *x = CreateWorkflowVersionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_v1_workflows_proto_msgTypes[7] + mi := &file_v1_workflows_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -744,7 +815,7 @@ func (x *CreateWorkflowVersionRequest) String() string { func (*CreateWorkflowVersionRequest) ProtoMessage() {} func (x *CreateWorkflowVersionRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_workflows_proto_msgTypes[7] + mi := &file_v1_workflows_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -757,7 +828,7 @@ func (x *CreateWorkflowVersionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateWorkflowVersionRequest.ProtoReflect.Descriptor instead. 
func (*CreateWorkflowVersionRequest) Descriptor() ([]byte, []int) { - return file_v1_workflows_proto_rawDescGZIP(), []int{7} + return file_v1_workflows_proto_rawDescGZIP(), []int{9} } func (x *CreateWorkflowVersionRequest) GetName() string { @@ -871,7 +942,7 @@ type DefaultFilter struct { func (x *DefaultFilter) Reset() { *x = DefaultFilter{} if protoimpl.UnsafeEnabled { - mi := &file_v1_workflows_proto_msgTypes[8] + mi := &file_v1_workflows_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -884,7 +955,7 @@ func (x *DefaultFilter) String() string { func (*DefaultFilter) ProtoMessage() {} func (x *DefaultFilter) ProtoReflect() protoreflect.Message { - mi := &file_v1_workflows_proto_msgTypes[8] + mi := &file_v1_workflows_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -897,7 +968,7 @@ func (x *DefaultFilter) ProtoReflect() protoreflect.Message { // Deprecated: Use DefaultFilter.ProtoReflect.Descriptor instead. 
func (*DefaultFilter) Descriptor() ([]byte, []int) { - return file_v1_workflows_proto_rawDescGZIP(), []int{8} + return file_v1_workflows_proto_rawDescGZIP(), []int{10} } func (x *DefaultFilter) GetExpression() string { @@ -934,7 +1005,7 @@ type Concurrency struct { func (x *Concurrency) Reset() { *x = Concurrency{} if protoimpl.UnsafeEnabled { - mi := &file_v1_workflows_proto_msgTypes[9] + mi := &file_v1_workflows_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -947,7 +1018,7 @@ func (x *Concurrency) String() string { func (*Concurrency) ProtoMessage() {} func (x *Concurrency) ProtoReflect() protoreflect.Message { - mi := &file_v1_workflows_proto_msgTypes[9] + mi := &file_v1_workflows_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -960,7 +1031,7 @@ func (x *Concurrency) ProtoReflect() protoreflect.Message { // Deprecated: Use Concurrency.ProtoReflect.Descriptor instead. func (*Concurrency) Descriptor() ([]byte, []int) { - return file_v1_workflows_proto_rawDescGZIP(), []int{9} + return file_v1_workflows_proto_rawDescGZIP(), []int{11} } func (x *Concurrency) GetExpression() string { @@ -984,97 +1055,6 @@ func (x *Concurrency) GetLimitStrategy() ConcurrencyLimitStrategy { return ConcurrencyLimitStrategy_CANCEL_IN_PROGRESS } -type DesiredWorkerLabels struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // value of the affinity - StrValue *string `protobuf:"bytes,1,opt,name=str_value,json=strValue,proto3,oneof" json:"str_value,omitempty"` - IntValue *int32 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof" json:"int_value,omitempty"` - // * - // (optional) Specifies whether the affinity setting is required. - // If required, the worker will not accept actions that do not have a truthy affinity setting. - // - // Defaults to false. 
- Required *bool `protobuf:"varint,3,opt,name=required,proto3,oneof" json:"required,omitempty"` - // * - // (optional) Specifies the comparator for the affinity setting. - // If not set, the default is EQUAL. - Comparator *WorkerLabelComparator `protobuf:"varint,4,opt,name=comparator,proto3,enum=v1.WorkerLabelComparator,oneof" json:"comparator,omitempty"` - // * - // (optional) Specifies the weight of the affinity setting. - // If not set, the default is 100. - Weight *int32 `protobuf:"varint,5,opt,name=weight,proto3,oneof" json:"weight,omitempty"` -} - -func (x *DesiredWorkerLabels) Reset() { - *x = DesiredWorkerLabels{} - if protoimpl.UnsafeEnabled { - mi := &file_v1_workflows_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DesiredWorkerLabels) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DesiredWorkerLabels) ProtoMessage() {} - -func (x *DesiredWorkerLabels) ProtoReflect() protoreflect.Message { - mi := &file_v1_workflows_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DesiredWorkerLabels.ProtoReflect.Descriptor instead. 
-func (*DesiredWorkerLabels) Descriptor() ([]byte, []int) { - return file_v1_workflows_proto_rawDescGZIP(), []int{10} -} - -func (x *DesiredWorkerLabels) GetStrValue() string { - if x != nil && x.StrValue != nil { - return *x.StrValue - } - return "" -} - -func (x *DesiredWorkerLabels) GetIntValue() int32 { - if x != nil && x.IntValue != nil { - return *x.IntValue - } - return 0 -} - -func (x *DesiredWorkerLabels) GetRequired() bool { - if x != nil && x.Required != nil { - return *x.Required - } - return false -} - -func (x *DesiredWorkerLabels) GetComparator() WorkerLabelComparator { - if x != nil && x.Comparator != nil { - return *x.Comparator - } - return WorkerLabelComparator_EQUAL -} - -func (x *DesiredWorkerLabels) GetWeight() int32 { - if x != nil && x.Weight != nil { - return *x.Weight - } - return 0 -} - // CreateTaskOpts represents options to create a task. type CreateTaskOpts struct { state protoimpl.MessageState @@ -1101,7 +1081,7 @@ type CreateTaskOpts struct { func (x *CreateTaskOpts) Reset() { *x = CreateTaskOpts{} if protoimpl.UnsafeEnabled { - mi := &file_v1_workflows_proto_msgTypes[11] + mi := &file_v1_workflows_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1114,7 +1094,7 @@ func (x *CreateTaskOpts) String() string { func (*CreateTaskOpts) ProtoMessage() {} func (x *CreateTaskOpts) ProtoReflect() protoreflect.Message { - mi := &file_v1_workflows_proto_msgTypes[11] + mi := &file_v1_workflows_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1127,7 +1107,7 @@ func (x *CreateTaskOpts) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateTaskOpts.ProtoReflect.Descriptor instead. 
func (*CreateTaskOpts) Descriptor() ([]byte, []int) { - return file_v1_workflows_proto_rawDescGZIP(), []int{11} + return file_v1_workflows_proto_rawDescGZIP(), []int{12} } func (x *CreateTaskOpts) GetReadableId() string { @@ -1251,7 +1231,7 @@ type CreateTaskRateLimit struct { func (x *CreateTaskRateLimit) Reset() { *x = CreateTaskRateLimit{} if protoimpl.UnsafeEnabled { - mi := &file_v1_workflows_proto_msgTypes[12] + mi := &file_v1_workflows_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1264,7 +1244,7 @@ func (x *CreateTaskRateLimit) String() string { func (*CreateTaskRateLimit) ProtoMessage() {} func (x *CreateTaskRateLimit) ProtoReflect() protoreflect.Message { - mi := &file_v1_workflows_proto_msgTypes[12] + mi := &file_v1_workflows_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1277,7 +1257,7 @@ func (x *CreateTaskRateLimit) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateTaskRateLimit.ProtoReflect.Descriptor instead. 
func (*CreateTaskRateLimit) Descriptor() ([]byte, []int) { - return file_v1_workflows_proto_rawDescGZIP(), []int{12} + return file_v1_workflows_proto_rawDescGZIP(), []int{13} } func (x *CreateTaskRateLimit) GetKey() string { @@ -1335,7 +1315,7 @@ type CreateWorkflowVersionResponse struct { func (x *CreateWorkflowVersionResponse) Reset() { *x = CreateWorkflowVersionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_v1_workflows_proto_msgTypes[13] + mi := &file_v1_workflows_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1348,7 +1328,7 @@ func (x *CreateWorkflowVersionResponse) String() string { func (*CreateWorkflowVersionResponse) ProtoMessage() {} func (x *CreateWorkflowVersionResponse) ProtoReflect() protoreflect.Message { - mi := &file_v1_workflows_proto_msgTypes[13] + mi := &file_v1_workflows_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1361,7 +1341,7 @@ func (x *CreateWorkflowVersionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateWorkflowVersionResponse.ProtoReflect.Descriptor instead. 
func (*CreateWorkflowVersionResponse) Descriptor() ([]byte, []int) { - return file_v1_workflows_proto_rawDescGZIP(), []int{13} + return file_v1_workflows_proto_rawDescGZIP(), []int{14} } func (x *CreateWorkflowVersionResponse) GetId() string { @@ -1389,7 +1369,7 @@ type GetRunDetailsRequest struct { func (x *GetRunDetailsRequest) Reset() { *x = GetRunDetailsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_v1_workflows_proto_msgTypes[14] + mi := &file_v1_workflows_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1402,7 +1382,7 @@ func (x *GetRunDetailsRequest) String() string { func (*GetRunDetailsRequest) ProtoMessage() {} func (x *GetRunDetailsRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_workflows_proto_msgTypes[14] + mi := &file_v1_workflows_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1415,7 +1395,7 @@ func (x *GetRunDetailsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetRunDetailsRequest.ProtoReflect.Descriptor instead. 
func (*GetRunDetailsRequest) Descriptor() ([]byte, []int) { - return file_v1_workflows_proto_rawDescGZIP(), []int{14} + return file_v1_workflows_proto_rawDescGZIP(), []int{15} } func (x *GetRunDetailsRequest) GetExternalId() string { @@ -1435,12 +1415,13 @@ type TaskRunDetail struct { Error *string `protobuf:"bytes,3,opt,name=error,proto3,oneof" json:"error,omitempty"` // (optional) error message from the task run, if any Output []byte `protobuf:"bytes,4,opt,name=output,proto3,oneof" json:"output,omitempty"` // (optional) the output payload for the task run ReadableId string `protobuf:"bytes,5,opt,name=readable_id,json=readableId,proto3" json:"readable_id,omitempty"` // the readable id of the task + IsEvicted bool `protobuf:"varint,6,opt,name=is_evicted,json=isEvicted,proto3" json:"is_evicted,omitempty"` // whether the task has been evicted from a worker (status will be RUNNING) } func (x *TaskRunDetail) Reset() { *x = TaskRunDetail{} if protoimpl.UnsafeEnabled { - mi := &file_v1_workflows_proto_msgTypes[15] + mi := &file_v1_workflows_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1453,7 +1434,7 @@ func (x *TaskRunDetail) String() string { func (*TaskRunDetail) ProtoMessage() {} func (x *TaskRunDetail) ProtoReflect() protoreflect.Message { - mi := &file_v1_workflows_proto_msgTypes[15] + mi := &file_v1_workflows_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1466,7 +1447,7 @@ func (x *TaskRunDetail) ProtoReflect() protoreflect.Message { // Deprecated: Use TaskRunDetail.ProtoReflect.Descriptor instead. 
func (*TaskRunDetail) Descriptor() ([]byte, []int) { - return file_v1_workflows_proto_rawDescGZIP(), []int{15} + return file_v1_workflows_proto_rawDescGZIP(), []int{16} } func (x *TaskRunDetail) GetExternalId() string { @@ -1504,6 +1485,13 @@ func (x *TaskRunDetail) GetReadableId() string { return "" } +func (x *TaskRunDetail) GetIsEvicted() bool { + if x != nil { + return x.IsEvicted + } + return false +} + type GetRunDetailsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1514,12 +1502,13 @@ type GetRunDetailsResponse struct { TaskRuns map[string]*TaskRunDetail `protobuf:"bytes,3,rep,name=task_runs,json=taskRuns,proto3" json:"task_runs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // map of task run external ids to their details Done bool `protobuf:"varint,4,opt,name=done,proto3" json:"done,omitempty"` // indicates if the workflow run is done AdditionalMetadata []byte `protobuf:"bytes,5,opt,name=additional_metadata,json=additionalMetadata,proto3" json:"additional_metadata,omitempty"` // (optional) additional metadata for the workflow run + IsEvicted bool `protobuf:"varint,6,opt,name=is_evicted,json=isEvicted,proto3" json:"is_evicted,omitempty"` // whether any task in this run has been evicted } func (x *GetRunDetailsResponse) Reset() { *x = GetRunDetailsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_v1_workflows_proto_msgTypes[16] + mi := &file_v1_workflows_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1532,7 +1521,7 @@ func (x *GetRunDetailsResponse) String() string { func (*GetRunDetailsResponse) ProtoMessage() {} func (x *GetRunDetailsResponse) ProtoReflect() protoreflect.Message { - mi := &file_v1_workflows_proto_msgTypes[16] + mi := &file_v1_workflows_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1545,7 
+1534,7 @@ func (x *GetRunDetailsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetRunDetailsResponse.ProtoReflect.Descriptor instead. func (*GetRunDetailsResponse) Descriptor() ([]byte, []int) { - return file_v1_workflows_proto_rawDescGZIP(), []int{16} + return file_v1_workflows_proto_rawDescGZIP(), []int{17} } func (x *GetRunDetailsResponse) GetInput() []byte { @@ -1583,6 +1572,13 @@ func (x *GetRunDetailsResponse) GetAdditionalMetadata() []byte { return nil } +func (x *GetRunDetailsResponse) GetIsEvicted() bool { + if x != nil { + return x.IsEvicted + } + return false +} + var File_v1_workflows_proto protoreflect.FileDescriptor var file_v1_workflows_proto_rawDesc = []byte{ @@ -1591,285 +1587,289 @@ var file_v1_workflows_proto_rawDesc = []byte{ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x76, 0x31, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x70, 0x0a, 0x12, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x61, - 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x78, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0b, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x73, 0x12, 0x2c, 0x0a, - 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, - 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, - 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, - 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x70, 0x0a, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, - 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, - 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 
0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0b, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x73, 0x12, - 0x2c, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0f, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x48, 0x00, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, - 0x07, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xf0, 0x01, 0x0a, 0x0b, 0x54, 0x61, 0x73, - 0x6b, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x05, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x05, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x12, 0x35, 0x0a, 0x05, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x48, 0x00, 0x52, 0x05, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x21, 0x0a, - 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x73, - 0x12, 0x2f, 0x0a, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x61, - 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x22, 0x3e, 0x0a, 
0x13, 0x43, - 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x6c, 0x65, 0x64, 0x5f, - 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x61, 0x6e, - 0x63, 0x65, 0x6c, 0x6c, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x22, 0x3c, 0x0a, 0x13, 0x52, - 0x65, 0x70, 0x6c, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x65, 0x64, 0x5f, 0x74, - 0x61, 0x73, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x72, 0x65, 0x70, 0x6c, - 0x61, 0x79, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x22, 0x82, 0x03, 0x0a, 0x19, 0x54, 0x72, - 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x69, 0x6e, 0x70, - 0x75, 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x12, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, - 0x79, 0x88, 0x01, 0x01, 0x12, 0x6a, 0x0a, 0x15, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, - 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, - 0x03, 0x28, 0x0b, 0x32, 
0x36, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x2e, 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x64, 0x65, 0x73, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x31, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2f, + 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x70, 0x0a, + 0x12, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, + 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x65, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x73, 0x12, 0x2c, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, + 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, + 0x70, 0x0a, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x65, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x73, 0x12, 0x2c, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, + 0x73, 0x6b, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x22, 0xf0, 0x01, 0x0a, 0x0b, 0x54, 0x61, 0x73, 0x6b, 
0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x30, 0x0a, + 0x05, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x12, + 0x35, 0x0a, 0x05, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x05, 0x75, 0x6e, + 0x74, 0x69, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x21, 0x0a, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x77, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x61, 0x64, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x75, + 0x6e, 0x74, 0x69, 0x6c, 0x22, 0x3e, 0x0a, 0x13, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x61, + 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x63, + 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x6c, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x6c, 0x65, 0x64, 0x54, + 0x61, 0x73, 0x6b, 0x73, 0x22, 0x3c, 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x54, 0x61, + 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x72, + 
0x65, 0x70, 0x6c, 0x61, 0x79, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0d, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x65, 0x64, 0x54, 0x61, 0x73, + 0x6b, 0x73, 0x22, 0x82, 0x03, 0x0a, 0x19, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x61, + 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x08, + 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, + 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x88, 0x01, 0x01, 0x12, 0x6a, 0x0a, + 0x15, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x76, + 0x31, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x44, 0x65, 0x73, 0x69, + 0x72, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x57, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x5f, 0x0a, 0x18, 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x57, 0x6f, 0x72, 
0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x1a, 0x5f, 0x0a, 0x18, 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x65, - 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x22, 0x3d, - 0x0a, 0x1a, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, - 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x22, 0xdd, 0x05, - 0x0a, 0x1c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x25, - 0x0a, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 
0x54, 0x72, 0x69, - 0x67, 0x67, 0x65, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x72, 0x6f, 0x6e, 0x5f, 0x74, 0x72, - 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x72, - 0x6f, 0x6e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, 0x12, 0x28, 0x0a, 0x05, 0x74, 0x61, - 0x73, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x76, 0x31, 0x2e, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x4f, 0x70, 0x74, 0x73, 0x52, 0x05, 0x74, - 0x61, 0x73, 0x6b, 0x73, 0x12, 0x31, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, - 0x6e, 0x63, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x31, 0x2e, 0x43, - 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x63, 0x72, 0x6f, 0x6e, 0x5f, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x63, - 0x72, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x0f, 0x6f, - 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x54, 0x61, 0x73, 0x6b, 0x4f, 0x70, 0x74, 0x73, 0x48, 0x01, 0x52, 0x0d, 0x6f, 0x6e, 0x46, 0x61, - 0x69, 0x6c, 0x75, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x06, - 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x76, - 0x31, 0x2e, 0x53, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, - 0x48, 0x02, 0x52, 0x06, 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x88, 0x01, 0x01, 0x12, 0x2e, 0x0a, - 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, - 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x48, 0x03, 0x52, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x50, 
0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, - 0x0f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x61, 0x72, 0x72, - 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, - 0x65, 0x6e, 0x63, 0x79, 0x41, 0x72, 0x72, 0x12, 0x3a, 0x0a, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x11, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x46, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x52, 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x46, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x73, 0x12, 0x2f, 0x0a, 0x11, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x6a, 0x73, 0x6f, - 0x6e, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x04, - 0x52, 0x0f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x4a, 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x88, 0x01, 0x01, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x63, 0x72, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, - 0x70, 0x75, 0x74, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, - 0x72, 0x65, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x73, 0x74, 0x69, 0x63, - 0x6b, 0x79, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x70, - 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x69, 0x6e, 0x70, 0x75, - 0x74, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x70, 0x0a, - 0x0d, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1e, - 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, - 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, - 0x63, 0x6f, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, - 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, - 0xb7, 0x01, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, - 0x1e, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x1e, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x75, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x05, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x75, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, - 0x48, 0x0a, 0x0e, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, - 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, - 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x74, 0x72, - 0x61, 0x74, 0x65, 0x67, 0x79, 0x48, 0x01, 0x52, 0x0d, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x74, - 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x88, 0x01, 0x01, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x6d, 0x61, - 0x78, 0x5f, 0x72, 0x75, 0x6e, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, - 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x22, 0x9a, 0x02, 0x0a, 0x13, 0x44, 0x65, - 0x73, 0x69, 0x72, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x73, 0x12, 0x20, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x74, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x88, 0x01, 0x01, 0x12, 0x20, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x48, 0x01, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 
0x6c, - 0x75, 0x65, 0x88, 0x01, 0x01, 0x12, 0x1f, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x02, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, - 0x72, 0x65, 0x64, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, - 0x61, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x76, 0x31, 0x2e, - 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x43, 0x6f, 0x6d, 0x70, 0x61, - 0x72, 0x61, 0x74, 0x6f, 0x72, 0x48, 0x03, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x61, - 0x74, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x48, 0x04, 0x52, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, - 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x73, 0x74, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, - 0x0b, 0x0a, 0x09, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x42, 0x0d, 0x0a, 0x0b, - 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x42, 0x09, 0x0a, 0x07, 0x5f, - 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x85, 0x07, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x54, 0x61, 0x73, 0x6b, 0x4f, 0x70, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, - 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x72, 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x69, 0x6e, - 0x70, 0x75, 0x74, 0x73, 0x12, 
0x18, 0x0a, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x18, - 0x0a, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x0b, 0x72, 0x61, 0x74, 0x65, - 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x61, 0x74, - 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x0a, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, - 0x74, 0x73, 0x12, 0x49, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x76, 0x31, 0x2e, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x4f, 0x70, 0x74, 0x73, 0x2e, 0x57, 0x6f, - 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x2a, 0x0a, - 0x0e, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x02, 0x48, 0x00, 0x52, 0x0d, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, - 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x62, 0x61, 0x63, - 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x48, 0x01, 0x52, 0x11, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, - 0x66, 0x4d, 0x61, 0x78, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x88, 0x01, 0x01, 0x12, 0x31, - 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, - 0x65, 0x6e, 0x63, 0x79, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 
0x72, 0x72, 0x65, 0x6e, 0x63, - 0x79, 0x12, 0x37, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x43, - 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x48, 0x02, 0x52, 0x0a, 0x63, 0x6f, 0x6e, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x2e, 0x0a, 0x10, 0x73, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0d, - 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, 0x0f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, - 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x88, 0x01, 0x01, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, - 0x5f, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, - 0x69, 0x73, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x73, 0x6c, 0x6f, - 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, - 0x4f, 0x70, 0x74, 0x73, 0x2e, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x73, 0x1a, 0x58, 0x0a, 0x11, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, - 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x31, 0x2e, - 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3f, - 0x0a, 0x11, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x45, 0x6e, - 0x74, 
0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, - 0x11, 0x0a, 0x0f, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x66, 0x61, 0x63, 0x74, - 0x6f, 0x72, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6d, - 0x61, 0x78, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x63, - 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0xb8, - 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x61, 0x74, - 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x19, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, - 0x88, 0x01, 0x01, 0x12, 0x1e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x45, 0x78, 0x70, 0x72, - 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 0x0a, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x5f, 0x65, 0x78, 0x70, - 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, 0x52, 0x09, 0x75, 0x6e, 0x69, 0x74, 0x73, - 0x45, 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x11, 0x6c, 0x69, 0x6d, 0x69, 0x74, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x48, 0x03, 0x52, 0x0f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x73, 0x45, 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12, 0x36, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 
0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x76, 0x31, 0x2e, - 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x48, 0x04, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, - 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x6b, - 0x65, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x75, 0x6e, 0x69, 0x74, - 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0b, 0x0a, 0x09, - 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x1d, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x77, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x22, 0x37, 0x0a, 0x14, 0x47, - 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x49, 0x64, 0x22, 0xc5, 0x01, 0x0a, 0x0d, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, - 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x78, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x76, 0x31, 0x2e, 
0x52, 0x75, 0x6e, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, - 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x75, 0x74, - 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x06, 0x6f, 0x75, 0x74, - 0x70, 0x75, 0x74, 0x88, 0x01, 0x01, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x61, 0x62, - 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x61, - 0x64, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x64, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, 0xaf, 0x02, 0x0a, - 0x15, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x25, 0x0a, 0x06, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x69, + 0x72, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x70, + 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x22, 0x3d, 0x0a, 0x1a, 0x54, 0x72, 0x69, 0x67, 0x67, + 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 
0x6c, 0x49, 0x64, 0x22, 0x7a, 0x0a, 0x18, 0x42, 0x72, 0x61, 0x6e, 0x63, 0x68, + 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, + 0x73, 0x6b, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, + 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6e, + 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x5f, + 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, + 0x49, 0x64, 0x22, 0x7b, 0x0a, 0x19, 0x42, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x44, 0x75, 0x72, 0x61, + 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x28, 0x0a, 0x10, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x73, 0x6b, 0x45, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, + 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x49, 0x64, 0x22, + 0xdd, 0x05, 0x0a, 0x1c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, + 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, + 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x72, 0x6f, 0x6e, 0x5f, + 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, + 0x63, 0x72, 0x6f, 0x6e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, 0x12, 0x28, 0x0a, 0x05, + 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x4f, 0x70, 0x74, 0x73, 0x52, + 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x31, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, + 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x0b, 0x63, 0x6f, + 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x63, 0x72, 0x6f, + 0x6e, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x09, 0x63, 0x72, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, + 0x0f, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x74, 0x61, 0x73, 0x6b, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x4f, 0x70, 0x74, 0x73, 0x48, 0x01, 0x52, 0x0d, 0x6f, 0x6e, + 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, 0x2f, + 0x0a, 0x06, 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 
0x12, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, + 0x67, 0x79, 0x48, 0x02, 0x52, 0x06, 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x88, 0x01, 0x01, 0x12, + 0x2e, 0x0a, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, + 0x69, 0x74, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x48, 0x03, 0x52, 0x0f, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x88, 0x01, 0x01, 0x12, + 0x38, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x61, + 0x72, 0x72, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, + 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x41, 0x72, 0x72, 0x12, 0x3a, 0x0a, 0x0f, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0d, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x2f, 0x0a, 0x11, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x6a, + 0x73, 0x6f, 0x6e, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0c, + 0x48, 0x04, 0x52, 0x0f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x4a, 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x88, 0x01, 0x01, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x63, 0x72, 0x6f, 0x6e, 0x5f, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x73, 0x74, + 0x69, 0x63, 0x6b, 0x79, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x69, 0x6e, + 0x70, 0x75, 0x74, 0x5f, 0x6a, 
0x73, 0x6f, 0x6e, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, + 0x70, 0x0a, 0x0d, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x1e, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x22, 0xb7, 0x01, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, + 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x75, 0x6e, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x75, 0x6e, 0x73, 0x88, 0x01, + 0x01, 0x12, 0x48, 0x0a, 0x0e, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, + 0x65, 0x67, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x76, 0x31, 0x2e, 0x43, + 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, + 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x48, 0x01, 0x52, 0x0d, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x88, 0x01, 0x01, 0x42, 0x0b, 0x0a, 0x09, 0x5f, + 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x75, 0x6e, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x22, 0x85, 0x07, 0x0a, 0x0e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x4f, 
0x70, 0x74, 0x73, 0x12, 0x1f, + 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x64, 0x12, + 0x16, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x38, 0x0a, + 0x0b, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, + 0x73, 0x6b, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x0a, 0x72, 0x61, 0x74, + 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x49, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x65, + 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x4f, 0x70, + 0x74, 0x73, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x12, 0x2a, 0x0a, 0x0e, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x66, 0x61, + 0x63, 0x74, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x02, 0x48, 0x00, 0x52, 0x0d, 0x62, 0x61, + 0x63, 
0x6b, 0x6f, 0x66, 0x66, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x12, 0x33, + 0x0a, 0x13, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, + 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x48, 0x01, 0x52, 0x11, 0x62, + 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x4d, 0x61, 0x78, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, + 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, + 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, + 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x37, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x76, 0x31, 0x2e, + 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x48, 0x02, + 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, + 0x2e, 0x0a, 0x10, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, 0x0f, 0x73, 0x63, 0x68, + 0x65, 0x64, 0x75, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x88, 0x01, 0x01, 0x12, + 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x49, + 0x0a, 0x0d, 0x73, 0x6c, 0x6f, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, + 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x54, 0x61, 0x73, 0x6b, 0x4f, 0x70, 0x74, 0x73, 0x2e, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x6c, 0x6f, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x73, 0x1a, 0x58, 0x0a, 0x11, 0x57, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, + 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x1a, 0x3f, 0x0a, 0x11, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, + 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x62, 0x61, 0x63, 0x6b, + 0x6f, 0x66, 0x66, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x42, + 0x0d, 0x0a, 0x0b, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x13, + 0x0a, 0x11, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x22, 0xb8, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, + 0x73, 0x6b, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x19, 0x0a, + 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x05, + 0x75, 0x6e, 0x69, 0x74, 0x73, 0x88, 0x01, 0x01, 0x12, 0x1e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, + 0x65, 0x78, 0x70, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 
0x07, 0x6b, 0x65, + 0x79, 0x45, 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 0x0a, 0x75, 0x6e, 0x69, 0x74, + 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, 0x52, 0x09, + 0x75, 0x6e, 0x69, 0x74, 0x73, 0x45, 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x11, + 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x5f, 0x65, 0x78, 0x70, + 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, 0x0f, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x45, 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12, 0x36, 0x0a, + 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x15, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x04, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x42, + 0x0b, 0x0a, 0x09, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0d, 0x0a, 0x0b, + 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42, 0x14, 0x0a, 0x12, 0x5f, + 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x5f, 0x65, 0x78, 0x70, + 0x72, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, + 0x0a, 0x1d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, + 0x1f, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, + 0x22, 0x37, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x73, 0x52, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x22, 0xe4, 0x01, 0x0a, 0x0d, 0x54, 0x61, + 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x65, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x44, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, - 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x08, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x2f, 0x0a, - 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x61, 0x64, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x4e, - 0x0a, 0x0d, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x11, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 
0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x24, - 0x0a, 0x0e, 0x53, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, - 0x12, 0x08, 0x0a, 0x04, 0x53, 0x4f, 0x46, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x41, - 0x52, 0x44, 0x10, 0x01, 0x2a, 0x5d, 0x0a, 0x11, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, - 0x74, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x45, 0x43, - 0x4f, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x49, 0x4e, 0x55, 0x54, 0x45, 0x10, - 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x4f, 0x55, 0x52, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x44, - 0x41, 0x59, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x57, 0x45, 0x45, 0x4b, 0x10, 0x04, 0x12, 0x09, - 0x0a, 0x05, 0x4d, 0x4f, 0x4e, 0x54, 0x48, 0x10, 0x05, 0x12, 0x08, 0x0a, 0x04, 0x59, 0x45, 0x41, - 0x52, 0x10, 0x06, 0x2a, 0x4e, 0x0a, 0x09, 0x52, 0x75, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x0a, 0x0a, 0x06, 0x51, 0x55, 0x45, 0x55, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, - 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x4d, - 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, - 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, - 0x44, 0x10, 0x04, 0x2a, 0x7f, 0x0a, 0x18, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, + 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x12, 0x1b, + 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, + 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x88, 0x01, 0x01, 0x12, 0x1f, 0x0a, 0x0b, 0x72, + 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 
0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, + 0x69, 0x73, 0x5f, 0x65, 0x76, 0x69, 0x63, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x09, 0x69, 0x73, 0x45, 0x76, 0x69, 0x63, 0x74, 0x65, 0x64, 0x42, 0x08, 0x0a, 0x06, 0x5f, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x22, 0xce, 0x02, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, + 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, + 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x12, 0x25, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x0d, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x44, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, + 0x72, 0x75, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x76, 0x31, 0x2e, + 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x73, 0x12, 0x12, 0x0a, + 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, + 0x65, 0x12, 0x2f, 0x0a, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x65, 0x76, 0x69, 0x63, 0x74, 0x65, 0x64, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x45, 0x76, 0x69, 0x63, 0x74, 0x65, + 0x64, 0x1a, 0x4e, 0x0a, 0x0d, 0x54, 
0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, + 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x2a, 0x24, 0x0a, 0x0e, 0x53, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x53, 0x74, 0x72, 0x61, 0x74, + 0x65, 0x67, 0x79, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x4f, 0x46, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, + 0x04, 0x48, 0x41, 0x52, 0x44, 0x10, 0x01, 0x2a, 0x5d, 0x0a, 0x11, 0x52, 0x61, 0x74, 0x65, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, + 0x53, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x49, 0x4e, 0x55, + 0x54, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x4f, 0x55, 0x52, 0x10, 0x02, 0x12, 0x07, + 0x0a, 0x03, 0x44, 0x41, 0x59, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x57, 0x45, 0x45, 0x4b, 0x10, + 0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x4f, 0x4e, 0x54, 0x48, 0x10, 0x05, 0x12, 0x08, 0x0a, 0x04, + 0x59, 0x45, 0x41, 0x52, 0x10, 0x06, 0x2a, 0x5b, 0x0a, 0x09, 0x52, 0x75, 0x6e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x0a, 0x0a, 0x06, 0x51, 0x55, 0x45, 0x55, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, + 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x46, + 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x41, 0x4e, 0x43, 0x45, + 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x56, 0x49, 0x43, 0x54, 0x45, + 0x44, 0x10, 0x05, 0x2a, 0x7f, 0x0a, 0x18, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 
0x67, 0x79, 0x12, 0x16, 0x0a, 0x12, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x5f, 0x49, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x52, 0x4f, 0x50, 0x5f, @@ -1877,44 +1877,41 @@ var file_v1_workflows_proto_rawDesc = []byte{ 0x45, 0x5f, 0x4e, 0x45, 0x57, 0x45, 0x53, 0x54, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x52, 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x52, 0x4f, 0x42, 0x49, 0x4e, 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x5f, 0x4e, 0x45, 0x57, 0x45, - 0x53, 0x54, 0x10, 0x04, 0x2a, 0x85, 0x01, 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x09, - 0x0a, 0x05, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, - 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x47, 0x52, 0x45, 0x41, - 0x54, 0x45, 0x52, 0x5f, 0x54, 0x48, 0x41, 0x4e, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, 0x47, 0x52, - 0x45, 0x41, 0x54, 0x45, 0x52, 0x5f, 0x54, 0x48, 0x41, 0x4e, 0x5f, 0x4f, 0x52, 0x5f, 0x45, 0x51, - 0x55, 0x41, 0x4c, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x4c, 0x45, 0x53, 0x53, 0x5f, 0x54, 0x48, - 0x41, 0x4e, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x53, 0x53, 0x5f, 0x54, 0x48, 0x41, - 0x4e, 0x5f, 0x4f, 0x52, 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x05, 0x32, 0xfd, 0x02, 0x0a, - 0x0c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x52, 0x0a, - 0x0b, 0x50, 0x75, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x20, 0x2e, 0x76, - 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, - 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x61, 0x73, 0x6b, 0x73, - 0x12, 0x16, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x61, 0x73, 0x6b, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, - 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, - 0x12, 0x16, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, - 0x70, 0x6c, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x53, 0x0a, 0x12, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x12, 0x1d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x69, - 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x69, 0x67, - 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, - 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x18, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, - 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x19, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x42, 0x5a, 0x40, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x74, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2f, 
0x69, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x53, 0x54, 0x10, 0x04, 0x32, 0xcf, 0x03, 0x0a, 0x0c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x52, 0x0a, 0x0b, 0x50, 0x75, 0x74, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x20, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x43, 0x61, 0x6e, + 0x63, 0x65, 0x6c, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, + 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x17, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x61, 0x73, 0x6b, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x52, 0x65, 0x70, + 0x6c, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, + 0x70, 0x6c, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x17, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x12, 0x54, 0x72, 0x69, + 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x12, + 0x1d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x1a, 0x1e, + 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, + 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, + 0x18, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, + 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x76, 0x31, 0x2e, 0x47, + 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x42, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x44, 0x75, + 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x1c, 0x2e, 0x76, 0x31, 0x2e, 0x42, + 0x72, 0x61, 0x6e, 0x63, 0x68, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x72, 0x61, + 0x6e, 0x63, 0x68, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2d, 0x64, 0x65, 0x76, + 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, + 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -1929,25 +1926,25 @@ func file_v1_workflows_proto_rawDescGZIP() []byte { return file_v1_workflows_proto_rawDescData } -var file_v1_workflows_proto_enumTypes = make([]protoimpl.EnumInfo, 5) -var file_v1_workflows_proto_msgTypes = make([]protoimpl.MessageInfo, 21) +var file_v1_workflows_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_v1_workflows_proto_msgTypes = 
make([]protoimpl.MessageInfo, 22) var file_v1_workflows_proto_goTypes = []interface{}{ (StickyStrategy)(0), // 0: v1.StickyStrategy (RateLimitDuration)(0), // 1: v1.RateLimitDuration (RunStatus)(0), // 2: v1.RunStatus (ConcurrencyLimitStrategy)(0), // 3: v1.ConcurrencyLimitStrategy - (WorkerLabelComparator)(0), // 4: v1.WorkerLabelComparator - (*CancelTasksRequest)(nil), // 5: v1.CancelTasksRequest - (*ReplayTasksRequest)(nil), // 6: v1.ReplayTasksRequest - (*TasksFilter)(nil), // 7: v1.TasksFilter - (*CancelTasksResponse)(nil), // 8: v1.CancelTasksResponse - (*ReplayTasksResponse)(nil), // 9: v1.ReplayTasksResponse - (*TriggerWorkflowRunRequest)(nil), // 10: v1.TriggerWorkflowRunRequest - (*TriggerWorkflowRunResponse)(nil), // 11: v1.TriggerWorkflowRunResponse - (*CreateWorkflowVersionRequest)(nil), // 12: v1.CreateWorkflowVersionRequest - (*DefaultFilter)(nil), // 13: v1.DefaultFilter - (*Concurrency)(nil), // 14: v1.Concurrency - (*DesiredWorkerLabels)(nil), // 15: v1.DesiredWorkerLabels + (*CancelTasksRequest)(nil), // 4: v1.CancelTasksRequest + (*ReplayTasksRequest)(nil), // 5: v1.ReplayTasksRequest + (*TasksFilter)(nil), // 6: v1.TasksFilter + (*CancelTasksResponse)(nil), // 7: v1.CancelTasksResponse + (*ReplayTasksResponse)(nil), // 8: v1.ReplayTasksResponse + (*TriggerWorkflowRunRequest)(nil), // 9: v1.TriggerWorkflowRunRequest + (*TriggerWorkflowRunResponse)(nil), // 10: v1.TriggerWorkflowRunResponse + (*BranchDurableTaskRequest)(nil), // 11: v1.BranchDurableTaskRequest + (*BranchDurableTaskResponse)(nil), // 12: v1.BranchDurableTaskResponse + (*CreateWorkflowVersionRequest)(nil), // 13: v1.CreateWorkflowVersionRequest + (*DefaultFilter)(nil), // 14: v1.DefaultFilter + (*Concurrency)(nil), // 15: v1.Concurrency (*CreateTaskOpts)(nil), // 16: v1.CreateTaskOpts (*CreateTaskRateLimit)(nil), // 17: v1.CreateTaskRateLimit (*CreateWorkflowVersionResponse)(nil), // 18: v1.CreateWorkflowVersionResponse @@ -1960,48 +1957,50 @@ var file_v1_workflows_proto_goTypes = 
[]interface{}{ nil, // 25: v1.GetRunDetailsResponse.TaskRunsEntry (*timestamppb.Timestamp)(nil), // 26: google.protobuf.Timestamp (*TaskConditions)(nil), // 27: v1.TaskConditions + (*DesiredWorkerLabels)(nil), // 28: v1.DesiredWorkerLabels } var file_v1_workflows_proto_depIdxs = []int32{ - 7, // 0: v1.CancelTasksRequest.filter:type_name -> v1.TasksFilter - 7, // 1: v1.ReplayTasksRequest.filter:type_name -> v1.TasksFilter + 6, // 0: v1.CancelTasksRequest.filter:type_name -> v1.TasksFilter + 6, // 1: v1.ReplayTasksRequest.filter:type_name -> v1.TasksFilter 26, // 2: v1.TasksFilter.since:type_name -> google.protobuf.Timestamp 26, // 3: v1.TasksFilter.until:type_name -> google.protobuf.Timestamp 22, // 4: v1.TriggerWorkflowRunRequest.desired_worker_labels:type_name -> v1.TriggerWorkflowRunRequest.DesiredWorkerLabelsEntry 16, // 5: v1.CreateWorkflowVersionRequest.tasks:type_name -> v1.CreateTaskOpts - 14, // 6: v1.CreateWorkflowVersionRequest.concurrency:type_name -> v1.Concurrency + 15, // 6: v1.CreateWorkflowVersionRequest.concurrency:type_name -> v1.Concurrency 16, // 7: v1.CreateWorkflowVersionRequest.on_failure_task:type_name -> v1.CreateTaskOpts 0, // 8: v1.CreateWorkflowVersionRequest.sticky:type_name -> v1.StickyStrategy - 14, // 9: v1.CreateWorkflowVersionRequest.concurrency_arr:type_name -> v1.Concurrency - 13, // 10: v1.CreateWorkflowVersionRequest.default_filters:type_name -> v1.DefaultFilter + 15, // 9: v1.CreateWorkflowVersionRequest.concurrency_arr:type_name -> v1.Concurrency + 14, // 10: v1.CreateWorkflowVersionRequest.default_filters:type_name -> v1.DefaultFilter 3, // 11: v1.Concurrency.limit_strategy:type_name -> v1.ConcurrencyLimitStrategy - 4, // 12: v1.DesiredWorkerLabels.comparator:type_name -> v1.WorkerLabelComparator - 17, // 13: v1.CreateTaskOpts.rate_limits:type_name -> v1.CreateTaskRateLimit - 23, // 14: v1.CreateTaskOpts.worker_labels:type_name -> v1.CreateTaskOpts.WorkerLabelsEntry - 14, // 15: v1.CreateTaskOpts.concurrency:type_name -> 
v1.Concurrency - 27, // 16: v1.CreateTaskOpts.conditions:type_name -> v1.TaskConditions - 24, // 17: v1.CreateTaskOpts.slot_requests:type_name -> v1.CreateTaskOpts.SlotRequestsEntry - 1, // 18: v1.CreateTaskRateLimit.duration:type_name -> v1.RateLimitDuration - 2, // 19: v1.TaskRunDetail.status:type_name -> v1.RunStatus - 2, // 20: v1.GetRunDetailsResponse.status:type_name -> v1.RunStatus - 25, // 21: v1.GetRunDetailsResponse.task_runs:type_name -> v1.GetRunDetailsResponse.TaskRunsEntry - 15, // 22: v1.TriggerWorkflowRunRequest.DesiredWorkerLabelsEntry.value:type_name -> v1.DesiredWorkerLabels - 15, // 23: v1.CreateTaskOpts.WorkerLabelsEntry.value:type_name -> v1.DesiredWorkerLabels - 20, // 24: v1.GetRunDetailsResponse.TaskRunsEntry.value:type_name -> v1.TaskRunDetail - 12, // 25: v1.AdminService.PutWorkflow:input_type -> v1.CreateWorkflowVersionRequest - 5, // 26: v1.AdminService.CancelTasks:input_type -> v1.CancelTasksRequest - 6, // 27: v1.AdminService.ReplayTasks:input_type -> v1.ReplayTasksRequest - 10, // 28: v1.AdminService.TriggerWorkflowRun:input_type -> v1.TriggerWorkflowRunRequest - 19, // 29: v1.AdminService.GetRunDetails:input_type -> v1.GetRunDetailsRequest + 17, // 12: v1.CreateTaskOpts.rate_limits:type_name -> v1.CreateTaskRateLimit + 23, // 13: v1.CreateTaskOpts.worker_labels:type_name -> v1.CreateTaskOpts.WorkerLabelsEntry + 15, // 14: v1.CreateTaskOpts.concurrency:type_name -> v1.Concurrency + 27, // 15: v1.CreateTaskOpts.conditions:type_name -> v1.TaskConditions + 24, // 16: v1.CreateTaskOpts.slot_requests:type_name -> v1.CreateTaskOpts.SlotRequestsEntry + 1, // 17: v1.CreateTaskRateLimit.duration:type_name -> v1.RateLimitDuration + 2, // 18: v1.TaskRunDetail.status:type_name -> v1.RunStatus + 2, // 19: v1.GetRunDetailsResponse.status:type_name -> v1.RunStatus + 25, // 20: v1.GetRunDetailsResponse.task_runs:type_name -> v1.GetRunDetailsResponse.TaskRunsEntry + 28, // 21: v1.TriggerWorkflowRunRequest.DesiredWorkerLabelsEntry.value:type_name -> 
v1.DesiredWorkerLabels + 28, // 22: v1.CreateTaskOpts.WorkerLabelsEntry.value:type_name -> v1.DesiredWorkerLabels + 20, // 23: v1.GetRunDetailsResponse.TaskRunsEntry.value:type_name -> v1.TaskRunDetail + 13, // 24: v1.AdminService.PutWorkflow:input_type -> v1.CreateWorkflowVersionRequest + 4, // 25: v1.AdminService.CancelTasks:input_type -> v1.CancelTasksRequest + 5, // 26: v1.AdminService.ReplayTasks:input_type -> v1.ReplayTasksRequest + 9, // 27: v1.AdminService.TriggerWorkflowRun:input_type -> v1.TriggerWorkflowRunRequest + 19, // 28: v1.AdminService.GetRunDetails:input_type -> v1.GetRunDetailsRequest + 11, // 29: v1.AdminService.BranchDurableTask:input_type -> v1.BranchDurableTaskRequest 18, // 30: v1.AdminService.PutWorkflow:output_type -> v1.CreateWorkflowVersionResponse - 8, // 31: v1.AdminService.CancelTasks:output_type -> v1.CancelTasksResponse - 9, // 32: v1.AdminService.ReplayTasks:output_type -> v1.ReplayTasksResponse - 11, // 33: v1.AdminService.TriggerWorkflowRun:output_type -> v1.TriggerWorkflowRunResponse + 7, // 31: v1.AdminService.CancelTasks:output_type -> v1.CancelTasksResponse + 8, // 32: v1.AdminService.ReplayTasks:output_type -> v1.ReplayTasksResponse + 10, // 33: v1.AdminService.TriggerWorkflowRun:output_type -> v1.TriggerWorkflowRunResponse 21, // 34: v1.AdminService.GetRunDetails:output_type -> v1.GetRunDetailsResponse - 30, // [30:35] is the sub-list for method output_type - 25, // [25:30] is the sub-list for method input_type - 25, // [25:25] is the sub-list for extension type_name - 25, // [25:25] is the sub-list for extension extendee - 0, // [0:25] is the sub-list for field type_name + 12, // 35: v1.AdminService.BranchDurableTask:output_type -> v1.BranchDurableTaskResponse + 30, // [30:36] is the sub-list for method output_type + 24, // [24:30] is the sub-list for method input_type + 24, // [24:24] is the sub-list for extension type_name + 24, // [24:24] is the sub-list for extension extendee + 0, // [0:24] is the sub-list for field 
type_name } func init() { file_v1_workflows_proto_init() } @@ -2010,6 +2009,7 @@ func file_v1_workflows_proto_init() { return } file_v1_shared_condition_proto_init() + file_v1_shared_trigger_proto_init() if !protoimpl.UnsafeEnabled { file_v1_workflows_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CancelTasksRequest); i { @@ -2096,7 +2096,7 @@ func file_v1_workflows_proto_init() { } } file_v1_workflows_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateWorkflowVersionRequest); i { + switch v := v.(*BranchDurableTaskRequest); i { case 0: return &v.state case 1: @@ -2108,7 +2108,7 @@ func file_v1_workflows_proto_init() { } } file_v1_workflows_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DefaultFilter); i { + switch v := v.(*BranchDurableTaskResponse); i { case 0: return &v.state case 1: @@ -2120,7 +2120,7 @@ func file_v1_workflows_proto_init() { } } file_v1_workflows_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Concurrency); i { + switch v := v.(*CreateWorkflowVersionRequest); i { case 0: return &v.state case 1: @@ -2132,7 +2132,7 @@ func file_v1_workflows_proto_init() { } } file_v1_workflows_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DesiredWorkerLabels); i { + switch v := v.(*DefaultFilter); i { case 0: return &v.state case 1: @@ -2144,7 +2144,7 @@ func file_v1_workflows_proto_init() { } } file_v1_workflows_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateTaskOpts); i { + switch v := v.(*Concurrency); i { case 0: return &v.state case 1: @@ -2156,7 +2156,7 @@ func file_v1_workflows_proto_init() { } } file_v1_workflows_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateTaskRateLimit); i { + switch v := v.(*CreateTaskOpts); i { case 0: return &v.state case 1: @@ -2168,7 
+2168,7 @@ func file_v1_workflows_proto_init() { } } file_v1_workflows_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateWorkflowVersionResponse); i { + switch v := v.(*CreateTaskRateLimit); i { case 0: return &v.state case 1: @@ -2180,7 +2180,7 @@ func file_v1_workflows_proto_init() { } } file_v1_workflows_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetRunDetailsRequest); i { + switch v := v.(*CreateWorkflowVersionResponse); i { case 0: return &v.state case 1: @@ -2192,7 +2192,7 @@ func file_v1_workflows_proto_init() { } } file_v1_workflows_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TaskRunDetail); i { + switch v := v.(*GetRunDetailsRequest); i { case 0: return &v.state case 1: @@ -2204,6 +2204,18 @@ func file_v1_workflows_proto_init() { } } file_v1_workflows_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskRunDetail); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_workflows_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetRunDetailsResponse); i { case 0: return &v.state @@ -2220,20 +2232,19 @@ func file_v1_workflows_proto_init() { file_v1_workflows_proto_msgTypes[1].OneofWrappers = []interface{}{} file_v1_workflows_proto_msgTypes[2].OneofWrappers = []interface{}{} file_v1_workflows_proto_msgTypes[5].OneofWrappers = []interface{}{} - file_v1_workflows_proto_msgTypes[7].OneofWrappers = []interface{}{} - file_v1_workflows_proto_msgTypes[8].OneofWrappers = []interface{}{} file_v1_workflows_proto_msgTypes[9].OneofWrappers = []interface{}{} file_v1_workflows_proto_msgTypes[10].OneofWrappers = []interface{}{} file_v1_workflows_proto_msgTypes[11].OneofWrappers = []interface{}{} file_v1_workflows_proto_msgTypes[12].OneofWrappers = []interface{}{} - 
file_v1_workflows_proto_msgTypes[15].OneofWrappers = []interface{}{} + file_v1_workflows_proto_msgTypes[13].OneofWrappers = []interface{}{} + file_v1_workflows_proto_msgTypes[16].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_v1_workflows_proto_rawDesc, - NumEnums: 5, - NumMessages: 21, + NumEnums: 4, + NumMessages: 22, NumExtensions: 0, NumServices: 1, }, diff --git a/internal/services/shared/proto/v1/workflows_grpc.pb.go b/internal/services/shared/proto/v1/workflows_grpc.pb.go index 7fa7b84ec..96a3b1b66 100644 --- a/internal/services/shared/proto/v1/workflows_grpc.pb.go +++ b/internal/services/shared/proto/v1/workflows_grpc.pb.go @@ -27,6 +27,7 @@ type AdminServiceClient interface { ReplayTasks(ctx context.Context, in *ReplayTasksRequest, opts ...grpc.CallOption) (*ReplayTasksResponse, error) TriggerWorkflowRun(ctx context.Context, in *TriggerWorkflowRunRequest, opts ...grpc.CallOption) (*TriggerWorkflowRunResponse, error) GetRunDetails(ctx context.Context, in *GetRunDetailsRequest, opts ...grpc.CallOption) (*GetRunDetailsResponse, error) + BranchDurableTask(ctx context.Context, in *BranchDurableTaskRequest, opts ...grpc.CallOption) (*BranchDurableTaskResponse, error) } type adminServiceClient struct { @@ -82,6 +83,15 @@ func (c *adminServiceClient) GetRunDetails(ctx context.Context, in *GetRunDetail return out, nil } +func (c *adminServiceClient) BranchDurableTask(ctx context.Context, in *BranchDurableTaskRequest, opts ...grpc.CallOption) (*BranchDurableTaskResponse, error) { + out := new(BranchDurableTaskResponse) + err := c.cc.Invoke(ctx, "/v1.AdminService/BranchDurableTask", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // AdminServiceServer is the server API for AdminService service. 
// All implementations must embed UnimplementedAdminServiceServer // for forward compatibility @@ -91,6 +101,7 @@ type AdminServiceServer interface { ReplayTasks(context.Context, *ReplayTasksRequest) (*ReplayTasksResponse, error) TriggerWorkflowRun(context.Context, *TriggerWorkflowRunRequest) (*TriggerWorkflowRunResponse, error) GetRunDetails(context.Context, *GetRunDetailsRequest) (*GetRunDetailsResponse, error) + BranchDurableTask(context.Context, *BranchDurableTaskRequest) (*BranchDurableTaskResponse, error) mustEmbedUnimplementedAdminServiceServer() } @@ -113,6 +124,9 @@ func (UnimplementedAdminServiceServer) TriggerWorkflowRun(context.Context, *Trig func (UnimplementedAdminServiceServer) GetRunDetails(context.Context, *GetRunDetailsRequest) (*GetRunDetailsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetRunDetails not implemented") } +func (UnimplementedAdminServiceServer) BranchDurableTask(context.Context, *BranchDurableTaskRequest) (*BranchDurableTaskResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BranchDurableTask not implemented") +} func (UnimplementedAdminServiceServer) mustEmbedUnimplementedAdminServiceServer() {} // UnsafeAdminServiceServer may be embedded to opt out of forward compatibility for this service. 
@@ -216,6 +230,24 @@ func _AdminService_GetRunDetails_Handler(srv interface{}, ctx context.Context, d return interceptor(ctx, in, info, handler) } +func _AdminService_BranchDurableTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BranchDurableTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AdminServiceServer).BranchDurableTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v1.AdminService/BranchDurableTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AdminServiceServer).BranchDurableTask(ctx, req.(*BranchDurableTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + // AdminService_ServiceDesc is the grpc.ServiceDesc for AdminService service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -243,6 +275,10 @@ var AdminService_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetRunDetails", Handler: _AdminService_GetRunDetails_Handler, }, + { + MethodName: "BranchDurableTask", + Handler: _AdminService_BranchDurableTask_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "v1/workflows.proto", diff --git a/internal/services/shared/tasktypes/v1/olap.go b/internal/services/shared/tasktypes/v1/olap.go index a0023d9b7..75dca9750 100644 --- a/internal/services/shared/tasktypes/v1/olap.go +++ b/internal/services/shared/tasktypes/v1/olap.go @@ -90,7 +90,8 @@ func CreatedEventTriggerMessage(tenantId uuid.UUID, eventTriggers CreatedEventTr type CreateMonitoringEventPayload struct { TaskId int64 `json:"task_id"` - RetryCount int32 `json:"retry_count"` + RetryCount int32 `json:"retry_count"` + DurableInvocationCount int32 `json:"durable_invocation_count"` WorkerId *uuid.UUID `json:"worker_id,omitempty"` @@ -101,7 +102,7 @@ type 
CreateMonitoringEventPayload struct { EventMessage string `json:"event_message,omitempty"` } -func MonitoringEventMessageFromActionEvent(tenantId uuid.UUID, taskId int64, retryCount int32, request *contracts.StepActionEvent) (*msgqueue.Message, error) { +func MonitoringEventMessageFromActionEvent(tenantId uuid.UUID, taskId int64, retryCount int32, durableInvocationCount int32, request *contracts.StepActionEvent) (*msgqueue.Message, error) { var workerId *uuid.UUID parsedId, err := uuid.Parse(request.WorkerId) @@ -110,11 +111,12 @@ func MonitoringEventMessageFromActionEvent(tenantId uuid.UUID, taskId int64, ret } payload := CreateMonitoringEventPayload{ - TaskId: taskId, - RetryCount: retryCount, - WorkerId: workerId, - EventTimestamp: request.EventTimestamp.AsTime(), - EventPayload: request.EventPayload, + TaskId: taskId, + RetryCount: retryCount, + DurableInvocationCount: durableInvocationCount, + WorkerId: workerId, + EventTimestamp: request.EventTimestamp.AsTime(), + EventPayload: request.EventPayload, } switch request.EventType { diff --git a/internal/services/shared/tasktypes/v1/task.go b/internal/services/shared/tasktypes/v1/task.go index c1743fd33..87917b4c3 100644 --- a/internal/services/shared/tasktypes/v1/task.go +++ b/internal/services/shared/tasktypes/v1/task.go @@ -214,3 +214,47 @@ type CandidateFinalizedPayload struct { // (required) the workflow run id (can either be a workflow run id or single task) WorkflowRunId uuid.UUID `validate:"required"` } + +type DurableRestoreTaskPayload struct { + Reason string + TaskExternalId uuid.UUID +} + +func DurableRestoreTaskMessage(tenantId uuid.UUID, taskExternalId uuid.UUID, reason string) (*msgqueue.Message, error) { + return msgqueue.NewTenantMessage( + tenantId, + msgqueue.MsgIDDurableRestoreTask, + false, + true, + DurableRestoreTaskPayload{ + TaskExternalId: taskExternalId, + Reason: reason, + }, + ) +} + +type DurableCallbackCompletedPayload struct { + TaskExternalId uuid.UUID + BranchId int64 + NodeId 
int64 + InvocationCount int32 + Payload []byte +} + +func DurableCallbackCompletedMessage( + tenantId, taskExternalId uuid.UUID, invocationCount int32, branchId, nodeId int64, payload []byte, +) (*msgqueue.Message, error) { + return msgqueue.NewTenantMessage( + tenantId, + msgqueue.MsgIDDurableCallbackCompleted, + false, + true, + DurableCallbackCompletedPayload{ + TaskExternalId: taskExternalId, + InvocationCount: invocationCount, + BranchId: branchId, + NodeId: nodeId, + Payload: payload, + }, + ) +} diff --git a/internal/statusutils/status.go b/internal/statusutils/status.go index 8090f8e17..6841ed6be 100644 --- a/internal/statusutils/status.go +++ b/internal/statusutils/status.go @@ -14,6 +14,7 @@ type V1RunStatus string const ( V1RunStatusQueued V1RunStatus = "QUEUED" V1RunStatusRunning V1RunStatus = "RUNNING" + V1RunStatusEvicted V1RunStatus = "EVICTED" V1RunStatusCancelled V1RunStatus = "CANCELLED" V1RunStatusFailed V1RunStatus = "FAILED" V1RunStatusCompleted V1RunStatus = "COMPLETED" @@ -27,6 +28,9 @@ func V1RunStatusFromProto(status contracts.RunStatus) (*V1RunStatus, error) { case contracts.RunStatus_RUNNING: r := V1RunStatusRunning return &r, nil + case contracts.RunStatus_EVICTED: + e := V1RunStatusEvicted + return &e, nil case contracts.RunStatus_CANCELLED: c := V1RunStatusCancelled return &c, nil @@ -42,6 +46,10 @@ func V1RunStatusFromProto(status contracts.RunStatus) (*V1RunStatus, error) { } func (s *V1RunStatus) ToProto() (*contracts.RunStatus, error) { + if s == nil { + return nil, fmt.Errorf("nil run status") + } + switch *s { case V1RunStatusQueued: r := contracts.RunStatus_QUEUED @@ -49,6 +57,9 @@ func (s *V1RunStatus) ToProto() (*contracts.RunStatus, error) { case V1RunStatusRunning: r := contracts.RunStatus_RUNNING return &r, nil + case V1RunStatusEvicted: + r := contracts.RunStatus_RUNNING + return &r, nil case V1RunStatusCancelled: r := contracts.RunStatus_CANCELLED return &r, nil @@ -63,6 +74,10 @@ func (s *V1RunStatus) ToProto() 
(*contracts.RunStatus, error) { } } +func (s *V1RunStatus) IsEvicted() bool { + return s != nil && *s == V1RunStatusEvicted +} + func V1RunStatusFromEventType(eventType sqlcv1.V1TaskEventType) (*V1RunStatus, error) { switch eventType { case sqlcv1.V1TaskEventTypeCANCELLED: @@ -91,7 +106,7 @@ func DeriveWorkflowRunStatus(ctx context.Context, statuses []V1RunStatus) (*V1Ru return &f, nil } - if listutils.Any(uniqueStatuses, "RUNNING") || listutils.Any(uniqueStatuses, "QUEUED") { + if listutils.Any(uniqueStatuses, "RUNNING") || listutils.Any(uniqueStatuses, "QUEUED") || listutils.Any(uniqueStatuses, "EVICTED") { r := V1RunStatusRunning return &r, nil } diff --git a/pkg/analytics/aggregating.go b/pkg/analytics/aggregating.go index 65d929a8f..0cabc6b8a 100644 --- a/pkg/analytics/aggregating.go +++ b/pkg/analytics/aggregating.go @@ -34,14 +34,14 @@ type FlushFunc func(resource Resource, action Action, tenantID uuid.UUID, tokenI type Aggregator struct { done chan struct{} flushFn FlushFunc + l *zerolog.Logger counters sync.Map wg sync.WaitGroup interval time.Duration maxKeys int64 keyCount atomic.Int64 - l *zerolog.Logger - disabled bool flushMu sync.Mutex + disabled bool } func NewAggregator(l *zerolog.Logger, enabled bool, interval time.Duration, maxKeys int64, fn FlushFunc) *Aggregator { diff --git a/pkg/analytics/analytics.go b/pkg/analytics/analytics.go index 412173180..df0d92105 100644 --- a/pkg/analytics/analytics.go +++ b/pkg/analytics/analytics.go @@ -18,6 +18,7 @@ const ( Event Resource = "event" WorkflowRun Resource = "workflow-run" TaskRun Resource = "task-run" + DurableTask Resource = "durable-task" Worker Resource = "worker" RateLimit Resource = "rate-limit" Webhook Resource = "webhook" @@ -45,6 +46,11 @@ const ( Release Action = "release" Refresh Action = "refresh" Send Action = "send" + Evict Action = "evict" + Restore Action = "restore" + Branch Action = "branch" + Memo Action = "memo" + WaitFor Action = "wait-for" ) type Properties map[string]interface{} 
diff --git a/pkg/client/admin.go b/pkg/client/admin.go index db3d00417..17555f841 100644 --- a/pkg/client/admin.go +++ b/pkg/client/admin.go @@ -18,12 +18,11 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" admincontracts "github.com/hatchet-dev/hatchet/internal/services/admin/contracts" + v1contracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1" "github.com/hatchet-dev/hatchet/pkg/client/rest" "github.com/hatchet-dev/hatchet/pkg/client/types" "github.com/hatchet-dev/hatchet/pkg/config/client" "github.com/hatchet-dev/hatchet/pkg/validator" - - v1contracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1" ) type ChildWorkflowOpts struct { @@ -253,10 +252,10 @@ func (a *adminClientImpl) ScheduleWorkflow(workflowName string, fs ...ScheduleOp return nil } -type RunOptFunc func(*admincontracts.TriggerWorkflowRequest) error +type RunOptFunc func(*v1contracts.TriggerWorkflowRequest) error func WithRunMetadata(metadata interface{}) RunOptFunc { - return func(r *admincontracts.TriggerWorkflowRequest) error { + return func(r *v1contracts.TriggerWorkflowRequest) error { metadataBytes, err := json.Marshal(metadata) if err != nil { return err @@ -271,7 +270,7 @@ func WithRunMetadata(metadata interface{}) RunOptFunc { } func WithPriority(priority int32) RunOptFunc { - return func(r *admincontracts.TriggerWorkflowRequest) error { + return func(r *v1contracts.TriggerWorkflowRequest) error { r.Priority = &priority return nil @@ -337,7 +336,7 @@ func (a *adminClientImpl) RunWorkflow(workflowName string, input interface{}, op workflowName = client.ApplyNamespace(workflowName, &a.namespace) - request := &admincontracts.TriggerWorkflowRequest{ + request := &v1contracts.TriggerWorkflowRequest{ Name: workflowName, Input: string(inputBytes), } @@ -375,7 +374,7 @@ func (a *adminClientImpl) RunWorkflow(workflowName string, input interface{}, op func (a *adminClientImpl) BulkRunWorkflow(workflows []*WorkflowRun) ([]string, error) { - 
triggerWorkflowRequests := make([]*admincontracts.TriggerWorkflowRequest, len(workflows)) + triggerWorkflowRequests := make([]*v1contracts.TriggerWorkflowRequest, len(workflows)) for i, workflow := range workflows { inputBytes, err := json.Marshal(workflow.Input) @@ -384,7 +383,7 @@ func (a *adminClientImpl) BulkRunWorkflow(workflows []*WorkflowRun) ([]string, e } workflowName := client.ApplyNamespace(workflow.Name, &a.namespace) - triggerWorkflowRequests[i] = &admincontracts.TriggerWorkflowRequest{ + triggerWorkflowRequests[i] = &v1contracts.TriggerWorkflowRequest{ Name: workflowName, Input: string(inputBytes), } @@ -430,7 +429,7 @@ func (a *adminClientImpl) RunChildWorkflow(workflowName string, input interface{ metadata := string(metadataBytes) - res, err := a.client.TriggerWorkflow(a.ctx.newContext(context.Background()), &admincontracts.TriggerWorkflowRequest{ + res, err := a.client.TriggerWorkflow(a.ctx.newContext(context.Background()), &v1contracts.TriggerWorkflowRequest{ Name: workflowName, Input: string(inputBytes), ParentId: &opts.ParentId, @@ -465,7 +464,7 @@ type RunChildWorkflowsOpts struct { func (a *adminClientImpl) RunChildWorkflows(workflows []*RunChildWorkflowsOpts) ([]string, error) { - triggerWorkflowRequests := make([]*admincontracts.TriggerWorkflowRequest, len(workflows)) + triggerWorkflowRequests := make([]*v1contracts.TriggerWorkflowRequest, len(workflows)) for i, workflow := range workflows { if workflow.Opts == nil { @@ -493,7 +492,7 @@ func (a *adminClientImpl) RunChildWorkflows(workflows []*RunChildWorkflowsOpts) metadata := string(metadataBytes) - triggerWorkflowRequests[i] = &admincontracts.TriggerWorkflowRequest{ + triggerWorkflowRequests[i] = &v1contracts.TriggerWorkflowRequest{ Name: workflowName, Input: string(inputBytes), ParentId: &workflow.Opts.ParentId, diff --git a/pkg/client/rest/gen.go b/pkg/client/rest/gen.go index d413f0c92..37cc238b0 100644 --- a/pkg/client/rest/gen.go +++ b/pkg/client/rest/gen.go @@ -233,6 +233,13 @@ const 
( V1LogLineOrderByDirectionDESC V1LogLineOrderByDirection = "DESC" ) +// Defines values for V1RunningFilter. +const ( + ALL V1RunningFilter = "ALL" + EVICTED V1RunningFilter = "EVICTED" + ONWORKER V1RunningFilter = "ON_WORKER" +) + // Defines values for V1TaskEventType. const ( V1TaskEventTypeACKNOWLEDGED V1TaskEventType = "ACKNOWLEDGED" @@ -240,6 +247,8 @@ const ( V1TaskEventTypeCANCELLED V1TaskEventType = "CANCELLED" V1TaskEventTypeCOULDNOTSENDTOWORKER V1TaskEventType = "COULD_NOT_SEND_TO_WORKER" V1TaskEventTypeCREATED V1TaskEventType = "CREATED" + V1TaskEventTypeDURABLEEVICTED V1TaskEventType = "DURABLE_EVICTED" + V1TaskEventTypeDURABLERESTORING V1TaskEventType = "DURABLE_RESTORING" V1TaskEventTypeFAILED V1TaskEventType = "FAILED" V1TaskEventTypeFINISHED V1TaskEventType = "FINISHED" V1TaskEventTypeQUEUED V1TaskEventType = "QUEUED" @@ -1386,6 +1395,30 @@ type UserTenantPublic struct { Name *string `json:"name,omitempty"` } +// V1BranchDurableTaskRequest defines model for V1BranchDurableTaskRequest. +type V1BranchDurableTaskRequest struct { + // BranchId The branch id to replay from. + BranchId int64 `json:"branchId"` + + // NodeId The node id to replay from. + NodeId int64 `json:"nodeId"` + + // TaskExternalId The external id of the durable task to branch. + TaskExternalId openapi_types.UUID `json:"taskExternalId"` +} + +// V1BranchDurableTaskResponse defines model for V1BranchDurableTaskResponse. +type V1BranchDurableTaskResponse struct { + // BranchId The branch id of the new entry. + BranchId int64 `json:"branchId"` + + // NodeId The node id of the new entry. + NodeId int64 `json:"nodeId"` + + // TaskExternalId The external id of the durable task. + TaskExternalId openapi_types.UUID `json:"taskExternalId"` +} + // V1CELDebugRequest defines model for V1CELDebugRequest. 
type V1CELDebugRequest struct { // AdditionalMetadata Additional metadata, which simulates metadata that could be sent with an event or a workflow run @@ -1678,6 +1711,23 @@ type V1ReplayedTasks struct { Ids *[]openapi_types.UUID `json:"ids,omitempty"` } +// V1RestoreTaskResponse defines model for V1RestoreTaskResponse. +type V1RestoreTaskResponse struct { + Requeued bool `json:"requeued"` +} + +// V1RunningDetailCount defines model for V1RunningDetailCount. +type V1RunningDetailCount struct { + // Evicted The number of evicted tasks within the RUNNING status bucket. + Evicted int `json:"evicted"` + + // OnWorker The number of tasks currently on a worker within the RUNNING status bucket. + OnWorker int `json:"onWorker"` +} + +// V1RunningFilter defines model for V1RunningFilter. +type V1RunningFilter string + // V1TaskEvent defines model for V1TaskEvent. type V1TaskEvent struct { // Attempt The attempt number of the task. @@ -1728,8 +1778,9 @@ type V1TaskPointMetrics struct { // V1TaskRunMetric defines model for V1TaskRunMetric. type V1TaskRunMetric struct { - Count int `json:"count"` - Status V1TaskStatus `json:"status"` + Count int `json:"count"` + RunningDetailCount *V1RunningDetailCount `json:"runningDetailCount,omitempty"` + Status V1TaskStatus `json:"status"` } // V1TaskRunMetrics defines model for V1TaskRunMetrics. @@ -1768,8 +1819,11 @@ type V1TaskSummary struct { FinishedAt *time.Time `json:"finishedAt,omitempty"` // Input The input of the task run. - Input openapi.NonNullableJSON `json:"input"` - Metadata APIResourceMeta `json:"metadata"` + Input openapi.NonNullableJSON `json:"input"` + + // IsEvicted Whether the task has been evicted from a worker (still counts as RUNNING). 
+ IsEvicted *bool `json:"isEvicted,omitempty"` + Metadata APIResourceMeta `json:"metadata"` // NumSpawnedChildren The number of spawned children tasks NumSpawnedChildren int `json:"numSpawnedChildren"` @@ -1830,8 +1884,11 @@ type V1TaskTiming struct { Depth int `json:"depth"` // FinishedAt The timestamp the task run finished. - FinishedAt *time.Time `json:"finishedAt,omitempty"` - Metadata APIResourceMeta `json:"metadata"` + FinishedAt *time.Time `json:"finishedAt,omitempty"` + + // IsEvicted Whether the task has been evicted from a worker (still counts as RUNNING). + IsEvicted *bool `json:"isEvicted,omitempty"` + Metadata APIResourceMeta `json:"metadata"` // ParentTaskExternalId The external ID of the parent task. ParentTaskExternalId *openapi_types.UUID `json:"parentTaskExternalId,omitempty"` @@ -2624,6 +2681,9 @@ type V1WorkflowRunListParams struct { // IncludePayloads A flag for whether or not to include the input and output payloads in the response. Defaults to `true` if unset. IncludePayloads *bool `form:"include_payloads,omitempty" json:"include_payloads,omitempty"` + + // RunningFilter Filter within the RUNNING status bucket. ALL returns both on-worker and evicted tasks, ON_WORKER returns only tasks running on a worker, EVICTED returns only evicted tasks. Defaults to ALL. + RunningFilter *V1RunningFilter `form:"running_filter,omitempty" json:"running_filter,omitempty"` } // V1WorkflowRunDisplayNamesListParams defines parameters for V1WorkflowRunDisplayNamesList. @@ -2648,6 +2708,9 @@ type V1WorkflowRunExternalIdsListParams struct { // WorkflowIds The workflow ids to find runs for WorkflowIds *[]openapi_types.UUID `form:"workflow_ids,omitempty" json:"workflow_ids,omitempty"` + + // RunningFilter Filter within the RUNNING status bucket. ALL returns both on-worker and evicted tasks, ON_WORKER returns only tasks running on a worker, EVICTED returns only evicted tasks. Defaults to ALL. 
+ RunningFilter *V1RunningFilter `form:"running_filter,omitempty" json:"running_filter,omitempty"` } // V1WorkflowRunTaskEventsListParams defines parameters for V1WorkflowRunTaskEventsList. @@ -2938,6 +3001,9 @@ type AlertEmailGroupUpdateJSONRequestBody = UpdateTenantAlertEmailGroupRequest // V1CelDebugJSONRequestBody defines body for V1CelDebug for application/json ContentType. type V1CelDebugJSONRequestBody = V1CELDebugRequest +// V1DurableTaskBranchJSONRequestBody defines body for V1DurableTaskBranch for application/json ContentType. +type V1DurableTaskBranchJSONRequestBody = V1BranchDurableTaskRequest + // V1FilterCreateJSONRequestBody defines body for V1FilterCreate for application/json ContentType. type V1FilterCreateJSONRequestBody = V1CreateFilterRequest @@ -3263,6 +3329,9 @@ type ClientInterface interface { // V1LogLineList request V1LogLineList(ctx context.Context, task openapi_types.UUID, params *V1LogLineListParams, reqEditors ...RequestEditorFn) (*http.Response, error) + // V1TaskRestore request + V1TaskRestore(ctx context.Context, task openapi_types.UUID, reqEditors ...RequestEditorFn) (*http.Response, error) + // V1TaskEventList request V1TaskEventList(ctx context.Context, task openapi_types.UUID, params *V1TaskEventListParams, reqEditors ...RequestEditorFn) (*http.Response, error) @@ -3271,6 +3340,11 @@ type ClientInterface interface { V1CelDebug(ctx context.Context, tenant openapi_types.UUID, body V1CelDebugJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) + // V1DurableTaskBranchWithBody request with any body + V1DurableTaskBranchWithBody(ctx context.Context, tenant openapi_types.UUID, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + + V1DurableTaskBranch(ctx context.Context, tenant openapi_types.UUID, body V1DurableTaskBranchJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) + // V1EventList request V1EventList(ctx context.Context, tenant openapi_types.UUID, 
params *V1EventListParams, reqEditors ...RequestEditorFn) (*http.Response, error) @@ -3907,6 +3981,18 @@ func (c *Client) V1LogLineList(ctx context.Context, task openapi_types.UUID, par return c.Client.Do(req) } +func (c *Client) V1TaskRestore(ctx context.Context, task openapi_types.UUID, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewV1TaskRestoreRequest(c.Server, task) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + func (c *Client) V1TaskEventList(ctx context.Context, task openapi_types.UUID, params *V1TaskEventListParams, reqEditors ...RequestEditorFn) (*http.Response, error) { req, err := NewV1TaskEventListRequest(c.Server, task, params) if err != nil { @@ -3943,6 +4029,30 @@ func (c *Client) V1CelDebug(ctx context.Context, tenant openapi_types.UUID, body return c.Client.Do(req) } +func (c *Client) V1DurableTaskBranchWithBody(ctx context.Context, tenant openapi_types.UUID, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewV1DurableTaskBranchRequestWithBody(c.Server, tenant, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) V1DurableTaskBranch(ctx context.Context, tenant openapi_types.UUID, body V1DurableTaskBranchJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewV1DurableTaskBranchRequest(c.Server, tenant, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + func (c *Client) V1EventList(ctx context.Context, tenant openapi_types.UUID, params *V1EventListParams, reqEditors ...RequestEditorFn) 
(*http.Response, error) { req, err := NewV1EventListRequest(c.Server, tenant, params) if err != nil { @@ -6493,6 +6603,40 @@ func NewV1LogLineListRequest(server string, task openapi_types.UUID, params *V1L return req, nil } +// NewV1TaskRestoreRequest generates requests for V1TaskRestore +func NewV1TaskRestoreRequest(server string, task openapi_types.UUID) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "task", runtime.ParamLocationPath, task) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/stable/tasks/%s/restore", pathParam0) + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + // NewV1TaskEventListRequest generates requests for V1TaskEventList func NewV1TaskEventListRequest(server string, task openapi_types.UUID, params *V1TaskEventListParams) (*http.Request, error) { var err error @@ -6612,6 +6756,53 @@ func NewV1CelDebugRequestWithBody(server string, tenant openapi_types.UUID, cont return req, nil } +// NewV1DurableTaskBranchRequest calls the generic V1DurableTaskBranch builder with application/json body +func NewV1DurableTaskBranchRequest(server string, tenant openapi_types.UUID, body V1DurableTaskBranchJSONRequestBody) (*http.Request, error) { + var bodyReader io.Reader + buf, err := json.Marshal(body) + if err != nil { + return nil, err + } + bodyReader = bytes.NewReader(buf) + return NewV1DurableTaskBranchRequestWithBody(server, tenant, "application/json", bodyReader) +} + +// NewV1DurableTaskBranchRequestWithBody generates requests for V1DurableTaskBranch with any type of body +func 
NewV1DurableTaskBranchRequestWithBody(server string, tenant openapi_types.UUID, contentType string, body io.Reader) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "tenant", runtime.ParamLocationPath, tenant) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/stable/tenants/%s/durable-tasks/branch", pathParam0) + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), body) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", contentType) + + return req, nil +} + // NewV1EventListRequest generates requests for V1EventList func NewV1EventListRequest(server string, tenant openapi_types.UUID, params *V1EventListParams) (*http.Request, error) { var err error @@ -8015,6 +8206,22 @@ func NewV1WorkflowRunListRequest(server string, tenant openapi_types.UUID, param } + if params.RunningFilter != nil { + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "running_filter", runtime.ParamLocationQuery, *params.RunningFilter); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + } + queryURL.RawQuery = queryValues.Encode() } @@ -8183,6 +8390,22 @@ func NewV1WorkflowRunExternalIdsListRequest(server string, tenant openapi_types. 
} + if params.RunningFilter != nil { + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "running_filter", runtime.ParamLocationQuery, *params.RunningFilter); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + } + queryURL.RawQuery = queryValues.Encode() } @@ -13265,6 +13488,9 @@ type ClientWithResponsesInterface interface { // V1LogLineListWithResponse request V1LogLineListWithResponse(ctx context.Context, task openapi_types.UUID, params *V1LogLineListParams, reqEditors ...RequestEditorFn) (*V1LogLineListResponse, error) + // V1TaskRestoreWithResponse request + V1TaskRestoreWithResponse(ctx context.Context, task openapi_types.UUID, reqEditors ...RequestEditorFn) (*V1TaskRestoreResponse, error) + // V1TaskEventListWithResponse request V1TaskEventListWithResponse(ctx context.Context, task openapi_types.UUID, params *V1TaskEventListParams, reqEditors ...RequestEditorFn) (*V1TaskEventListResponse, error) @@ -13273,6 +13499,11 @@ type ClientWithResponsesInterface interface { V1CelDebugWithResponse(ctx context.Context, tenant openapi_types.UUID, body V1CelDebugJSONRequestBody, reqEditors ...RequestEditorFn) (*V1CelDebugResponse, error) + // V1DurableTaskBranchWithBodyWithResponse request with any body + V1DurableTaskBranchWithBodyWithResponse(ctx context.Context, tenant openapi_types.UUID, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*V1DurableTaskBranchResponse, error) + + V1DurableTaskBranchWithResponse(ctx context.Context, tenant openapi_types.UUID, body V1DurableTaskBranchJSONRequestBody, reqEditors ...RequestEditorFn) (*V1DurableTaskBranchResponse, error) + // V1EventListWithResponse request V1EventListWithResponse(ctx context.Context, tenant openapi_types.UUID, params *V1EventListParams, reqEditors ...RequestEditorFn) (*V1EventListResponse, error) @@ 
-14093,6 +14324,31 @@ func (r V1LogLineListResponse) StatusCode() int { return 0 } +type V1TaskRestoreResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *V1RestoreTaskResponse + JSON400 *APIErrors + JSON403 *APIErrors + JSON404 *APIErrors +} + +// Status returns HTTPResponse.Status +func (r V1TaskRestoreResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r V1TaskRestoreResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + type V1TaskEventListResponse struct { Body []byte HTTPResponse *http.Response @@ -14143,6 +14399,30 @@ func (r V1CelDebugResponse) StatusCode() int { return 0 } +type V1DurableTaskBranchResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *V1BranchDurableTaskResponse + JSON400 *APIErrors + JSON403 *APIErrors +} + +// Status returns HTTPResponse.Status +func (r V1DurableTaskBranchResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r V1DurableTaskBranchResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + type V1EventListResponse struct { Body []byte HTTPResponse *http.Response @@ -17107,6 +17387,15 @@ func (c *ClientWithResponses) V1LogLineListWithResponse(ctx context.Context, tas return ParseV1LogLineListResponse(rsp) } +// V1TaskRestoreWithResponse request returning *V1TaskRestoreResponse +func (c *ClientWithResponses) V1TaskRestoreWithResponse(ctx context.Context, task openapi_types.UUID, reqEditors ...RequestEditorFn) (*V1TaskRestoreResponse, error) { + rsp, err := c.V1TaskRestore(ctx, task, reqEditors...) 
+ if err != nil { + return nil, err + } + return ParseV1TaskRestoreResponse(rsp) +} + // V1TaskEventListWithResponse request returning *V1TaskEventListResponse func (c *ClientWithResponses) V1TaskEventListWithResponse(ctx context.Context, task openapi_types.UUID, params *V1TaskEventListParams, reqEditors ...RequestEditorFn) (*V1TaskEventListResponse, error) { rsp, err := c.V1TaskEventList(ctx, task, params, reqEditors...) @@ -17133,6 +17422,23 @@ func (c *ClientWithResponses) V1CelDebugWithResponse(ctx context.Context, tenant return ParseV1CelDebugResponse(rsp) } +// V1DurableTaskBranchWithBodyWithResponse request with arbitrary body returning *V1DurableTaskBranchResponse +func (c *ClientWithResponses) V1DurableTaskBranchWithBodyWithResponse(ctx context.Context, tenant openapi_types.UUID, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*V1DurableTaskBranchResponse, error) { + rsp, err := c.V1DurableTaskBranchWithBody(ctx, tenant, contentType, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseV1DurableTaskBranchResponse(rsp) +} + +func (c *ClientWithResponses) V1DurableTaskBranchWithResponse(ctx context.Context, tenant openapi_types.UUID, body V1DurableTaskBranchJSONRequestBody, reqEditors ...RequestEditorFn) (*V1DurableTaskBranchResponse, error) { + rsp, err := c.V1DurableTaskBranch(ctx, tenant, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseV1DurableTaskBranchResponse(rsp) +} + // V1EventListWithResponse request returning *V1EventListResponse func (c *ClientWithResponses) V1EventListWithResponse(ctx context.Context, tenant openapi_types.UUID, params *V1EventListParams, reqEditors ...RequestEditorFn) (*V1EventListResponse, error) { rsp, err := c.V1EventList(ctx, tenant, params, reqEditors...) 
@@ -19088,6 +19394,53 @@ func ParseV1LogLineListResponse(rsp *http.Response) (*V1LogLineListResponse, err return response, nil } +// ParseV1TaskRestoreResponse parses an HTTP response from a V1TaskRestoreWithResponse call +func ParseV1TaskRestoreResponse(rsp *http.Response) (*V1TaskRestoreResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &V1TaskRestoreResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest V1RestoreTaskResponse + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest APIErrors + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 403: + var dest APIErrors + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON403 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 404: + var dest APIErrors + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON404 = &dest + + } + + return response, nil +} + // ParseV1TaskEventListResponse parses an HTTP response from a V1TaskEventListWithResponse call func ParseV1TaskEventListResponse(rsp *http.Response) (*V1TaskEventListResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) @@ -19182,6 +19535,46 @@ func ParseV1CelDebugResponse(rsp *http.Response) (*V1CelDebugResponse, error) { return response, nil } +// ParseV1DurableTaskBranchResponse parses an HTTP response from a V1DurableTaskBranchWithResponse call +func ParseV1DurableTaskBranchResponse(rsp *http.Response) 
(*V1DurableTaskBranchResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &V1DurableTaskBranchResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest V1BranchDurableTaskResponse + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest APIErrors + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 403: + var dest APIErrors + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON403 = &dest + + } + + return response, nil +} + // ParseV1EventListResponse parses an HTTP response from a V1EventListWithResponse call func ParseV1EventListResponse(rsp *http.Response) (*V1EventListResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) diff --git a/pkg/repository/durable_events.go b/pkg/repository/durable_events.go new file mode 100644 index 000000000..9f5aa3b84 --- /dev/null +++ b/pkg/repository/durable_events.go @@ -0,0 +1,1366 @@ +package repository + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "slices" + "sort" + "strings" + + "github.com/google/uuid" + "github.com/jackc/pgx/v5/pgtype" + + "github.com/hatchet-dev/hatchet/pkg/repository/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" +) + +type TaskExternalIdNodeIdBranchId struct { + TaskExternalId uuid.UUID `validate:"required"` + NodeId int64 `validate:"required"` + BranchId int64 `validate:"required"` +} + +type SatisfiedEventWithPayload struct { + TaskExternalId uuid.UUID + InvocationCount 
int32 + BranchID int64 + NodeID int64 + Result []byte +} + +type BaseIngestEventOpts struct { + Kind sqlcv1.V1DurableEventLogKind `validate:"required"` + TenantId uuid.UUID `validate:"required"` + Task *sqlcv1.FlattenExternalIdsRow `validate:"required"` + InvocationCount int32 +} + +type IngestMemoOpts struct { + Payload []byte + MemoKey []byte +} + +type IngestTriggerRunsOpts struct { + TriggerOpts []*WorkflowNameTriggerOpts `validate:"required,min=1"` +} + +type IngestWaitForOpts struct { + WaitForConditions []CreateExternalSignalConditionOpt +} + +type IngestDurableTaskEventOpts struct { + *BaseIngestEventOpts + Memo *IngestMemoOpts + TriggerRuns *IngestTriggerRunsOpts + WaitFor *IngestWaitForOpts +} + +type IngestMemoResult struct { + InvocationCount int32 + IsSatisfied bool + NodeId int64 + BranchId int64 + ResultPayload []byte + AlreadyExisted bool +} + +type IngestTriggerRunsEntry struct { + NodeId int64 + BranchId int64 + IsSatisfied bool + AlreadyExisted bool + ResultPayload []byte +} + +type IngestTriggerRunsResult struct { + InvocationCount int32 + Entries []*IngestTriggerRunsEntry + CreatedTasks []*V1TaskWithPayload + CreatedDAGs []*DAGWithData +} + +type IngestWaitForResult struct { + InvocationCount int32 + IsSatisfied bool + NodeId int64 + BranchId int64 + AlreadyExisted bool + ResultPayload []byte +} + +type IngestDurableTaskEventResult struct { + Kind sqlcv1.V1DurableEventLogKind + MemoResult *IngestMemoResult + TriggerRunsResult *IngestTriggerRunsResult + WaitForResult *IngestWaitForResult +} + +type HandleBranchResult struct { + EventLogFile *sqlcv1.V1DurableEventLogFile + NodeId int64 + BranchId int64 +} + +type IncrementDurableTaskInvocationCountsOpts struct { + TenantId uuid.UUID + TaskId int64 + TaskInsertedAt pgtype.Timestamptz +} + +type CompleteMemoEntryOpts struct { + TenantId uuid.UUID + TaskExternalId uuid.UUID + InvocationCount int32 + BranchId int64 + NodeId int64 + MemoKey []byte + Payload []byte +} + +type NodeIdBranchIdTuple struct 
{ + NodeId int64 + BranchId int64 +} + +type DurableEventsRepository interface { + IngestDurableTaskEvent(ctx context.Context, opts IngestDurableTaskEventOpts) (*IngestDurableTaskEventResult, error) + HandleBranch(ctx context.Context, tenantId uuid.UUID, nodeId, branchId int64, task *sqlcv1.FlattenExternalIdsRow) (*HandleBranchResult, error) + + GetSatisfiedDurableEvents(ctx context.Context, tenantId uuid.UUID, events []TaskExternalIdNodeIdBranchId) ([]*SatisfiedEventWithPayload, error) + GetDurableTaskInvocationCounts(ctx context.Context, tenantId uuid.UUID, tasks []IdInsertedAt) (map[IdInsertedAt]*int32, error) + CompleteMemoEntry(ctx context.Context, opts CompleteMemoEntryOpts) error +} + +type durableEventsRepository struct { + *sharedRepository +} + +func newDurableEventsRepository(shared *sharedRepository) DurableEventsRepository { + return &durableEventsRepository{ + sharedRepository: shared, + } +} + +type NonDeterminismDetail struct { + Expected string + Received string +} + +type NonDeterminismError struct { + NodeId int64 + BranchId int64 + TaskExternalId uuid.UUID + ExpectedIdempotencyKey []byte + ActualIdempotencyKey []byte + ExpectedKind sqlcv1.V1DurableEventLogKind + ActualKind sqlcv1.V1DurableEventLogKind + ExistingEntryId int64 + ExistingEntryInsertedAt pgtype.Timestamptz + ExistingEntryTenantId uuid.UUID + Detail *NonDeterminismDetail +} + +func (m *NonDeterminismError) Error() string { + msg := fmt.Sprintf("non-determinism error in task %s at node %d:%d", m.TaskExternalId, m.NodeId, m.BranchId) + + if m.Detail != nil { + msg += "\n expected: " + m.Detail.Expected + "\n received: " + m.Detail.Received + } + + return msg +} + +type StaleInvocationError struct { + TaskExternalId uuid.UUID + ExpectedInvocationCount int32 + ActualInvocationCount int32 +} + +func (e *StaleInvocationError) Error() string { + return fmt.Sprintf("invocation count mismatch for task %s: server has %d, worker sent %d", e.TaskExternalId.String(), e.ExpectedInvocationCount, 
e.ActualInvocationCount) +} + +func formatConditionLabel(c CreateExternalSignalConditionOpt) string { + switch c.Kind { + case CreateExternalSignalConditionKindSLEEP: + if c.SleepFor != nil { + return "sleep(" + *c.SleepFor + ")" + } + return "sleep" + case CreateExternalSignalConditionKindUSEREVENT: + if c.UserEventKey != nil { + return "waitForEvent(" + *c.UserEventKey + ")" + } + return "waitForEvent" + default: + return string(c.Kind) + } +} + +const maxDisplayLabels = 5 + +func summarizeLabels(labels []string) string { + if len(labels) <= maxDisplayLabels { + return strings.Join(labels, ", ") + } + + counts := make(map[string]int, len(labels)) + order := make([]string, 0) + + for _, l := range labels { + if counts[l] == 0 { + order = append(order, l) + } + counts[l]++ + } + + parts := make([]string, 0, min(len(order), maxDisplayLabels)) + + for i, name := range order { + if i >= maxDisplayLabels { + break + } + + if counts[name] > 1 { + parts = append(parts, fmt.Sprintf("%dx %s", counts[name], name)) + } else { + parts = append(parts, name) + } + } + + if remaining := len(order) - maxDisplayLabels; remaining > 0 { + parts = append(parts, fmt.Sprintf("... 
+%d more unique", remaining)) + } + + return strings.Join(parts, ", ") +} + +func (opts IngestDurableTaskEventOpts) formatCall() string { + switch opts.Kind { + case sqlcv1.V1DurableEventLogKindRUN: + if opts.TriggerRuns != nil { + names := make([]string, 0, len(opts.TriggerRuns.TriggerOpts)) + for _, t := range opts.TriggerRuns.TriggerOpts { + names = append(names, t.WorkflowName) + } + return "run(" + summarizeLabels(names) + ")" + } + case sqlcv1.V1DurableEventLogKindWAITFOR: + if opts.WaitFor != nil { + parts := make([]string, 0, len(opts.WaitFor.WaitForConditions)) + for _, c := range opts.WaitFor.WaitForConditions { + parts = append(parts, formatConditionLabel(c)) + } + return "waitFor(" + summarizeLabels(parts) + ")" + } + case sqlcv1.V1DurableEventLogKindMEMO: + return "memo" + } + + return string(opts.Kind) +} + +func formatStoredPayload(kind sqlcv1.V1DurableEventLogKind, payload []byte) string { + if len(payload) == 0 { + return string(kind) + } + + switch kind { + case sqlcv1.V1DurableEventLogKindRUN: + var triggerOpts WorkflowNameTriggerOpts + + if err := json.Unmarshal(payload, &triggerOpts); err != nil { + return string(kind) + } + + if triggerOpts.WorkflowName != "" { + return "run(" + triggerOpts.WorkflowName + ")" + } + case sqlcv1.V1DurableEventLogKindWAITFOR: + var conditions []CreateExternalSignalConditionOpt + + if err := json.Unmarshal(payload, &conditions); err != nil { + return string(kind) + } + + if len(conditions) > 0 { + parts := make([]string, 0, len(conditions)) + for _, c := range conditions { + parts = append(parts, formatConditionLabel(c)) + } + return "waitFor(" + summarizeLabels(parts) + ")" + } + case sqlcv1.V1DurableEventLogKindMEMO: + return "memo" + } + + return string(kind) +} + +func nonDeterminismDetail(opts IngestDurableTaskEventOpts, expectedKind sqlcv1.V1DurableEventLogKind, existingPayload []byte) *NonDeterminismDetail { + return &NonDeterminismDetail{ + Expected: formatStoredPayload(expectedKind, existingPayload), + 
Received: opts.formatCall(), + } +} + +type GetOrCreateLogEntryOpt struct { + Kind sqlcv1.V1DurableEventLogKind + IdempotencyKey []byte + InputPayload []byte + ResultPayload []byte + NodeId int64 + BranchId int64 + InvocationCount int32 + IsSatisfied bool +} + +type GetOrCreateLogEntryOpts struct { + TenantId uuid.UUID + DurableTaskId int64 + DurableTaskInsertedAt pgtype.Timestamptz + DurableTaskExternalId uuid.UUID + Entries []GetOrCreateLogEntryOpt +} + +type EventLogEntryWithPayloads struct { + Entry *sqlcv1.BulkGetDurableEventLogEntriesRow + InputPayload []byte + ResultPayload []byte + AlreadyExisted bool +} + +func (r *durableEventsRepository) GetSatisfiedDurableEvents(ctx context.Context, tenantId uuid.UUID, events []TaskExternalIdNodeIdBranchId) ([]*SatisfiedEventWithPayload, error) { + if len(events) == 0 { + return nil, nil + } + + taskExternalIds := make([]uuid.UUID, len(events)) + nodeIds := make([]int64, len(events)) + branchIds := make([]int64, len(events)) + isSatisfieds := make([]bool, len(events)) + + for i, e := range events { + if err := r.v.Validate(e); err != nil { + return nil, fmt.Errorf("invalid event at index %d: %w", i, err) + } + + taskExternalIds[i] = e.TaskExternalId + nodeIds[i] = e.NodeId + branchIds[i] = e.BranchId + isSatisfieds[i] = true + } + + rows, err := r.queries.ListSatisfiedEntries(ctx, r.pool, sqlcv1.ListSatisfiedEntriesParams{ + Taskexternalids: taskExternalIds, + Nodeids: nodeIds, + Branchids: branchIds, + }) + + if err != nil { + return nil, fmt.Errorf("failed to list satisfied entries: %w", err) + } + + retrievePayloadOpts := make([]RetrievePayloadOpts, len(rows)) + + for i, row := range rows { + retrievePayloadOpts[i] = RetrievePayloadOpts{ + Id: row.ID, + InsertedAt: row.InsertedAt, + Type: sqlcv1.V1PayloadTypeDURABLEEVENTLOGENTRYRESULTDATA, + TenantId: tenantId, + } + } + + payloads, err := r.payloadStore.Retrieve(ctx, r.pool, retrievePayloadOpts...) 
+ + if err != nil { + return nil, fmt.Errorf("failed to retrieve payloads for satisfied callbacks: %w", err) + } + + result := make([]*SatisfiedEventWithPayload, 0, len(rows)) + + for _, row := range rows { + retrieveOpt := RetrievePayloadOpts{ + Id: row.ID, + InsertedAt: row.InsertedAt, + Type: sqlcv1.V1PayloadTypeDURABLEEVENTLOGENTRYRESULTDATA, + TenantId: tenantId, + } + + payload := payloads[retrieveOpt] + + result = append(result, &SatisfiedEventWithPayload{ + TaskExternalId: row.TaskExternalID, + NodeID: row.NodeID, + BranchID: row.BranchID, + InvocationCount: row.InvocationCount, + Result: payload, + }) + } + + return result, nil +} + +func getDurableTaskSignalKey(taskExternalId uuid.UUID, nodeId int64) string { + return fmt.Sprintf("durable:%s:%d", taskExternalId.String(), nodeId) +} + +func (r *durableEventsRepository) createIdempotencyKey(kind sqlcv1.V1DurableEventLogKind, triggerOpts *WorkflowNameTriggerOpts, waitForConditions []CreateExternalSignalConditionOpt) ([]byte, error) { + // note: can't use additional metadata here because it's not stable, since we store trace information in it w/ the otel instrumentors + dataToHash := []byte(kind) + + if triggerOpts != nil { + dataToHash = append(dataToHash, triggerOpts.Data...) + dataToHash = append(dataToHash, []byte(triggerOpts.WorkflowName)...) 
+ } + + if waitForConditions != nil { + sort.Slice(waitForConditions, func(i, j int) bool { + condI := waitForConditions[i] + condJ := waitForConditions[j] + + if condI.Expression != condJ.Expression { + return condI.Expression < condJ.Expression + } + + if condI.ReadableDataKey != condJ.ReadableDataKey { + return condI.ReadableDataKey < condJ.ReadableDataKey + } + + if condI.Kind != condJ.Kind { + return condI.Kind < condJ.Kind + } + + if condI.SleepFor != nil && condJ.SleepFor != nil { + if *condI.SleepFor != *condJ.SleepFor { + return *condI.SleepFor < *condJ.SleepFor + } + } + + if condI.UserEventKey != nil && condJ.UserEventKey != nil { + if *condI.UserEventKey != *condJ.UserEventKey { + return *condI.UserEventKey < *condJ.UserEventKey + } + } + + return false + }) + + for _, cond := range waitForConditions { + toHash := cond.Expression + cond.ReadableDataKey + string(cond.Kind) + + if cond.SleepFor != nil { + toHash += *cond.SleepFor + } + + if cond.UserEventKey != nil { + toHash += *cond.UserEventKey + } + + dataToHash = append(dataToHash, []byte(toHash)...) 
+ } + } + + h := sha256.New() + h.Write(dataToHash) + hashBytes := h.Sum(nil) + idempotencyKey := make([]byte, hex.EncodedLen(len(hashBytes))) + hex.Encode(idempotencyKey, hashBytes) + + return idempotencyKey, nil +} + +func (r *sharedRepository) incrementDurableTaskInvocationCounts(ctx context.Context, tx sqlcv1.DBTX, opts []IncrementDurableTaskInvocationCountsOpts) (map[IncrementDurableTaskInvocationCountsOpts]*int32, error) { + taskIds := make([]int64, len(opts)) + taskInsertedAts := make([]pgtype.Timestamptz, len(opts)) + tenantIds := make([]uuid.UUID, len(opts)) + + for i, opt := range opts { + taskIds[i] = opt.TaskId + taskInsertedAts[i] = opt.TaskInsertedAt + tenantIds[i] = opt.TenantId + } + + logFiles, err := r.queries.IncrementLogFileInvocationCounts(ctx, tx, sqlcv1.IncrementLogFileInvocationCountsParams{ + Durabletaskids: taskIds, + Durabletaskinsertedats: taskInsertedAts, + Tenantids: tenantIds, + }) + + if err != nil { + return nil, fmt.Errorf("failed to increment invocation counts: %w", err) + } + + result := make(map[IncrementDurableTaskInvocationCountsOpts]*int32, len(opts)) + + for _, logFile := range logFiles { + opt := IncrementDurableTaskInvocationCountsOpts{ + TenantId: logFile.TenantID, + TaskId: logFile.DurableTaskID, + TaskInsertedAt: logFile.DurableTaskInsertedAt, + } + + result[opt] = &logFile.LatestInvocationCount + } + + return result, nil +} + +func (r *durableEventsRepository) getAndLockLogFile(ctx context.Context, tx sqlcv1.DBTX, tenantId uuid.UUID, durableTaskId int64, durableTaskInsertedAt pgtype.Timestamptz) (*sqlcv1.V1DurableEventLogFile, error) { + return r.queries.GetAndLockLogFile(ctx, tx, sqlcv1.GetAndLockLogFileParams{ + Durabletaskid: durableTaskId, + Durabletaskinsertedat: durableTaskInsertedAt, + Tenantid: tenantId, + }) +} + +func (r *durableEventsRepository) listEventLogBranchPoints(ctx context.Context, tx sqlcv1.DBTX, tenantId uuid.UUID, durableTaskId int64, durableTaskInsertedAt pgtype.Timestamptz) 
(map[int64]*sqlcv1.V1DurableEventLogBranchPoint, error) { + branchPoints, err := r.queries.ListDurableEventLogBranchPoints(ctx, tx, sqlcv1.ListDurableEventLogBranchPointsParams{ + Durabletaskid: durableTaskId, + Durabletaskinsertedat: durableTaskInsertedAt, + Tenantid: tenantId, + }) + + if err != nil { + return nil, fmt.Errorf("failed to list durable event log branch points: %w", err) + } + + nextBranchIdToBranchPoint := make(map[int64]*sqlcv1.V1DurableEventLogBranchPoint, len(branchPoints)) + + for _, bp := range branchPoints { + nextBranchIdToBranchPoint[bp.NextBranchID] = bp + } + + return nextBranchIdToBranchPoint, nil +} + +type BranchIdFromNodeIdTuple struct { + BranchId int64 + FromNodeId int64 +} + +func resolveBranchForNode(nodeId, currentBranchId int64, nextBranchIdToBranchPoint map[int64]*sqlcv1.V1DurableEventLogBranchPoint) int64 { + tree := make([]BranchIdFromNodeIdTuple, 0) + + currBranchId := currentBranchId + for { + branchPoint, found := nextBranchIdToBranchPoint[currBranchId] + + if !found { + tree = append(tree, BranchIdFromNodeIdTuple{currBranchId, 0}) + break + } + + tree = append(tree, BranchIdFromNodeIdTuple{currBranchId, branchPoint.FirstNodeIDInNewBranch}) + currBranchId = branchPoint.ParentBranchID + } + + sort.Slice(tree, func(i, j int) bool { + if tree[i].FromNodeId != tree[j].FromNodeId { + return tree[i].FromNodeId < tree[j].FromNodeId + } + return tree[i].BranchId < tree[j].BranchId + }) + + i := sort.Search(len(tree), func(i int) bool { return tree[i].FromNodeId > nodeId }) + return tree[i-1].BranchId +} + +func (r *durableEventsRepository) getOrCreateEventLogEntries( + ctx context.Context, + tx sqlcv1.DBTX, + opts GetOrCreateLogEntryOpts, +) ([]*EventLogEntryWithPayloads, error) { + if len(opts.Entries) == 0 { + return nil, nil + } + + n := len(opts.Entries) + branchIds := make([]int64, n) + nodeIds := make([]int64, n) + + for i, o := range opts.Entries { + branchIds[i] = o.BranchId + nodeIds[i] = o.NodeId + } + + existingEntries, 
err := r.queries.BulkGetDurableEventLogEntries(ctx, tx, sqlcv1.BulkGetDurableEventLogEntriesParams{ + Durabletaskid: opts.DurableTaskId, + Durabletaskinsertedat: opts.DurableTaskInsertedAt, + Branchids: branchIds, + Nodeids: nodeIds, + }) + + if err != nil { + return nil, fmt.Errorf("failed to bulk-get existing entries: %w", err) + } + + nodeIdBranchIdToExistingEntry := make(map[NodeIdBranchIdTuple]*sqlcv1.BulkGetDurableEventLogEntriesRow, len(existingEntries)) + for _, e := range existingEntries { + nodeIdBranchIdToExistingEntry[NodeIdBranchIdTuple{e.NodeID, e.BranchID}] = e + } + + existedEntries := make(map[NodeIdBranchIdTuple]*sqlcv1.BulkGetDurableEventLogEntriesRow) + nodeIdBranchIdToNewEntry := make(map[NodeIdBranchIdTuple]GetOrCreateLogEntryOpt, 0) + + for _, o := range opts.Entries { + key := NodeIdBranchIdTuple{o.NodeId, o.BranchId} + existingEntry, found := nodeIdBranchIdToExistingEntry[key] + + if !found { + nodeIdBranchIdToNewEntry[key] = o + continue + } + + if !bytes.Equal(o.IdempotencyKey, existingEntry.IdempotencyKey) { + return nil, &NonDeterminismError{ + BranchId: o.BranchId, + NodeId: o.NodeId, + TaskExternalId: opts.DurableTaskExternalId, + ExpectedIdempotencyKey: existingEntry.IdempotencyKey, + ActualIdempotencyKey: o.IdempotencyKey, + ExpectedKind: existingEntry.Kind, + ActualKind: o.Kind, + ExistingEntryId: existingEntry.ID, + ExistingEntryInsertedAt: existingEntry.InsertedAt, + ExistingEntryTenantId: existingEntry.TenantID, + } + } + + existedEntries[key] = existingEntry + } + + nodeIdBranchIdToCreatedEntry := make(map[NodeIdBranchIdTuple]*sqlcv1.BulkCreateDurableEventLogEntriesRow) + + if len(nodeIdBranchIdToNewEntry) > 0 { + createParams := sqlcv1.BulkCreateDurableEventLogEntriesParams{ + Tenantids: make([]uuid.UUID, 0), + Externalids: make([]uuid.UUID, 0), + Durabletaskids: make([]int64, 0), + Durabletaskinsertedats: make([]pgtype.Timestamptz, 0), + Kinds: make([]string, 0), + Nodeids: make([]int64, 0), + Branchids: make([]int64, 0), + 
Idempotencykeys: make([][]byte, 0), + Issatisfieds: make([]bool, 0), + } + + for _, entry := range nodeIdBranchIdToNewEntry { + createParams.Tenantids = append(createParams.Tenantids, opts.TenantId) + createParams.Externalids = append(createParams.Externalids, uuid.New()) + createParams.Durabletaskids = append(createParams.Durabletaskids, opts.DurableTaskId) + createParams.Durabletaskinsertedats = append(createParams.Durabletaskinsertedats, opts.DurableTaskInsertedAt) + createParams.Kinds = append(createParams.Kinds, string(entry.Kind)) + createParams.Nodeids = append(createParams.Nodeids, entry.NodeId) + createParams.Branchids = append(createParams.Branchids, entry.BranchId) + createParams.Idempotencykeys = append(createParams.Idempotencykeys, entry.IdempotencyKey) + createParams.Issatisfieds = append(createParams.Issatisfieds, entry.IsSatisfied) + } + + createdRows, err := r.queries.BulkCreateDurableEventLogEntries(ctx, tx, createParams) + if err != nil { + return nil, fmt.Errorf("failed to bulk-create event log entries: %w", err) + } + + for _, createdRow := range createdRows { + nodeIdBranchIdToCreatedEntry[NodeIdBranchIdTuple{createdRow.NodeID, createdRow.BranchID}] = createdRow + } + + storePayloadOpts := make([]StorePayloadOpts, 0, len(nodeIdBranchIdToNewEntry)*2) + for _, createdRow := range createdRows { + opt, ok := nodeIdBranchIdToNewEntry[NodeIdBranchIdTuple{createdRow.NodeID, createdRow.BranchID}] + + if !ok { + continue + } + + if len(opt.InputPayload) > 0 { + storePayloadOpts = append(storePayloadOpts, StorePayloadOpts{ + Id: createdRow.ID, + InsertedAt: createdRow.InsertedAt, + ExternalId: createdRow.ExternalID, + Type: sqlcv1.V1PayloadTypeDURABLEEVENTLOGENTRYDATA, + Payload: opt.InputPayload, + TenantId: opts.TenantId, + }) + } + + if len(opt.ResultPayload) > 0 { + storePayloadOpts = append(storePayloadOpts, StorePayloadOpts{ + Id: createdRow.ID, + InsertedAt: createdRow.InsertedAt, + ExternalId: createdRow.ExternalID, + Type: 
sqlcv1.V1PayloadTypeDURABLEEVENTLOGENTRYRESULTDATA, + Payload: opt.ResultPayload, + TenantId: opts.TenantId, + }) + } + } + + if len(storePayloadOpts) > 0 { + if storeErr := r.payloadStore.Store(ctx, tx, storePayloadOpts...); storeErr != nil { + return nil, fmt.Errorf("failed to store payloads for new entries: %w", storeErr) + } + } + } + + var retrieveOpts []RetrievePayloadOpts + for _, entry := range existedEntries { + retrieveOpts = append(retrieveOpts, RetrievePayloadOpts{ + Id: entry.ID, + InsertedAt: entry.InsertedAt, + Type: sqlcv1.V1PayloadTypeDURABLEEVENTLOGENTRYRESULTDATA, + TenantId: opts.TenantId, + }) + } + + var existingPayloads map[RetrievePayloadOpts][]byte + if len(retrieveOpts) > 0 { + existingPayloads, err = r.payloadStore.Retrieve(ctx, tx, retrieveOpts...) + if err != nil { + existingPayloads = nil + } + } + + results := make([]*EventLogEntryWithPayloads, n) + for i, o := range opts.Entries { + key := NodeIdBranchIdTuple{o.NodeId, o.BranchId} + if existingEntry, ok := existedEntries[key]; ok { + var resultPayload []byte + if existingPayloads != nil { + resultPayload = existingPayloads[RetrievePayloadOpts{ + Id: existingEntry.ID, + InsertedAt: existingEntry.InsertedAt, + Type: sqlcv1.V1PayloadTypeDURABLEEVENTLOGENTRYRESULTDATA, + TenantId: opts.TenantId, + }] + } + results[i] = &EventLogEntryWithPayloads{ + Entry: existingEntry, + InputPayload: o.InputPayload, + ResultPayload: resultPayload, + AlreadyExisted: true, + } + } else { + created := nodeIdBranchIdToCreatedEntry[key] + results[i] = &EventLogEntryWithPayloads{ + Entry: &sqlcv1.BulkGetDurableEventLogEntriesRow{ + TenantID: created.TenantID, + ExternalID: created.ExternalID, + ID: created.ID, + DurableTaskID: created.DurableTaskID, + DurableTaskInsertedAt: created.DurableTaskInsertedAt, + Kind: created.Kind, + NodeID: created.NodeID, + BranchID: created.BranchID, + IdempotencyKey: created.IdempotencyKey, + IsSatisfied: created.IsSatisfied, + InvocationCount: created.InvocationCount, + }, + 
InputPayload: o.InputPayload, + ResultPayload: o.ResultPayload, + AlreadyExisted: false, + } + } + } + + slices.SortFunc(results, func(i, j *EventLogEntryWithPayloads) int { + if i.Entry.NodeID != j.Entry.NodeID { + return int(i.Entry.NodeID - j.Entry.NodeID) + } + + return int(i.Entry.BranchID - j.Entry.BranchID) + }) + + return results, nil +} + +func (r *durableEventsRepository) IngestDurableTaskEvent(ctx context.Context, opts IngestDurableTaskEventOpts) (*IngestDurableTaskEventResult, error) { + if err := r.v.Validate(opts); err != nil { + return nil, fmt.Errorf("invalid opts: %w", err) + } + + if opts.Kind == sqlcv1.V1DurableEventLogKindRUN && len(opts.TriggerRuns.TriggerOpts) == 0 { + return nil, fmt.Errorf("TriggerOptsList is required and must be non-empty for RUN kind") + } + + tenantId := opts.TenantId + task := opts.Task + + optTx, err := r.PrepareOptimisticTx(ctx) + if err != nil { + return nil, fmt.Errorf("failed to prepare tx: %w", err) + } + defer optTx.Rollback() + + tx := optTx.tx + + logFile, err := r.getAndLockLogFile(ctx, tx, tenantId, task.ID, task.InsertedAt) + if err != nil { + return nil, fmt.Errorf("failed to lock log file: %w", err) + } + + nextBranchIdToBranchPoint, err := r.listEventLogBranchPoints(ctx, tx, tenantId, task.ID, task.InsertedAt) + if err != nil { + return nil, fmt.Errorf("failed to list log branch points: %w", err) + } + + if logFile.LatestInvocationCount != opts.InvocationCount { + return nil, &StaleInvocationError{ + TaskExternalId: opts.Task.ExternalID, + ExpectedInvocationCount: logFile.LatestInvocationCount, + ActualInvocationCount: opts.InvocationCount, + } + } + + baseNodeId := logFile.LatestNodeID + 1 + + var getOrCreateOpts GetOrCreateLogEntryOpts + + nodeIdBranchIdToTriggerOpts := make(map[NodeIdBranchIdTuple]*WorkflowNameTriggerOpts) + runExternalIdToNodeIdBranchId := make(map[uuid.UUID]NodeIdBranchIdTuple) + + switch opts.Kind { + case sqlcv1.V1DurableEventLogKindRUN: + innerOpts := make([]GetOrCreateLogEntryOpt, 
len(opts.TriggerRuns.TriggerOpts)) + + for i, triggerOpts := range opts.TriggerRuns.TriggerOpts { + nodeId := baseNodeId + int64(i) + branchId := resolveBranchForNode(nodeId, logFile.LatestBranchID, nextBranchIdToBranchPoint) + + inputPayload, marshalErr := json.Marshal(triggerOpts) + if marshalErr != nil { + return nil, fmt.Errorf("failed to marshal trigger opts: %w", marshalErr) + } + + idempotencyKey, keyErr := r.createIdempotencyKey(sqlcv1.V1DurableEventLogKindRUN, triggerOpts, nil) + if keyErr != nil { + return nil, fmt.Errorf("failed to create idempotency key: %w", keyErr) + } + + innerOpts[i] = GetOrCreateLogEntryOpt{ + Kind: sqlcv1.V1DurableEventLogKindRUN, + NodeId: nodeId, + BranchId: branchId, + InvocationCount: opts.InvocationCount, + IdempotencyKey: idempotencyKey, + InputPayload: inputPayload, + } + + nodeBranchKey := NodeIdBranchIdTuple{NodeId: nodeId, BranchId: branchId} + nodeIdBranchIdToTriggerOpts[nodeBranchKey] = triggerOpts + runExternalIdToNodeIdBranchId[triggerOpts.ExternalId] = nodeBranchKey + } + + getOrCreateOpts = GetOrCreateLogEntryOpts{ + TenantId: tenantId, + DurableTaskId: task.ID, + DurableTaskInsertedAt: task.InsertedAt, + DurableTaskExternalId: task.ExternalID, + Entries: innerOpts, + } + case sqlcv1.V1DurableEventLogKindWAITFOR: + branchId := resolveBranchForNode(baseNodeId, logFile.LatestBranchID, nextBranchIdToBranchPoint) + + inputPayload, marshalErr := json.Marshal(opts.WaitFor.WaitForConditions) + if marshalErr != nil { + return nil, fmt.Errorf("failed to marshal wait for conditions: %w", marshalErr) + } + + idempotencyKey, keyErr := r.createIdempotencyKey(sqlcv1.V1DurableEventLogKindWAITFOR, nil, opts.WaitFor.WaitForConditions) + if keyErr != nil { + return nil, fmt.Errorf("failed to create idempotency key: %w", keyErr) + } + + getOrCreateOpts = GetOrCreateLogEntryOpts{ + TenantId: tenantId, + DurableTaskExternalId: task.ExternalID, + DurableTaskId: task.ID, + DurableTaskInsertedAt: task.InsertedAt, + Entries: 
[]GetOrCreateLogEntryOpt{{ + Kind: sqlcv1.V1DurableEventLogKindWAITFOR, + NodeId: baseNodeId, + BranchId: branchId, + InvocationCount: opts.InvocationCount, + IdempotencyKey: idempotencyKey, + InputPayload: inputPayload, + }}, + } + case sqlcv1.V1DurableEventLogKindMEMO: + branchId := resolveBranchForNode(baseNodeId, logFile.LatestBranchID, nextBranchIdToBranchPoint) + + var resultPayload []byte + isSatisfied := false + if len(opts.Memo.Payload) > 0 { + isSatisfied = true + resultPayload = opts.Memo.Payload + } + + getOrCreateOpts = GetOrCreateLogEntryOpts{ + TenantId: tenantId, + DurableTaskExternalId: task.ExternalID, + DurableTaskId: task.ID, + DurableTaskInsertedAt: task.InsertedAt, + Entries: []GetOrCreateLogEntryOpt{{ + Kind: sqlcv1.V1DurableEventLogKindMEMO, + NodeId: baseNodeId, + BranchId: branchId, + InvocationCount: opts.InvocationCount, + IdempotencyKey: opts.Memo.MemoKey, + IsSatisfied: isSatisfied, + ResultPayload: resultPayload, + }}, + } + default: + return nil, fmt.Errorf("unsupported durable event log entry kind: %s", opts.Kind) + } + + logEntries, err := r.getOrCreateEventLogEntries(ctx, tx, getOrCreateOpts) + if err != nil { + var nde *NonDeterminismError + if errors.As(err, &nde) { + var existingPayload []byte + payloads, retrieveErr := r.payloadStore.Retrieve(ctx, tx, RetrievePayloadOpts{ + Id: nde.ExistingEntryId, + InsertedAt: nde.ExistingEntryInsertedAt, + Type: sqlcv1.V1PayloadTypeDURABLEEVENTLOGENTRYDATA, + TenantId: nde.ExistingEntryTenantId, + }) + if retrieveErr == nil { + existingPayload = payloads[RetrievePayloadOpts{ + Id: nde.ExistingEntryId, + InsertedAt: nde.ExistingEntryInsertedAt, + Type: sqlcv1.V1PayloadTypeDURABLEEVENTLOGENTRYDATA, + TenantId: nde.ExistingEntryTenantId, + }] + } + nde.Detail = nonDeterminismDetail(opts, nde.ExpectedKind, existingPayload) + } + + return nil, fmt.Errorf("failed to get or create event log entries: %w", err) + } + + var memoResult *IngestMemoResult + var waitForResult *IngestWaitForResult + var 
triggerRunsResult *IngestTriggerRunsResult + + switch opts.Kind { + case sqlcv1.V1DurableEventLogKindRUN: + entries := make([]*IngestTriggerRunsEntry, len(getOrCreateOpts.Entries)) + + for i, entry := range logEntries { + entries[i] = &IngestTriggerRunsEntry{ + NodeId: entry.Entry.NodeID, + BranchId: entry.Entry.BranchID, + IsSatisfied: entry.Entry.IsSatisfied, + AlreadyExisted: entry.AlreadyExisted, + ResultPayload: entry.ResultPayload, + } + } + + triggerRunsResult = &IngestTriggerRunsResult{ + InvocationCount: opts.InvocationCount, + Entries: entries, + } + + var newTriggerOpts []*WorkflowNameTriggerOpts + + for _, le := range logEntries { + if le.AlreadyExisted { + continue + } + + newTriggerOpts = append(newTriggerOpts, nodeIdBranchIdToTriggerOpts[NodeIdBranchIdTuple{ + NodeId: le.Entry.NodeID, + BranchId: le.Entry.BranchID, + }]) + } + + if len(newTriggerOpts) > 0 { + createdTasks, createdDags, triggerErr := r.triggerFromWorkflowNames(ctx, optTx, tenantId, newTriggerOpts) + + if triggerErr != nil { + return nil, fmt.Errorf("failed to trigger workflows: %w", triggerErr) + } + + triggerRunsResult.CreatedTasks = createdTasks + triggerRunsResult.CreatedDAGs = createdDags + + createMatchOpts := make([]CreateMatchOpts, 0, len(createdTasks)+len(createdDags)) + + dagExternalIds := make(map[uuid.UUID]struct{}, len(createdDags)) + + for _, dag := range createdDags { + dagExternalIds[dag.ExternalID] = struct{}{} + } + + for _, ct := range createdTasks { + if _, isDagTask := dagExternalIds[ct.WorkflowRunID]; isDagTask { + continue + } + + childHint := ct.ExternalID.String() + orGroupId := uuid.New() + + conditions := []GroupMatchCondition{ + { + GroupId: orGroupId, + EventType: sqlcv1.V1EventTypeINTERNAL, + EventKey: string(sqlcv1.V1TaskEventTypeCOMPLETED), + ReadableDataKey: "output", + EventResourceHint: &childHint, + Expression: "true", + Action: sqlcv1.V1MatchConditionActionCREATE, + }, + { + GroupId: orGroupId, + EventType: sqlcv1.V1EventTypeINTERNAL, + EventKey: 
string(sqlcv1.V1TaskEventTypeFAILED), + ReadableDataKey: "output", + EventResourceHint: &childHint, + Expression: "true", + Action: sqlcv1.V1MatchConditionActionCREATE, + }, + { + GroupId: orGroupId, + EventType: sqlcv1.V1EventTypeINTERNAL, + EventKey: string(sqlcv1.V1TaskEventTypeCANCELLED), + ReadableDataKey: "output", + EventResourceHint: &childHint, + Expression: "true", + Action: sqlcv1.V1MatchConditionActionCREATE, + }, + } + + nodeIdBranchId := runExternalIdToNodeIdBranchId[ct.ExternalID] + + nodeId := nodeIdBranchId.NodeId + branchId := nodeIdBranchId.BranchId + + runEventLogEntrySignalKey := fmt.Sprintf("durable_run:%s:%d:%d", task.ExternalID.String(), branchId, nodeId) + + taskId := task.ID + + createMatchOpts = append(createMatchOpts, CreateMatchOpts{ + Kind: sqlcv1.V1MatchKindSIGNAL, + Conditions: conditions, + SignalTaskId: &taskId, + SignalTaskInsertedAt: task.InsertedAt, + SignalExternalId: &ct.ExternalID, + SignalTaskExternalId: &task.ExternalID, + SignalKey: &runEventLogEntrySignalKey, + DurableEventLogEntryNodeId: &nodeId, + DurableEventLogEntryBranchId: &branchId, + }) + } + + for _, dag := range createdDags { + conditions := make([]GroupMatchCondition, 0, len(dag.TaskExternalIDs)*3) + + for i, taskExtId := range dag.TaskExternalIDs { + childHint := taskExtId.String() + orGroupId := uuid.New() + + readableDataKey := "output" + if i < len(dag.TaskStepReadableIDs) { + readableDataKey = dag.TaskStepReadableIDs[i] + } + + conditions = append(conditions, + GroupMatchCondition{ + GroupId: orGroupId, + EventType: sqlcv1.V1EventTypeINTERNAL, + EventKey: string(sqlcv1.V1TaskEventTypeCOMPLETED), + ReadableDataKey: readableDataKey, + EventResourceHint: &childHint, + Expression: "true", + Action: sqlcv1.V1MatchConditionActionCREATE, + }, + GroupMatchCondition{ + GroupId: orGroupId, + EventType: sqlcv1.V1EventTypeINTERNAL, + EventKey: string(sqlcv1.V1TaskEventTypeFAILED), + ReadableDataKey: readableDataKey, + EventResourceHint: &childHint, + Expression: 
"true", + Action: sqlcv1.V1MatchConditionActionCREATE, + }, + GroupMatchCondition{ + GroupId: orGroupId, + EventType: sqlcv1.V1EventTypeINTERNAL, + EventKey: string(sqlcv1.V1TaskEventTypeCANCELLED), + ReadableDataKey: readableDataKey, + EventResourceHint: &childHint, + Expression: "true", + Action: sqlcv1.V1MatchConditionActionCREATE, + }, + ) + } + + nodeIdBranchId := runExternalIdToNodeIdBranchId[dag.ExternalID] + + nodeId := nodeIdBranchId.NodeId + branchId := nodeIdBranchId.BranchId + + runEventLogEntrySignalKey := fmt.Sprintf("durable_run:%s:%d:%d", task.ExternalID.String(), branchId, nodeId) + + taskId := task.ID + dagExternalId := dag.ExternalID + + createMatchOpts = append(createMatchOpts, CreateMatchOpts{ + Kind: sqlcv1.V1MatchKindSIGNAL, + Conditions: conditions, + SignalTaskId: &taskId, + SignalTaskInsertedAt: task.InsertedAt, + SignalExternalId: &dagExternalId, + SignalTaskExternalId: &task.ExternalID, + SignalKey: &runEventLogEntrySignalKey, + DurableEventLogEntryNodeId: &nodeId, + DurableEventLogEntryBranchId: &branchId, + }) + } + + if len(createMatchOpts) > 0 { + if matchErr := r.createEventMatches(ctx, tx, tenantId, createMatchOpts); matchErr != nil { + return nil, fmt.Errorf("failed to register run completion matches: %w", matchErr) + } + } + } + case sqlcv1.V1DurableEventLogKindWAITFOR: + if len(logEntries) != 1 { + // note: we implicitly assume that there will only be one log entry for wait for conditions + // if we get more than one, it's an indication something is wrong + return nil, fmt.Errorf("expected to get exactly one log entry for wait for condition, but got %d", len(logEntries)) + } + le := logEntries[0] + + if !le.AlreadyExisted { + if err = r.handleWaitFor(ctx, tx, tenantId, le.Entry.BranchID, le.Entry.NodeID, opts.WaitFor.WaitForConditions, task); err != nil { + return nil, fmt.Errorf("failed to handle wait for conditions: %w", err) + } + } + + waitForResult = &IngestWaitForResult{ + InvocationCount: opts.InvocationCount, + 
IsSatisfied: le.Entry.IsSatisfied, + NodeId: le.Entry.NodeID, + BranchId: le.Entry.BranchID, + AlreadyExisted: le.AlreadyExisted, + ResultPayload: le.ResultPayload, + } + case sqlcv1.V1DurableEventLogKindMEMO: + if len(logEntries) != 1 { + // note: we implicitly assume that there will only be one log entry for memo + // if we get more than one, it's an indication something is wrong + return nil, fmt.Errorf("expected to get exactly one log entry for memo, but got %d", len(logEntries)) + } + + le := logEntries[0] + + memoResult = &IngestMemoResult{ + InvocationCount: opts.InvocationCount, + IsSatisfied: le.Entry.IsSatisfied, + NodeId: le.Entry.NodeID, + BranchId: le.Entry.BranchID, + ResultPayload: le.ResultPayload, + AlreadyExisted: le.AlreadyExisted, + } + } + + n := len(getOrCreateOpts.Entries) + + finalNodeId := baseNodeId + int64(n) - 1 + _, err = r.queries.UpdateLogFile(ctx, tx, sqlcv1.UpdateLogFileParams{ + NodeId: sqlchelpers.ToBigInt(&finalNodeId), + Durabletaskid: task.ID, + Durabletaskinsertedat: task.InsertedAt, + }) + + if err != nil { + return nil, fmt.Errorf("failed to update latest node id: %w", err) + } + + if err := optTx.Commit(ctx); err != nil { + return nil, err + } + + return &IngestDurableTaskEventResult{ + Kind: opts.Kind, + MemoResult: memoResult, + WaitForResult: waitForResult, + TriggerRunsResult: triggerRunsResult, + }, nil +} + +func (r *durableEventsRepository) handleWaitFor(ctx context.Context, tx sqlcv1.DBTX, tenantId uuid.UUID, branchId, nodeId int64, waitForConditions []CreateExternalSignalConditionOpt, task *sqlcv1.FlattenExternalIdsRow) error { + if waitForConditions == nil { + return nil + } + + if len(waitForConditions) == 0 { + return nil + } + + taskExternalId := task.ExternalID + signalKey := getDurableTaskSignalKey(taskExternalId, nodeId) + + createMatchOpts := []ExternalCreateSignalMatchOpts{{ + Conditions: waitForConditions, + SignalTaskId: task.ID, + SignalTaskInsertedAt: task.InsertedAt, + SignalTaskExternalId: 
task.ExternalID, + SignalExternalId: taskExternalId, + SignalKey: signalKey, + DurableEventLogEntryNodeId: &nodeId, + DurableEventLogEntryBranchId: &branchId, + }} + + return r.registerSignalMatchConditions(ctx, tx, tenantId, createMatchOpts) +} + +func (r *durableEventsRepository) CompleteMemoEntry(ctx context.Context, opts CompleteMemoEntryOpts) error { + task, err := r.GetTaskByExternalId(ctx, opts.TenantId, opts.TaskExternalId, false) + if err != nil { + return fmt.Errorf("failed to get task by external id: %w", err) + } + + entry, err := r.queries.GetDurableEventLogEntry(ctx, r.pool, sqlcv1.GetDurableEventLogEntryParams{ + Durabletaskid: task.ID, + Durabletaskinsertedat: task.InsertedAt, + Nodeid: opts.NodeId, + Branchid: opts.BranchId, + }) + if err != nil { + return fmt.Errorf("failed to get durable event log entry at branch %d node %d: %w", opts.BranchId, opts.NodeId, err) + } + + if entry.IsSatisfied { + return nil + } + + _, err = r.queries.MarkDurableEventLogEntrySatisfied(ctx, r.pool, sqlcv1.MarkDurableEventLogEntrySatisfiedParams{ + Durabletaskid: task.ID, + Durabletaskinsertedat: task.InsertedAt, + Nodeid: opts.NodeId, + Branchid: opts.BranchId, + }) + + if err != nil { + return fmt.Errorf("failed to mark memo entry as satisfied: %w", err) + } + + if len(opts.Payload) > 0 { + err = r.payloadStore.Store(ctx, r.pool, StorePayloadOpts{ + Id: entry.ID, + InsertedAt: entry.InsertedAt, + ExternalId: entry.ExternalID, + Type: sqlcv1.V1PayloadTypeDURABLEEVENTLOGENTRYRESULTDATA, + Payload: opts.Payload, + TenantId: opts.TenantId, + }) + + if err != nil { + return fmt.Errorf("failed to store memo result payload: %w", err) + } + } + + return nil +} + +func (r *durableEventsRepository) HandleBranch(ctx context.Context, tenantId uuid.UUID, nodeId, branchId int64, task *sqlcv1.FlattenExternalIdsRow) (*HandleBranchResult, error) { + optTx, err := r.PrepareOptimisticTx(ctx) + if err != nil { + return nil, fmt.Errorf("failed to prepare tx: %w", err) + } + defer 
optTx.Rollback() + + tx := optTx.tx + + logFile, err := r.getAndLockLogFile(ctx, tx, tenantId, task.ID, task.InsertedAt) + + if err != nil { + return nil, fmt.Errorf("failed to lock log file: %w", err) + } + + newBranchId := logFile.LatestBranchID + 1 + zero := int64(0) + + logFile, err = r.queries.UpdateLogFile(ctx, tx, sqlcv1.UpdateLogFileParams{ + BranchId: sqlchelpers.ToBigInt(&newBranchId), + NodeId: sqlchelpers.ToBigInt(&zero), + Durabletaskid: task.ID, + Durabletaskinsertedat: task.InsertedAt, + }) + + if err != nil { + return nil, fmt.Errorf("failed to update log file for branch: %w", err) + } + + err = r.queries.CreateDurableEventLogBranchPoint(ctx, tx, sqlcv1.CreateDurableEventLogBranchPointParams{ + Tenantid: tenantId, + Firstnodeidinnewbranch: nodeId, + Parentbranchid: branchId, + Nextbranchid: newBranchId, + Durabletaskid: task.ID, + Durabletaskinsertedat: task.InsertedAt, + }) + + if err != nil { + return nil, fmt.Errorf("failed to create log branch point for fork: %w", err) + } + + if err := optTx.Commit(ctx); err != nil { + return nil, err + } + + return &HandleBranchResult{ + NodeId: nodeId, + BranchId: newBranchId, + EventLogFile: logFile, + }, nil +} + +func (r *durableEventsRepository) GetDurableTaskInvocationCounts(ctx context.Context, tenantId uuid.UUID, tasks []IdInsertedAt) (map[IdInsertedAt]*int32, error) { + if len(tasks) == 0 { + return nil, nil + } + + taskIds := make([]int64, len(tasks)) + taskInsertedAts := make([]pgtype.Timestamptz, len(tasks)) + tenantIds := make([]uuid.UUID, len(tasks)) + + for i, t := range tasks { + taskIds[i] = t.ID + taskInsertedAts[i] = t.InsertedAt + tenantIds[i] = tenantId + } + + logFiles, err := r.queries.GetDurableTaskLogFiles(ctx, r.pool, sqlcv1.GetDurableTaskLogFilesParams{ + Durabletaskids: taskIds, + Durabletaskinsertedats: taskInsertedAts, + Tenantids: tenantIds, + }) + + if err != nil { + return nil, fmt.Errorf("failed to get log files: %w", err) + } + + result := make(map[IdInsertedAt]*int32, 
len(tasks)) + + for _, logFile := range logFiles { + key := IdInsertedAt{ + ID: logFile.DurableTaskID, + InsertedAt: logFile.DurableTaskInsertedAt, + } + + result[key] = &logFile.LatestInvocationCount + } + + return result, nil +} diff --git a/pkg/repository/durable_events_test.go b/pkg/repository/durable_events_test.go new file mode 100644 index 000000000..48ca3a5d3 --- /dev/null +++ b/pkg/repository/durable_events_test.go @@ -0,0 +1,485 @@ +//go:build !e2e && !load && !rampup && !integration + +package repository + +import ( + "encoding/json" + "errors" + "testing" + + "github.com/google/uuid" + "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" + + "github.com/stretchr/testify/assert" +) + +func TestStaleInvocationError_ImplementsError(t *testing.T) { + id := uuid.New() + err := &StaleInvocationError{ + TaskExternalId: id, + ExpectedInvocationCount: 3, + ActualInvocationCount: 1, + } + + var target *StaleInvocationError + assert.True(t, errors.As(err, &target)) + assert.Equal(t, id, target.TaskExternalId) + assert.Equal(t, int32(3), target.ExpectedInvocationCount) + assert.Equal(t, int32(1), target.ActualInvocationCount) + assert.Contains(t, err.Error(), id.String()) + assert.Contains(t, err.Error(), "server has 3") + assert.Contains(t, err.Error(), "worker sent 1") +} + +func TestStaleInvocationError_NotMatchedByOtherErrors(t *testing.T) { + err := errors.New("some other error") + var target *StaleInvocationError + assert.False(t, errors.As(err, &target)) +} + +func TestResolveBranchForNode_NoBranchPoints(t *testing.T) { + // Single branch, no forks. All nodes resolve to branch 1. + branchPoints := map[int64]*sqlcv1.V1DurableEventLogBranchPoint{} + + for _, nodeId := range []int64{1, 2, 3, 4, 5, 6} { + assert.Equal(t, resolveBranchForNode(nodeId, 1, branchPoints), int64(1), "nodeId=%d", nodeId) + } +} + +func TestResolveBranchForNode_SingleForkFromNode1(t *testing.T) { + // Branch 1 forked at node 1 → branch 2. + // Nodes >= 1 should resolve to branch 2. 
+ branchPoints := map[int64]*sqlcv1.V1DurableEventLogBranchPoint{ + 2: {FirstNodeIDInNewBranch: 1, ParentBranchID: 1, NextBranchID: 2}, + } + + assert.Equal(t, resolveBranchForNode(1, 2, branchPoints), int64(2)) + assert.Equal(t, resolveBranchForNode(2, 2, branchPoints), int64(2)) + assert.Equal(t, resolveBranchForNode(3, 2, branchPoints), int64(2)) +} + +func TestResolveBranchForNode_SingleForkFromNode2(t *testing.T) { + // Branch 1 forked at node 2 → branch 2. + // Node 1 should resolve to branch 1 (cached), nodes >= 2 to branch 2. + branchPoints := map[int64]*sqlcv1.V1DurableEventLogBranchPoint{ + 2: {FirstNodeIDInNewBranch: 2, ParentBranchID: 1, NextBranchID: 2}, + } + + assert.Equal(t, resolveBranchForNode(1, 2, branchPoints), int64(1)) + assert.Equal(t, resolveBranchForNode(2, 2, branchPoints), int64(2)) + assert.Equal(t, resolveBranchForNode(3, 2, branchPoints), int64(2)) +} + +func TestResolveBranchForNode_BranchOffBranch(t *testing.T) { + // Branch 1 forked at node 1 → branch 2. + // Branch 2 forked at node 2 → branch 3. + // Node 1 should use branch 2, nodes >= 2 should use branch 3. 
+ branchPoints := map[int64]*sqlcv1.V1DurableEventLogBranchPoint{ + 2: {FirstNodeIDInNewBranch: 1, ParentBranchID: 1, NextBranchID: 2}, + 3: {FirstNodeIDInNewBranch: 2, ParentBranchID: 2, NextBranchID: 3}, + } + + assert.Equal(t, resolveBranchForNode(1, 3, branchPoints), int64(2)) + assert.Equal(t, resolveBranchForNode(2, 3, branchPoints), int64(3)) + assert.Equal(t, resolveBranchForNode(3, 3, branchPoints), int64(3)) +} + +func TestResolveBranchForNode_DeepChain(t *testing.T) { + // Chain: branch 1 → 2 (at node 1) → 3 (at node 2) → 4 (at node 3) + branchPoints := map[int64]*sqlcv1.V1DurableEventLogBranchPoint{ + 2: {FirstNodeIDInNewBranch: 1, ParentBranchID: 1, NextBranchID: 2}, + 3: {FirstNodeIDInNewBranch: 2, ParentBranchID: 2, NextBranchID: 3}, + 4: {FirstNodeIDInNewBranch: 3, ParentBranchID: 3, NextBranchID: 4}, + } + + assert.Equal(t, resolveBranchForNode(1, 4, branchPoints), int64(2)) + assert.Equal(t, resolveBranchForNode(2, 4, branchPoints), int64(3)) + assert.Equal(t, resolveBranchForNode(3, 4, branchPoints), int64(4)) + assert.Equal(t, resolveBranchForNode(4, 4, branchPoints), int64(4)) +} + +func TestResolveBranchForNode_ForkAtSameNode(t *testing.T) { + // Two successive forks both at node 1: branch 1 → 2, then branch 2 → 3. + // All nodes on branch 3 should resolve to branch 3 (since fork point is node 1). + branchPoints := map[int64]*sqlcv1.V1DurableEventLogBranchPoint{ + 2: {FirstNodeIDInNewBranch: 1, ParentBranchID: 1, NextBranchID: 2}, + 3: {FirstNodeIDInNewBranch: 1, ParentBranchID: 2, NextBranchID: 3}, + } + + assert.Equal(t, resolveBranchForNode(1, 3, branchPoints), int64(3)) + assert.Equal(t, resolveBranchForNode(2, 3, branchPoints), int64(3)) + assert.Equal(t, resolveBranchForNode(3, 3, branchPoints), int64(3)) +} + +func TestResolveBranchForNode_QueriedFromOlderBranch(t *testing.T) { + // Branch points exist for branches 2 and 3, but we're resolving from branch 2. + // Branch 3's branch point should be irrelevant. 
+ branchPoints := map[int64]*sqlcv1.V1DurableEventLogBranchPoint{ + 2: {FirstNodeIDInNewBranch: 2, ParentBranchID: 1, NextBranchID: 2}, + 3: {FirstNodeIDInNewBranch: 3, ParentBranchID: 2, NextBranchID: 3}, + } + + assert.Equal(t, resolveBranchForNode(1, 2, branchPoints), int64(1)) + assert.Equal(t, resolveBranchForNode(2, 2, branchPoints), int64(2)) + assert.Equal(t, resolveBranchForNode(3, 2, branchPoints), int64(2)) +} + +func TestResolveBranchForNode_SiblingBranches(t *testing.T) { + // Branch 1 forks at node 3 → branch 2, then branch 1 forks at node 2 → branch 3. + // Branch 3's ancestry is just branch 1, so branch 2 is irrelevant. + // Nodes 1 should use branch 1, nodes >= 2 should use branch 3. + branchPoints := map[int64]*sqlcv1.V1DurableEventLogBranchPoint{ + 2: {FirstNodeIDInNewBranch: 3, ParentBranchID: 1, NextBranchID: 2}, + 3: {FirstNodeIDInNewBranch: 2, ParentBranchID: 1, NextBranchID: 3}, + } + + assert.Equal(t, resolveBranchForNode(1, 3, branchPoints), int64(1)) + assert.Equal(t, resolveBranchForNode(2, 3, branchPoints), int64(3)) + assert.Equal(t, resolveBranchForNode(3, 3, branchPoints), int64(3)) + assert.Equal(t, resolveBranchForNode(4, 3, branchPoints), int64(3)) + + // And from branch 2's perspective, branch 3 is irrelevant. + // Nodes 1-2 should use branch 1, nodes >= 3 should use branch 2. + assert.Equal(t, resolveBranchForNode(1, 2, branchPoints), int64(1)) + assert.Equal(t, resolveBranchForNode(2, 2, branchPoints), int64(1)) + assert.Equal(t, resolveBranchForNode(3, 2, branchPoints), int64(2)) + assert.Equal(t, resolveBranchForNode(4, 2, branchPoints), int64(2)) +} + +func TestResolveBranchForNode_SiblingBranchForkAfterSibling(t *testing.T) { + // Branch 1 forks at node 3 → branch 2, then branch 1 forks at node 4 → branch 3. + // Branch 3's ancestry is just branch 1, so branch 2 is irrelevant. + // Nodes 1-3 should use branch 1, nodes >= 4 should use branch 3. 
+ branchPoints := map[int64]*sqlcv1.V1DurableEventLogBranchPoint{ + 2: {FirstNodeIDInNewBranch: 3, ParentBranchID: 1, NextBranchID: 2}, + 3: {FirstNodeIDInNewBranch: 4, ParentBranchID: 1, NextBranchID: 3}, + } + + assert.Equal(t, resolveBranchForNode(1, 3, branchPoints), int64(1)) + assert.Equal(t, resolveBranchForNode(2, 3, branchPoints), int64(1)) + assert.Equal(t, resolveBranchForNode(3, 3, branchPoints), int64(1)) + assert.Equal(t, resolveBranchForNode(4, 3, branchPoints), int64(3)) + assert.Equal(t, resolveBranchForNode(5, 3, branchPoints), int64(3)) + + // From branch 2's perspective, branch 3 is irrelevant. + // Nodes 1-2 should use branch 1, nodes >= 3 should use branch 2. + assert.Equal(t, resolveBranchForNode(1, 2, branchPoints), int64(1)) + assert.Equal(t, resolveBranchForNode(2, 2, branchPoints), int64(1)) + assert.Equal(t, resolveBranchForNode(3, 2, branchPoints), int64(2)) + assert.Equal(t, resolveBranchForNode(4, 2, branchPoints), int64(2)) +} + +func strPtr(s string) *string { return &s } + +func keyFromKind(t *testing.T, kind sqlcv1.V1DurableEventLogKind, triggerOpts *WorkflowNameTriggerOpts, waitForConditions []CreateExternalSignalConditionOpt) string { + t.Helper() + r := &durableEventsRepository{} + k, err := r.createIdempotencyKey(kind, triggerOpts, waitForConditions) + assert.NoError(t, err) + return string(k) +} + +func TestCreateIdempotencyKey_ConditionOrderInvariant(t *testing.T) { + condA := CreateExternalSignalConditionOpt{ + Kind: CreateExternalSignalConditionKindSLEEP, + Expression: "aaa", + ReadableDataKey: "output", + SleepFor: strPtr("10s"), + } + condB := CreateExternalSignalConditionOpt{ + Kind: CreateExternalSignalConditionKindUSEREVENT, + Expression: "bbb", + ReadableDataKey: "output", + UserEventKey: strPtr("some-event"), + } + + keyAB := keyFromKind(t, sqlcv1.V1DurableEventLogKindWAITFOR, nil, []CreateExternalSignalConditionOpt{condA, condB}) + keyBA := keyFromKind(t, sqlcv1.V1DurableEventLogKindWAITFOR, nil, 
[]CreateExternalSignalConditionOpt{condB, condA}) + + assert.Equal(t, keyAB, keyBA) +} + +func TestCreateIdempotencyKey_DifferentConditions(t *testing.T) { + base := keyFromKind(t, sqlcv1.V1DurableEventLogKindWAITFOR, nil, []CreateExternalSignalConditionOpt{ + {Kind: CreateExternalSignalConditionKindSLEEP, Expression: "true", ReadableDataKey: "output", SleepFor: strPtr("5s")}, + }) + different := keyFromKind(t, sqlcv1.V1DurableEventLogKindWAITFOR, nil, []CreateExternalSignalConditionOpt{ + {Kind: CreateExternalSignalConditionKindSLEEP, Expression: "true", ReadableDataKey: "output", SleepFor: strPtr("30s")}, + }) + + assert.NotEqual(t, base, different) +} + +func TestCreateIdempotencyKey_DifferentKind(t *testing.T) { + run := keyFromKind(t, sqlcv1.V1DurableEventLogKindRUN, nil, nil) + waitFor := keyFromKind(t, sqlcv1.V1DurableEventLogKindWAITFOR, nil, nil) + memo := keyFromKind(t, sqlcv1.V1DurableEventLogKindMEMO, nil, nil) + + assert.NotEqual(t, run, waitFor) + assert.NotEqual(t, run, memo) + assert.NotEqual(t, waitFor, memo) +} + +func TestCreateIdempotencyKey_DifferentWorkflowName(t *testing.T) { + keyA := keyFromKind(t, sqlcv1.V1DurableEventLogKindRUN, &WorkflowNameTriggerOpts{ + TriggerTaskData: &TriggerTaskData{WorkflowName: "workflow-a"}, + }, nil) + keyB := keyFromKind(t, sqlcv1.V1DurableEventLogKindRUN, &WorkflowNameTriggerOpts{ + TriggerTaskData: &TriggerTaskData{WorkflowName: "workflow-b"}, + }, nil) + + assert.NotEqual(t, keyA, keyB) +} + +func TestCreateIdempotencyKey_DifferentTriggerData(t *testing.T) { + keyA := keyFromKind(t, sqlcv1.V1DurableEventLogKindRUN, &WorkflowNameTriggerOpts{ + TriggerTaskData: &TriggerTaskData{WorkflowName: "my-workflow", Data: []byte(`{"x":1}`)}, + }, nil) + keyB := keyFromKind(t, sqlcv1.V1DurableEventLogKindRUN, &WorkflowNameTriggerOpts{ + TriggerTaskData: &TriggerTaskData{WorkflowName: "my-workflow", Data: []byte(`{"x":2}`)}, + }, nil) + + assert.NotEqual(t, keyA, keyB) +} + +func 
TestCreateIdempotencyKey_WithAndWithoutTriggerOpts(t *testing.T) { + without := keyFromKind(t, sqlcv1.V1DurableEventLogKindRUN, nil, nil) + with := keyFromKind(t, sqlcv1.V1DurableEventLogKindRUN, &WorkflowNameTriggerOpts{ + TriggerTaskData: &TriggerTaskData{WorkflowName: "my-workflow"}, + }, nil) + + assert.NotEqual(t, without, with) +} + +func int32Ptr(i int32) *int32 { return &i } + +func TestCreateIdempotencyKey_PriorityIgnored(t *testing.T) { + base := keyFromKind(t, sqlcv1.V1DurableEventLogKindRUN, &WorkflowNameTriggerOpts{ + TriggerTaskData: &TriggerTaskData{WorkflowName: "my-workflow", Data: []byte(`{"x":1}`)}, + }, nil) + withPriority := keyFromKind(t, sqlcv1.V1DurableEventLogKindRUN, &WorkflowNameTriggerOpts{ + TriggerTaskData: &TriggerTaskData{WorkflowName: "my-workflow", Data: []byte(`{"x":1}`), Priority: int32Ptr(3)}, + }, nil) + + assert.Equal(t, base, withPriority) +} + +func TestCreateIdempotencyKey_AdditionalMetadataIgnored(t *testing.T) { + base := keyFromKind(t, sqlcv1.V1DurableEventLogKindRUN, &WorkflowNameTriggerOpts{ + TriggerTaskData: &TriggerTaskData{WorkflowName: "my-workflow", Data: []byte(`{"x":1}`)}, + }, nil) + withMeta := keyFromKind(t, sqlcv1.V1DurableEventLogKindRUN, &WorkflowNameTriggerOpts{ + TriggerTaskData: &TriggerTaskData{ + WorkflowName: "my-workflow", + Data: []byte(`{"x":1}`), + AdditionalMetadata: []byte(`{"env":"prod"}`), + }, + }, nil) + + assert.Equal(t, base, withMeta) +} + +func TestNonDeterminismError_SameKind(t *testing.T) { + id := uuid.New() + err := &NonDeterminismError{ + NodeId: 3, + BranchId: 1, + TaskExternalId: id, + Detail: &NonDeterminismDetail{ + Expected: "waitFor(sleep(2s))", + Received: "waitFor(sleep(4s))", + }, + } + + assert.Contains(t, err.Error(), id.String()) + assert.Contains(t, err.Error(), "node 3:1") + assert.Contains(t, err.Error(), "expected: waitFor(sleep(2s))") + assert.Contains(t, err.Error(), "received: waitFor(sleep(4s))") +} + +func TestNonDeterminismError_DifferentKinds(t *testing.T) { 
+ id := uuid.New() + err := &NonDeterminismError{ + NodeId: 5, + BranchId: 2, + TaskExternalId: id, + Detail: &NonDeterminismDetail{ + Expected: "MEMO", + Received: "run(my-workflow)", + }, + } + + assert.Contains(t, err.Error(), "expected: MEMO") + assert.Contains(t, err.Error(), "received: run(my-workflow)") +} + +func TestNonDeterminismError_NoDetail(t *testing.T) { + id := uuid.New() + err := &NonDeterminismError{ + NodeId: 1, + BranchId: 1, + TaskExternalId: id, + } + + msg := err.Error() + assert.Contains(t, msg, "non-determinism error") + assert.NotContains(t, msg, "expected:") + assert.NotContains(t, msg, "received:") +} + +func TestNonDeterminismError_ImplementsError(t *testing.T) { + err := &NonDeterminismError{TaskExternalId: uuid.New()} + var target *NonDeterminismError + assert.True(t, errors.As(err, &target)) +} + +func TestFormatCall_Run(t *testing.T) { + opts := IngestDurableTaskEventOpts{ + BaseIngestEventOpts: &BaseIngestEventOpts{Kind: sqlcv1.V1DurableEventLogKindRUN}, + TriggerRuns: &IngestTriggerRunsOpts{ + TriggerOpts: []*WorkflowNameTriggerOpts{ + {TriggerTaskData: &TriggerTaskData{WorkflowName: "wf-a"}}, + {TriggerTaskData: &TriggerTaskData{WorkflowName: "wf-b"}}, + }, + }, + } + assert.Equal(t, "run(wf-a, wf-b)", opts.formatCall()) +} + +func TestFormatCall_WaitFor(t *testing.T) { + opts := IngestDurableTaskEventOpts{ + BaseIngestEventOpts: &BaseIngestEventOpts{Kind: sqlcv1.V1DurableEventLogKindWAITFOR}, + WaitFor: &IngestWaitForOpts{ + WaitForConditions: []CreateExternalSignalConditionOpt{ + {Kind: CreateExternalSignalConditionKindSLEEP, SleepFor: strPtr("10s")}, + {Kind: CreateExternalSignalConditionKindUSEREVENT, UserEventKey: strPtr("user:signup")}, + }, + }, + } + assert.Equal(t, "waitFor(sleep(10s), waitForEvent(user:signup))", opts.formatCall()) +} + +func TestFormatCall_Memo(t *testing.T) { + opts := IngestDurableTaskEventOpts{ + BaseIngestEventOpts: &BaseIngestEventOpts{Kind: sqlcv1.V1DurableEventLogKindMEMO}, + } + assert.Equal(t, 
"memo", opts.formatCall()) +} + +func TestFormatCall_RunBulkWithDuplicates(t *testing.T) { + triggers := make([]*WorkflowNameTriggerOpts, 0, 8) + for i := 0; i < 6; i++ { + triggers = append(triggers, &WorkflowNameTriggerOpts{ + TriggerTaskData: &TriggerTaskData{WorkflowName: "wf-a"}, + }) + } + triggers = append(triggers, + &WorkflowNameTriggerOpts{TriggerTaskData: &TriggerTaskData{WorkflowName: "wf-b"}}, + &WorkflowNameTriggerOpts{TriggerTaskData: &TriggerTaskData{WorkflowName: "wf-c"}}, + ) + opts := IngestDurableTaskEventOpts{ + BaseIngestEventOpts: &BaseIngestEventOpts{Kind: sqlcv1.V1DurableEventLogKindRUN}, + TriggerRuns: &IngestTriggerRunsOpts{TriggerOpts: triggers}, + } + assert.Equal(t, "run(6x wf-a, wf-b, wf-c)", opts.formatCall()) +} + +func TestFormatCall_RunBulkExceedsMaxLabels(t *testing.T) { + names := []string{"a", "b", "c", "d", "e", "f", "g"} + triggers := make([]*WorkflowNameTriggerOpts, len(names)) + for i, n := range names { + triggers[i] = &WorkflowNameTriggerOpts{ + TriggerTaskData: &TriggerTaskData{WorkflowName: n}, + } + } + opts := IngestDurableTaskEventOpts{ + BaseIngestEventOpts: &BaseIngestEventOpts{Kind: sqlcv1.V1DurableEventLogKindRUN}, + TriggerRuns: &IngestTriggerRunsOpts{TriggerOpts: triggers}, + } + assert.Equal(t, "run(a, b, c, d, e, ... 
+2 more unique)", opts.formatCall()) +} + +func TestFormatCall_WaitForBulkMixed(t *testing.T) { + conditions := make([]CreateExternalSignalConditionOpt, 0, 8) + for i := 0; i < 4; i++ { + conditions = append(conditions, CreateExternalSignalConditionOpt{ + Kind: CreateExternalSignalConditionKindSLEEP, SleepFor: strPtr("5s"), + }) + } + conditions = append(conditions, + CreateExternalSignalConditionOpt{Kind: CreateExternalSignalConditionKindUSEREVENT, UserEventKey: strPtr("ev1")}, + CreateExternalSignalConditionOpt{Kind: CreateExternalSignalConditionKindUSEREVENT, UserEventKey: strPtr("ev2")}, + CreateExternalSignalConditionOpt{Kind: CreateExternalSignalConditionKindUSEREVENT, UserEventKey: strPtr("ev3")}, + CreateExternalSignalConditionOpt{Kind: CreateExternalSignalConditionKindUSEREVENT, UserEventKey: strPtr("ev4")}, + ) + opts := IngestDurableTaskEventOpts{ + BaseIngestEventOpts: &BaseIngestEventOpts{Kind: sqlcv1.V1DurableEventLogKindWAITFOR}, + WaitFor: &IngestWaitForOpts{WaitForConditions: conditions}, + } + assert.Equal(t, "waitFor(4x sleep(5s), waitForEvent(ev1), waitForEvent(ev2), waitForEvent(ev3), waitForEvent(ev4))", opts.formatCall()) +} + +func TestFormatCall_RunExactlyAtMaxLabels(t *testing.T) { + names := []string{"a", "b", "c", "d", "e"} + triggers := make([]*WorkflowNameTriggerOpts, len(names)) + for i, n := range names { + triggers[i] = &WorkflowNameTriggerOpts{ + TriggerTaskData: &TriggerTaskData{WorkflowName: n}, + } + } + opts := IngestDurableTaskEventOpts{ + BaseIngestEventOpts: &BaseIngestEventOpts{Kind: sqlcv1.V1DurableEventLogKindRUN}, + TriggerRuns: &IngestTriggerRunsOpts{TriggerOpts: triggers}, + } + assert.Equal(t, "run(a, b, c, d, e)", opts.formatCall()) +} + +func TestFormatStoredPayload_Run(t *testing.T) { + payload, _ := json.Marshal(WorkflowNameTriggerOpts{ + TriggerTaskData: &TriggerTaskData{WorkflowName: "my-workflow"}, + }) + assert.Equal(t, "run(my-workflow)", formatStoredPayload(sqlcv1.V1DurableEventLogKindRUN, payload)) +} + 
+func TestFormatStoredPayload_WaitFor(t *testing.T) { + payload, _ := json.Marshal([]CreateExternalSignalConditionOpt{ + {Kind: CreateExternalSignalConditionKindSLEEP, SleepFor: strPtr("2s")}, + }) + assert.Equal(t, "waitFor(sleep(2s))", formatStoredPayload(sqlcv1.V1DurableEventLogKindWAITFOR, payload)) +} + +func TestFormatStoredPayload_NoPayload(t *testing.T) { + assert.Equal(t, "MEMO", formatStoredPayload(sqlcv1.V1DurableEventLogKindMEMO, nil)) + assert.Equal(t, "RUN", formatStoredPayload(sqlcv1.V1DurableEventLogKindRUN, nil)) +} + +func TestNonDeterminismDetail_WithPayload(t *testing.T) { + existingPayload, _ := json.Marshal([]CreateExternalSignalConditionOpt{ + {Kind: CreateExternalSignalConditionKindSLEEP, SleepFor: strPtr("2s")}, + }) + opts := IngestDurableTaskEventOpts{ + BaseIngestEventOpts: &BaseIngestEventOpts{Kind: sqlcv1.V1DurableEventLogKindWAITFOR}, + WaitFor: &IngestWaitForOpts{ + WaitForConditions: []CreateExternalSignalConditionOpt{ + {Kind: CreateExternalSignalConditionKindSLEEP, SleepFor: strPtr("4s")}, + }, + }, + } + detail := nonDeterminismDetail(opts, sqlcv1.V1DurableEventLogKindWAITFOR, existingPayload) + assert.Equal(t, "waitFor(sleep(2s))", detail.Expected) + assert.Equal(t, "waitFor(sleep(4s))", detail.Received) +} + +func TestNonDeterminismDetail_KindMismatch(t *testing.T) { + opts := IngestDurableTaskEventOpts{ + BaseIngestEventOpts: &BaseIngestEventOpts{Kind: sqlcv1.V1DurableEventLogKindRUN}, + TriggerRuns: &IngestTriggerRunsOpts{ + TriggerOpts: []*WorkflowNameTriggerOpts{ + {TriggerTaskData: &TriggerTaskData{WorkflowName: "my-wf"}}, + }, + }, + } + detail := nonDeterminismDetail(opts, sqlcv1.V1DurableEventLogKindMEMO, nil) + assert.Equal(t, "MEMO", detail.Expected) + assert.Equal(t, "run(my-wf)", detail.Received) +} diff --git a/pkg/repository/match.go b/pkg/repository/match.go index 79d7692df..7b2ac7dba 100644 --- a/pkg/repository/match.go +++ b/pkg/repository/match.go @@ -43,7 +43,13 @@ type ExternalCreateSignalMatchOpts struct { 
SignalExternalId uuid.UUID `validate:"required"` + SignalTaskExternalId uuid.UUID `validate:"required"` + SignalKey string `validate:"required"` + + // Optional durable event log entry fields for durable WAIT_FOR + DurableEventLogEntryNodeId *int64 + DurableEventLogEntryBranchId *int64 } type CreateExternalSignalConditionKind string @@ -106,9 +112,15 @@ type CreateMatchOpts struct { SignalTaskInsertedAt pgtype.Timestamptz + SignalTaskExternalId *uuid.UUID + SignalExternalId *uuid.UUID SignalKey *string + + // Optional durable event log fields for durable WAIT_FOR + DurableEventLogEntryNodeId *int64 + DurableEventLogEntryBranchId *int64 } type EventMatchResults struct { @@ -117,6 +129,9 @@ type EventMatchResults struct { // The list of tasks which were replayed from the matches ReplayedTasks []*V1TaskWithPayload + + // The list of satisfied durable event log entries from matches + SatisfiedDurableEventLogEntries []SatisfiedEntry } type GroupMatchCondition struct { @@ -140,6 +155,16 @@ type GroupMatchCondition struct { Data []byte } +type SatisfiedEntry struct { + DurableTaskExternalId uuid.UUID + DurableTaskId int64 + DurableTaskInsertedAt pgtype.Timestamptz + InvocationCount int32 + NodeId int64 + BranchId int64 + Data []byte +} + type MatchRepository interface { RegisterSignalMatchConditions(ctx context.Context, tenantId uuid.UUID, eventMatches []ExternalCreateSignalMatchOpts) error @@ -157,20 +182,7 @@ func newMatchRepository(s *sharedRepository) MatchRepository { } } -func (m *MatchRepositoryImpl) RegisterSignalMatchConditions(ctx context.Context, tenantId uuid.UUID, signalMatches []ExternalCreateSignalMatchOpts) error { - // TODO: ADD BACK VALIDATION - // if err := m.v.Validate(signalMatches); err != nil { - // return err - // } - - tx, commit, rollback, err := sqlchelpers.PrepareTx(ctx, m.pool, m.l) - - if err != nil { - return err - } - - defer rollback() - +func (r *sharedRepository) registerSignalMatchConditions(ctx context.Context, tx sqlcv1.DBTX, tenantId 
uuid.UUID, signalMatches []ExternalCreateSignalMatchOpts) error { eventMatches := make([]CreateMatchOpts, 0, len(signalMatches)) for _, signalMatch := range signalMatches { @@ -183,7 +195,7 @@ func (m *MatchRepositoryImpl) RegisterSignalMatchConditions(ctx context.Context, return fmt.Errorf("sleep condition requires a duration") } - c, err := m.durableSleepCondition( + c, err := r.durableSleepCondition( ctx, tx, tenantId, @@ -203,7 +215,7 @@ func (m *MatchRepositoryImpl) RegisterSignalMatchConditions(ctx context.Context, return fmt.Errorf("user event condition requires a user event key") } - conditions = append(conditions, m.userEventCondition( + conditions = append(conditions, r.userEventCondition( condition.OrGroupId, condition.ReadableDataKey, *condition.UserEventKey, @@ -218,21 +230,37 @@ func (m *MatchRepositoryImpl) RegisterSignalMatchConditions(ctx context.Context, signalKey := signalMatch.SignalKey eventMatches = append(eventMatches, CreateMatchOpts{ - Kind: sqlcv1.V1MatchKindSIGNAL, - Conditions: conditions, - SignalTaskId: &taskId, - SignalTaskInsertedAt: signalMatch.SignalTaskInsertedAt, - SignalExternalId: &externalId, - SignalKey: &signalKey, + Kind: sqlcv1.V1MatchKindSIGNAL, + Conditions: conditions, + SignalTaskId: &taskId, + SignalTaskInsertedAt: signalMatch.SignalTaskInsertedAt, + SignalTaskExternalId: &signalMatch.SignalTaskExternalId, + SignalExternalId: &externalId, + SignalKey: &signalKey, + DurableEventLogEntryNodeId: signalMatch.DurableEventLogEntryNodeId, + DurableEventLogEntryBranchId: signalMatch.DurableEventLogEntryBranchId, }) } - err = m.createEventMatches(ctx, tx, tenantId, eventMatches) + return r.createEventMatches(ctx, tx, tenantId, eventMatches) +} + +func (m *MatchRepositoryImpl) RegisterSignalMatchConditions(ctx context.Context, tenantId uuid.UUID, signalMatches []ExternalCreateSignalMatchOpts) error { + // TODO: ADD BACK VALIDATION + // if err := m.v.Validate(signalMatches); err != nil { + // return err + // } + + tx, commit, 
rollback, err := sqlchelpers.PrepareTx(ctx, m.pool, m.l) if err != nil { return err } + defer rollback() + + if err := m.registerSignalMatchConditions(ctx, tx, tenantId, signalMatches); err != nil { return err } + if err := commit(ctx); err != nil { return err } @@ -327,6 +355,13 @@ func (m *MatchRepositoryImpl) ProcessUserEventMatches(ctx context.Context, tenan return res, nil } +type DurableTaskNodeIdKey struct { + DurableTaskId int64 + DurableTaskInsertedAt time.Time + NodeId int64 + BranchId int64 +} + func (m *sharedRepository) processEventMatches(ctx context.Context, tx sqlcv1.DBTX, tenantId uuid.UUID, events []CandidateEventMatch, eventType sqlcv1.V1EventType) (*EventMatchResults, error) { start := time.Now() @@ -653,6 +688,109 @@ func (m *sharedRepository) processEventMatches(ctx context.Context, tx sqlcv1.DB res.CreatedTasks = tasks + durableTaskIds := make([]int64, 0) + durableTaskInsertedAts := make([]pgtype.Timestamptz, 0) + durableTaskNodeIds := make([]int64, 0) + durableTaskBranchIds := make([]int64, 0) + payloadsToStore := make([]StorePayloadOpts, 0) + idInsertedAtNodeIdToSatisfiedEntry := make(map[DurableTaskNodeIdKey]SatisfiedEntry) + + for _, match := range satisfiedMatches { + durableTaskExternalId := match.SignalTaskExternalID + durableTaskId := match.SignalTaskID + durableTaskInsertedAt := match.SignalTaskInsertedAt + nodeId := match.DurableEventLogEntryNodeID + branchId := match.DurableEventLogEntryBranchID + + if nodeId.Valid && durableTaskExternalId != nil { + + key := DurableTaskNodeIdKey{ + DurableTaskId: durableTaskId.Int64, + DurableTaskInsertedAt: durableTaskInsertedAt.Time, + NodeId: nodeId.Int64, + BranchId: branchId.Int64, + } + + cb := SatisfiedEntry{ + DurableTaskExternalId: *durableTaskExternalId, + DurableTaskId: durableTaskId.Int64, + DurableTaskInsertedAt: durableTaskInsertedAt, + NodeId: nodeId.Int64, + BranchId: branchId.Int64, + Data: match.McAggregatedData, + } + + idInsertedAtNodeIdToSatisfiedEntry[key] = cb + + durableTaskIds = append(durableTaskIds,
durableTaskId.Int64) + durableTaskInsertedAts = append(durableTaskInsertedAts, durableTaskInsertedAt) + durableTaskNodeIds = append(durableTaskNodeIds, nodeId.Int64) + durableTaskBranchIds = append(durableTaskBranchIds, branchId.Int64) + } + } + + entries, err := m.queries.UpdateDurableEventLogEntriesSatisfied(ctx, tx, sqlcv1.UpdateDurableEventLogEntriesSatisfiedParams{ + Nodeids: durableTaskNodeIds, + Branchids: durableTaskBranchIds, + Durabletaskids: durableTaskIds, + Durabletaskinsertedats: durableTaskInsertedAts, + }) + + if err != nil { + return nil, fmt.Errorf("failed to list satisfied entries: %w", err) + } + + satisfiedEntries := make([]SatisfiedEntry, 0) + + for _, cb := range entries { + key := DurableTaskNodeIdKey{ + DurableTaskId: cb.DurableTaskID, + DurableTaskInsertedAt: cb.DurableTaskInsertedAt.Time, + NodeId: cb.NodeID, + BranchId: cb.BranchID, + } + + initialEntry, ok := idInsertedAtNodeIdToSatisfiedEntry[key] + + if !ok { + m.l.Error().Msgf("no initial entry found for satisfied entry with node id %d, durable task id %d and durable task inserted at %s", cb.NodeID, cb.DurableTaskID, cb.DurableTaskInsertedAt.Time) + continue + } + + if cb.Kind == sqlcv1.V1DurableEventLogKindRUN { + if extracted, extractErr := ExtractOutputFromMatchData(initialEntry.Data); extractErr != nil { + m.l.Error().Err(extractErr).Msgf("failed to extract output from RUN_COMPLETED match data for entry %d", cb.NodeID) + } else { + initialEntry.Data = extracted + } + } + + initialEntry.InvocationCount = cb.InvocationCount + + if len(initialEntry.Data) > 0 { + payloadsToStore = append(payloadsToStore, StorePayloadOpts{ + Id: cb.ID, + InsertedAt: cb.InsertedAt, + Type: sqlcv1.V1PayloadTypeDURABLEEVENTLOGENTRYRESULTDATA, + Payload: initialEntry.Data, + ExternalId: cb.ExternalID, + TenantId: tenantId, + }) + } + + satisfiedEntries = append(satisfiedEntries, initialEntry) + } + + if len(payloadsToStore) > 0 { + err = m.payloadStore.Store(ctx, tx, payloadsToStore...) 
+ + if err != nil { + return nil, fmt.Errorf("failed to store entry result payloads for satisfied entry: %w", err) + } + } + + res.SatisfiedDurableEventLogEntries = satisfiedEntries + if len(signalIds) > 0 { // create a SIGNAL_COMPLETED event for any signal taskIds := make([]TaskIdInsertedAtRetryCount, 0, len(satisfiedMatches)) @@ -973,13 +1111,21 @@ func (m *sharedRepository) createEventMatches(ctx context.Context, tx sqlcv1.DBT signalTaskIds := make([]int64, len(signalMatches)) signalTaskInsertedAts := make([]pgtype.Timestamptz, len(signalMatches)) signalKeys := make([]string, len(signalMatches)) + durableLogEntryNodeIds := make([]*int64, len(signalMatches)) + durableLogEntryBranchIds := make([]*int64, len(signalMatches)) + signalTaskExternalIds := make([]*uuid.UUID, len(signalMatches)) for i, match := range signalMatches { signalTenantIds[i] = tenantId signalKinds[i] = string(match.Kind) signalTaskIds[i] = *match.SignalTaskId signalTaskInsertedAts[i] = match.SignalTaskInsertedAt + + signalTaskExternalIds[i] = match.SignalTaskExternalId signalKeys[i] = *match.SignalKey + + durableLogEntryNodeIds[i] = match.DurableEventLogEntryNodeId + durableLogEntryBranchIds[i] = match.DurableEventLogEntryBranchId } // Create matches in the database @@ -987,11 +1133,14 @@ func (m *sharedRepository) createEventMatches(ctx context.Context, tx sqlcv1.DBT ctx, tx, sqlcv1.CreateMatchesForSignalTriggersParams{ - Tenantids: signalTenantIds, - Kinds: signalKinds, - Signaltaskids: signalTaskIds, - Signaltaskinsertedats: signalTaskInsertedAts, - Signalkeys: signalKeys, + Tenantids: signalTenantIds, + Kinds: signalKinds, + Signaltaskids: signalTaskIds, + Signaltaskinsertedats: signalTaskInsertedAts, + Signaltaskexternalids: signalTaskExternalIds, + Signalkeys: signalKeys, + Durableeventlogentrynodeids: durableLogEntryNodeIds, + Durableeventlogentrybranchids: durableLogEntryBranchIds, }, ) diff --git a/pkg/repository/olap.go b/pkg/repository/olap.go index c798bfe0a..e72818541 100644 --- 
a/pkg/repository/olap.go +++ b/pkg/repository/olap.go @@ -123,8 +123,10 @@ type V1WorkflowRunPopulator struct { } type TaskRunMetric struct { - Status string `json:"status"` - Count uint64 `json:"count"` + Status string `json:"status"` + Count uint64 `json:"count"` + EvictedCount uint64 `json:"evictedCount,omitempty"` + OnWorkerCount uint64 `json:"onWorkerCount,omitempty"` } type Sticky string @@ -1472,32 +1474,33 @@ func (r *OLAPRepositoryImpl) ReadTaskRunMetrics(ctx context.Context, tenantId uu return nil, err } - metrics := make([]TaskRunMetric, 0) + runningCount := uint64(res.TotalRunning) // nolint: gosec + evictedCount := uint64(res.TotalEvicted) // nolint: gosec - metrics = append(metrics, TaskRunMetric{ - Status: "QUEUED", - Count: uint64(res.TotalQueued), - }) - - metrics = append(metrics, TaskRunMetric{ - Status: "RUNNING", - Count: uint64(res.TotalRunning), - }) - - metrics = append(metrics, TaskRunMetric{ - Status: "COMPLETED", - Count: uint64(res.TotalCompleted), - }) - - metrics = append(metrics, TaskRunMetric{ - Status: "CANCELLED", - Count: uint64(res.TotalCancelled), - }) - - metrics = append(metrics, TaskRunMetric{ - Status: "FAILED", - Count: uint64(res.TotalFailed), - }) + metrics := []TaskRunMetric{ + { + Status: "QUEUED", + Count: uint64(res.TotalQueued), // nolint: gosec + }, + { + Status: "RUNNING", + Count: runningCount + evictedCount, + EvictedCount: evictedCount, + OnWorkerCount: runningCount, + }, + { + Status: "COMPLETED", + Count: uint64(res.TotalCompleted), // nolint: gosec + }, + { + Status: "CANCELLED", + Count: uint64(res.TotalCancelled), // nolint: gosec + }, + { + Status: "FAILED", + Count: uint64(res.TotalFailed), // nolint: gosec + }, + } return metrics, nil } @@ -1510,8 +1513,7 @@ func (r *OLAPRepositoryImpl) saveEventsToCache(events []sqlcv1.CreateTaskEventsO } func getCacheKey(event sqlcv1.CreateTaskEventsOLAPParams) string { - // key on the task_id, retry_count, and event_type - return fmt.Sprintf("%d-%s-%d", event.TaskID, 
event.EventType, event.RetryCount) + return fmt.Sprintf("%d-%s-%d-%d", event.TaskID, event.EventType, event.RetryCount, event.DurableInvocationCount) } func (r *OLAPRepositoryImpl) writeTaskEventBatch(ctx context.Context, tenantId uuid.UUID, events []sqlcv1.CreateTaskEventsOLAPParams) error { diff --git a/pkg/repository/output.go b/pkg/repository/output.go index fb54c731e..fea33fd0b 100644 --- a/pkg/repository/output.go +++ b/pkg/repository/output.go @@ -2,6 +2,7 @@ package repository import ( "encoding/json" + "fmt" "github.com/google/uuid" @@ -138,3 +139,27 @@ func newTaskEventFromBytes(b []byte) (*TaskOutputEvent, error) { return &e, err } + +func ExtractOutputFromMatchData(data []byte) ([]byte, error) { + var outer map[string]map[string][]json.RawMessage + if err := json.Unmarshal(data, &outer); err != nil { + return nil, fmt.Errorf("failed to unmarshal match data: %w", err) + } + + for _, keyMap := range outer { + for _, entries := range keyMap { + if len(entries) == 0 { + continue + } + + var event TaskOutputEvent + if err := json.Unmarshal(entries[0], &event); err != nil { + return nil, fmt.Errorf("failed to unmarshal task output event from match data: %w", err) + } + + return event.Output, nil + } + } + + return nil, fmt.Errorf("no entries found in match data") +} diff --git a/pkg/repository/payloadstore.go b/pkg/repository/payloadstore.go index 88928ed61..7b602b9f2 100644 --- a/pkg/repository/payloadstore.go +++ b/pkg/repository/payloadstore.go @@ -62,6 +62,7 @@ type ExternalStore interface { type PayloadStoreRepository interface { Store(ctx context.Context, tx sqlcv1.DBTX, payloads ...StorePayloadOpts) error Retrieve(ctx context.Context, tx sqlcv1.DBTX, opts ...RetrievePayloadOpts) (map[RetrievePayloadOpts][]byte, error) + RetrieveSingle(ctx context.Context, tx sqlcv1.DBTX, opt RetrievePayloadOpts) ([]byte, error) RetrieveFromExternal(ctx context.Context, keys ...ExternalPayloadLocationKey) (map[ExternalPayloadLocationKey][]byte, error) 
OverwriteExternalStore(store ExternalStore) DualWritesEnabled() bool @@ -282,6 +283,24 @@ func (p *payloadStoreRepositoryImpl) Retrieve(ctx context.Context, tx sqlcv1.DBT return p.retrieve(ctx, tx, opts...) } +func (p *payloadStoreRepositoryImpl) RetrieveSingle(ctx context.Context, tx sqlcv1.DBTX, opt RetrievePayloadOpts) ([]byte, error) { + if tx == nil { + tx = p.pool + } + + optsToPayload, err := p.retrieve(ctx, tx, opt) + + if err != nil { + return nil, err + } + + if len(optsToPayload) == 0 { + return nil, pgx.ErrNoRows + } + + return optsToPayload[opt], nil +} + func (p *payloadStoreRepositoryImpl) RetrieveFromExternal(ctx context.Context, keys ...ExternalPayloadLocationKey) (map[ExternalPayloadLocationKey][]byte, error) { if !p.externalStoreEnabled { return nil, fmt.Errorf("external store not enabled") diff --git a/pkg/repository/repository.go b/pkg/repository/repository.go index e293e54b3..dca21fb5a 100644 --- a/pkg/repository/repository.go +++ b/pkg/repository/repository.go @@ -21,6 +21,7 @@ type TaskOperationLimits struct { type Repository interface { APIToken() APITokenRepository Dispatcher() DispatcherRepository + DurableEvents() DurableEventsRepository Health() HealthRepository MessageQueue() MessageQueueRepository RateLimit() RateLimitRepository @@ -59,6 +60,7 @@ type Repository interface { type repositoryImpl struct { apiToken APITokenRepository dispatcher DispatcherRepository + durableEvents DurableEventsRepository health HealthRepository messageQueue MessageQueueRepository rateLimit RateLimitRepository @@ -113,6 +115,7 @@ func NewRepository( impl := &repositoryImpl{ apiToken: newAPITokenRepository(shared, cacheDuration), dispatcher: newDispatcherRepository(shared), + durableEvents: newDurableEventsRepository(shared), health: newHealthRepository(shared), messageQueue: mq, rateLimit: newRateLimitRepository(shared), @@ -167,6 +170,10 @@ func (r *repositoryImpl) Dispatcher() DispatcherRepository { return r.dispatcher } +func (r *repositoryImpl) 
DurableEvents() DurableEventsRepository { + return r.durableEvents +} + func (r *repositoryImpl) Health() HealthRepository { return r.health } diff --git a/pkg/repository/scheduler_queue.go b/pkg/repository/scheduler_queue.go index 3f1462de9..dd88884b1 100644 --- a/pkg/repository/scheduler_queue.go +++ b/pkg/repository/scheduler_queue.go @@ -278,6 +278,7 @@ func (d *sharedRepository) markQueueItemsProcessed(ctx context.Context, tenantId } taskIds := make([]int64, 0, len(r.Assigned)) + tenantIds := make([]uuid.UUID, 0, len(r.Assigned)) taskInsertedAts := make([]pgtype.Timestamptz, 0, len(r.Assigned)) workerIds := make([]uuid.UUID, 0, len(r.Assigned)) @@ -289,6 +290,7 @@ func (d *sharedRepository) markQueueItemsProcessed(ctx context.Context, tenantId if _, ok := queuedItemsMap[id]; ok { taskIds = append(taskIds, assignedItem.QueueItem.TaskID) taskInsertedAts = append(taskInsertedAts, assignedItem.QueueItem.TaskInsertedAt) + tenantIds = append(tenantIds, tenantId) workerIds = append(workerIds, assignedItem.WorkerId) if assignedItem.QueueItem.TaskInsertedAt.Valid && (!minTaskInsertedAt.Valid || assignedItem.QueueItem.TaskInsertedAt.Time.Before(minTaskInsertedAt.Time)) { @@ -312,6 +314,26 @@ func (d *sharedRepository) markQueueItemsProcessed(ctx context.Context, tenantId return nil, nil, err } + incrementInvocationCountOpts := make([]IncrementDurableTaskInvocationCountsOpts, 0) + + for _, t := range updatedTasks { + if t.IsDurable.Valid && t.IsDurable.Bool { + incrementInvocationCountOpts = append(incrementInvocationCountOpts, IncrementDurableTaskInvocationCountsOpts{ + TaskId: t.TaskID, + TaskInsertedAt: t.TaskInsertedAt, + TenantId: tenantId, + }) + } + } + + if len(incrementInvocationCountOpts) > 0 { + _, err := d.incrementDurableTaskInvocationCounts(ctx, tx, incrementInvocationCountOpts) + + if err != nil { + return nil, nil, err + } + } + timeAfterUpdateStepRuns := time.Since(checkpoint) succeeded = make([]*AssignedItem, 0, len(r.Assigned)) diff --git 
a/pkg/repository/sqlchelpers/int.go b/pkg/repository/sqlchelpers/int.go index 50cd8e3fb..31264426d 100644 --- a/pkg/repository/sqlchelpers/int.go +++ b/pkg/repository/sqlchelpers/int.go @@ -2,9 +2,28 @@ package sqlchelpers import "github.com/jackc/pgx/v5/pgtype" -func ToInt(i int32) pgtype.Int4 { +func ToInt(i *int32) pgtype.Int4 { + if i == nil { + return pgtype.Int4{ + Valid: false, + } + } + return pgtype.Int4{ Valid: true, - Int32: i, + Int32: *i, + } +} + +func ToBigInt(i *int64) pgtype.Int8 { + if i == nil { + return pgtype.Int8{ + Valid: false, + } + } + + return pgtype.Int8{ + Valid: true, + Int64: *i, } } diff --git a/pkg/repository/sqlcv1/batch.go b/pkg/repository/sqlcv1/batch.go index bdbde3ddf..3ea6dcc44 100644 --- a/pkg/repository/sqlcv1/batch.go +++ b/pkg/repository/sqlcv1/batch.go @@ -17,7 +17,7 @@ var ( ) const registerBatch = `-- name: RegisterBatch :batchexec -SELECT id, inserted_at, tenant_id, queue, action_id, step_id, step_readable_id, workflow_id, workflow_version_id, workflow_run_id, schedule_timeout, step_timeout, priority, sticky, desired_worker_id, external_id, display_name, input, retry_count, internal_retry_count, app_retry_count, step_index, additional_metadata, dag_id, dag_inserted_at, parent_task_external_id, parent_task_id, parent_task_inserted_at, child_index, child_key, initial_state, initial_state_reason, concurrency_parent_strategy_ids, concurrency_strategy_ids, concurrency_keys, retry_backoff_factor, retry_max_backoff, desired_worker_label FROM v1_task WHERE id = $1 +SELECT id, inserted_at, tenant_id, queue, action_id, step_id, step_readable_id, workflow_id, workflow_version_id, workflow_run_id, schedule_timeout, step_timeout, priority, sticky, desired_worker_id, external_id, display_name, input, retry_count, internal_retry_count, app_retry_count, step_index, additional_metadata, dag_id, dag_inserted_at, parent_task_external_id, parent_task_id, parent_task_inserted_at, child_index, child_key, initial_state, initial_state_reason, 
concurrency_parent_strategy_ids, concurrency_strategy_ids, concurrency_keys, retry_backoff_factor, retry_max_backoff, is_durable, desired_worker_label FROM v1_task WHERE id = $1 ` type RegisterBatchBatchResults struct { diff --git a/pkg/repository/sqlcv1/copyfrom.go b/pkg/repository/sqlcv1/copyfrom.go index f27781809..5604c08eb 100644 --- a/pkg/repository/sqlcv1/copyfrom.go +++ b/pkg/repository/sqlcv1/copyfrom.go @@ -233,6 +233,7 @@ func (r iteratorForCreateTaskEventsOLAP) Values() ([]interface{}, error) { r.rows[0].AdditionalEventData, r.rows[0].AdditionalEventMessage, r.rows[0].ExternalID, + r.rows[0].DurableInvocationCount, }, nil } @@ -241,7 +242,7 @@ func (r iteratorForCreateTaskEventsOLAP) Err() error { } func (q *Queries) CreateTaskEventsOLAP(ctx context.Context, db DBTX, arg []CreateTaskEventsOLAPParams) (int64, error) { - return db.CopyFrom(ctx, []string{"v1_task_events_olap"}, []string{"tenant_id", "task_id", "task_inserted_at", "event_type", "workflow_id", "event_timestamp", "readable_status", "retry_count", "error_message", "output", "worker_id", "additional__event_data", "additional__event_message", "external_id"}, &iteratorForCreateTaskEventsOLAP{rows: arg}) + return db.CopyFrom(ctx, []string{"v1_task_events_olap"}, []string{"tenant_id", "task_id", "task_inserted_at", "event_type", "workflow_id", "event_timestamp", "readable_status", "retry_count", "error_message", "output", "worker_id", "additional__event_data", "additional__event_message", "external_id", "durable_invocation_count"}, &iteratorForCreateTaskEventsOLAP{rows: arg}) } // iteratorForCreateTaskEventsOLAPTmp implements pgx.CopyFromSource. 
diff --git a/pkg/repository/sqlcv1/durable_event_log.sql b/pkg/repository/sqlcv1/durable_event_log.sql new file mode 100644 index 000000000..e70f9d8ba --- /dev/null +++ b/pkg/repository/sqlcv1/durable_event_log.sql @@ -0,0 +1,227 @@ +-- name: GetAndLockLogFile :one +SELECT * +FROM v1_durable_event_log_file +WHERE + durable_task_id = @durableTaskId::BIGINT + AND durable_task_inserted_at = @durableTaskInsertedAt::TIMESTAMPTZ + AND tenant_id = @tenantId::UUID +FOR UPDATE +; + +-- name: IncrementLogFileInvocationCounts :many +WITH inputs AS ( + SELECT + UNNEST(@durableTaskIds::BIGINT[]) AS durable_task_id, + UNNEST(@durableTaskInsertedAts::TIMESTAMPTZ[]) AS durable_task_inserted_at, + UNNEST(@tenantIds::UUID[]) AS tenant_id +) + +INSERT INTO v1_durable_event_log_file ( + tenant_id, + durable_task_id, + durable_task_inserted_at, + latest_invocation_count, + latest_inserted_at, + latest_node_id, + latest_branch_id +) +SELECT + tenant_id, + durable_task_id, + durable_task_inserted_at, + 1, + NOW(), + 0, + 1 +FROM inputs +ON CONFLICT (durable_task_id, durable_task_inserted_at) DO UPDATE +SET + latest_invocation_count = v1_durable_event_log_file.latest_invocation_count + 1, + latest_node_id = 0 +RETURNING v1_durable_event_log_file.* +; + +-- name: UpdateLogFile :one +UPDATE v1_durable_event_log_file +SET + latest_node_id = COALESCE(sqlc.narg('nodeId')::BIGINT, v1_durable_event_log_file.latest_node_id), + latest_invocation_count = COALESCE(sqlc.narg('invocationCount')::INTEGER, v1_durable_event_log_file.latest_invocation_count), + latest_branch_id = COALESCE(sqlc.narg('branchId')::BIGINT, v1_durable_event_log_file.latest_branch_id) +WHERE durable_task_id = @durableTaskId::BIGINT + AND durable_task_inserted_at = @durableTaskInsertedAt::TIMESTAMPTZ +RETURNING *; + +-- name: CreateDurableEventLogBranchPoint :exec +INSERT INTO v1_durable_event_log_branch_point ( + tenant_id, + durable_task_id, + durable_task_inserted_at, + first_node_id_in_new_branch, + parent_branch_id, + 
next_branch_id +) +VALUES ( + @tenantId::UUID, + @durableTaskId::BIGINT, + @durableTaskInsertedAt::TIMESTAMPTZ, + @firstNodeIdInNewBranch::BIGINT, + @parentBranchId::BIGINT, + @nextBranchId::BIGINT +) +RETURNING * +; + +-- name: GetDurableEventLogEntry :one +SELECT * +FROM v1_durable_event_log_entry +WHERE durable_task_id = @durableTaskId::BIGINT + AND durable_task_inserted_at = @durableTaskInsertedAt::TIMESTAMPTZ + AND branch_id = @branchId::BIGINT + AND node_id = @nodeId::BIGINT; + + +-- name: UpdateDurableEventLogEntriesSatisfied :many +WITH inputs AS ( + SELECT + UNNEST(@durableTaskIds::BIGINT[]) AS durable_task_id, + UNNEST(@durableTaskInsertedAts::TIMESTAMPTZ[]) AS durable_task_inserted_at, + UNNEST(@nodeIds::BIGINT[]) AS node_id, + UNNEST(@branchIds::BIGINT[]) AS branch_id +), updated AS ( + UPDATE v1_durable_event_log_entry + SET is_satisfied = true + FROM inputs + WHERE v1_durable_event_log_entry.durable_task_id = inputs.durable_task_id + AND v1_durable_event_log_entry.durable_task_inserted_at = inputs.durable_task_inserted_at + AND v1_durable_event_log_entry.node_id = inputs.node_id + AND v1_durable_event_log_entry.branch_id = inputs.branch_id + RETURNING v1_durable_event_log_entry.* +) + +SELECT updated.*, lf.latest_invocation_count AS invocation_count +FROM updated +JOIN v1_durable_event_log_file lf ON (lf.durable_task_id, lf.durable_task_inserted_at) = (updated.durable_task_id, updated.durable_task_inserted_at) +; + +-- name: ListSatisfiedEntries :many +WITH inputs AS ( + SELECT + UNNEST(@taskExternalIds::UUID[]) AS external_id, + UNNEST(@nodeIds::BIGINT[]) AS node_id, + UNNEST(@branchIds::BIGINT[]) AS branch_id +), tasks_with_nodes AS ( + SELECT t.*, i.node_id AS requested_node_id, i.branch_id AS requested_branch_id + FROM inputs i + JOIN v1_lookup_table lt ON lt.external_id = i.external_id + JOIN v1_task t ON (t.id, t.inserted_at) = (lt.task_id, lt.inserted_at) +) + +SELECT + e.*, + twn.external_id AS task_external_id, + lf.latest_invocation_count AS 
invocation_count +FROM v1_durable_event_log_entry e +JOIN tasks_with_nodes twn ON (twn.id, twn.inserted_at) = (e.durable_task_id, e.durable_task_inserted_at) +JOIN v1_durable_event_log_file lf ON (lf.durable_task_id, lf.durable_task_inserted_at) = (e.durable_task_id, e.durable_task_inserted_at) +WHERE + e.branch_id = twn.requested_branch_id + AND e.node_id = twn.requested_node_id + AND e.is_satisfied +; + +-- name: MarkDurableEventLogEntrySatisfied :one +UPDATE v1_durable_event_log_entry +SET is_satisfied = true +WHERE durable_task_id = @durableTaskId::BIGINT + AND durable_task_inserted_at = @durableTaskInsertedAt::TIMESTAMPTZ + AND branch_id = @branchId::BIGINT + AND node_id = @nodeId::BIGINT +RETURNING * +; + + +-- name: BulkGetDurableEventLogEntries :many +WITH inputs AS ( + SELECT + UNNEST(@branchIds::BIGINT[]) AS branch_id, + UNNEST(@nodeIds::BIGINT[]) AS node_id +) +SELECT e.*, lf.latest_invocation_count AS invocation_count +FROM v1_durable_event_log_entry e +JOIN inputs i ON e.branch_id = i.branch_id AND e.node_id = i.node_id +JOIN v1_durable_event_log_file lf ON (lf.durable_task_id, lf.durable_task_inserted_at) = (e.durable_task_id, e.durable_task_inserted_at) +WHERE e.durable_task_id = @durableTaskId::BIGINT + AND e.durable_task_inserted_at = @durableTaskInsertedAt::TIMESTAMPTZ; + +-- name: BulkCreateDurableEventLogEntries :many +WITH inputs AS ( + SELECT + UNNEST(@tenantIds::UUID[]) AS tenant_id, + UNNEST(@externalIds::UUID[]) AS external_id, + UNNEST(@durableTaskIds::BIGINT[]) AS durable_task_id, + UNNEST(@durableTaskInsertedAts::TIMESTAMPTZ[]) AS durable_task_inserted_at, + UNNEST(@kinds::text[]) AS kind, + UNNEST(@nodeIds::BIGINT[]) AS node_id, + UNNEST(@branchIds::BIGINT[]) AS branch_id, + UNNEST(@idempotencyKeys::BYTEA[]) AS idempotency_key, + UNNEST(@isSatisfieds::BOOLEAN[]) AS is_satisfied +), inserts AS ( + INSERT INTO v1_durable_event_log_entry ( + tenant_id, + external_id, + durable_task_id, + durable_task_inserted_at, + inserted_at, + kind, + 
node_id, + branch_id, + idempotency_key, + is_satisfied + ) + SELECT + i.tenant_id, + i.external_id, + i.durable_task_id, + i.durable_task_inserted_at, + NOW(), + i.kind::v1_durable_event_log_kind, + i.node_id, + i.branch_id, + i.idempotency_key, + i.is_satisfied + FROM inputs i + ON CONFLICT (durable_task_id, durable_task_inserted_at, branch_id, node_id) DO NOTHING + RETURNING * +) + +SELECT i.*, lf.latest_invocation_count AS invocation_count +FROM inserts i +JOIN v1_durable_event_log_file lf ON (lf.durable_task_id, lf.durable_task_inserted_at) = (i.durable_task_id, i.durable_task_inserted_at) +; + + +-- name: GetDurableTaskLogFiles :many +WITH inputs AS ( + SELECT + UNNEST(@durableTaskIds::BIGINT[]) AS durable_task_id, + UNNEST(@durableTaskInsertedAts::TIMESTAMPTZ[]) AS durable_task_inserted_at, + UNNEST(@tenantIds::UUID[]) AS tenant_id +) + +SELECT * +FROM v1_durable_event_log_file lf +WHERE (lf.durable_task_id, lf.durable_task_inserted_at, lf.tenant_id) IN ( + SELECT durable_task_id, durable_task_inserted_at, tenant_id + FROM inputs +) +; + +-- name: ListDurableEventLogBranchPoints :many +SELECT * +FROM v1_durable_event_log_branch_point +WHERE + durable_task_id = @durableTaskId::BIGINT + AND durable_task_inserted_at = @durableTaskInsertedAt::TIMESTAMPTZ + AND tenant_id = @tenantId::UUID +ORDER BY id ASC +; diff --git a/pkg/repository/sqlcv1/durable_event_log.sql.go b/pkg/repository/sqlcv1/durable_event_log.sql.go new file mode 100644 index 000000000..e5e0c66c5 --- /dev/null +++ b/pkg/repository/sqlcv1/durable_event_log.sql.go @@ -0,0 +1,723 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.29.0 +// source: durable_event_log.sql + +package sqlcv1 + +import ( + "context" + + "github.com/google/uuid" + "github.com/jackc/pgx/v5/pgtype" +) + +const bulkCreateDurableEventLogEntries = `-- name: BulkCreateDurableEventLogEntries :many +WITH inputs AS ( + SELECT + UNNEST($1::UUID[]) AS tenant_id, + UNNEST($2::UUID[]) AS external_id, + UNNEST($3::BIGINT[]) AS durable_task_id, + UNNEST($4::TIMESTAMPTZ[]) AS durable_task_inserted_at, + UNNEST($5::text[]) AS kind, + UNNEST($6::BIGINT[]) AS node_id, + UNNEST($7::BIGINT[]) AS branch_id, + UNNEST($8::BYTEA[]) AS idempotency_key, + UNNEST($9::BOOLEAN[]) AS is_satisfied +), inserts AS ( + INSERT INTO v1_durable_event_log_entry ( + tenant_id, + external_id, + durable_task_id, + durable_task_inserted_at, + inserted_at, + kind, + node_id, + branch_id, + idempotency_key, + is_satisfied + ) + SELECT + i.tenant_id, + i.external_id, + i.durable_task_id, + i.durable_task_inserted_at, + NOW(), + i.kind::v1_durable_event_log_kind, + i.node_id, + i.branch_id, + i.idempotency_key, + i.is_satisfied + FROM inputs i + ON CONFLICT (durable_task_id, durable_task_inserted_at, branch_id, node_id) DO NOTHING + RETURNING tenant_id, external_id, inserted_at, id, durable_task_id, durable_task_inserted_at, kind, node_id, branch_id, idempotency_key, is_satisfied +) + +SELECT i.tenant_id, i.external_id, i.inserted_at, i.id, i.durable_task_id, i.durable_task_inserted_at, i.kind, i.node_id, i.branch_id, i.idempotency_key, i.is_satisfied, lf.latest_invocation_count AS invocation_count +FROM inserts i +JOIN v1_durable_event_log_file lf ON (lf.durable_task_id, lf.durable_task_inserted_at) = (i.durable_task_id, i.durable_task_inserted_at) +` + +type BulkCreateDurableEventLogEntriesParams struct { + Tenantids []uuid.UUID `json:"tenantids"` + Externalids []uuid.UUID `json:"externalids"` + Durabletaskids []int64 `json:"durabletaskids"` + Durabletaskinsertedats []pgtype.Timestamptz `json:"durabletaskinsertedats"` + Kinds 
[]string `json:"kinds"` + Nodeids []int64 `json:"nodeids"` + Branchids []int64 `json:"branchids"` + Idempotencykeys [][]byte `json:"idempotencykeys"` + Issatisfieds []bool `json:"issatisfieds"` +} + +type BulkCreateDurableEventLogEntriesRow struct { + TenantID uuid.UUID `json:"tenant_id"` + ExternalID uuid.UUID `json:"external_id"` + InsertedAt pgtype.Timestamptz `json:"inserted_at"` + ID int64 `json:"id"` + DurableTaskID int64 `json:"durable_task_id"` + DurableTaskInsertedAt pgtype.Timestamptz `json:"durable_task_inserted_at"` + Kind V1DurableEventLogKind `json:"kind"` + NodeID int64 `json:"node_id"` + BranchID int64 `json:"branch_id"` + IdempotencyKey []byte `json:"idempotency_key"` + IsSatisfied bool `json:"is_satisfied"` + InvocationCount int32 `json:"invocation_count"` +} + +func (q *Queries) BulkCreateDurableEventLogEntries(ctx context.Context, db DBTX, arg BulkCreateDurableEventLogEntriesParams) ([]*BulkCreateDurableEventLogEntriesRow, error) { + rows, err := db.Query(ctx, bulkCreateDurableEventLogEntries, + arg.Tenantids, + arg.Externalids, + arg.Durabletaskids, + arg.Durabletaskinsertedats, + arg.Kinds, + arg.Nodeids, + arg.Branchids, + arg.Idempotencykeys, + arg.Issatisfieds, + ) + if err != nil { + return nil, err + } + defer rows.Close() + var items []*BulkCreateDurableEventLogEntriesRow + for rows.Next() { + var i BulkCreateDurableEventLogEntriesRow + if err := rows.Scan( + &i.TenantID, + &i.ExternalID, + &i.InsertedAt, + &i.ID, + &i.DurableTaskID, + &i.DurableTaskInsertedAt, + &i.Kind, + &i.NodeID, + &i.BranchID, + &i.IdempotencyKey, + &i.IsSatisfied, + &i.InvocationCount, + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const bulkGetDurableEventLogEntries = `-- name: BulkGetDurableEventLogEntries :many +WITH inputs AS ( + SELECT + UNNEST($3::BIGINT[]) AS branch_id, + UNNEST($4::BIGINT[]) AS node_id +) +SELECT e.tenant_id, e.external_id, 
e.inserted_at, e.id, e.durable_task_id, e.durable_task_inserted_at, e.kind, e.node_id, e.branch_id, e.idempotency_key, e.is_satisfied, lf.latest_invocation_count AS invocation_count +FROM v1_durable_event_log_entry e +JOIN inputs i ON e.branch_id = i.branch_id AND e.node_id = i.node_id +JOIN v1_durable_event_log_file lf ON (lf.durable_task_id, lf.durable_task_inserted_at) = (e.durable_task_id, e.durable_task_inserted_at) +WHERE e.durable_task_id = $1::BIGINT + AND e.durable_task_inserted_at = $2::TIMESTAMPTZ +` + +type BulkGetDurableEventLogEntriesParams struct { + Durabletaskid int64 `json:"durabletaskid"` + Durabletaskinsertedat pgtype.Timestamptz `json:"durabletaskinsertedat"` + Branchids []int64 `json:"branchids"` + Nodeids []int64 `json:"nodeids"` +} + +type BulkGetDurableEventLogEntriesRow struct { + TenantID uuid.UUID `json:"tenant_id"` + ExternalID uuid.UUID `json:"external_id"` + InsertedAt pgtype.Timestamptz `json:"inserted_at"` + ID int64 `json:"id"` + DurableTaskID int64 `json:"durable_task_id"` + DurableTaskInsertedAt pgtype.Timestamptz `json:"durable_task_inserted_at"` + Kind V1DurableEventLogKind `json:"kind"` + NodeID int64 `json:"node_id"` + BranchID int64 `json:"branch_id"` + IdempotencyKey []byte `json:"idempotency_key"` + IsSatisfied bool `json:"is_satisfied"` + InvocationCount int32 `json:"invocation_count"` +} + +func (q *Queries) BulkGetDurableEventLogEntries(ctx context.Context, db DBTX, arg BulkGetDurableEventLogEntriesParams) ([]*BulkGetDurableEventLogEntriesRow, error) { + rows, err := db.Query(ctx, bulkGetDurableEventLogEntries, + arg.Durabletaskid, + arg.Durabletaskinsertedat, + arg.Branchids, + arg.Nodeids, + ) + if err != nil { + return nil, err + } + defer rows.Close() + var items []*BulkGetDurableEventLogEntriesRow + for rows.Next() { + var i BulkGetDurableEventLogEntriesRow + if err := rows.Scan( + &i.TenantID, + &i.ExternalID, + &i.InsertedAt, + &i.ID, + &i.DurableTaskID, + &i.DurableTaskInsertedAt, + &i.Kind, + &i.NodeID, + 
&i.BranchID, + &i.IdempotencyKey, + &i.IsSatisfied, + &i.InvocationCount, + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const createDurableEventLogBranchPoint = `-- name: CreateDurableEventLogBranchPoint :exec +INSERT INTO v1_durable_event_log_branch_point ( + tenant_id, + durable_task_id, + durable_task_inserted_at, + first_node_id_in_new_branch, + parent_branch_id, + next_branch_id +) +VALUES ( + $1::UUID, + $2::BIGINT, + $3::TIMESTAMPTZ, + $4::BIGINT, + $5::BIGINT, + $6::BIGINT +) +RETURNING tenant_id, id, inserted_at, durable_task_id, durable_task_inserted_at, first_node_id_in_new_branch, parent_branch_id, next_branch_id +` + +type CreateDurableEventLogBranchPointParams struct { + Tenantid uuid.UUID `json:"tenantid"` + Durabletaskid int64 `json:"durabletaskid"` + Durabletaskinsertedat pgtype.Timestamptz `json:"durabletaskinsertedat"` + Firstnodeidinnewbranch int64 `json:"firstnodeidinnewbranch"` + Parentbranchid int64 `json:"parentbranchid"` + Nextbranchid int64 `json:"nextbranchid"` +} + +func (q *Queries) CreateDurableEventLogBranchPoint(ctx context.Context, db DBTX, arg CreateDurableEventLogBranchPointParams) error { + _, err := db.Exec(ctx, createDurableEventLogBranchPoint, + arg.Tenantid, + arg.Durabletaskid, + arg.Durabletaskinsertedat, + arg.Firstnodeidinnewbranch, + arg.Parentbranchid, + arg.Nextbranchid, + ) + return err +} + +const getAndLockLogFile = `-- name: GetAndLockLogFile :one +SELECT tenant_id, durable_task_id, durable_task_inserted_at, latest_invocation_count, latest_inserted_at, latest_node_id, latest_branch_id +FROM v1_durable_event_log_file +WHERE + durable_task_id = $1::BIGINT + AND durable_task_inserted_at = $2::TIMESTAMPTZ + AND tenant_id = $3::UUID +FOR UPDATE +` + +type GetAndLockLogFileParams struct { + Durabletaskid int64 `json:"durabletaskid"` + Durabletaskinsertedat pgtype.Timestamptz `json:"durabletaskinsertedat"` + 
Tenantid uuid.UUID `json:"tenantid"` +} + +func (q *Queries) GetAndLockLogFile(ctx context.Context, db DBTX, arg GetAndLockLogFileParams) (*V1DurableEventLogFile, error) { + row := db.QueryRow(ctx, getAndLockLogFile, arg.Durabletaskid, arg.Durabletaskinsertedat, arg.Tenantid) + var i V1DurableEventLogFile + err := row.Scan( + &i.TenantID, + &i.DurableTaskID, + &i.DurableTaskInsertedAt, + &i.LatestInvocationCount, + &i.LatestInsertedAt, + &i.LatestNodeID, + &i.LatestBranchID, + ) + return &i, err +} + +const getDurableEventLogEntry = `-- name: GetDurableEventLogEntry :one +SELECT tenant_id, external_id, inserted_at, id, durable_task_id, durable_task_inserted_at, kind, node_id, branch_id, idempotency_key, is_satisfied +FROM v1_durable_event_log_entry +WHERE durable_task_id = $1::BIGINT + AND durable_task_inserted_at = $2::TIMESTAMPTZ + AND branch_id = $3::BIGINT + AND node_id = $4::BIGINT +` + +type GetDurableEventLogEntryParams struct { + Durabletaskid int64 `json:"durabletaskid"` + Durabletaskinsertedat pgtype.Timestamptz `json:"durabletaskinsertedat"` + Branchid int64 `json:"branchid"` + Nodeid int64 `json:"nodeid"` +} + +func (q *Queries) GetDurableEventLogEntry(ctx context.Context, db DBTX, arg GetDurableEventLogEntryParams) (*V1DurableEventLogEntry, error) { + row := db.QueryRow(ctx, getDurableEventLogEntry, + arg.Durabletaskid, + arg.Durabletaskinsertedat, + arg.Branchid, + arg.Nodeid, + ) + var i V1DurableEventLogEntry + err := row.Scan( + &i.TenantID, + &i.ExternalID, + &i.InsertedAt, + &i.ID, + &i.DurableTaskID, + &i.DurableTaskInsertedAt, + &i.Kind, + &i.NodeID, + &i.BranchID, + &i.IdempotencyKey, + &i.IsSatisfied, + ) + return &i, err +} + +const getDurableTaskLogFiles = `-- name: GetDurableTaskLogFiles :many +WITH inputs AS ( + SELECT + UNNEST($1::BIGINT[]) AS durable_task_id, + UNNEST($2::TIMESTAMPTZ[]) AS durable_task_inserted_at, + UNNEST($3::UUID[]) AS tenant_id +) + +SELECT tenant_id, durable_task_id, durable_task_inserted_at, 
latest_invocation_count, latest_inserted_at, latest_node_id, latest_branch_id +FROM v1_durable_event_log_file lf +WHERE (lf.durable_task_id, lf.durable_task_inserted_at, lf.tenant_id) IN ( + SELECT durable_task_id, durable_task_inserted_at, tenant_id + FROM inputs +) +` + +type GetDurableTaskLogFilesParams struct { + Durabletaskids []int64 `json:"durabletaskids"` + Durabletaskinsertedats []pgtype.Timestamptz `json:"durabletaskinsertedats"` + Tenantids []uuid.UUID `json:"tenantids"` +} + +func (q *Queries) GetDurableTaskLogFiles(ctx context.Context, db DBTX, arg GetDurableTaskLogFilesParams) ([]*V1DurableEventLogFile, error) { + rows, err := db.Query(ctx, getDurableTaskLogFiles, arg.Durabletaskids, arg.Durabletaskinsertedats, arg.Tenantids) + if err != nil { + return nil, err + } + defer rows.Close() + var items []*V1DurableEventLogFile + for rows.Next() { + var i V1DurableEventLogFile + if err := rows.Scan( + &i.TenantID, + &i.DurableTaskID, + &i.DurableTaskInsertedAt, + &i.LatestInvocationCount, + &i.LatestInsertedAt, + &i.LatestNodeID, + &i.LatestBranchID, + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const incrementLogFileInvocationCounts = `-- name: IncrementLogFileInvocationCounts :many +WITH inputs AS ( + SELECT + UNNEST($1::BIGINT[]) AS durable_task_id, + UNNEST($2::TIMESTAMPTZ[]) AS durable_task_inserted_at, + UNNEST($3::UUID[]) AS tenant_id +) + +INSERT INTO v1_durable_event_log_file ( + tenant_id, + durable_task_id, + durable_task_inserted_at, + latest_invocation_count, + latest_inserted_at, + latest_node_id, + latest_branch_id +) +SELECT + tenant_id, + durable_task_id, + durable_task_inserted_at, + 1, + NOW(), + 0, + 1 +FROM inputs +ON CONFLICT (durable_task_id, durable_task_inserted_at) DO UPDATE +SET + latest_invocation_count = v1_durable_event_log_file.latest_invocation_count + 1, + latest_node_id = 0 +RETURNING 
v1_durable_event_log_file.tenant_id, v1_durable_event_log_file.durable_task_id, v1_durable_event_log_file.durable_task_inserted_at, v1_durable_event_log_file.latest_invocation_count, v1_durable_event_log_file.latest_inserted_at, v1_durable_event_log_file.latest_node_id, v1_durable_event_log_file.latest_branch_id +` + +type IncrementLogFileInvocationCountsParams struct { + Durabletaskids []int64 `json:"durabletaskids"` + Durabletaskinsertedats []pgtype.Timestamptz `json:"durabletaskinsertedats"` + Tenantids []uuid.UUID `json:"tenantids"` +} + +func (q *Queries) IncrementLogFileInvocationCounts(ctx context.Context, db DBTX, arg IncrementLogFileInvocationCountsParams) ([]*V1DurableEventLogFile, error) { + rows, err := db.Query(ctx, incrementLogFileInvocationCounts, arg.Durabletaskids, arg.Durabletaskinsertedats, arg.Tenantids) + if err != nil { + return nil, err + } + defer rows.Close() + var items []*V1DurableEventLogFile + for rows.Next() { + var i V1DurableEventLogFile + if err := rows.Scan( + &i.TenantID, + &i.DurableTaskID, + &i.DurableTaskInsertedAt, + &i.LatestInvocationCount, + &i.LatestInsertedAt, + &i.LatestNodeID, + &i.LatestBranchID, + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const listDurableEventLogBranchPoints = `-- name: ListDurableEventLogBranchPoints :many +SELECT tenant_id, id, inserted_at, durable_task_id, durable_task_inserted_at, first_node_id_in_new_branch, parent_branch_id, next_branch_id +FROM v1_durable_event_log_branch_point +WHERE + durable_task_id = $1::BIGINT + AND durable_task_inserted_at = $2::TIMESTAMPTZ + AND tenant_id = $3::UUID +ORDER BY id ASC +` + +type ListDurableEventLogBranchPointsParams struct { + Durabletaskid int64 `json:"durabletaskid"` + Durabletaskinsertedat pgtype.Timestamptz `json:"durabletaskinsertedat"` + Tenantid uuid.UUID `json:"tenantid"` +} + +func (q *Queries) ListDurableEventLogBranchPoints(ctx 
context.Context, db DBTX, arg ListDurableEventLogBranchPointsParams) ([]*V1DurableEventLogBranchPoint, error) { + rows, err := db.Query(ctx, listDurableEventLogBranchPoints, arg.Durabletaskid, arg.Durabletaskinsertedat, arg.Tenantid) + if err != nil { + return nil, err + } + defer rows.Close() + var items []*V1DurableEventLogBranchPoint + for rows.Next() { + var i V1DurableEventLogBranchPoint + if err := rows.Scan( + &i.TenantID, + &i.ID, + &i.InsertedAt, + &i.DurableTaskID, + &i.DurableTaskInsertedAt, + &i.FirstNodeIDInNewBranch, + &i.ParentBranchID, + &i.NextBranchID, + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const listSatisfiedEntries = `-- name: ListSatisfiedEntries :many +WITH inputs AS ( + SELECT + UNNEST($1::UUID[]) AS external_id, + UNNEST($2::BIGINT[]) AS node_id, + UNNEST($3::BIGINT[]) AS branch_id +), tasks_with_nodes AS ( + SELECT t.id, t.inserted_at, t.tenant_id, t.queue, t.action_id, t.step_id, t.step_readable_id, t.workflow_id, t.workflow_version_id, t.workflow_run_id, t.schedule_timeout, t.step_timeout, t.priority, t.sticky, t.desired_worker_id, t.external_id, t.display_name, t.input, t.retry_count, t.internal_retry_count, t.app_retry_count, t.step_index, t.additional_metadata, t.dag_id, t.dag_inserted_at, t.parent_task_external_id, t.parent_task_id, t.parent_task_inserted_at, t.child_index, t.child_key, t.initial_state, t.initial_state_reason, t.concurrency_parent_strategy_ids, t.concurrency_strategy_ids, t.concurrency_keys, t.retry_backoff_factor, t.retry_max_backoff, t.is_durable, t.desired_worker_label, i.node_id AS requested_node_id, i.branch_id AS requested_branch_id + FROM inputs i + JOIN v1_lookup_table lt ON lt.external_id = i.external_id + JOIN v1_task t ON (t.id, t.inserted_at) = (lt.task_id, lt.inserted_at) +) + +SELECT + e.tenant_id, e.external_id, e.inserted_at, e.id, e.durable_task_id, e.durable_task_inserted_at, e.kind, 
e.node_id, e.branch_id, e.idempotency_key, e.is_satisfied, + twn.external_id AS task_external_id, + lf.latest_invocation_count AS invocation_count +FROM v1_durable_event_log_entry e +JOIN tasks_with_nodes twn ON (twn.id, twn.inserted_at) = (e.durable_task_id, e.durable_task_inserted_at) +JOIN v1_durable_event_log_file lf ON (lf.durable_task_id, lf.durable_task_inserted_at) = (e.durable_task_id, e.durable_task_inserted_at) +WHERE + e.branch_id = twn.requested_branch_id + AND e.node_id = twn.requested_node_id + AND e.is_satisfied +` + +type ListSatisfiedEntriesParams struct { + Taskexternalids []uuid.UUID `json:"taskexternalids"` + Nodeids []int64 `json:"nodeids"` + Branchids []int64 `json:"branchids"` +} + +type ListSatisfiedEntriesRow struct { + TenantID uuid.UUID `json:"tenant_id"` + ExternalID uuid.UUID `json:"external_id"` + InsertedAt pgtype.Timestamptz `json:"inserted_at"` + ID int64 `json:"id"` + DurableTaskID int64 `json:"durable_task_id"` + DurableTaskInsertedAt pgtype.Timestamptz `json:"durable_task_inserted_at"` + Kind V1DurableEventLogKind `json:"kind"` + NodeID int64 `json:"node_id"` + BranchID int64 `json:"branch_id"` + IdempotencyKey []byte `json:"idempotency_key"` + IsSatisfied bool `json:"is_satisfied"` + TaskExternalID uuid.UUID `json:"task_external_id"` + InvocationCount int32 `json:"invocation_count"` +} + +func (q *Queries) ListSatisfiedEntries(ctx context.Context, db DBTX, arg ListSatisfiedEntriesParams) ([]*ListSatisfiedEntriesRow, error) { + rows, err := db.Query(ctx, listSatisfiedEntries, arg.Taskexternalids, arg.Nodeids, arg.Branchids) + if err != nil { + return nil, err + } + defer rows.Close() + var items []*ListSatisfiedEntriesRow + for rows.Next() { + var i ListSatisfiedEntriesRow + if err := rows.Scan( + &i.TenantID, + &i.ExternalID, + &i.InsertedAt, + &i.ID, + &i.DurableTaskID, + &i.DurableTaskInsertedAt, + &i.Kind, + &i.NodeID, + &i.BranchID, + &i.IdempotencyKey, + &i.IsSatisfied, + &i.TaskExternalID, + &i.InvocationCount, + ); err 
!= nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const markDurableEventLogEntrySatisfied = `-- name: MarkDurableEventLogEntrySatisfied :one +UPDATE v1_durable_event_log_entry +SET is_satisfied = true +WHERE durable_task_id = $1::BIGINT + AND durable_task_inserted_at = $2::TIMESTAMPTZ + AND branch_id = $3::BIGINT + AND node_id = $4::BIGINT +RETURNING tenant_id, external_id, inserted_at, id, durable_task_id, durable_task_inserted_at, kind, node_id, branch_id, idempotency_key, is_satisfied +` + +type MarkDurableEventLogEntrySatisfiedParams struct { + Durabletaskid int64 `json:"durabletaskid"` + Durabletaskinsertedat pgtype.Timestamptz `json:"durabletaskinsertedat"` + Branchid int64 `json:"branchid"` + Nodeid int64 `json:"nodeid"` +} + +func (q *Queries) MarkDurableEventLogEntrySatisfied(ctx context.Context, db DBTX, arg MarkDurableEventLogEntrySatisfiedParams) (*V1DurableEventLogEntry, error) { + row := db.QueryRow(ctx, markDurableEventLogEntrySatisfied, + arg.Durabletaskid, + arg.Durabletaskinsertedat, + arg.Branchid, + arg.Nodeid, + ) + var i V1DurableEventLogEntry + err := row.Scan( + &i.TenantID, + &i.ExternalID, + &i.InsertedAt, + &i.ID, + &i.DurableTaskID, + &i.DurableTaskInsertedAt, + &i.Kind, + &i.NodeID, + &i.BranchID, + &i.IdempotencyKey, + &i.IsSatisfied, + ) + return &i, err +} + +const updateDurableEventLogEntriesSatisfied = `-- name: UpdateDurableEventLogEntriesSatisfied :many +WITH inputs AS ( + SELECT + UNNEST($1::BIGINT[]) AS durable_task_id, + UNNEST($2::TIMESTAMPTZ[]) AS durable_task_inserted_at, + UNNEST($3::BIGINT[]) AS node_id, + UNNEST($4::BIGINT[]) AS branch_id +), updated AS ( + UPDATE v1_durable_event_log_entry + SET is_satisfied = true + FROM inputs + WHERE v1_durable_event_log_entry.durable_task_id = inputs.durable_task_id + AND v1_durable_event_log_entry.durable_task_inserted_at = inputs.durable_task_inserted_at + AND 
v1_durable_event_log_entry.node_id = inputs.node_id + AND v1_durable_event_log_entry.branch_id = inputs.branch_id + RETURNING v1_durable_event_log_entry.tenant_id, v1_durable_event_log_entry.external_id, v1_durable_event_log_entry.inserted_at, v1_durable_event_log_entry.id, v1_durable_event_log_entry.durable_task_id, v1_durable_event_log_entry.durable_task_inserted_at, v1_durable_event_log_entry.kind, v1_durable_event_log_entry.node_id, v1_durable_event_log_entry.branch_id, v1_durable_event_log_entry.idempotency_key, v1_durable_event_log_entry.is_satisfied +) + +SELECT updated.tenant_id, updated.external_id, updated.inserted_at, updated.id, updated.durable_task_id, updated.durable_task_inserted_at, updated.kind, updated.node_id, updated.branch_id, updated.idempotency_key, updated.is_satisfied, lf.latest_invocation_count AS invocation_count +FROM updated +JOIN v1_durable_event_log_file lf ON (lf.durable_task_id, lf.durable_task_inserted_at) = (updated.durable_task_id, updated.durable_task_inserted_at) +` + +type UpdateDurableEventLogEntriesSatisfiedParams struct { + Durabletaskids []int64 `json:"durabletaskids"` + Durabletaskinsertedats []pgtype.Timestamptz `json:"durabletaskinsertedats"` + Nodeids []int64 `json:"nodeids"` + Branchids []int64 `json:"branchids"` +} + +type UpdateDurableEventLogEntriesSatisfiedRow struct { + TenantID uuid.UUID `json:"tenant_id"` + ExternalID uuid.UUID `json:"external_id"` + InsertedAt pgtype.Timestamptz `json:"inserted_at"` + ID int64 `json:"id"` + DurableTaskID int64 `json:"durable_task_id"` + DurableTaskInsertedAt pgtype.Timestamptz `json:"durable_task_inserted_at"` + Kind V1DurableEventLogKind `json:"kind"` + NodeID int64 `json:"node_id"` + BranchID int64 `json:"branch_id"` + IdempotencyKey []byte `json:"idempotency_key"` + IsSatisfied bool `json:"is_satisfied"` + InvocationCount int32 `json:"invocation_count"` +} + +func (q *Queries) UpdateDurableEventLogEntriesSatisfied(ctx context.Context, db DBTX, arg 
UpdateDurableEventLogEntriesSatisfiedParams) ([]*UpdateDurableEventLogEntriesSatisfiedRow, error) { + rows, err := db.Query(ctx, updateDurableEventLogEntriesSatisfied, + arg.Durabletaskids, + arg.Durabletaskinsertedats, + arg.Nodeids, + arg.Branchids, + ) + if err != nil { + return nil, err + } + defer rows.Close() + var items []*UpdateDurableEventLogEntriesSatisfiedRow + for rows.Next() { + var i UpdateDurableEventLogEntriesSatisfiedRow + if err := rows.Scan( + &i.TenantID, + &i.ExternalID, + &i.InsertedAt, + &i.ID, + &i.DurableTaskID, + &i.DurableTaskInsertedAt, + &i.Kind, + &i.NodeID, + &i.BranchID, + &i.IdempotencyKey, + &i.IsSatisfied, + &i.InvocationCount, + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const updateLogFile = `-- name: UpdateLogFile :one +UPDATE v1_durable_event_log_file +SET + latest_node_id = COALESCE($1::BIGINT, v1_durable_event_log_file.latest_node_id), + latest_invocation_count = COALESCE($2::INTEGER, v1_durable_event_log_file.latest_invocation_count), + latest_branch_id = COALESCE($3::BIGINT, v1_durable_event_log_file.latest_branch_id) +WHERE durable_task_id = $4::BIGINT + AND durable_task_inserted_at = $5::TIMESTAMPTZ +RETURNING tenant_id, durable_task_id, durable_task_inserted_at, latest_invocation_count, latest_inserted_at, latest_node_id, latest_branch_id +` + +type UpdateLogFileParams struct { + NodeId pgtype.Int8 `json:"nodeId"` + InvocationCount pgtype.Int4 `json:"invocationCount"` + BranchId pgtype.Int8 `json:"branchId"` + Durabletaskid int64 `json:"durabletaskid"` + Durabletaskinsertedat pgtype.Timestamptz `json:"durabletaskinsertedat"` +} + +func (q *Queries) UpdateLogFile(ctx context.Context, db DBTX, arg UpdateLogFileParams) (*V1DurableEventLogFile, error) { + row := db.QueryRow(ctx, updateLogFile, + arg.NodeId, + arg.InvocationCount, + arg.BranchId, + arg.Durabletaskid, + arg.Durabletaskinsertedat, + ) + var i 
V1DurableEventLogFile + err := row.Scan( + &i.TenantID, + &i.DurableTaskID, + &i.DurableTaskInsertedAt, + &i.LatestInvocationCount, + &i.LatestInsertedAt, + &i.LatestNodeID, + &i.LatestBranchID, + ) + return &i, err +} diff --git a/pkg/repository/sqlcv1/matches-overwrite.sql.go b/pkg/repository/sqlcv1/matches-overwrite.sql.go index b2bb8f0d5..658d8c053 100644 --- a/pkg/repository/sqlcv1/matches-overwrite.sql.go +++ b/pkg/repository/sqlcv1/matches-overwrite.sql.go @@ -307,3 +307,125 @@ func (q *Queries) CreateMatchesForDAGTriggers(ctx context.Context, db DBTX, arg } return items, nil } + +const createMatchesForSignalTriggers = `-- name: CreateMatchesForSignalTriggers :many +WITH input AS ( + SELECT + tenant_id, + kind, + signal_task_id, + signal_task_inserted_at, + signal_task_external_id, + signal_external_id, + signal_key, + durable_event_log_entry_node_id, + durable_event_log_entry_branch_id + FROM + ( + SELECT + unnest($1::uuid[]) AS tenant_id, + unnest(cast($2::text[] as v1_match_kind[])) AS kind, + unnest($3::bigint[]) AS signal_task_id, + unnest($4::timestamptz[]) AS signal_task_inserted_at, + unnest($5::uuid[]) AS signal_task_external_id, + unnest($6::uuid[]) AS signal_external_id, + unnest($7::text[]) AS signal_key, + unnest($8::bigint[]) AS durable_event_log_entry_node_id, + unnest($9::bigint[]) AS durable_event_log_entry_branch_id + ) AS subquery +) +INSERT INTO v1_match ( + tenant_id, + kind, + signal_task_id, + signal_task_inserted_at, + signal_task_external_id, + signal_external_id, + signal_key, + durable_event_log_entry_node_id, + durable_event_log_entry_branch_id +) +SELECT + i.tenant_id, + i.kind, + i.signal_task_id, + i.signal_task_inserted_at, + i.signal_task_external_id, + i.signal_external_id, + i.signal_key, + i.durable_event_log_entry_node_id, + i.durable_event_log_entry_branch_id +FROM + input i +RETURNING + id, tenant_id, kind, is_satisfied, existing_data, signal_task_id, signal_task_inserted_at, signal_task_external_id, signal_external_id, 
signal_key, trigger_dag_id, trigger_dag_inserted_at, trigger_step_id, trigger_step_index, trigger_external_id, trigger_workflow_run_id, trigger_parent_task_external_id, trigger_parent_task_id, trigger_parent_task_inserted_at, trigger_child_index, trigger_child_key, trigger_existing_task_id, trigger_existing_task_inserted_at, trigger_priority, durable_event_log_entry_node_id, durable_event_log_entry_branch_id +` + +type CreateMatchesForSignalTriggersParams struct { + Tenantids []uuid.UUID `json:"tenantids"` + Kinds []string `json:"kinds"` + Signaltaskids []int64 `json:"signaltaskids"` + Signaltaskinsertedats []pgtype.Timestamptz `json:"signaltaskinsertedats"` + Signaltaskexternalids []*uuid.UUID `json:"signaltaskexternalids"` + Signalexternalids []uuid.UUID `json:"signalexternalids"` + Signalkeys []string `json:"signalkeys"` + Durableeventlogentrynodeids []*int64 `json:"durableeventlogentrynodeids"` + Durableeventlogentrybranchids []*int64 `json:"durableeventlogentrybranchids"` +} + +func (q *Queries) CreateMatchesForSignalTriggers(ctx context.Context, db DBTX, arg CreateMatchesForSignalTriggersParams) ([]*V1Match, error) { + rows, err := db.Query(ctx, createMatchesForSignalTriggers, + arg.Tenantids, + arg.Kinds, + arg.Signaltaskids, + arg.Signaltaskinsertedats, + arg.Signaltaskexternalids, + arg.Signalexternalids, + arg.Signalkeys, + arg.Durableeventlogentrynodeids, + arg.Durableeventlogentrybranchids, + ) + if err != nil { + return nil, err + } + defer rows.Close() + var items []*V1Match + for rows.Next() { + var i V1Match + if err := rows.Scan( + &i.ID, + &i.TenantID, + &i.Kind, + &i.IsSatisfied, + &i.ExistingData, + &i.SignalTaskID, + &i.SignalTaskInsertedAt, + &i.SignalTaskExternalID, + &i.SignalExternalID, + &i.SignalKey, + &i.TriggerDagID, + &i.TriggerDagInsertedAt, + &i.TriggerStepID, + &i.TriggerStepIndex, + &i.TriggerExternalID, + &i.TriggerWorkflowRunID, + &i.TriggerParentTaskExternalID, + &i.TriggerParentTaskID, + &i.TriggerParentTaskInsertedAt, + 
&i.TriggerChildIndex, + &i.TriggerChildKey, + &i.TriggerExistingTaskID, + &i.TriggerExistingTaskInsertedAt, + &i.TriggerPriority, + &i.DurableEventLogEntryNodeID, + &i.DurableEventLogEntryBranchID, + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/pkg/repository/sqlcv1/matches.sql b/pkg/repository/sqlcv1/matches.sql index 22019aeba..3bd79e391 100644 --- a/pkg/repository/sqlcv1/matches.sql +++ b/pkg/repository/sqlcv1/matches.sql @@ -1,38 +1,3 @@ --- name: CreateMatchesForSignalTriggers :many -WITH input AS ( - SELECT - * - FROM - ( - SELECT - unnest(@tenantIds::uuid[]) AS tenant_id, - unnest(cast(@kinds::text[] as v1_match_kind[])) AS kind, - unnest(@signalTaskIds::bigint[]) AS signal_task_id, - unnest(@signalTaskInsertedAts::timestamptz[]) AS signal_task_inserted_at, - unnest(@signalExternalIds::uuid[]) AS signal_external_id, - unnest(@signalKeys::text[]) AS signal_key - ) AS subquery -) -INSERT INTO v1_match ( - tenant_id, - kind, - signal_task_id, - signal_task_inserted_at, - signal_external_id, - signal_key -) -SELECT - i.tenant_id, - i.kind, - i.signal_task_id, - i.signal_task_inserted_at, - i.signal_external_id, - i.signal_key -FROM - input i -RETURNING - *; - -- name: CreateMatchConditions :copyfrom INSERT INTO v1_match_condition ( v1_match_id, diff --git a/pkg/repository/sqlcv1/matches.sql.go b/pkg/repository/sqlcv1/matches.sql.go index e016a2d37..327921c04 100644 --- a/pkg/repository/sqlcv1/matches.sql.go +++ b/pkg/repository/sqlcv1/matches.sql.go @@ -49,102 +49,6 @@ type CreateMatchConditionsParams struct { Data []byte `json:"data"` } -const createMatchesForSignalTriggers = `-- name: CreateMatchesForSignalTriggers :many -WITH input AS ( - SELECT - tenant_id, kind, signal_task_id, signal_task_inserted_at, signal_external_id, signal_key - FROM - ( - SELECT - unnest($1::uuid[]) AS tenant_id, - unnest(cast($2::text[] as v1_match_kind[])) AS 
kind, - unnest($3::bigint[]) AS signal_task_id, - unnest($4::timestamptz[]) AS signal_task_inserted_at, - unnest($5::uuid[]) AS signal_external_id, - unnest($6::text[]) AS signal_key - ) AS subquery -) -INSERT INTO v1_match ( - tenant_id, - kind, - signal_task_id, - signal_task_inserted_at, - signal_external_id, - signal_key -) -SELECT - i.tenant_id, - i.kind, - i.signal_task_id, - i.signal_task_inserted_at, - i.signal_external_id, - i.signal_key -FROM - input i -RETURNING - id, tenant_id, kind, is_satisfied, existing_data, signal_task_id, signal_task_inserted_at, signal_external_id, signal_key, trigger_dag_id, trigger_dag_inserted_at, trigger_step_id, trigger_step_index, trigger_external_id, trigger_workflow_run_id, trigger_parent_task_external_id, trigger_parent_task_id, trigger_parent_task_inserted_at, trigger_child_index, trigger_child_key, trigger_existing_task_id, trigger_existing_task_inserted_at, trigger_priority -` - -type CreateMatchesForSignalTriggersParams struct { - Tenantids []uuid.UUID `json:"tenantids"` - Kinds []string `json:"kinds"` - Signaltaskids []int64 `json:"signaltaskids"` - Signaltaskinsertedats []pgtype.Timestamptz `json:"signaltaskinsertedats"` - Signalexternalids []uuid.UUID `json:"signalexternalids"` - Signalkeys []string `json:"signalkeys"` -} - -func (q *Queries) CreateMatchesForSignalTriggers(ctx context.Context, db DBTX, arg CreateMatchesForSignalTriggersParams) ([]*V1Match, error) { - rows, err := db.Query(ctx, createMatchesForSignalTriggers, - arg.Tenantids, - arg.Kinds, - arg.Signaltaskids, - arg.Signaltaskinsertedats, - arg.Signalexternalids, - arg.Signalkeys, - ) - if err != nil { - return nil, err - } - defer rows.Close() - var items []*V1Match - for rows.Next() { - var i V1Match - if err := rows.Scan( - &i.ID, - &i.TenantID, - &i.Kind, - &i.IsSatisfied, - &i.ExistingData, - &i.SignalTaskID, - &i.SignalTaskInsertedAt, - &i.SignalExternalID, - &i.SignalKey, - &i.TriggerDagID, - &i.TriggerDagInsertedAt, - &i.TriggerStepID, - 
&i.TriggerStepIndex, - &i.TriggerExternalID, - &i.TriggerWorkflowRunID, - &i.TriggerParentTaskExternalID, - &i.TriggerParentTaskID, - &i.TriggerParentTaskInsertedAt, - &i.TriggerChildIndex, - &i.TriggerChildKey, - &i.TriggerExistingTaskID, - &i.TriggerExistingTaskInsertedAt, - &i.TriggerPriority, - ); err != nil { - return nil, err - } - items = append(items, &i) - } - if err := rows.Err(); err != nil { - return nil, err - } - return items, nil -} - const getSatisfiedMatchConditions = `-- name: GetSatisfiedMatchConditions :many WITH input AS ( SELECT @@ -323,7 +227,7 @@ WITH match_counts AS ( GROUP BY v1_match_id ), result_matches AS ( SELECT - m.id, m.tenant_id, m.kind, m.is_satisfied, m.existing_data, m.signal_task_id, m.signal_task_inserted_at, m.signal_external_id, m.signal_key, m.trigger_dag_id, m.trigger_dag_inserted_at, m.trigger_step_id, m.trigger_step_index, m.trigger_external_id, m.trigger_workflow_run_id, m.trigger_parent_task_external_id, m.trigger_parent_task_id, m.trigger_parent_task_inserted_at, m.trigger_child_index, m.trigger_child_key, m.trigger_existing_task_id, m.trigger_existing_task_inserted_at, m.trigger_priority, + m.id, m.tenant_id, m.kind, m.is_satisfied, m.existing_data, m.signal_task_id, m.signal_task_inserted_at, m.signal_task_external_id, m.signal_external_id, m.signal_key, m.trigger_dag_id, m.trigger_dag_inserted_at, m.trigger_step_id, m.trigger_step_index, m.trigger_external_id, m.trigger_workflow_run_id, m.trigger_parent_task_external_id, m.trigger_parent_task_id, m.trigger_parent_task_inserted_at, m.trigger_child_index, m.trigger_child_key, m.trigger_existing_task_id, m.trigger_existing_task_inserted_at, m.trigger_priority, m.durable_event_log_entry_node_id, m.durable_event_log_entry_branch_id, CASE WHEN (mc.total_skip_groups > 0 AND mc.total_skip_groups = mc.satisfied_skip_groups) THEN 'SKIP' WHEN (mc.total_cancel_groups > 0 AND mc.total_cancel_groups = mc.satisfied_cancel_groups) THEN 'CANCEL' @@ -389,7 +293,7 @@ WITH 
match_counts AS ( id IN (SELECT id FROM deleted_conditions) ) SELECT - rm.id, rm.tenant_id, rm.kind, rm.is_satisfied, rm.existing_data, rm.signal_task_id, rm.signal_task_inserted_at, rm.signal_external_id, rm.signal_key, rm.trigger_dag_id, rm.trigger_dag_inserted_at, rm.trigger_step_id, rm.trigger_step_index, rm.trigger_external_id, rm.trigger_workflow_run_id, rm.trigger_parent_task_external_id, rm.trigger_parent_task_id, rm.trigger_parent_task_inserted_at, rm.trigger_child_index, rm.trigger_child_key, rm.trigger_existing_task_id, rm.trigger_existing_task_inserted_at, rm.trigger_priority, rm.action, + rm.id, rm.tenant_id, rm.kind, rm.is_satisfied, rm.existing_data, rm.signal_task_id, rm.signal_task_inserted_at, rm.signal_task_external_id, rm.signal_external_id, rm.signal_key, rm.trigger_dag_id, rm.trigger_dag_inserted_at, rm.trigger_step_id, rm.trigger_step_index, rm.trigger_external_id, rm.trigger_workflow_run_id, rm.trigger_parent_task_external_id, rm.trigger_parent_task_id, rm.trigger_parent_task_inserted_at, rm.trigger_child_index, rm.trigger_child_key, rm.trigger_existing_task_id, rm.trigger_existing_task_inserted_at, rm.trigger_priority, rm.durable_event_log_entry_node_id, rm.durable_event_log_entry_branch_id, rm.action, COALESCE(rm.existing_data || d.mc_aggregated_data, d.mc_aggregated_data)::jsonb AS mc_aggregated_data FROM result_matches rm @@ -405,6 +309,7 @@ type SaveSatisfiedMatchConditionsRow struct { ExistingData []byte `json:"existing_data"` SignalTaskID pgtype.Int8 `json:"signal_task_id"` SignalTaskInsertedAt pgtype.Timestamptz `json:"signal_task_inserted_at"` + SignalTaskExternalID *uuid.UUID `json:"signal_task_external_id"` SignalExternalID *uuid.UUID `json:"signal_external_id"` SignalKey pgtype.Text `json:"signal_key"` TriggerDagID pgtype.Int8 `json:"trigger_dag_id"` @@ -421,6 +326,8 @@ type SaveSatisfiedMatchConditionsRow struct { TriggerExistingTaskID pgtype.Int8 `json:"trigger_existing_task_id"` TriggerExistingTaskInsertedAt pgtype.Timestamptz 
`json:"trigger_existing_task_inserted_at"` TriggerPriority pgtype.Int4 `json:"trigger_priority"` + DurableEventLogEntryNodeID pgtype.Int8 `json:"durable_event_log_entry_node_id"` + DurableEventLogEntryBranchID pgtype.Int8 `json:"durable_event_log_entry_branch_id"` Action V1MatchConditionAction `json:"action"` McAggregatedData []byte `json:"mc_aggregated_data"` } @@ -446,6 +353,7 @@ func (q *Queries) SaveSatisfiedMatchConditions(ctx context.Context, db DBTX, mat &i.ExistingData, &i.SignalTaskID, &i.SignalTaskInsertedAt, + &i.SignalTaskExternalID, &i.SignalExternalID, &i.SignalKey, &i.TriggerDagID, @@ -462,6 +370,8 @@ func (q *Queries) SaveSatisfiedMatchConditions(ctx context.Context, db DBTX, mat &i.TriggerExistingTaskID, &i.TriggerExistingTaskInsertedAt, &i.TriggerPriority, + &i.DurableEventLogEntryNodeID, + &i.DurableEventLogEntryBranchID, &i.Action, &i.McAggregatedData, ); err != nil { diff --git a/pkg/repository/sqlcv1/models.go b/pkg/repository/sqlcv1/models.go index bc6d5c558..95b047468 100644 --- a/pkg/repository/sqlcv1/models.go +++ b/pkg/repository/sqlcv1/models.go @@ -987,6 +987,49 @@ func (ns NullV1ConcurrencyStrategy) Value() (driver.Value, error) { return string(ns.V1ConcurrencyStrategy), nil } +type V1DurableEventLogKind string + +const ( + V1DurableEventLogKindRUN V1DurableEventLogKind = "RUN" + V1DurableEventLogKindWAITFOR V1DurableEventLogKind = "WAIT_FOR" + V1DurableEventLogKindMEMO V1DurableEventLogKind = "MEMO" +) + +func (e *V1DurableEventLogKind) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = V1DurableEventLogKind(s) + case string: + *e = V1DurableEventLogKind(s) + default: + return fmt.Errorf("unsupported scan type for V1DurableEventLogKind: %T", src) + } + return nil +} + +type NullV1DurableEventLogKind struct { + V1DurableEventLogKind V1DurableEventLogKind `json:"v1_durable_event_log_kind"` + Valid bool `json:"valid"` // Valid is true if V1DurableEventLogKind is not NULL +} + +// Scan implements the Scanner 
interface. +func (ns *NullV1DurableEventLogKind) Scan(value interface{}) error { + if value == nil { + ns.V1DurableEventLogKind, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.V1DurableEventLogKind.Scan(value) +} + +// Value implements the driver Valuer interface. +func (ns NullV1DurableEventLogKind) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.V1DurableEventLogKind), nil +} + type V1EventType string const ( @@ -1053,6 +1096,8 @@ const ( V1EventTypeOlapRATELIMITERROR V1EventTypeOlap = "RATE_LIMIT_ERROR" V1EventTypeOlapSKIPPED V1EventTypeOlap = "SKIPPED" V1EventTypeOlapCOULDNOTSENDTOWORKER V1EventTypeOlap = "COULD_NOT_SEND_TO_WORKER" + V1EventTypeOlapDURABLEEVICTED V1EventTypeOlap = "DURABLE_EVICTED" + V1EventTypeOlapDURABLERESTORING V1EventTypeOlap = "DURABLE_RESTORING" ) func (e *V1EventTypeOlap) Scan(src interface{}) error { @@ -1484,11 +1529,13 @@ func (ns NullV1PayloadLocationOlap) Value() (driver.Value, error) { type V1PayloadType string const ( - V1PayloadTypeTASKINPUT V1PayloadType = "TASK_INPUT" - V1PayloadTypeDAGINPUT V1PayloadType = "DAG_INPUT" - V1PayloadTypeTASKOUTPUT V1PayloadType = "TASK_OUTPUT" - V1PayloadTypeTASKEVENTDATA V1PayloadType = "TASK_EVENT_DATA" - V1PayloadTypeUSEREVENTINPUT V1PayloadType = "USER_EVENT_INPUT" + V1PayloadTypeTASKINPUT V1PayloadType = "TASK_INPUT" + V1PayloadTypeDAGINPUT V1PayloadType = "DAG_INPUT" + V1PayloadTypeTASKOUTPUT V1PayloadType = "TASK_OUTPUT" + V1PayloadTypeTASKEVENTDATA V1PayloadType = "TASK_EVENT_DATA" + V1PayloadTypeUSEREVENTINPUT V1PayloadType = "USER_EVENT_INPUT" + V1PayloadTypeDURABLEEVENTLOGENTRYDATA V1PayloadType = "DURABLE_EVENT_LOG_ENTRY_DATA" + V1PayloadTypeDURABLEEVENTLOGENTRYRESULTDATA V1PayloadType = "DURABLE_EVENT_LOG_ENTRY_RESULT_DATA" ) func (e *V1PayloadType) Scan(src interface{}) error { @@ -1534,6 +1581,7 @@ const ( V1ReadableStatusOlapCANCELLED V1ReadableStatusOlap = "CANCELLED" V1ReadableStatusOlapFAILED V1ReadableStatusOlap = 
"FAILED" V1ReadableStatusOlapCOMPLETED V1ReadableStatusOlap = "COMPLETED" + V1ReadableStatusOlapEVICTED V1ReadableStatusOlap = "EVICTED" ) func (e *V1ReadableStatusOlap) Scan(src interface{}) error { @@ -3017,6 +3065,41 @@ type V1DagsOlap struct { TotalTasks int32 `json:"total_tasks"` } +type V1DurableEventLogBranchPoint struct { + TenantID uuid.UUID `json:"tenant_id"` + ID int64 `json:"id"` + InsertedAt pgtype.Timestamptz `json:"inserted_at"` + DurableTaskID int64 `json:"durable_task_id"` + DurableTaskInsertedAt pgtype.Timestamptz `json:"durable_task_inserted_at"` + FirstNodeIDInNewBranch int64 `json:"first_node_id_in_new_branch"` + ParentBranchID int64 `json:"parent_branch_id"` + NextBranchID int64 `json:"next_branch_id"` +} + +type V1DurableEventLogEntry struct { + TenantID uuid.UUID `json:"tenant_id"` + ExternalID uuid.UUID `json:"external_id"` + InsertedAt pgtype.Timestamptz `json:"inserted_at"` + ID int64 `json:"id"` + DurableTaskID int64 `json:"durable_task_id"` + DurableTaskInsertedAt pgtype.Timestamptz `json:"durable_task_inserted_at"` + Kind V1DurableEventLogKind `json:"kind"` + NodeID int64 `json:"node_id"` + BranchID int64 `json:"branch_id"` + IdempotencyKey []byte `json:"idempotency_key"` + IsSatisfied bool `json:"is_satisfied"` +} + +type V1DurableEventLogFile struct { + TenantID uuid.UUID `json:"tenant_id"` + DurableTaskID int64 `json:"durable_task_id"` + DurableTaskInsertedAt pgtype.Timestamptz `json:"durable_task_inserted_at"` + LatestInvocationCount int32 `json:"latest_invocation_count"` + LatestInsertedAt pgtype.Timestamptz `json:"latest_inserted_at"` + LatestNodeID int64 `json:"latest_node_id"` + LatestBranchID int64 `json:"latest_branch_id"` +} + type V1DurableSleep struct { ID int64 `json:"id"` TenantID uuid.UUID `json:"tenant_id"` @@ -3163,6 +3246,7 @@ type V1Match struct { ExistingData []byte `json:"existing_data"` SignalTaskID pgtype.Int8 `json:"signal_task_id"` SignalTaskInsertedAt pgtype.Timestamptz `json:"signal_task_inserted_at"` + 
SignalTaskExternalID *uuid.UUID `json:"signal_task_external_id"` SignalExternalID *uuid.UUID `json:"signal_external_id"` SignalKey pgtype.Text `json:"signal_key"` TriggerDagID pgtype.Int8 `json:"trigger_dag_id"` @@ -3179,6 +3263,8 @@ type V1Match struct { TriggerExistingTaskID pgtype.Int8 `json:"trigger_existing_task_id"` TriggerExistingTaskInsertedAt pgtype.Timestamptz `json:"trigger_existing_task_inserted_at"` TriggerPriority pgtype.Int4 `json:"trigger_priority"` + DurableEventLogEntryNodeID pgtype.Int8 `json:"durable_event_log_entry_node_id"` + DurableEventLogEntryBranchID pgtype.Int8 `json:"durable_event_log_entry_branch_id"` } type V1MatchCondition struct { @@ -3396,6 +3482,7 @@ type V1Task struct { ConcurrencyKeys []string `json:"concurrency_keys"` RetryBackoffFactor pgtype.Float8 `json:"retry_backoff_factor"` RetryMaxBackoff pgtype.Int4 `json:"retry_max_backoff"` + IsDurable pgtype.Bool `json:"is_durable"` DesiredWorkerLabel []byte `json:"desired_worker_label"` } @@ -3430,6 +3517,7 @@ type V1TaskEventsOlap struct { WorkerID *uuid.UUID `json:"worker_id"` AdditionalEventData pgtype.Text `json:"additional__event_data"` AdditionalEventMessage pgtype.Text `json:"additional__event_message"` + DurableInvocationCount int32 `json:"durable_invocation_count"` } type V1TaskEventsOlapTmp struct { @@ -3461,6 +3549,7 @@ type V1TaskRuntime struct { WorkerID *uuid.UUID `json:"worker_id"` TenantID uuid.UUID `json:"tenant_id"` TimeoutAt pgtype.Timestamp `json:"timeout_at"` + EvictedAt pgtype.Timestamptz `json:"evicted_at"` } type V1TaskRuntimeSlot struct { @@ -3593,6 +3682,7 @@ type Worker struct { Os pgtype.Text `json:"os"` RuntimeExtra pgtype.Text `json:"runtimeExtra"` SdkVersion pgtype.Text `json:"sdkVersion"` + DurableTaskDispatcherId *uuid.UUID `json:"durableTaskDispatcherId"` } type WorkerAssignEvent struct { diff --git a/pkg/repository/sqlcv1/olap.sql b/pkg/repository/sqlcv1/olap.sql index 87d7fa4df..4ff66cbb5 100644 --- a/pkg/repository/sqlcv1/olap.sql +++ 
b/pkg/repository/sqlcv1/olap.sql @@ -236,7 +236,8 @@ INSERT INTO v1_task_events_olap ( worker_id, additional__event_data, additional__event_message, - external_id + external_id, + durable_invocation_count ) VALUES ( $1, $2, @@ -251,7 +252,8 @@ INSERT INTO v1_task_events_olap ( $11, $12, $13, - $14 + $14, + $15 ); -- name: ReadTaskByExternalID :one @@ -332,6 +334,7 @@ WITH aggregated_events AS ( task_inserted_at, retry_count, event_type, + durable_invocation_count, MIN(event_timestamp) AS time_first_seen, MAX(event_timestamp) AS time_last_seen, COUNT(*) AS count, @@ -341,7 +344,7 @@ WITH aggregated_events AS ( tenant_id = @tenantId::uuid AND task_id = @taskId::bigint AND task_inserted_at = @taskInsertedAt::timestamptz - GROUP BY tenant_id, task_id, task_inserted_at, retry_count, event_type + GROUP BY tenant_id, task_id, task_inserted_at, retry_count, event_type, durable_invocation_count ) SELECT a.tenant_id, @@ -349,6 +352,7 @@ SELECT a.task_inserted_at, a.retry_count, a.event_type, + a.durable_invocation_count, a.time_first_seen, a.time_last_seen, a.count, @@ -384,6 +388,7 @@ WITH tasks AS ( task_inserted_at, retry_count, event_type, + durable_invocation_count, MIN(event_timestamp)::timestamptz AS time_first_seen, MAX(event_timestamp)::timestamptz AS time_last_seen, COUNT(*) AS count, @@ -392,7 +397,7 @@ WITH tasks AS ( WHERE tenant_id = @tenantId::uuid AND (task_id, task_inserted_at) IN (SELECT task_id, task_inserted_at FROM tasks) - GROUP BY tenant_id, task_id, task_inserted_at, retry_count, event_type + GROUP BY tenant_id, task_id, task_inserted_at, retry_count, event_type, durable_invocation_count ) SELECT a.tenant_id, @@ -400,6 +405,7 @@ SELECT a.task_inserted_at, a.retry_count, a.event_type, + a.durable_invocation_count, a.time_first_seen, a.time_last_seen, a.count, @@ -484,7 +490,11 @@ WITH selected_retry_count AS ( FROM relevant_events ORDER BY - readable_status DESC + CASE + WHEN readable_status IN ('COMPLETED', 'FAILED', 'CANCELLED') THEN 1 + ELSE 0 + END 
DESC, + event_timestamp DESC LIMIT 1 ), error_message AS ( SELECT @@ -769,7 +779,7 @@ WITH tenants AS ( e.task_id, e.task_inserted_at, e.retry_count, - MAX(e.readable_status) AS max_readable_status + v1_status_from_priority(MAX(v1_status_to_priority(e.readable_status))) AS max_readable_status FROM locked_events e JOIN @@ -891,10 +901,16 @@ WITH tenants AS ( tu.retry_count > t.latest_retry_count AND tu.max_readable_status != t.readable_status ) OR - -- if the retry count is equal to the latest retry count, update the status if the status is greater + -- if the retry count is equal to the latest retry count, update the status if the priority is higher ( tu.retry_count = t.latest_retry_count - AND tu.max_readable_status > t.readable_status + AND v1_status_to_priority(tu.max_readable_status) > v1_status_to_priority(t.readable_status) + ) OR + -- EVICTED is non-terminal and reversible (durable restore moves it back to RUNNING) + ( + tu.retry_count = t.latest_retry_count + AND t.readable_status = 'EVICTED' + AND tu.max_readable_status != 'EVICTED' ) ) RETURNING @@ -1070,7 +1086,8 @@ WITH tenants AS ( COUNT(t.id) FILTER (WHERE t.readable_status = 'FAILED') AS failed_count, COUNT(t.id) FILTER (WHERE t.readable_status = 'CANCELLED') AS cancelled_count, COUNT(t.id) FILTER (WHERE t.readable_status = 'QUEUED') AS queued_count, - COUNT(t.id) FILTER (WHERE t.readable_status = 'RUNNING') AS running_count + COUNT(t.id) FILTER (WHERE t.readable_status = 'RUNNING') AS running_count, + COUNT(t.id) FILTER (WHERE t.readable_status = 'EVICTED') AS evicted_count FROM locked_dags d LEFT JOIN @@ -1090,6 +1107,8 @@ WITH tenants AS ( WHEN dtc.task_count != dtc.total_tasks THEN 'RUNNING' -- If we have any running or queued tasks, we should set the status to running WHEN dtc.running_count > 0 OR dtc.queued_count > 0 THEN 'RUNNING' + -- If all tasks are evicted, mark DAG as evicted + WHEN dtc.evicted_count = dtc.task_count AND dtc.task_count = dtc.total_tasks THEN 'EVICTED' WHEN 
dtc.failed_count > 0 THEN 'FAILED' WHEN dtc.cancelled_count > 0 THEN 'CANCELLED' WHEN dtc.completed_count = dtc.task_count THEN 'COMPLETED' @@ -1372,7 +1391,8 @@ SELECT COUNT(*) FILTER (WHERE readable_status = 'RUNNING') AS total_running, COUNT(*) FILTER (WHERE readable_status = 'COMPLETED') AS total_completed, COUNT(*) FILTER (WHERE readable_status = 'CANCELLED') AS total_cancelled, - COUNT(*) FILTER (WHERE readable_status = 'FAILED') AS total_failed + COUNT(*) FILTER (WHERE readable_status = 'FAILED') AS total_failed, + COUNT(*) FILTER (WHERE readable_status = 'EVICTED') AS total_evicted FROM v1_statuses_olap WHERE tenant_id = @tenantId::UUID diff --git a/pkg/repository/sqlcv1/olap.sql.go b/pkg/repository/sqlcv1/olap.sql.go index 6015f06b0..26638c0eb 100644 --- a/pkg/repository/sqlcv1/olap.sql.go +++ b/pkg/repository/sqlcv1/olap.sql.go @@ -525,6 +525,7 @@ type CreateTaskEventsOLAPParams struct { AdditionalEventData pgtype.Text `json:"additional__event_data"` AdditionalEventMessage pgtype.Text `json:"additional__event_message"` ExternalID *uuid.UUID `json:"external_id"` + DurableInvocationCount int32 `json:"durable_invocation_count"` } type CreateTaskEventsOLAPTmpParams struct { @@ -1177,7 +1178,8 @@ SELECT COUNT(*) FILTER (WHERE readable_status = 'RUNNING') AS total_running, COUNT(*) FILTER (WHERE readable_status = 'COMPLETED') AS total_completed, COUNT(*) FILTER (WHERE readable_status = 'CANCELLED') AS total_cancelled, - COUNT(*) FILTER (WHERE readable_status = 'FAILED') AS total_failed + COUNT(*) FILTER (WHERE readable_status = 'FAILED') AS total_failed, + COUNT(*) FILTER (WHERE readable_status = 'EVICTED') AS total_evicted FROM v1_statuses_olap WHERE tenant_id = $1::UUID @@ -1213,6 +1215,7 @@ type GetTenantStatusMetricsRow struct { TotalCompleted int64 `json:"total_completed"` TotalCancelled int64 `json:"total_cancelled"` TotalFailed int64 `json:"total_failed"` + TotalEvicted int64 `json:"total_evicted"` } func (q *Queries) GetTenantStatusMetrics(ctx 
context.Context, db DBTX, arg GetTenantStatusMetricsParams) (*GetTenantStatusMetricsRow, error) { @@ -1234,6 +1237,7 @@ func (q *Queries) GetTenantStatusMetrics(ctx context.Context, db DBTX, arg GetTe &i.TotalCompleted, &i.TotalCancelled, &i.TotalFailed, + &i.TotalEvicted, ) return &i, err } @@ -1614,6 +1618,7 @@ WITH aggregated_events AS ( task_inserted_at, retry_count, event_type, + durable_invocation_count, MIN(event_timestamp) AS time_first_seen, MAX(event_timestamp) AS time_last_seen, COUNT(*) AS count, @@ -1623,7 +1628,7 @@ WITH aggregated_events AS ( tenant_id = $1::uuid AND task_id = $2::bigint AND task_inserted_at = $3::timestamptz - GROUP BY tenant_id, task_id, task_inserted_at, retry_count, event_type + GROUP BY tenant_id, task_id, task_inserted_at, retry_count, event_type, durable_invocation_count ) SELECT a.tenant_id, @@ -1631,6 +1636,7 @@ SELECT a.task_inserted_at, a.retry_count, a.event_type, + a.durable_invocation_count, a.time_first_seen, a.time_last_seen, a.count, @@ -1664,6 +1670,7 @@ type ListTaskEventsRow struct { TaskInsertedAt pgtype.Timestamptz `json:"task_inserted_at"` RetryCount int32 `json:"retry_count"` EventType V1EventTypeOlap `json:"event_type"` + DurableInvocationCount int32 `json:"durable_invocation_count"` TimeFirstSeen interface{} `json:"time_first_seen"` TimeLastSeen interface{} `json:"time_last_seen"` Count int64 `json:"count"` @@ -1693,6 +1700,7 @@ func (q *Queries) ListTaskEvents(ctx context.Context, db DBTX, arg ListTaskEvent &i.TaskInsertedAt, &i.RetryCount, &i.EventType, + &i.DurableInvocationCount, &i.TimeFirstSeen, &i.TimeLastSeen, &i.Count, @@ -1731,6 +1739,7 @@ WITH tasks AS ( task_inserted_at, retry_count, event_type, + durable_invocation_count, MIN(event_timestamp)::timestamptz AS time_first_seen, MAX(event_timestamp)::timestamptz AS time_last_seen, COUNT(*) AS count, @@ -1739,7 +1748,7 @@ WITH tasks AS ( WHERE tenant_id = $2::uuid AND (task_id, task_inserted_at) IN (SELECT task_id, task_inserted_at FROM tasks) - 
GROUP BY tenant_id, task_id, task_inserted_at, retry_count, event_type + GROUP BY tenant_id, task_id, task_inserted_at, retry_count, event_type, durable_invocation_count ) SELECT a.tenant_id, @@ -1747,6 +1756,7 @@ SELECT a.task_inserted_at, a.retry_count, a.event_type, + a.durable_invocation_count, a.time_first_seen, a.time_last_seen, a.count, @@ -1783,6 +1793,7 @@ type ListTaskEventsForWorkflowRunRow struct { TaskInsertedAt pgtype.Timestamptz `json:"task_inserted_at"` RetryCount int32 `json:"retry_count"` EventType V1EventTypeOlap `json:"event_type"` + DurableInvocationCount int32 `json:"durable_invocation_count"` TimeFirstSeen pgtype.Timestamptz `json:"time_first_seen"` TimeLastSeen pgtype.Timestamptz `json:"time_last_seen"` Count int64 `json:"count"` @@ -1814,6 +1825,7 @@ func (q *Queries) ListTaskEventsForWorkflowRun(ctx context.Context, db DBTX, arg &i.TaskInsertedAt, &i.RetryCount, &i.EventType, + &i.DurableInvocationCount, &i.TimeFirstSeen, &i.TimeLastSeen, &i.Count, @@ -2152,7 +2164,7 @@ WITH input AS ( JOIN v1_dags_olap d ON (r.id, r.inserted_at) = (d.id, d.inserted_at) WHERE r.tenant_id = $4::uuid AND r.kind = 'DAG' ), relevant_events AS ( - SELECT r.run_id, e.tenant_id, e.id, e.inserted_at, e.external_id, e.task_id, e.task_inserted_at, e.event_type, e.workflow_id, e.event_timestamp, e.readable_status, e.retry_count, e.error_message, e.output, e.worker_id, e.additional__event_data, e.additional__event_message + SELECT r.run_id, e.tenant_id, e.id, e.inserted_at, e.external_id, e.task_id, e.task_inserted_at, e.event_type, e.workflow_id, e.event_timestamp, e.readable_status, e.retry_count, e.error_message, e.output, e.worker_id, e.additional__event_data, e.additional__event_message, e.durable_invocation_count FROM runs r JOIN v1_dag_to_task_olap dt ON (r.dag_id, r.inserted_at) = (dt.dag_id, dt.dag_inserted_at) JOIN v1_task_events_olap e ON (e.task_id, e.task_inserted_at) = (dt.task_id, dt.task_inserted_at) @@ -2366,7 +2378,7 @@ WITH selected_retry_count AS ( 
LIMIT 1 ), relevant_events AS ( SELECT - tenant_id, id, inserted_at, external_id, task_id, task_inserted_at, event_type, workflow_id, event_timestamp, readable_status, retry_count, error_message, output, worker_id, additional__event_data, additional__event_message + tenant_id, id, inserted_at, external_id, task_id, task_inserted_at, event_type, workflow_id, event_timestamp, readable_status, retry_count, error_message, output, worker_id, additional__event_data, additional__event_message, durable_invocation_count FROM v1_task_events_olap WHERE @@ -2410,7 +2422,11 @@ WITH selected_retry_count AS ( FROM relevant_events ORDER BY - readable_status DESC + CASE + WHEN readable_status IN ('COMPLETED', 'FAILED', 'CANCELLED') THEN 1 + ELSE 0 + END DESC, + event_timestamp DESC LIMIT 1 ), error_message AS ( SELECT @@ -2596,7 +2612,7 @@ WITH input AS ( t.tenant_id = $4::uuid ), relevant_events AS ( SELECT - e.tenant_id, e.id, e.inserted_at, e.external_id, e.task_id, e.task_inserted_at, e.event_type, e.workflow_id, e.event_timestamp, e.readable_status, e.retry_count, e.error_message, e.output, e.worker_id, e.additional__event_data, e.additional__event_message + e.tenant_id, e.id, e.inserted_at, e.external_id, e.task_id, e.task_inserted_at, e.event_type, e.workflow_id, e.event_timestamp, e.readable_status, e.retry_count, e.error_message, e.output, e.worker_id, e.additional__event_data, e.additional__event_message, e.durable_invocation_count FROM v1_task_events_olap e JOIN @@ -3112,7 +3128,7 @@ WITH runs AS ( AND lt.task_id IS NOT NULL ), relevant_events AS ( SELECT - e.tenant_id, e.id, e.inserted_at, e.external_id, e.task_id, e.task_inserted_at, e.event_type, e.workflow_id, e.event_timestamp, e.readable_status, e.retry_count, e.error_message, e.output, e.worker_id, e.additional__event_data, e.additional__event_message + e.tenant_id, e.id, e.inserted_at, e.external_id, e.task_id, e.task_inserted_at, e.event_type, e.workflow_id, e.event_timestamp, e.readable_status, e.retry_count, 
e.error_message, e.output, e.worker_id, e.additional__event_data, e.additional__event_message, e.durable_invocation_count FROM runs r JOIN v1_dag_to_task_olap dt ON r.dag_id = dt.dag_id AND r.inserted_at = dt.dag_inserted_at JOIN v1_task_events_olap e ON (e.task_id, e.task_inserted_at) = (dt.task_id, dt.task_inserted_at) @@ -3121,7 +3137,7 @@ WITH runs AS ( UNION ALL SELECT - e.tenant_id, e.id, e.inserted_at, e.external_id, e.task_id, e.task_inserted_at, e.event_type, e.workflow_id, e.event_timestamp, e.readable_status, e.retry_count, e.error_message, e.output, e.worker_id, e.additional__event_data, e.additional__event_message + e.tenant_id, e.id, e.inserted_at, e.external_id, e.task_id, e.task_inserted_at, e.event_type, e.workflow_id, e.event_timestamp, e.readable_status, e.retry_count, e.error_message, e.output, e.worker_id, e.additional__event_data, e.additional__event_message, e.durable_invocation_count FROM runs r JOIN v1_task_events_olap e ON e.task_id = r.task_id AND e.task_inserted_at = r.inserted_at WHERE r.task_id IS NOT NULL @@ -3325,7 +3341,8 @@ WITH tenants AS ( COUNT(t.id) FILTER (WHERE t.readable_status = 'FAILED') AS failed_count, COUNT(t.id) FILTER (WHERE t.readable_status = 'CANCELLED') AS cancelled_count, COUNT(t.id) FILTER (WHERE t.readable_status = 'QUEUED') AS queued_count, - COUNT(t.id) FILTER (WHERE t.readable_status = 'RUNNING') AS running_count + COUNT(t.id) FILTER (WHERE t.readable_status = 'RUNNING') AS running_count, + COUNT(t.id) FILTER (WHERE t.readable_status = 'EVICTED') AS evicted_count FROM locked_dags d LEFT JOIN @@ -3345,6 +3362,8 @@ WITH tenants AS ( WHEN dtc.task_count != dtc.total_tasks THEN 'RUNNING' -- If we have any running or queued tasks, we should set the status to running WHEN dtc.running_count > 0 OR dtc.queued_count > 0 THEN 'RUNNING' + -- If all tasks are evicted, mark DAG as evicted + WHEN dtc.evicted_count = dtc.task_count AND dtc.task_count = dtc.total_tasks THEN 'EVICTED' WHEN dtc.failed_count > 0 THEN 'FAILED' 
WHEN dtc.cancelled_count > 0 THEN 'CANCELLED' WHEN dtc.completed_count = dtc.task_count THEN 'COMPLETED' @@ -3571,7 +3590,7 @@ WITH tenants AS ( e.task_id, e.task_inserted_at, e.retry_count, - MAX(e.readable_status) AS max_readable_status + v1_status_from_priority(MAX(v1_status_to_priority(e.readable_status))) AS max_readable_status FROM locked_events e JOIN @@ -3693,10 +3712,16 @@ WITH tenants AS ( tu.retry_count > t.latest_retry_count AND tu.max_readable_status != t.readable_status ) OR - -- if the retry count is equal to the latest retry count, update the status if the status is greater + -- if the retry count is equal to the latest retry count, update the status if the priority is higher ( tu.retry_count = t.latest_retry_count - AND tu.max_readable_status > t.readable_status + AND v1_status_to_priority(tu.max_readable_status) > v1_status_to_priority(t.readable_status) + ) OR + -- EVICTED is non-terminal and reversible (durable restore moves it back to RUNNING) + ( + tu.retry_count = t.latest_retry_count + AND t.readable_status = 'EVICTED' + AND tu.max_readable_status != 'EVICTED' ) ) RETURNING diff --git a/pkg/repository/sqlcv1/queue.sql b/pkg/repository/sqlcv1/queue.sql index 886a029f2..905dc8880 100644 --- a/pkg/repository/sqlcv1/queue.sql +++ b/pkg/repository/sqlcv1/queue.sql @@ -200,7 +200,8 @@ WITH input AS ( i.worker_id, t.tenant_id, t.step_id, - CURRENT_TIMESTAMP + convert_duration_to_interval(t.step_timeout) AS timeout_at + CURRENT_TIMESTAMP + convert_duration_to_interval(t.step_timeout) AS timeout_at, + t.is_durable FROM v1_task t JOIN @@ -226,9 +227,14 @@ WITH input AS ( t.timeout_at FROM updated_tasks t - ON CONFLICT (task_id, task_inserted_at, retry_count) DO NOTHING + ON CONFLICT (task_id, task_inserted_at, retry_count) DO UPDATE + SET + evicted_at = NULL, + worker_id = EXCLUDED.worker_id, + timeout_at = EXCLUDED.timeout_at + WHERE v1_task_runtime.evicted_at IS NOT NULL -- only return the task ids that were successfully assigned - RETURNING 
task_id, worker_id + RETURNING task_id, task_inserted_at, retry_count, worker_id ), slot_requests AS ( SELECT t.id, @@ -268,9 +274,14 @@ WITH input AS ( ) SELECT asr.task_id, - asr.worker_id + asr.task_inserted_at, + asr.worker_id, + ut.is_durable FROM - assigned_tasks asr; + assigned_tasks asr +JOIN + updated_tasks ut ON (asr.task_id, asr.task_inserted_at, asr.retry_count) = (ut.id, ut.inserted_at, ut.retry_count) +; -- name: GetDesiredLabels :many SELECT diff --git a/pkg/repository/sqlcv1/queue.sql.go b/pkg/repository/sqlcv1/queue.sql.go index 29fe211f8..c06cd188a 100644 --- a/pkg/repository/sqlcv1/queue.sql.go +++ b/pkg/repository/sqlcv1/queue.sql.go @@ -876,7 +876,8 @@ WITH input AS ( i.worker_id, t.tenant_id, t.step_id, - CURRENT_TIMESTAMP + convert_duration_to_interval(t.step_timeout) AS timeout_at + CURRENT_TIMESTAMP + convert_duration_to_interval(t.step_timeout) AS timeout_at, + t.is_durable FROM v1_task t JOIN @@ -902,9 +903,14 @@ WITH input AS ( t.timeout_at FROM updated_tasks t - ON CONFLICT (task_id, task_inserted_at, retry_count) DO NOTHING + ON CONFLICT (task_id, task_inserted_at, retry_count) DO UPDATE + SET + evicted_at = NULL, + worker_id = EXCLUDED.worker_id, + timeout_at = EXCLUDED.timeout_at + WHERE v1_task_runtime.evicted_at IS NOT NULL -- only return the task ids that were successfully assigned - RETURNING task_id, worker_id + RETURNING task_id, task_inserted_at, retry_count, worker_id ), slot_requests AS ( SELECT t.id, @@ -944,9 +950,13 @@ WITH input AS ( ) SELECT asr.task_id, - asr.worker_id + asr.task_inserted_at, + asr.worker_id, + ut.is_durable FROM assigned_tasks asr +JOIN + updated_tasks ut ON (asr.task_id, asr.task_inserted_at, asr.retry_count) = (ut.id, ut.inserted_at, ut.retry_count) ` type UpdateTasksToAssignedParams struct { @@ -958,8 +968,10 @@ type UpdateTasksToAssignedParams struct { } type UpdateTasksToAssignedRow struct { - TaskID int64 `json:"task_id"` - WorkerID *uuid.UUID `json:"worker_id"` + TaskID int64 `json:"task_id"` + 
TaskInsertedAt pgtype.Timestamptz `json:"task_inserted_at"` + WorkerID *uuid.UUID `json:"worker_id"` + IsDurable pgtype.Bool `json:"is_durable"` } func (q *Queries) UpdateTasksToAssigned(ctx context.Context, db DBTX, arg UpdateTasksToAssignedParams) ([]*UpdateTasksToAssignedRow, error) { @@ -977,7 +989,12 @@ func (q *Queries) UpdateTasksToAssigned(ctx context.Context, db DBTX, arg Update var items []*UpdateTasksToAssignedRow for rows.Next() { var i UpdateTasksToAssignedRow - if err := rows.Scan(&i.TaskID, &i.WorkerID); err != nil { + if err := rows.Scan( + &i.TaskID, + &i.TaskInsertedAt, + &i.WorkerID, + &i.IsDurable, + ); err != nil { return nil, err } items = append(items, &i) diff --git a/pkg/repository/sqlcv1/sqlc.yaml b/pkg/repository/sqlcv1/sqlc.yaml index 966444d15..ac3bb7b28 100644 --- a/pkg/repository/sqlcv1/sqlc.yaml +++ b/pkg/repository/sqlcv1/sqlc.yaml @@ -36,6 +36,7 @@ sql: - pg_health.sql - users.sql - workflow_schedules.sql + - durable_event_log.sql schema: - ../../../sql/schema/v0.sql - ../../../sql/schema/v1-core.sql diff --git a/pkg/repository/sqlcv1/tasks-overwrite.go b/pkg/repository/sqlcv1/tasks-overwrite.go index 1a4941f08..b20c282b9 100644 --- a/pkg/repository/sqlcv1/tasks-overwrite.go +++ b/pkg/repository/sqlcv1/tasks-overwrite.go @@ -11,46 +11,42 @@ import ( const createTasks = `-- name: CreateTasks :many WITH input AS ( SELECT - tenant_id, queue, action_id, step_id, step_readable_id, workflow_id, schedule_timeout, step_timeout, priority, sticky, desired_worker_id, external_id, display_name, input, retry_count, additional_metadata, initial_state, dag_id, dag_inserted_at, concurrency_parent_strategy_ids, concurrency_strategy_ids, concurrency_keys, initial_state_reason, parent_task_external_id, parent_task_id, parent_task_inserted_at, child_index, child_key, step_index, retry_backoff_factor, retry_max_backoff, workflow_version_id, workflow_run_id, desired_worker_label - FROM - ( - SELECT - unnest($1::uuid[]) AS tenant_id, - unnest($2::text[]) 
AS queue, - unnest($3::text[]) AS action_id, - unnest($4::uuid[]) AS step_id, - unnest($5::text[]) AS step_readable_id, - unnest($6::uuid[]) AS workflow_id, - unnest($7::text[]) AS schedule_timeout, - unnest($8::text[]) AS step_timeout, - unnest($9::integer[]) AS priority, - unnest(cast($10::text[] as v1_sticky_strategy[])) AS sticky, - unnest($11::uuid[]) AS desired_worker_id, - unnest($12::uuid[]) AS external_id, - unnest($13::text[]) AS display_name, - unnest($14::jsonb[]) AS input, - unnest($15::integer[]) AS retry_count, - unnest($16::jsonb[]) AS additional_metadata, - unnest(cast($17::text[] as v1_task_initial_state[])) AS initial_state, - -- NOTE: these are nullable, so sqlc doesn't support casting to a type - unnest($18::bigint[]) AS dag_id, - unnest($19::timestamptz[]) AS dag_inserted_at, - unnest_nd_1d($20::bigint[][]) AS concurrency_parent_strategy_ids, - unnest_nd_1d($21::bigint[][]) AS concurrency_strategy_ids, - unnest_nd_1d($22::text[][]) AS concurrency_keys, - unnest($23::text[]) AS initial_state_reason, - unnest($24::uuid[]) AS parent_task_external_id, - unnest($25::bigint[]) AS parent_task_id, - unnest($26::timestamptz[]) AS parent_task_inserted_at, - unnest($27::integer[]) AS child_index, - unnest($28::text[]) AS child_key, - unnest($29::bigint[]) AS step_index, - unnest($30::double precision[]) AS retry_backoff_factor, - unnest($31::integer[]) AS retry_max_backoff, - unnest($32::uuid[]) AS workflow_version_id, - unnest($33::uuid[]) AS workflow_run_id, - unnest($34::jsonb[]) AS desired_worker_label - ) AS subquery + unnest($1::uuid[]) AS tenant_id, + unnest($2::text[]) AS queue, + unnest($3::text[]) AS action_id, + unnest($4::uuid[]) AS step_id, + unnest($5::text[]) AS step_readable_id, + unnest($6::uuid[]) AS workflow_id, + unnest($7::text[]) AS schedule_timeout, + unnest($8::text[]) AS step_timeout, + unnest($9::integer[]) AS priority, + unnest(cast($10::text[] as v1_sticky_strategy[])) AS sticky, + unnest($11::uuid[]) AS desired_worker_id, + 
unnest($12::uuid[]) AS external_id, + unnest($13::text[]) AS display_name, + unnest($14::jsonb[]) AS input, + unnest($15::integer[]) AS retry_count, + unnest($16::jsonb[]) AS additional_metadata, + unnest(cast($17::text[] as v1_task_initial_state[])) AS initial_state, + -- NOTE: these are nullable, so sqlc doesn't support casting to a type + unnest($18::bigint[]) AS dag_id, + unnest($19::timestamptz[]) AS dag_inserted_at, + unnest_nd_1d($20::bigint[][]) AS concurrency_parent_strategy_ids, + unnest_nd_1d($21::bigint[][]) AS concurrency_strategy_ids, + unnest_nd_1d($22::text[][]) AS concurrency_keys, + unnest($23::text[]) AS initial_state_reason, + unnest($24::uuid[]) AS parent_task_external_id, + unnest($25::bigint[]) AS parent_task_id, + unnest($26::timestamptz[]) AS parent_task_inserted_at, + unnest($27::integer[]) AS child_index, + unnest($28::text[]) AS child_key, + unnest($29::bigint[]) AS step_index, + unnest($30::double precision[]) AS retry_backoff_factor, + unnest($31::integer[]) AS retry_max_backoff, + unnest($32::uuid[]) AS workflow_version_id, + unnest($33::uuid[]) AS workflow_run_id, + unnest($34::boolean[]) AS is_durable, + unnest($35::jsonb[]) AS desired_worker_label ) INSERT INTO v1_task ( tenant_id, @@ -86,6 +82,7 @@ INSERT INTO v1_task ( retry_max_backoff, workflow_version_id, workflow_run_id, + is_durable, desired_worker_label ) SELECT @@ -122,11 +119,12 @@ SELECT i.retry_max_backoff, i.workflow_version_id, i.workflow_run_id, + i.is_durable, i.desired_worker_label FROM input i RETURNING - id, inserted_at, tenant_id, queue, action_id, step_id, step_readable_id, workflow_id, schedule_timeout, step_timeout, priority, sticky, desired_worker_id, external_id, display_name, input, retry_count, internal_retry_count, app_retry_count, additional_metadata, initial_state, dag_id, dag_inserted_at, concurrency_parent_strategy_ids, concurrency_strategy_ids, concurrency_keys, initial_state_reason, parent_task_external_id, parent_task_id, parent_task_inserted_at, 
child_index, child_key, step_index, retry_backoff_factor, retry_max_backoff, workflow_version_id, workflow_run_id, desired_worker_label + id, inserted_at, tenant_id, queue, action_id, step_id, step_readable_id, workflow_id, schedule_timeout, step_timeout, priority, sticky, desired_worker_id, external_id, display_name, input, retry_count, internal_retry_count, app_retry_count, additional_metadata, initial_state, dag_id, dag_inserted_at, concurrency_parent_strategy_ids, concurrency_strategy_ids, concurrency_keys, initial_state_reason, parent_task_external_id, parent_task_id, parent_task_inserted_at, child_index, child_key, step_index, retry_backoff_factor, retry_max_backoff, workflow_version_id, workflow_run_id, is_durable, desired_worker_label ` type CreateTasksParams struct { @@ -166,6 +164,7 @@ type CreateTasksParams struct { RetryMaxBackoff []pgtype.Int4 `json:"retryMaxBackoff"` WorkflowVersionIds []uuid.UUID `json:"workflowVersionIds"` WorkflowRunIds []uuid.UUID `json:"workflowRunIds"` + IsDurables []bool `json:"isDurables"` DesiredWorkerLabels [][]byte `json:"desiredWorkerLabels"` } @@ -217,6 +216,7 @@ func (q *Queries) CreateTasks(ctx context.Context, db DBTX, arg CreateTasksParam arg.RetryMaxBackoff, arg.WorkflowVersionIds, arg.WorkflowRunIds, + arg.IsDurables, arg.DesiredWorkerLabels, ) if err != nil { @@ -264,6 +264,7 @@ func (q *Queries) CreateTasks(ctx context.Context, db DBTX, arg CreateTasksParam &i.RetryMaxBackoff, &i.WorkflowVersionID, &i.WorkflowRunID, + &i.IsDurable, &i.DesiredWorkerLabel, ); err != nil { return nil, err @@ -400,17 +401,12 @@ func (q *Queries) CreateTaskEvents(ctx context.Context, db DBTX, arg CreateTaskE const replayTasks = `-- name: ReplayTasks :many WITH input AS ( SELECT - task_id, task_inserted_at, input, initial_state, concurrency_keys, initial_state_reason - FROM - ( - SELECT - unnest($1::bigint[]) AS task_id, - unnest($2::timestamptz[]) AS task_inserted_at, - unnest($3::jsonb[]) AS input, - unnest(cast($4::text[] as 
v1_task_initial_state[])) AS initial_state, - unnest_nd_1d($5::text[][]) AS concurrency_keys, - unnest($6::text[]) AS initial_state_reason - ) AS subquery + unnest($1::bigint[]) AS task_id, + unnest($2::timestamptz[]) AS task_inserted_at, + unnest($3::jsonb[]) AS input, + unnest(cast($4::text[] as v1_task_initial_state[])) AS initial_state, + unnest_nd_1d($5::text[][]) AS concurrency_keys, + unnest($6::text[]) AS initial_state_reason ) UPDATE v1_task @@ -506,17 +502,12 @@ func (q *Queries) ReplayTasks(ctx context.Context, db DBTX, arg ReplayTasksParam const createTaskExpressionEvals = `-- name: CreateTaskExpressionEvals :exec WITH input AS ( SELECT - task_id, task_inserted_at, key, value_str, value_int, kind - FROM - ( - SELECT - unnest($1::bigint[]) AS task_id, - unnest($2::timestamptz[]) AS task_inserted_at, - unnest($3::text[]) AS key, - unnest($4::text[]) AS value_str, - unnest($5::integer[]) AS value_int, - unnest(cast($6::text[] as "StepExpressionKind"[])) AS kind - ) AS subquery + unnest($1::bigint[]) AS task_id, + unnest($2::timestamptz[]) AS task_inserted_at, + unnest($3::text[]) AS key, + unnest($4::text[]) AS value_str, + unnest($5::integer[]) AS value_int, + unnest(cast($6::text[] as "StepExpressionKind"[])) AS kind ) INSERT INTO v1_task_expression_eval ( key, @@ -565,14 +556,9 @@ func (q *Queries) CreateTaskExpressionEvals(ctx context.Context, db DBTX, arg Cr const lockParentConcurrencySlots = `-- name: LockParentConcurrencySlots :batchexec WITH input AS ( SELECT - task_id, task_inserted_at, retry_count - FROM - ( - SELECT - unnest($1::bigint[]) AS task_id, - unnest($2::timestamptz[]) AS task_inserted_at, - unnest($3::integer[]) AS retry_count - ) AS subquery + unnest($1::bigint[]) AS task_id, + unnest($2::timestamptz[]) AS task_inserted_at, + unnest($3::integer[]) AS retry_count ), concurrency_slots_to_delete AS ( SELECT task_id, task_inserted_at, task_retry_count, parent_strategy_id, workflow_version_id, workflow_run_id @@ -597,14 +583,9 @@ FOR 
UPDATE const releaseConcurrencySlots = `-- name: ReleaseConcurrencySlots :batchexec WITH input AS ( SELECT - task_id, task_inserted_at, retry_count - FROM - ( - SELECT - unnest($1::bigint[]) AS task_id, - unnest($2::timestamptz[]) AS task_inserted_at, - unnest($3::integer[]) AS retry_count - ) AS subquery + unnest($1::bigint[]) AS task_id, + unnest($2::timestamptz[]) AS task_inserted_at, + unnest($3::integer[]) AS retry_count ), concurrency_slots_to_delete AS ( SELECT task_id, task_inserted_at, task_retry_count @@ -625,14 +606,9 @@ WHERE const releaseQueueItems = `-- name: ReleaseQueueItems :batchexec WITH input AS ( SELECT - task_id, task_inserted_at, retry_count - FROM - ( - SELECT - unnest($1::bigint[]) AS task_id, - unnest($2::timestamptz[]) AS task_inserted_at, - unnest($3::integer[]) AS retry_count - ) AS subquery + unnest($1::bigint[]) AS task_id, + unnest($2::timestamptz[]) AS task_inserted_at, + unnest($3::integer[]) AS retry_count ), queue_items_to_delete AS ( SELECT task_id, task_inserted_at, retry_count @@ -653,14 +629,9 @@ WHERE const releaseRateLimitedQueueItems = `-- name: ReleaseRateLimitedQueueItems :batchexec WITH input AS ( SELECT - task_id, task_inserted_at, retry_count - FROM - ( - SELECT - unnest($1::bigint[]) AS task_id, - unnest($2::timestamptz[]) AS task_inserted_at, - unnest($3::integer[]) AS retry_count - ) AS subquery + unnest($1::bigint[]) AS task_id, + unnest($2::timestamptz[]) AS task_inserted_at, + unnest($3::integer[]) AS retry_count ), rate_limited_items_to_delete AS ( SELECT task_id, task_inserted_at, retry_count @@ -681,14 +652,9 @@ WHERE const releaseRetryQueueItems = `-- name: ReleaseRetryQueueItems :batchexec WITH input AS ( SELECT - task_id, task_inserted_at, retry_count - FROM - ( - SELECT - unnest($1::bigint[]) AS task_id, - unnest($2::timestamptz[]) AS task_inserted_at, - unnest($3::integer[]) AS retry_count - ) AS subquery + unnest($1::bigint[]) AS task_id, + unnest($2::timestamptz[]) AS task_inserted_at, + 
unnest($3::integer[]) AS retry_count ), retry_queue_items_to_delete AS ( SELECT task_id, task_inserted_at, task_retry_count @@ -714,14 +680,9 @@ WHERE const releaseTasks = `-- name: ReleaseTasks :batchmany WITH input AS ( SELECT - task_id, task_inserted_at, retry_count - FROM - ( - SELECT - unnest($1::bigint[]) AS task_id, - unnest($2::timestamptz[]) AS task_inserted_at, - unnest($3::integer[]) AS retry_count - ) AS subquery + unnest($1::bigint[]) AS task_id, + unnest($2::timestamptz[]) AS task_inserted_at, + unnest($3::integer[]) AS retry_count ), runtimes_to_delete AS ( SELECT task_id, @@ -850,6 +811,109 @@ func (q *Queries) ReleaseTasks(ctx context.Context, db DBTX, arg ReleaseTasksPar return items, nil } +const restoreEvictedTasks = `-- name: RestoreEvictedTasks :many +WITH input AS ( + SELECT + unnest($1::bigint[]) AS task_id, + unnest($2::timestamptz[]) AS task_inserted_at, + unnest($3::integer[]) AS retry_count +), evicted_runtimes AS ( + SELECT + r.task_id, + r.task_inserted_at, + r.retry_count + FROM + v1_task_runtime r + JOIN + input i ON r.task_id = i.task_id + AND r.task_inserted_at = i.task_inserted_at + AND r.retry_count = i.retry_count + WHERE + r.tenant_id = $4::uuid + AND r.evicted_at IS NOT NULL + ORDER BY r.task_id, r.task_inserted_at, r.retry_count + FOR UPDATE +), selected_tasks AS ( + SELECT + t.* + FROM + v1_task t + JOIN + evicted_runtimes er ON t.id = er.task_id AND t.inserted_at = er.task_inserted_at +), inserted_qi AS ( + INSERT INTO v1_queue_item ( + tenant_id, queue, task_id, task_inserted_at, external_id, action_id, step_id, + workflow_id, workflow_run_id, schedule_timeout_at, step_timeout, priority, + sticky, desired_worker_id, retry_count + ) + SELECT + t.tenant_id, t.queue, t.id, t.inserted_at, t.external_id, t.action_id, t.step_id, + t.workflow_id, t.workflow_run_id, + CURRENT_TIMESTAMP + convert_duration_to_interval(t.schedule_timeout), + t.step_timeout, 4, t.sticky, t.desired_worker_id, t.retry_count + FROM + selected_tasks t + 
ON CONFLICT DO NOTHING + RETURNING task_id, task_inserted_at +) +SELECT + st.id AS task_id, + st.inserted_at AS task_inserted_at, + st.retry_count, + (iq.task_id IS NOT NULL) AS queued, + st.queue +FROM + selected_tasks st +LEFT JOIN + inserted_qi iq ON st.id = iq.task_id AND st.inserted_at = iq.task_inserted_at +` + +type RestoreEvictedTasksParams struct { + Taskids []int64 `json:"taskids"` + Taskinsertedats []pgtype.Timestamptz `json:"taskinsertedats"` + Retrycounts []int32 `json:"retrycounts"` + Tenantid uuid.UUID `json:"tenantid"` +} + +type RestoreEvictedTasksRow struct { + TaskInsertedAt pgtype.Timestamptz `json:"task_inserted_at"` + Queue string `json:"queue"` + TaskID int64 `json:"task_id"` + RetryCount int32 `json:"retry_count"` + Queued bool `json:"queued"` +} + +func (q *Queries) RestoreEvictedTasks(ctx context.Context, db DBTX, arg RestoreEvictedTasksParams) ([]*RestoreEvictedTasksRow, error) { + rows, err := db.Query(ctx, restoreEvictedTasks, + arg.Taskids, + arg.Taskinsertedats, + arg.Retrycounts, + arg.Tenantid, + ) + if err != nil { + return nil, err + } + defer rows.Close() + var items []*RestoreEvictedTasksRow + for rows.Next() { + var i RestoreEvictedTasksRow + if err := rows.Scan( + &i.TaskID, + &i.TaskInsertedAt, + &i.RetryCount, + &i.Queued, + &i.Queue, + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const bulkCreateEvents = `-- name: BulkCreateEvents :many WITH to_insert AS ( SELECT diff --git a/pkg/repository/sqlcv1/tasks.sql b/pkg/repository/sqlcv1/tasks.sql index 7393f327a..1685861b1 100644 --- a/pkg/repository/sqlcv1/tasks.sql +++ b/pkg/repository/sqlcv1/tasks.sql @@ -7,7 +7,11 @@ SELECT create_v1_range_partition('v1_payload', @date::date), create_v1_range_partition('v1_event', @date::date), create_v1_weekly_range_partition('v1_event_lookup_table', @date::date), - create_v1_range_partition('v1_event_to_run', @date::date); + 
create_v1_range_partition('v1_event_to_run', @date::date), + create_v1_range_partition('v1_durable_event_log_file', @date::date), + create_v1_range_partition('v1_durable_event_log_entry', @date::date, 80), + create_v1_range_partition('v1_durable_event_log_branch_point', @date::date, 80) +; -- name: EnsureTablePartitionsExist :one WITH tomorrow_date AS ( @@ -25,6 +29,12 @@ WITH tomorrow_date AS ( SELECT 'v1_payload_' || to_char((SELECT date FROM tomorrow_date), 'YYYYMMDD') UNION ALL SELECT 'v1_event_' || to_char((SELECT date FROM tomorrow_date), 'YYYYMMDD') + UNION ALL + SELECT 'v1_durable_event_log_file_' || to_char((SELECT date FROM tomorrow_date), 'YYYYMMDD') + UNION ALL + SELECT 'v1_durable_event_log_entry_' || to_char((SELECT date FROM tomorrow_date), 'YYYYMMDD') + UNION ALL + SELECT 'v1_durable_event_log_branch_point_' || to_char((SELECT date FROM tomorrow_date), 'YYYYMMDD') ), partition_check AS ( SELECT COUNT(*) AS total_tables, @@ -56,6 +66,12 @@ WITH task_partitions AS ( SELECT 'v1_event_lookup_table' AS parent_table, p::text as partition_name FROM get_v1_weekly_partitions_before_date('v1_event_lookup_table', @date::date) AS p ), event_to_run_partitions AS ( SELECT 'v1_event_to_run' AS parent_table, p::text as partition_name FROM get_v1_partitions_before_date('v1_event_to_run', @date::date) AS p +), durable_event_log_file_partitions AS ( + SELECT 'v1_durable_event_log_file' AS parent_table, p::text as partition_name FROM get_v1_partitions_before_date('v1_durable_event_log_file', @date::date) AS p +), durable_event_log_entry_partitions AS ( + SELECT 'v1_durable_event_log_entry' AS parent_table, p::text as partition_name FROM get_v1_partitions_before_date('v1_durable_event_log_entry', @date::date) AS p +), durable_event_log_branch_point_partitions AS ( + SELECT 'v1_durable_event_log_branch_point' AS parent_table, p::text as partition_name FROM get_v1_partitions_before_date('v1_durable_event_log_branch_point', @date::date) AS p ) SELECT @@ -111,6 +127,27 @@ 
SELECT * FROM event_to_run_partitions + +UNION ALL + +SELECT + * +FROM + durable_event_log_file_partitions + +UNION ALL + +SELECT + * +FROM + durable_event_log_entry_partitions + +UNION ALL + +SELECT + * +FROM + durable_event_log_branch_point_partitions ; -- name: DefaultTaskActivityGauge :one @@ -379,6 +416,8 @@ WITH tasks_on_inactive_workers AS ( WHERE w."tenantId" = @tenantId::uuid AND w."lastHeartbeatAt" < NOW() - INTERVAL '30 seconds' + -- evicted tasks are not eligible for re-assignment + AND runtime.evicted_at IS NULL LIMIT COALESCE(sqlc.narg('limit')::integer, 1000) ) @@ -938,6 +977,45 @@ WHERE RETURNING v1_task_runtime.*; +-- name: EvictTask :one +-- Marks a task as evicted in v1_task_runtime and releases worker slots. +-- Skips rows whose execution timeout has already passed so the timeout +-- mechanism handles them instead of producing a spurious EVICTED status. +WITH locked_runtime AS ( + SELECT + task_id, + task_inserted_at, + retry_count + FROM + v1_task_runtime + WHERE + tenant_id = @tenantId::uuid + AND task_id = @taskId::bigint + AND task_inserted_at = @taskInsertedAt::timestamptz + AND retry_count = @retryCount::int + AND evicted_at IS NULL + AND (timeout_at IS NULL OR timeout_at > NOW()) + FOR UPDATE +), deleted_slots AS ( + DELETE FROM v1_task_runtime_slot + WHERE + tenant_id = @tenantId::uuid + AND task_id = @taskId::bigint + AND task_inserted_at = @taskInsertedAt::timestamptz + AND retry_count = @retryCount::int +), updated_runtime AS ( + UPDATE v1_task_runtime + SET + evicted_at = NOW(), + worker_id = NULL + WHERE (task_id, task_inserted_at, retry_count) + IN (SELECT task_id, task_inserted_at, retry_count FROM locked_runtime) + RETURNING 1 +) +SELECT + COALESCE((SELECT 1 FROM updated_runtime LIMIT 1), 0)::int AS "evicted"; + + -- name: CleanupWorkflowConcurrencySlotsAfterInsert :exec -- Cleans up workflow concurrency slots when tasks have been inserted in a non-QUEUED state. 
-- NOTE: this comes after the insert into v1_dag_to_task and v1_lookup_table, because we case on these tables for cleanup @@ -1221,7 +1299,8 @@ WITH inputs AS ( SELECT t.external_id, - (tr.task_id IS NOT NULL)::BOOLEAN AS is_running + (tr.task_id IS NOT NULL)::BOOLEAN AS is_running, + (tr.task_id IS NOT NULL AND tr.evicted_at IS NOT NULL)::BOOLEAN AS is_evicted FROM v1_task t LEFT JOIN v1_task_runtime tr ON (t.id, t.inserted_at, t.retry_count) = (tr.task_id, tr.task_inserted_at, tr.retry_count) WHERE diff --git a/pkg/repository/sqlcv1/tasks.sql.go b/pkg/repository/sqlcv1/tasks.sql.go index c9b6c3398..ffd0d0cf1 100644 --- a/pkg/repository/sqlcv1/tasks.sql.go +++ b/pkg/repository/sqlcv1/tasks.sql.go @@ -233,7 +233,10 @@ SELECT create_v1_range_partition('v1_payload', $1::date), create_v1_range_partition('v1_event', $1::date), create_v1_weekly_range_partition('v1_event_lookup_table', $1::date), - create_v1_range_partition('v1_event_to_run', $1::date) + create_v1_range_partition('v1_event_to_run', $1::date), + create_v1_range_partition('v1_durable_event_log_file', $1::date), + create_v1_range_partition('v1_durable_event_log_entry', $1::date, 80), + create_v1_range_partition('v1_durable_event_log_branch_point', $1::date, 80) ` func (q *Queries) CreatePartitions(ctx context.Context, db DBTX, date pgtype.Date) error { @@ -329,6 +332,12 @@ WITH tomorrow_date AS ( SELECT 'v1_payload_' || to_char((SELECT date FROM tomorrow_date), 'YYYYMMDD') UNION ALL SELECT 'v1_event_' || to_char((SELECT date FROM tomorrow_date), 'YYYYMMDD') + UNION ALL + SELECT 'v1_durable_event_log_file_' || to_char((SELECT date FROM tomorrow_date), 'YYYYMMDD') + UNION ALL + SELECT 'v1_durable_event_log_entry_' || to_char((SELECT date FROM tomorrow_date), 'YYYYMMDD') + UNION ALL + SELECT 'v1_durable_event_log_branch_point_' || to_char((SELECT date FROM tomorrow_date), 'YYYYMMDD') ), partition_check AS ( SELECT COUNT(*) AS total_tables, @@ -351,6 +360,64 @@ func (q *Queries) EnsureTablePartitionsExist(ctx 
context.Context, db DBTX) (bool return all_partitions_exist, err } +const evictTask = `-- name: EvictTask :one +WITH locked_runtime AS ( + SELECT + task_id, + task_inserted_at, + retry_count + FROM + v1_task_runtime + WHERE + tenant_id = $1::uuid + AND task_id = $2::bigint + AND task_inserted_at = $3::timestamptz + AND retry_count = $4::int + AND evicted_at IS NULL + AND (timeout_at IS NULL OR timeout_at > NOW()) + FOR UPDATE +), deleted_slots AS ( + DELETE FROM v1_task_runtime_slot + WHERE + tenant_id = $1::uuid + AND task_id = $2::bigint + AND task_inserted_at = $3::timestamptz + AND retry_count = $4::int +), updated_runtime AS ( + UPDATE v1_task_runtime + SET + evicted_at = NOW(), + worker_id = NULL + WHERE (task_id, task_inserted_at, retry_count) + IN (SELECT task_id, task_inserted_at, retry_count FROM locked_runtime) + RETURNING 1 +) +SELECT + COALESCE((SELECT 1 FROM updated_runtime LIMIT 1), 0)::int AS "evicted" +` + +type EvictTaskParams struct { + Tenantid uuid.UUID `json:"tenantid"` + Taskid int64 `json:"taskid"` + Taskinsertedat pgtype.Timestamptz `json:"taskinsertedat"` + Retrycount int32 `json:"retrycount"` +} + +// Marks a task as evicted in v1_task_runtime and releases worker slots. +// Skips rows whose execution timeout has already passed so the timeout +// mechanism handles them instead of producing a spurious EVICTED status. 
+func (q *Queries) EvictTask(ctx context.Context, db DBTX, arg EvictTaskParams) (int32, error) { + row := db.QueryRow(ctx, evictTask, + arg.Tenantid, + arg.Taskid, + arg.Taskinsertedat, + arg.Retrycount, + ) + var evicted int32 + err := row.Scan(&evicted) + return evicted, err +} + const failTaskAppFailure = `-- name: FailTaskAppFailure :many WITH input AS ( SELECT @@ -624,9 +691,18 @@ ORDER BY task_id, task_inserted_at LIMIT 1 ` -func (q *Queries) FindOldestRunningTask(ctx context.Context, db DBTX) (*V1TaskRuntime, error) { +type FindOldestRunningTaskRow struct { + TaskID int64 `json:"task_id"` + TaskInsertedAt pgtype.Timestamptz `json:"task_inserted_at"` + RetryCount int32 `json:"retry_count"` + WorkerID *uuid.UUID `json:"worker_id"` + TenantID uuid.UUID `json:"tenant_id"` + TimeoutAt pgtype.Timestamp `json:"timeout_at"` +} + +func (q *Queries) FindOldestRunningTask(ctx context.Context, db DBTX) (*FindOldestRunningTaskRow, error) { row := db.QueryRow(ctx, findOldestRunningTask) - var i V1TaskRuntime + var i FindOldestRunningTaskRow err := row.Scan( &i.TaskID, &i.TaskInsertedAt, @@ -639,7 +715,7 @@ func (q *Queries) FindOldestRunningTask(ctx context.Context, db DBTX) (*V1TaskRu } const findOldestTask = `-- name: FindOldestTask :one -SELECT id, inserted_at, tenant_id, queue, action_id, step_id, step_readable_id, workflow_id, workflow_version_id, workflow_run_id, schedule_timeout, step_timeout, priority, sticky, desired_worker_id, external_id, display_name, input, retry_count, internal_retry_count, app_retry_count, step_index, additional_metadata, dag_id, dag_inserted_at, parent_task_external_id, parent_task_id, parent_task_inserted_at, child_index, child_key, initial_state, initial_state_reason, concurrency_parent_strategy_ids, concurrency_strategy_ids, concurrency_keys, retry_backoff_factor, retry_max_backoff, desired_worker_label +SELECT id, inserted_at, tenant_id, queue, action_id, step_id, step_readable_id, workflow_id, workflow_version_id, workflow_run_id, 
schedule_timeout, step_timeout, priority, sticky, desired_worker_id, external_id, display_name, input, retry_count, internal_retry_count, app_retry_count, step_index, additional_metadata, dag_id, dag_inserted_at, parent_task_external_id, parent_task_id, parent_task_inserted_at, child_index, child_key, initial_state, initial_state_reason, concurrency_parent_strategy_ids, concurrency_strategy_ids, concurrency_keys, retry_backoff_factor, retry_max_backoff, is_durable, desired_worker_label FROM v1_task ORDER BY id, inserted_at LIMIT 1 @@ -686,6 +762,7 @@ func (q *Queries) FindOldestTask(ctx context.Context, db DBTX) (*V1Task, error) &i.ConcurrencyKeys, &i.RetryBackoffFactor, &i.RetryMaxBackoff, + &i.IsDurable, &i.DesiredWorkerLabel, ) return &i, err @@ -1251,6 +1328,12 @@ WITH task_partitions AS ( SELECT 'v1_event_lookup_table' AS parent_table, p::text as partition_name FROM get_v1_weekly_partitions_before_date('v1_event_lookup_table', $1::date) AS p ), event_to_run_partitions AS ( SELECT 'v1_event_to_run' AS parent_table, p::text as partition_name FROM get_v1_partitions_before_date('v1_event_to_run', $1::date) AS p +), durable_event_log_file_partitions AS ( + SELECT 'v1_durable_event_log_file' AS parent_table, p::text as partition_name FROM get_v1_partitions_before_date('v1_durable_event_log_file', $1::date) AS p +), durable_event_log_entry_partitions AS ( + SELECT 'v1_durable_event_log_entry' AS parent_table, p::text as partition_name FROM get_v1_partitions_before_date('v1_durable_event_log_entry', $1::date) AS p +), durable_event_log_branch_point_partitions AS ( + SELECT 'v1_durable_event_log_branch_point' AS parent_table, p::text as partition_name FROM get_v1_partitions_before_date('v1_durable_event_log_branch_point', $1::date) AS p ) SELECT @@ -1306,6 +1389,27 @@ SELECT parent_table, partition_name FROM event_to_run_partitions + +UNION ALL + +SELECT + parent_table, partition_name +FROM + durable_event_log_file_partitions + +UNION ALL + +SELECT + parent_table, 
partition_name +FROM + durable_event_log_entry_partitions + +UNION ALL + +SELECT + parent_table, partition_name +FROM + durable_event_log_branch_point_partitions ` type ListPartitionsBeforeDateRow struct { @@ -1562,7 +1666,8 @@ WITH inputs AS ( SELECT t.external_id, - (tr.task_id IS NOT NULL)::BOOLEAN AS is_running + (tr.task_id IS NOT NULL)::BOOLEAN AS is_running, + (tr.task_id IS NOT NULL AND tr.evicted_at IS NOT NULL)::BOOLEAN AS is_evicted FROM v1_task t LEFT JOIN v1_task_runtime tr ON (t.id, t.inserted_at, t.retry_count) = (tr.task_id, tr.task_inserted_at, tr.retry_count) WHERE @@ -1583,6 +1688,7 @@ type ListTaskRunningStatusesParams struct { type ListTaskRunningStatusesRow struct { ExternalID uuid.UUID `json:"external_id"` IsRunning bool `json:"is_running"` + IsEvicted bool `json:"is_evicted"` } func (q *Queries) ListTaskRunningStatuses(ctx context.Context, db DBTX, arg ListTaskRunningStatusesParams) ([]*ListTaskRunningStatusesRow, error) { @@ -1599,7 +1705,7 @@ func (q *Queries) ListTaskRunningStatuses(ctx context.Context, db DBTX, arg List var items []*ListTaskRunningStatusesRow for rows.Next() { var i ListTaskRunningStatusesRow - if err := rows.Scan(&i.ExternalID, &i.IsRunning); err != nil { + if err := rows.Scan(&i.ExternalID, &i.IsRunning, &i.IsEvicted); err != nil { return nil, err } items = append(items, &i) @@ -1611,7 +1717,7 @@ func (q *Queries) ListTaskRunningStatuses(ctx context.Context, db DBTX, arg List } const listTasks = `-- name: ListTasks :many -SELECT id, inserted_at, tenant_id, queue, action_id, step_id, step_readable_id, workflow_id, workflow_version_id, workflow_run_id, schedule_timeout, step_timeout, priority, sticky, desired_worker_id, external_id, display_name, input, retry_count, internal_retry_count, app_retry_count, step_index, additional_metadata, dag_id, dag_inserted_at, parent_task_external_id, parent_task_id, parent_task_inserted_at, child_index, child_key, initial_state, initial_state_reason, concurrency_parent_strategy_ids, 
concurrency_strategy_ids, concurrency_keys, retry_backoff_factor, retry_max_backoff, desired_worker_label +SELECT id, inserted_at, tenant_id, queue, action_id, step_id, step_readable_id, workflow_id, workflow_version_id, workflow_run_id, schedule_timeout, step_timeout, priority, sticky, desired_worker_id, external_id, display_name, input, retry_count, internal_retry_count, app_retry_count, step_index, additional_metadata, dag_id, dag_inserted_at, parent_task_external_id, parent_task_id, parent_task_inserted_at, child_index, child_key, initial_state, initial_state_reason, concurrency_parent_strategy_ids, concurrency_strategy_ids, concurrency_keys, retry_backoff_factor, retry_max_backoff, is_durable, desired_worker_label FROM v1_task WHERE @@ -1671,6 +1777,7 @@ func (q *Queries) ListTasks(ctx context.Context, db DBTX, arg ListTasksParams) ( &i.ConcurrencyKeys, &i.RetryBackoffFactor, &i.RetryMaxBackoff, + &i.IsDurable, &i.DesiredWorkerLabel, ); err != nil { return nil, err @@ -1884,6 +1991,8 @@ WITH tasks_on_inactive_workers AS ( WHERE w."tenantId" = $1::uuid AND w."lastHeartbeatAt" < NOW() - INTERVAL '30 seconds' + -- evicted tasks are not eligible for re-assignment + AND runtime.evicted_at IS NULL LIMIT COALESCE($2::integer, 1000) ) @@ -2223,7 +2332,7 @@ FROM WHERE (v1_task_runtime.task_id, v1_task_runtime.task_inserted_at, v1_task_runtime.retry_count) IN (SELECT id, inserted_at, retry_count FROM task) RETURNING - v1_task_runtime.task_id, v1_task_runtime.task_inserted_at, v1_task_runtime.retry_count, v1_task_runtime.worker_id, v1_task_runtime.tenant_id, v1_task_runtime.timeout_at + v1_task_runtime.task_id, v1_task_runtime.task_inserted_at, v1_task_runtime.retry_count, v1_task_runtime.worker_id, v1_task_runtime.tenant_id, v1_task_runtime.timeout_at, v1_task_runtime.evicted_at ` type ManualSlotReleaseParams struct { @@ -2241,6 +2350,7 @@ func (q *Queries) ManualSlotRelease(ctx context.Context, db DBTX, arg ManualSlot &i.WorkerID, &i.TenantID, &i.TimeoutAt, + 
&i.EvictedAt, ) return &i, err } @@ -2329,7 +2439,7 @@ WITH input AS ( UNNEST($3::bigint[]) AS task_id, UNNEST($4::timestamptz[]) AS task_inserted_at ), relevant_tasks AS ( - SELECT id, inserted_at, tenant_id, queue, action_id, step_id, step_readable_id, workflow_id, workflow_version_id, workflow_run_id, schedule_timeout, step_timeout, priority, sticky, desired_worker_id, external_id, display_name, input, retry_count, internal_retry_count, app_retry_count, step_index, additional_metadata, dag_id, dag_inserted_at, parent_task_external_id, parent_task_id, parent_task_inserted_at, child_index, child_key, initial_state, initial_state_reason, concurrency_parent_strategy_ids, concurrency_strategy_ids, concurrency_keys, retry_backoff_factor, retry_max_backoff, desired_worker_label, task_id, task_inserted_at + SELECT id, inserted_at, tenant_id, queue, action_id, step_id, step_readable_id, workflow_id, workflow_version_id, workflow_run_id, schedule_timeout, step_timeout, priority, sticky, desired_worker_id, external_id, display_name, input, retry_count, internal_retry_count, app_retry_count, step_index, additional_metadata, dag_id, dag_inserted_at, parent_task_external_id, parent_task_id, parent_task_inserted_at, child_index, child_key, initial_state, initial_state_reason, concurrency_parent_strategy_ids, concurrency_strategy_ids, concurrency_keys, retry_backoff_factor, retry_max_backoff, is_durable, desired_worker_label, task_id, task_inserted_at FROM v1_task t JOIN @@ -2490,7 +2600,7 @@ FROM WHERE (v1_task_runtime.task_id, v1_task_runtime.task_inserted_at, v1_task_runtime.retry_count) IN (SELECT id, inserted_at, retry_count FROM task) RETURNING - v1_task_runtime.task_id, v1_task_runtime.task_inserted_at, v1_task_runtime.retry_count, v1_task_runtime.worker_id, v1_task_runtime.tenant_id, v1_task_runtime.timeout_at + v1_task_runtime.task_id, v1_task_runtime.task_inserted_at, v1_task_runtime.retry_count, v1_task_runtime.worker_id, v1_task_runtime.tenant_id, 
v1_task_runtime.timeout_at, v1_task_runtime.evicted_at ` type RefreshTimeoutByParams struct { @@ -2509,6 +2619,7 @@ func (q *Queries) RefreshTimeoutBy(ctx context.Context, db DBTX, arg RefreshTime &i.WorkerID, &i.TenantID, &i.TimeoutAt, + &i.EvictedAt, ) return &i, err } diff --git a/pkg/repository/sqlcv1/workers.sql b/pkg/repository/sqlcv1/workers.sql index 67c75205a..38a8bd84a 100644 --- a/pkg/repository/sqlcv1/workers.sql +++ b/pkg/repository/sqlcv1/workers.sql @@ -481,3 +481,33 @@ VALUES ( @workerId::uuid ) ON CONFLICT DO NOTHING; + +-- name: UpdateWorkerDurableTaskDispatcherId :one +UPDATE "Worker" +SET + "durableTaskDispatcherId" = @dispatcherId::UUID, + "updatedAt" = CURRENT_TIMESTAMP +WHERE + "id" = @workerId::uuid + AND "tenantId" = @tenantId::uuid +RETURNING *; + +-- name: ListDurableTaskDispatcherIdsForTasks :many +WITH tasks AS ( + SELECT + UNNEST(@taskIds::BIGINT[]) AS task_id, + UNNEST(@taskInsertedAts::TIMESTAMPTZ[]) AS task_inserted_at +) + +SELECT + rt.*, + w."durableTaskDispatcherId" +FROM v1_task_runtime rt +LEFT JOIN "Worker" w ON rt.worker_id = w.id +WHERE + rt.tenant_id = @tenantId::uuid + AND (rt.task_id, rt.task_inserted_at) IN ( + SELECT task_id, task_inserted_at + FROM tasks + ) +; diff --git a/pkg/repository/sqlcv1/workers.sql.go b/pkg/repository/sqlcv1/workers.sql.go index df8113f78..8e067a038 100644 --- a/pkg/repository/sqlcv1/workers.sql.go +++ b/pkg/repository/sqlcv1/workers.sql.go @@ -39,7 +39,7 @@ INSERT INTO "Worker" ( $7::text, $8::text, $9::text -) RETURNING id, "createdAt", "updatedAt", "deletedAt", "tenantId", "lastHeartbeatAt", name, "dispatcherId", "maxRuns", "isActive", "lastListenerEstablished", "isPaused", type, "webhookId", language, "languageVersion", os, "runtimeExtra", "sdkVersion" +) RETURNING id, "createdAt", "updatedAt", "deletedAt", "tenantId", "lastHeartbeatAt", name, "dispatcherId", "maxRuns", "isActive", "lastListenerEstablished", "isPaused", type, "webhookId", language, "languageVersion", os, "runtimeExtra", 
"sdkVersion", "durableTaskDispatcherId" ` type CreateWorkerParams struct { @@ -87,6 +87,7 @@ func (q *Queries) CreateWorker(ctx context.Context, db DBTX, arg CreateWorkerPar &i.Os, &i.RuntimeExtra, &i.SdkVersion, + &i.DurableTaskDispatcherId, ) return &i, err } @@ -180,7 +181,7 @@ DELETE FROM "Worker" WHERE "id" = $1::uuid -RETURNING id, "createdAt", "updatedAt", "deletedAt", "tenantId", "lastHeartbeatAt", name, "dispatcherId", "maxRuns", "isActive", "lastListenerEstablished", "isPaused", type, "webhookId", language, "languageVersion", os, "runtimeExtra", "sdkVersion" +RETURNING id, "createdAt", "updatedAt", "deletedAt", "tenantId", "lastHeartbeatAt", name, "dispatcherId", "maxRuns", "isActive", "lastListenerEstablished", "isPaused", type, "webhookId", language, "languageVersion", os, "runtimeExtra", "sdkVersion", "durableTaskDispatcherId" ` func (q *Queries) DeleteWorker(ctx context.Context, db DBTX, id uuid.UUID) (*Worker, error) { @@ -206,13 +207,14 @@ func (q *Queries) DeleteWorker(ctx context.Context, db DBTX, id uuid.UUID) (*Wor &i.Os, &i.RuntimeExtra, &i.SdkVersion, + &i.DurableTaskDispatcherId, ) return &i, err } const getActiveWorkerById = `-- name: GetActiveWorkerById :one SELECT - w.id, w."createdAt", w."updatedAt", w."deletedAt", w."tenantId", w."lastHeartbeatAt", w.name, w."dispatcherId", w."maxRuns", w."isActive", w."lastListenerEstablished", w."isPaused", w.type, w."webhookId", w.language, w."languageVersion", w.os, w."runtimeExtra", w."sdkVersion", + w.id, w."createdAt", w."updatedAt", w."deletedAt", w."tenantId", w."lastHeartbeatAt", w.name, w."dispatcherId", w."maxRuns", w."isActive", w."lastListenerEstablished", w."isPaused", w.type, w."webhookId", w.language, w."languageVersion", w.os, w."runtimeExtra", w."sdkVersion", w."durableTaskDispatcherId", ww."url" AS "webhookUrl", w."maxRuns" - ( SELECT COUNT(*) @@ -268,6 +270,7 @@ func (q *Queries) GetActiveWorkerById(ctx context.Context, db DBTX, arg GetActiv &i.Worker.Os, &i.Worker.RuntimeExtra, 
&i.Worker.SdkVersion, + &i.Worker.DurableTaskDispatcherId, &i.WebhookUrl, &i.RemainingSlots, ) @@ -322,7 +325,7 @@ func (q *Queries) GetWorkerActionsByWorkerId(ctx context.Context, db DBTX, arg G const getWorkerById = `-- name: GetWorkerById :one SELECT - w.id, w."createdAt", w."updatedAt", w."deletedAt", w."tenantId", w."lastHeartbeatAt", w.name, w."dispatcherId", w."maxRuns", w."isActive", w."lastListenerEstablished", w."isPaused", w.type, w."webhookId", w.language, w."languageVersion", w.os, w."runtimeExtra", w."sdkVersion" + w.id, w."createdAt", w."updatedAt", w."deletedAt", w."tenantId", w."lastHeartbeatAt", w.name, w."dispatcherId", w."maxRuns", w."isActive", w."lastListenerEstablished", w."isPaused", w.type, w."webhookId", w.language, w."languageVersion", w.os, w."runtimeExtra", w."sdkVersion", w."durableTaskDispatcherId" FROM "Worker" w WHERE @@ -356,6 +359,7 @@ func (q *Queries) GetWorkerById(ctx context.Context, db DBTX, id uuid.UUID) (*Ge &i.Worker.Os, &i.Worker.RuntimeExtra, &i.Worker.SdkVersion, + &i.Worker.DurableTaskDispatcherId, ) return &i, err } @@ -802,6 +806,72 @@ func (q *Queries) ListDispatcherIdsForWorkers(ctx context.Context, db DBTX, arg return items, nil } +const listDurableTaskDispatcherIdsForTasks = `-- name: ListDurableTaskDispatcherIdsForTasks :many +WITH tasks AS ( + SELECT + UNNEST($2::BIGINT[]) AS task_id, + UNNEST($3::TIMESTAMPTZ[]) AS task_inserted_at +) + +SELECT + rt.task_id, rt.task_inserted_at, rt.retry_count, rt.worker_id, rt.tenant_id, rt.timeout_at, rt.evicted_at, + w."durableTaskDispatcherId" +FROM v1_task_runtime rt +LEFT JOIN "Worker" w ON rt.worker_id = w.id +WHERE + rt.tenant_id = $1::uuid + AND (rt.task_id, rt.task_inserted_at) IN ( + SELECT task_id, task_inserted_at + FROM tasks + ) +` + +type ListDurableTaskDispatcherIdsForTasksParams struct { + Tenantid uuid.UUID `json:"tenantid"` + Taskids []int64 `json:"taskids"` + Taskinsertedats []pgtype.Timestamptz `json:"taskinsertedats"` +} + +type 
ListDurableTaskDispatcherIdsForTasksRow struct { + TaskID int64 `json:"task_id"` + TaskInsertedAt pgtype.Timestamptz `json:"task_inserted_at"` + RetryCount int32 `json:"retry_count"` + WorkerID *uuid.UUID `json:"worker_id"` + TenantID uuid.UUID `json:"tenant_id"` + TimeoutAt pgtype.Timestamp `json:"timeout_at"` + EvictedAt pgtype.Timestamptz `json:"evicted_at"` + DurableTaskDispatcherId *uuid.UUID `json:"durableTaskDispatcherId"` +} + +func (q *Queries) ListDurableTaskDispatcherIdsForTasks(ctx context.Context, db DBTX, arg ListDurableTaskDispatcherIdsForTasksParams) ([]*ListDurableTaskDispatcherIdsForTasksRow, error) { + rows, err := db.Query(ctx, listDurableTaskDispatcherIdsForTasks, arg.Tenantid, arg.Taskids, arg.Taskinsertedats) + if err != nil { + return nil, err + } + defer rows.Close() + var items []*ListDurableTaskDispatcherIdsForTasksRow + for rows.Next() { + var i ListDurableTaskDispatcherIdsForTasksRow + if err := rows.Scan( + &i.TaskID, + &i.TaskInsertedAt, + &i.RetryCount, + &i.WorkerID, + &i.TenantID, + &i.TimeoutAt, + &i.EvictedAt, + &i.DurableTaskDispatcherId, + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const listManyWorkerLabels = `-- name: ListManyWorkerLabels :many SELECT "id", @@ -855,7 +925,7 @@ func (q *Queries) ListManyWorkerLabels(ctx context.Context, db DBTX, workerids [ const listSemaphoreSlotsWithStateForWorker = `-- name: ListSemaphoreSlotsWithStateForWorker :many SELECT - task_id, task_inserted_at, runtime.retry_count, worker_id, runtime.tenant_id, timeout_at, id, inserted_at, v1_task.tenant_id, queue, action_id, step_id, step_readable_id, workflow_id, workflow_version_id, workflow_run_id, schedule_timeout, step_timeout, priority, sticky, desired_worker_id, external_id, display_name, input, v1_task.retry_count, internal_retry_count, app_retry_count, step_index, additional_metadata, dag_id, dag_inserted_at, 
parent_task_external_id, parent_task_id, parent_task_inserted_at, child_index, child_key, initial_state, initial_state_reason, concurrency_parent_strategy_ids, concurrency_strategy_ids, concurrency_keys, retry_backoff_factor, retry_max_backoff, desired_worker_label + task_id, task_inserted_at, runtime.retry_count, worker_id, runtime.tenant_id, timeout_at, evicted_at, id, inserted_at, v1_task.tenant_id, queue, action_id, step_id, step_readable_id, workflow_id, workflow_version_id, workflow_run_id, schedule_timeout, step_timeout, priority, sticky, desired_worker_id, external_id, display_name, input, v1_task.retry_count, internal_retry_count, app_retry_count, step_index, additional_metadata, dag_id, dag_inserted_at, parent_task_external_id, parent_task_id, parent_task_inserted_at, child_index, child_key, initial_state, initial_state_reason, concurrency_parent_strategy_ids, concurrency_strategy_ids, concurrency_keys, retry_backoff_factor, retry_max_backoff, is_durable, desired_worker_label FROM v1_task_runtime runtime JOIN @@ -880,6 +950,7 @@ type ListSemaphoreSlotsWithStateForWorkerRow struct { WorkerID *uuid.UUID `json:"worker_id"` TenantID uuid.UUID `json:"tenant_id"` TimeoutAt pgtype.Timestamp `json:"timeout_at"` + EvictedAt pgtype.Timestamptz `json:"evicted_at"` ID int64 `json:"id"` InsertedAt pgtype.Timestamptz `json:"inserted_at"` TenantID_2 uuid.UUID `json:"tenant_id_2"` @@ -917,6 +988,7 @@ type ListSemaphoreSlotsWithStateForWorkerRow struct { ConcurrencyKeys []string `json:"concurrency_keys"` RetryBackoffFactor pgtype.Float8 `json:"retry_backoff_factor"` RetryMaxBackoff pgtype.Int4 `json:"retry_max_backoff"` + IsDurable pgtype.Bool `json:"is_durable"` DesiredWorkerLabel []byte `json:"desired_worker_label"` } @@ -936,6 +1008,7 @@ func (q *Queries) ListSemaphoreSlotsWithStateForWorker(ctx context.Context, db D &i.WorkerID, &i.TenantID, &i.TimeoutAt, + &i.EvictedAt, &i.ID, &i.InsertedAt, &i.TenantID_2, @@ -973,6 +1046,7 @@ func (q *Queries) 
ListSemaphoreSlotsWithStateForWorker(ctx context.Context, db D &i.ConcurrencyKeys, &i.RetryBackoffFactor, &i.RetryMaxBackoff, + &i.IsDurable, &i.DesiredWorkerLabel, ); err != nil { return nil, err @@ -1120,7 +1194,7 @@ func (q *Queries) ListWorkerSlotConfigs(ctx context.Context, db DBTX, arg ListWo const listWorkers = `-- name: ListWorkers :many SELECT - workers.id, workers."createdAt", workers."updatedAt", workers."deletedAt", workers."tenantId", workers."lastHeartbeatAt", workers.name, workers."dispatcherId", workers."maxRuns", workers."isActive", workers."lastListenerEstablished", workers."isPaused", workers.type, workers."webhookId", workers.language, workers."languageVersion", workers.os, workers."runtimeExtra", workers."sdkVersion" + workers.id, workers."createdAt", workers."updatedAt", workers."deletedAt", workers."tenantId", workers."lastHeartbeatAt", workers.name, workers."dispatcherId", workers."maxRuns", workers."isActive", workers."lastListenerEstablished", workers."isPaused", workers.type, workers."webhookId", workers.language, workers."languageVersion", workers.os, workers."runtimeExtra", workers."sdkVersion", workers."durableTaskDispatcherId" FROM "Worker" workers WHERE @@ -1199,6 +1273,7 @@ func (q *Queries) ListWorkers(ctx context.Context, db DBTX, arg ListWorkersParam &i.Worker.Os, &i.Worker.RuntimeExtra, &i.Worker.SdkVersion, + &i.Worker.DurableTaskDispatcherId, ); err != nil { return nil, err } @@ -1221,7 +1296,7 @@ SET "isPaused" = coalesce($4::boolean, "isPaused") WHERE "id" = $5::uuid -RETURNING id, "createdAt", "updatedAt", "deletedAt", "tenantId", "lastHeartbeatAt", name, "dispatcherId", "maxRuns", "isActive", "lastListenerEstablished", "isPaused", type, "webhookId", language, "languageVersion", os, "runtimeExtra", "sdkVersion" +RETURNING id, "createdAt", "updatedAt", "deletedAt", "tenantId", "lastHeartbeatAt", name, "dispatcherId", "maxRuns", "isActive", "lastListenerEstablished", "isPaused", type, "webhookId", language, "languageVersion", 
os, "runtimeExtra", "sdkVersion", "durableTaskDispatcherId" ` type UpdateWorkerParams struct { @@ -1261,6 +1336,7 @@ func (q *Queries) UpdateWorker(ctx context.Context, db DBTX, arg UpdateWorkerPar &i.Os, &i.RuntimeExtra, &i.SdkVersion, + &i.DurableTaskDispatcherId, ) return &i, err } @@ -1276,7 +1352,7 @@ WHERE "lastListenerEstablished" IS NULL OR "lastListenerEstablished" <= $2::timestamp ) -RETURNING id, "createdAt", "updatedAt", "deletedAt", "tenantId", "lastHeartbeatAt", name, "dispatcherId", "maxRuns", "isActive", "lastListenerEstablished", "isPaused", type, "webhookId", language, "languageVersion", os, "runtimeExtra", "sdkVersion" +RETURNING id, "createdAt", "updatedAt", "deletedAt", "tenantId", "lastHeartbeatAt", name, "dispatcherId", "maxRuns", "isActive", "lastListenerEstablished", "isPaused", type, "webhookId", language, "languageVersion", os, "runtimeExtra", "sdkVersion", "durableTaskDispatcherId" ` type UpdateWorkerActiveStatusParams struct { @@ -1308,6 +1384,52 @@ func (q *Queries) UpdateWorkerActiveStatus(ctx context.Context, db DBTX, arg Upd &i.Os, &i.RuntimeExtra, &i.SdkVersion, + &i.DurableTaskDispatcherId, + ) + return &i, err +} + +const updateWorkerDurableTaskDispatcherId = `-- name: UpdateWorkerDurableTaskDispatcherId :one +UPDATE "Worker" +SET + "durableTaskDispatcherId" = $1::UUID, + "updatedAt" = CURRENT_TIMESTAMP +WHERE + "id" = $2::uuid + AND "tenantId" = $3::uuid +RETURNING id, "createdAt", "updatedAt", "deletedAt", "tenantId", "lastHeartbeatAt", name, "dispatcherId", "maxRuns", "isActive", "lastListenerEstablished", "isPaused", type, "webhookId", language, "languageVersion", os, "runtimeExtra", "sdkVersion", "durableTaskDispatcherId" +` + +type UpdateWorkerDurableTaskDispatcherIdParams struct { + Dispatcherid uuid.UUID `json:"dispatcherid"` + Workerid uuid.UUID `json:"workerid"` + Tenantid uuid.UUID `json:"tenantid"` +} + +func (q *Queries) UpdateWorkerDurableTaskDispatcherId(ctx context.Context, db DBTX, arg 
UpdateWorkerDurableTaskDispatcherIdParams) (*Worker, error) { + row := db.QueryRow(ctx, updateWorkerDurableTaskDispatcherId, arg.Dispatcherid, arg.Workerid, arg.Tenantid) + var i Worker + err := row.Scan( + &i.ID, + &i.CreatedAt, + &i.UpdatedAt, + &i.DeletedAt, + &i.TenantId, + &i.LastHeartbeatAt, + &i.Name, + &i.DispatcherId, + &i.MaxRuns, + &i.IsActive, + &i.LastListenerEstablished, + &i.IsPaused, + &i.Type, + &i.WebhookId, + &i.Language, + &i.LanguageVersion, + &i.Os, + &i.RuntimeExtra, + &i.SdkVersion, + &i.DurableTaskDispatcherId, ) return &i, err } @@ -1320,7 +1442,7 @@ SET "lastHeartbeatAt" = $1::timestamp WHERE "id" = $2::uuid -RETURNING id, "createdAt", "updatedAt", "deletedAt", "tenantId", "lastHeartbeatAt", name, "dispatcherId", "maxRuns", "isActive", "lastListenerEstablished", "isPaused", type, "webhookId", language, "languageVersion", os, "runtimeExtra", "sdkVersion" +RETURNING id, "createdAt", "updatedAt", "deletedAt", "tenantId", "lastHeartbeatAt", name, "dispatcherId", "maxRuns", "isActive", "lastListenerEstablished", "isPaused", type, "webhookId", language, "languageVersion", os, "runtimeExtra", "sdkVersion", "durableTaskDispatcherId" ` type UpdateWorkerHeartbeatParams struct { @@ -1351,6 +1473,7 @@ func (q *Queries) UpdateWorkerHeartbeat(ctx context.Context, db DBTX, arg Update &i.Os, &i.RuntimeExtra, &i.SdkVersion, + &i.DurableTaskDispatcherId, ) return &i, err } diff --git a/pkg/repository/task.go b/pkg/repository/task.go index ae47a1795..586203e17 100644 --- a/pkg/repository/task.go +++ b/pkg/repository/task.go @@ -213,6 +213,8 @@ type RefreshTimeoutBy struct { IncrementTimeoutBy string `validate:"required,duration"` } +type WasEvicted bool + type TaskRepository interface { EnsureTablePartitionsExist(ctx context.Context) (bool, error) UpdateTablePartitions(ctx context.Context) error @@ -258,6 +260,10 @@ type TaskRepository interface { ReleaseSlot(ctx context.Context, tenantId, externalId uuid.UUID) (*sqlcv1.V1TaskRuntime, error) + EvictTask(ctx 
context.Context, tenantId uuid.UUID, task TaskIdInsertedAtRetryCount) (WasEvicted, error) + + RestoreEvictedTasks(ctx context.Context, tenantId uuid.UUID, tasks []TaskIdInsertedAtRetryCount) ([]*sqlcv1.RestoreEvictedTasksRow, error) + ListSignalCompletedEvents(ctx context.Context, tenantId uuid.UUID, tasks []TaskIdInsertedAtSignalKey) ([]*V1TaskEventWithPayload, error) // AnalyzeTaskTables runs ANALYZE on the task tables @@ -1575,6 +1581,67 @@ func (r *TaskRepositoryImpl) ReleaseSlot(ctx context.Context, tenantId, external return resp, nil } +func (r *TaskRepositoryImpl) EvictTask(ctx context.Context, tenantId uuid.UUID, task TaskIdInsertedAtRetryCount) (WasEvicted, error) { + tx, commit, rollback, err := sqlchelpers.PrepareTx(ctx, r.pool, r.l) + + if err != nil { + return false, err + } + + defer rollback() + + evicted, err := r.queries.EvictTask(ctx, tx, sqlcv1.EvictTaskParams{ + Tenantid: tenantId, + Taskid: task.Id, + Taskinsertedat: task.InsertedAt, + Retrycount: task.RetryCount, + }) + if err != nil { + return false, err + } + + if err := commit(ctx); err != nil { + return false, err + } + + return WasEvicted(evicted > 0), nil +} + +func (r *TaskRepositoryImpl) RestoreEvictedTasks(ctx context.Context, tenantId uuid.UUID, tasks []TaskIdInsertedAtRetryCount) ([]*sqlcv1.RestoreEvictedTasksRow, error) { + tx, commit, rollback, err := sqlchelpers.PrepareTx(ctx, r.pool, r.l) + if err != nil { + return nil, err + } + + defer rollback() + + taskIds := make([]int64, len(tasks)) + taskInsertedAts := make([]pgtype.Timestamptz, len(tasks)) + retryCounts := make([]int32, len(tasks)) + + for i, t := range tasks { + taskIds[i] = t.Id + taskInsertedAts[i] = t.InsertedAt + retryCounts[i] = t.RetryCount + } + + rows, err := r.queries.RestoreEvictedTasks(ctx, tx, sqlcv1.RestoreEvictedTasksParams{ + Tenantid: tenantId, + Taskids: taskIds, + Taskinsertedats: taskInsertedAts, + Retrycounts: retryCounts, + }) + if err != nil { + return nil, err + } + + if err := commit(ctx); err != 
nil { + return nil, err + } + + return rows, nil +} + func (r *sharedRepository) releaseTasks(ctx context.Context, tx sqlcv1.DBTX, tenantId uuid.UUID, tasks []TaskIdInsertedAtRetryCount) ([]*sqlcv1.ReleaseTasksRow, error) { taskIds := make([]int64, len(tasks)) taskInsertedAts := make([]pgtype.Timestamptz, len(tasks)) @@ -1762,6 +1829,7 @@ func (r *sharedRepository) insertTasks( createExpressionOpts := make(map[uuid.UUID][]createTaskExpressionEvalOpt, 0) workflowVersionIds := make([]uuid.UUID, len(tasks)) workflowRunIds := make([]uuid.UUID, len(tasks)) + isDurables := make([]bool, len(tasks)) desiredWorkerLabels := make([][]byte, len(tasks)) externalIdToInput := make(map[uuid.UUID][]byte, len(tasks)) @@ -1789,6 +1857,7 @@ func (r *sharedRepository) insertTasks( retryBackoffFactors[i] = stepConfig.RetryBackoffFactor retryMaxBackoffs[i] = stepConfig.RetryMaxBackoff workflowRunIds[i] = task.WorkflowRunId + isDurables[i] = stepConfig.IsDurable // TODO: case on whether this is a v1 or v2 task by looking at the step data. for now, // we're assuming a v1 task. 
@@ -2080,6 +2149,7 @@ func (r *sharedRepository) insertTasks( WorkflowVersionIds: make([]uuid.UUID, 0), WorkflowRunIds: make([]uuid.UUID, 0), Inputs: make([][]byte, 0), + IsDurables: make([]bool, 0), DesiredWorkerLabels: make([][]byte, 0), } } @@ -2117,6 +2187,7 @@ func (r *sharedRepository) insertTasks( params.RetryMaxBackoff = append(params.RetryMaxBackoff, retryMaxBackoffs[i]) params.WorkflowVersionIds = append(params.WorkflowVersionIds, workflowVersionIds[i]) params.WorkflowRunIds = append(params.WorkflowRunIds, workflowRunIds[i]) + params.IsDurables = append(params.IsDurables, isDurables[i]) if r.payloadStore.DualWritesEnabled() { // if dual writes are enabled, write the inputs to the tasks table @@ -3170,6 +3241,7 @@ func (r *TaskRepositoryImpl) ReplayTasks(ctx context.Context, tenantId uuid.UUID SignalExternalId: task.ParentTaskExternalID, SignalTaskId: &task.ParentTaskID.Int64, SignalTaskInsertedAt: task.ParentTaskInsertedAt, + SignalTaskExternalId: &task.ExternalID, SignalKey: &k, }) } @@ -4133,17 +4205,22 @@ func (r *TaskRepositoryImpl) GetWorkflowRunResultDetails(ctx context.Context, te return nil, fmt.Errorf("failed to list task running statuses: %w", err) } - externalIdToIsRunning := make(map[string]bool) + externalIdToIsRunning := make(map[uuid.UUID]bool) + externalIdToIsEvicted := make(map[uuid.UUID]bool) for _, stat := range taskStats { - externalIdToIsRunning[stat.ExternalID.String()] = stat.IsRunning + externalIdToIsRunning[stat.ExternalID] = stat.IsRunning + externalIdToIsEvicted[stat.ExternalID] = stat.IsEvicted } for _, task := range flat { - isRunning := externalIdToIsRunning[task.ExternalID.String()] + isRunning := externalIdToIsRunning[task.ExternalID] + isEvicted := externalIdToIsEvicted[task.ExternalID] status := statusutils.V1RunStatusQueued - if isRunning { + if isEvicted { + status = statusutils.V1RunStatusEvicted + } else if isRunning { status = statusutils.V1RunStatusRunning } diff --git a/pkg/repository/trigger.go 
b/pkg/repository/trigger.go index 88b3186a8..6bd1ff374 100644 --- a/pkg/repository/trigger.go +++ b/pkg/repository/trigger.go @@ -11,11 +11,14 @@ import ( "github.com/google/uuid" "github.com/jackc/pgx/v5/pgtype" "github.com/rs/zerolog" + "go.opentelemetry.io/otel/attribute" "github.com/hatchet-dev/hatchet/internal/cel" + v1contracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1" "github.com/hatchet-dev/hatchet/pkg/constants" "github.com/hatchet-dev/hatchet/pkg/repository/sqlchelpers" "github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1" + "github.com/hatchet-dev/hatchet/pkg/telemetry" ) type EventTriggerOpts struct { @@ -109,6 +112,9 @@ type createDAGOpts struct { // (required) a list of task external ids that are part of this DAG TaskIds []uuid.UUID + // (required) a parallel list of step readable ids for each task + TaskStepReadableIds []string + // (required) the workflow id for this DAG WorkflowId uuid.UUID @@ -132,6 +138,8 @@ type TriggerRepository interface { PopulateExternalIdsForWorkflow(ctx context.Context, tenantId uuid.UUID, opts []*WorkflowNameTriggerOpts) error PreflightVerifyWorkflowNameOpts(ctx context.Context, tenantId uuid.UUID, opts []*WorkflowNameTriggerOpts) error + + NewTriggerTaskData(ctx context.Context, tenantId uuid.UUID, req *v1contracts.TriggerWorkflowRequest, parentTask *sqlcv1.FlattenExternalIdsRow) (*TriggerTaskData, error) } type TriggerRepositoryImpl struct { @@ -358,14 +366,36 @@ func getEventExternalIdToRuns(opts []EventTriggerOpts, externalIdToEventIdAndFil return eventExternalIdToRuns } -func (r *TriggerRepositoryImpl) TriggerFromWorkflowNames(ctx context.Context, tenantId uuid.UUID, opts []*WorkflowNameTriggerOpts) ([]*V1TaskWithPayload, []*DAGWithData, error) { - triggerOpts, err := r.prepareTriggerFromWorkflowNames(ctx, r.pool, tenantId, opts) +func (s *sharedRepository) triggerFromWorkflowNames(ctx context.Context, tx *OptimisticTx, tenantId uuid.UUID, opts []*WorkflowNameTriggerOpts) 
([]*V1TaskWithPayload, []*DAGWithData, error) { + triggerOpts, err := s.prepareTriggerFromWorkflowNames(ctx, tx.tx, tenantId, opts) if err != nil { return nil, nil, fmt.Errorf("failed to prepare trigger from workflow names: %w", err) } - return r.triggerWorkflows(ctx, nil, tenantId, triggerOpts, nil) + return s.triggerWorkflows(ctx, tx, tenantId, triggerOpts, nil) +} + +func (r *TriggerRepositoryImpl) TriggerFromWorkflowNames(ctx context.Context, tenantId uuid.UUID, opts []*WorkflowNameTriggerOpts) ([]*V1TaskWithPayload, []*DAGWithData, error) { + tx, err := r.PrepareOptimisticTx(ctx) + + if err != nil { + return nil, nil, fmt.Errorf("failed to prepare tx: %w", err) + } + + defer tx.Rollback() + + tasks, dags, err := r.triggerFromWorkflowNames(ctx, tx, tenantId, opts) + + if err != nil { + return nil, nil, err + } + + if err := tx.Commit(ctx); err != nil { + return nil, nil, err + } + + return tasks, dags, nil } type ErrNamesNotFound struct { @@ -601,6 +631,7 @@ func (r *sharedRepository) triggerWorkflows( // a map of trigger tuples to step external IDs stepsToExternalIds := make([]map[uuid.UUID]uuid.UUID, len(tuples)) dagToTaskIds := make(map[uuid.UUID][]uuid.UUID) + dagToTaskReadableIds := make(map[uuid.UUID][]string) // generate UUIDs for each step for i, tuple := range tuples { @@ -633,6 +664,7 @@ func (r *sharedRepository) triggerWorkflows( externalId := uuid.New() stepsToExternalIds[i][step.ID] = externalId dagToTaskIds[tuple.externalId] = append(dagToTaskIds[tuple.externalId], externalId) + dagToTaskReadableIds[tuple.externalId] = append(dagToTaskReadableIds[tuple.externalId], step.ReadableId.String) } } } @@ -1014,6 +1046,7 @@ func (r *sharedRepository) triggerWorkflows( ExternalId: tuple.externalId, Input: tuple.input, TaskIds: dagToTaskIds[tuple.externalId], + TaskStepReadableIds: dagToTaskReadableIds[tuple.externalId], WorkflowId: tuple.workflowId, WorkflowVersionId: tuple.workflowVersionId, WorkflowName: tuple.workflowName, @@ -1230,6 +1263,9 @@ type 
DAGWithData struct { ParentTaskExternalID *uuid.UUID TotalTasks int + + TaskExternalIDs []uuid.UUID + TaskStepReadableIDs []string } type V1TaskWithPayload struct { @@ -1337,6 +1373,8 @@ func (r *sharedRepository) createDAGs(ctx context.Context, tx sqlcv1.DBTX, tenan AdditionalMetadata: additionalMeta, ParentTaskExternalID: &parentTaskExternalID, TotalTasks: len(opt.TaskIds), + TaskExternalIDs: opt.TaskIds, + TaskStepReadableIDs: opt.TaskStepReadableIds, }) } @@ -1527,6 +1565,7 @@ func (r *sharedRepository) registerChildWorkflows( Conditions: getChildWorkflowGroupMatches(stepExternalId, stepReadableId), SignalExternalId: tuple.parentExternalId, SignalTaskId: tuple.parentTaskId, + SignalTaskExternalId: tuple.parentExternalId, SignalTaskInsertedAt: sqlchelpers.TimestamptzFromTime(*tuple.parentTaskInsertedAt), SignalKey: &key, }) @@ -2208,3 +2247,96 @@ func (r *sharedRepository) prepareTriggerFromWorkflowNames(ctx context.Context, return triggerOpts, nil } + +type TriggerOptInvalidArgumentError struct { + Err error +} + +func (r *TriggerOptInvalidArgumentError) Error() string { + return fmt.Sprintf("err %v", r.Err) +} + +func (r *sharedRepository) NewTriggerTaskData( + ctx context.Context, + tenantId uuid.UUID, + req *v1contracts.TriggerWorkflowRequest, + parentTask *sqlcv1.FlattenExternalIdsRow, +) (*TriggerTaskData, error) { + ctx, span := telemetry.NewSpan(ctx, "sharedRepository.NewTriggerTaskData") + defer span.End() + + span.SetAttributes( + attribute.String("sharedRepository.NewTriggerTaskData.workflow_name", req.Name), + attribute.Int("sharedRepository.NewTriggerTaskData.payload_size", len(req.Input)), + attribute.Bool("sharedRepository.NewTriggerTaskData.is_child_workflow", req.ParentTaskRunExternalId != nil), + ) + + additionalMeta := "" + + if req.AdditionalMetadata != nil { + additionalMeta = *req.AdditionalMetadata + } + + var desiredWorkerId *uuid.UUID + if req.DesiredWorkerId != nil { + if *req.DesiredWorkerId != "" { + workerId, err := 
uuid.Parse(*req.DesiredWorkerId) + if err != nil { + return nil, &TriggerOptInvalidArgumentError{ + Err: fmt.Errorf("desiredWorkerId must be a valid UUID: %w", err), + } + } + desiredWorkerId = &workerId + } + } + + t := &TriggerTaskData{ + WorkflowName: req.Name, + Data: []byte(req.Input), + AdditionalMetadata: []byte(additionalMeta), + DesiredWorkerId: desiredWorkerId, + Priority: req.Priority, + } + + if len(req.DesiredWorkerLabels) > 0 { + labels := make([]*sqlcv1.GetDesiredLabelsRow, 0, len(req.DesiredWorkerLabels)) + for key, label := range req.DesiredWorkerLabels { + var comparator *string + if label.Comparator != nil { + c := label.Comparator.String() + comparator = &c + } + labels = append(labels, ProtoToDesiredWorkerLabel( + key, + label.StrValue, + label.IntValue, + label.Required, + label.Weight, + comparator, + )) + } + t.DesiredWorkerLabels = labels + } + + if req.Priority != nil { + if *req.Priority < 1 || *req.Priority > 3 { + return nil, &TriggerOptInvalidArgumentError{ + Err: fmt.Errorf("priority must be between 1 and 3, got %d", *req.Priority), + } + } + t.Priority = req.Priority + } + + if parentTask != nil { + parentExternalId := parentTask.ExternalID + childIndex := int64(*req.ChildIndex) + + t.ParentExternalId = &parentExternalId + t.ParentTaskId = &parentTask.ID + t.ParentTaskInsertedAt = &parentTask.InsertedAt.Time + t.ChildIndex = &childIndex + t.ChildKey = req.ChildKey + } + + return t, nil +} diff --git a/pkg/repository/worker.go b/pkg/repository/worker.go index a1cfa5850..173275661 100644 --- a/pkg/repository/worker.go +++ b/pkg/repository/worker.go @@ -74,6 +74,11 @@ type UpsertWorkerLabelOpts struct { StrValue *string } +type DurableTaskDispatcherLookup struct { + DispatcherId *uuid.UUID + IsEvicted bool +} + type WorkerRepository interface { ListWorkers(ctx context.Context, tenantId uuid.UUID, opts *ListWorkersOpts) ([]*sqlcv1.ListWorkersRow, error) GetWorkerById(ctx context.Context, workerId uuid.UUID) (*sqlcv1.GetWorkerByIdRow, 
error) @@ -122,6 +127,10 @@ type WorkerRepository interface { DeleteOldWorkers(ctx context.Context, tenantId uuid.UUID, lastHeartbeatBefore time.Time) (bool, error) GetDispatcherIdsForWorkers(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) (map[uuid.UUID]uuid.UUID, map[uuid.UUID]struct{}, error) + + UpdateWorkerDurableTaskDispatcherId(ctx context.Context, tenantId uuid.UUID, workerId uuid.UUID, dispatcherId uuid.UUID) error + + GetDurableDispatcherIdsForTasks(ctx context.Context, tenantId uuid.UUID, idInsertedAtTuples []IdInsertedAt) (map[IdInsertedAt]DurableTaskDispatcherLookup, error) } type workerRepository struct { @@ -767,3 +776,47 @@ func (w *workerRepository) GetDispatcherIdsForWorkers(ctx context.Context, tenan return workerIdToDispatcherId, workerIdsWithoutDispatchers, nil } + +func (w *workerRepository) UpdateWorkerDurableTaskDispatcherId(ctx context.Context, tenantId uuid.UUID, workerId uuid.UUID, dispatcherId uuid.UUID) error { + _, err := w.queries.UpdateWorkerDurableTaskDispatcherId(ctx, w.pool, sqlcv1.UpdateWorkerDurableTaskDispatcherIdParams{ + Workerid: workerId, + Dispatcherid: dispatcherId, + Tenantid: tenantId, + }) + + return err +} + +func (w *workerRepository) GetDurableDispatcherIdsForTasks(ctx context.Context, tenantId uuid.UUID, idInsertedAtTuples []IdInsertedAt) (map[IdInsertedAt]DurableTaskDispatcherLookup, error) { + taskIds := make([]int64, len(idInsertedAtTuples)) + taskInsertedAts := make([]pgtype.Timestamptz, len(idInsertedAtTuples)) + + for i, tuple := range idInsertedAtTuples { + taskIds[i] = tuple.ID + taskInsertedAts[i] = tuple.InsertedAt + } + + rows, err := w.queries.ListDurableTaskDispatcherIdsForTasks(ctx, w.pool, sqlcv1.ListDurableTaskDispatcherIdsForTasksParams{ + Tenantid: tenantId, + Taskids: taskIds, + Taskinsertedats: taskInsertedAts, + }) + + if err != nil { + return nil, fmt.Errorf("could not get durable dispatcher ids for tasks: %w", err) + } + + taskIdToDispatcherInfo := 
make(map[IdInsertedAt]DurableTaskDispatcherLookup) + + for _, row := range rows { + taskIdToDispatcherInfo[IdInsertedAt{ + ID: row.TaskID, + InsertedAt: row.TaskInsertedAt, + }] = DurableTaskDispatcherLookup{ + DispatcherId: row.DurableTaskDispatcherId, + IsEvicted: row.EvictedAt.Valid, + } + } + + return taskIdToDispatcherInfo, nil +} diff --git a/pkg/repository/workflow.go b/pkg/repository/workflow.go index b4ff4f4da..c5da84e11 100644 --- a/pkg/repository/workflow.go +++ b/pkg/repository/workflow.go @@ -558,7 +558,7 @@ func (r *workflowRepository) createWorkflowVersionTxs(ctx context.Context, tx sq var priority pgtype.Int4 if opts.DefaultPriority != nil { - priority = sqlchelpers.ToInt(*opts.DefaultPriority) + priority = sqlchelpers.ToInt(opts.DefaultPriority) } var oldWorkflowVersionId uuid.UUID @@ -828,7 +828,7 @@ func (r *workflowRepository) createJobTx(ctx context.Context, tx sqlcv1.DBTX, te } if value.IntValue != nil { - opts.IntValue = sqlchelpers.ToInt(*value.IntValue) + opts.IntValue = sqlchelpers.ToInt(value.IntValue) } if value.StrValue != nil { @@ -836,7 +836,7 @@ func (r *workflowRepository) createJobTx(ctx context.Context, tx sqlcv1.DBTX, te } if value.Weight != nil { - opts.Weight = sqlchelpers.ToInt(*value.Weight) + opts.Weight = sqlchelpers.ToInt(value.Weight) } if value.Required != nil { diff --git a/pkg/repository/workflow_schedules.go b/pkg/repository/workflow_schedules.go index 7fa34ecd0..23c318b83 100644 --- a/pkg/repository/workflow_schedules.go +++ b/pkg/repository/workflow_schedules.go @@ -193,7 +193,7 @@ func (w *workflowScheduleRepository) CreateScheduledWorkflow(ctx context.Context Valid: true, WorkflowTriggerScheduledRefMethods: sqlcv1.WorkflowTriggerScheduledRefMethodsAPI, }, - Priority: sqlchelpers.ToInt(priority), + Priority: sqlchelpers.ToInt(&priority), } created, err := w.queries.CreateWorkflowTriggerScheduledRefForWorkflow(ctx, w.pool, createParams) @@ -553,7 +553,7 @@ func (w *workflowScheduleRepository) CreateCronWorkflow(ctx 
context.Context, ten Valid: true, WorkflowTriggerCronRefMethods: sqlcv1.WorkflowTriggerCronRefMethodsAPI, }, - Priority: sqlchelpers.ToInt(priority), + Priority: sqlchelpers.ToInt(&priority), } cronTrigger, err := w.queries.CreateWorkflowTriggerCronRefForWorkflow(ctx, w.pool, createParams) diff --git a/pkg/v1/workflow/declaration.go b/pkg/v1/workflow/declaration.go index ee5cfcd2e..f62f4a6b8 100644 --- a/pkg/v1/workflow/declaration.go +++ b/pkg/v1/workflow/declaration.go @@ -9,7 +9,7 @@ import ( "strings" "time" - admincontracts "github.com/hatchet-dev/hatchet/internal/services/admin/contracts" + v1contracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1" v0Client "github.com/hatchet-dev/hatchet/pkg/client" "github.com/hatchet-dev/hatchet/pkg/client/create" "github.com/hatchet-dev/hatchet/pkg/client/rest" @@ -713,7 +713,7 @@ func (w *workflowDeclarationImpl[I, O]) Cron(ctx context.Context, name string, c Input: inputMap, } - runOpts := &admincontracts.TriggerWorkflowRequest{} + runOpts := &v1contracts.TriggerWorkflowRequest{} for _, opt := range opts { opt(runOpts) @@ -755,7 +755,7 @@ func (w *workflowDeclarationImpl[I, O]) Schedule(ctx context.Context, triggerAt Input: inputMap, } - runOpts := &admincontracts.TriggerWorkflowRequest{} + runOpts := &v1contracts.TriggerWorkflowRequest{} for _, opt := range opts { opt(runOpts) @@ -1049,7 +1049,7 @@ func RunChildWorkflow[I any, O any]( spawnOpts := &worker.SpawnWorkflowOpts{} - runOpts := &admincontracts.TriggerWorkflowRequest{} + runOpts := &v1contracts.TriggerWorkflowRequest{} for _, opt := range opts { opt(runOpts) diff --git a/pkg/worker/context.go b/pkg/worker/context.go index 3c9eec592..3dc01e973 100644 --- a/pkg/worker/context.go +++ b/pkg/worker/context.go @@ -535,6 +535,40 @@ func (h *hatchetContext) SpawnWorkflow(workflowName string, input any, opts *Spa return client.NewWorkflow(workflowRunId, listener), nil } +func desiredWorkerLabelsToProto(labels map[string]*types.DesiredWorkerLabel) 
map[string]*v1.DesiredWorkerLabels { + if labels == nil { + return nil + } + + result := make(map[string]*v1.DesiredWorkerLabels, len(labels)) + + for key, label := range labels { + proto := &v1.DesiredWorkerLabels{ + Required: &label.Required, + Weight: &label.Weight, + } + + if label.Comparator != nil { + comparator := v1.WorkerLabelComparator(*label.Comparator) + proto.Comparator = &comparator + } + + switch v := label.Value.(type) { + case string: + proto.StrValue = &v + case int: + intVal := int32(v) // nolint: gosec + proto.IntValue = &intVal + case int32: + proto.IntValue = &v + } + + result[key] = proto + } + + return result +} + // Deprecated: SpawnWorkflowsOpts is an internal type used by the new Go SDK. // Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of using this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go type SpawnWorkflowsOpts struct { diff --git a/sdks/go/internal/declaration.go b/sdks/go/internal/declaration.go index f7bde9089..0f8c8e373 100644 --- a/sdks/go/internal/declaration.go +++ b/sdks/go/internal/declaration.go @@ -9,7 +9,7 @@ import ( "strings" "time" - admincontracts "github.com/hatchet-dev/hatchet/internal/services/admin/contracts" + v1contracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1" v0Client "github.com/hatchet-dev/hatchet/pkg/client" "github.com/hatchet-dev/hatchet/pkg/client/create" "github.com/hatchet-dev/hatchet/pkg/client/rest" @@ -514,7 +514,7 @@ func (w *workflowDeclarationImpl[I, O]) Cron(ctx context.Context, name string, c Input: inputMap, } - runOpts := &admincontracts.TriggerWorkflowRequest{} + runOpts := &v1contracts.TriggerWorkflowRequest{} for _, opt := range opts { opt(runOpts) @@ -554,7 +554,7 @@ func (w *workflowDeclarationImpl[I, O]) Schedule(ctx context.Context, triggerAt Input: inputMap, } - runOpts := &admincontracts.TriggerWorkflowRequest{} + runOpts := &v1contracts.TriggerWorkflowRequest{} for _, opt := range opts { opt(runOpts) 
diff --git a/sdks/go/workflow.go b/sdks/go/workflow.go index 03a8ffe02..eca8783aa 100644 --- a/sdks/go/workflow.go +++ b/sdks/go/workflow.go @@ -70,6 +70,36 @@ func WithDesiredWorkerLabels(labels map[string]*DesiredWorkerLabel) RunOptFunc { } } +func desiredWorkerLabelsToProto(labels map[string]*DesiredWorkerLabel) map[string]*v1.DesiredWorkerLabels { + result := make(map[string]*v1.DesiredWorkerLabels, len(labels)) + + for key, label := range labels { + proto := &v1.DesiredWorkerLabels{ + Required: &label.Required, + Weight: &label.Weight, + } + + if label.Comparator != nil { + comparator := v1.WorkerLabelComparator(*label.Comparator) + proto.Comparator = &comparator + } + + switch v := label.Value.(type) { + case string: + proto.StrValue = &v + case int: + intVal := int32(v) // nolint: gosec + proto.IntValue = &intVal + case int32: + proto.IntValue = &v + } + + result[key] = proto + } + + return result +} + // convertInputToType converts input (typically map[string]interface{}) to the expected struct type func convertInputToType(input any, expectedType reflect.Type) reflect.Value { if input == nil { diff --git a/sdks/guides/python/human_in_the_loop/worker.py b/sdks/guides/python/human_in_the_loop/worker.py index 2fe257684..a0b7b922b 100644 --- a/sdks/guides/python/human_in_the_loop/worker.py +++ b/sdks/guides/python/human_in_the_loop/worker.py @@ -1,4 +1,4 @@ -from hatchet_sdk import DurableContext, EmptyModel, Hatchet, UserEventCondition +from hatchet_sdk import DurableContext, EmptyModel, Hatchet hatchet = Hatchet(debug=True) @@ -8,14 +8,13 @@ APPROVAL_EVENT_KEY = "approval:decision" # > Step 02 Wait For Event async def wait_for_approval(ctx: DurableContext) -> dict: run_id = ctx.workflow_run_id - approval = await ctx.aio_wait_for( - "approval", - UserEventCondition( - event_key=APPROVAL_EVENT_KEY, - expression=f"input.runId == '{run_id}'", - ), + approval = await ctx.aio_wait_for_event( + APPROVAL_EVENT_KEY, + f"input.runId == '{run_id}'", ) return approval + 
+ # !! @@ -27,6 +26,8 @@ async def approval_task(input: EmptyModel, ctx: DurableContext) -> dict: if approval.get("approved"): return {"status": "approved", "action": proposed_action} return {"status": "rejected", "reason": approval.get("reason", "")} + + # !! diff --git a/sdks/python/.pymon b/sdks/python/.pymon new file mode 100644 index 000000000..c0a611592 Binary files /dev/null and b/sdks/python/.pymon differ diff --git a/sdks/python/apply_patches.py b/sdks/python/apply_patches.py index 90f603107..9090838cc 100644 --- a/sdks/python/apply_patches.py +++ b/sdks/python/apply_patches.py @@ -52,7 +52,11 @@ def atomically_patch_file( def patch_contract_import_paths(content: str) -> str: - return apply_patch(content, r"\bfrom v1\b", "from hatchet_sdk.contracts.v1") + content = apply_patch(content, r"\bfrom v1\b", "from hatchet_sdk.contracts.v1") + content = apply_patch( + content, r"\bfrom workflows\b", "from hatchet_sdk.contracts.workflows" + ) + return content def patch_grpc_dispatcher_import(content: str) -> str: @@ -588,6 +592,38 @@ def patch_workflow_run_metrics_counts_return_type(content: str) -> str: return apply_patch(content, pattern, replacement) +def patch_workflows_pb2_reexport(content: str) -> str: + """Re-export types that moved to v1/shared/trigger.proto for backwards compatibility.""" + reexport = ( + "\n# Re-export for backwards compatibility\n" + "from hatchet_sdk.contracts.v1.shared.trigger_pb2 import " + "TriggerWorkflowRequest as TriggerWorkflowRequest # noqa: F401\n" + "from hatchet_sdk.contracts.v1.shared.trigger_pb2 import " + "DesiredWorkerLabels as DesiredWorkerLabels # noqa: F401\n" + "from hatchet_sdk.contracts.v1.shared.trigger_pb2 import " + "WorkerLabelComparator as WorkerLabelComparator # noqa: F401\n" + ) + if "TriggerWorkflowRequest as TriggerWorkflowRequest" in content: + return content + return content + reexport + + +def patch_workflows_pyi_reexport(content: str) -> str: + """Add type stubs for re-exported types.""" + reexport = ( 
+ "\n# Re-export for backwards compatibility\n" + "from hatchet_sdk.contracts.v1.shared.trigger_pb2 import " + "TriggerWorkflowRequest as TriggerWorkflowRequest\n" + "from hatchet_sdk.contracts.v1.shared.trigger_pb2 import " + "DesiredWorkerLabels as DesiredWorkerLabels\n" + "from hatchet_sdk.contracts.v1.shared.trigger_pb2 import " + "WorkerLabelComparator as WorkerLabelComparator\n" + ) + if "TriggerWorkflowRequest as TriggerWorkflowRequest" in content: + return content + return content + reexport + + if __name__ == "__main__": atomically_patch_file( "hatchet_sdk/clients/rest/api_client.py", @@ -622,3 +658,12 @@ if __name__ == "__main__": apply_patches_to_matching_files("hatchet_sdk/contracts", "*_grpc.py", grpc_patches) apply_patches_to_matching_files("hatchet_sdk/contracts", "*_pb2.py", pb2_patches) apply_patches_to_matching_files("hatchet_sdk/contracts", "*_pb2.pyi", pb2_patches) + + atomically_patch_file( + "hatchet_sdk/contracts/workflows_pb2.py", + [patch_workflows_pb2_reexport], + ) + atomically_patch_file( + "hatchet_sdk/contracts/workflows_pb2.pyi", + [patch_workflows_pyi_reexport], + ) diff --git a/sdks/python/conftest.py b/sdks/python/conftest.py index 7b77c196f..c5cf38349 100644 --- a/sdks/python/conftest.py +++ b/sdks/python/conftest.py @@ -7,6 +7,8 @@ import pytest_asyncio from pytest import FixtureRequest from hatchet_sdk import Hatchet +from hatchet_sdk.deprecated.deprecation import semver_less_than +from hatchet_sdk.engine_version import MinEngineVersion from tests.worker_fixture import hatchet_worker @@ -17,6 +19,26 @@ async def hatchet() -> AsyncGenerator[Hatchet, None]: ) +@pytest_asyncio.fixture(scope="session", loop_scope="session") +async def engine_version(hatchet: Hatchet) -> str | None: + return await hatchet.aio_get_engine_version() + + +@pytest_asyncio.fixture(scope="session", loop_scope="session") +async def supports_durable_eviction(engine_version: str | None) -> bool: + if not engine_version: + return False + return not 
semver_less_than(engine_version, MinEngineVersion.DURABLE_EVICTION) + + +@pytest.fixture() +def _skip_unless_durable_eviction(supports_durable_eviction: bool) -> None: + if not supports_durable_eviction: + pytest.skip( + f"Engine does not support durable eviction (requires >= {MinEngineVersion.DURABLE_EVICTION})" + ) + + @pytest.fixture(scope="session", autouse=True) def worker() -> Generator[Popen[bytes], None, None]: command = ["poetry", "run", "python", "examples/worker.py"] diff --git a/sdks/python/examples/dependency_injection/test_dependency_injection.py b/sdks/python/examples/dependency_injection/test_dependency_injection.py index 6236b500a..e68a9d901 100644 --- a/sdks/python/examples/dependency_injection/test_dependency_injection.py +++ b/sdks/python/examples/dependency_injection/test_dependency_injection.py @@ -8,12 +8,9 @@ from examples.dependency_injection.worker import ( SYNC_CM_DEPENDENCY_VALUE, SYNC_DEPENDENCY_VALUE, Output, - async_dep, async_task_with_dependencies, di_workflow, durable_async_task_with_dependencies, - durable_sync_task_with_dependencies, - sync_dep, sync_task_with_dependencies, task_with_type_aliases, ) @@ -27,7 +24,6 @@ from hatchet_sdk.runnables.workflow import Standalone async_task_with_dependencies, sync_task_with_dependencies, durable_async_task_with_dependencies, - durable_sync_task_with_dependencies, ], ) @pytest.mark.asyncio(loop_scope="session") @@ -51,7 +47,7 @@ async def test_di_standalones( async def test_di_workflows() -> None: result = await di_workflow.aio_run() - assert len(result) == 4 + assert len(result) == 3 for output in result.values(): parsed = Output.model_validate(output) diff --git a/sdks/python/examples/dependency_injection/worker.py b/sdks/python/examples/dependency_injection/worker.py index 66821e9ee..e31a5a61e 100644 --- a/sdks/python/examples/dependency_injection/worker.py +++ b/sdks/python/examples/dependency_injection/worker.py @@ -196,27 +196,6 @@ async def durable_async_task_with_dependencies( ) 
-@hatchet.durable_task() -def durable_sync_task_with_dependencies( - _i: EmptyModel, - ctx: DurableContext, - async_dep: Annotated[str, Depends(async_dep)], - sync_dep: Annotated[str, Depends(sync_dep)], - async_cm_dep: Annotated[str, Depends(async_cm_dep)], - sync_cm_dep: Annotated[str, Depends(sync_cm_dep)], - chained_dep: Annotated[str, Depends(chained_dep)], - chained_async_dep: Annotated[str, Depends(chained_async_dep)], -) -> Output: - return Output( - sync_dep=sync_dep, - async_dep=async_dep, - async_cm_dep=async_cm_dep, - sync_cm_dep=sync_cm_dep, - chained_dep=chained_dep, - chained_async_dep=chained_async_dep, - ) - - di_workflow = hatchet.workflow( name="dependency-injection-workflow", ) @@ -285,27 +264,6 @@ async def wf_durable_async_task_with_dependencies( ) -@di_workflow.durable_task() -def wf_durable_sync_task_with_dependencies( - _i: EmptyModel, - ctx: DurableContext, - async_dep: Annotated[str, Depends(async_dep)], - sync_dep: Annotated[str, Depends(sync_dep)], - async_cm_dep: Annotated[str, Depends(async_cm_dep)], - sync_cm_dep: Annotated[str, Depends(sync_cm_dep)], - chained_dep: Annotated[str, Depends(chained_dep)], - chained_async_dep: Annotated[str, Depends(chained_async_dep)], -) -> Output: - return Output( - sync_dep=sync_dep, - async_dep=async_dep, - async_cm_dep=async_cm_dep, - sync_cm_dep=sync_cm_dep, - chained_dep=chained_dep, - chained_async_dep=chained_async_dep, - ) - - def main() -> None: worker = hatchet.worker( "dependency-injection-worker", @@ -313,7 +271,6 @@ def main() -> None: async_task_with_dependencies, sync_task_with_dependencies, durable_async_task_with_dependencies, - durable_sync_task_with_dependencies, di_workflow, task_with_type_aliases, ], diff --git a/sdks/python/examples/durable/test_durable.py b/sdks/python/examples/durable/test_durable.py index 1eac42388..1b45ae9ce 100644 --- a/sdks/python/examples/durable/test_durable.py +++ b/sdks/python/examples/durable/test_durable.py @@ -1,23 +1,42 @@ import asyncio +import 
time import pytest +from uuid import uuid4 from examples.durable.worker import ( EVENT_KEY, SLEEP_TIME, + REPLAY_RESET_SLEEP_TIME, + durable_sleep_event_spawn, + durable_with_bulk_spawn, + durable_with_spawn, durable_workflow, wait_for_sleep_twice, + durable_spawn_dag, + durable_non_determinism, + durable_replay_reset, + memo_task, + MemoInput, + DurableBulkSpawnInput, + memo_now_caching, + AwaitedEvent, ) from hatchet_sdk import Hatchet +requires_durable_eviction = pytest.mark.usefixtures("_skip_unless_durable_eviction") + @pytest.mark.asyncio(loop_scope="session") -async def test_durable(hatchet: Hatchet) -> None: +async def test_durable_workflow(hatchet: Hatchet) -> None: ref = durable_workflow.run_no_wait() + id = str(uuid4()) await asyncio.sleep(SLEEP_TIME + 10) - hatchet.event.push(EVENT_KEY, {"test": "test"}) + event = await hatchet.event.aio_push( + EVENT_KEY, AwaitedEvent(id=id).model_dump(mode="json") + ) result = await ref.aio_result() @@ -34,21 +53,23 @@ async def test_durable(hatchet: Hatchet) -> None: assert result["durable_task"]["status"] == "success" + # hack for old engine test + assert ( + result["durable_task"]["event_id"] == "" + or result["durable_task"]["event_id"] == id + ) + assert result["durable_task"]["sleep_duration_seconds"] == SLEEP_TIME + wait_group_1 = result["wait_for_or_group_1"] wait_group_2 = result["wait_for_or_group_2"] - assert abs(wait_group_1["runtime"] - SLEEP_TIME) < 3 - assert wait_group_1["key"] == wait_group_2["key"] assert wait_group_1["key"] == "CREATE" assert "sleep" in wait_group_1["event_id"] assert "event" in wait_group_2["event_id"] - wait_for_multi_sleep = result["wait_for_multi_sleep"] - - assert wait_for_multi_sleep["runtime"] > 3 * SLEEP_TIME - +@requires_durable_eviction @pytest.mark.asyncio(loop_scope="session") async def test_durable_sleep_cancel_replay(hatchet: Hatchet) -> None: first_sleep = await wait_for_sleep_twice.aio_run_no_wait() @@ -59,11 +80,228 @@ async def 
test_durable_sleep_cancel_replay(hatchet: Hatchet) -> None: await first_sleep.aio_result() + replay_start = time.time() await hatchet.runs.aio_replay( first_sleep.workflow_run_id, ) second_sleep_result = await first_sleep.aio_result() + replay_elapsed = time.time() - replay_start - """We've already slept for a little bit by the time the task is cancelled""" - assert second_sleep_result["runtime"] <= SLEEP_TIME + assert second_sleep_result["runtime"] < SLEEP_TIME + assert replay_elapsed <= SLEEP_TIME + + +@pytest.mark.asyncio(loop_scope="session") +async def test_durable_child_spawn() -> None: + result = await durable_with_spawn.aio_run() + + assert result["child_output"] == {"message": "hello from child 1"} + + +@pytest.mark.asyncio(loop_scope="session") +async def test_durable_child_bulk_spawn() -> None: + n = 10 + result = await durable_with_bulk_spawn.aio_run(DurableBulkSpawnInput(n=n)) + + assert result["child_outputs"] == [ + {"message": "hello from child " + str(i)} for i in range(n) + ] + + +@requires_durable_eviction +@pytest.mark.asyncio(loop_scope="session") +async def test_durable_sleep_event_spawn_replay(hatchet: Hatchet) -> None: + start = time.time() + ref = durable_sleep_event_spawn.run_no_wait() + + await asyncio.sleep(SLEEP_TIME + 5) + hatchet.event.push(EVENT_KEY, {"test": "test"}) + + result = await ref.aio_result() + first_elapsed = time.time() - start + + assert result["child_output"] == {"message": "hello from child 1"} + assert first_elapsed >= SLEEP_TIME + + replay_start = time.time() + await hatchet.runs.aio_replay(ref.workflow_run_id) + replayed_result = await ref.aio_result() + replay_elapsed = time.time() - replay_start + + assert replayed_result["child_output"] == {"message": "hello from child 1"} + assert replay_elapsed < SLEEP_TIME + + +@requires_durable_eviction +@pytest.mark.asyncio(loop_scope="session") +async def test_durable_completed_replay(hatchet: Hatchet) -> None: + ref = wait_for_sleep_twice.run_no_wait() + + start = 
time.time() + first_result = await ref.aio_result() + elapsed = time.time() - start + + assert first_result["runtime"] >= SLEEP_TIME + assert elapsed >= SLEEP_TIME + + start = time.time() + await hatchet.runs.aio_replay(ref.workflow_run_id) + replayed_result = await ref.aio_result() + elapsed = time.time() - start + + assert replayed_result["runtime"] < SLEEP_TIME + assert elapsed < SLEEP_TIME + + +@pytest.mark.asyncio(loop_scope="session") +async def test_durable_spawn_dag() -> None: + start = time.time() + result = await durable_spawn_dag.aio_run() + elapsed = time.time() - start + + assert result["sleep_duration"] >= 1 + assert result["spawn_duration"] >= 5 + assert elapsed >= 5 + assert elapsed <= 15 + + +@requires_durable_eviction +@pytest.mark.asyncio(loop_scope="session") +async def test_durable_non_determinism(hatchet: Hatchet) -> None: + ref = await durable_non_determinism.aio_run_no_wait() + result = await ref.aio_result() + + assert result.sleep_time > result.attempt_number + assert ( ## headroom to prevent flakiness + result.sleep_time < result.attempt_number * 3 + ) + assert not result.non_determinism_detected + + await hatchet.runs.aio_replay(ref.workflow_run_id) + + replayed_result = await ref.aio_result() + + assert replayed_result.non_determinism_detected + assert replayed_result.node_id == 1 + assert replayed_result.attempt_number == 2 + + +@requires_durable_eviction +@pytest.mark.parametrize("node_id", [1, 2, 3]) +@pytest.mark.asyncio(loop_scope="session") +async def test_durable_replay_reset(hatchet: Hatchet, node_id: int) -> None: + ref = await durable_replay_reset.aio_run_no_wait() + + result = await ref.aio_result() + + assert result.sleep_1_duration >= REPLAY_RESET_SLEEP_TIME + assert result.sleep_2_duration >= REPLAY_RESET_SLEEP_TIME + assert result.sleep_3_duration >= REPLAY_RESET_SLEEP_TIME + + await hatchet.runs.aio_reset_durable_task( + ref.workflow_run_id, node_id=node_id, branch_id=1 + ) + + start = time.time() + reset_result = await 
ref.aio_result() + reset_elapsed = time.time() - start + + durations = [ + reset_result.sleep_1_duration, + reset_result.sleep_2_duration, + reset_result.sleep_3_duration, + ] + + for i, duration in enumerate(durations, start=1): + if i < node_id: + assert duration < REPLAY_RESET_SLEEP_TIME + else: + assert duration >= REPLAY_RESET_SLEEP_TIME + + sleeps_to_redo = 3 - node_id + 1 + assert reset_elapsed >= sleeps_to_redo * REPLAY_RESET_SLEEP_TIME + + +@requires_durable_eviction +@pytest.mark.asyncio(loop_scope="session") +async def test_durable_branching_off_branch(hatchet: Hatchet) -> None: + ref = await durable_replay_reset.aio_run_no_wait() + + result = await ref.aio_result() + + assert result.sleep_1_duration >= REPLAY_RESET_SLEEP_TIME + assert result.sleep_2_duration >= REPLAY_RESET_SLEEP_TIME + assert result.sleep_3_duration >= REPLAY_RESET_SLEEP_TIME + + reset_from_node_id = 1 + + await hatchet.runs.aio_reset_durable_task( + ref.workflow_run_id, node_id=reset_from_node_id, branch_id=1 + ) + + start = time.time() + await asyncio.sleep(1) + reset_result = await ref.aio_result() + reset_elapsed = time.time() - start + + assert reset_result.sleep_1_duration >= REPLAY_RESET_SLEEP_TIME + assert reset_result.sleep_2_duration >= REPLAY_RESET_SLEEP_TIME + assert reset_result.sleep_3_duration >= REPLAY_RESET_SLEEP_TIME + + sleeps_to_redo = 3 - reset_from_node_id + 1 + assert reset_elapsed >= sleeps_to_redo * REPLAY_RESET_SLEEP_TIME + + reset_from_node_id = 2 + await hatchet.runs.aio_reset_durable_task( + ## branch off branch 2 this time + ref.workflow_run_id, + node_id=reset_from_node_id, + branch_id=2, + ) + + start = time.time() + await asyncio.sleep(1) + reset_result = await ref.aio_result() + reset_elapsed = time.time() - start + + assert reset_result.sleep_1_duration < REPLAY_RESET_SLEEP_TIME + assert reset_result.sleep_2_duration >= REPLAY_RESET_SLEEP_TIME + assert reset_result.sleep_3_duration >= REPLAY_RESET_SLEEP_TIME + + sleeps_to_redo = 3 - reset_from_node_id 
+ 1 + assert reset_elapsed >= sleeps_to_redo * REPLAY_RESET_SLEEP_TIME + + +@requires_durable_eviction +@pytest.mark.asyncio(loop_scope="session") +async def test_durable_memoization_via_replay(hatchet: Hatchet) -> None: + message = str(uuid4()) + start = time.time() + ref = await memo_task.aio_run_no_wait(MemoInput(message=message)) + result_1 = await ref.aio_result() + duration_1 = time.time() - start + + await hatchet.runs.aio_replay(ref.workflow_run_id) + + start = time.time() + result_2 = await ref.aio_result() + duration_2 = time.time() - start + + assert duration_1 >= SLEEP_TIME + assert duration_2 < 1 + assert result_1.message == result_2.message + + +@requires_durable_eviction +@pytest.mark.asyncio(loop_scope="session") +async def test_durable_memo_now_caching(hatchet: Hatchet) -> None: + ref = await memo_now_caching.aio_run_no_wait() + + result_1 = await ref.aio_result() + + await hatchet.runs.aio_replay(ref.workflow_run_id) + + result_2 = await ref.aio_result() + + assert result_1["start_time"] == result_2["start_time"] diff --git a/sdks/python/examples/durable/trigger.py b/sdks/python/examples/durable/trigger.py index a124984bf..70b747d5e 100644 --- a/sdks/python/examples/durable/trigger.py +++ b/sdks/python/examples/durable/trigger.py @@ -6,6 +6,7 @@ from examples.durable.worker import ( durable_workflow, ephemeral_workflow, hatchet, + AwaitedEvent, ) durable_workflow.run_no_wait() @@ -15,4 +16,4 @@ print("Sleeping") time.sleep(SLEEP_TIME + 2) print("Pushing event") -hatchet.event.push(EVENT_KEY, {}) +hatchet.event.push(EVENT_KEY, AwaitedEvent(id="123").model_dump(mode="json")) diff --git a/sdks/python/examples/durable/worker.py b/sdks/python/examples/durable/worker.py index 13fc2083a..5463faae8 100644 --- a/sdks/python/examples/durable/worker.py +++ b/sdks/python/examples/durable/worker.py @@ -1,8 +1,11 @@ import asyncio import time from datetime import timedelta +from typing import Any from uuid import uuid4 +from pydantic import BaseModel + from 
hatchet_sdk import ( Context, DurableContext, @@ -12,9 +15,47 @@ from hatchet_sdk import ( UserEventCondition, or_, ) +from hatchet_sdk.exceptions import NonDeterminismError hatchet = Hatchet(debug=True) + +dag_child_workflow = hatchet.workflow(name="dag-child-workflow") + + +@dag_child_workflow.task() +async def dag_child_1(input: EmptyModel, ctx: Context) -> dict[str, str]: + await asyncio.sleep(1) + return {"result": "child1"} + + +@dag_child_workflow.task(parents=[dag_child_1]) +async def dag_child_2(input: EmptyModel, ctx: Context) -> dict[str, str]: + await asyncio.sleep(5) + return {"result": "child2"} + + +@hatchet.durable_task(execution_timeout=timedelta(seconds=10)) +async def durable_spawn_dag(input: EmptyModel, ctx: DurableContext) -> dict[str, Any]: + # NOTE: typically its not safe to use time.time() in a durable task, but + # this test assumes that the task is not replayed or evicted and it is + # used to ensure that the waits are accurate relative to the single invocation. + sleep_start = time.time() + sleep_result = await ctx.aio_sleep_for(timedelta(seconds=1)) + sleep_duration = time.time() - sleep_start + + spawn_start = time.time() + spawn_result = await dag_child_workflow.aio_run() + spawn_duration = time.time() - spawn_start + + return { + "sleep_duration": sleep_duration, + "sleep_result": sleep_result, + "spawn_duration": spawn_duration, + "spawn_result": spawn_result, + } + + # > Create a durable workflow durable_workflow = hatchet.workflow(name="DurableWorkflow") # !! 
@@ -26,6 +67,7 @@ ephemeral_workflow = hatchet.workflow(name="EphemeralWorkflow") # > Add durable task EVENT_KEY = "durable-example:event" SLEEP_TIME = 5 +REPLAY_RESET_SLEEP_TIME = 3 @durable_workflow.task() @@ -33,21 +75,26 @@ async def ephemeral_task(input: EmptyModel, ctx: Context) -> None: print("Running non-durable task") +class AwaitedEvent(BaseModel): + id: str + + @durable_workflow.durable_task() -async def durable_task(input: EmptyModel, ctx: DurableContext) -> dict[str, str]: +async def durable_task(input: EmptyModel, ctx: DurableContext) -> dict[str, str | int]: print("Waiting for sleep") - await ctx.aio_sleep_for(duration=timedelta(seconds=SLEEP_TIME)) + sleep = await ctx.aio_sleep_for(duration=timedelta(seconds=SLEEP_TIME)) print("Sleep finished") print("Waiting for event") - await ctx.aio_wait_for( - "event", - UserEventCondition(event_key=EVENT_KEY, expression="true"), + event = await ctx.aio_wait_for_event( + EVENT_KEY, "true", payload_validator=AwaitedEvent ) print("Event received") return { "status": "success", + "event_id": event.id, + "sleep_duration_seconds": sleep.duration.seconds, } @@ -60,7 +107,7 @@ async def durable_task(input: EmptyModel, ctx: DurableContext) -> dict[str, str] @durable_workflow.durable_task() async def wait_for_or_group_1( _i: EmptyModel, ctx: DurableContext -) -> dict[str, str | int]: +) -> dict[str, str | int | float]: start = time.time() wait_result = await ctx.aio_wait_for( uuid4().hex, @@ -74,7 +121,7 @@ async def wait_for_or_group_1( event_id = list(wait_result[key].keys())[0] return { - "runtime": int(time.time() - start), + "runtime": time.time() - start, "key": key, "event_id": event_id, } @@ -86,7 +133,7 @@ async def wait_for_or_group_1( @durable_workflow.durable_task() async def wait_for_or_group_2( _i: EmptyModel, ctx: DurableContext -) -> dict[str, str | int]: +) -> dict[str, str | int | float]: start = time.time() wait_result = await ctx.aio_wait_for( uuid4().hex, @@ -100,7 +147,7 @@ async def 
wait_for_or_group_2( event_id = list(wait_result[key].keys())[0] return { - "runtime": int(time.time() - start), + "runtime": time.time() - start, "key": key, "event_id": event_id, } @@ -109,7 +156,7 @@ async def wait_for_or_group_2( @durable_workflow.durable_task() async def wait_for_multi_sleep( _i: EmptyModel, ctx: DurableContext -) -> dict[str, str | int]: +) -> dict[str, str | float]: start = time.time() for _ in range(3): @@ -118,7 +165,7 @@ async def wait_for_multi_sleep( ) return { - "runtime": int(time.time() - start), + "runtime": time.time() - start, } @@ -127,10 +174,18 @@ def ephemeral_task_2(input: EmptyModel, ctx: Context) -> None: print("Running non-durable task") +@hatchet.durable_task() +async def memo_now_caching(_i: EmptyModel, ctx: DurableContext) -> dict[str, str]: + now = await ctx.aio_now() + return { + "start_time": now.isoformat(), + } + + @hatchet.durable_task() async def wait_for_sleep_twice( input: EmptyModel, ctx: DurableContext -) -> dict[str, int]: +) -> dict[str, float]: try: start = time.time() @@ -139,16 +194,163 @@ async def wait_for_sleep_twice( ) return { - "runtime": int(time.time() - start), + "runtime": time.time() - start, } except asyncio.CancelledError: - return {"runtime": -1} + return {"runtime": -1.0} + + +class DurableBulkSpawnInput(BaseModel): + n: int = 1 + + +@hatchet.task(input_validator=DurableBulkSpawnInput) +def spawn_child_task(input: DurableBulkSpawnInput, ctx: Context) -> dict[str, str]: + return {"message": "hello from child " + str(input.n)} + + +@hatchet.durable_task(execution_timeout=timedelta(seconds=10)) +async def durable_with_spawn(input: EmptyModel, ctx: DurableContext) -> dict[str, Any]: + child_result = await spawn_child_task.aio_run() + return {"child_output": child_result} + + +@hatchet.durable_task(input_validator=DurableBulkSpawnInput) +async def durable_with_bulk_spawn( + input: DurableBulkSpawnInput, ctx: DurableContext +) -> dict[str, Any]: + child_results = await 
spawn_child_task.aio_run_many( + [ + spawn_child_task.create_bulk_run_item( + input=DurableBulkSpawnInput(n=i), + ) + for i in range(input.n) + ] + ) + return {"child_outputs": child_results} + + +@hatchet.durable_task() +async def durable_sleep_event_spawn( + input: EmptyModel, ctx: DurableContext +) -> dict[str, Any]: + start = time.time() + + await ctx.aio_sleep_for(timedelta(seconds=SLEEP_TIME)) + + await ctx.aio_wait_for_event( + EVENT_KEY, + "true", + ) + + child_result = await spawn_child_task.aio_run() + + return { + "runtime": time.time() - start, + "child_output": child_result, + } + + +class NonDeterminismOutput(BaseModel): + attempt_number: int + sleep_time: int + + non_determinism_detected: bool = False + node_id: int | None = None + + +@hatchet.durable_task(execution_timeout=timedelta(seconds=10)) +async def durable_non_determinism( + input: EmptyModel, ctx: DurableContext +) -> NonDeterminismOutput: + sleep_time = ctx.attempt_number * 2 + + try: + await ctx.aio_sleep_for(timedelta(seconds=sleep_time)) + except NonDeterminismError as e: + return NonDeterminismOutput( + attempt_number=ctx.attempt_number, + sleep_time=sleep_time, + non_determinism_detected=True, + node_id=e.node_id, + ) + + return NonDeterminismOutput( + attempt_number=ctx.attempt_number, + sleep_time=sleep_time, + ) + + +class ReplayResetResponse(BaseModel): + sleep_1_duration: float + sleep_2_duration: float + sleep_3_duration: float + + +@hatchet.durable_task(execution_timeout=timedelta(seconds=20)) +async def durable_replay_reset( + input: EmptyModel, ctx: DurableContext +) -> ReplayResetResponse: + start = time.time() + await ctx.aio_sleep_for(timedelta(seconds=REPLAY_RESET_SLEEP_TIME)) + sleep_1_duration = time.time() - start + + start = time.time() + await ctx.aio_sleep_for(timedelta(seconds=REPLAY_RESET_SLEEP_TIME)) + sleep_2_duration = time.time() - start + + start = time.time() + await ctx.aio_sleep_for(timedelta(seconds=REPLAY_RESET_SLEEP_TIME)) + sleep_3_duration = 
time.time() - start + + return ReplayResetResponse( + sleep_1_duration=sleep_1_duration, + sleep_2_duration=sleep_2_duration, + sleep_3_duration=sleep_3_duration, + ) + + +class SleepResult(BaseModel): + message: str + duration: float + + +class MemoInput(BaseModel): + message: str + + +async def expensive_computation(message: str) -> SleepResult: + await asyncio.sleep(SLEEP_TIME) + + return SleepResult(message=message, duration=SLEEP_TIME) + + +@hatchet.durable_task(input_validator=MemoInput) +async def memo_task(input: MemoInput, ctx: DurableContext) -> SleepResult: + start = time.time() + res = await ctx._aio_memo( + expensive_computation, + SleepResult, + input.message, + ) + + return SleepResult(message=res.message, duration=time.time() - start) def main() -> None: worker = hatchet.worker( "durable-worker", - workflows=[durable_workflow, ephemeral_workflow, wait_for_sleep_twice], + workflows=[ + durable_workflow, + ephemeral_workflow, + wait_for_sleep_twice, + spawn_child_task, + durable_with_spawn, + durable_with_bulk_spawn, + durable_sleep_event_spawn, + durable_non_determinism, + durable_replay_reset, + ], ) worker.start() diff --git a/sdks/python/examples/durable_event/worker.py b/sdks/python/examples/durable_event/worker.py index bcae3539b..5cc555990 100644 --- a/sdks/python/examples/durable_event/worker.py +++ b/sdks/python/examples/durable_event/worker.py @@ -8,9 +8,8 @@ EVENT_KEY = "user:update" # > Durable Event @hatchet.durable_task(name="DurableEventTask") async def durable_event_task(input: EmptyModel, ctx: DurableContext) -> None: - res = await ctx.aio_wait_for( - "event", - UserEventCondition(event_key="user:update"), + res = await ctx.aio_wait_for_event( + "user:update", ) print("got event", res) @@ -24,12 +23,7 @@ async def durable_event_task_with_filter( input: EmptyModel, ctx: DurableContext ) -> None: # > Durable Event With Filter - res = await ctx.aio_wait_for( - "event", - UserEventCondition( - event_key="user:update", 
expression="input.user_id == '1234'" - ), - ) + res = await ctx.aio_wait_for_event("user:update", "input.user_id == '1234'") # !! print("got event", res) diff --git a/sdks/python/examples/durable_eviction/capacity_worker.py b/sdks/python/examples/durable_eviction/capacity_worker.py new file mode 100644 index 000000000..9e49e8ce8 --- /dev/null +++ b/sdks/python/examples/durable_eviction/capacity_worker.py @@ -0,0 +1,23 @@ +""" +Dedicated worker for capacity-eviction e2e tests. + +Runs with durable_slots=1 so that a single waiting durable task triggers +capacity pressure and gets evicted (even with ttl=None). +""" + +from __future__ import annotations + +from examples.durable_eviction.worker import capacity_evictable_sleep, hatchet + + +def main() -> None: + worker = hatchet.worker( + "capacity-eviction-worker", + durable_slots=1, + workflows=[capacity_evictable_sleep], + ) + worker.start() + + +if __name__ == "__main__": + main() diff --git a/sdks/python/examples/durable_eviction/test_durable_eviction.py b/sdks/python/examples/durable_eviction/test_durable_eviction.py new file mode 100644 index 000000000..249acc699 --- /dev/null +++ b/sdks/python/examples/durable_eviction/test_durable_eviction.py @@ -0,0 +1,455 @@ +""" +Integration tests for durable slot eviction. 
+ +Run with: + cd sdks/python + poetry run pytest examples/durable_eviction/test_durable_eviction.py -v -s +""" + +from __future__ import annotations + +import asyncio +import signal +import time +from subprocess import Popen +from typing import Any + +import psutil +import pytest + +from examples.durable_eviction.worker import ( + EVENT_KEY, + capacity_evictable_sleep, + evictable_child_bulk_spawn, + evictable_child_spawn, + evictable_sleep, + evictable_wait_for_event, + multiple_eviction, + non_evictable_sleep, +) +from hatchet_sdk import Hatchet +from hatchet_sdk.clients.admin import WorkflowRunDetail +from hatchet_sdk.clients.rest.api.task_api import TaskApi +from hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus + +POLL_INTERVAL = 0.2 +MAX_POLLS = 150 + +requires_durable_eviction = pytest.mark.usefixtures("_skip_unless_durable_eviction") + + +async def _poll_until_status( + hatchet: Hatchet, + workflow_run_id: str, + target_status: V1TaskStatus, +) -> WorkflowRunDetail: + """Poll gRPC run details until any task reaches *target_status* (or timeout).""" + for _ in range(MAX_POLLS): + details = await hatchet.runs.aio_get_details(workflow_run_id) + if any(t.status == target_status for t in details.task_runs.values()): + return details + await asyncio.sleep(POLL_INTERVAL) + + return await hatchet.runs.aio_get_details(workflow_run_id) + + +async def _poll_until_evicted( + hatchet: Hatchet, + workflow_run_id: str, +) -> WorkflowRunDetail: + """Poll gRPC run details until any task has is_evicted=True (or timeout).""" + for _ in range(MAX_POLLS): + details = await hatchet.runs.aio_get_details(workflow_run_id) + if any(t.is_evicted for t in details.task_runs.values()): + return details + await asyncio.sleep(POLL_INTERVAL) + + return await hatchet.runs.aio_get_details(workflow_run_id) + + +def _has_evicted_task(details: WorkflowRunDetail) -> bool: + return any(t.is_evicted for t in details.task_runs.values()) + + +def _get_task_id(details: 
WorkflowRunDetail) -> str: + return list(details.task_runs.values())[0].external_id + + +@requires_durable_eviction +@pytest.mark.asyncio(loop_scope="session") +async def test_non_evictable_task_completes(hatchet: Hatchet) -> None: + """A durable task with eviction disabled should finish normally.""" + start = time.time() + result = await non_evictable_sleep.aio_run() + elapsed = time.time() - start + + assert result["status"] == "completed" + assert elapsed >= 10 + + +@requires_durable_eviction +@pytest.mark.asyncio(loop_scope="session") +async def test_non_evictable_task_not_evicted(hatchet: Hatchet) -> None: + """A durable task with eviction disabled should never be evicted, even past TTL.""" + ref = non_evictable_sleep.run_no_wait() + + await _poll_until_status(hatchet, ref.workflow_run_id, V1TaskStatus.RUNNING) + await asyncio.sleep(7) # Past EVICTION_TTL (5s), task still sleeping (10s total) + details = await hatchet.runs.aio_get_details(ref.workflow_run_id) + assert not _has_evicted_task( + details + ), f"Non-evictable task should never be evicted, got is_evicted=True" + + result = await ref.aio_result() + assert result["status"] == "completed" + + +@requires_durable_eviction +@pytest.mark.asyncio(loop_scope="session") +async def test_evictable_task_is_evicted(hatchet: Hatchet) -> None: + """After the TTL, the eviction manager should evict the task.""" + ref = evictable_sleep.run_no_wait() + + await _poll_until_status(hatchet, ref.workflow_run_id, V1TaskStatus.RUNNING) + details = await _poll_until_evicted(hatchet, ref.workflow_run_id) + + assert _has_evicted_task(details), f"Expected is_evicted=True after eviction" + + +@requires_durable_eviction +@pytest.mark.asyncio(loop_scope="session") +async def test_evictable_task_restore(hatchet: Hatchet) -> None: + """After eviction, a REST restore should re-enqueue the task.""" + ref = evictable_sleep.run_no_wait() + + await _poll_until_status(hatchet, ref.workflow_run_id, V1TaskStatus.RUNNING) + details = await 
_poll_until_evicted(hatchet, ref.workflow_run_id) + task_id = _get_task_id(details) + + with hatchet.runs.client() as client: + TaskApi(client).v1_task_restore(task=task_id) + + details = await _poll_until_status( + hatchet, ref.workflow_run_id, V1TaskStatus.RUNNING + ) + statuses = {t.status for t in details.task_runs.values()} + + assert ( + V1TaskStatus.RUNNING in statuses + ), f"Expected RUNNING after restore, got: {statuses}" + + +@requires_durable_eviction +@pytest.mark.asyncio(loop_scope="session") +async def test_evictable_task_restore_completes(hatchet: Hatchet) -> None: + """After eviction and restore, evictable_sleep should complete and return a result.""" + start = time.time() + ref = evictable_sleep.run_no_wait() + + await _poll_until_status(hatchet, ref.workflow_run_id, V1TaskStatus.RUNNING) + details = await _poll_until_evicted(hatchet, ref.workflow_run_id) + task_id = _get_task_id(details) + + with hatchet.runs.client() as client: + TaskApi(client).v1_task_restore(task=task_id) + + result = await ref.aio_result() + elapsed = time.time() - start + assert result["status"] == "completed" + assert elapsed >= 15 + + +@requires_durable_eviction +@pytest.mark.asyncio(loop_scope="session") +async def test_evictable_wait_for_event_is_evicted(hatchet: Hatchet) -> None: + """A durable task waiting for an event should be evicted after TTL.""" + ref = evictable_wait_for_event.run_no_wait() + + await _poll_until_status(hatchet, ref.workflow_run_id, V1TaskStatus.RUNNING) + details = await _poll_until_evicted(hatchet, ref.workflow_run_id) + + assert _has_evicted_task(details), f"Expected is_evicted=True for wait_for_event" + + +@requires_durable_eviction +@pytest.mark.asyncio(loop_scope="session") +async def test_evictable_wait_for_event_restore(hatchet: Hatchet) -> None: + """After eviction, restoring and sending the event should let the task complete.""" + ref = evictable_wait_for_event.run_no_wait() + + await _poll_until_status(hatchet, ref.workflow_run_id, 
V1TaskStatus.RUNNING) + details = await _poll_until_evicted(hatchet, ref.workflow_run_id) + task_id = _get_task_id(details) + + with hatchet.runs.client() as client: + TaskApi(client).v1_task_restore(task=task_id) + + await _poll_until_status(hatchet, ref.workflow_run_id, V1TaskStatus.RUNNING) + + hatchet.event.push(EVENT_KEY, {}) + + result = await ref.aio_result() + assert result["status"] == "completed" + + +@requires_durable_eviction +@pytest.mark.asyncio(loop_scope="session") +async def test_evictable_child_spawn_is_evicted(hatchet: Hatchet) -> None: + """A durable task waiting on a child workflow should be evicted after TTL.""" + ref = evictable_child_spawn.run_no_wait() + + await _poll_until_status(hatchet, ref.workflow_run_id, V1TaskStatus.RUNNING) + details = await _poll_until_evicted(hatchet, ref.workflow_run_id) + + assert _has_evicted_task(details), f"Expected is_evicted=True for child_spawn" + + +@requires_durable_eviction +@pytest.mark.asyncio(loop_scope="session") +async def test_evictable_child_spawn_restore(hatchet: Hatchet) -> None: + """After eviction, restoring should let the child-spawning task resume.""" + ref = evictable_child_spawn.run_no_wait() + + await _poll_until_status(hatchet, ref.workflow_run_id, V1TaskStatus.RUNNING) + details = await _poll_until_evicted(hatchet, ref.workflow_run_id) + task_id = _get_task_id(details) + + with hatchet.runs.client() as client: + TaskApi(client).v1_task_restore(task=task_id) + + details = await _poll_until_status( + hatchet, ref.workflow_run_id, V1TaskStatus.RUNNING + ) + statuses = {t.status for t in details.task_runs.values()} + + assert ( + V1TaskStatus.RUNNING in statuses + ), f"Expected RUNNING after restore, got: {statuses}" + + +@requires_durable_eviction +@pytest.mark.asyncio(loop_scope="session") +async def test_evictable_child_spawn_restore_completes(hatchet: Hatchet) -> None: + """After eviction and restore, evictable_child_spawn should complete with child result.""" + ref = 
evictable_child_spawn.run_no_wait() + + await _poll_until_status(hatchet, ref.workflow_run_id, V1TaskStatus.RUNNING) + details = await _poll_until_evicted(hatchet, ref.workflow_run_id) + task_id = _get_task_id(details) + + with hatchet.runs.client() as client: + TaskApi(client).v1_task_restore(task=task_id) + + result = await ref.aio_result() + assert result["status"] == "completed" + assert result["child"] == {"child_status": "completed"} + + +@requires_durable_eviction +@pytest.mark.asyncio(loop_scope="session") +async def test_evictable_child_bulk_spawn_restore_completes(hatchet: Hatchet) -> None: + ref = evictable_child_bulk_spawn.run_no_wait() + + eviction_count = 0 + for _ in range(3): + await _poll_until_status(hatchet, ref.workflow_run_id, V1TaskStatus.RUNNING) + details = await _poll_until_evicted(hatchet, ref.workflow_run_id) + eviction_count += 1 + task_id = _get_task_id(details) + with hatchet.runs.client() as client: + TaskApi(client).v1_task_restore(task=task_id) + + result = await ref.aio_result() + assert eviction_count == 3, f"Expected 3 evictions, got {eviction_count}" + assert result["child_results"] == [ + {"sleep_for": 10, "status": "completed"}, + {"sleep_for": 20, "status": "completed"}, + {"sleep_for": 30, "status": "completed"}, + ] + + +@requires_durable_eviction +@pytest.mark.asyncio(loop_scope="session") +async def test_multiple_eviction_cycle(hatchet: Hatchet) -> None: + """The task should survive two eviction+restore cycles.""" + start = time.time() + ref = multiple_eviction.run_no_wait() + + # --- first eviction cycle --- + await _poll_until_status(hatchet, ref.workflow_run_id, V1TaskStatus.RUNNING) + details = await _poll_until_evicted(hatchet, ref.workflow_run_id) + assert _has_evicted_task(details), f"First eviction failed" + + task_id = _get_task_id(details) + with hatchet.runs.client() as client: + TaskApi(client).v1_task_restore(task=task_id) + + # --- second eviction cycle --- + await _poll_until_status(hatchet, 
ref.workflow_run_id, V1TaskStatus.RUNNING)
+    details = await _poll_until_evicted(hatchet, ref.workflow_run_id)
+    assert _has_evicted_task(details), "Second eviction failed"
+
+    task_id = _get_task_id(details)
+    with hatchet.runs.client() as client:
+        TaskApi(client).v1_task_restore(task=task_id)
+
+    # should complete after the second restore
+    result = await ref.aio_result()
+    elapsed = time.time() - start
+    assert result["status"] == "completed"
+    assert elapsed >= 30
+
+
+@requires_durable_eviction
+@pytest.mark.parametrize(
+    "on_demand_worker",
+    [
+        (
+            ["poetry", "run", "python", "-m", "examples.durable_eviction.worker"],
+            8004,
+        )
+    ],
+    indirect=True,
+)
+@pytest.mark.asyncio(loop_scope="session")
+async def test_graceful_termination_evicts_waiting_runs(
+    hatchet: Hatchet, on_demand_worker: Popen[Any]
+) -> None:
+    """When a worker receives SIGTERM, all waiting durable runs should be evicted."""
+    ref = evictable_sleep.run_no_wait()
+
+    await _poll_until_status(hatchet, ref.workflow_run_id, V1TaskStatus.RUNNING)
+
+    parent = psutil.Process(on_demand_worker.pid)
+    for child in parent.children(recursive=True):
+        child.send_signal(signal.SIGTERM)
+    parent.send_signal(signal.SIGTERM)
+
+    details = await _poll_until_evicted(hatchet, ref.workflow_run_id)
+
+    assert _has_evicted_task(details), "Expected is_evicted=True after SIGTERM"
+
+
+@requires_durable_eviction
+@pytest.mark.asyncio(loop_scope="session")
+async def test_eviction_plus_replay(hatchet: Hatchet) -> None:
+    """After eviction, replay (not restore) should re-queue the run from the beginning."""
+    ref = evictable_sleep.run_no_wait()
+
+    await _poll_until_status(hatchet, ref.workflow_run_id, V1TaskStatus.RUNNING)
+    await _poll_until_evicted(hatchet, ref.workflow_run_id)
+
+    await hatchet.runs.aio_replay(ref.workflow_run_id)
+
+    result = await ref.aio_result()
+    assert result["status"] == "completed"
+
+
+@requires_durable_eviction
+@pytest.mark.asyncio(loop_scope="session")
+async def 
test_evictable_cancel_after_eviction(hatchet: Hatchet) -> None:
+    """Cancelling an evicted run should transition it to CANCELLED."""
+    ref = evictable_sleep.run_no_wait()
+
+    await _poll_until_status(hatchet, ref.workflow_run_id, V1TaskStatus.RUNNING)
+    details = await _poll_until_evicted(hatchet, ref.workflow_run_id)
+    assert _has_evicted_task(details), "Expected is_evicted=True"
+
+    await hatchet.runs.aio_cancel(ref.workflow_run_id)
+
+    status = await hatchet.runs.aio_get_status(ref.workflow_run_id)
+    for _ in range(MAX_POLLS):
+        status = await hatchet.runs.aio_get_status(ref.workflow_run_id)
+        if status == V1TaskStatus.CANCELLED:
+            break
+        await asyncio.sleep(POLL_INTERVAL)
+    else:
+        status = await hatchet.runs.aio_get_status(ref.workflow_run_id)
+
+    assert status == V1TaskStatus.CANCELLED
+
+
+@requires_durable_eviction
+@pytest.mark.parametrize(
+    "on_demand_worker",
+    [
+        (
+            [
+                "poetry",
+                "run",
+                "python",
+                "-m",
+                "examples.durable_eviction.capacity_worker",
+            ],
+            8005,
+        )
+    ],
+    indirect=True,
+)
+@pytest.mark.asyncio(loop_scope="session")
+async def test_capacity_eviction_fires(
+    hatchet: Hatchet, on_demand_worker: Popen[Any]
+) -> None:
+    """A task with ttl=None but allow_capacity_eviction=True should be evicted
+    under durable-slot pressure (durable_slots=1)."""
+    ref = capacity_evictable_sleep.run_no_wait()
+
+    await _poll_until_status(hatchet, ref.workflow_run_id, V1TaskStatus.RUNNING)
+    details = await _poll_until_evicted(hatchet, ref.workflow_run_id)
+
+    assert _has_evicted_task(
+        details
+    ), "Expected capacity eviction (ttl=None), got no evicted tasks"
+
+
+@requires_durable_eviction
+@pytest.mark.parametrize(
+    "on_demand_worker",
+    [
+        (
+            [
+                "poetry",
+                "run",
+                "python",
+                "-m",
+                "examples.durable_eviction.capacity_worker",
+            ],
+            8005,
+        )
+    ],
+    indirect=True,
+)
+@pytest.mark.asyncio(loop_scope="session")
+async def test_capacity_eviction_restore_completes(
+    hatchet: Hatchet, on_demand_worker: Popen[Any]
+) -> None:
+    """After 
capacity eviction, restore should let the task resume and complete.""" + ref = capacity_evictable_sleep.run_no_wait() + + await _poll_until_status(hatchet, ref.workflow_run_id, V1TaskStatus.RUNNING) + details = await _poll_until_evicted(hatchet, ref.workflow_run_id) + task_id = _get_task_id(details) + + with hatchet.runs.client() as client: + TaskApi(client).v1_task_restore(task=task_id) + + result = await ref.aio_result() + assert result["status"] == "completed" + + +@requires_durable_eviction +@pytest.mark.asyncio(loop_scope="session") +async def test_restore_idempotency(hatchet: Hatchet) -> None: + """Restoring twice on the same evicted task should not cause duplicate execution.""" + ref = evictable_sleep.run_no_wait() + + await _poll_until_status(hatchet, ref.workflow_run_id, V1TaskStatus.RUNNING) + details = await _poll_until_evicted(hatchet, ref.workflow_run_id) + task_id = _get_task_id(details) + + with hatchet.runs.client() as client: + TaskApi(client).v1_task_restore(task=task_id) + TaskApi(client).v1_task_restore(task=task_id) + + result = await ref.aio_result() + assert result["status"] == "completed" diff --git a/sdks/python/examples/durable_eviction/trigger.py b/sdks/python/examples/durable_eviction/trigger.py new file mode 100644 index 000000000..8b99fcf2e --- /dev/null +++ b/sdks/python/examples/durable_eviction/trigger.py @@ -0,0 +1,4 @@ +from examples.durable_eviction.worker import evictable_sleep + +ref = evictable_sleep.run_no_wait() +print(f"Triggered evictable_sleep: workflow_run_id={ref.workflow_run_id}") diff --git a/sdks/python/examples/durable_eviction/worker.py b/sdks/python/examples/durable_eviction/worker.py new file mode 100644 index 000000000..4c80d2f56 --- /dev/null +++ b/sdks/python/examples/durable_eviction/worker.py @@ -0,0 +1,169 @@ +from __future__ import annotations + +import asyncio +from datetime import timedelta +from typing import Any + +from hatchet_sdk import Context, DurableContext, EmptyModel, Hatchet, UserEventCondition 
+from hatchet_sdk.runnables.eviction import EvictionPolicy +from pydantic import BaseModel + +hatchet = Hatchet(debug=True) + + +EVICTION_TTL_SECONDS = 5 +LONG_SLEEP_SECONDS = 15 +EVENT_KEY = "durable-eviction:event" + +EVICTION_POLICY = EvictionPolicy( + ttl=timedelta(seconds=EVICTION_TTL_SECONDS), + allow_capacity_eviction=True, + priority=0, +) + + +@hatchet.task() +async def child_task(input: EmptyModel, ctx: Context) -> dict[str, Any]: + """Simple child that sleeps long enough for the parent's TTL to fire.""" + await asyncio.sleep(LONG_SLEEP_SECONDS) + return {"child_status": "completed"} + + +@hatchet.durable_task( + execution_timeout=timedelta(minutes=5), + eviction_policy=EVICTION_POLICY, +) +async def evictable_sleep(input: EmptyModel, ctx: DurableContext) -> dict[str, Any]: + """Sleeps long enough for the TTL-based eviction to kick in.""" + await ctx.aio_sleep_for(timedelta(seconds=LONG_SLEEP_SECONDS)) + return {"status": "completed"} + + +@hatchet.durable_task( + execution_timeout=timedelta(minutes=5), + eviction_policy=EVICTION_POLICY, +) +async def evictable_wait_for_event( + input: EmptyModel, ctx: DurableContext +) -> dict[str, Any]: + """Waits for a user event -- long enough for TTL eviction to fire.""" + await ctx.aio_wait_for_event( + EVENT_KEY, + "true", + ) + return {"status": "completed"} + + +@hatchet.durable_task( + execution_timeout=timedelta(minutes=5), + eviction_policy=EVICTION_POLICY, +) +async def evictable_child_spawn( + input: EmptyModel, ctx: DurableContext +) -> dict[str, Any]: + """Spawns a child workflow whose runtime exceeds the eviction TTL.""" + child_result = await child_task.aio_run() + return {"child": child_result, "status": "completed"} + + +class BulkChildTaskInput(BaseModel): + sleep_for: timedelta + + +@hatchet.task( + input_validator=BulkChildTaskInput, +) +async def bulk_child_task( + input: BulkChildTaskInput, ctx: Context +) -> dict[str, str | int]: + """Simple child that sleeps long enough for the parent's TTL to 
fire."""
+    await asyncio.sleep(input.sleep_for.total_seconds())
+    return {"sleep_for": int(input.sleep_for.total_seconds()), "status": "completed"}
+
+
+@hatchet.durable_task(
+    execution_timeout=timedelta(minutes=5),
+    eviction_policy=EVICTION_POLICY,
+)
+async def evictable_child_bulk_spawn(
+    input: EmptyModel, ctx: DurableContext
+) -> dict[str, Any]:
+    child_results = await bulk_child_task.aio_run_many(
+        [
+            bulk_child_task.create_bulk_run_item(
+                input=BulkChildTaskInput(
+                    sleep_for=timedelta(seconds=(EVICTION_TTL_SECONDS + 5) * (i + 1))
+                ),
+                key=f"child{i}",
+            )
+            for i in range(3)
+        ]
+    )
+    return {"child_results": child_results}
+
+
+@hatchet.durable_task(
+    execution_timeout=timedelta(minutes=5),
+    eviction_policy=EVICTION_POLICY,
+)
+async def multiple_eviction(input: EmptyModel, ctx: DurableContext) -> dict[str, Any]:
+    """Sleeps twice, expecting eviction+restore after each sleep."""
+    await ctx.aio_sleep_for(timedelta(seconds=LONG_SLEEP_SECONDS))
+    await ctx.aio_sleep_for(timedelta(seconds=LONG_SLEEP_SECONDS))
+    return {"status": "completed"}
+
+
+CAPACITY_EVICTION_POLICY = EvictionPolicy(
+    ttl=None,
+    allow_capacity_eviction=True,
+    priority=0,
+)
+
+CAPACITY_SLEEP_SECONDS = 20
+
+
+@hatchet.durable_task(
+    execution_timeout=timedelta(minutes=5),
+    eviction_policy=CAPACITY_EVICTION_POLICY,
+)
+async def capacity_evictable_sleep(
+    input: EmptyModel, ctx: DurableContext
+) -> dict[str, Any]:
+    """No TTL -- only evictable via capacity pressure (durable_slots=1)."""
+    await ctx.aio_sleep_for(timedelta(seconds=CAPACITY_SLEEP_SECONDS))
+    return {"status": "completed"}
+
+
+@hatchet.durable_task(
+    execution_timeout=timedelta(minutes=5),
+    eviction_policy=EvictionPolicy(
+        ttl=None,
+        allow_capacity_eviction=False,
+        priority=0,
+    ),
+)
+async def non_evictable_sleep(input: EmptyModel, ctx: DurableContext) -> dict[str, Any]:
+    """Has eviction disabled -- should never be evicted."""
+    await ctx.aio_sleep_for(timedelta(seconds=10))
+    return {"status": 
"completed"} + + +def main() -> None: + worker = hatchet.worker( + "eviction-worker", + workflows=[ + evictable_sleep, + evictable_wait_for_event, + evictable_child_spawn, + evictable_child_bulk_spawn, + multiple_eviction, + non_evictable_sleep, + child_task, + bulk_child_task, + ], + ) + worker.start() + + +if __name__ == "__main__": + main() diff --git a/sdks/python/examples/simple/chaos_test.py b/sdks/python/examples/simple/chaos_test.py index df9d14d7e..cd3df67c2 100644 --- a/sdks/python/examples/simple/chaos_test.py +++ b/sdks/python/examples/simple/chaos_test.py @@ -49,7 +49,7 @@ def simple(input: EmptyModel, ctx: Context) -> dict[str, str]: @hatchet.durable_task() -def simple_durable(input: EmptyModel, ctx: Context) -> dict[str, str]: +async def simple_durable(input: EmptyModel, ctx: Context) -> dict[str, str]: print("Executing durable task!") return {"result": "Hello from durable!"} diff --git a/sdks/python/examples/simple/worker.py b/sdks/python/examples/simple/worker.py index 686742c4f..ee9a25016 100644 --- a/sdks/python/examples/simple/worker.py +++ b/sdks/python/examples/simple/worker.py @@ -1,5 +1,5 @@ # > Simple -from hatchet_sdk import Context, EmptyModel, Hatchet +from hatchet_sdk import Context, DurableContext, EmptyModel, Hatchet hatchet = Hatchet(debug=True) @@ -10,7 +10,8 @@ def simple(input: EmptyModel, ctx: Context) -> dict[str, str]: @hatchet.durable_task() -def simple_durable(input: EmptyModel, ctx: Context) -> dict[str, str]: +async def simple_durable(input: EmptyModel, ctx: DurableContext) -> dict[str, str]: + # durable tasks should be async return {"result": "Hello, world!"} diff --git a/sdks/python/examples/unit_testing/test_unit.py b/sdks/python/examples/unit_testing/test_unit.py index cebc84f17..2045251f6 100644 --- a/sdks/python/examples/unit_testing/test_unit.py +++ b/sdks/python/examples/unit_testing/test_unit.py @@ -10,9 +10,6 @@ from examples.unit_testing.workflows import ( durable_async_complex_workflow, 
durable_async_simple_workflow, durable_async_standalone, - durable_sync_complex_workflow, - durable_sync_simple_workflow, - durable_sync_standalone, start, sync_complex_workflow, sync_simple_workflow, @@ -25,11 +22,8 @@ from hatchet_sdk import Task "func", [ sync_standalone, - durable_sync_standalone, sync_simple_workflow, - durable_sync_simple_workflow, sync_complex_workflow, - durable_sync_complex_workflow, ], ) def test_simple_unit_sync(func: Task[UnitTestInput, UnitTestOutput]) -> None: diff --git a/sdks/python/examples/unit_testing/workflows.py b/sdks/python/examples/unit_testing/workflows.py index ae42e61c9..594e69a73 100644 --- a/sdks/python/examples/unit_testing/workflows.py +++ b/sdks/python/examples/unit_testing/workflows.py @@ -44,19 +44,6 @@ async def async_standalone(input: UnitTestInput, ctx: Context) -> UnitTestOutput ) -@hatchet.durable_task(input_validator=UnitTestInput) -def durable_sync_standalone( - input: UnitTestInput, ctx: DurableContext -) -> UnitTestOutput: - return UnitTestOutput( - key=input.key, - number=input.number, - additional_metadata=ctx.additional_metadata, - retry_count=ctx.retry_count, - mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url, - ) - - @hatchet.durable_task(input_validator=UnitTestInput) async def durable_async_standalone( input: UnitTestInput, ctx: DurableContext @@ -97,19 +84,6 @@ async def async_simple_workflow(input: UnitTestInput, ctx: Context) -> UnitTestO ) -@simple_workflow.durable_task() -def durable_sync_simple_workflow( - input: UnitTestInput, ctx: DurableContext -) -> UnitTestOutput: - return UnitTestOutput( - key=input.key, - number=input.number, - additional_metadata=ctx.additional_metadata, - retry_count=ctx.retry_count, - mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url, - ) - - @simple_workflow.durable_task() async def durable_async_simple_workflow( input: UnitTestInput, ctx: DurableContext @@ -153,15 +127,6 @@ async def async_complex_workflow(input: UnitTestInput, ctx: Context) -> UnitTest 
return ctx.task_output(start) -@complex_workflow.durable_task( - parents=[start], -) -def durable_sync_complex_workflow( - input: UnitTestInput, ctx: DurableContext -) -> UnitTestOutput: - return ctx.task_output(start) - - @complex_workflow.durable_task( parents=[start], ) diff --git a/sdks/python/examples/worker.py b/sdks/python/examples/worker.py index 0ecaaab41..742affe65 100644 --- a/sdks/python/examples/worker.py +++ b/sdks/python/examples/worker.py @@ -27,12 +27,38 @@ from examples.dependency_injection.worker import ( async_task_with_dependencies, di_workflow, durable_async_task_with_dependencies, - durable_sync_task_with_dependencies, sync_task_with_dependencies, task_with_type_aliases, ) from examples.dict_input.worker import say_hello_unsafely -from examples.durable.worker import durable_workflow, wait_for_sleep_twice +from examples.durable.worker import ( + durable_sleep_event_spawn, + durable_with_bulk_spawn, + durable_with_spawn, + durable_workflow, + spawn_child_task, + wait_for_sleep_twice, + dag_child_workflow, + durable_spawn_dag, + durable_non_determinism, + durable_replay_reset, + memo_task, + memo_now_caching, +) +from examples.durable_event.worker import ( + durable_event_task, + durable_event_task_with_filter, +) +from examples.durable_eviction.worker import ( + bulk_child_task as eviction_bulk_child_task, + child_task as eviction_child_task, + evictable_child_bulk_spawn, + evictable_child_spawn, + evictable_sleep, + evictable_wait_for_event, + multiple_eviction, + non_evictable_sleep, +) from examples.events.worker import event_workflow from examples.fanout.worker import child_wf, parent_wf from examples.fanout_sync.worker import sync_fanout_child, sync_fanout_parent @@ -104,14 +130,33 @@ def main() -> None: return_exceptions_task, exception_parsing_workflow, wait_for_sleep_twice, + spawn_child_task, + durable_with_spawn, + durable_with_bulk_spawn, + durable_sleep_event_spawn, + durable_event_task, + durable_event_task_with_filter, 
async_task_with_dependencies, sync_task_with_dependencies, durable_async_task_with_dependencies, - durable_sync_task_with_dependencies, task_with_type_aliases, say_hello, say_hello_unsafely, serde_workflow, + durable_spawn_dag, + dag_child_workflow, + durable_non_determinism, + durable_replay_reset, + memo_task, + evictable_sleep, + evictable_wait_for_event, + evictable_child_spawn, + evictable_child_bulk_spawn, + multiple_eviction, + non_evictable_sleep, + eviction_child_task, + eviction_bulk_child_task, + memo_now_caching, ], lifespan=lifespan, ) diff --git a/sdks/python/generate.sh b/sdks/python/generate.sh index f704e72c8..90ee05534 100755 --- a/sdks/python/generate.sh +++ b/sdks/python/generate.sh @@ -25,6 +25,9 @@ mkdir -p $dst_dir tmp_dir=./tmp +rm -rf hatchet_sdk/clients/rest/models +rm -rf hatchet_sdk/clients/rest/api + # generate into tmp folder openapi-generator-cli generate -i ../../bin/oas/openapi.yaml -g python -o ./tmp --skip-validate-spec \ --global-property=apiTests=false \ @@ -60,11 +63,13 @@ MIN_GRPCIO_VERSION=$(grep '^grpcio = ' pyproject.toml | cut -d'"' -f2 | tr -d '^ poetry add "grpcio@$MIN_GRPCIO_VERSION" "grpcio-tools@$MIN_GRPCIO_VERSION" + proto_paths=( "../../api-contracts/dispatcher dispatcher.proto" "../../api-contracts/events events.proto" "../../api-contracts/workflows workflows.proto" "../../api-contracts v1/shared/condition.proto" + "../../api-contracts v1/shared/trigger.proto" "../../api-contracts v1/dispatcher.proto" "../../api-contracts v1/workflows.proto" ) @@ -77,6 +82,7 @@ for entry in "${proto_paths[@]}"; do poetry run python -m grpc_tools.protoc \ --proto_path="$proto_path" \ + --proto_path=../../api-contracts \ --python_out=./hatchet_sdk/contracts \ --pyi_out=./hatchet_sdk/contracts \ --grpc_python_out=./hatchet_sdk/contracts \ diff --git a/sdks/python/hatchet_sdk/__init__.py b/sdks/python/hatchet_sdk/__init__.py index 6fe497718..8080276c0 100644 --- a/sdks/python/hatchet_sdk/__init__.py +++ 
b/sdks/python/hatchet_sdk/__init__.py @@ -4,9 +4,6 @@ from hatchet_sdk.clients.admin import ( TriggerWorkflowOptions, ) from hatchet_sdk.clients.events import PushEventOptions -from hatchet_sdk.clients.listeners.durable_event_listener import ( - RegisterDurableEventRequest, -) from hatchet_sdk.clients.listeners.run_event_listener import ( RunEventListener, StepRunEventType, @@ -49,23 +46,12 @@ from hatchet_sdk.clients.rest.models.event_workflow_run_summary import ( from hatchet_sdk.clients.rest.models.get_step_run_diff_response import ( GetStepRunDiffResponse, ) -from hatchet_sdk.clients.rest.models.github_app_installation import ( - GithubAppInstallation, -) -from hatchet_sdk.clients.rest.models.github_branch import GithubBranch -from hatchet_sdk.clients.rest.models.github_repo import GithubRepo from hatchet_sdk.clients.rest.models.job import Job from hatchet_sdk.clients.rest.models.job_run import JobRun from hatchet_sdk.clients.rest.models.job_run_status import JobRunStatus -from hatchet_sdk.clients.rest.models.link_github_repository_request import ( - LinkGithubRepositoryRequest, -) from hatchet_sdk.clients.rest.models.list_api_tokens_response import ( ListAPITokensResponse, ) -from hatchet_sdk.clients.rest.models.list_github_app_installations_response import ( - ListGithubAppInstallationsResponse, -) from hatchet_sdk.clients.rest.models.list_pull_requests_response import ( ListPullRequestsResponse, ) @@ -115,9 +101,6 @@ from hatchet_sdk.clients.rest.models.v1_webhook_hmac_encoding import ( from hatchet_sdk.clients.rest.models.v1_webhook_source_name import V1WebhookSourceName from hatchet_sdk.clients.rest.models.worker_list import WorkerList from hatchet_sdk.clients.rest.models.workflow import Workflow -from hatchet_sdk.clients.rest.models.workflow_deployment_config import ( - WorkflowDeploymentConfig, -) from hatchet_sdk.clients.rest.models.workflow_list import WorkflowList from hatchet_sdk.clients.rest.models.workflow_run import WorkflowRun from 
hatchet_sdk.clients.rest.models.workflow_run_list import WorkflowRunList @@ -156,7 +139,9 @@ from hatchet_sdk.contracts.workflows_pb2 import ( ) from hatchet_sdk.exceptions import ( DedupeViolationError, + EvictionNotSupportedError, FailedTaskRunExceptionGroup, + NonDeterminismError, NonRetryableException, TaskRunError, ) @@ -218,6 +203,7 @@ __all__ = [ "EventOrderByDirection", "EventOrderByField", "EventWorkflowRunSummary", + "EvictionNotSupportedError", "FailedTaskRunExceptionGroup", "GetStepRunDiffResponse", "GithubAppInstallation", @@ -236,6 +222,7 @@ __all__ = [ "LogLineList", "LogLineOrderByDirection", "LogLineOrderByField", + "NonDeterminismError", "NonRetryableException", "OTelAttribute", "OpenTelemetryConfig", diff --git a/sdks/python/hatchet_sdk/clients/admin.py b/sdks/python/hatchet_sdk/clients/admin.py index 7649743f2..5873d4b8d 100644 --- a/sdks/python/hatchet_sdk/clients/admin.py +++ b/sdks/python/hatchet_sdk/clients/admin.py @@ -17,6 +17,7 @@ from hatchet_sdk.config import ClientConfig from hatchet_sdk.connection import new_conn from hatchet_sdk.contracts import workflows_pb2 as v0_workflow_protos from hatchet_sdk.contracts.v1 import workflows_pb2 as workflow_protos +from hatchet_sdk.contracts.v1.shared import trigger_pb2 as trigger_protos from hatchet_sdk.contracts.v1.workflows_pb2_grpc import AdminServiceStub from hatchet_sdk.contracts.workflows_pb2_grpc import WorkflowServiceStub from hatchet_sdk.exceptions import DedupeViolationError @@ -124,6 +125,7 @@ class TaskRunDetail(BaseModel): output: JSONSerializableMapping | None = None error: str | None = None status: V1TaskStatus + is_evicted: bool = False class WorkflowRunDetail(BaseModel): @@ -189,13 +191,13 @@ class AdminClient: workflow_name: str, input: str | None, options: TriggerWorkflowOptions, - ) -> v0_workflow_protos.TriggerWorkflowRequest: + ) -> trigger_protos.TriggerWorkflowRequest: _options = self.TriggerWorkflowRequest.model_validate(options.model_dump()) desired_worker_labels = None 
if _options.desired_worker_label: desired_worker_labels = { - key: v0_workflow_protos.DesiredWorkerLabels( + key: trigger_protos.DesiredWorkerLabels( str_value=d.value if not isinstance(d.value, int) else None, int_value=d.value if isinstance(d.value, int) else None, required=d.required, @@ -205,7 +207,7 @@ class AdminClient: for key, d in _options.desired_worker_label.items() } - return v0_workflow_protos.TriggerWorkflowRequest( + return trigger_protos.TriggerWorkflowRequest( name=workflow_name, input=input, parent_id=_options.parent_id, @@ -354,7 +356,7 @@ class AdminClient: workflow_name: str, input: str | None, options: TriggerWorkflowOptions, - ) -> v0_workflow_protos.TriggerWorkflowRequest: + ) -> trigger_protos.TriggerWorkflowRequest: workflow_run_id = ctx_workflow_run_id.get() step_run_id = ctx_step_run_id.get() worker_id = ctx_worker_id.get() @@ -601,6 +603,7 @@ class AdminClient: ), error=details.error if details.error else None, status=RunStatus.from_proto(details.status).to_v1_task_status(), + is_evicted=getattr(details, "is_evicted", False), ) for readable_id, details in response.task_runs.items() }, diff --git a/sdks/python/hatchet_sdk/clients/dispatcher/action_listener.py b/sdks/python/hatchet_sdk/clients/dispatcher/action_listener.py index 460799107..2abfd89e2 100644 --- a/sdks/python/hatchet_sdk/clients/dispatcher/action_listener.py +++ b/sdks/python/hatchet_sdk/clients/dispatcher/action_listener.py @@ -2,7 +2,7 @@ import asyncio import json import time from collections.abc import AsyncGenerator -from typing import TYPE_CHECKING, cast +from typing import cast import grpc import grpc.aio @@ -17,6 +17,7 @@ from hatchet_sdk.clients.events import proto_timestamp_now from hatchet_sdk.clients.listeners.run_event_listener import ( DEFAULT_ACTION_LISTENER_RETRY_INTERVAL, ) +from hatchet_sdk.config import ClientConfig from hatchet_sdk.connection import new_conn from hatchet_sdk.contracts.dispatcher_pb2 import ActionType as ActionTypeProto from 
hatchet_sdk.contracts.dispatcher_pb2 import ( @@ -34,10 +35,6 @@ from hatchet_sdk.utils.backoff import exp_backoff_sleep from hatchet_sdk.utils.proto_enums import convert_proto_enum_to_python from hatchet_sdk.utils.typing import JSONSerializableMapping -if TYPE_CHECKING: - from hatchet_sdk.config import ClientConfig - - DEFAULT_ACTION_TIMEOUT = 600 # seconds DEFAULT_ACTION_LISTENER_RETRY_COUNT = 15 @@ -118,7 +115,7 @@ class ActionListener: ) if self.last_heartbeat_succeeded is False: - logger.info("listener established") + logger.info("action listener established") now = time.time() diff = now - self.time_last_hb_succeeded @@ -254,6 +251,8 @@ class ActionListener: priority=assigned_action.priority, workflow_version_id=assigned_action.workflow_version_id, workflow_id=assigned_action.workflow_id, + durable_task_invocation_count=assigned_action.durable_task_invocation_count + or None, ) yield action @@ -275,12 +274,7 @@ class ActionListener: self.run_heartbeat = False logger.info("ListenV2 not available, falling back to Listen") else: - # TODO retry - if e.code() == grpc.StatusCode.UNAVAILABLE: - logger.exception("action listener error") - else: - # Unknown error, report and break - logger.exception("action listener error") + logger.error("action listener error - reconnecting") self.retries = self.retries + 1 diff --git a/sdks/python/hatchet_sdk/clients/dispatcher/dispatcher.py b/sdks/python/hatchet_sdk/clients/dispatcher/dispatcher.py index d49081628..772a6dc18 100644 --- a/sdks/python/hatchet_sdk/clients/dispatcher/dispatcher.py +++ b/sdks/python/hatchet_sdk/clients/dispatcher/dispatcher.py @@ -4,13 +4,18 @@ from sys import version_info from typing import cast import grpc.aio +import tenacity from google.protobuf.timestamp_pb2 import Timestamp from hatchet_sdk.clients.dispatcher.action_listener import ( ActionListener, GetActionListenerRequest, ) -from hatchet_sdk.clients.rest.tenacity_utils import tenacity_retry +from hatchet_sdk.clients.rest.tenacity_utils import 
( + tenacity_alert_retry, + tenacity_retry, + tenacity_should_retry, +) from hatchet_sdk.config import ClientConfig from hatchet_sdk.connection import new_conn from hatchet_sdk.contracts.dispatcher_pb2 import ( @@ -92,23 +97,36 @@ class DispatcherClient: return ActionListener(self.config, response.worker_id) - async def get_version(self) -> str: - """Call GetVersion RPC. Returns the engine semantic version string. + @tenacity.retry( + reraise=True, + wait=tenacity.wait_exponential_jitter(initial=0.5, max=5), + stop=tenacity.stop_after_attempt(3), + before_sleep=tenacity_alert_retry, + retry=tenacity.retry_if_exception(tenacity_should_retry), + ) + async def get_version(self) -> str | None: + """Call GetVersion RPC. Returns the engine semantic version string, + or ``None`` if the engine is too old to support GetVersion. - Raises grpc.RpcError with UNIMPLEMENTED on older engines. + Retries transient gRPC errors up to 3 times with exponential backoff. """ if not self.aio_client: aio_conn = new_conn(self.config, True) self.aio_client = DispatcherStub(aio_conn) - response = cast( - GetVersionResponse, - await self.aio_client.GetVersion( # type: ignore[misc] - GetVersionRequest(), - timeout=DEFAULT_REGISTER_TIMEOUT, - metadata=get_metadata(self.token), - ), - ) + try: + response = cast( + GetVersionResponse, + await self.aio_client.GetVersion( # type: ignore[misc] + GetVersionRequest(), + timeout=DEFAULT_REGISTER_TIMEOUT, + metadata=get_metadata(self.token), + ), + ) + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.UNIMPLEMENTED: + return None + raise return response.version diff --git a/sdks/python/hatchet_sdk/clients/listeners/durable_event_listener.py b/sdks/python/hatchet_sdk/clients/listeners/durable_event_listener.py index 47957023d..eabc21173 100644 --- a/sdks/python/hatchet_sdk/clients/listeners/durable_event_listener.py +++ b/sdks/python/hatchet_sdk/clients/listeners/durable_event_listener.py @@ -1,129 +1,688 @@ +import asyncio import json -from 
collections.abc import AsyncIterator -from typing import Any, Literal, cast +from collections.abc import AsyncIterator, Callable +from contextlib import suppress +from dataclasses import dataclass +from datetime import timedelta +from typing import Annotated, Literal, cast -import grpc import grpc.aio -from pydantic import BaseModel, ConfigDict +from pydantic import BaseModel, Field +from typing_extensions import Never, Self -from hatchet_sdk.clients.listeners.pooled_listener import PooledListener -from hatchet_sdk.clients.rest.tenacity_utils import tenacity_retry -from hatchet_sdk.conditions import Condition, SleepCondition, UserEventCondition +from hatchet_sdk.clients.admin import ( + AdminClient, + TriggerWorkflowOptions, +) from hatchet_sdk.config import ClientConfig from hatchet_sdk.connection import new_conn from hatchet_sdk.contracts.v1.dispatcher_pb2 import ( - DurableEvent, - ListenForDurableEventRequest, -) -from hatchet_sdk.contracts.v1.dispatcher_pb2 import ( - RegisterDurableEventRequest as RegisterDurableEventRequestProto, + DurableEventLogEntryRef, + DurableTaskAwaitedCompletedEntry, + DurableTaskCompleteMemoRequest, + DurableTaskErrorType, + DurableTaskEventLogEntryCompletedResponse, + DurableTaskEvictInvocationRequest, + DurableTaskMemoRequest, + DurableTaskRequest, + DurableTaskRequestRegisterWorker, + DurableTaskResponse, + DurableTaskTriggerRunsRequest, + DurableTaskWaitForRequest, + DurableTaskWorkerStatusRequest, ) from hatchet_sdk.contracts.v1.dispatcher_pb2_grpc import V1DispatcherStub from hatchet_sdk.contracts.v1.shared.condition_pb2 import DurableEventListenerConditions +from hatchet_sdk.exceptions import NonDeterminismError +from hatchet_sdk.logger import logger from hatchet_sdk.metadata import get_metadata +from hatchet_sdk.utils.cache import TTLCache +from hatchet_sdk.utils.typing import JSONSerializableMapping -DEFAULT_DURABLE_EVENT_LISTENER_RETRY_INTERVAL = 3 # seconds -DEFAULT_DURABLE_EVENT_LISTENER_RETRY_COUNT = 5 
-DEFAULT_DURABLE_EVENT_LISTENER_INTERRUPT_INTERVAL = 1800 # 30 minutes +DEFAULT_RECONNECT_INTERVAL = 3 # seconds -class RegisterDurableEventRequest(BaseModel): - model_config = ConfigDict(arbitrary_types_allowed=True) +@dataclass(frozen=True) +class WaitForEvent: + wait_for_conditions: DurableEventListenerConditions - task_id: str - signal_key: str - conditions: list[Condition] - config: ClientConfig - def to_proto(self) -> RegisterDurableEventRequestProto: - return RegisterDurableEventRequestProto( - task_id=self.task_id, - signal_key=self.signal_key, - conditions=DurableEventListenerConditions( - sleep_conditions=[ - c.to_proto(self.config) - for c in self.conditions - if isinstance(c, SleepCondition) - ], - user_event_conditions=[ - c.to_proto(self.config) - for c in self.conditions - if isinstance(c, UserEventCondition) - ], - ), +@dataclass(frozen=True) +class RunChildEvent: + workflow_name: str + input: str | None + trigger_workflow_opts: TriggerWorkflowOptions + + +@dataclass(frozen=True) +class RunChildrenEvent: + children: list[RunChildEvent] + + +@dataclass(frozen=True) +class MemoEvent: + memo_key: bytes + result: str | None + + +DurableTaskSendEvent = WaitForEvent | RunChildrenEvent | MemoEvent + + +class MaybeCachedMemoEntry(BaseModel): + found: bool + data: bytes | None = None + + +class DurableTaskRunAckEntry(BaseModel): + node_id: int + branch_id: int + + +class DurableTaskEventRunAck(BaseModel): + ack_type: Literal["run"] = "run" + invocation_count: int + durable_task_external_id: str + run_entries: list[DurableTaskRunAckEntry] = Field(default_factory=list) + + +class DurableTaskEventMemoAck(BaseModel): + ack_type: Literal["memo"] = "memo" + invocation_count: int + durable_task_external_id: str + branch_id: int + node_id: int + memo_already_existed: bool + memo_result_payload: bytes | None = None + + +class DurableTaskEventWaitForAck(BaseModel): + ack_type: Literal["wait"] = "wait" + invocation_count: int + durable_task_external_id: str + 
branch_id: int + node_id: int + + +DurableTaskEventAck = Annotated[ + DurableTaskEventRunAck | DurableTaskEventMemoAck | DurableTaskEventWaitForAck, + Field(discriminator="ack_type"), +] + + +class DurableTaskEventLogEntryResult(BaseModel): + durable_task_external_id: str + node_id: int + payload: JSONSerializableMapping | None + + @classmethod + def from_proto(cls, proto: DurableTaskEventLogEntryCompletedResponse) -> Self: + payload: JSONSerializableMapping | None = None + if proto.payload: + payload = json.loads(proto.payload.decode("utf-8")) + + return cls( + durable_task_external_id=proto.ref.durable_task_external_id, + node_id=proto.ref.node_id, + payload=payload, ) -class ParsedKey(BaseModel): - task_id: str - signal_key: str +TaskExternalId = str +NodeId = int +BranchId = int +InvocationCount = int + +PendingCallback = tuple[TaskExternalId, InvocationCount, BranchId, NodeId] +PendingEventAck = tuple[TaskExternalId, InvocationCount] +PendingEvictionAck = tuple[TaskExternalId, InvocationCount] -class DurableEventListener( - PooledListener[ListenForDurableEventRequest, DurableEvent, V1DispatcherStub] -): - def _generate_key(self, task_id: str, signal_key: str) -> str: - return task_id + ":" + signal_key - - def generate_key(self, response: DurableEvent) -> str: - return self._generate_key( - task_id=response.task_id, - signal_key=response.signal_key, - ) - - def parse_key(self, key: str) -> ParsedKey: - task_id, signal_key = key.split(":", maxsplit=1) - - return ParsedKey( - task_id=task_id, - signal_key=signal_key, - ) - - async def create_subscription( +class DurableEventListener: + def __init__( self, - request: AsyncIterator[ListenForDurableEventRequest], - metadata: tuple[tuple[str, str]], - ) -> grpc.aio.UnaryStreamCall[ListenForDurableEventRequest, DurableEvent]: - if self.client is None: - conn = new_conn(self.config, True) - self.client = V1DispatcherStub(conn) + config: ClientConfig, + admin_client: AdminClient, + on_server_evict: Callable[[str, int], 
None] | None = None, + ): + self.config = config + self.token = config.token + self.admin_client = admin_client - return cast( - grpc.aio.UnaryStreamCall[ListenForDurableEventRequest, DurableEvent], - self.client.ListenForDurableEvent( - request, # type: ignore[arg-type] - metadata=metadata, + self._worker_id: str | None = None + + self._conn: grpc.aio.Channel | None = None + self._stub: V1DispatcherStub | None = None + self._stream: ( + grpc.aio.StreamStreamCall[DurableTaskRequest, DurableTaskResponse] | None + ) = None + + self._request_queue: asyncio.Queue[DurableTaskRequest] | None = None + self._pending_event_acks: dict[ + PendingEventAck, asyncio.Future[DurableTaskEventAck] + ] = {} + self._pending_eviction_acks: dict[PendingEvictionAck, asyncio.Future[None]] = {} + self._pending_callbacks: dict[ + PendingCallback, asyncio.Future[DurableTaskEventLogEntryResult] + ] = {} + + # Completions that arrived before wait_for_callback() registered a + # future in _pending_callbacks. This happens when the server delivers + # an entry_completed between the event ack and the wait_for_callback + # call (e.g. an already-satisfied sleep delivered via polling). 
+ self._buffered_completions: TTLCache[ + PendingCallback, DurableTaskEventLogEntryResult + ] = TTLCache(ttl=timedelta(seconds=10)) + + self._receive_task: asyncio.Task[None] | None = None + self._send_task: asyncio.Task[None] | None = None + self._running = False + self._start_lock = asyncio.Lock() + + self._on_server_evict = on_server_evict + + @property + def worker_id(self) -> str | None: + return self._worker_id + + async def _connect(self) -> None: + if self._conn is not None: + with suppress(Exception): + await self._conn.close() + + logger.info("durable event listener connecting...") + + self._conn = new_conn(self.config, aio=True) + self._stub = V1DispatcherStub(self._conn) + self._request_queue = asyncio.Queue() + + self._stream = cast( + grpc.aio.StreamStreamCall[DurableTaskRequest, DurableTaskResponse], + self._stub.DurableTask( + self._request_iterator(), # type: ignore[arg-type] + metadata=get_metadata(self.token), ), ) - def create_request_body(self, item: str) -> ListenForDurableEventRequest: - key = self.parse_key(item) - return ListenForDurableEventRequest( - task_id=key.task_id, - signal_key=key.signal_key, + await self._register_worker() + await self._poll_worker_status() + logger.info("durable event listener connected") + + async def start(self, worker_id: str) -> None: + async with self._start_lock: + if self._running: + return + + self._worker_id = worker_id + self._running = True + + await self._connect() + + self._receive_task = asyncio.create_task(self._receive_loop()) + self._send_task = asyncio.create_task(self._send_loop()) + + async def ensure_started(self, worker_id: str) -> None: + if not self._running: + await self.start(worker_id) + + async def stop(self) -> None: + self._running = False + self._buffered_completions.stop_eviction_job() + + self._fail_all_pending(Exception("DurableListener stopped")) + + if self._receive_task: + self._receive_task.cancel() + with suppress(asyncio.CancelledError): + await self._receive_task + + if 
self._send_task: + self._send_task.cancel() + with suppress(asyncio.CancelledError): + await self._send_task + + if self._conn: + await self._conn.close() + + async def _request_iterator(self) -> AsyncIterator[DurableTaskRequest]: + if not self._request_queue: + raise RuntimeError("Request queue not initialized") + + while self._running: + with suppress(asyncio.TimeoutError): + yield await asyncio.wait_for(self._request_queue.get(), timeout=1.0) + + async def _send_loop(self) -> None: + while self._running: + await asyncio.sleep(1) + await self._poll_worker_status() + + async def _poll_worker_status(self) -> None: + if self._request_queue is None or self._worker_id is None: + return + + if not self._pending_callbacks: + return + + waiting = [ + DurableTaskAwaitedCompletedEntry( + durable_task_external_id=task_ext_id, + invocation_count=inv_count, + node_id=node_id, + branch_id=branch_id, + ) + for (task_ext_id, inv_count, branch_id, node_id) in self._pending_callbacks + ] + + request = DurableTaskRequest( + worker_status=DurableTaskWorkerStatusRequest( + worker_id=self._worker_id, + waiting_entries=waiting, + ) ) + await self._request_queue.put(request) - def register_durable_event( - self, request: RegisterDurableEventRequest - ) -> Literal[True]: - conn = new_conn(self.config, True) - client = V1DispatcherStub(conn) + def _fail_pending_acks(self, exc: Exception) -> None: + for future in self._pending_event_acks.values(): + if not future.done(): + future.set_exception(exc) + self._pending_event_acks.clear() - register_durable_event = tenacity_retry( - client.RegisterDurableEvent, self.config.tenacity + for eviction_future in self._pending_eviction_acks.values(): + if not eviction_future.done(): + eviction_future.set_exception(exc) + self._pending_eviction_acks.clear() + + def _fail_all_pending(self, exc: Exception) -> None: + self._fail_pending_acks(exc) + + for future in self._pending_callbacks.values(): + if not future.done(): + future.set_exception(exc) + 
self._pending_callbacks.clear() + self._buffered_completions.clear() + + async def _receive_loop(self) -> None: + while self._running: + if not self._stream: + await asyncio.sleep(DEFAULT_RECONNECT_INTERVAL) + continue + + try: + async for response in self._stream: + await self._handle_response(response) + + if self._running: + logger.warning( + f"durable event listener disconnected (EOF), reconnecting in {DEFAULT_RECONNECT_INTERVAL}s..." + ) + self._fail_pending_acks( + ConnectionResetError("durable stream disconnected") + ) + await asyncio.sleep(DEFAULT_RECONNECT_INTERVAL) + await self._connect() + + except grpc.aio.AioRpcError as e: + if e.code() == grpc.StatusCode.CANCELLED: + break + logger.warning( + f"durable event listener disconnected: code={e.code()}, details={e.details()}, reconnecting in {DEFAULT_RECONNECT_INTERVAL}s..." + ) + if self._running: + self._fail_pending_acks( + ConnectionResetError( + f"durable stream error: {e.code()} {e.details()}" + ) + ) + await asyncio.sleep(DEFAULT_RECONNECT_INTERVAL) + try: + await self._connect() + except Exception: + logger.exception("failed to reconnect durable event listener") + + except asyncio.CancelledError: + break + + except Exception as e: + logger.exception(f"unexpected error in durable event listener: {e}") + if self._running: + self._fail_pending_acks(e) + await asyncio.sleep(DEFAULT_RECONNECT_INTERVAL) + try: + await self._connect() + except Exception: + logger.exception("failed to reconnect durable event listener") + + async def _handle_response(self, response: DurableTaskResponse) -> None: + if response.HasField("register_worker"): + pass + elif response.HasField("trigger_runs_ack"): + trigger_ack = response.trigger_runs_ack + event_key = ( + trigger_ack.durable_task_external_id, + trigger_ack.invocation_count, + ) + trigger_ack_future = self._pending_event_acks.pop(event_key, None) + if trigger_ack_future is not None and not trigger_ack_future.done(): + trigger_ack_future.set_result( + 
DurableTaskEventRunAck( + invocation_count=trigger_ack.invocation_count, + durable_task_external_id=trigger_ack.durable_task_external_id, + run_entries=[ + DurableTaskRunAckEntry( + node_id=e.node_id, + branch_id=e.branch_id, + ) + for e in trigger_ack.run_entries + ], + ) + ) + elif response.HasField("memo_ack"): + memo_ack = response.memo_ack + event_key = ( + memo_ack.ref.durable_task_external_id, + memo_ack.ref.invocation_count, + ) + memo_ack_future = self._pending_event_acks.pop(event_key, None) + if memo_ack_future is not None and not memo_ack_future.done(): + memo_ack_future.set_result( + DurableTaskEventMemoAck( + invocation_count=memo_ack.ref.invocation_count, + durable_task_external_id=memo_ack.ref.durable_task_external_id, + node_id=memo_ack.ref.node_id, + branch_id=memo_ack.ref.branch_id, + memo_already_existed=memo_ack.memo_already_existed, + memo_result_payload=memo_ack.memo_result_payload, + ) + ) + elif response.HasField("wait_for_ack"): + wait_for_ack = response.wait_for_ack + event_key = ( + wait_for_ack.ref.durable_task_external_id, + wait_for_ack.ref.invocation_count, + ) + wait_for_ack_future = self._pending_event_acks.pop(event_key, None) + if wait_for_ack_future is not None and not wait_for_ack_future.done(): + wait_for_ack_future.set_result( + DurableTaskEventWaitForAck( + invocation_count=wait_for_ack.ref.invocation_count, + durable_task_external_id=wait_for_ack.ref.durable_task_external_id, + node_id=wait_for_ack.ref.node_id, + branch_id=wait_for_ack.ref.branch_id, + ) + ) + elif response.HasField("entry_completed"): + completed = response.entry_completed + completed_key = ( + completed.ref.durable_task_external_id, + completed.ref.invocation_count, + completed.ref.branch_id, + completed.ref.node_id, + ) + result = DurableTaskEventLogEntryResult.from_proto(completed) + if completed_key in self._pending_callbacks: + completed_future = self._pending_callbacks[completed_key] + if not completed_future.done(): + 
completed_future.set_result(result) + del self._pending_callbacks[completed_key] + else: + self._buffered_completions[completed_key] = result + elif response.HasField("eviction_ack"): + eviction_ack = response.eviction_ack + eviction_key = ( + eviction_ack.durable_task_external_id, + eviction_ack.invocation_count, + ) + if eviction_key in self._pending_eviction_acks: + future = self._pending_eviction_acks.pop(eviction_key) + if not future.done(): + future.set_result(None) + elif response.HasField("server_evict"): + evict = response.server_evict + logger.info( + f"received server eviction notification for task {evict.durable_task_external_id} " + f"invocation {evict.invocation_count}: {evict.reason}" + ) + self.cleanup_task_state( + evict.durable_task_external_id, evict.invocation_count + ) + if self._on_server_evict is not None: + self._on_server_evict( + evict.durable_task_external_id, evict.invocation_count + ) + elif response.HasField("error"): + error = response.error + exc: Exception + + if ( + error.error_type + == DurableTaskErrorType.DURABLE_TASK_ERROR_TYPE_NONDETERMINISM + ): + exc = NonDeterminismError( + task_external_id=error.ref.durable_task_external_id, + invocation_count=error.ref.invocation_count, + message=error.error_message, + node_id=error.ref.node_id, + ) + else: + ## fallthrough, this shouldn't happen unless we add an error type to the engine and the SDK + ## hasn't been updated to handle it + exc = Exception( + "Unspecified durable task error: " + + error.error_message + + f" (type: {error.error_type})" + ) + + event_key = (error.ref.durable_task_external_id, error.ref.invocation_count) + if event_key in self._pending_event_acks: + error_pending_ack_future = self._pending_event_acks.pop(event_key) + if not error_pending_ack_future.done(): + error_pending_ack_future.set_exception(exc) + + callback_key = ( + error.ref.durable_task_external_id, + error.ref.invocation_count, + error.ref.branch_id, + error.ref.node_id, + ) + + if callback_key in 
self._pending_callbacks: + error_pending_callback_future = self._pending_callbacks.pop( + callback_key + ) + if not error_pending_callback_future.done(): + error_pending_callback_future.set_exception(exc) + + error_eviction_key: PendingEvictionAck = ( + error.ref.durable_task_external_id, + error.ref.invocation_count, + ) + if error_eviction_key in self._pending_eviction_acks: + eviction_future = self._pending_eviction_acks.pop(error_eviction_key) + if not eviction_future.done(): + eviction_future.set_exception(exc) + + async def _register_worker(self) -> None: + if self._request_queue is None or self._worker_id is None: + raise RuntimeError("Client not started") + + request = DurableTaskRequest( + register_worker=DurableTaskRequestRegisterWorker(worker_id=self._worker_id) ) + await self._request_queue.put(request) - register_durable_event( - request.to_proto(), - timeout=5, - metadata=get_metadata(self.token), + async def send_event( + self, + durable_task_external_id: str, + invocation_count: int, + event: DurableTaskSendEvent, + ) -> DurableTaskEventAck: + if self._request_queue is None: + raise RuntimeError("Client not started") + + key = (durable_task_external_id, invocation_count) + future: asyncio.Future[DurableTaskEventAck] = asyncio.Future() + self._pending_event_acks[key] = future + + request: DurableTaskRequest + + if isinstance(event, RunChildrenEvent): + trigger_opts_list = [ + self.admin_client._create_workflow_run_request( + workflow_name=child.workflow_name, + input=child.input, + options=child.trigger_workflow_opts, + ) + for child in event.children + ] + + trigger_req = DurableTaskTriggerRunsRequest( + durable_task_external_id=durable_task_external_id, + invocation_count=invocation_count, + trigger_opts=trigger_opts_list, + ) + + request = DurableTaskRequest(trigger_runs=trigger_req) + + elif isinstance(event, WaitForEvent): + wait_req = DurableTaskWaitForRequest( + durable_task_external_id=durable_task_external_id, + 
invocation_count=invocation_count, + wait_for_conditions=event.wait_for_conditions, + ) + + request = DurableTaskRequest(wait_for=wait_req) + elif isinstance(event, MemoEvent): + memo_req = DurableTaskMemoRequest( + durable_task_external_id=durable_task_external_id, + invocation_count=invocation_count, + key=event.memo_key, + ) + + if event.result is not None: + memo_req.payload = event.result.encode("utf-8") + + request = DurableTaskRequest(memo=memo_req) + + else: + e: Never = event + raise ValueError(f"Unknown durable task send event: {e}") + + await self._request_queue.put(request) + + return await future + + async def wait_for_callback( + self, + durable_task_external_id: str, + invocation_count: int, + branch_id: int, + node_id: int, + ) -> DurableTaskEventLogEntryResult: + key = (durable_task_external_id, invocation_count, branch_id, node_id) + + if key in self._buffered_completions: + return self._buffered_completions.pop(key) + + if key not in self._pending_callbacks: + future: asyncio.Future[DurableTaskEventLogEntryResult] = asyncio.Future() + self._pending_callbacks[key] = future + await self._poll_worker_status() + + return await self._pending_callbacks[key] + + def cleanup_task_state( + self, durable_task_external_id: str, invocation_count: int + ) -> None: + """Remove pending callbacks, acks, and buffered completions for old invocations of a task.""" + stale_cb_keys = [ + k + for k in self._pending_callbacks + if k[0] == durable_task_external_id and k[1] <= invocation_count + ] + for k in stale_cb_keys: + fut = self._pending_callbacks.pop(k) + if not fut.done(): + fut.cancel() + + stale_ack_keys = [ + ak + for ak in self._pending_event_acks + if ak[0] == durable_task_external_id and ak[1] <= invocation_count + ] + for ak in stale_ack_keys: + ack_fut = self._pending_event_acks.pop(ak) + if not ack_fut.done(): + ack_fut.cancel() + + stale_early_keys = [ + ek + for ek in self._buffered_completions + if ek[0] == durable_task_external_id and ek[1] <= 
invocation_count + ] + for ek in stale_early_keys: + del self._buffered_completions[ek] + + _EVICTION_ACK_TIMEOUT_S = 30.0 + + async def send_evict_invocation( + self, + durable_task_external_id: str, + invocation_count: int, + reason: str | None = None, + ) -> None: + """Send an eviction request to the server and wait for acknowledgement.""" + if self._request_queue is None: + raise RuntimeError("Client not started") + + eviction_key: PendingEvictionAck = ( + durable_task_external_id, + invocation_count, ) + ack_future: asyncio.Future[None] = asyncio.Future() + self._pending_eviction_acks[eviction_key] = ack_future - return True + req = DurableTaskEvictInvocationRequest( + durable_task_external_id=durable_task_external_id, + invocation_count=invocation_count, + ) + if reason is not None: + req.reason = reason - async def result(self, task_id: str, signal_key: str) -> dict[str, Any]: - key = self._generate_key(task_id, signal_key) + request = DurableTaskRequest(evict_invocation=req) + await self._request_queue.put(request) - event = await self.subscribe(key) + try: + await asyncio.wait_for(ack_future, timeout=self._EVICTION_ACK_TIMEOUT_S) + except asyncio.TimeoutError as err: + self._pending_eviction_acks.pop(eviction_key, None) + raise TimeoutError( + f"Eviction ack timed out after {self._EVICTION_ACK_TIMEOUT_S:.0f}s " + f"for task {durable_task_external_id} invocation {invocation_count}" + ) from err - return cast(dict[str, Any], json.loads(event.data.decode("utf-8"))) + async def send_memo_completed_notification( + self, + durable_task_external_id: str, + node_id: int, + branch_id: int, + invocation_count: int, + memo_key: bytes, + memo_result_payload: bytes | None, + ) -> None: + if self._request_queue is None: + raise RuntimeError("Client not started") + + await self._request_queue.put( + DurableTaskRequest( + complete_memo=DurableTaskCompleteMemoRequest( + ref=DurableEventLogEntryRef( + durable_task_external_id=durable_task_external_id, + node_id=node_id, + 
invocation_count=invocation_count, + branch_id=branch_id, + ), + memo_key=memo_key, + payload=memo_result_payload, + ) + ) + ) diff --git a/sdks/python/hatchet_sdk/clients/listeners/legacy/__init__.py b/sdks/python/hatchet_sdk/clients/listeners/legacy/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/sdks/python/hatchet_sdk/clients/listeners/legacy/pre_eviction_durable_event_listener.py b/sdks/python/hatchet_sdk/clients/listeners/legacy/pre_eviction_durable_event_listener.py new file mode 100644 index 000000000..2ed68b2bb --- /dev/null +++ b/sdks/python/hatchet_sdk/clients/listeners/legacy/pre_eviction_durable_event_listener.py @@ -0,0 +1,125 @@ +import json +from collections.abc import AsyncIterator +from typing import Any, Literal, cast + +import grpc +import grpc.aio +from pydantic import BaseModel, ConfigDict + +from hatchet_sdk.clients.listeners.pooled_listener import PooledListener +from hatchet_sdk.clients.rest.tenacity_utils import tenacity_retry +from hatchet_sdk.conditions import Condition, SleepCondition, UserEventCondition +from hatchet_sdk.config import ClientConfig +from hatchet_sdk.connection import new_conn +from hatchet_sdk.contracts.v1.dispatcher_pb2 import ( + DurableEvent, + ListenForDurableEventRequest, +) +from hatchet_sdk.contracts.v1.dispatcher_pb2 import ( + RegisterDurableEventRequest as RegisterDurableEventRequestProto, +) +from hatchet_sdk.contracts.v1.dispatcher_pb2_grpc import V1DispatcherStub +from hatchet_sdk.contracts.v1.shared.condition_pb2 import DurableEventListenerConditions +from hatchet_sdk.metadata import get_metadata + + +class RegisterDurableEventRequest(BaseModel): + model_config = ConfigDict(arbitrary_types_allowed=True) + + task_id: str + signal_key: str + conditions: list[Condition] + config: ClientConfig + + def to_proto(self) -> RegisterDurableEventRequestProto: + return RegisterDurableEventRequestProto( + task_id=self.task_id, + signal_key=self.signal_key, + 
conditions=DurableEventListenerConditions( + sleep_conditions=[ + c.to_proto(self.config) + for c in self.conditions + if isinstance(c, SleepCondition) + ], + user_event_conditions=[ + c.to_proto(self.config) + for c in self.conditions + if isinstance(c, UserEventCondition) + ], + ), + ) + + +class ParsedKey(BaseModel): + task_id: str + signal_key: str + + +class PreEvictionDurableEventListener( + PooledListener[ListenForDurableEventRequest, DurableEvent, V1DispatcherStub] +): + def _generate_key(self, task_id: str, signal_key: str) -> str: + return task_id + ":" + signal_key + + def generate_key(self, response: DurableEvent) -> str: + return self._generate_key( + task_id=response.task_id, + signal_key=response.signal_key, + ) + + def parse_key(self, key: str) -> ParsedKey: + task_id, signal_key = key.split(":", maxsplit=1) + + return ParsedKey( + task_id=task_id, + signal_key=signal_key, + ) + + async def create_subscription( + self, + request: AsyncIterator[ListenForDurableEventRequest], + metadata: tuple[tuple[str, str]], + ) -> grpc.aio.UnaryStreamCall[ListenForDurableEventRequest, DurableEvent]: + if self.client is None: + conn = new_conn(self.config, True) + self.client = V1DispatcherStub(conn) + + return cast( + grpc.aio.UnaryStreamCall[ListenForDurableEventRequest, DurableEvent], + self.client.ListenForDurableEvent( + request, # type: ignore[arg-type] + metadata=metadata, + ), + ) + + def create_request_body(self, item: str) -> ListenForDurableEventRequest: + key = self.parse_key(item) + return ListenForDurableEventRequest( + task_id=key.task_id, + signal_key=key.signal_key, + ) + + def register_durable_event( + self, request: RegisterDurableEventRequest + ) -> Literal[True]: + conn = new_conn(self.config, True) + client = V1DispatcherStub(conn) + + register_durable_event = tenacity_retry( + client.RegisterDurableEvent, self.config.tenacity + ) + + register_durable_event( + request.to_proto(), + timeout=5, + metadata=get_metadata(self.token), + ) + + return 
True + + async def result(self, task_id: str, signal_key: str) -> dict[str, Any]: + key = self._generate_key(task_id, signal_key) + + event = await self.subscribe(key) + + return cast(dict[str, Any], json.loads(event.data.decode("utf-8"))) diff --git a/sdks/python/hatchet_sdk/clients/rest/__init__.py b/sdks/python/hatchet_sdk/clients/rest/__init__.py index 0ed156258..91a8086fa 100644 --- a/sdks/python/hatchet_sdk/clients/rest/__init__.py +++ b/sdks/python/hatchet_sdk/clients/rest/__init__.py @@ -265,6 +265,12 @@ from hatchet_sdk.clients.rest.models.user_tenant_memberships_list import ( UserTenantMembershipsList, ) from hatchet_sdk.clients.rest.models.user_tenant_public import UserTenantPublic +from hatchet_sdk.clients.rest.models.v1_branch_durable_task_request import ( + V1BranchDurableTaskRequest, +) +from hatchet_sdk.clients.rest.models.v1_branch_durable_task_response import ( + V1BranchDurableTaskResponse, +) from hatchet_sdk.clients.rest.models.v1_cel_debug_request import V1CELDebugRequest from hatchet_sdk.clients.rest.models.v1_cel_debug_response import V1CELDebugResponse from hatchet_sdk.clients.rest.models.v1_cel_debug_response_status import ( @@ -307,6 +313,11 @@ from hatchet_sdk.clients.rest.models.v1_log_line_order_by_direction import ( ) from hatchet_sdk.clients.rest.models.v1_replay_task_request import V1ReplayTaskRequest from hatchet_sdk.clients.rest.models.v1_replayed_tasks import V1ReplayedTasks +from hatchet_sdk.clients.rest.models.v1_restore_task_response import ( + V1RestoreTaskResponse, +) +from hatchet_sdk.clients.rest.models.v1_running_detail_count import V1RunningDetailCount +from hatchet_sdk.clients.rest.models.v1_running_filter import V1RunningFilter from hatchet_sdk.clients.rest.models.v1_task_event import V1TaskEvent from hatchet_sdk.clients.rest.models.v1_task_event_list import V1TaskEventList from hatchet_sdk.clients.rest.models.v1_task_event_type import V1TaskEventType diff --git a/sdks/python/hatchet_sdk/clients/rest/api/task_api.py 
b/sdks/python/hatchet_sdk/clients/rest/api/task_api.py index 5a9bdb6a1..082fe5f6f 100644 --- a/sdks/python/hatchet_sdk/clients/rest/api/task_api.py +++ b/sdks/python/hatchet_sdk/clients/rest/api/task_api.py @@ -25,6 +25,9 @@ from hatchet_sdk.clients.rest.models.v1_cancelled_tasks import V1CancelledTasks from hatchet_sdk.clients.rest.models.v1_dag_children import V1DagChildren from hatchet_sdk.clients.rest.models.v1_replay_task_request import V1ReplayTaskRequest from hatchet_sdk.clients.rest.models.v1_replayed_tasks import V1ReplayedTasks +from hatchet_sdk.clients.rest.models.v1_restore_task_response import ( + V1RestoreTaskResponse, +) from hatchet_sdk.clients.rest.models.v1_task_event_list import V1TaskEventList from hatchet_sdk.clients.rest.models.v1_task_point_metrics import V1TaskPointMetrics from hatchet_sdk.clients.rest.models.v1_task_run_metric import V1TaskRunMetric @@ -2311,3 +2314,263 @@ class TaskApi: _host=_host, _request_auth=_request_auth, ) + + @validate_call + def v1_task_restore( + self, + task: Annotated[ + str, + Field(min_length=36, strict=True, max_length=36, description="The task id"), + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> V1RestoreTaskResponse: + """Restore a task + + Restore an evicted durable task + + :param task: The task id (required) + :type task: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._v1_task_restore_serialize( + task=task, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "V1RestoreTaskResponse", + "400": "APIErrors", + "403": "APIErrors", + "404": "APIErrors", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + def v1_task_restore_with_http_info( + self, + task: Annotated[ + str, + Field(min_length=36, strict=True, max_length=36, description="The task id"), + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[V1RestoreTaskResponse]: + """Restore a task + + Restore an 
evicted durable task + + :param task: The task id (required) + :type task: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._v1_task_restore_serialize( + task=task, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "V1RestoreTaskResponse", + "400": "APIErrors", + "403": "APIErrors", + "404": "APIErrors", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + def v1_task_restore_without_preload_content( + self, + task: Annotated[ + str, + Field(min_length=36, strict=True, max_length=36, description="The task id"), + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Restore a task + + Restore an evicted durable task + + :param task: The task id (required) + :type task: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. 
+ :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._v1_task_restore_serialize( + task=task, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "V1RestoreTaskResponse", + "400": "APIErrors", + "403": "APIErrors", + "404": "APIErrors", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _v1_task_restore_serialize( + self, + task, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + if task is not None: + _path_params["task"] = task + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["cookieAuth", "bearerAuth"] + + return self.api_client.param_serialize( + method="POST", + resource_path="/api/v1/stable/tasks/{task}/restore", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, 
+ auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/sdks/python/hatchet_sdk/clients/rest/api/workflow_runs_api.py b/sdks/python/hatchet_sdk/clients/rest/api/workflow_runs_api.py index 9f84b0c12..ac08d85a1 100644 --- a/sdks/python/hatchet_sdk/clients/rest/api/workflow_runs_api.py +++ b/sdks/python/hatchet_sdk/clients/rest/api/workflow_runs_api.py @@ -20,6 +20,13 @@ from datetime import datetime from pydantic import Field, StrictBool, StrictInt, StrictStr from typing import List, Optional from typing_extensions import Annotated +from hatchet_sdk.clients.rest.models.v1_branch_durable_task_request import ( + V1BranchDurableTaskRequest, +) +from hatchet_sdk.clients.rest.models.v1_branch_durable_task_response import ( + V1BranchDurableTaskResponse, +) +from hatchet_sdk.clients.rest.models.v1_running_filter import V1RunningFilter from hatchet_sdk.clients.rest.models.v1_task_event_list import V1TaskEventList from hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus from hatchet_sdk.clients.rest.models.v1_task_summary_list import V1TaskSummaryList @@ -49,6 +56,300 @@ class WorkflowRunsApi: api_client = ApiClient.get_default() self.api_client = api_client + @validate_call + def v1_durable_task_branch( + self, + tenant: Annotated[ + str, + Field( + min_length=36, strict=True, max_length=36, description="The tenant id" + ), + ], + v1_branch_durable_task_request: Annotated[ + V1BranchDurableTaskRequest, Field(description="The branch request") + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> V1BranchDurableTaskResponse: + """Branch 
durable task + + Branch a durable task from a specific node, creating a new branch and re-processing its matches. + + :param tenant: The tenant id (required) + :type tenant: str + :param v1_branch_durable_task_request: The branch request (required) + :type v1_branch_durable_task_request: V1BranchDurableTaskRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._v1_durable_task_branch_serialize( + tenant=tenant, + v1_branch_durable_task_request=v1_branch_durable_task_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "V1BranchDurableTaskResponse", + "400": "APIErrors", + "403": "APIErrors", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + def v1_durable_task_branch_with_http_info( + self, + tenant: Annotated[ + str, + Field( + min_length=36, strict=True, max_length=36, description="The tenant id" + ), + ], + v1_branch_durable_task_request: Annotated[ + V1BranchDurableTaskRequest, Field(description="The branch request") + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[V1BranchDurableTaskResponse]: + """Branch durable task + + Branch a durable task from a specific node, creating a new branch and re-processing its matches. + + :param tenant: The tenant id (required) + :type tenant: str + :param v1_branch_durable_task_request: The branch request (required) + :type v1_branch_durable_task_request: V1BranchDurableTaskRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._v1_durable_task_branch_serialize( + tenant=tenant, + v1_branch_durable_task_request=v1_branch_durable_task_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "V1BranchDurableTaskResponse", + "400": "APIErrors", + "403": "APIErrors", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + def v1_durable_task_branch_without_preload_content( + self, + tenant: Annotated[ + str, + Field( + min_length=36, strict=True, max_length=36, description="The tenant id" + ), + ], + v1_branch_durable_task_request: Annotated[ + V1BranchDurableTaskRequest, Field(description="The branch request") + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] 
= None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Branch durable task + + Branch a durable task from a specific node, creating a new branch and re-processing its matches. + + :param tenant: The tenant id (required) + :type tenant: str + :param v1_branch_durable_task_request: The branch request (required) + :type v1_branch_durable_task_request: V1BranchDurableTaskRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._v1_durable_task_branch_serialize( + tenant=tenant, + v1_branch_durable_task_request=v1_branch_durable_task_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "V1BranchDurableTaskResponse", + "400": "APIErrors", + "403": "APIErrors", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _v1_durable_task_branch_serialize( + self, + tenant, + v1_branch_durable_task_request, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + if tenant is not None: + _path_params["tenant"] = tenant + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if v1_branch_durable_task_request is not None: + _body_params = v1_branch_durable_task_request + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = ["cookieAuth", "bearerAuth"] + + return 
self.api_client.param_serialize( + method="POST", + resource_path="/api/v1/stable/tenants/{tenant}/durable-tasks/branch", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + @validate_call def v1_workflow_run_create( self, @@ -670,6 +971,12 @@ class WorkflowRunsApi: ], Field(description="The workflow ids to find runs for"), ] = None, + running_filter: Annotated[ + Optional[V1RunningFilter], + Field( + description="Filter within the RUNNING status bucket. ALL returns both on-worker and evicted tasks, ON_WORKER returns only tasks running on a worker, EVICTED returns only evicted tasks. Defaults to ALL." + ), + ] = None, _request_timeout: Union[ None, Annotated[StrictFloat, Field(gt=0)], @@ -698,6 +1005,8 @@ class WorkflowRunsApi: :type additional_metadata: List[str] :param workflow_ids: The workflow ids to find runs for :type workflow_ids: List[str] + :param running_filter: Filter within the RUNNING status bucket. ALL returns both on-worker and evicted tasks, ON_WORKER returns only tasks running on a worker, EVICTED returns only evicted tasks. Defaults to ALL. + :type running_filter: V1RunningFilter :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of @@ -727,6 +1036,7 @@ class WorkflowRunsApi: until=until, additional_metadata=additional_metadata, workflow_ids=workflow_ids, + running_filter=running_filter, _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, @@ -775,6 +1085,12 @@ class WorkflowRunsApi: ], Field(description="The workflow ids to find runs for"), ] = None, + running_filter: Annotated[ + Optional[V1RunningFilter], + Field( + description="Filter within the RUNNING status bucket. 
ALL returns both on-worker and evicted tasks, ON_WORKER returns only tasks running on a worker, EVICTED returns only evicted tasks. Defaults to ALL." + ), + ] = None, _request_timeout: Union[ None, Annotated[StrictFloat, Field(gt=0)], @@ -803,6 +1119,8 @@ class WorkflowRunsApi: :type additional_metadata: List[str] :param workflow_ids: The workflow ids to find runs for :type workflow_ids: List[str] + :param running_filter: Filter within the RUNNING status bucket. ALL returns both on-worker and evicted tasks, ON_WORKER returns only tasks running on a worker, EVICTED returns only evicted tasks. Defaults to ALL. + :type running_filter: V1RunningFilter :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of @@ -832,6 +1150,7 @@ class WorkflowRunsApi: until=until, additional_metadata=additional_metadata, workflow_ids=workflow_ids, + running_filter=running_filter, _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, @@ -880,6 +1199,12 @@ class WorkflowRunsApi: ], Field(description="The workflow ids to find runs for"), ] = None, + running_filter: Annotated[ + Optional[V1RunningFilter], + Field( + description="Filter within the RUNNING status bucket. ALL returns both on-worker and evicted tasks, ON_WORKER returns only tasks running on a worker, EVICTED returns only evicted tasks. Defaults to ALL." + ), + ] = None, _request_timeout: Union[ None, Annotated[StrictFloat, Field(gt=0)], @@ -908,6 +1233,8 @@ class WorkflowRunsApi: :type additional_metadata: List[str] :param workflow_ids: The workflow ids to find runs for :type workflow_ids: List[str] + :param running_filter: Filter within the RUNNING status bucket. ALL returns both on-worker and evicted tasks, ON_WORKER returns only tasks running on a worker, EVICTED returns only evicted tasks. Defaults to ALL. + :type running_filter: V1RunningFilter :param _request_timeout: timeout setting for this request. 
If one number provided, it will be total request timeout. It can also be a pair (tuple) of @@ -937,6 +1264,7 @@ class WorkflowRunsApi: until=until, additional_metadata=additional_metadata, workflow_ids=workflow_ids, + running_filter=running_filter, _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, @@ -962,6 +1290,7 @@ class WorkflowRunsApi: until, additional_metadata, workflow_ids, + running_filter, _request_auth, _content_type, _headers, @@ -1023,6 +1352,10 @@ class WorkflowRunsApi: _query_params.append(("workflow_ids", workflow_ids)) + if running_filter is not None: + + _query_params.append(("running_filter", running_filter.value)) + # process the header parameters # process the form parameters # process the body parameter @@ -1959,6 +2292,12 @@ class WorkflowRunsApi: description="A flag for whether or not to include the input and output payloads in the response. Defaults to `true` if unset." ), ] = None, + running_filter: Annotated[ + Optional[V1RunningFilter], + Field( + description="Filter within the RUNNING status bucket. ALL returns both on-worker and evicted tasks, ON_WORKER returns only tasks running on a worker, EVICTED returns only evicted tasks. Defaults to ALL." + ), + ] = None, _request_timeout: Union[ None, Annotated[StrictFloat, Field(gt=0)], @@ -2001,6 +2340,8 @@ class WorkflowRunsApi: :type triggering_event_external_id: str :param include_payloads: A flag for whether or not to include the input and output payloads in the response. Defaults to `true` if unset. :type include_payloads: bool + :param running_filter: Filter within the RUNNING status bucket. ALL returns both on-worker and evicted tasks, ON_WORKER returns only tasks running on a worker, EVICTED returns only evicted tasks. Defaults to ALL. + :type running_filter: V1RunningFilter :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. 
It can also be a pair (tuple) of @@ -2037,6 +2378,7 @@ class WorkflowRunsApi: parent_task_external_id=parent_task_external_id, triggering_event_external_id=triggering_event_external_id, include_payloads=include_payloads, + running_filter=running_filter, _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, @@ -2115,6 +2457,12 @@ class WorkflowRunsApi: description="A flag for whether or not to include the input and output payloads in the response. Defaults to `true` if unset." ), ] = None, + running_filter: Annotated[ + Optional[V1RunningFilter], + Field( + description="Filter within the RUNNING status bucket. ALL returns both on-worker and evicted tasks, ON_WORKER returns only tasks running on a worker, EVICTED returns only evicted tasks. Defaults to ALL." + ), + ] = None, _request_timeout: Union[ None, Annotated[StrictFloat, Field(gt=0)], @@ -2157,6 +2505,8 @@ class WorkflowRunsApi: :type triggering_event_external_id: str :param include_payloads: A flag for whether or not to include the input and output payloads in the response. Defaults to `true` if unset. :type include_payloads: bool + :param running_filter: Filter within the RUNNING status bucket. ALL returns both on-worker and evicted tasks, ON_WORKER returns only tasks running on a worker, EVICTED returns only evicted tasks. Defaults to ALL. + :type running_filter: V1RunningFilter :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of @@ -2193,6 +2543,7 @@ class WorkflowRunsApi: parent_task_external_id=parent_task_external_id, triggering_event_external_id=triggering_event_external_id, include_payloads=include_payloads, + running_filter=running_filter, _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, @@ -2271,6 +2622,12 @@ class WorkflowRunsApi: description="A flag for whether or not to include the input and output payloads in the response. 
Defaults to `true` if unset." ), ] = None, + running_filter: Annotated[ + Optional[V1RunningFilter], + Field( + description="Filter within the RUNNING status bucket. ALL returns both on-worker and evicted tasks, ON_WORKER returns only tasks running on a worker, EVICTED returns only evicted tasks. Defaults to ALL." + ), + ] = None, _request_timeout: Union[ None, Annotated[StrictFloat, Field(gt=0)], @@ -2313,6 +2670,8 @@ class WorkflowRunsApi: :type triggering_event_external_id: str :param include_payloads: A flag for whether or not to include the input and output payloads in the response. Defaults to `true` if unset. :type include_payloads: bool + :param running_filter: Filter within the RUNNING status bucket. ALL returns both on-worker and evicted tasks, ON_WORKER returns only tasks running on a worker, EVICTED returns only evicted tasks. Defaults to ALL. + :type running_filter: V1RunningFilter :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. 
It can also be a pair (tuple) of @@ -2349,6 +2708,7 @@ class WorkflowRunsApi: parent_task_external_id=parent_task_external_id, triggering_event_external_id=triggering_event_external_id, include_payloads=include_payloads, + running_filter=running_filter, _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, @@ -2381,6 +2741,7 @@ class WorkflowRunsApi: parent_task_external_id, triggering_event_external_id, include_payloads, + running_filter, _request_auth, _content_type, _headers, @@ -2472,6 +2833,10 @@ class WorkflowRunsApi: _query_params.append(("include_payloads", include_payloads)) + if running_filter is not None: + + _query_params.append(("running_filter", running_filter.value)) + # process the header parameters # process the form parameters # process the body parameter diff --git a/sdks/python/hatchet_sdk/clients/rest/models/__init__.py b/sdks/python/hatchet_sdk/clients/rest/models/__init__.py index 57b29bee5..ff89b3da6 100644 --- a/sdks/python/hatchet_sdk/clients/rest/models/__init__.py +++ b/sdks/python/hatchet_sdk/clients/rest/models/__init__.py @@ -228,6 +228,12 @@ from hatchet_sdk.clients.rest.models.user_tenant_memberships_list import ( UserTenantMembershipsList, ) from hatchet_sdk.clients.rest.models.user_tenant_public import UserTenantPublic +from hatchet_sdk.clients.rest.models.v1_branch_durable_task_request import ( + V1BranchDurableTaskRequest, +) +from hatchet_sdk.clients.rest.models.v1_branch_durable_task_response import ( + V1BranchDurableTaskResponse, +) from hatchet_sdk.clients.rest.models.v1_cel_debug_request import V1CELDebugRequest from hatchet_sdk.clients.rest.models.v1_cel_debug_response import V1CELDebugResponse from hatchet_sdk.clients.rest.models.v1_cel_debug_response_status import ( @@ -270,6 +276,11 @@ from hatchet_sdk.clients.rest.models.v1_log_line_order_by_direction import ( ) from hatchet_sdk.clients.rest.models.v1_replay_task_request import V1ReplayTaskRequest from 
hatchet_sdk.clients.rest.models.v1_replayed_tasks import V1ReplayedTasks +from hatchet_sdk.clients.rest.models.v1_restore_task_response import ( + V1RestoreTaskResponse, +) +from hatchet_sdk.clients.rest.models.v1_running_detail_count import V1RunningDetailCount +from hatchet_sdk.clients.rest.models.v1_running_filter import V1RunningFilter from hatchet_sdk.clients.rest.models.v1_task_event import V1TaskEvent from hatchet_sdk.clients.rest.models.v1_task_event_list import V1TaskEventList from hatchet_sdk.clients.rest.models.v1_task_event_type import V1TaskEventType diff --git a/sdks/python/hatchet_sdk/clients/rest/models/bulk_create_event_response.py b/sdks/python/hatchet_sdk/clients/rest/models/bulk_create_event_response.py deleted file mode 100644 index e48552b52..000000000 --- a/sdks/python/hatchet_sdk/clients/rest/models/bulk_create_event_response.py +++ /dev/null @@ -1,109 +0,0 @@ -# coding: utf-8 - -""" -Hatchet API - -The Hatchet API - -The version of the OpenAPI document: 1.0.0 -Generated by OpenAPI Generator (https://openapi-generator.tech) - -Do not edit the class manually. 
-""" # noqa: E501 - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, Field -from typing_extensions import Self - -from hatchet_sdk.clients.rest.models.api_resource_meta import APIResourceMeta -from hatchet_sdk.clients.rest.models.event import Event - - -class BulkCreateEventResponse(BaseModel): - """ - BulkCreateEventResponse - """ # noqa: E501 - - metadata: APIResourceMeta - events: List[Event] = Field(description="The events.") - __properties: ClassVar[List[str]] = ["metadata", "events"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of BulkCreateEventResponse from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of metadata - if self.metadata: - _dict["metadata"] = self.metadata.to_dict() - # override the default output from pydantic by calling `to_dict()` of each item in events (list) - _items = [] - if self.events: - for _item_events in self.events: - if _item_events: - _items.append(_item_events.to_dict()) - _dict["events"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of BulkCreateEventResponse from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "metadata": ( - APIResourceMeta.from_dict(obj["metadata"]) - if obj.get("metadata") is not None - else None - ), - "events": ( - [Event.from_dict(_item) for _item in obj["events"]] - if obj.get("events") is not None - else None - ), - } - ) - return _obj diff --git a/sdks/python/hatchet_sdk/clients/rest/models/cancel_step_run_request.py b/sdks/python/hatchet_sdk/clients/rest/models/cancel_step_run_request.py deleted file mode 100644 index 4716a1972..000000000 --- a/sdks/python/hatchet_sdk/clients/rest/models/cancel_step_run_request.py +++ /dev/null @@ -1,82 +0,0 @@ -# coding: utf-8 - -""" -Hatchet API - -The Hatchet API - -The version of the OpenAPI document: 1.0.0 -Generated by OpenAPI Generator (https://openapi-generator.tech) - -Do not edit the class manually. 
-""" # noqa: E501 - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel -from typing_extensions import Self - - -class CancelStepRunRequest(BaseModel): - """ - CancelStepRunRequest - """ # noqa: E501 - - input: Dict[str, Any] - __properties: ClassVar[List[str]] = ["input"] - - model_config = { - "populate_by_name": True, - "validate_assignment": True, - "protected_namespaces": (), - } - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of CancelStepRunRequest from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of CancelStepRunRequest from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"input": obj.get("input")}) - return _obj diff --git a/sdks/python/hatchet_sdk/clients/rest/models/github_app_installation.py b/sdks/python/hatchet_sdk/clients/rest/models/github_app_installation.py deleted file mode 100644 index bb2c5f9ea..000000000 --- a/sdks/python/hatchet_sdk/clients/rest/models/github_app_installation.py +++ /dev/null @@ -1,106 +0,0 @@ -# coding: utf-8 - -""" -Hatchet API - -The Hatchet API - -The version of the OpenAPI document: 1.0.0 -Generated by OpenAPI Generator (https://openapi-generator.tech) - -Do not edit the class manually. -""" # noqa: E501 - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - -from hatchet_sdk.clients.rest.models.api_resource_meta import APIResourceMeta - - -class GithubAppInstallation(BaseModel): - """ - GithubAppInstallation - """ # noqa: E501 - - metadata: APIResourceMeta - installation_settings_url: StrictStr - account_name: StrictStr - account_avatar_url: StrictStr - __properties: ClassVar[List[str]] = [ - "metadata", - "installation_settings_url", - "account_name", - "account_avatar_url", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - 
"""Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of GithubAppInstallation from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of metadata - if self.metadata: - _dict["metadata"] = self.metadata.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of GithubAppInstallation from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "metadata": ( - APIResourceMeta.from_dict(obj["metadata"]) - if obj.get("metadata") is not None - else None - ), - "installation_settings_url": obj.get("installation_settings_url"), - "account_name": obj.get("account_name"), - "account_avatar_url": obj.get("account_avatar_url"), - } - ) - return _obj diff --git a/sdks/python/hatchet_sdk/clients/rest/models/github_branch.py b/sdks/python/hatchet_sdk/clients/rest/models/github_branch.py deleted file mode 100644 index 71290bd5e..000000000 --- a/sdks/python/hatchet_sdk/clients/rest/models/github_branch.py +++ /dev/null @@ -1,85 +0,0 @@ -# coding: utf-8 - -""" -Hatchet API - -The Hatchet API - -The version of the 
OpenAPI document: 1.0.0 -Generated by OpenAPI Generator (https://openapi-generator.tech) - -Do not edit the class manually. -""" # noqa: E501 - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictBool, StrictStr -from typing_extensions import Self - - -class GithubBranch(BaseModel): - """ - GithubBranch - """ # noqa: E501 - - branch_name: StrictStr - is_default: StrictBool - __properties: ClassVar[List[str]] = ["branch_name", "is_default"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of GithubBranch from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of GithubBranch from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - {"branch_name": obj.get("branch_name"), "is_default": obj.get("is_default")} - ) - return _obj diff --git a/sdks/python/hatchet_sdk/clients/rest/models/link_github_repository_request.py b/sdks/python/hatchet_sdk/clients/rest/models/link_github_repository_request.py deleted file mode 100644 index 52252b333..000000000 --- a/sdks/python/hatchet_sdk/clients/rest/models/link_github_repository_request.py +++ /dev/null @@ -1,105 +0,0 @@ -# coding: utf-8 - -""" -Hatchet API - -The Hatchet API - -The version of the OpenAPI document: 1.0.0 -Generated by OpenAPI Generator (https://openapi-generator.tech) - -Do not edit the class manually. 
-""" # noqa: E501 - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, Field, StrictStr -from typing_extensions import Annotated, Self - - -class LinkGithubRepositoryRequest(BaseModel): - """ - LinkGithubRepositoryRequest - """ # noqa: E501 - - installation_id: Annotated[ - str, Field(min_length=36, strict=True, max_length=36) - ] = Field(description="The repository name.", alias="installationId") - git_repo_name: StrictStr = Field( - description="The repository name.", alias="gitRepoName" - ) - git_repo_owner: StrictStr = Field( - description="The repository owner.", alias="gitRepoOwner" - ) - git_repo_branch: StrictStr = Field( - description="The repository branch.", alias="gitRepoBranch" - ) - __properties: ClassVar[List[str]] = [ - "installationId", - "gitRepoName", - "gitRepoOwner", - "gitRepoBranch", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of LinkGithubRepositoryRequest from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. 
Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of LinkGithubRepositoryRequest from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "installationId": obj.get("installationId"), - "gitRepoName": obj.get("gitRepoName"), - "gitRepoOwner": obj.get("gitRepoOwner"), - "gitRepoBranch": obj.get("gitRepoBranch"), - } - ) - return _obj diff --git a/sdks/python/hatchet_sdk/clients/rest/models/list_github_app_installations_response.py b/sdks/python/hatchet_sdk/clients/rest/models/list_github_app_installations_response.py deleted file mode 100644 index ea3dcda29..000000000 --- a/sdks/python/hatchet_sdk/clients/rest/models/list_github_app_installations_response.py +++ /dev/null @@ -1,111 +0,0 @@ -# coding: utf-8 - -""" -Hatchet API - -The Hatchet API - -The version of the OpenAPI document: 1.0.0 -Generated by OpenAPI Generator (https://openapi-generator.tech) - -Do not edit the class manually. 
-""" # noqa: E501 - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict -from typing_extensions import Self - -from hatchet_sdk.clients.rest.models.github_app_installation import ( - GithubAppInstallation, -) -from hatchet_sdk.clients.rest.models.pagination_response import PaginationResponse - - -class ListGithubAppInstallationsResponse(BaseModel): - """ - ListGithubAppInstallationsResponse - """ # noqa: E501 - - pagination: PaginationResponse - rows: List[GithubAppInstallation] - __properties: ClassVar[List[str]] = ["pagination", "rows"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of ListGithubAppInstallationsResponse from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of pagination - if self.pagination: - _dict["pagination"] = self.pagination.to_dict() - # override the default output from pydantic by calling `to_dict()` of each item in rows (list) - _items = [] - if self.rows: - for _item in self.rows: - if _item: - _items.append(_item.to_dict()) - _dict["rows"] = _items - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of ListGithubAppInstallationsResponse from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "pagination": ( - PaginationResponse.from_dict(obj["pagination"]) - if obj.get("pagination") is not None - else None - ), - "rows": ( - [GithubAppInstallation.from_dict(_item) for _item in obj["rows"]] - if obj.get("rows") is not None - else None - ), - } - ) - return _obj diff --git a/sdks/python/hatchet_sdk/clients/rest/models/v1_cel_debug_error_response.py b/sdks/python/hatchet_sdk/clients/rest/models/v1_branch_durable_task_request.py similarity index 63% rename from sdks/python/hatchet_sdk/clients/rest/models/v1_cel_debug_error_response.py rename to sdks/python/hatchet_sdk/clients/rest/models/v1_branch_durable_task_request.py index 4b9463d2f..684c090cb 100644 --- a/sdks/python/hatchet_sdk/clients/rest/models/v1_cel_debug_error_response.py +++ b/sdks/python/hatchet_sdk/clients/rest/models/v1_branch_durable_task_request.py @@ -12,31 +12,35 @@ Do not edit the class manually. 
""" # noqa: E501 from __future__ import annotations - -import json import pprint import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set +import json -from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator +from pydantic import BaseModel, ConfigDict, Field, StrictInt +from typing import Any, ClassVar, Dict, List +from typing_extensions import Annotated +from typing import Optional, Set from typing_extensions import Self -class V1CELDebugErrorResponse(BaseModel): +class V1BranchDurableTaskRequest(BaseModel): """ - V1CELDebugErrorResponse + V1BranchDurableTaskRequest """ # noqa: E501 - status: StrictStr - error: StrictStr = Field(description="The error message if the evaluation failed") - __properties: ClassVar[List[str]] = ["status", "error"] - - @field_validator("status") - def status_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["ERROR"]): - raise ValueError("must be one of enum values ('ERROR')") - return value + task_external_id: Annotated[ + str, Field(min_length=36, strict=True, max_length=36) + ] = Field( + description="The external id of the durable task to branch.", + alias="taskExternalId", + ) + node_id: StrictInt = Field( + description="The node id to replay from.", alias="nodeId" + ) + branch_id: StrictInt = Field( + description="The branch id to replay from.", alias="branchId" + ) + __properties: ClassVar[List[str]] = ["taskExternalId", "nodeId", "branchId"] model_config = ConfigDict( populate_by_name=True, @@ -55,7 +59,7 @@ class V1CELDebugErrorResponse(BaseModel): @classmethod def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of V1CELDebugErrorResponse from a JSON string""" + """Create an instance of V1BranchDurableTaskRequest from a JSON string""" return cls.from_dict(json.loads(json_str)) def to_dict(self) -> Dict[str, Any]: @@ -79,7 +83,7 @@ class V1CELDebugErrorResponse(BaseModel): @classmethod def from_dict(cls, obj: Optional[Dict[str, 
Any]]) -> Optional[Self]: - """Create an instance of V1CELDebugErrorResponse from a dict""" + """Create an instance of V1BranchDurableTaskRequest from a dict""" if obj is None: return None @@ -87,6 +91,10 @@ class V1CELDebugErrorResponse(BaseModel): return cls.model_validate(obj) _obj = cls.model_validate( - {"status": obj.get("status"), "error": obj.get("error")} + { + "taskExternalId": obj.get("taskExternalId"), + "nodeId": obj.get("nodeId"), + "branchId": obj.get("branchId"), + } ) return _obj diff --git a/sdks/python/hatchet_sdk/clients/rest/models/v1_cel_debug_success_response.py b/sdks/python/hatchet_sdk/clients/rest/models/v1_branch_durable_task_response.py similarity index 63% rename from sdks/python/hatchet_sdk/clients/rest/models/v1_cel_debug_success_response.py rename to sdks/python/hatchet_sdk/clients/rest/models/v1_branch_durable_task_response.py index 5eac906a5..9df01579e 100644 --- a/sdks/python/hatchet_sdk/clients/rest/models/v1_cel_debug_success_response.py +++ b/sdks/python/hatchet_sdk/clients/rest/models/v1_branch_durable_task_response.py @@ -12,40 +12,34 @@ Do not edit the class manually. 
""" # noqa: E501 from __future__ import annotations - -import json import pprint import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set +import json -from pydantic import ( - BaseModel, - ConfigDict, - Field, - StrictBool, - StrictStr, - field_validator, -) +from pydantic import BaseModel, ConfigDict, Field, StrictInt +from typing import Any, ClassVar, Dict, List +from typing_extensions import Annotated +from typing import Optional, Set from typing_extensions import Self -class V1CELDebugSuccessResponse(BaseModel): +class V1BranchDurableTaskResponse(BaseModel): """ - V1CELDebugSuccessResponse + V1BranchDurableTaskResponse """ # noqa: E501 - status: StrictStr - output: StrictBool = Field( - description="The result of the CEL expression evaluation" + task_external_id: Annotated[ + str, Field(min_length=36, strict=True, max_length=36) + ] = Field( + description="The external id of the durable task.", alias="taskExternalId" ) - __properties: ClassVar[List[str]] = ["status", "output"] - - @field_validator("status") - def status_validate_enum(cls, value): - """Validates the enum""" - if value not in set(["SUCCESS"]): - raise ValueError("must be one of enum values ('SUCCESS')") - return value + node_id: StrictInt = Field( + description="The node id of the new entry.", alias="nodeId" + ) + branch_id: StrictInt = Field( + description="The branch id of the new entry.", alias="branchId" + ) + __properties: ClassVar[List[str]] = ["taskExternalId", "nodeId", "branchId"] model_config = ConfigDict( populate_by_name=True, @@ -64,7 +58,7 @@ class V1CELDebugSuccessResponse(BaseModel): @classmethod def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of V1CELDebugSuccessResponse from a JSON string""" + """Create an instance of V1BranchDurableTaskResponse from a JSON string""" return cls.from_dict(json.loads(json_str)) def to_dict(self) -> Dict[str, Any]: @@ -88,7 +82,7 @@ class V1CELDebugSuccessResponse(BaseModel): @classmethod def 
from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of V1CELDebugSuccessResponse from a dict""" + """Create an instance of V1BranchDurableTaskResponse from a dict""" if obj is None: return None @@ -96,6 +90,10 @@ class V1CELDebugSuccessResponse(BaseModel): return cls.model_validate(obj) _obj = cls.model_validate( - {"status": obj.get("status"), "output": obj.get("output")} + { + "taskExternalId": obj.get("taskExternalId"), + "nodeId": obj.get("nodeId"), + "branchId": obj.get("branchId"), + } ) return _obj diff --git a/sdks/python/hatchet_sdk/clients/rest/models/v1_create_webhook_request_api_key_all_of_auth_type.py b/sdks/python/hatchet_sdk/clients/rest/models/v1_create_webhook_request_api_key_all_of_auth_type.py deleted file mode 100644 index 01e119749..000000000 --- a/sdks/python/hatchet_sdk/clients/rest/models/v1_create_webhook_request_api_key_all_of_auth_type.py +++ /dev/null @@ -1,81 +0,0 @@ -# coding: utf-8 - -""" -Hatchet API - -The Hatchet API - -The version of the OpenAPI document: 1.0.0 -Generated by OpenAPI Generator (https://openapi-generator.tech) - -Do not edit the class manually. 
-""" # noqa: E501 - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict -from typing_extensions import Self - - -class V1CreateWebhookRequestAPIKeyAllOfAuthType(BaseModel): - """ - V1CreateWebhookRequestAPIKeyAllOfAuthType - """ # noqa: E501 - - __properties: ClassVar[List[str]] = [] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of V1CreateWebhookRequestAPIKeyAllOfAuthType from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of V1CreateWebhookRequestAPIKeyAllOfAuthType from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({}) - return _obj diff --git a/sdks/python/hatchet_sdk/clients/rest/models/v1_create_webhook_request_hmac_all_of_auth_type.py b/sdks/python/hatchet_sdk/clients/rest/models/v1_create_webhook_request_hmac_all_of_auth_type.py deleted file mode 100644 index 3328c9ff9..000000000 --- a/sdks/python/hatchet_sdk/clients/rest/models/v1_create_webhook_request_hmac_all_of_auth_type.py +++ /dev/null @@ -1,81 +0,0 @@ -# coding: utf-8 - -""" -Hatchet API - -The Hatchet API - -The version of the OpenAPI document: 1.0.0 -Generated by OpenAPI Generator (https://openapi-generator.tech) - -Do not edit the class manually. 
-""" # noqa: E501 - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict -from typing_extensions import Self - - -class V1CreateWebhookRequestHMACAllOfAuthType(BaseModel): - """ - V1CreateWebhookRequestHMACAllOfAuthType - """ # noqa: E501 - - __properties: ClassVar[List[str]] = [] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of V1CreateWebhookRequestHMACAllOfAuthType from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of V1CreateWebhookRequestHMACAllOfAuthType from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({}) - return _obj diff --git a/sdks/python/hatchet_sdk/clients/rest/models/v1_create_webhook_request_basic_auth_all_of_auth_type.py b/sdks/python/hatchet_sdk/clients/rest/models/v1_restore_task_response.py similarity index 79% rename from sdks/python/hatchet_sdk/clients/rest/models/v1_create_webhook_request_basic_auth_all_of_auth_type.py rename to sdks/python/hatchet_sdk/clients/rest/models/v1_restore_task_response.py index a98638696..4da777f6c 100644 --- a/sdks/python/hatchet_sdk/clients/rest/models/v1_create_webhook_request_basic_auth_all_of_auth_type.py +++ b/sdks/python/hatchet_sdk/clients/rest/models/v1_restore_task_response.py @@ -12,22 +12,23 @@ Do not edit the class manually. 
""" # noqa: E501 from __future__ import annotations - -import json import pprint import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set +import json -from pydantic import BaseModel, ConfigDict +from pydantic import BaseModel, ConfigDict, StrictBool +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set from typing_extensions import Self -class V1CreateWebhookRequestBasicAuthAllOfAuthType(BaseModel): +class V1RestoreTaskResponse(BaseModel): """ - V1CreateWebhookRequestBasicAuthAllOfAuthType + V1RestoreTaskResponse """ # noqa: E501 - __properties: ClassVar[List[str]] = [] + requeued: StrictBool + __properties: ClassVar[List[str]] = ["requeued"] model_config = ConfigDict( populate_by_name=True, @@ -46,7 +47,7 @@ class V1CreateWebhookRequestBasicAuthAllOfAuthType(BaseModel): @classmethod def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of V1CreateWebhookRequestBasicAuthAllOfAuthType from a JSON string""" + """Create an instance of V1RestoreTaskResponse from a JSON string""" return cls.from_dict(json.loads(json_str)) def to_dict(self) -> Dict[str, Any]: @@ -70,12 +71,12 @@ class V1CreateWebhookRequestBasicAuthAllOfAuthType(BaseModel): @classmethod def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of V1CreateWebhookRequestBasicAuthAllOfAuthType from a dict""" + """Create an instance of V1RestoreTaskResponse from a dict""" if obj is None: return None if not isinstance(obj, dict): return cls.model_validate(obj) - _obj = cls.model_validate({}) + _obj = cls.model_validate({"requeued": obj.get("requeued")}) return _obj diff --git a/sdks/python/hatchet_sdk/clients/rest/models/github_repo.py b/sdks/python/hatchet_sdk/clients/rest/models/v1_running_detail_count.py similarity index 71% rename from sdks/python/hatchet_sdk/clients/rest/models/github_repo.py rename to sdks/python/hatchet_sdk/clients/rest/models/v1_running_detail_count.py index 
694680b62..0060c0f60 100644 --- a/sdks/python/hatchet_sdk/clients/rest/models/github_repo.py +++ b/sdks/python/hatchet_sdk/clients/rest/models/v1_running_detail_count.py @@ -12,24 +12,29 @@ Do not edit the class manually. """ # noqa: E501 from __future__ import annotations - -import json import pprint import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set +import json -from pydantic import BaseModel, ConfigDict, StrictStr +from pydantic import BaseModel, ConfigDict, Field, StrictInt +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set from typing_extensions import Self -class GithubRepo(BaseModel): +class V1RunningDetailCount(BaseModel): """ - GithubRepo + V1RunningDetailCount """ # noqa: E501 - repo_owner: StrictStr - repo_name: StrictStr - __properties: ClassVar[List[str]] = ["repo_owner", "repo_name"] + evicted: StrictInt = Field( + description="The number of evicted tasks within the RUNNING status bucket." + ) + on_worker: StrictInt = Field( + description="The number of tasks currently on a worker within the RUNNING status bucket.", + alias="onWorker", + ) + __properties: ClassVar[List[str]] = ["evicted", "onWorker"] model_config = ConfigDict( populate_by_name=True, @@ -48,7 +53,7 @@ class GithubRepo(BaseModel): @classmethod def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of GithubRepo from a JSON string""" + """Create an instance of V1RunningDetailCount from a JSON string""" return cls.from_dict(json.loads(json_str)) def to_dict(self) -> Dict[str, Any]: @@ -72,7 +77,7 @@ class GithubRepo(BaseModel): @classmethod def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of GithubRepo from a dict""" + """Create an instance of V1RunningDetailCount from a dict""" if obj is None: return None @@ -80,6 +85,6 @@ class GithubRepo(BaseModel): return cls.model_validate(obj) _obj = cls.model_validate( - {"repo_owner": obj.get("repo_owner"), "repo_name": 
obj.get("repo_name")} + {"evicted": obj.get("evicted"), "onWorker": obj.get("onWorker")} ) return _obj diff --git a/sdks/python/hatchet_sdk/clients/rest/models/tenant_ui_version.py b/sdks/python/hatchet_sdk/clients/rest/models/v1_running_filter.py similarity index 72% rename from sdks/python/hatchet_sdk/clients/rest/models/tenant_ui_version.py rename to sdks/python/hatchet_sdk/clients/rest/models/v1_running_filter.py index fc44c1f4a..4a9dc2187 100644 --- a/sdks/python/hatchet_sdk/clients/rest/models/tenant_ui_version.py +++ b/sdks/python/hatchet_sdk/clients/rest/models/v1_running_filter.py @@ -12,25 +12,24 @@ Do not edit the class manually. """ # noqa: E501 from __future__ import annotations - import json from enum import Enum - from typing_extensions import Self -class TenantUIVersion(str, Enum): +class V1RunningFilter(str, Enum): """ - TenantUIVersion + V1RunningFilter """ """ allowed enum values """ - V0 = "V0" - V1 = "V1" + ALL = "ALL" + EVICTED = "EVICTED" + ON_WORKER = "ON_WORKER" @classmethod def from_json(cls, json_str: str) -> Self: - """Create an instance of TenantUIVersion from a JSON string""" + """Create an instance of V1RunningFilter from a JSON string""" return cls(json.loads(json_str)) diff --git a/sdks/python/hatchet_sdk/clients/rest/models/v1_task.py b/sdks/python/hatchet_sdk/clients/rest/models/v1_task.py deleted file mode 100644 index 8c23b1dcc..000000000 --- a/sdks/python/hatchet_sdk/clients/rest/models/v1_task.py +++ /dev/null @@ -1,173 +0,0 @@ -# coding: utf-8 - -""" -Hatchet API - -The Hatchet API - -The version of the OpenAPI document: 1.0.0 -Generated by OpenAPI Generator (https://openapi-generator.tech) - -Do not edit the class manually. 
-""" # noqa: E501 - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from datetime import datetime -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, Field, StrictInt, StrictStr -from typing_extensions import Annotated, Self - -from hatchet_sdk.clients.rest.models.api_resource_meta import APIResourceMeta -from hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus - - -class V1Task(BaseModel): - """ - V1Task - """ # noqa: E501 - - metadata: APIResourceMeta - task_id: StrictInt = Field(description="The ID of the task.", alias="taskId") - task_inserted_at: datetime = Field( - description="The timestamp the task was inserted.", alias="taskInsertedAt" - ) - status: V1TaskStatus - started_at: Optional[datetime] = Field( - default=None, - description="The timestamp the task run started.", - alias="startedAt", - ) - finished_at: Optional[datetime] = Field( - default=None, - description="The timestamp the task run finished.", - alias="finishedAt", - ) - duration: Optional[StrictInt] = Field( - default=None, description="The duration of the task run, in milliseconds." 
- ) - tenant_id: Annotated[str, Field(min_length=36, strict=True, max_length=36)] = Field( - description="The ID of the tenant.", alias="tenantId" - ) - additional_metadata: Optional[Dict[str, Any]] = Field( - default=None, - description="Additional metadata for the task run.", - alias="additionalMetadata", - ) - display_name: StrictStr = Field( - description="The display name of the task run.", alias="displayName" - ) - workflow_id: StrictStr = Field(alias="workflowId") - input: StrictStr = Field(description="The input for the task run.") - output: Optional[StrictStr] = Field( - default=None, description="The output of the task run (for the latest run)" - ) - error_message: Optional[StrictStr] = Field( - default=None, - description="The error message of the task run (for the latest run)", - alias="errorMessage", - ) - workflow_run_external_id: Optional[ - Annotated[str, Field(min_length=36, strict=True, max_length=36)] - ] = Field( - default=None, - description="The external ID of the workflow run.", - alias="workflowRunExternalId", - ) - __properties: ClassVar[List[str]] = [ - "metadata", - "taskId", - "taskInsertedAt", - "status", - "startedAt", - "finishedAt", - "duration", - "tenantId", - "additionalMetadata", - "displayName", - "workflowId", - "input", - "output", - "errorMessage", - "workflowRunExternalId", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of V1Task from a JSON string""" - return cls.from_dict(json.loads(json_str)) - 
- def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of metadata - if self.metadata: - _dict["metadata"] = self.metadata.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of V1Task from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "metadata": ( - APIResourceMeta.from_dict(obj["metadata"]) - if obj.get("metadata") is not None - else None - ), - "taskId": obj.get("taskId"), - "taskInsertedAt": obj.get("taskInsertedAt"), - "status": obj.get("status"), - "startedAt": obj.get("startedAt"), - "finishedAt": obj.get("finishedAt"), - "duration": obj.get("duration"), - "tenantId": obj.get("tenantId"), - "additionalMetadata": obj.get("additionalMetadata"), - "displayName": obj.get("displayName"), - "workflowId": obj.get("workflowId"), - "input": obj.get("input"), - "output": obj.get("output"), - "errorMessage": obj.get("errorMessage"), - "workflowRunExternalId": obj.get("workflowRunExternalId"), - } - ) - return _obj diff --git a/sdks/python/hatchet_sdk/clients/rest/models/v1_task_event_type.py b/sdks/python/hatchet_sdk/clients/rest/models/v1_task_event_type.py index 8921561b8..2817f71ee 100644 --- a/sdks/python/hatchet_sdk/clients/rest/models/v1_task_event_type.py +++ b/sdks/python/hatchet_sdk/clients/rest/models/v1_task_event_type.py @@ -46,6 +46,8 @@ class V1TaskEventType(str, 
Enum): QUEUED = "QUEUED" SKIPPED = "SKIPPED" COULD_NOT_SEND_TO_WORKER = "COULD_NOT_SEND_TO_WORKER" + DURABLE_EVICTED = "DURABLE_EVICTED" + DURABLE_RESTORING = "DURABLE_RESTORING" @classmethod def from_json(cls, json_str: str) -> Self: diff --git a/sdks/python/hatchet_sdk/clients/rest/models/v1_task_run_metric.py b/sdks/python/hatchet_sdk/clients/rest/models/v1_task_run_metric.py index 1229a4f65..7ce1341f8 100644 --- a/sdks/python/hatchet_sdk/clients/rest/models/v1_task_run_metric.py +++ b/sdks/python/hatchet_sdk/clients/rest/models/v1_task_run_metric.py @@ -16,8 +16,9 @@ import pprint import re # noqa: F401 import json -from pydantic import BaseModel, ConfigDict, StrictInt -from typing import Any, ClassVar, Dict, List +from pydantic import BaseModel, ConfigDict, Field, StrictInt +from typing import Any, ClassVar, Dict, List, Optional +from hatchet_sdk.clients.rest.models.v1_running_detail_count import V1RunningDetailCount from hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus from typing import Optional, Set from typing_extensions import Self @@ -30,7 +31,10 @@ class V1TaskRunMetric(BaseModel): status: V1TaskStatus count: StrictInt - __properties: ClassVar[List[str]] = ["status", "count"] + running_detail_count: Optional[V1RunningDetailCount] = Field( + default=None, alias="runningDetailCount" + ) + __properties: ClassVar[List[str]] = ["status", "count", "runningDetailCount"] model_config = ConfigDict( populate_by_name=True, @@ -69,6 +73,9 @@ class V1TaskRunMetric(BaseModel): exclude=excluded_fields, exclude_none=True, ) + # override the default output from pydantic by calling `to_dict()` of running_detail_count + if self.running_detail_count: + _dict["runningDetailCount"] = self.running_detail_count.to_dict() return _dict @classmethod @@ -81,6 +88,14 @@ class V1TaskRunMetric(BaseModel): return cls.model_validate(obj) _obj = cls.model_validate( - {"status": obj.get("status"), "count": obj.get("count")} + { + "status": obj.get("status"), + "count": 
obj.get("count"), + "runningDetailCount": ( + V1RunningDetailCount.from_dict(obj["runningDetailCount"]) + if obj.get("runningDetailCount") is not None + else None + ), + } ) return _obj diff --git a/sdks/python/hatchet_sdk/clients/rest/models/v1_task_summary.py b/sdks/python/hatchet_sdk/clients/rest/models/v1_task_summary.py index f8c273d28..a54933b0c 100644 --- a/sdks/python/hatchet_sdk/clients/rest/models/v1_task_summary.py +++ b/sdks/python/hatchet_sdk/clients/rest/models/v1_task_summary.py @@ -17,7 +17,7 @@ import re # noqa: F401 import json from datetime import datetime -from pydantic import BaseModel, ConfigDict, Field, StrictInt, StrictStr +from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictInt, StrictStr from typing import Any, ClassVar, Dict, List, Optional from typing_extensions import Annotated from hatchet_sdk.clients.rest.models.api_resource_meta import APIResourceMeta @@ -79,6 +79,11 @@ class V1TaskSummary(BaseModel): description="The output of the task run (for the latest run)" ) status: V1TaskStatus + is_evicted: Optional[StrictBool] = Field( + default=None, + description="Whether the task has been evicted from a worker (still counts as RUNNING).", + alias="isEvicted", + ) started_at: Optional[datetime] = Field( default=None, description="The timestamp the task run started.", @@ -134,6 +139,7 @@ class V1TaskSummary(BaseModel): "numSpawnedChildren", "output", "status", + "isEvicted", "startedAt", "stepId", "taskExternalId", @@ -232,6 +238,7 @@ class V1TaskSummary(BaseModel): "numSpawnedChildren": obj.get("numSpawnedChildren"), "output": obj.get("output"), "status": obj.get("status"), + "isEvicted": obj.get("isEvicted"), "startedAt": obj.get("startedAt"), "stepId": obj.get("stepId"), "taskExternalId": obj.get("taskExternalId"), diff --git a/sdks/python/hatchet_sdk/clients/rest/models/v1_task_timing.py b/sdks/python/hatchet_sdk/clients/rest/models/v1_task_timing.py index e60588375..bb9c76b4f 100644 --- 
a/sdks/python/hatchet_sdk/clients/rest/models/v1_task_timing.py +++ b/sdks/python/hatchet_sdk/clients/rest/models/v1_task_timing.py @@ -17,7 +17,7 @@ import re # noqa: F401 import json from datetime import datetime -from pydantic import BaseModel, ConfigDict, Field, StrictInt, StrictStr +from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictInt, StrictStr from typing import Any, ClassVar, Dict, List, Optional from typing_extensions import Annotated from hatchet_sdk.clients.rest.models.api_resource_meta import APIResourceMeta @@ -34,6 +34,11 @@ class V1TaskTiming(BaseModel): metadata: APIResourceMeta depth: StrictInt = Field(description="The depth of the task in the waterfall.") status: V1TaskStatus + is_evicted: Optional[StrictBool] = Field( + default=None, + description="Whether the task has been evicted from a worker (still counts as RUNNING).", + alias="isEvicted", + ) task_display_name: StrictStr = Field( description="The display name of the task run.", alias="taskDisplayName" ) @@ -86,6 +91,7 @@ class V1TaskTiming(BaseModel): "metadata", "depth", "status", + "isEvicted", "taskDisplayName", "taskExternalId", "taskId", @@ -160,6 +166,7 @@ class V1TaskTiming(BaseModel): ), "depth": obj.get("depth"), "status": obj.get("status"), + "isEvicted": obj.get("isEvicted"), "taskDisplayName": obj.get("taskDisplayName"), "taskExternalId": obj.get("taskExternalId"), "taskId": obj.get("taskId"), diff --git a/sdks/python/hatchet_sdk/clients/rest/models/v1_webhook_receive200_response.py b/sdks/python/hatchet_sdk/clients/rest/models/v1_webhook_receive200_response.py deleted file mode 100644 index 7343c0d19..000000000 --- a/sdks/python/hatchet_sdk/clients/rest/models/v1_webhook_receive200_response.py +++ /dev/null @@ -1,82 +0,0 @@ -# coding: utf-8 - -""" -Hatchet API - -The Hatchet API - -The version of the OpenAPI document: 1.0.0 -Generated by OpenAPI Generator (https://openapi-generator.tech) - -Do not edit the class manually. 
-""" # noqa: E501 - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, StrictStr -from typing_extensions import Self - - -class V1WebhookReceive200Response(BaseModel): - """ - V1WebhookReceive200Response - """ # noqa: E501 - - message: Optional[StrictStr] = None - __properties: ClassVar[List[str]] = ["message"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of V1WebhookReceive200Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of V1WebhookReceive200Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"message": obj.get("message")}) - return _obj diff --git a/sdks/python/hatchet_sdk/clients/rest/models/workflow_deployment_config.py b/sdks/python/hatchet_sdk/clients/rest/models/workflow_deployment_config.py deleted file mode 100644 index 5d6020c95..000000000 --- a/sdks/python/hatchet_sdk/clients/rest/models/workflow_deployment_config.py +++ /dev/null @@ -1,135 +0,0 @@ -# coding: utf-8 - -""" -Hatchet API - -The Hatchet API - -The version of the OpenAPI document: 1.0.0 -Generated by OpenAPI Generator (https://openapi-generator.tech) - -Do not edit the class manually. 
-""" # noqa: E501 - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, Field, StrictStr -from typing_extensions import Self - -from hatchet_sdk.clients.rest.models.api_resource_meta import APIResourceMeta -from hatchet_sdk.clients.rest.models.github_app_installation import ( - GithubAppInstallation, -) - - -class WorkflowDeploymentConfig(BaseModel): - """ - WorkflowDeploymentConfig - """ # noqa: E501 - - metadata: APIResourceMeta - git_repo_name: StrictStr = Field( - description="The repository name.", alias="gitRepoName" - ) - git_repo_owner: StrictStr = Field( - description="The repository owner.", alias="gitRepoOwner" - ) - git_repo_branch: StrictStr = Field( - description="The repository branch.", alias="gitRepoBranch" - ) - github_app_installation: Optional[GithubAppInstallation] = Field( - default=None, - description="The Github App installation.", - alias="githubAppInstallation", - ) - github_app_installation_id: StrictStr = Field( - description="The id of the Github App installation.", - alias="githubAppInstallationId", - ) - __properties: ClassVar[List[str]] = [ - "metadata", - "gitRepoName", - "gitRepoOwner", - "gitRepoBranch", - "githubAppInstallation", - "githubAppInstallationId", - ] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of WorkflowDeploymentConfig from a JSON string""" - 
return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - # override the default output from pydantic by calling `to_dict()` of metadata - if self.metadata: - _dict["metadata"] = self.metadata.to_dict() - # override the default output from pydantic by calling `to_dict()` of github_app_installation - if self.github_app_installation: - _dict["githubAppInstallation"] = self.github_app_installation.to_dict() - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of WorkflowDeploymentConfig from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "metadata": ( - APIResourceMeta.from_dict(obj["metadata"]) - if obj.get("metadata") is not None - else None - ), - "gitRepoName": obj.get("gitRepoName"), - "gitRepoOwner": obj.get("gitRepoOwner"), - "gitRepoBranch": obj.get("gitRepoBranch"), - "githubAppInstallation": ( - GithubAppInstallation.from_dict(obj["githubAppInstallation"]) - if obj.get("githubAppInstallation") is not None - else None - ), - "githubAppInstallationId": obj.get("githubAppInstallationId"), - } - ) - return _obj diff --git a/sdks/python/hatchet_sdk/clients/rest/models/workflow_run_cancel200_response.py b/sdks/python/hatchet_sdk/clients/rest/models/workflow_run_cancel200_response.py deleted file mode 100644 index d4f2bd9af..000000000 --- 
a/sdks/python/hatchet_sdk/clients/rest/models/workflow_run_cancel200_response.py +++ /dev/null @@ -1,84 +0,0 @@ -# coding: utf-8 - -""" -Hatchet API - -The Hatchet API - -The version of the OpenAPI document: 1.0.0 -Generated by OpenAPI Generator (https://openapi-generator.tech) - -Do not edit the class manually. -""" # noqa: E501 - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, ConfigDict, Field -from typing_extensions import Annotated, Self - - -class WorkflowRunCancel200Response(BaseModel): - """ - WorkflowRunCancel200Response - """ # noqa: E501 - - workflow_run_ids: Optional[ - List[Annotated[str, Field(min_length=36, strict=True, max_length=36)]] - ] = Field(default=None, alias="workflowRunIds") - __properties: ClassVar[List[str]] = ["workflowRunIds"] - - model_config = ConfigDict( - populate_by_name=True, - validate_assignment=True, - protected_namespaces=(), - ) - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of WorkflowRunCancel200Response from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - """Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. 
- """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of WorkflowRunCancel200Response from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate({"workflowRunIds": obj.get("workflowRunIds")}) - return _obj diff --git a/sdks/python/hatchet_sdk/clients/rest/models/workflow_version_concurrency.py b/sdks/python/hatchet_sdk/clients/rest/models/workflow_version_concurrency.py deleted file mode 100644 index d0b982d4c..000000000 --- a/sdks/python/hatchet_sdk/clients/rest/models/workflow_version_concurrency.py +++ /dev/null @@ -1,113 +0,0 @@ -# coding: utf-8 - -""" -Hatchet API - -The Hatchet API - -The version of the OpenAPI document: 1.0.0 -Generated by OpenAPI Generator (https://openapi-generator.tech) - -Do not edit the class manually. 
-""" # noqa: E501 - -from __future__ import annotations - -import json -import pprint -import re # noqa: F401 -from typing import Any, ClassVar, Dict, List, Optional, Set - -from pydantic import BaseModel, Field, StrictInt, StrictStr, field_validator -from typing_extensions import Self - - -class WorkflowVersionConcurrency(BaseModel): - """ - WorkflowVersionConcurrency - """ # noqa: E501 - - max_runs: StrictInt = Field( - description="The maximum number of concurrent workflow runs.", alias="maxRuns" - ) - limit_strategy: StrictStr = Field( - description="The strategy to use when the concurrency limit is reached.", - alias="limitStrategy", - ) - get_concurrency_group: StrictStr = Field( - description="An action which gets the concurrency group for the WorkflowRun.", - alias="getConcurrencyGroup", - ) - __properties: ClassVar[List[str]] = [ - "maxRuns", - "limitStrategy", - "getConcurrencyGroup", - ] - - @field_validator("limit_strategy") - def limit_strategy_validate_enum(cls, value): - """Validates the enum""" - if value not in set( - ["CANCEL_IN_PROGRESS", "DROP_NEWEST", "QUEUE_NEWEST", "GROUP_ROUND_ROBIN"] - ): - raise ValueError( - "must be one of enum values ('CANCEL_IN_PROGRESS', 'DROP_NEWEST', 'QUEUE_NEWEST', 'GROUP_ROUND_ROBIN')" - ) - return value - - model_config = { - "populate_by_name": True, - "validate_assignment": True, - "protected_namespaces": (), - } - - def to_str(self) -> str: - """Returns the string representation of the model using alias""" - return pprint.pformat(self.model_dump(by_alias=True)) - - def to_json(self) -> str: - """Returns the JSON representation of the model using alias""" - # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead - return json.dumps(self.to_dict()) - - @classmethod - def from_json(cls, json_str: str) -> Optional[Self]: - """Create an instance of WorkflowVersionConcurrency from a JSON string""" - return cls.from_dict(json.loads(json_str)) - - def to_dict(self) -> Dict[str, Any]: - 
"""Return the dictionary representation of the model using alias. - - This has the following differences from calling pydantic's - `self.model_dump(by_alias=True)`: - - * `None` is only added to the output dict for nullable fields that - were set at model initialization. Other fields with value `None` - are ignored. - """ - excluded_fields: Set[str] = set([]) - - _dict = self.model_dump( - by_alias=True, - exclude=excluded_fields, - exclude_none=True, - ) - return _dict - - @classmethod - def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: - """Create an instance of WorkflowVersionConcurrency from a dict""" - if obj is None: - return None - - if not isinstance(obj, dict): - return cls.model_validate(obj) - - _obj = cls.model_validate( - { - "maxRuns": obj.get("maxRuns"), - "limitStrategy": obj.get("limitStrategy"), - "getConcurrencyGroup": obj.get("getConcurrencyGroup"), - } - ) - return _obj diff --git a/sdks/python/hatchet_sdk/conditions.py b/sdks/python/hatchet_sdk/conditions.py index 2852d9455..2d23bb990 100644 --- a/sdks/python/hatchet_sdk/conditions.py +++ b/sdks/python/hatchet_sdk/conditions.py @@ -10,6 +10,7 @@ from hatchet_sdk.config import ClientConfig from hatchet_sdk.contracts.v1.shared.condition_pb2 import Action as ProtoAction from hatchet_sdk.contracts.v1.shared.condition_pb2 import ( BaseMatchCondition, + DurableEventListenerConditions, ParentOverrideMatchCondition, SleepMatchCondition, UserEventMatchCondition, @@ -155,3 +156,16 @@ def flatten_conditions(conditions: list[Condition | OrGroup]) -> list[Condition] flattened.append(condition) return flattened + + +def build_conditions_proto( + conditions: list[Condition], config: ClientConfig +) -> DurableEventListenerConditions: + return DurableEventListenerConditions( + sleep_conditions=[ + c.to_proto(config) for c in conditions if isinstance(c, SleepCondition) + ], + user_event_conditions=[ + c.to_proto(config) for c in conditions if isinstance(c, UserEventCondition) + ], + ) diff 
--git a/sdks/python/hatchet_sdk/context/context.py b/sdks/python/hatchet_sdk/context/context.py index 58ff9f2d2..39f70a5e8 100644 --- a/sdks/python/hatchet_sdk/context/context.py +++ b/sdks/python/hatchet_sdk/context/context.py @@ -1,10 +1,19 @@ +from __future__ import annotations + import asyncio +import hashlib import json -from datetime import timedelta -from typing import TYPE_CHECKING, Any, cast +from collections.abc import Awaitable, Callable +from datetime import UTC, datetime, timedelta +from typing import TYPE_CHECKING, Any, ParamSpec, TypeVar, cast, overload from warnings import warn -from hatchet_sdk.clients.admin import AdminClient +from pydantic import BaseModel, TypeAdapter + +from hatchet_sdk.clients.admin import ( + AdminClient, + WorkflowRunTriggerConfig, +) from hatchet_sdk.clients.dispatcher.dispatcher import ( # type: ignore[attr-defined] Action, DispatcherClient, @@ -12,25 +21,77 @@ from hatchet_sdk.clients.dispatcher.dispatcher import ( # type: ignore[attr-def from hatchet_sdk.clients.events import EventClient from hatchet_sdk.clients.listeners.durable_event_listener import ( DurableEventListener, - RegisterDurableEventRequest, + DurableTaskEventMemoAck, + DurableTaskEventRunAck, + DurableTaskEventWaitForAck, + MemoEvent, + RunChildEvent, + RunChildrenEvent, + WaitForEvent, +) +from hatchet_sdk.clients.listeners.legacy.pre_eviction_durable_event_listener import ( + PreEvictionDurableEventListener, ) from hatchet_sdk.conditions import ( OrGroup, SleepCondition, UserEventCondition, + build_conditions_proto, flatten_conditions, ) +from hatchet_sdk.context.pre_eviction import aio_wait_for_pre_eviction from hatchet_sdk.context.worker_context import WorkerContext +from hatchet_sdk.deprecated.deprecation import semver_less_than +from hatchet_sdk.engine_version import MinEngineVersion from hatchet_sdk.exceptions import TaskRunError from hatchet_sdk.features.runs import RunsClient from hatchet_sdk.logger import logger -from 
hatchet_sdk.utils.timedelta_to_expression import Duration, timedelta_to_expr -from hatchet_sdk.utils.typing import JSONSerializableMapping, LogLevel +from hatchet_sdk.runnables.types import ( + R, + TWorkflowInput, + ValidTaskReturnType, +) +from hatchet_sdk.serde import HATCHET_PYDANTIC_SENTINEL +from hatchet_sdk.utils.timedelta_to_expression import ( + Duration, + expr_to_timedelta, + timedelta_to_expr, +) +from hatchet_sdk.utils.typing import ( + DataclassInstance, + JSONSerializableMapping, + LogLevel, +) +from hatchet_sdk.worker.durable_eviction.instrumentation import ( + aio_durable_eviction_wait, +) +from hatchet_sdk.worker.durable_eviction.manager import DurableEvictionManager from hatchet_sdk.worker.runner.utils.capture_logs import AsyncLogSender, LogRecord +PMemo = ParamSpec("PMemo") +TMemo = TypeVar("TMemo", bound=ValidTaskReturnType) + if TYPE_CHECKING: from hatchet_sdk.runnables.task import Task - from hatchet_sdk.runnables.types import R, TWorkflowInput + + +TPayload = TypeVar("TPayload", bound=BaseModel | DataclassInstance | dict[str, Any]) + + +class SleepResult(BaseModel): + duration: timedelta + + +class MemoNowResult(BaseModel): + ts: datetime + + +def _compute_memo_key(task_run_external_id: str, *args: Any, **kwargs: Any) -> bytes: + h = hashlib.sha256() + h.update(task_run_external_id.encode()) + h.update(json.dumps(args, default=str, sort_keys=True).encode()) + h.update(json.dumps(kwargs, default=str, sort_keys=True).encode()) + return h.digest() class Context: @@ -40,7 +101,9 @@ class Context: dispatcher_client: DispatcherClient, admin_client: AdminClient, event_client: EventClient, - durable_event_listener: DurableEventListener | None, + durable_event_listener: ( + DurableEventListener | PreEvictionDurableEventListener | None + ), worker: WorkerContext, runs_client: RunsClient, lifespan_context: Any | None, @@ -80,7 +143,7 @@ class Context: return index - def was_skipped(self, task: "Task[TWorkflowInput, R]") -> bool: + def was_skipped(self, 
task: Task[TWorkflowInput, R]) -> bool: """ Check if a given task was skipped. You can read about skipping in [the docs](https://docs.hatchet.run/home/conditional-workflows#skip_if). @@ -93,7 +156,7 @@ class Context: def trigger_data(self) -> JSONSerializableMapping: return self.data.triggers - def task_output(self, task: "Task[TWorkflowInput, R]") -> "R": + def task_output(self, task: Task[TWorkflowInput, R]) -> R: """ Get the output of a parent task in a DAG. @@ -119,7 +182,7 @@ class Context: ), ) - def aio_task_output(self, task: "Task[TWorkflowInput, R]") -> "R": + def aio_task_output(self, task: Task[TWorkflowInput, R]) -> R: warn( "`aio_task_output` is deprecated. Use `task_output` instead.", DeprecationWarning, @@ -396,7 +459,7 @@ class Context: def fetch_task_run_error( self, - task: "Task[TWorkflowInput, R]", + task: Task[TWorkflowInput, R], ) -> str | None: """ **DEPRECATED**: Use `get_task_run_error` instead. @@ -417,7 +480,7 @@ class Context: def get_task_run_error( self, - task: "Task[TWorkflowInput, R]", + task: Task[TWorkflowInput, R], ) -> TaskRunError | None: """ A helper intended to be used in an on-failure step to retrieve the error that occurred in a specific upstream task run. 
@@ -442,7 +505,9 @@ class DurableContext(Context): dispatcher_client: DispatcherClient, admin_client: AdminClient, event_client: EventClient, - durable_event_listener: DurableEventListener | None, + durable_event_listener: ( + DurableEventListener | PreEvictionDurableEventListener | None + ), worker: WorkerContext, runs_client: RunsClient, lifespan_context: Any | None, @@ -450,6 +515,8 @@ class DurableContext(Context): max_attempts: int, task_name: str, workflow_name: str, + durable_eviction_manager: DurableEvictionManager | None = None, + engine_version: str | None = None, ): super().__init__( action, @@ -467,6 +534,28 @@ class DurableContext(Context): ) self._wait_index = 0 + self._durable_eviction_manager = durable_eviction_manager + self._engine_version = engine_version + + @property + def _durable_listener(self) -> DurableEventListener: + if self.durable_event_listener is None: + raise ValueError("Durable task client is not available") + + if not isinstance(self.durable_event_listener, DurableEventListener): + raise TypeError( + "Expected DurableEventListener, got " + f"{type(self.durable_event_listener).__name__}" + ) + return self.durable_event_listener + + @property + def _supports_durable_eviction(self) -> bool: + if not self._engine_version: + return False + return not semver_less_than( + self._engine_version, MinEngineVersion.DURABLE_EVICTION + ) @property def wait_index(self) -> int: @@ -478,6 +567,8 @@ class DurableContext(Context): return index + ## IMPORTANT: This method is instrumented by HatchetInstrumentor._wrap_aio_wait_for. + ## Keep the signature in sync with the instrumentor wrapper. async def aio_wait_for( self, signal_key: str, @@ -490,28 +581,52 @@ class DurableContext(Context): :param *conditions: The conditions to wait for. Can be a SleepCondition or UserEventCondition. :return: A dictionary containing the results of the wait. - :raises ValueError: If the durable event listener is not available. 
+ + :raises ValueError: If the durable task client is not available. + :raises TypeError: If the durable event listener is not of type DurableEventListener or PreEvictionDurableEventListener. """ if self.durable_event_listener is None: - raise ValueError("Durable event listener is not available") + raise ValueError("Durable task client is not available") - task_id = self.step_run_id + if not self._supports_durable_eviction: + return await aio_wait_for_pre_eviction(self, signal_key, *conditions) - request = RegisterDurableEventRequest( - task_id=task_id, - signal_key=signal_key, - conditions=flatten_conditions(list(conditions)), - config=self.runs_client.client_config, + listener = self._durable_listener + + await self._ensure_stream_started() + + flat_conditions = flatten_conditions(list(conditions)) + conditions_proto = build_conditions_proto( + flat_conditions, self.runs_client.client_config + ) + ack = await listener.send_event( + durable_task_external_id=self.step_run_id, + invocation_count=self.invocation_count, + event=WaitForEvent(wait_for_conditions=conditions_proto), ) - self.durable_event_listener.register_durable_event(request) + if not isinstance(ack, DurableTaskEventWaitForAck): + raise TypeError(f"Expected wait-for ack, got {type(ack).__name__}") - return await self.durable_event_listener.result( - task_id, - signal_key, - ) + node_id = ack.node_id + branch_id = ack.branch_id - async def aio_sleep_for(self, duration: Duration) -> dict[str, Any]: + async with aio_durable_eviction_wait( + wait_kind="wait_for", + resource_id=signal_key, + action_key=self.action.key, + eviction_manager=self._durable_eviction_manager, + ): + result = await listener.wait_for_callback( + durable_task_external_id=self.step_run_id, + node_id=node_id, + branch_id=branch_id, + invocation_count=self.invocation_count, + ) + + return result.payload or {} + + async def aio_sleep_for(self, duration: Duration) -> SleepResult: """ Lightweight wrapper for durable sleep. 
Allows for shorthand usage of `ctx.aio_wait_for` when specifying a sleep condition. @@ -520,7 +635,254 @@ class DurableContext(Context): wait_index = self._increment_wait_index() - return await self.aio_wait_for( + res = await self.aio_wait_for( f"sleep:{timedelta_to_expr(duration)}-{wait_index}", SleepCondition(duration=duration), ) + + ## lots of implicit use of engine semantics / internal logic here. + ## the engine returns an object like this: + ## {"CREATE": {"signal_key_1": [{"id": ...}]}} + ## since we have a single match we're looking for, we know that + ## the list of matches will only have one item, so we can extract and parse it + matches: dict[str, list[dict[str, Any]]] = res.get("CREATE", {}) + _, raw_matches = next(iter(matches.items())) + sleep = raw_matches[0] + + return SleepResult( + duration=expr_to_timedelta( + sleep.get("sleep_duration", timedelta_to_expr(duration)) + ) + ) + + @overload + async def aio_wait_for_event( + self, + key: str, + expression: str | None = None, + *, + payload_validator: type[TPayload], + ) -> TPayload: ... + + @overload + async def aio_wait_for_event( + self, + key: str, + expression: str | None = None, + ) -> dict[str, Any]: ... + + async def aio_wait_for_event( + self, + key: str, + expression: str | None = None, + *, + payload_validator: type[Any] | None = None, + ) -> Any: + """ + Lightweight wrapper for waiting for a user event. Allows for shorthand usage of `ctx.aio_wait_for` when specifying a user event condition. + + For more complicated conditions, use `ctx.aio_wait_for` directly. + + :param key: The event key to wait for. + :param expression: An optional CEL expression to filter events. + :param payload_validator: An optional type (e.g. a Pydantic model, dataclass, or TypedDict) to validate the event payload against. If provided, the payload will be validated and returned as an instance of this type. 
+ + :return: The payload of the event, validated against the provided payload_validator if it was given, or as a raw dictionary if no payload_validator was provided. + """ + + wait_index = self._increment_wait_index() + + result = await self.aio_wait_for( + f"event:{key}-{wait_index}", + UserEventCondition(event_key=key, expression=expression), + ) + + ## lots of implicit use of engine semantics / internal logic here. + ## the engine returns an object like this: + ## {"CREATE": {"signal_key_1": [{"id": ...}]}} + ## since we have a single match we're looking for, we know that + ## the list of matches will only have one item, so we can extract and parse it + matches: dict[str, list[dict[str, Any]]] = result.get("CREATE", {}) + _, raw_matches = next(iter(matches.items())) + raw_payload = raw_matches[0] + + if payload_validator is not None: + adapter = TypeAdapter(payload_validator) + return adapter.validate_python( + raw_payload, context=HATCHET_PYDANTIC_SENTINEL + ) + + return raw_payload + + ## IMPORTANT: This method is instrumented by HatchetInstrumentor._wrap_spawn_children_no_wait. + ## Keep the signature in sync with the instrumentor wrapper. 
+ async def _spawn_children_no_wait( + self, + configs: list[WorkflowRunTriggerConfig], + ) -> list[tuple[int, int, str]]: + listener = self._durable_listener + + await self._ensure_stream_started() + + ack = await listener.send_event( + durable_task_external_id=self.step_run_id, + invocation_count=self.invocation_count, + event=RunChildrenEvent( + children=[ + RunChildEvent( + workflow_name=c.workflow_name, + input=c.input, + trigger_workflow_opts=c.options, + ) + for c in configs + ] + ), + ) + + if not isinstance(ack, DurableTaskEventRunAck): + raise TypeError(f"Expected run ack, got {type(ack).__name__}") + + return [ + (entry.node_id, entry.branch_id, configs[i].workflow_name) + for i, entry in enumerate(ack.run_entries) + ] + + async def _aio_result_for_spawned_child( + self, + node_id: int, + branch_id: int, + workflow_name: str, + ) -> dict[str, Any]: + listener = self._durable_listener + + async with aio_durable_eviction_wait( + wait_kind="spawn_child", + resource_id=workflow_name, + action_key=self.action.key, + eviction_manager=self._durable_eviction_manager, + ): + result = await listener.wait_for_callback( + durable_task_external_id=self.step_run_id, + node_id=node_id, + branch_id=branch_id, + invocation_count=self.invocation_count, + ) + + return result.payload or {} + + async def _ensure_stream_started(self) -> None: + if not isinstance(self.durable_event_listener, DurableEventListener): + raise ValueError("Durable task client is not available") + + await self.durable_event_listener.ensure_started(self.action.worker_id) + + @property + def invocation_count(self) -> int: + return self.action.durable_task_invocation_count or 1 + + ## IMPORTANT: This method is instrumented by HatchetInstrumentor._wrap_aio_memo. + ## Keep the signature in sync with the instrumentor wrapper. 
+ async def _aio_memo( + self, + fn: Callable[PMemo, Awaitable[TMemo]], + result_validator: type[TMemo], + /, + *args: PMemo.args, + **kwargs: PMemo.kwargs, + ) -> TMemo: + """ + Memoize a function by storing its result in durable storage. This is useful for caching the results of expensive computations that you don't want to repeat on every workflow replay without needing to spawn a child workflow or set up an external cache. The function signature is intended to behave similarly to `asyncio.to_thread` or other similar uses of partially applied functions, where you pass in the function and its arguments separately. + + Note that memoization is performed at the _task run_ level, meaning you cannot cache across tasks (whether they're part of the same workflow or otherwise). + + :param fn: The function to compute the value to be memoized. This should be an async function that returns the value to be memoized. + :param result_validator: The type of the result to be memoized. This is used for validating the result when it's retrieved from durable storage and for properly serializing the result of the function call. This is required and generally we recommend using either a Pydantic model, a dataclass, or a TypedDict, but you can also use `dict` as an escape hatch. + :param *args: The arguments to pass to the function when computing the value to be memoized. These are used for computing the memoization key, so that different arguments will result in different cached values. + :param **kwargs: The keyword arguments to pass to the function when computing the value to be memoized. These are used for computing the memoization key, so that different keyword arguments will result in different cached values. + + :return: The memoized value, either retrieved from durable storage or computed by calling the function. + + :raises TypeError: If the durable event listener is not of type DurableEventListener or PreEvictionDurableEventListener. 
+ """ + if not self._supports_durable_eviction: + logger.warning( + "Engine does not support memoization (requires >= %s). " + "aio_memo will execute the function but results will not be " + "persisted across replays. Upgrade your engine to enable durable memoization.", + MinEngineVersion.DURABLE_EVICTION, + ) + return await fn(*args, **kwargs) + + listener = self._durable_listener + + run_external_id = self.step_run_id + adapter = TypeAdapter(result_validator) + + key = _compute_memo_key(self.step_run_id, *args, **kwargs) + + ack = await listener.send_event( + durable_task_external_id=run_external_id, + invocation_count=self.invocation_count, + event=MemoEvent(memo_key=key, result=None), + ) + + if not isinstance(ack, DurableTaskEventMemoAck): + raise TypeError(f"Expected memo ack, got {type(ack).__name__}") + + if ack.memo_already_existed and ack.memo_result_payload is None: + logger.warning( + "memo key found in durable storage but no data was returned. rerunning the function to recompute the value. " + ) + + if ack.memo_already_existed and ack.memo_result_payload is not None: + serialized_result = ack.memo_result_payload + result = adapter.validate_json( + serialized_result, context=HATCHET_PYDANTIC_SENTINEL + ) + else: + result = await fn(*args, **kwargs) + serialized_result = adapter.dump_json( + result, context=HATCHET_PYDANTIC_SENTINEL + ) + + await self._ensure_stream_started() + + await listener.send_memo_completed_notification( + durable_task_external_id=run_external_id, + node_id=ack.node_id, + branch_id=ack.branch_id, + invocation_count=self.invocation_count, + memo_result_payload=serialized_result, + memo_key=key, + ) + + return result + + async def _now(self) -> MemoNowResult: + ts = await asyncio.to_thread(datetime.now, UTC) + return MemoNowResult(ts=ts) + + async def aio_now(self) -> datetime: + """ + Get the current timestamp. 
This is a wrapper around `datetime.now()` that is memoized using durable storage, so that it will return the same timestamp across replays of the same task run. + + :return: The current timestamp, memoized across replays of the same task run. + """ + now = await self._aio_memo( + self._now, + MemoNowResult, + ) + + return now.ts + + async def aio_sleep_until(self, wake_at: datetime) -> SleepResult: + """ + Durably sleep until a specific timestamp. + + :param wake_at: The timestamp to sleep until. + + :return: A SleepResult containing the actual duration slept, which may be different from the intended duration if the workflow was evicted and resumed. + """ + now = await self.aio_now() + + return await self.aio_sleep_for(wake_at - now) diff --git a/sdks/python/hatchet_sdk/context/pre_eviction.py b/sdks/python/hatchet_sdk/context/pre_eviction.py new file mode 100644 index 000000000..416ec873b --- /dev/null +++ b/sdks/python/hatchet_sdk/context/pre_eviction.py @@ -0,0 +1,48 @@ +"""Pre-eviction fallback implementations for DurableContext. + +These methods support engines older than MIN_DURABLE_EVICTION_VERSION. +Remove this module when support for those engines is dropped. 
+""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +from hatchet_sdk.clients.listeners.legacy.pre_eviction_durable_event_listener import ( + PreEvictionDurableEventListener, + RegisterDurableEventRequest, +) +from hatchet_sdk.conditions import ( + OrGroup, + SleepCondition, + UserEventCondition, + flatten_conditions, +) + +if TYPE_CHECKING: + from hatchet_sdk.context.context import DurableContext + + +async def aio_wait_for_pre_eviction( + ctx: DurableContext, + signal_key: str, + *conditions: SleepCondition | UserEventCondition | OrGroup, +) -> dict[str, Any]: + if not isinstance(ctx.durable_event_listener, PreEvictionDurableEventListener): + raise TypeError( + "Expected PreEvictionDurableEventListener, got " + f"{type(ctx.durable_event_listener).__name__}" + ) + + task_id = ctx.step_run_id + + request = RegisterDurableEventRequest( + task_id=task_id, + signal_key=signal_key, + conditions=flatten_conditions(list(conditions)), + config=ctx.runs_client.client_config, + ) + + ctx.durable_event_listener.register_durable_event(request) + + return await ctx.durable_event_listener.result(task_id, signal_key) diff --git a/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.py b/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.py index 700560628..0a13a4d84 100644 --- a/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.py +++ b/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.py @@ -25,7 +25,7 @@ _sym_db = _symbol_database.Default() from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10\x64ispatcher.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"Z\n\x0cWorkerLabels\x12\x16\n\tstr_value\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x16\n\tint_value\x18\x02 \x01(\x05H\x01\x88\x01\x01\x42\x0c\n\n_str_valueB\x0c\n\n_int_value\"\xcc\x01\n\x0bRuntimeInfo\x12\x18\n\x0bsdk_version\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1c\n\x08language\x18\x02 
\x01(\x0e\x32\x05.SDKSH\x01\x88\x01\x01\x12\x1d\n\x10language_version\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x0f\n\x02os\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x12\n\x05\x65xtra\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x0e\n\x0c_sdk_versionB\x0b\n\t_languageB\x13\n\x11_language_versionB\x05\n\x03_osB\x08\n\x06_extra\"\xb1\x03\n\x15WorkerRegisterRequest\x12\x13\n\x0bworker_name\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x63tions\x18\x02 \x03(\t\x12\x10\n\x08services\x18\x03 \x03(\t\x12\x12\n\x05slots\x18\x04 \x01(\x05H\x00\x88\x01\x01\x12\x32\n\x06labels\x18\x05 \x03(\x0b\x32\".WorkerRegisterRequest.LabelsEntry\x12\x17\n\nwebhook_id\x18\x06 \x01(\tH\x01\x88\x01\x01\x12\'\n\x0cruntime_info\x18\x07 \x01(\x0b\x32\x0c.RuntimeInfoH\x02\x88\x01\x01\x12;\n\x0bslot_config\x18\t \x03(\x0b\x32&.WorkerRegisterRequest.SlotConfigEntry\x1a<\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.WorkerLabels:\x02\x38\x01\x1a\x31\n\x0fSlotConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x42\x08\n\x06_slotsB\r\n\x0b_webhook_idB\x0f\n\r_runtime_info\"S\n\x16WorkerRegisterResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\x12\x13\n\x0bworker_name\x18\x03 \x01(\t\"\xa4\x01\n\x19UpsertWorkerLabelsRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x36\n\x06labels\x18\x02 \x03(\x0b\x32&.UpsertWorkerLabelsRequest.LabelsEntry\x1a<\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.WorkerLabels:\x02\x38\x01\"B\n\x1aUpsertWorkerLabelsResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\x98\x05\n\x0e\x41ssignedAction\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x17\n\x0fworkflow_run_id\x18\x02 \x01(\t\x12\x1c\n\x14get_group_key_run_id\x18\x03 \x01(\t\x12\x0e\n\x06job_id\x18\x04 \x01(\t\x12\x10\n\x08job_name\x18\x05 \x01(\t\x12\x12\n\njob_run_id\x18\x06 \x01(\t\x12\x0f\n\x07task_id\x18\x07 
\x01(\t\x12\x1c\n\x14task_run_external_id\x18\x08 \x01(\t\x12\x11\n\taction_id\x18\t \x01(\t\x12 \n\x0b\x61\x63tion_type\x18\n \x01(\x0e\x32\x0b.ActionType\x12\x16\n\x0e\x61\x63tion_payload\x18\x0b \x01(\t\x12\x11\n\ttask_name\x18\x0c \x01(\t\x12\x13\n\x0bretry_count\x18\r \x01(\x05\x12 \n\x13\x61\x64\x64itional_metadata\x18\x0e \x01(\tH\x00\x88\x01\x01\x12!\n\x14\x63hild_workflow_index\x18\x0f \x01(\x05H\x01\x88\x01\x01\x12\x1f\n\x12\x63hild_workflow_key\x18\x10 \x01(\tH\x02\x88\x01\x01\x12#\n\x16parent_workflow_run_id\x18\x11 \x01(\tH\x03\x88\x01\x01\x12\x10\n\x08priority\x18\x12 \x01(\x05\x12\x18\n\x0bworkflow_id\x18\x13 \x01(\tH\x04\x88\x01\x01\x12 \n\x13workflow_version_id\x18\x14 \x01(\tH\x05\x88\x01\x01\x42\x16\n\x14_additional_metadataB\x17\n\x15_child_workflow_indexB\x15\n\x13_child_workflow_keyB\x19\n\x17_parent_workflow_run_idB\x0e\n\x0c_workflow_idB\x16\n\x14_workflow_version_id\"(\n\x13WorkerListenRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\"-\n\x18WorkerUnsubscribeRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\"A\n\x19WorkerUnsubscribeResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\xec\x01\n\x13GroupKeyActionEvent\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x17\n\x0fworkflow_run_id\x18\x02 \x01(\t\x12\x1c\n\x14get_group_key_run_id\x18\x03 \x01(\t\x12\x11\n\taction_id\x18\x04 \x01(\t\x12\x33\n\x0f\x65vent_timestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\nevent_type\x18\x06 \x01(\x0e\x32\x18.GroupKeyActionEventType\x12\x15\n\revent_payload\x18\x07 \x01(\t\"\xde\x02\n\x0fStepActionEvent\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x0e\n\x06job_id\x18\x02 \x01(\t\x12\x12\n\njob_run_id\x18\x03 \x01(\t\x12\x0f\n\x07task_id\x18\x04 \x01(\t\x12\x1c\n\x14task_run_external_id\x18\x05 \x01(\t\x12\x11\n\taction_id\x18\x06 \x01(\t\x12\x33\n\x0f\x65vent_timestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12(\n\nevent_type\x18\x08 \x01(\x0e\x32\x14.StepActionEventType\x12\x15\n\revent_payload\x18\t 
\x01(\t\x12\x18\n\x0bretry_count\x18\n \x01(\x05H\x00\x88\x01\x01\x12\x1d\n\x10should_not_retry\x18\x0b \x01(\x08H\x01\x88\x01\x01\x42\x0e\n\x0c_retry_countB\x13\n\x11_should_not_retry\";\n\x13\x41\x63tionEventResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\xcc\x01\n SubscribeToWorkflowEventsRequest\x12\x1c\n\x0fworkflow_run_id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12 \n\x13\x61\x64\x64itional_meta_key\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\"\n\x15\x61\x64\x64itional_meta_value\x18\x03 \x01(\tH\x02\x88\x01\x01\x42\x12\n\x10_workflow_run_idB\x16\n\x14_additional_meta_keyB\x18\n\x16_additional_meta_value\"9\n\x1eSubscribeToWorkflowRunsRequest\x12\x17\n\x0fworkflow_run_id\x18\x01 \x01(\t\"\xe7\x02\n\rWorkflowEvent\x12\x17\n\x0fworkflow_run_id\x18\x01 \x01(\t\x12$\n\rresource_type\x18\x02 \x01(\x0e\x32\r.ResourceType\x12&\n\nevent_type\x18\x03 \x01(\x0e\x32\x12.ResourceEventType\x12\x13\n\x0bresource_id\x18\x04 \x01(\t\x12\x33\n\x0f\x65vent_timestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x15\n\revent_payload\x18\x06 \x01(\t\x12\x0e\n\x06hangup\x18\x07 \x01(\x08\x12\x19\n\x0ctask_retries\x18\x08 \x01(\x05H\x00\x88\x01\x01\x12\x18\n\x0bretry_count\x18\t \x01(\x05H\x01\x88\x01\x01\x12\x18\n\x0b\x65vent_index\x18\n \x01(\x03H\x02\x88\x01\x01\x42\x0f\n\r_task_retriesB\x0e\n\x0c_retry_countB\x0e\n\x0c_event_index\"\xac\x01\n\x10WorkflowRunEvent\x12\x17\n\x0fworkflow_run_id\x18\x01 \x01(\t\x12)\n\nevent_type\x18\x02 \x01(\x0e\x32\x15.WorkflowRunEventType\x12\x33\n\x0f\x65vent_timestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1f\n\x07results\x18\x04 \x03(\x0b\x32\x0e.StepRunResult\"\x92\x01\n\rStepRunResult\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\x12\x11\n\ttask_name\x18\x02 \x01(\t\x12\x12\n\njob_run_id\x18\x03 \x01(\t\x12\x12\n\x05\x65rror\x18\x04 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06output\x18\x05 
\x01(\tH\x01\x88\x01\x01\x42\x08\n\x06_errorB\t\n\x07_output\"c\n\rOverridesData\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\x12\x17\n\x0f\x63\x61ller_filename\x18\x04 \x01(\t\"\x17\n\x15OverridesDataResponse\"W\n\x10HeartbeatRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x30\n\x0cheartbeat_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x13\n\x11HeartbeatResponse\"S\n\x15RefreshTimeoutRequest\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\x12\x1c\n\x14increment_timeout_by\x18\x02 \x01(\t\"H\n\x16RefreshTimeoutResponse\x12.\n\ntimeout_at\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"2\n\x12ReleaseSlotRequest\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\"\x15\n\x13ReleaseSlotResponse\"\x13\n\x11GetVersionRequest\"%\n\x12GetVersionResponse\x12\x0f\n\x07version\x18\x01 \x01(\t*A\n\x04SDKS\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x06\n\x02GO\x10\x01\x12\n\n\x06PYTHON\x10\x02\x12\x0e\n\nTYPESCRIPT\x10\x03\x12\x08\n\x04RUBY\x10\x04*N\n\nActionType\x12\x12\n\x0eSTART_STEP_RUN\x10\x00\x12\x13\n\x0f\x43\x41NCEL_STEP_RUN\x10\x01\x12\x17\n\x13START_GET_GROUP_KEY\x10\x02*\xa2\x01\n\x17GroupKeyActionEventType\x12 \n\x1cGROUP_KEY_EVENT_TYPE_UNKNOWN\x10\x00\x12 \n\x1cGROUP_KEY_EVENT_TYPE_STARTED\x10\x01\x12\"\n\x1eGROUP_KEY_EVENT_TYPE_COMPLETED\x10\x02\x12\x1f\n\x1bGROUP_KEY_EVENT_TYPE_FAILED\x10\x03*\xac\x01\n\x13StepActionEventType\x12\x1b\n\x17STEP_EVENT_TYPE_UNKNOWN\x10\x00\x12\x1b\n\x17STEP_EVENT_TYPE_STARTED\x10\x01\x12\x1d\n\x19STEP_EVENT_TYPE_COMPLETED\x10\x02\x12\x1a\n\x16STEP_EVENT_TYPE_FAILED\x10\x03\x12 
\n\x1cSTEP_EVENT_TYPE_ACKNOWLEDGED\x10\x04*e\n\x0cResourceType\x12\x19\n\x15RESOURCE_TYPE_UNKNOWN\x10\x00\x12\x1a\n\x16RESOURCE_TYPE_STEP_RUN\x10\x01\x12\x1e\n\x1aRESOURCE_TYPE_WORKFLOW_RUN\x10\x02*\xfe\x01\n\x11ResourceEventType\x12\x1f\n\x1bRESOURCE_EVENT_TYPE_UNKNOWN\x10\x00\x12\x1f\n\x1bRESOURCE_EVENT_TYPE_STARTED\x10\x01\x12!\n\x1dRESOURCE_EVENT_TYPE_COMPLETED\x10\x02\x12\x1e\n\x1aRESOURCE_EVENT_TYPE_FAILED\x10\x03\x12!\n\x1dRESOURCE_EVENT_TYPE_CANCELLED\x10\x04\x12!\n\x1dRESOURCE_EVENT_TYPE_TIMED_OUT\x10\x05\x12\x1e\n\x1aRESOURCE_EVENT_TYPE_STREAM\x10\x06*<\n\x14WorkflowRunEventType\x12$\n WORKFLOW_RUN_EVENT_TYPE_FINISHED\x10\x00\x32\xb1\x07\n\nDispatcher\x12=\n\x08Register\x12\x16.WorkerRegisterRequest\x1a\x17.WorkerRegisterResponse\"\x00\x12\x33\n\x06Listen\x12\x14.WorkerListenRequest\x1a\x0f.AssignedAction\"\x00\x30\x01\x12\x35\n\x08ListenV2\x12\x14.WorkerListenRequest\x1a\x0f.AssignedAction\"\x00\x30\x01\x12\x34\n\tHeartbeat\x12\x11.HeartbeatRequest\x1a\x12.HeartbeatResponse\"\x00\x12R\n\x19SubscribeToWorkflowEvents\x12!.SubscribeToWorkflowEventsRequest\x1a\x0e.WorkflowEvent\"\x00\x30\x01\x12S\n\x17SubscribeToWorkflowRuns\x12\x1f.SubscribeToWorkflowRunsRequest\x1a\x11.WorkflowRunEvent\"\x00(\x01\x30\x01\x12?\n\x13SendStepActionEvent\x12\x10.StepActionEvent\x1a\x14.ActionEventResponse\"\x00\x12G\n\x17SendGroupKeyActionEvent\x12\x14.GroupKeyActionEvent\x1a\x14.ActionEventResponse\"\x00\x12<\n\x10PutOverridesData\x12\x0e.OverridesData\x1a\x16.OverridesDataResponse\"\x00\x12\x46\n\x0bUnsubscribe\x12\x19.WorkerUnsubscribeRequest\x1a\x1a.WorkerUnsubscribeResponse\"\x00\x12\x43\n\x0eRefreshTimeout\x12\x16.RefreshTimeoutRequest\x1a\x17.RefreshTimeoutResponse\"\x00\x12:\n\x0bReleaseSlot\x12\x13.ReleaseSlotRequest\x1a\x14.ReleaseSlotResponse\"\x00\x12O\n\x12UpsertWorkerLabels\x12\x1a.UpsertWorkerLabelsRequest\x1a\x1b.UpsertWorkerLabelsResponse\"\x00\x12\x37\n\nGetVersion\x12\x12.GetVersionRequest\x1a\x13.GetVersionResponse\"\x00\x42GZEgithub.com/hatchet-dev/hatchet/
internal/services/dispatcher/contractsb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10\x64ispatcher.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"Z\n\x0cWorkerLabels\x12\x16\n\tstr_value\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x16\n\tint_value\x18\x02 \x01(\x05H\x01\x88\x01\x01\x42\x0c\n\n_str_valueB\x0c\n\n_int_value\"\xcc\x01\n\x0bRuntimeInfo\x12\x18\n\x0bsdk_version\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1c\n\x08language\x18\x02 \x01(\x0e\x32\x05.SDKSH\x01\x88\x01\x01\x12\x1d\n\x10language_version\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x0f\n\x02os\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x12\n\x05\x65xtra\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x0e\n\x0c_sdk_versionB\x0b\n\t_languageB\x13\n\x11_language_versionB\x05\n\x03_osB\x08\n\x06_extra\"\xb1\x03\n\x15WorkerRegisterRequest\x12\x13\n\x0bworker_name\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x63tions\x18\x02 \x03(\t\x12\x10\n\x08services\x18\x03 \x03(\t\x12\x12\n\x05slots\x18\x04 \x01(\x05H\x00\x88\x01\x01\x12\x32\n\x06labels\x18\x05 \x03(\x0b\x32\".WorkerRegisterRequest.LabelsEntry\x12\x17\n\nwebhook_id\x18\x06 \x01(\tH\x01\x88\x01\x01\x12\'\n\x0cruntime_info\x18\x07 \x01(\x0b\x32\x0c.RuntimeInfoH\x02\x88\x01\x01\x12;\n\x0bslot_config\x18\t \x03(\x0b\x32&.WorkerRegisterRequest.SlotConfigEntry\x1a<\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.WorkerLabels:\x02\x38\x01\x1a\x31\n\x0fSlotConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x42\x08\n\x06_slotsB\r\n\x0b_webhook_idB\x0f\n\r_runtime_info\"S\n\x16WorkerRegisterResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\x12\x13\n\x0bworker_name\x18\x03 \x01(\t\"\xa4\x01\n\x19UpsertWorkerLabelsRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x36\n\x06labels\x18\x02 \x03(\x0b\x32&.UpsertWorkerLabelsRequest.LabelsEntry\x1a<\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 
\x01(\x0b\x32\r.WorkerLabels:\x02\x38\x01\"B\n\x1aUpsertWorkerLabelsResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\xe6\x05\n\x0e\x41ssignedAction\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x17\n\x0fworkflow_run_id\x18\x02 \x01(\t\x12\x1c\n\x14get_group_key_run_id\x18\x03 \x01(\t\x12\x0e\n\x06job_id\x18\x04 \x01(\t\x12\x10\n\x08job_name\x18\x05 \x01(\t\x12\x12\n\njob_run_id\x18\x06 \x01(\t\x12\x0f\n\x07task_id\x18\x07 \x01(\t\x12\x1c\n\x14task_run_external_id\x18\x08 \x01(\t\x12\x11\n\taction_id\x18\t \x01(\t\x12 \n\x0b\x61\x63tion_type\x18\n \x01(\x0e\x32\x0b.ActionType\x12\x16\n\x0e\x61\x63tion_payload\x18\x0b \x01(\t\x12\x11\n\ttask_name\x18\x0c \x01(\t\x12\x13\n\x0bretry_count\x18\r \x01(\x05\x12 \n\x13\x61\x64\x64itional_metadata\x18\x0e \x01(\tH\x00\x88\x01\x01\x12!\n\x14\x63hild_workflow_index\x18\x0f \x01(\x05H\x01\x88\x01\x01\x12\x1f\n\x12\x63hild_workflow_key\x18\x10 \x01(\tH\x02\x88\x01\x01\x12#\n\x16parent_workflow_run_id\x18\x11 \x01(\tH\x03\x88\x01\x01\x12\x10\n\x08priority\x18\x12 \x01(\x05\x12\x18\n\x0bworkflow_id\x18\x13 \x01(\tH\x04\x88\x01\x01\x12 \n\x13workflow_version_id\x18\x14 \x01(\tH\x05\x88\x01\x01\x12*\n\x1d\x64urable_task_invocation_count\x18\x15 \x01(\x05H\x06\x88\x01\x01\x42\x16\n\x14_additional_metadataB\x17\n\x15_child_workflow_indexB\x15\n\x13_child_workflow_keyB\x19\n\x17_parent_workflow_run_idB\x0e\n\x0c_workflow_idB\x16\n\x14_workflow_version_idB \n\x1e_durable_task_invocation_count\"(\n\x13WorkerListenRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\"-\n\x18WorkerUnsubscribeRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\"A\n\x19WorkerUnsubscribeResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\xec\x01\n\x13GroupKeyActionEvent\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x17\n\x0fworkflow_run_id\x18\x02 \x01(\t\x12\x1c\n\x14get_group_key_run_id\x18\x03 \x01(\t\x12\x11\n\taction_id\x18\x04 \x01(\t\x12\x33\n\x0f\x65vent_timestamp\x18\x05 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\nevent_type\x18\x06 \x01(\x0e\x32\x18.GroupKeyActionEventType\x12\x15\n\revent_payload\x18\x07 \x01(\t\"\xde\x02\n\x0fStepActionEvent\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x0e\n\x06job_id\x18\x02 \x01(\t\x12\x12\n\njob_run_id\x18\x03 \x01(\t\x12\x0f\n\x07task_id\x18\x04 \x01(\t\x12\x1c\n\x14task_run_external_id\x18\x05 \x01(\t\x12\x11\n\taction_id\x18\x06 \x01(\t\x12\x33\n\x0f\x65vent_timestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12(\n\nevent_type\x18\x08 \x01(\x0e\x32\x14.StepActionEventType\x12\x15\n\revent_payload\x18\t \x01(\t\x12\x18\n\x0bretry_count\x18\n \x01(\x05H\x00\x88\x01\x01\x12\x1d\n\x10should_not_retry\x18\x0b \x01(\x08H\x01\x88\x01\x01\x42\x0e\n\x0c_retry_countB\x13\n\x11_should_not_retry\";\n\x13\x41\x63tionEventResponse\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\xcc\x01\n SubscribeToWorkflowEventsRequest\x12\x1c\n\x0fworkflow_run_id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12 \n\x13\x61\x64\x64itional_meta_key\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\"\n\x15\x61\x64\x64itional_meta_value\x18\x03 \x01(\tH\x02\x88\x01\x01\x42\x12\n\x10_workflow_run_idB\x16\n\x14_additional_meta_keyB\x18\n\x16_additional_meta_value\"9\n\x1eSubscribeToWorkflowRunsRequest\x12\x17\n\x0fworkflow_run_id\x18\x01 \x01(\t\"\xe7\x02\n\rWorkflowEvent\x12\x17\n\x0fworkflow_run_id\x18\x01 \x01(\t\x12$\n\rresource_type\x18\x02 \x01(\x0e\x32\r.ResourceType\x12&\n\nevent_type\x18\x03 \x01(\x0e\x32\x12.ResourceEventType\x12\x13\n\x0bresource_id\x18\x04 \x01(\t\x12\x33\n\x0f\x65vent_timestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x15\n\revent_payload\x18\x06 \x01(\t\x12\x0e\n\x06hangup\x18\x07 \x01(\x08\x12\x19\n\x0ctask_retries\x18\x08 \x01(\x05H\x00\x88\x01\x01\x12\x18\n\x0bretry_count\x18\t \x01(\x05H\x01\x88\x01\x01\x12\x18\n\x0b\x65vent_index\x18\n 
\x01(\x03H\x02\x88\x01\x01\x42\x0f\n\r_task_retriesB\x0e\n\x0c_retry_countB\x0e\n\x0c_event_index\"\xac\x01\n\x10WorkflowRunEvent\x12\x17\n\x0fworkflow_run_id\x18\x01 \x01(\t\x12)\n\nevent_type\x18\x02 \x01(\x0e\x32\x15.WorkflowRunEventType\x12\x33\n\x0f\x65vent_timestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1f\n\x07results\x18\x04 \x03(\x0b\x32\x0e.StepRunResult\"\x92\x01\n\rStepRunResult\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\x12\x11\n\ttask_name\x18\x02 \x01(\t\x12\x12\n\njob_run_id\x18\x03 \x01(\t\x12\x12\n\x05\x65rror\x18\x04 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06output\x18\x05 \x01(\tH\x01\x88\x01\x01\x42\x08\n\x06_errorB\t\n\x07_output\"c\n\rOverridesData\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\x12\x17\n\x0f\x63\x61ller_filename\x18\x04 \x01(\t\"\x17\n\x15OverridesDataResponse\"W\n\x10HeartbeatRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x30\n\x0cheartbeat_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x13\n\x11HeartbeatResponse\"S\n\x15RefreshTimeoutRequest\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\x12\x1c\n\x14increment_timeout_by\x18\x02 \x01(\t\"H\n\x16RefreshTimeoutResponse\x12.\n\ntimeout_at\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"2\n\x12ReleaseSlotRequest\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\"\x15\n\x13ReleaseSlotResponse\"9\n\x19RestoreEvictedTaskRequest\x12\x1c\n\x14task_run_external_id\x18\x01 \x01(\t\".\n\x1aRestoreEvictedTaskResponse\x12\x10\n\x08requeued\x18\x01 \x01(\x08\"\x13\n\x11GetVersionRequest\"%\n\x12GetVersionResponse\x12\x0f\n\x07version\x18\x01 \x01(\t*A\n\x04SDKS\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x06\n\x02GO\x10\x01\x12\n\n\x06PYTHON\x10\x02\x12\x0e\n\nTYPESCRIPT\x10\x03\x12\x08\n\x04RUBY\x10\x04*N\n\nActionType\x12\x12\n\x0eSTART_STEP_RUN\x10\x00\x12\x13\n\x0f\x43\x41NCEL_STEP_RUN\x10\x01\x12\x17\n\x13START_GET_GROUP_KEY\x10\x02*\xa2\x01\n\x17GroupKeyActionEventType\x12 
\n\x1cGROUP_KEY_EVENT_TYPE_UNKNOWN\x10\x00\x12 \n\x1cGROUP_KEY_EVENT_TYPE_STARTED\x10\x01\x12\"\n\x1eGROUP_KEY_EVENT_TYPE_COMPLETED\x10\x02\x12\x1f\n\x1bGROUP_KEY_EVENT_TYPE_FAILED\x10\x03*\xac\x01\n\x13StepActionEventType\x12\x1b\n\x17STEP_EVENT_TYPE_UNKNOWN\x10\x00\x12\x1b\n\x17STEP_EVENT_TYPE_STARTED\x10\x01\x12\x1d\n\x19STEP_EVENT_TYPE_COMPLETED\x10\x02\x12\x1a\n\x16STEP_EVENT_TYPE_FAILED\x10\x03\x12 \n\x1cSTEP_EVENT_TYPE_ACKNOWLEDGED\x10\x04*e\n\x0cResourceType\x12\x19\n\x15RESOURCE_TYPE_UNKNOWN\x10\x00\x12\x1a\n\x16RESOURCE_TYPE_STEP_RUN\x10\x01\x12\x1e\n\x1aRESOURCE_TYPE_WORKFLOW_RUN\x10\x02*\xfe\x01\n\x11ResourceEventType\x12\x1f\n\x1bRESOURCE_EVENT_TYPE_UNKNOWN\x10\x00\x12\x1f\n\x1bRESOURCE_EVENT_TYPE_STARTED\x10\x01\x12!\n\x1dRESOURCE_EVENT_TYPE_COMPLETED\x10\x02\x12\x1e\n\x1aRESOURCE_EVENT_TYPE_FAILED\x10\x03\x12!\n\x1dRESOURCE_EVENT_TYPE_CANCELLED\x10\x04\x12!\n\x1dRESOURCE_EVENT_TYPE_TIMED_OUT\x10\x05\x12\x1e\n\x1aRESOURCE_EVENT_TYPE_STREAM\x10\x06*<\n\x14WorkflowRunEventType\x12$\n 
WORKFLOW_RUN_EVENT_TYPE_FINISHED\x10\x00\x32\x82\x08\n\nDispatcher\x12=\n\x08Register\x12\x16.WorkerRegisterRequest\x1a\x17.WorkerRegisterResponse\"\x00\x12\x33\n\x06Listen\x12\x14.WorkerListenRequest\x1a\x0f.AssignedAction\"\x00\x30\x01\x12\x35\n\x08ListenV2\x12\x14.WorkerListenRequest\x1a\x0f.AssignedAction\"\x00\x30\x01\x12\x34\n\tHeartbeat\x12\x11.HeartbeatRequest\x1a\x12.HeartbeatResponse\"\x00\x12R\n\x19SubscribeToWorkflowEvents\x12!.SubscribeToWorkflowEventsRequest\x1a\x0e.WorkflowEvent\"\x00\x30\x01\x12S\n\x17SubscribeToWorkflowRuns\x12\x1f.SubscribeToWorkflowRunsRequest\x1a\x11.WorkflowRunEvent\"\x00(\x01\x30\x01\x12?\n\x13SendStepActionEvent\x12\x10.StepActionEvent\x1a\x14.ActionEventResponse\"\x00\x12G\n\x17SendGroupKeyActionEvent\x12\x14.GroupKeyActionEvent\x1a\x14.ActionEventResponse\"\x00\x12<\n\x10PutOverridesData\x12\x0e.OverridesData\x1a\x16.OverridesDataResponse\"\x00\x12\x46\n\x0bUnsubscribe\x12\x19.WorkerUnsubscribeRequest\x1a\x1a.WorkerUnsubscribeResponse\"\x00\x12\x43\n\x0eRefreshTimeout\x12\x16.RefreshTimeoutRequest\x1a\x17.RefreshTimeoutResponse\"\x00\x12:\n\x0bReleaseSlot\x12\x13.ReleaseSlotRequest\x1a\x14.ReleaseSlotResponse\"\x00\x12O\n\x12RestoreEvictedTask\x12\x1a.RestoreEvictedTaskRequest\x1a\x1b.RestoreEvictedTaskResponse\"\x00\x12O\n\x12UpsertWorkerLabels\x12\x1a.UpsertWorkerLabelsRequest\x1a\x1b.UpsertWorkerLabelsResponse\"\x00\x12\x37\n\nGetVersion\x12\x12.GetVersionRequest\x1a\x13.GetVersionResponse\"\x00\x42GZEgithub.com/hatchet-dev/hatchet/internal/services/dispatcher/contractsb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -39,20 +39,20 @@ if not _descriptor._USE_C_DESCRIPTORS: _globals['_WORKERREGISTERREQUEST_SLOTCONFIGENTRY']._serialized_options = b'8\001' _globals['_UPSERTWORKERLABELSREQUEST_LABELSENTRY']._loaded_options = None _globals['_UPSERTWORKERLABELSREQUEST_LABELSENTRY']._serialized_options = b'8\001' - _globals['_SDKS']._serialized_start=4066 - 
_globals['_SDKS']._serialized_end=4131 - _globals['_ACTIONTYPE']._serialized_start=4133 - _globals['_ACTIONTYPE']._serialized_end=4211 - _globals['_GROUPKEYACTIONEVENTTYPE']._serialized_start=4214 - _globals['_GROUPKEYACTIONEVENTTYPE']._serialized_end=4376 - _globals['_STEPACTIONEVENTTYPE']._serialized_start=4379 - _globals['_STEPACTIONEVENTTYPE']._serialized_end=4551 - _globals['_RESOURCETYPE']._serialized_start=4553 - _globals['_RESOURCETYPE']._serialized_end=4654 - _globals['_RESOURCEEVENTTYPE']._serialized_start=4657 - _globals['_RESOURCEEVENTTYPE']._serialized_end=4911 - _globals['_WORKFLOWRUNEVENTTYPE']._serialized_start=4913 - _globals['_WORKFLOWRUNEVENTTYPE']._serialized_end=4973 + _globals['_SDKS']._serialized_start=4251 + _globals['_SDKS']._serialized_end=4316 + _globals['_ACTIONTYPE']._serialized_start=4318 + _globals['_ACTIONTYPE']._serialized_end=4396 + _globals['_GROUPKEYACTIONEVENTTYPE']._serialized_start=4399 + _globals['_GROUPKEYACTIONEVENTTYPE']._serialized_end=4561 + _globals['_STEPACTIONEVENTTYPE']._serialized_start=4564 + _globals['_STEPACTIONEVENTTYPE']._serialized_end=4736 + _globals['_RESOURCETYPE']._serialized_start=4738 + _globals['_RESOURCETYPE']._serialized_end=4839 + _globals['_RESOURCEEVENTTYPE']._serialized_start=4842 + _globals['_RESOURCEEVENTTYPE']._serialized_end=5096 + _globals['_WORKFLOWRUNEVENTTYPE']._serialized_start=5098 + _globals['_WORKFLOWRUNEVENTTYPE']._serialized_end=5158 _globals['_WORKERLABELS']._serialized_start=53 _globals['_WORKERLABELS']._serialized_end=143 _globals['_RUNTIMEINFO']._serialized_start=146 @@ -72,49 +72,53 @@ if not _descriptor._USE_C_DESCRIPTORS: _globals['_UPSERTWORKERLABELSRESPONSE']._serialized_start=1040 _globals['_UPSERTWORKERLABELSRESPONSE']._serialized_end=1106 _globals['_ASSIGNEDACTION']._serialized_start=1109 - _globals['_ASSIGNEDACTION']._serialized_end=1773 - _globals['_WORKERLISTENREQUEST']._serialized_start=1775 - _globals['_WORKERLISTENREQUEST']._serialized_end=1815 - 
_globals['_WORKERUNSUBSCRIBEREQUEST']._serialized_start=1817 - _globals['_WORKERUNSUBSCRIBEREQUEST']._serialized_end=1862 - _globals['_WORKERUNSUBSCRIBERESPONSE']._serialized_start=1864 - _globals['_WORKERUNSUBSCRIBERESPONSE']._serialized_end=1929 - _globals['_GROUPKEYACTIONEVENT']._serialized_start=1932 - _globals['_GROUPKEYACTIONEVENT']._serialized_end=2168 - _globals['_STEPACTIONEVENT']._serialized_start=2171 - _globals['_STEPACTIONEVENT']._serialized_end=2521 - _globals['_ACTIONEVENTRESPONSE']._serialized_start=2523 - _globals['_ACTIONEVENTRESPONSE']._serialized_end=2582 - _globals['_SUBSCRIBETOWORKFLOWEVENTSREQUEST']._serialized_start=2585 - _globals['_SUBSCRIBETOWORKFLOWEVENTSREQUEST']._serialized_end=2789 - _globals['_SUBSCRIBETOWORKFLOWRUNSREQUEST']._serialized_start=2791 - _globals['_SUBSCRIBETOWORKFLOWRUNSREQUEST']._serialized_end=2848 - _globals['_WORKFLOWEVENT']._serialized_start=2851 - _globals['_WORKFLOWEVENT']._serialized_end=3210 - _globals['_WORKFLOWRUNEVENT']._serialized_start=3213 - _globals['_WORKFLOWRUNEVENT']._serialized_end=3385 - _globals['_STEPRUNRESULT']._serialized_start=3388 - _globals['_STEPRUNRESULT']._serialized_end=3534 - _globals['_OVERRIDESDATA']._serialized_start=3536 - _globals['_OVERRIDESDATA']._serialized_end=3635 - _globals['_OVERRIDESDATARESPONSE']._serialized_start=3637 - _globals['_OVERRIDESDATARESPONSE']._serialized_end=3660 - _globals['_HEARTBEATREQUEST']._serialized_start=3662 - _globals['_HEARTBEATREQUEST']._serialized_end=3749 - _globals['_HEARTBEATRESPONSE']._serialized_start=3751 - _globals['_HEARTBEATRESPONSE']._serialized_end=3770 - _globals['_REFRESHTIMEOUTREQUEST']._serialized_start=3772 - _globals['_REFRESHTIMEOUTREQUEST']._serialized_end=3855 - _globals['_REFRESHTIMEOUTRESPONSE']._serialized_start=3857 - _globals['_REFRESHTIMEOUTRESPONSE']._serialized_end=3929 - _globals['_RELEASESLOTREQUEST']._serialized_start=3931 - _globals['_RELEASESLOTREQUEST']._serialized_end=3981 - 
_globals['_RELEASESLOTRESPONSE']._serialized_start=3983 - _globals['_RELEASESLOTRESPONSE']._serialized_end=4004 - _globals['_GETVERSIONREQUEST']._serialized_start=4006 - _globals['_GETVERSIONREQUEST']._serialized_end=4025 - _globals['_GETVERSIONRESPONSE']._serialized_start=4027 - _globals['_GETVERSIONRESPONSE']._serialized_end=4064 - _globals['_DISPATCHER']._serialized_start=4976 - _globals['_DISPATCHER']._serialized_end=5921 + _globals['_ASSIGNEDACTION']._serialized_end=1851 + _globals['_WORKERLISTENREQUEST']._serialized_start=1853 + _globals['_WORKERLISTENREQUEST']._serialized_end=1893 + _globals['_WORKERUNSUBSCRIBEREQUEST']._serialized_start=1895 + _globals['_WORKERUNSUBSCRIBEREQUEST']._serialized_end=1940 + _globals['_WORKERUNSUBSCRIBERESPONSE']._serialized_start=1942 + _globals['_WORKERUNSUBSCRIBERESPONSE']._serialized_end=2007 + _globals['_GROUPKEYACTIONEVENT']._serialized_start=2010 + _globals['_GROUPKEYACTIONEVENT']._serialized_end=2246 + _globals['_STEPACTIONEVENT']._serialized_start=2249 + _globals['_STEPACTIONEVENT']._serialized_end=2599 + _globals['_ACTIONEVENTRESPONSE']._serialized_start=2601 + _globals['_ACTIONEVENTRESPONSE']._serialized_end=2660 + _globals['_SUBSCRIBETOWORKFLOWEVENTSREQUEST']._serialized_start=2663 + _globals['_SUBSCRIBETOWORKFLOWEVENTSREQUEST']._serialized_end=2867 + _globals['_SUBSCRIBETOWORKFLOWRUNSREQUEST']._serialized_start=2869 + _globals['_SUBSCRIBETOWORKFLOWRUNSREQUEST']._serialized_end=2926 + _globals['_WORKFLOWEVENT']._serialized_start=2929 + _globals['_WORKFLOWEVENT']._serialized_end=3288 + _globals['_WORKFLOWRUNEVENT']._serialized_start=3291 + _globals['_WORKFLOWRUNEVENT']._serialized_end=3463 + _globals['_STEPRUNRESULT']._serialized_start=3466 + _globals['_STEPRUNRESULT']._serialized_end=3612 + _globals['_OVERRIDESDATA']._serialized_start=3614 + _globals['_OVERRIDESDATA']._serialized_end=3713 + _globals['_OVERRIDESDATARESPONSE']._serialized_start=3715 + _globals['_OVERRIDESDATARESPONSE']._serialized_end=3738 + 
_globals['_HEARTBEATREQUEST']._serialized_start=3740 + _globals['_HEARTBEATREQUEST']._serialized_end=3827 + _globals['_HEARTBEATRESPONSE']._serialized_start=3829 + _globals['_HEARTBEATRESPONSE']._serialized_end=3848 + _globals['_REFRESHTIMEOUTREQUEST']._serialized_start=3850 + _globals['_REFRESHTIMEOUTREQUEST']._serialized_end=3933 + _globals['_REFRESHTIMEOUTRESPONSE']._serialized_start=3935 + _globals['_REFRESHTIMEOUTRESPONSE']._serialized_end=4007 + _globals['_RELEASESLOTREQUEST']._serialized_start=4009 + _globals['_RELEASESLOTREQUEST']._serialized_end=4059 + _globals['_RELEASESLOTRESPONSE']._serialized_start=4061 + _globals['_RELEASESLOTRESPONSE']._serialized_end=4082 + _globals['_RESTOREEVICTEDTASKREQUEST']._serialized_start=4084 + _globals['_RESTOREEVICTEDTASKREQUEST']._serialized_end=4141 + _globals['_RESTOREEVICTEDTASKRESPONSE']._serialized_start=4143 + _globals['_RESTOREEVICTEDTASKRESPONSE']._serialized_end=4189 + _globals['_GETVERSIONREQUEST']._serialized_start=4191 + _globals['_GETVERSIONREQUEST']._serialized_end=4210 + _globals['_GETVERSIONRESPONSE']._serialized_start=4212 + _globals['_GETVERSIONRESPONSE']._serialized_end=4249 + _globals['_DISPATCHER']._serialized_start=5161 + _globals['_DISPATCHER']._serialized_end=6187 # @@protoc_insertion_point(module_scope) diff --git a/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.pyi b/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.pyi index 393db66c5..a02bfeca7 100644 --- a/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.pyi +++ b/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.pyi @@ -177,7 +177,7 @@ class UpsertWorkerLabelsResponse(_message.Message): def __init__(self, tenant_id: _Optional[str] = ..., worker_id: _Optional[str] = ...) -> None: ... 
class AssignedAction(_message.Message): - __slots__ = ("tenant_id", "workflow_run_id", "get_group_key_run_id", "job_id", "job_name", "job_run_id", "task_id", "task_run_external_id", "action_id", "action_type", "action_payload", "task_name", "retry_count", "additional_metadata", "child_workflow_index", "child_workflow_key", "parent_workflow_run_id", "priority", "workflow_id", "workflow_version_id") + __slots__ = ("tenant_id", "workflow_run_id", "get_group_key_run_id", "job_id", "job_name", "job_run_id", "task_id", "task_run_external_id", "action_id", "action_type", "action_payload", "task_name", "retry_count", "additional_metadata", "child_workflow_index", "child_workflow_key", "parent_workflow_run_id", "priority", "workflow_id", "workflow_version_id", "durable_task_invocation_count") TENANT_ID_FIELD_NUMBER: _ClassVar[int] WORKFLOW_RUN_ID_FIELD_NUMBER: _ClassVar[int] GET_GROUP_KEY_RUN_ID_FIELD_NUMBER: _ClassVar[int] @@ -198,6 +198,7 @@ class AssignedAction(_message.Message): PRIORITY_FIELD_NUMBER: _ClassVar[int] WORKFLOW_ID_FIELD_NUMBER: _ClassVar[int] WORKFLOW_VERSION_ID_FIELD_NUMBER: _ClassVar[int] + DURABLE_TASK_INVOCATION_COUNT_FIELD_NUMBER: _ClassVar[int] tenant_id: str workflow_run_id: str get_group_key_run_id: str @@ -218,7 +219,8 @@ class AssignedAction(_message.Message): priority: int workflow_id: str workflow_version_id: str - def __init__(self, tenant_id: _Optional[str] = ..., workflow_run_id: _Optional[str] = ..., get_group_key_run_id: _Optional[str] = ..., job_id: _Optional[str] = ..., job_name: _Optional[str] = ..., job_run_id: _Optional[str] = ..., task_id: _Optional[str] = ..., task_run_external_id: _Optional[str] = ..., action_id: _Optional[str] = ..., action_type: _Optional[_Union[ActionType, str]] = ..., action_payload: _Optional[str] = ..., task_name: _Optional[str] = ..., retry_count: _Optional[int] = ..., additional_metadata: _Optional[str] = ..., child_workflow_index: _Optional[int] = ..., child_workflow_key: _Optional[str] = ..., 
parent_workflow_run_id: _Optional[str] = ..., priority: _Optional[int] = ..., workflow_id: _Optional[str] = ..., workflow_version_id: _Optional[str] = ...) -> None: ... + durable_task_invocation_count: int + def __init__(self, tenant_id: _Optional[str] = ..., workflow_run_id: _Optional[str] = ..., get_group_key_run_id: _Optional[str] = ..., job_id: _Optional[str] = ..., job_name: _Optional[str] = ..., job_run_id: _Optional[str] = ..., task_id: _Optional[str] = ..., task_run_external_id: _Optional[str] = ..., action_id: _Optional[str] = ..., action_type: _Optional[_Union[ActionType, str]] = ..., action_payload: _Optional[str] = ..., task_name: _Optional[str] = ..., retry_count: _Optional[int] = ..., additional_metadata: _Optional[str] = ..., child_workflow_index: _Optional[int] = ..., child_workflow_key: _Optional[str] = ..., parent_workflow_run_id: _Optional[str] = ..., priority: _Optional[int] = ..., workflow_id: _Optional[str] = ..., workflow_version_id: _Optional[str] = ..., durable_task_invocation_count: _Optional[int] = ...) -> None: ... class WorkerListenRequest(_message.Message): __slots__ = ("worker_id",) @@ -410,6 +412,18 @@ class ReleaseSlotResponse(_message.Message): __slots__ = () def __init__(self) -> None: ... +class RestoreEvictedTaskRequest(_message.Message): + __slots__ = ("task_run_external_id",) + TASK_RUN_EXTERNAL_ID_FIELD_NUMBER: _ClassVar[int] + task_run_external_id: str + def __init__(self, task_run_external_id: _Optional[str] = ...) -> None: ... + +class RestoreEvictedTaskResponse(_message.Message): + __slots__ = ("requeued",) + REQUEUED_FIELD_NUMBER: _ClassVar[int] + requeued: bool + def __init__(self, requeued: bool = ...) -> None: ... + class GetVersionRequest(_message.Message): __slots__ = () def __init__(self) -> None: ... 
diff --git a/sdks/python/hatchet_sdk/contracts/dispatcher_pb2_grpc.py b/sdks/python/hatchet_sdk/contracts/dispatcher_pb2_grpc.py index dd51b9f62..e33bea57a 100644 --- a/sdks/python/hatchet_sdk/contracts/dispatcher_pb2_grpc.py +++ b/sdks/python/hatchet_sdk/contracts/dispatcher_pb2_grpc.py @@ -94,6 +94,11 @@ class DispatcherStub(object): request_serializer=dispatcher__pb2.ReleaseSlotRequest.SerializeToString, response_deserializer=dispatcher__pb2.ReleaseSlotResponse.FromString, _registered_method=True) + self.RestoreEvictedTask = channel.unary_unary( + '/Dispatcher/RestoreEvictedTask', + request_serializer=dispatcher__pb2.RestoreEvictedTaskRequest.SerializeToString, + response_deserializer=dispatcher__pb2.RestoreEvictedTaskResponse.FromString, + _registered_method=True) self.UpsertWorkerLabels = channel.unary_unary( '/Dispatcher/UpsertWorkerLabels', request_serializer=dispatcher__pb2.UpsertWorkerLabelsRequest.SerializeToString, @@ -184,6 +189,12 @@ class DispatcherServicer(object): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def RestoreEvictedTask(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def UpsertWorkerLabels(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) @@ -262,6 +273,11 @@ def add_DispatcherServicer_to_server(servicer, server): request_deserializer=dispatcher__pb2.ReleaseSlotRequest.FromString, response_serializer=dispatcher__pb2.ReleaseSlotResponse.SerializeToString, ), + 'RestoreEvictedTask': grpc.unary_unary_rpc_method_handler( + servicer.RestoreEvictedTask, + request_deserializer=dispatcher__pb2.RestoreEvictedTaskRequest.FromString, + 
response_serializer=dispatcher__pb2.RestoreEvictedTaskResponse.SerializeToString, + ), 'UpsertWorkerLabels': grpc.unary_unary_rpc_method_handler( servicer.UpsertWorkerLabels, request_deserializer=dispatcher__pb2.UpsertWorkerLabelsRequest.FromString, @@ -607,6 +623,33 @@ class Dispatcher(object): metadata, _registered_method=True) + @staticmethod + def RestoreEvictedTask(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/Dispatcher/RestoreEvictedTask', + dispatcher__pb2.RestoreEvictedTaskRequest.SerializeToString, + dispatcher__pb2.RestoreEvictedTaskResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + @staticmethod def UpsertWorkerLabels(request, target, diff --git a/sdks/python/hatchet_sdk/contracts/v1/dispatcher_pb2.py b/sdks/python/hatchet_sdk/contracts/v1/dispatcher_pb2.py index 988661d82..946f1f7e6 100644 --- a/sdks/python/hatchet_sdk/contracts/v1/dispatcher_pb2.py +++ b/sdks/python/hatchet_sdk/contracts/v1/dispatcher_pb2.py @@ -23,9 +23,10 @@ _sym_db = _symbol_database.Default() from hatchet_sdk.contracts.v1.shared import condition_pb2 as v1_dot_shared_dot_condition__pb2 +from hatchet_sdk.contracts.v1.shared import trigger_pb2 as v1_dot_shared_dot_trigger__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13v1/dispatcher.proto\x12\x02v1\x1a\x19v1/shared/condition.proto\"z\n\x1bRegisterDurableEventRequest\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x12\n\nsignal_key\x18\x02 \x01(\t\x12\x36\n\nconditions\x18\x03 \x01(\x0b\x32\".v1.DurableEventListenerConditions\"\x1e\n\x1cRegisterDurableEventResponse\"C\n\x1cListenForDurableEventRequest\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x12\n\nsignal_key\x18\x02 
\x01(\t\"A\n\x0c\x44urableEvent\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x12\n\nsignal_key\x18\x02 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\x32\xbe\x01\n\x0cV1Dispatcher\x12[\n\x14RegisterDurableEvent\x12\x1f.v1.RegisterDurableEventRequest\x1a .v1.RegisterDurableEventResponse\"\x00\x12Q\n\x15ListenForDurableEvent\x12 .v1.ListenForDurableEventRequest\x1a\x10.v1.DurableEvent\"\x00(\x01\x30\x01\x42\x42Z@github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1b\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13v1/dispatcher.proto\x12\x02v1\x1a\x19v1/shared/condition.proto\x1a\x17v1/shared/trigger.proto\"5\n DurableTaskRequestRegisterWorker\x12\x11\n\tworker_id\x18\x01 \x01(\t\"6\n!DurableTaskResponseRegisterWorker\x12\x11\n\tworker_id\x18\x01 \x01(\t\"y\n\x17\x44urableEventLogEntryRef\x12 \n\x18\x64urable_task_external_id\x18\x01 \x01(\t\x12\x18\n\x10invocation_count\x18\x02 \x01(\x05\x12\x11\n\tbranch_id\x18\x03 \x01(\x03\x12\x0f\n\x07node_id\x18\x04 \x01(\x03\"<\n\x16\x44urableTaskRunAckEntry\x12\x0f\n\x07node_id\x18\x01 \x01(\x03\x12\x11\n\tbranch_id\x18\x02 \x01(\x03\"\xa3\x01\n\x1f\x44urableTaskEventMemoAckResponse\x12(\n\x03ref\x18\x01 \x01(\x0b\x32\x1b.v1.DurableEventLogEntryRef\x12\x1c\n\x14memo_already_existed\x18\x02 \x01(\x08\x12 \n\x13memo_result_payload\x18\x03 \x01(\x0cH\x00\x88\x01\x01\x42\x16\n\x14_memo_result_payload\"\x95\x01\n&DurableTaskEventTriggerRunsAckResponse\x12 \n\x18\x64urable_task_external_id\x18\x01 \x01(\t\x12\x18\n\x10invocation_count\x18\x02 \x01(\x05\x12/\n\x0brun_entries\x18\x03 \x03(\x0b\x32\x1a.v1.DurableTaskRunAckEntry\"N\n\"DurableTaskEventWaitForAckResponse\x12(\n\x03ref\x18\x01 \x01(\x0b\x32\x1b.v1.DurableEventLogEntryRef\"f\n)DurableTaskEventLogEntryCompletedResponse\x12(\n\x03ref\x18\x01 \x01(\x0b\x32\x1b.v1.DurableEventLogEntryRef\x12\x0f\n\x07payload\x18\x02 \x01(\x0c\"\x7f\n!DurableTaskEvictInvocationRequest\x12\x18\n\x10invocation_count\x18\x01 \x01(\x05\x12 
\n\x18\x64urable_task_external_id\x18\x02 \x01(\t\x12\x13\n\x06reason\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\t\n\x07_reason\"\\\n\x1e\x44urableTaskEvictionAckResponse\x12\x18\n\x10invocation_count\x18\x01 \x01(\x05\x12 \n\x18\x64urable_task_external_id\x18\x02 \x01(\t\"\x82\x01\n DurableTaskAwaitedCompletedEntry\x12 \n\x18\x64urable_task_external_id\x18\x01 \x01(\t\x12\x11\n\tbranch_id\x18\x02 \x01(\x03\x12\x0f\n\x07node_id\x18\x03 \x01(\x03\x12\x18\n\x10invocation_count\x18\x04 \x01(\x05\"j\n\x1c\x44urableTaskServerEvictNotice\x12 \n\x18\x64urable_task_external_id\x18\x01 \x01(\t\x12\x18\n\x10invocation_count\x18\x02 \x01(\x05\x12\x0e\n\x06reason\x18\x03 \x01(\t\"r\n\x1e\x44urableTaskWorkerStatusRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12=\n\x0fwaiting_entries\x18\x02 \x03(\x0b\x32$.v1.DurableTaskAwaitedCompletedEntry\"m\n\x1e\x44urableTaskCompleteMemoRequest\x12(\n\x03ref\x18\x01 \x01(\x0b\x32\x1b.v1.DurableEventLogEntryRef\x12\x0f\n\x07payload\x18\x02 \x01(\x0c\x12\x10\n\x08memo_key\x18\x03 \x01(\x0c\"\x83\x01\n\x16\x44urableTaskMemoRequest\x12\x18\n\x10invocation_count\x18\x01 \x01(\x05\x12 \n\x18\x64urable_task_external_id\x18\x02 \x01(\t\x12\x0b\n\x03key\x18\x03 \x01(\x0c\x12\x14\n\x07payload\x18\x04 \x01(\x0cH\x00\x88\x01\x01\x42\n\n\x08_payload\"\x8d\x01\n\x1d\x44urableTaskTriggerRunsRequest\x12\x18\n\x10invocation_count\x18\x01 \x01(\x05\x12 \n\x18\x64urable_task_external_id\x18\x02 \x01(\t\x12\x30\n\x0ctrigger_opts\x18\x03 \x03(\x0b\x32\x1a.v1.TriggerWorkflowRequest\"\xb5\x01\n\x19\x44urableTaskWaitForRequest\x12\x18\n\x10invocation_count\x18\x01 \x01(\x05\x12 \n\x18\x64urable_task_external_id\x18\x02 \x01(\t\x12\x44\n\x13wait_for_conditions\x18\x03 \x01(\x0b\x32\".v1.DurableEventListenerConditionsH\x00\x88\x01\x01\x42\x16\n\x14_wait_for_conditions\"\xb7\x03\n\x12\x44urableTaskRequest\x12?\n\x0fregister_worker\x18\x01 \x01(\x0b\x32$.v1.DurableTaskRequestRegisterWorkerH\x00\x12*\n\x04memo\x18\x02 
\x01(\x0b\x32\x1a.v1.DurableTaskMemoRequestH\x00\x12\x39\n\x0ctrigger_runs\x18\x03 \x01(\x0b\x32!.v1.DurableTaskTriggerRunsRequestH\x00\x12\x31\n\x08wait_for\x18\x04 \x01(\x0b\x32\x1d.v1.DurableTaskWaitForRequestH\x00\x12\x41\n\x10\x65vict_invocation\x18\x05 \x01(\x0b\x32%.v1.DurableTaskEvictInvocationRequestH\x00\x12;\n\rworker_status\x18\x06 \x01(\x0b\x32\".v1.DurableTaskWorkerStatusRequestH\x00\x12;\n\rcomplete_memo\x18\x07 \x01(\x0b\x32\".v1.DurableTaskCompleteMemoRequestH\x00\x42\t\n\x07message\"\x89\x01\n\x18\x44urableTaskErrorResponse\x12(\n\x03ref\x18\x01 \x01(\x0b\x32\x1b.v1.DurableEventLogEntryRef\x12,\n\nerror_type\x18\x02 \x01(\x0e\x32\x18.v1.DurableTaskErrorType\x12\x15\n\rerror_message\x18\x03 \x01(\t\"\x92\x04\n\x13\x44urableTaskResponse\x12@\n\x0fregister_worker\x18\x01 \x01(\x0b\x32%.v1.DurableTaskResponseRegisterWorkerH\x00\x12\x37\n\x08memo_ack\x18\x02 \x01(\x0b\x32#.v1.DurableTaskEventMemoAckResponseH\x00\x12\x46\n\x10trigger_runs_ack\x18\x03 \x01(\x0b\x32*.v1.DurableTaskEventTriggerRunsAckResponseH\x00\x12>\n\x0cwait_for_ack\x18\x04 \x01(\x0b\x32&.v1.DurableTaskEventWaitForAckResponseH\x00\x12H\n\x0f\x65ntry_completed\x18\x05 \x01(\x0b\x32-.v1.DurableTaskEventLogEntryCompletedResponseH\x00\x12-\n\x05\x65rror\x18\x06 \x01(\x0b\x32\x1c.v1.DurableTaskErrorResponseH\x00\x12:\n\x0c\x65viction_ack\x18\x07 \x01(\x0b\x32\".v1.DurableTaskEvictionAckResponseH\x00\x12\x38\n\x0cserver_evict\x18\x08 \x01(\x0b\x32 .v1.DurableTaskServerEvictNoticeH\x00\x42\t\n\x07message\"z\n\x1bRegisterDurableEventRequest\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x12\n\nsignal_key\x18\x02 \x01(\t\x12\x36\n\nconditions\x18\x03 \x01(\x0b\x32\".v1.DurableEventListenerConditions\"\x1e\n\x1cRegisterDurableEventResponse\"C\n\x1cListenForDurableEventRequest\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x12\n\nsignal_key\x18\x02 \x01(\t\"A\n\x0c\x44urableEvent\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x12\n\nsignal_key\x18\x02 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x03 
\x01(\x0c*k\n\x14\x44urableTaskErrorType\x12\'\n#DURABLE_TASK_ERROR_TYPE_UNSPECIFIED\x10\x00\x12*\n&DURABLE_TASK_ERROR_TYPE_NONDETERMINISM\x10\x01\x32\x84\x02\n\x0cV1Dispatcher\x12\x44\n\x0b\x44urableTask\x12\x16.v1.DurableTaskRequest\x1a\x17.v1.DurableTaskResponse\"\x00(\x01\x30\x01\x12[\n\x14RegisterDurableEvent\x12\x1f.v1.RegisterDurableEventRequest\x1a .v1.RegisterDurableEventResponse\"\x00\x12Q\n\x15ListenForDurableEvent\x12 .v1.ListenForDurableEventRequest\x1a\x10.v1.DurableEvent\"\x00(\x01\x30\x01\x42\x42Z@github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1b\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -33,14 +34,56 @@ _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'v1.dispatcher_pb2', _global if not _descriptor._USE_C_DESCRIPTORS: _globals['DESCRIPTOR']._loaded_options = None _globals['DESCRIPTOR']._serialized_options = b'Z@github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1' - _globals['_REGISTERDURABLEEVENTREQUEST']._serialized_start=54 - _globals['_REGISTERDURABLEEVENTREQUEST']._serialized_end=176 - _globals['_REGISTERDURABLEEVENTRESPONSE']._serialized_start=178 - _globals['_REGISTERDURABLEEVENTRESPONSE']._serialized_end=208 - _globals['_LISTENFORDURABLEEVENTREQUEST']._serialized_start=210 - _globals['_LISTENFORDURABLEEVENTREQUEST']._serialized_end=277 - _globals['_DURABLEEVENT']._serialized_start=279 - _globals['_DURABLEEVENT']._serialized_end=344 - _globals['_V1DISPATCHER']._serialized_start=347 - _globals['_V1DISPATCHER']._serialized_end=537 + _globals['_DURABLETASKERRORTYPE']._serialized_start=3437 + _globals['_DURABLETASKERRORTYPE']._serialized_end=3544 + _globals['_DURABLETASKREQUESTREGISTERWORKER']._serialized_start=79 + _globals['_DURABLETASKREQUESTREGISTERWORKER']._serialized_end=132 + _globals['_DURABLETASKRESPONSEREGISTERWORKER']._serialized_start=134 + _globals['_DURABLETASKRESPONSEREGISTERWORKER']._serialized_end=188 + 
_globals['_DURABLEEVENTLOGENTRYREF']._serialized_start=190 + _globals['_DURABLEEVENTLOGENTRYREF']._serialized_end=311 + _globals['_DURABLETASKRUNACKENTRY']._serialized_start=313 + _globals['_DURABLETASKRUNACKENTRY']._serialized_end=373 + _globals['_DURABLETASKEVENTMEMOACKRESPONSE']._serialized_start=376 + _globals['_DURABLETASKEVENTMEMOACKRESPONSE']._serialized_end=539 + _globals['_DURABLETASKEVENTTRIGGERRUNSACKRESPONSE']._serialized_start=542 + _globals['_DURABLETASKEVENTTRIGGERRUNSACKRESPONSE']._serialized_end=691 + _globals['_DURABLETASKEVENTWAITFORACKRESPONSE']._serialized_start=693 + _globals['_DURABLETASKEVENTWAITFORACKRESPONSE']._serialized_end=771 + _globals['_DURABLETASKEVENTLOGENTRYCOMPLETEDRESPONSE']._serialized_start=773 + _globals['_DURABLETASKEVENTLOGENTRYCOMPLETEDRESPONSE']._serialized_end=875 + _globals['_DURABLETASKEVICTINVOCATIONREQUEST']._serialized_start=877 + _globals['_DURABLETASKEVICTINVOCATIONREQUEST']._serialized_end=1004 + _globals['_DURABLETASKEVICTIONACKRESPONSE']._serialized_start=1006 + _globals['_DURABLETASKEVICTIONACKRESPONSE']._serialized_end=1098 + _globals['_DURABLETASKAWAITEDCOMPLETEDENTRY']._serialized_start=1101 + _globals['_DURABLETASKAWAITEDCOMPLETEDENTRY']._serialized_end=1231 + _globals['_DURABLETASKSERVEREVICTNOTICE']._serialized_start=1233 + _globals['_DURABLETASKSERVEREVICTNOTICE']._serialized_end=1339 + _globals['_DURABLETASKWORKERSTATUSREQUEST']._serialized_start=1341 + _globals['_DURABLETASKWORKERSTATUSREQUEST']._serialized_end=1455 + _globals['_DURABLETASKCOMPLETEMEMOREQUEST']._serialized_start=1457 + _globals['_DURABLETASKCOMPLETEMEMOREQUEST']._serialized_end=1566 + _globals['_DURABLETASKMEMOREQUEST']._serialized_start=1569 + _globals['_DURABLETASKMEMOREQUEST']._serialized_end=1700 + _globals['_DURABLETASKTRIGGERRUNSREQUEST']._serialized_start=1703 + _globals['_DURABLETASKTRIGGERRUNSREQUEST']._serialized_end=1844 + _globals['_DURABLETASKWAITFORREQUEST']._serialized_start=1847 + 
_globals['_DURABLETASKWAITFORREQUEST']._serialized_end=2028 + _globals['_DURABLETASKREQUEST']._serialized_start=2031 + _globals['_DURABLETASKREQUEST']._serialized_end=2470 + _globals['_DURABLETASKERRORRESPONSE']._serialized_start=2473 + _globals['_DURABLETASKERRORRESPONSE']._serialized_end=2610 + _globals['_DURABLETASKRESPONSE']._serialized_start=2613 + _globals['_DURABLETASKRESPONSE']._serialized_end=3143 + _globals['_REGISTERDURABLEEVENTREQUEST']._serialized_start=3145 + _globals['_REGISTERDURABLEEVENTREQUEST']._serialized_end=3267 + _globals['_REGISTERDURABLEEVENTRESPONSE']._serialized_start=3269 + _globals['_REGISTERDURABLEEVENTRESPONSE']._serialized_end=3299 + _globals['_LISTENFORDURABLEEVENTREQUEST']._serialized_start=3301 + _globals['_LISTENFORDURABLEEVENTREQUEST']._serialized_end=3368 + _globals['_DURABLEEVENT']._serialized_start=3370 + _globals['_DURABLEEVENT']._serialized_end=3435 + _globals['_V1DISPATCHER']._serialized_start=3547 + _globals['_V1DISPATCHER']._serialized_end=3807 # @@protoc_insertion_point(module_scope) diff --git a/sdks/python/hatchet_sdk/contracts/v1/dispatcher_pb2.pyi b/sdks/python/hatchet_sdk/contracts/v1/dispatcher_pb2.pyi index c8b3ddc79..eddb855ca 100644 --- a/sdks/python/hatchet_sdk/contracts/v1/dispatcher_pb2.pyi +++ b/sdks/python/hatchet_sdk/contracts/v1/dispatcher_pb2.pyi @@ -1,11 +1,225 @@ from hatchet_sdk.contracts.v1.shared import condition_pb2 as _condition_pb2 +from hatchet_sdk.contracts.v1.shared import trigger_pb2 as _trigger_pb2 +from google.protobuf.internal import containers as _containers +from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message -from collections.abc import Mapping as _Mapping +from collections.abc import Iterable as _Iterable, Mapping as _Mapping from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union DESCRIPTOR: _descriptor.FileDescriptor +class 
DurableTaskErrorType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + DURABLE_TASK_ERROR_TYPE_UNSPECIFIED: _ClassVar[DurableTaskErrorType] + DURABLE_TASK_ERROR_TYPE_NONDETERMINISM: _ClassVar[DurableTaskErrorType] +DURABLE_TASK_ERROR_TYPE_UNSPECIFIED: DurableTaskErrorType +DURABLE_TASK_ERROR_TYPE_NONDETERMINISM: DurableTaskErrorType + +class DurableTaskRequestRegisterWorker(_message.Message): + __slots__ = ("worker_id",) + WORKER_ID_FIELD_NUMBER: _ClassVar[int] + worker_id: str + def __init__(self, worker_id: _Optional[str] = ...) -> None: ... + +class DurableTaskResponseRegisterWorker(_message.Message): + __slots__ = ("worker_id",) + WORKER_ID_FIELD_NUMBER: _ClassVar[int] + worker_id: str + def __init__(self, worker_id: _Optional[str] = ...) -> None: ... + +class DurableEventLogEntryRef(_message.Message): + __slots__ = ("durable_task_external_id", "invocation_count", "branch_id", "node_id") + DURABLE_TASK_EXTERNAL_ID_FIELD_NUMBER: _ClassVar[int] + INVOCATION_COUNT_FIELD_NUMBER: _ClassVar[int] + BRANCH_ID_FIELD_NUMBER: _ClassVar[int] + NODE_ID_FIELD_NUMBER: _ClassVar[int] + durable_task_external_id: str + invocation_count: int + branch_id: int + node_id: int + def __init__(self, durable_task_external_id: _Optional[str] = ..., invocation_count: _Optional[int] = ..., branch_id: _Optional[int] = ..., node_id: _Optional[int] = ...) -> None: ... + +class DurableTaskRunAckEntry(_message.Message): + __slots__ = ("node_id", "branch_id") + NODE_ID_FIELD_NUMBER: _ClassVar[int] + BRANCH_ID_FIELD_NUMBER: _ClassVar[int] + node_id: int + branch_id: int + def __init__(self, node_id: _Optional[int] = ..., branch_id: _Optional[int] = ...) -> None: ... 
+ +class DurableTaskEventMemoAckResponse(_message.Message): + __slots__ = ("ref", "memo_already_existed", "memo_result_payload") + REF_FIELD_NUMBER: _ClassVar[int] + MEMO_ALREADY_EXISTED_FIELD_NUMBER: _ClassVar[int] + MEMO_RESULT_PAYLOAD_FIELD_NUMBER: _ClassVar[int] + ref: DurableEventLogEntryRef + memo_already_existed: bool + memo_result_payload: bytes + def __init__(self, ref: _Optional[_Union[DurableEventLogEntryRef, _Mapping]] = ..., memo_already_existed: bool = ..., memo_result_payload: _Optional[bytes] = ...) -> None: ... + +class DurableTaskEventTriggerRunsAckResponse(_message.Message): + __slots__ = ("durable_task_external_id", "invocation_count", "run_entries") + DURABLE_TASK_EXTERNAL_ID_FIELD_NUMBER: _ClassVar[int] + INVOCATION_COUNT_FIELD_NUMBER: _ClassVar[int] + RUN_ENTRIES_FIELD_NUMBER: _ClassVar[int] + durable_task_external_id: str + invocation_count: int + run_entries: _containers.RepeatedCompositeFieldContainer[DurableTaskRunAckEntry] + def __init__(self, durable_task_external_id: _Optional[str] = ..., invocation_count: _Optional[int] = ..., run_entries: _Optional[_Iterable[_Union[DurableTaskRunAckEntry, _Mapping]]] = ...) -> None: ... + +class DurableTaskEventWaitForAckResponse(_message.Message): + __slots__ = ("ref",) + REF_FIELD_NUMBER: _ClassVar[int] + ref: DurableEventLogEntryRef + def __init__(self, ref: _Optional[_Union[DurableEventLogEntryRef, _Mapping]] = ...) -> None: ... + +class DurableTaskEventLogEntryCompletedResponse(_message.Message): + __slots__ = ("ref", "payload") + REF_FIELD_NUMBER: _ClassVar[int] + PAYLOAD_FIELD_NUMBER: _ClassVar[int] + ref: DurableEventLogEntryRef + payload: bytes + def __init__(self, ref: _Optional[_Union[DurableEventLogEntryRef, _Mapping]] = ..., payload: _Optional[bytes] = ...) -> None: ... 
+ +class DurableTaskEvictInvocationRequest(_message.Message): + __slots__ = ("invocation_count", "durable_task_external_id", "reason") + INVOCATION_COUNT_FIELD_NUMBER: _ClassVar[int] + DURABLE_TASK_EXTERNAL_ID_FIELD_NUMBER: _ClassVar[int] + REASON_FIELD_NUMBER: _ClassVar[int] + invocation_count: int + durable_task_external_id: str + reason: str + def __init__(self, invocation_count: _Optional[int] = ..., durable_task_external_id: _Optional[str] = ..., reason: _Optional[str] = ...) -> None: ... + +class DurableTaskEvictionAckResponse(_message.Message): + __slots__ = ("invocation_count", "durable_task_external_id") + INVOCATION_COUNT_FIELD_NUMBER: _ClassVar[int] + DURABLE_TASK_EXTERNAL_ID_FIELD_NUMBER: _ClassVar[int] + invocation_count: int + durable_task_external_id: str + def __init__(self, invocation_count: _Optional[int] = ..., durable_task_external_id: _Optional[str] = ...) -> None: ... + +class DurableTaskAwaitedCompletedEntry(_message.Message): + __slots__ = ("durable_task_external_id", "branch_id", "node_id", "invocation_count") + DURABLE_TASK_EXTERNAL_ID_FIELD_NUMBER: _ClassVar[int] + BRANCH_ID_FIELD_NUMBER: _ClassVar[int] + NODE_ID_FIELD_NUMBER: _ClassVar[int] + INVOCATION_COUNT_FIELD_NUMBER: _ClassVar[int] + durable_task_external_id: str + branch_id: int + node_id: int + invocation_count: int + def __init__(self, durable_task_external_id: _Optional[str] = ..., branch_id: _Optional[int] = ..., node_id: _Optional[int] = ..., invocation_count: _Optional[int] = ...) -> None: ... + +class DurableTaskServerEvictNotice(_message.Message): + __slots__ = ("durable_task_external_id", "invocation_count", "reason") + DURABLE_TASK_EXTERNAL_ID_FIELD_NUMBER: _ClassVar[int] + INVOCATION_COUNT_FIELD_NUMBER: _ClassVar[int] + REASON_FIELD_NUMBER: _ClassVar[int] + durable_task_external_id: str + invocation_count: int + reason: str + def __init__(self, durable_task_external_id: _Optional[str] = ..., invocation_count: _Optional[int] = ..., reason: _Optional[str] = ...) 
-> None: ... + +class DurableTaskWorkerStatusRequest(_message.Message): + __slots__ = ("worker_id", "waiting_entries") + WORKER_ID_FIELD_NUMBER: _ClassVar[int] + WAITING_ENTRIES_FIELD_NUMBER: _ClassVar[int] + worker_id: str + waiting_entries: _containers.RepeatedCompositeFieldContainer[DurableTaskAwaitedCompletedEntry] + def __init__(self, worker_id: _Optional[str] = ..., waiting_entries: _Optional[_Iterable[_Union[DurableTaskAwaitedCompletedEntry, _Mapping]]] = ...) -> None: ... + +class DurableTaskCompleteMemoRequest(_message.Message): + __slots__ = ("ref", "payload", "memo_key") + REF_FIELD_NUMBER: _ClassVar[int] + PAYLOAD_FIELD_NUMBER: _ClassVar[int] + MEMO_KEY_FIELD_NUMBER: _ClassVar[int] + ref: DurableEventLogEntryRef + payload: bytes + memo_key: bytes + def __init__(self, ref: _Optional[_Union[DurableEventLogEntryRef, _Mapping]] = ..., payload: _Optional[bytes] = ..., memo_key: _Optional[bytes] = ...) -> None: ... + +class DurableTaskMemoRequest(_message.Message): + __slots__ = ("invocation_count", "durable_task_external_id", "key", "payload") + INVOCATION_COUNT_FIELD_NUMBER: _ClassVar[int] + DURABLE_TASK_EXTERNAL_ID_FIELD_NUMBER: _ClassVar[int] + KEY_FIELD_NUMBER: _ClassVar[int] + PAYLOAD_FIELD_NUMBER: _ClassVar[int] + invocation_count: int + durable_task_external_id: str + key: bytes + payload: bytes + def __init__(self, invocation_count: _Optional[int] = ..., durable_task_external_id: _Optional[str] = ..., key: _Optional[bytes] = ..., payload: _Optional[bytes] = ...) -> None: ... 
+ +class DurableTaskTriggerRunsRequest(_message.Message): + __slots__ = ("invocation_count", "durable_task_external_id", "trigger_opts") + INVOCATION_COUNT_FIELD_NUMBER: _ClassVar[int] + DURABLE_TASK_EXTERNAL_ID_FIELD_NUMBER: _ClassVar[int] + TRIGGER_OPTS_FIELD_NUMBER: _ClassVar[int] + invocation_count: int + durable_task_external_id: str + trigger_opts: _containers.RepeatedCompositeFieldContainer[_trigger_pb2.TriggerWorkflowRequest] + def __init__(self, invocation_count: _Optional[int] = ..., durable_task_external_id: _Optional[str] = ..., trigger_opts: _Optional[_Iterable[_Union[_trigger_pb2.TriggerWorkflowRequest, _Mapping]]] = ...) -> None: ... + +class DurableTaskWaitForRequest(_message.Message): + __slots__ = ("invocation_count", "durable_task_external_id", "wait_for_conditions") + INVOCATION_COUNT_FIELD_NUMBER: _ClassVar[int] + DURABLE_TASK_EXTERNAL_ID_FIELD_NUMBER: _ClassVar[int] + WAIT_FOR_CONDITIONS_FIELD_NUMBER: _ClassVar[int] + invocation_count: int + durable_task_external_id: str + wait_for_conditions: _condition_pb2.DurableEventListenerConditions + def __init__(self, invocation_count: _Optional[int] = ..., durable_task_external_id: _Optional[str] = ..., wait_for_conditions: _Optional[_Union[_condition_pb2.DurableEventListenerConditions, _Mapping]] = ...) -> None: ... 
+ +class DurableTaskRequest(_message.Message): + __slots__ = ("register_worker", "memo", "trigger_runs", "wait_for", "evict_invocation", "worker_status", "complete_memo") + REGISTER_WORKER_FIELD_NUMBER: _ClassVar[int] + MEMO_FIELD_NUMBER: _ClassVar[int] + TRIGGER_RUNS_FIELD_NUMBER: _ClassVar[int] + WAIT_FOR_FIELD_NUMBER: _ClassVar[int] + EVICT_INVOCATION_FIELD_NUMBER: _ClassVar[int] + WORKER_STATUS_FIELD_NUMBER: _ClassVar[int] + COMPLETE_MEMO_FIELD_NUMBER: _ClassVar[int] + register_worker: DurableTaskRequestRegisterWorker + memo: DurableTaskMemoRequest + trigger_runs: DurableTaskTriggerRunsRequest + wait_for: DurableTaskWaitForRequest + evict_invocation: DurableTaskEvictInvocationRequest + worker_status: DurableTaskWorkerStatusRequest + complete_memo: DurableTaskCompleteMemoRequest + def __init__(self, register_worker: _Optional[_Union[DurableTaskRequestRegisterWorker, _Mapping]] = ..., memo: _Optional[_Union[DurableTaskMemoRequest, _Mapping]] = ..., trigger_runs: _Optional[_Union[DurableTaskTriggerRunsRequest, _Mapping]] = ..., wait_for: _Optional[_Union[DurableTaskWaitForRequest, _Mapping]] = ..., evict_invocation: _Optional[_Union[DurableTaskEvictInvocationRequest, _Mapping]] = ..., worker_status: _Optional[_Union[DurableTaskWorkerStatusRequest, _Mapping]] = ..., complete_memo: _Optional[_Union[DurableTaskCompleteMemoRequest, _Mapping]] = ...) -> None: ... + +class DurableTaskErrorResponse(_message.Message): + __slots__ = ("ref", "error_type", "error_message") + REF_FIELD_NUMBER: _ClassVar[int] + ERROR_TYPE_FIELD_NUMBER: _ClassVar[int] + ERROR_MESSAGE_FIELD_NUMBER: _ClassVar[int] + ref: DurableEventLogEntryRef + error_type: DurableTaskErrorType + error_message: str + def __init__(self, ref: _Optional[_Union[DurableEventLogEntryRef, _Mapping]] = ..., error_type: _Optional[_Union[DurableTaskErrorType, str]] = ..., error_message: _Optional[str] = ...) -> None: ... 
+ +class DurableTaskResponse(_message.Message): + __slots__ = ("register_worker", "memo_ack", "trigger_runs_ack", "wait_for_ack", "entry_completed", "error", "eviction_ack", "server_evict") + REGISTER_WORKER_FIELD_NUMBER: _ClassVar[int] + MEMO_ACK_FIELD_NUMBER: _ClassVar[int] + TRIGGER_RUNS_ACK_FIELD_NUMBER: _ClassVar[int] + WAIT_FOR_ACK_FIELD_NUMBER: _ClassVar[int] + ENTRY_COMPLETED_FIELD_NUMBER: _ClassVar[int] + ERROR_FIELD_NUMBER: _ClassVar[int] + EVICTION_ACK_FIELD_NUMBER: _ClassVar[int] + SERVER_EVICT_FIELD_NUMBER: _ClassVar[int] + register_worker: DurableTaskResponseRegisterWorker + memo_ack: DurableTaskEventMemoAckResponse + trigger_runs_ack: DurableTaskEventTriggerRunsAckResponse + wait_for_ack: DurableTaskEventWaitForAckResponse + entry_completed: DurableTaskEventLogEntryCompletedResponse + error: DurableTaskErrorResponse + eviction_ack: DurableTaskEvictionAckResponse + server_evict: DurableTaskServerEvictNotice + def __init__(self, register_worker: _Optional[_Union[DurableTaskResponseRegisterWorker, _Mapping]] = ..., memo_ack: _Optional[_Union[DurableTaskEventMemoAckResponse, _Mapping]] = ..., trigger_runs_ack: _Optional[_Union[DurableTaskEventTriggerRunsAckResponse, _Mapping]] = ..., wait_for_ack: _Optional[_Union[DurableTaskEventWaitForAckResponse, _Mapping]] = ..., entry_completed: _Optional[_Union[DurableTaskEventLogEntryCompletedResponse, _Mapping]] = ..., error: _Optional[_Union[DurableTaskErrorResponse, _Mapping]] = ..., eviction_ack: _Optional[_Union[DurableTaskEvictionAckResponse, _Mapping]] = ..., server_evict: _Optional[_Union[DurableTaskServerEvictNotice, _Mapping]] = ...) -> None: ... 
+ class RegisterDurableEventRequest(_message.Message): __slots__ = ("task_id", "signal_key", "conditions") TASK_ID_FIELD_NUMBER: _ClassVar[int] diff --git a/sdks/python/hatchet_sdk/contracts/v1/dispatcher_pb2_grpc.py b/sdks/python/hatchet_sdk/contracts/v1/dispatcher_pb2_grpc.py index 74d39ceec..96f2b25dd 100644 --- a/sdks/python/hatchet_sdk/contracts/v1/dispatcher_pb2_grpc.py +++ b/sdks/python/hatchet_sdk/contracts/v1/dispatcher_pb2_grpc.py @@ -34,6 +34,11 @@ class V1DispatcherStub(object): Args: channel: A grpc.Channel. """ + self.DurableTask = channel.stream_stream( + '/v1.V1Dispatcher/DurableTask', + request_serializer=v1_dot_dispatcher__pb2.DurableTaskRequest.SerializeToString, + response_deserializer=v1_dot_dispatcher__pb2.DurableTaskResponse.FromString, + _registered_method=True) self.RegisterDurableEvent = channel.unary_unary( '/v1.V1Dispatcher/RegisterDurableEvent', request_serializer=v1_dot_dispatcher__pb2.RegisterDurableEventRequest.SerializeToString, @@ -49,12 +54,19 @@ class V1DispatcherStub(object): class V1DispatcherServicer(object): """Missing associated documentation comment in .proto file.""" - def RegisterDurableEvent(self, request, context): + def DurableTask(self, request_iterator, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def RegisterDurableEvent(self, request, context): + """NOTE: deprecated after DurableEventLog is implemented + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def ListenForDurableEvent(self, request_iterator, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) @@ -64,6 +76,11 @@ class V1DispatcherServicer(object): def add_V1DispatcherServicer_to_server(servicer, server): 
rpc_method_handlers = { + 'DurableTask': grpc.stream_stream_rpc_method_handler( + servicer.DurableTask, + request_deserializer=v1_dot_dispatcher__pb2.DurableTaskRequest.FromString, + response_serializer=v1_dot_dispatcher__pb2.DurableTaskResponse.SerializeToString, + ), 'RegisterDurableEvent': grpc.unary_unary_rpc_method_handler( servicer.RegisterDurableEvent, request_deserializer=v1_dot_dispatcher__pb2.RegisterDurableEventRequest.FromString, @@ -85,6 +102,33 @@ def add_V1DispatcherServicer_to_server(servicer, server): class V1Dispatcher(object): """Missing associated documentation comment in .proto file.""" + @staticmethod + def DurableTask(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_stream( + request_iterator, + target, + '/v1.V1Dispatcher/DurableTask', + v1_dot_dispatcher__pb2.DurableTaskRequest.SerializeToString, + v1_dot_dispatcher__pb2.DurableTaskResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + @staticmethod def RegisterDurableEvent(request, target, diff --git a/sdks/python/hatchet_sdk/contracts/v1/shared/trigger_pb2.py b/sdks/python/hatchet_sdk/contracts/v1/shared/trigger_pb2.py new file mode 100644 index 000000000..a1e6b14f1 --- /dev/null +++ b/sdks/python/hatchet_sdk/contracts/v1/shared/trigger_pb2.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# NO CHECKED-IN PROTOBUF GENCODE +# source: v1/shared/trigger.proto +# Protobuf Python Version: 6.31.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 6, + 31, + 1, + '', + 'v1/shared/trigger.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17v1/shared/trigger.proto\x12\x02v1\"\xe8\x01\n\x13\x44\x65siredWorkerLabels\x12\x16\n\tstr_value\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x16\n\tint_value\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x15\n\x08required\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12\x32\n\ncomparator\x18\x04 \x01(\x0e\x32\x19.v1.WorkerLabelComparatorH\x03\x88\x01\x01\x12\x13\n\x06weight\x18\x05 \x01(\x05H\x04\x88\x01\x01\x42\x0c\n\n_str_valueB\x0c\n\n_int_valueB\x0b\n\t_requiredB\r\n\x0b_comparatorB\t\n\x07_weight\"\xb2\x04\n\x16TriggerWorkflowRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x02 \x01(\t\x12\x16\n\tparent_id\x18\x03 \x01(\tH\x00\x88\x01\x01\x12(\n\x1bparent_task_run_external_id\x18\x04 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0b\x63hild_index\x18\x05 \x01(\x05H\x02\x88\x01\x01\x12\x16\n\tchild_key\x18\x06 \x01(\tH\x03\x88\x01\x01\x12 \n\x13\x61\x64\x64itional_metadata\x18\x07 \x01(\tH\x04\x88\x01\x01\x12\x1e\n\x11\x64\x65sired_worker_id\x18\x08 \x01(\tH\x05\x88\x01\x01\x12\x15\n\x08priority\x18\t \x01(\x05H\x06\x88\x01\x01\x12R\n\x15\x64\x65sired_worker_labels\x18\n \x03(\x0b\x32\x33.v1.TriggerWorkflowRequest.DesiredWorkerLabelsEntry\x1aS\n\x18\x44\x65siredWorkerLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 
\x01(\x0b\x32\x17.v1.DesiredWorkerLabels:\x02\x38\x01\x42\x0c\n\n_parent_idB\x1e\n\x1c_parent_task_run_external_idB\x0e\n\x0c_child_indexB\x0c\n\n_child_keyB\x16\n\x14_additional_metadataB\x14\n\x12_desired_worker_idB\x0b\n\t_priority*\x85\x01\n\x15WorkerLabelComparator\x12\t\n\x05\x45QUAL\x10\x00\x12\r\n\tNOT_EQUAL\x10\x01\x12\x10\n\x0cGREATER_THAN\x10\x02\x12\x19\n\x15GREATER_THAN_OR_EQUAL\x10\x03\x12\r\n\tLESS_THAN\x10\x04\x12\x16\n\x12LESS_THAN_OR_EQUAL\x10\x05\x42\x42Z@github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1b\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'v1.shared.trigger_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'Z@github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1' + _globals['_TRIGGERWORKFLOWREQUEST_DESIREDWORKERLABELSENTRY']._loaded_options = None + _globals['_TRIGGERWORKFLOWREQUEST_DESIREDWORKERLABELSENTRY']._serialized_options = b'8\001' + _globals['_WORKERLABELCOMPARATOR']._serialized_start=832 + _globals['_WORKERLABELCOMPARATOR']._serialized_end=965 + _globals['_DESIREDWORKERLABELS']._serialized_start=32 + _globals['_DESIREDWORKERLABELS']._serialized_end=264 + _globals['_TRIGGERWORKFLOWREQUEST']._serialized_start=267 + _globals['_TRIGGERWORKFLOWREQUEST']._serialized_end=829 + _globals['_TRIGGERWORKFLOWREQUEST_DESIREDWORKERLABELSENTRY']._serialized_start=611 + _globals['_TRIGGERWORKFLOWREQUEST_DESIREDWORKERLABELSENTRY']._serialized_end=694 +# @@protoc_insertion_point(module_scope) diff --git a/sdks/python/hatchet_sdk/contracts/v1/shared/trigger_pb2.pyi b/sdks/python/hatchet_sdk/contracts/v1/shared/trigger_pb2.pyi new file mode 100644 index 000000000..28aefdcb4 --- /dev/null +++ b/sdks/python/hatchet_sdk/contracts/v1/shared/trigger_pb2.pyi @@ -0,0 +1,68 @@ +from google.protobuf.internal import 
containers as _containers +from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from collections.abc import Mapping as _Mapping +from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class WorkerLabelComparator(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + EQUAL: _ClassVar[WorkerLabelComparator] + NOT_EQUAL: _ClassVar[WorkerLabelComparator] + GREATER_THAN: _ClassVar[WorkerLabelComparator] + GREATER_THAN_OR_EQUAL: _ClassVar[WorkerLabelComparator] + LESS_THAN: _ClassVar[WorkerLabelComparator] + LESS_THAN_OR_EQUAL: _ClassVar[WorkerLabelComparator] +EQUAL: WorkerLabelComparator +NOT_EQUAL: WorkerLabelComparator +GREATER_THAN: WorkerLabelComparator +GREATER_THAN_OR_EQUAL: WorkerLabelComparator +LESS_THAN: WorkerLabelComparator +LESS_THAN_OR_EQUAL: WorkerLabelComparator + +class DesiredWorkerLabels(_message.Message): + __slots__ = ("str_value", "int_value", "required", "comparator", "weight") + STR_VALUE_FIELD_NUMBER: _ClassVar[int] + INT_VALUE_FIELD_NUMBER: _ClassVar[int] + REQUIRED_FIELD_NUMBER: _ClassVar[int] + COMPARATOR_FIELD_NUMBER: _ClassVar[int] + WEIGHT_FIELD_NUMBER: _ClassVar[int] + str_value: str + int_value: int + required: bool + comparator: WorkerLabelComparator + weight: int + def __init__(self, str_value: _Optional[str] = ..., int_value: _Optional[int] = ..., required: bool = ..., comparator: _Optional[_Union[WorkerLabelComparator, str]] = ..., weight: _Optional[int] = ...) -> None: ... 
+ +class TriggerWorkflowRequest(_message.Message): + __slots__ = ("name", "input", "parent_id", "parent_task_run_external_id", "child_index", "child_key", "additional_metadata", "desired_worker_id", "priority", "desired_worker_labels") + class DesiredWorkerLabelsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: DesiredWorkerLabels + def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[DesiredWorkerLabels, _Mapping]] = ...) -> None: ... + NAME_FIELD_NUMBER: _ClassVar[int] + INPUT_FIELD_NUMBER: _ClassVar[int] + PARENT_ID_FIELD_NUMBER: _ClassVar[int] + PARENT_TASK_RUN_EXTERNAL_ID_FIELD_NUMBER: _ClassVar[int] + CHILD_INDEX_FIELD_NUMBER: _ClassVar[int] + CHILD_KEY_FIELD_NUMBER: _ClassVar[int] + ADDITIONAL_METADATA_FIELD_NUMBER: _ClassVar[int] + DESIRED_WORKER_ID_FIELD_NUMBER: _ClassVar[int] + PRIORITY_FIELD_NUMBER: _ClassVar[int] + DESIRED_WORKER_LABELS_FIELD_NUMBER: _ClassVar[int] + name: str + input: str + parent_id: str + parent_task_run_external_id: str + child_index: int + child_key: str + additional_metadata: str + desired_worker_id: str + priority: int + desired_worker_labels: _containers.MessageMap[str, DesiredWorkerLabels] + def __init__(self, name: _Optional[str] = ..., input: _Optional[str] = ..., parent_id: _Optional[str] = ..., parent_task_run_external_id: _Optional[str] = ..., child_index: _Optional[int] = ..., child_key: _Optional[str] = ..., additional_metadata: _Optional[str] = ..., desired_worker_id: _Optional[str] = ..., priority: _Optional[int] = ..., desired_worker_labels: _Optional[_Mapping[str, DesiredWorkerLabels]] = ...) -> None: ... 
diff --git a/sdks/python/hatchet_sdk/contracts/v1/shared/trigger_pb2_grpc.py b/sdks/python/hatchet_sdk/contracts/v1/shared/trigger_pb2_grpc.py new file mode 100644 index 000000000..d3ccb592d --- /dev/null +++ b/sdks/python/hatchet_sdk/contracts/v1/shared/trigger_pb2_grpc.py @@ -0,0 +1,24 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc +import warnings + + +GRPC_GENERATED_VERSION = '1.76.0' +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f'The grpc package installed is at version {GRPC_VERSION},' + + ' but the generated code in v1/shared/trigger_pb2_grpc.py depends on' + + f' grpcio>={GRPC_GENERATED_VERSION}.' + + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' 
+ ) diff --git a/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.py b/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.py index 8ac7d7101..3a9ab133c 100644 --- a/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.py +++ b/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.py @@ -24,9 +24,10 @@ _sym_db = _symbol_database.Default() from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 from hatchet_sdk.contracts.v1.shared import condition_pb2 as v1_dot_shared_dot_condition__pb2 +from hatchet_sdk.contracts.v1.shared import trigger_pb2 as v1_dot_shared_dot_trigger__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x12v1/workflows.proto\x12\x02v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x19v1/shared/condition.proto\"[\n\x12\x43\x61ncelTasksRequest\x12\x14\n\x0c\x65xternal_ids\x18\x01 \x03(\t\x12$\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0f.v1.TasksFilterH\x00\x88\x01\x01\x42\t\n\x07_filter\"[\n\x12ReplayTasksRequest\x12\x14\n\x0c\x65xternal_ids\x18\x01 \x03(\t\x12$\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0f.v1.TasksFilterH\x00\x88\x01\x01\x42\t\n\x07_filter\"\xb7\x01\n\x0bTasksFilter\x12\x10\n\x08statuses\x18\x01 \x03(\t\x12)\n\x05since\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12.\n\x05until\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x88\x01\x01\x12\x14\n\x0cworkflow_ids\x18\x04 \x03(\t\x12\x1b\n\x13\x61\x64\x64itional_metadata\x18\x05 \x03(\tB\x08\n\x06_until\".\n\x13\x43\x61ncelTasksResponse\x12\x17\n\x0f\x63\x61ncelled_tasks\x18\x01 \x03(\t\"-\n\x13ReplayTasksResponse\x12\x16\n\x0ereplayed_tasks\x18\x01 \x03(\t\"\xae\x02\n\x19TriggerWorkflowRunRequest\x12\x15\n\rworkflow_name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x02 \x01(\x0c\x12\x1b\n\x13\x61\x64\x64itional_metadata\x18\x03 \x01(\x0c\x12\x15\n\x08priority\x18\x04 \x01(\x05H\x00\x88\x01\x01\x12U\n\x15\x64\x65sired_worker_labels\x18\x05 
\x03(\x0b\x32\x36.v1.TriggerWorkflowRunRequest.DesiredWorkerLabelsEntry\x1aS\n\x18\x44\x65siredWorkerLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.v1.DesiredWorkerLabels:\x02\x38\x01\x42\x0b\n\t_priority\"1\n\x1aTriggerWorkflowRunResponse\x12\x13\n\x0b\x65xternal_id\x18\x01 \x01(\t\"\xac\x04\n\x1c\x43reateWorkflowVersionRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x0f\n\x07version\x18\x03 \x01(\t\x12\x16\n\x0e\x65vent_triggers\x18\x04 \x03(\t\x12\x15\n\rcron_triggers\x18\x05 \x03(\t\x12!\n\x05tasks\x18\x06 \x03(\x0b\x32\x12.v1.CreateTaskOpts\x12$\n\x0b\x63oncurrency\x18\x07 \x01(\x0b\x32\x0f.v1.Concurrency\x12\x17\n\ncron_input\x18\x08 \x01(\tH\x00\x88\x01\x01\x12\x30\n\x0fon_failure_task\x18\t \x01(\x0b\x32\x12.v1.CreateTaskOptsH\x01\x88\x01\x01\x12\'\n\x06sticky\x18\n \x01(\x0e\x32\x12.v1.StickyStrategyH\x02\x88\x01\x01\x12\x1d\n\x10\x64\x65\x66\x61ult_priority\x18\x0b \x01(\x05H\x03\x88\x01\x01\x12(\n\x0f\x63oncurrency_arr\x18\x0c \x03(\x0b\x32\x0f.v1.Concurrency\x12*\n\x0f\x64\x65\x66\x61ult_filters\x18\r \x03(\x0b\x32\x11.v1.DefaultFilter\x12\x1e\n\x11input_json_schema\x18\x0e \x01(\x0cH\x04\x88\x01\x01\x42\r\n\x0b_cron_inputB\x12\n\x10_on_failure_taskB\t\n\x07_stickyB\x13\n\x11_default_priorityB\x14\n\x12_input_json_schema\"T\n\rDefaultFilter\x12\x12\n\nexpression\x18\x01 \x01(\t\x12\r\n\x05scope\x18\x02 \x01(\t\x12\x14\n\x07payload\x18\x03 \x01(\x0cH\x00\x88\x01\x01\x42\n\n\x08_payload\"\x93\x01\n\x0b\x43oncurrency\x12\x12\n\nexpression\x18\x01 \x01(\t\x12\x15\n\x08max_runs\x18\x02 \x01(\x05H\x00\x88\x01\x01\x12\x39\n\x0elimit_strategy\x18\x03 \x01(\x0e\x32\x1c.v1.ConcurrencyLimitStrategyH\x01\x88\x01\x01\x42\x0b\n\t_max_runsB\x11\n\x0f_limit_strategy\"\xe8\x01\n\x13\x44\x65siredWorkerLabels\x12\x16\n\tstr_value\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x16\n\tint_value\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x15\n\x08required\x18\x03 
\x01(\x08H\x02\x88\x01\x01\x12\x32\n\ncomparator\x18\x04 \x01(\x0e\x32\x19.v1.WorkerLabelComparatorH\x03\x88\x01\x01\x12\x13\n\x06weight\x18\x05 \x01(\x05H\x04\x88\x01\x01\x42\x0c\n\n_str_valueB\x0c\n\n_int_valueB\x0b\n\t_requiredB\r\n\x0b_comparatorB\t\n\x07_weight\"\xb7\x05\n\x0e\x43reateTaskOpts\x12\x13\n\x0breadable_id\x18\x01 \x01(\t\x12\x0e\n\x06\x61\x63tion\x18\x02 \x01(\t\x12\x0f\n\x07timeout\x18\x03 \x01(\t\x12\x0e\n\x06inputs\x18\x04 \x01(\t\x12\x0f\n\x07parents\x18\x05 \x03(\t\x12\x0f\n\x07retries\x18\x06 \x01(\x05\x12,\n\x0brate_limits\x18\x07 \x03(\x0b\x32\x17.v1.CreateTaskRateLimit\x12;\n\rworker_labels\x18\x08 \x03(\x0b\x32$.v1.CreateTaskOpts.WorkerLabelsEntry\x12\x1b\n\x0e\x62\x61\x63koff_factor\x18\t \x01(\x02H\x00\x88\x01\x01\x12 \n\x13\x62\x61\x63koff_max_seconds\x18\n \x01(\x05H\x01\x88\x01\x01\x12$\n\x0b\x63oncurrency\x18\x0b \x03(\x0b\x32\x0f.v1.Concurrency\x12+\n\nconditions\x18\x0c \x01(\x0b\x32\x12.v1.TaskConditionsH\x02\x88\x01\x01\x12\x1d\n\x10schedule_timeout\x18\r \x01(\tH\x03\x88\x01\x01\x12\x12\n\nis_durable\x18\x0e \x01(\x08\x12;\n\rslot_requests\x18\x0f \x03(\x0b\x32$.v1.CreateTaskOpts.SlotRequestsEntry\x1aL\n\x11WorkerLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.v1.DesiredWorkerLabels:\x02\x38\x01\x1a\x33\n\x11SlotRequestsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x42\x11\n\x0f_backoff_factorB\x16\n\x14_backoff_max_secondsB\r\n\x0b_conditionsB\x13\n\x11_schedule_timeout\"\xfd\x01\n\x13\x43reateTaskRateLimit\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x12\n\x05units\x18\x02 \x01(\x05H\x00\x88\x01\x01\x12\x15\n\x08key_expr\x18\x03 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nunits_expr\x18\x04 \x01(\tH\x02\x88\x01\x01\x12\x1e\n\x11limit_values_expr\x18\x05 \x01(\tH\x03\x88\x01\x01\x12,\n\x08\x64uration\x18\x06 
\x01(\x0e\x32\x15.v1.RateLimitDurationH\x04\x88\x01\x01\x42\x08\n\x06_unitsB\x0b\n\t_key_exprB\r\n\x0b_units_exprB\x14\n\x12_limit_values_exprB\x0b\n\t_duration\"@\n\x1d\x43reateWorkflowVersionResponse\x12\n\n\x02id\x18\x01 \x01(\t\x12\x13\n\x0bworkflow_id\x18\x02 \x01(\t\"+\n\x14GetRunDetailsRequest\x12\x13\n\x0b\x65xternal_id\x18\x01 \x01(\t\"\x96\x01\n\rTaskRunDetail\x12\x13\n\x0b\x65xternal_id\x18\x01 \x01(\t\x12\x1d\n\x06status\x18\x02 \x01(\x0e\x32\r.v1.RunStatus\x12\x12\n\x05\x65rror\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06output\x18\x04 \x01(\x0cH\x01\x88\x01\x01\x12\x13\n\x0breadable_id\x18\x05 \x01(\tB\x08\n\x06_errorB\t\n\x07_output\"\xf0\x01\n\x15GetRunDetailsResponse\x12\r\n\x05input\x18\x01 \x01(\x0c\x12\x1d\n\x06status\x18\x02 \x01(\x0e\x32\r.v1.RunStatus\x12:\n\ttask_runs\x18\x03 \x03(\x0b\x32\'.v1.GetRunDetailsResponse.TaskRunsEntry\x12\x0c\n\x04\x64one\x18\x04 \x01(\x08\x12\x1b\n\x13\x61\x64\x64itional_metadata\x18\x05 \x01(\x0c\x1a\x42\n\rTaskRunsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 
\x01(\x0b\x32\x11.v1.TaskRunDetail:\x02\x38\x01*$\n\x0eStickyStrategy\x12\x08\n\x04SOFT\x10\x00\x12\x08\n\x04HARD\x10\x01*]\n\x11RateLimitDuration\x12\n\n\x06SECOND\x10\x00\x12\n\n\x06MINUTE\x10\x01\x12\x08\n\x04HOUR\x10\x02\x12\x07\n\x03\x44\x41Y\x10\x03\x12\x08\n\x04WEEK\x10\x04\x12\t\n\x05MONTH\x10\x05\x12\x08\n\x04YEAR\x10\x06*N\n\tRunStatus\x12\n\n\x06QUEUED\x10\x00\x12\x0b\n\x07RUNNING\x10\x01\x12\r\n\tCOMPLETED\x10\x02\x12\n\n\x06\x46\x41ILED\x10\x03\x12\r\n\tCANCELLED\x10\x04*\x7f\n\x18\x43oncurrencyLimitStrategy\x12\x16\n\x12\x43\x41NCEL_IN_PROGRESS\x10\x00\x12\x0f\n\x0b\x44ROP_NEWEST\x10\x01\x12\x10\n\x0cQUEUE_NEWEST\x10\x02\x12\x15\n\x11GROUP_ROUND_ROBIN\x10\x03\x12\x11\n\rCANCEL_NEWEST\x10\x04*\x85\x01\n\x15WorkerLabelComparator\x12\t\n\x05\x45QUAL\x10\x00\x12\r\n\tNOT_EQUAL\x10\x01\x12\x10\n\x0cGREATER_THAN\x10\x02\x12\x19\n\x15GREATER_THAN_OR_EQUAL\x10\x03\x12\r\n\tLESS_THAN\x10\x04\x12\x16\n\x12LESS_THAN_OR_EQUAL\x10\x05\x32\xfd\x02\n\x0c\x41\x64minService\x12R\n\x0bPutWorkflow\x12 .v1.CreateWorkflowVersionRequest\x1a!.v1.CreateWorkflowVersionResponse\x12>\n\x0b\x43\x61ncelTasks\x12\x16.v1.CancelTasksRequest\x1a\x17.v1.CancelTasksResponse\x12>\n\x0bReplayTasks\x12\x16.v1.ReplayTasksRequest\x1a\x17.v1.ReplayTasksResponse\x12S\n\x12TriggerWorkflowRun\x12\x1d.v1.TriggerWorkflowRunRequest\x1a\x1e.v1.TriggerWorkflowRunResponse\x12\x44\n\rGetRunDetails\x12\x18.v1.GetRunDetailsRequest\x1a\x19.v1.GetRunDetailsResponseBBZ@github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1b\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x12v1/workflows.proto\x12\x02v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x19v1/shared/condition.proto\x1a\x17v1/shared/trigger.proto\"[\n\x12\x43\x61ncelTasksRequest\x12\x14\n\x0c\x65xternal_ids\x18\x01 \x03(\t\x12$\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0f.v1.TasksFilterH\x00\x88\x01\x01\x42\t\n\x07_filter\"[\n\x12ReplayTasksRequest\x12\x14\n\x0c\x65xternal_ids\x18\x01 
\x03(\t\x12$\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0f.v1.TasksFilterH\x00\x88\x01\x01\x42\t\n\x07_filter\"\xb7\x01\n\x0bTasksFilter\x12\x10\n\x08statuses\x18\x01 \x03(\t\x12)\n\x05since\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12.\n\x05until\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x88\x01\x01\x12\x14\n\x0cworkflow_ids\x18\x04 \x03(\t\x12\x1b\n\x13\x61\x64\x64itional_metadata\x18\x05 \x03(\tB\x08\n\x06_until\".\n\x13\x43\x61ncelTasksResponse\x12\x17\n\x0f\x63\x61ncelled_tasks\x18\x01 \x03(\t\"-\n\x13ReplayTasksResponse\x12\x16\n\x0ereplayed_tasks\x18\x01 \x03(\t\"\xae\x02\n\x19TriggerWorkflowRunRequest\x12\x15\n\rworkflow_name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x02 \x01(\x0c\x12\x1b\n\x13\x61\x64\x64itional_metadata\x18\x03 \x01(\x0c\x12\x15\n\x08priority\x18\x04 \x01(\x05H\x00\x88\x01\x01\x12U\n\x15\x64\x65sired_worker_labels\x18\x05 \x03(\x0b\x32\x36.v1.TriggerWorkflowRunRequest.DesiredWorkerLabelsEntry\x1aS\n\x18\x44\x65siredWorkerLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.v1.DesiredWorkerLabels:\x02\x38\x01\x42\x0b\n\t_priority\"1\n\x1aTriggerWorkflowRunResponse\x12\x13\n\x0b\x65xternal_id\x18\x01 \x01(\t\"X\n\x18\x42ranchDurableTaskRequest\x12\x18\n\x10task_external_id\x18\x01 \x01(\t\x12\x0f\n\x07node_id\x18\x02 \x01(\x03\x12\x11\n\tbranch_id\x18\x03 \x01(\x03\"Y\n\x19\x42ranchDurableTaskResponse\x12\x18\n\x10task_external_id\x18\x01 \x01(\t\x12\x0f\n\x07node_id\x18\x02 \x01(\x03\x12\x11\n\tbranch_id\x18\x03 \x01(\x03\"\xac\x04\n\x1c\x43reateWorkflowVersionRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x0f\n\x07version\x18\x03 \x01(\t\x12\x16\n\x0e\x65vent_triggers\x18\x04 \x03(\t\x12\x15\n\rcron_triggers\x18\x05 \x03(\t\x12!\n\x05tasks\x18\x06 \x03(\x0b\x32\x12.v1.CreateTaskOpts\x12$\n\x0b\x63oncurrency\x18\x07 \x01(\x0b\x32\x0f.v1.Concurrency\x12\x17\n\ncron_input\x18\x08 \x01(\tH\x00\x88\x01\x01\x12\x30\n\x0fon_failure_task\x18\t 
\x01(\x0b\x32\x12.v1.CreateTaskOptsH\x01\x88\x01\x01\x12\'\n\x06sticky\x18\n \x01(\x0e\x32\x12.v1.StickyStrategyH\x02\x88\x01\x01\x12\x1d\n\x10\x64\x65\x66\x61ult_priority\x18\x0b \x01(\x05H\x03\x88\x01\x01\x12(\n\x0f\x63oncurrency_arr\x18\x0c \x03(\x0b\x32\x0f.v1.Concurrency\x12*\n\x0f\x64\x65\x66\x61ult_filters\x18\r \x03(\x0b\x32\x11.v1.DefaultFilter\x12\x1e\n\x11input_json_schema\x18\x0e \x01(\x0cH\x04\x88\x01\x01\x42\r\n\x0b_cron_inputB\x12\n\x10_on_failure_taskB\t\n\x07_stickyB\x13\n\x11_default_priorityB\x14\n\x12_input_json_schema\"T\n\rDefaultFilter\x12\x12\n\nexpression\x18\x01 \x01(\t\x12\r\n\x05scope\x18\x02 \x01(\t\x12\x14\n\x07payload\x18\x03 \x01(\x0cH\x00\x88\x01\x01\x42\n\n\x08_payload\"\x93\x01\n\x0b\x43oncurrency\x12\x12\n\nexpression\x18\x01 \x01(\t\x12\x15\n\x08max_runs\x18\x02 \x01(\x05H\x00\x88\x01\x01\x12\x39\n\x0elimit_strategy\x18\x03 \x01(\x0e\x32\x1c.v1.ConcurrencyLimitStrategyH\x01\x88\x01\x01\x42\x0b\n\t_max_runsB\x11\n\x0f_limit_strategy\"\xb7\x05\n\x0e\x43reateTaskOpts\x12\x13\n\x0breadable_id\x18\x01 \x01(\t\x12\x0e\n\x06\x61\x63tion\x18\x02 \x01(\t\x12\x0f\n\x07timeout\x18\x03 \x01(\t\x12\x0e\n\x06inputs\x18\x04 \x01(\t\x12\x0f\n\x07parents\x18\x05 \x03(\t\x12\x0f\n\x07retries\x18\x06 \x01(\x05\x12,\n\x0brate_limits\x18\x07 \x03(\x0b\x32\x17.v1.CreateTaskRateLimit\x12;\n\rworker_labels\x18\x08 \x03(\x0b\x32$.v1.CreateTaskOpts.WorkerLabelsEntry\x12\x1b\n\x0e\x62\x61\x63koff_factor\x18\t \x01(\x02H\x00\x88\x01\x01\x12 \n\x13\x62\x61\x63koff_max_seconds\x18\n \x01(\x05H\x01\x88\x01\x01\x12$\n\x0b\x63oncurrency\x18\x0b \x03(\x0b\x32\x0f.v1.Concurrency\x12+\n\nconditions\x18\x0c \x01(\x0b\x32\x12.v1.TaskConditionsH\x02\x88\x01\x01\x12\x1d\n\x10schedule_timeout\x18\r \x01(\tH\x03\x88\x01\x01\x12\x12\n\nis_durable\x18\x0e \x01(\x08\x12;\n\rslot_requests\x18\x0f \x03(\x0b\x32$.v1.CreateTaskOpts.SlotRequestsEntry\x1aL\n\x11WorkerLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 
\x01(\x0b\x32\x17.v1.DesiredWorkerLabels:\x02\x38\x01\x1a\x33\n\x11SlotRequestsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x42\x11\n\x0f_backoff_factorB\x16\n\x14_backoff_max_secondsB\r\n\x0b_conditionsB\x13\n\x11_schedule_timeout\"\xfd\x01\n\x13\x43reateTaskRateLimit\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x12\n\x05units\x18\x02 \x01(\x05H\x00\x88\x01\x01\x12\x15\n\x08key_expr\x18\x03 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nunits_expr\x18\x04 \x01(\tH\x02\x88\x01\x01\x12\x1e\n\x11limit_values_expr\x18\x05 \x01(\tH\x03\x88\x01\x01\x12,\n\x08\x64uration\x18\x06 \x01(\x0e\x32\x15.v1.RateLimitDurationH\x04\x88\x01\x01\x42\x08\n\x06_unitsB\x0b\n\t_key_exprB\r\n\x0b_units_exprB\x14\n\x12_limit_values_exprB\x0b\n\t_duration\"@\n\x1d\x43reateWorkflowVersionResponse\x12\n\n\x02id\x18\x01 \x01(\t\x12\x13\n\x0bworkflow_id\x18\x02 \x01(\t\"+\n\x14GetRunDetailsRequest\x12\x13\n\x0b\x65xternal_id\x18\x01 \x01(\t\"\xaa\x01\n\rTaskRunDetail\x12\x13\n\x0b\x65xternal_id\x18\x01 \x01(\t\x12\x1d\n\x06status\x18\x02 \x01(\x0e\x32\r.v1.RunStatus\x12\x12\n\x05\x65rror\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06output\x18\x04 \x01(\x0cH\x01\x88\x01\x01\x12\x13\n\x0breadable_id\x18\x05 \x01(\t\x12\x12\n\nis_evicted\x18\x06 \x01(\x08\x42\x08\n\x06_errorB\t\n\x07_output\"\x84\x02\n\x15GetRunDetailsResponse\x12\r\n\x05input\x18\x01 \x01(\x0c\x12\x1d\n\x06status\x18\x02 \x01(\x0e\x32\r.v1.RunStatus\x12:\n\ttask_runs\x18\x03 \x03(\x0b\x32\'.v1.GetRunDetailsResponse.TaskRunsEntry\x12\x0c\n\x04\x64one\x18\x04 \x01(\x08\x12\x1b\n\x13\x61\x64\x64itional_metadata\x18\x05 \x01(\x0c\x12\x12\n\nis_evicted\x18\x06 \x01(\x08\x1a\x42\n\rTaskRunsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 
\x01(\x0b\x32\x11.v1.TaskRunDetail:\x02\x38\x01*$\n\x0eStickyStrategy\x12\x08\n\x04SOFT\x10\x00\x12\x08\n\x04HARD\x10\x01*]\n\x11RateLimitDuration\x12\n\n\x06SECOND\x10\x00\x12\n\n\x06MINUTE\x10\x01\x12\x08\n\x04HOUR\x10\x02\x12\x07\n\x03\x44\x41Y\x10\x03\x12\x08\n\x04WEEK\x10\x04\x12\t\n\x05MONTH\x10\x05\x12\x08\n\x04YEAR\x10\x06*[\n\tRunStatus\x12\n\n\x06QUEUED\x10\x00\x12\x0b\n\x07RUNNING\x10\x01\x12\r\n\tCOMPLETED\x10\x02\x12\n\n\x06\x46\x41ILED\x10\x03\x12\r\n\tCANCELLED\x10\x04\x12\x0b\n\x07\x45VICTED\x10\x05*\x7f\n\x18\x43oncurrencyLimitStrategy\x12\x16\n\x12\x43\x41NCEL_IN_PROGRESS\x10\x00\x12\x0f\n\x0b\x44ROP_NEWEST\x10\x01\x12\x10\n\x0cQUEUE_NEWEST\x10\x02\x12\x15\n\x11GROUP_ROUND_ROBIN\x10\x03\x12\x11\n\rCANCEL_NEWEST\x10\x04\x32\xcf\x03\n\x0c\x41\x64minService\x12R\n\x0bPutWorkflow\x12 .v1.CreateWorkflowVersionRequest\x1a!.v1.CreateWorkflowVersionResponse\x12>\n\x0b\x43\x61ncelTasks\x12\x16.v1.CancelTasksRequest\x1a\x17.v1.CancelTasksResponse\x12>\n\x0bReplayTasks\x12\x16.v1.ReplayTasksRequest\x1a\x17.v1.ReplayTasksResponse\x12S\n\x12TriggerWorkflowRun\x12\x1d.v1.TriggerWorkflowRunRequest\x1a\x1e.v1.TriggerWorkflowRunResponse\x12\x44\n\rGetRunDetails\x12\x18.v1.GetRunDetailsRequest\x1a\x19.v1.GetRunDetailsResponse\x12P\n\x11\x42ranchDurableTask\x12\x1c.v1.BranchDurableTaskRequest\x1a\x1d.v1.BranchDurableTaskResponseBBZ@github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1b\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -42,58 +43,58 @@ if not _descriptor._USE_C_DESCRIPTORS: _globals['_CREATETASKOPTS_SLOTREQUESTSENTRY']._serialized_options = b'8\001' _globals['_GETRUNDETAILSRESPONSE_TASKRUNSENTRY']._loaded_options = None _globals['_GETRUNDETAILSRESPONSE_TASKRUNSENTRY']._serialized_options = b'8\001' - _globals['_STICKYSTRATEGY']._serialized_start=3400 - _globals['_STICKYSTRATEGY']._serialized_end=3436 - _globals['_RATELIMITDURATION']._serialized_start=3438 - 
_globals['_RATELIMITDURATION']._serialized_end=3531 - _globals['_RUNSTATUS']._serialized_start=3533 - _globals['_RUNSTATUS']._serialized_end=3611 - _globals['_CONCURRENCYLIMITSTRATEGY']._serialized_start=3613 - _globals['_CONCURRENCYLIMITSTRATEGY']._serialized_end=3740 - _globals['_WORKERLABELCOMPARATOR']._serialized_start=3743 - _globals['_WORKERLABELCOMPARATOR']._serialized_end=3876 - _globals['_CANCELTASKSREQUEST']._serialized_start=86 - _globals['_CANCELTASKSREQUEST']._serialized_end=177 - _globals['_REPLAYTASKSREQUEST']._serialized_start=179 - _globals['_REPLAYTASKSREQUEST']._serialized_end=270 - _globals['_TASKSFILTER']._serialized_start=273 - _globals['_TASKSFILTER']._serialized_end=456 - _globals['_CANCELTASKSRESPONSE']._serialized_start=458 - _globals['_CANCELTASKSRESPONSE']._serialized_end=504 - _globals['_REPLAYTASKSRESPONSE']._serialized_start=506 - _globals['_REPLAYTASKSRESPONSE']._serialized_end=551 - _globals['_TRIGGERWORKFLOWRUNREQUEST']._serialized_start=554 - _globals['_TRIGGERWORKFLOWRUNREQUEST']._serialized_end=856 - _globals['_TRIGGERWORKFLOWRUNREQUEST_DESIREDWORKERLABELSENTRY']._serialized_start=760 - _globals['_TRIGGERWORKFLOWRUNREQUEST_DESIREDWORKERLABELSENTRY']._serialized_end=843 - _globals['_TRIGGERWORKFLOWRUNRESPONSE']._serialized_start=858 - _globals['_TRIGGERWORKFLOWRUNRESPONSE']._serialized_end=907 - _globals['_CREATEWORKFLOWVERSIONREQUEST']._serialized_start=910 - _globals['_CREATEWORKFLOWVERSIONREQUEST']._serialized_end=1466 - _globals['_DEFAULTFILTER']._serialized_start=1468 - _globals['_DEFAULTFILTER']._serialized_end=1552 - _globals['_CONCURRENCY']._serialized_start=1555 - _globals['_CONCURRENCY']._serialized_end=1702 - _globals['_DESIREDWORKERLABELS']._serialized_start=1705 - _globals['_DESIREDWORKERLABELS']._serialized_end=1937 - _globals['_CREATETASKOPTS']._serialized_start=1940 - _globals['_CREATETASKOPTS']._serialized_end=2635 - _globals['_CREATETASKOPTS_WORKERLABELSENTRY']._serialized_start=2427 - 
_globals['_CREATETASKOPTS_WORKERLABELSENTRY']._serialized_end=2503 - _globals['_CREATETASKOPTS_SLOTREQUESTSENTRY']._serialized_start=2505 - _globals['_CREATETASKOPTS_SLOTREQUESTSENTRY']._serialized_end=2556 - _globals['_CREATETASKRATELIMIT']._serialized_start=2638 - _globals['_CREATETASKRATELIMIT']._serialized_end=2891 - _globals['_CREATEWORKFLOWVERSIONRESPONSE']._serialized_start=2893 - _globals['_CREATEWORKFLOWVERSIONRESPONSE']._serialized_end=2957 - _globals['_GETRUNDETAILSREQUEST']._serialized_start=2959 - _globals['_GETRUNDETAILSREQUEST']._serialized_end=3002 - _globals['_TASKRUNDETAIL']._serialized_start=3005 - _globals['_TASKRUNDETAIL']._serialized_end=3155 - _globals['_GETRUNDETAILSRESPONSE']._serialized_start=3158 - _globals['_GETRUNDETAILSRESPONSE']._serialized_end=3398 - _globals['_GETRUNDETAILSRESPONSE_TASKRUNSENTRY']._serialized_start=3332 - _globals['_GETRUNDETAILSRESPONSE_TASKRUNSENTRY']._serialized_end=3398 - _globals['_ADMINSERVICE']._serialized_start=3879 - _globals['_ADMINSERVICE']._serialized_end=4260 + _globals['_STICKYSTRATEGY']._serialized_start=3411 + _globals['_STICKYSTRATEGY']._serialized_end=3447 + _globals['_RATELIMITDURATION']._serialized_start=3449 + _globals['_RATELIMITDURATION']._serialized_end=3542 + _globals['_RUNSTATUS']._serialized_start=3544 + _globals['_RUNSTATUS']._serialized_end=3635 + _globals['_CONCURRENCYLIMITSTRATEGY']._serialized_start=3637 + _globals['_CONCURRENCYLIMITSTRATEGY']._serialized_end=3764 + _globals['_CANCELTASKSREQUEST']._serialized_start=111 + _globals['_CANCELTASKSREQUEST']._serialized_end=202 + _globals['_REPLAYTASKSREQUEST']._serialized_start=204 + _globals['_REPLAYTASKSREQUEST']._serialized_end=295 + _globals['_TASKSFILTER']._serialized_start=298 + _globals['_TASKSFILTER']._serialized_end=481 + _globals['_CANCELTASKSRESPONSE']._serialized_start=483 + _globals['_CANCELTASKSRESPONSE']._serialized_end=529 + _globals['_REPLAYTASKSRESPONSE']._serialized_start=531 + 
_globals['_REPLAYTASKSRESPONSE']._serialized_end=576 + _globals['_TRIGGERWORKFLOWRUNREQUEST']._serialized_start=579 + _globals['_TRIGGERWORKFLOWRUNREQUEST']._serialized_end=881 + _globals['_TRIGGERWORKFLOWRUNREQUEST_DESIREDWORKERLABELSENTRY']._serialized_start=785 + _globals['_TRIGGERWORKFLOWRUNREQUEST_DESIREDWORKERLABELSENTRY']._serialized_end=868 + _globals['_TRIGGERWORKFLOWRUNRESPONSE']._serialized_start=883 + _globals['_TRIGGERWORKFLOWRUNRESPONSE']._serialized_end=932 + _globals['_BRANCHDURABLETASKREQUEST']._serialized_start=934 + _globals['_BRANCHDURABLETASKREQUEST']._serialized_end=1022 + _globals['_BRANCHDURABLETASKRESPONSE']._serialized_start=1024 + _globals['_BRANCHDURABLETASKRESPONSE']._serialized_end=1113 + _globals['_CREATEWORKFLOWVERSIONREQUEST']._serialized_start=1116 + _globals['_CREATEWORKFLOWVERSIONREQUEST']._serialized_end=1672 + _globals['_DEFAULTFILTER']._serialized_start=1674 + _globals['_DEFAULTFILTER']._serialized_end=1758 + _globals['_CONCURRENCY']._serialized_start=1761 + _globals['_CONCURRENCY']._serialized_end=1908 + _globals['_CREATETASKOPTS']._serialized_start=1911 + _globals['_CREATETASKOPTS']._serialized_end=2606 + _globals['_CREATETASKOPTS_WORKERLABELSENTRY']._serialized_start=2398 + _globals['_CREATETASKOPTS_WORKERLABELSENTRY']._serialized_end=2474 + _globals['_CREATETASKOPTS_SLOTREQUESTSENTRY']._serialized_start=2476 + _globals['_CREATETASKOPTS_SLOTREQUESTSENTRY']._serialized_end=2527 + _globals['_CREATETASKRATELIMIT']._serialized_start=2609 + _globals['_CREATETASKRATELIMIT']._serialized_end=2862 + _globals['_CREATEWORKFLOWVERSIONRESPONSE']._serialized_start=2864 + _globals['_CREATEWORKFLOWVERSIONRESPONSE']._serialized_end=2928 + _globals['_GETRUNDETAILSREQUEST']._serialized_start=2930 + _globals['_GETRUNDETAILSREQUEST']._serialized_end=2973 + _globals['_TASKRUNDETAIL']._serialized_start=2976 + _globals['_TASKRUNDETAIL']._serialized_end=3146 + _globals['_GETRUNDETAILSRESPONSE']._serialized_start=3149 + 
_globals['_GETRUNDETAILSRESPONSE']._serialized_end=3409 + _globals['_GETRUNDETAILSRESPONSE_TASKRUNSENTRY']._serialized_start=3343 + _globals['_GETRUNDETAILSRESPONSE_TASKRUNSENTRY']._serialized_end=3409 + _globals['_ADMINSERVICE']._serialized_start=3767 + _globals['_ADMINSERVICE']._serialized_end=4230 # @@protoc_insertion_point(module_scope) diff --git a/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.pyi b/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.pyi index 6ebd23da3..3015f8267 100644 --- a/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.pyi +++ b/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.pyi @@ -2,6 +2,7 @@ import datetime from google.protobuf import timestamp_pb2 as _timestamp_pb2 from hatchet_sdk.contracts.v1.shared import condition_pb2 as _condition_pb2 +from hatchet_sdk.contracts.v1.shared import trigger_pb2 as _trigger_pb2 from google.protobuf.internal import containers as _containers from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper from google.protobuf import descriptor as _descriptor @@ -33,6 +34,7 @@ class RunStatus(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): COMPLETED: _ClassVar[RunStatus] FAILED: _ClassVar[RunStatus] CANCELLED: _ClassVar[RunStatus] + EVICTED: _ClassVar[RunStatus] class ConcurrencyLimitStrategy(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): __slots__ = () @@ -41,15 +43,6 @@ class ConcurrencyLimitStrategy(int, metaclass=_enum_type_wrapper.EnumTypeWrapper QUEUE_NEWEST: _ClassVar[ConcurrencyLimitStrategy] GROUP_ROUND_ROBIN: _ClassVar[ConcurrencyLimitStrategy] CANCEL_NEWEST: _ClassVar[ConcurrencyLimitStrategy] - -class WorkerLabelComparator(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = () - EQUAL: _ClassVar[WorkerLabelComparator] - NOT_EQUAL: _ClassVar[WorkerLabelComparator] - GREATER_THAN: _ClassVar[WorkerLabelComparator] - GREATER_THAN_OR_EQUAL: _ClassVar[WorkerLabelComparator] - LESS_THAN: _ClassVar[WorkerLabelComparator] - LESS_THAN_OR_EQUAL: 
_ClassVar[WorkerLabelComparator] SOFT: StickyStrategy HARD: StickyStrategy SECOND: RateLimitDuration @@ -64,17 +57,12 @@ RUNNING: RunStatus COMPLETED: RunStatus FAILED: RunStatus CANCELLED: RunStatus +EVICTED: RunStatus CANCEL_IN_PROGRESS: ConcurrencyLimitStrategy DROP_NEWEST: ConcurrencyLimitStrategy QUEUE_NEWEST: ConcurrencyLimitStrategy GROUP_ROUND_ROBIN: ConcurrencyLimitStrategy CANCEL_NEWEST: ConcurrencyLimitStrategy -EQUAL: WorkerLabelComparator -NOT_EQUAL: WorkerLabelComparator -GREATER_THAN: WorkerLabelComparator -GREATER_THAN_OR_EQUAL: WorkerLabelComparator -LESS_THAN: WorkerLabelComparator -LESS_THAN_OR_EQUAL: WorkerLabelComparator class CancelTasksRequest(_message.Message): __slots__ = ("external_ids", "filter") @@ -125,8 +113,8 @@ class TriggerWorkflowRunRequest(_message.Message): KEY_FIELD_NUMBER: _ClassVar[int] VALUE_FIELD_NUMBER: _ClassVar[int] key: str - value: DesiredWorkerLabels - def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[DesiredWorkerLabels, _Mapping]] = ...) -> None: ... + value: _trigger_pb2.DesiredWorkerLabels + def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[_trigger_pb2.DesiredWorkerLabels, _Mapping]] = ...) -> None: ... WORKFLOW_NAME_FIELD_NUMBER: _ClassVar[int] INPUT_FIELD_NUMBER: _ClassVar[int] ADDITIONAL_METADATA_FIELD_NUMBER: _ClassVar[int] @@ -136,8 +124,8 @@ class TriggerWorkflowRunRequest(_message.Message): input: bytes additional_metadata: bytes priority: int - desired_worker_labels: _containers.MessageMap[str, DesiredWorkerLabels] - def __init__(self, workflow_name: _Optional[str] = ..., input: _Optional[bytes] = ..., additional_metadata: _Optional[bytes] = ..., priority: _Optional[int] = ..., desired_worker_labels: _Optional[_Mapping[str, DesiredWorkerLabels]] = ...) -> None: ... 
+ desired_worker_labels: _containers.MessageMap[str, _trigger_pb2.DesiredWorkerLabels] + def __init__(self, workflow_name: _Optional[str] = ..., input: _Optional[bytes] = ..., additional_metadata: _Optional[bytes] = ..., priority: _Optional[int] = ..., desired_worker_labels: _Optional[_Mapping[str, _trigger_pb2.DesiredWorkerLabels]] = ...) -> None: ... class TriggerWorkflowRunResponse(_message.Message): __slots__ = ("external_id",) @@ -145,6 +133,26 @@ class TriggerWorkflowRunResponse(_message.Message): external_id: str def __init__(self, external_id: _Optional[str] = ...) -> None: ... +class BranchDurableTaskRequest(_message.Message): + __slots__ = ("task_external_id", "node_id", "branch_id") + TASK_EXTERNAL_ID_FIELD_NUMBER: _ClassVar[int] + NODE_ID_FIELD_NUMBER: _ClassVar[int] + BRANCH_ID_FIELD_NUMBER: _ClassVar[int] + task_external_id: str + node_id: int + branch_id: int + def __init__(self, task_external_id: _Optional[str] = ..., node_id: _Optional[int] = ..., branch_id: _Optional[int] = ...) -> None: ... + +class BranchDurableTaskResponse(_message.Message): + __slots__ = ("task_external_id", "node_id", "branch_id") + TASK_EXTERNAL_ID_FIELD_NUMBER: _ClassVar[int] + NODE_ID_FIELD_NUMBER: _ClassVar[int] + BRANCH_ID_FIELD_NUMBER: _ClassVar[int] + task_external_id: str + node_id: int + branch_id: int + def __init__(self, task_external_id: _Optional[str] = ..., node_id: _Optional[int] = ..., branch_id: _Optional[int] = ...) -> None: ... 
+ class CreateWorkflowVersionRequest(_message.Message): __slots__ = ("name", "description", "version", "event_triggers", "cron_triggers", "tasks", "concurrency", "cron_input", "on_failure_task", "sticky", "default_priority", "concurrency_arr", "default_filters", "input_json_schema") NAME_FIELD_NUMBER: _ClassVar[int] @@ -197,20 +205,6 @@ class Concurrency(_message.Message): limit_strategy: ConcurrencyLimitStrategy def __init__(self, expression: _Optional[str] = ..., max_runs: _Optional[int] = ..., limit_strategy: _Optional[_Union[ConcurrencyLimitStrategy, str]] = ...) -> None: ... -class DesiredWorkerLabels(_message.Message): - __slots__ = ("str_value", "int_value", "required", "comparator", "weight") - STR_VALUE_FIELD_NUMBER: _ClassVar[int] - INT_VALUE_FIELD_NUMBER: _ClassVar[int] - REQUIRED_FIELD_NUMBER: _ClassVar[int] - COMPARATOR_FIELD_NUMBER: _ClassVar[int] - WEIGHT_FIELD_NUMBER: _ClassVar[int] - str_value: str - int_value: int - required: bool - comparator: WorkerLabelComparator - weight: int - def __init__(self, str_value: _Optional[str] = ..., int_value: _Optional[int] = ..., required: bool = ..., comparator: _Optional[_Union[WorkerLabelComparator, str]] = ..., weight: _Optional[int] = ...) -> None: ... - class CreateTaskOpts(_message.Message): __slots__ = ("readable_id", "action", "timeout", "inputs", "parents", "retries", "rate_limits", "worker_labels", "backoff_factor", "backoff_max_seconds", "concurrency", "conditions", "schedule_timeout", "is_durable", "slot_requests") class WorkerLabelsEntry(_message.Message): @@ -218,8 +212,8 @@ class CreateTaskOpts(_message.Message): KEY_FIELD_NUMBER: _ClassVar[int] VALUE_FIELD_NUMBER: _ClassVar[int] key: str - value: DesiredWorkerLabels - def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[DesiredWorkerLabels, _Mapping]] = ...) -> None: ... 
+ value: _trigger_pb2.DesiredWorkerLabels + def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[_trigger_pb2.DesiredWorkerLabels, _Mapping]] = ...) -> None: ... class SlotRequestsEntry(_message.Message): __slots__ = ("key", "value") KEY_FIELD_NUMBER: _ClassVar[int] @@ -249,7 +243,7 @@ class CreateTaskOpts(_message.Message): parents: _containers.RepeatedScalarFieldContainer[str] retries: int rate_limits: _containers.RepeatedCompositeFieldContainer[CreateTaskRateLimit] - worker_labels: _containers.MessageMap[str, DesiredWorkerLabels] + worker_labels: _containers.MessageMap[str, _trigger_pb2.DesiredWorkerLabels] backoff_factor: float backoff_max_seconds: int concurrency: _containers.RepeatedCompositeFieldContainer[Concurrency] @@ -257,7 +251,7 @@ class CreateTaskOpts(_message.Message): schedule_timeout: str is_durable: bool slot_requests: _containers.ScalarMap[str, int] - def __init__(self, readable_id: _Optional[str] = ..., action: _Optional[str] = ..., timeout: _Optional[str] = ..., inputs: _Optional[str] = ..., parents: _Optional[_Iterable[str]] = ..., retries: _Optional[int] = ..., rate_limits: _Optional[_Iterable[_Union[CreateTaskRateLimit, _Mapping]]] = ..., worker_labels: _Optional[_Mapping[str, DesiredWorkerLabels]] = ..., backoff_factor: _Optional[float] = ..., backoff_max_seconds: _Optional[int] = ..., concurrency: _Optional[_Iterable[_Union[Concurrency, _Mapping]]] = ..., conditions: _Optional[_Union[_condition_pb2.TaskConditions, _Mapping]] = ..., schedule_timeout: _Optional[str] = ..., is_durable: bool = ..., slot_requests: _Optional[_Mapping[str, int]] = ...) -> None: ... 
+ def __init__(self, readable_id: _Optional[str] = ..., action: _Optional[str] = ..., timeout: _Optional[str] = ..., inputs: _Optional[str] = ..., parents: _Optional[_Iterable[str]] = ..., retries: _Optional[int] = ..., rate_limits: _Optional[_Iterable[_Union[CreateTaskRateLimit, _Mapping]]] = ..., worker_labels: _Optional[_Mapping[str, _trigger_pb2.DesiredWorkerLabels]] = ..., backoff_factor: _Optional[float] = ..., backoff_max_seconds: _Optional[int] = ..., concurrency: _Optional[_Iterable[_Union[Concurrency, _Mapping]]] = ..., conditions: _Optional[_Union[_condition_pb2.TaskConditions, _Mapping]] = ..., schedule_timeout: _Optional[str] = ..., is_durable: bool = ..., slot_requests: _Optional[_Mapping[str, int]] = ...) -> None: ... class CreateTaskRateLimit(_message.Message): __slots__ = ("key", "units", "key_expr", "units_expr", "limit_values_expr", "duration") @@ -290,21 +284,23 @@ class GetRunDetailsRequest(_message.Message): def __init__(self, external_id: _Optional[str] = ...) -> None: ... class TaskRunDetail(_message.Message): - __slots__ = ("external_id", "status", "error", "output", "readable_id") + __slots__ = ("external_id", "status", "error", "output", "readable_id", "is_evicted") EXTERNAL_ID_FIELD_NUMBER: _ClassVar[int] STATUS_FIELD_NUMBER: _ClassVar[int] ERROR_FIELD_NUMBER: _ClassVar[int] OUTPUT_FIELD_NUMBER: _ClassVar[int] READABLE_ID_FIELD_NUMBER: _ClassVar[int] + IS_EVICTED_FIELD_NUMBER: _ClassVar[int] external_id: str status: RunStatus error: str output: bytes readable_id: str - def __init__(self, external_id: _Optional[str] = ..., status: _Optional[_Union[RunStatus, str]] = ..., error: _Optional[str] = ..., output: _Optional[bytes] = ..., readable_id: _Optional[str] = ...) -> None: ... + is_evicted: bool + def __init__(self, external_id: _Optional[str] = ..., status: _Optional[_Union[RunStatus, str]] = ..., error: _Optional[str] = ..., output: _Optional[bytes] = ..., readable_id: _Optional[str] = ..., is_evicted: bool = ...) -> None: ... 
class GetRunDetailsResponse(_message.Message): - __slots__ = ("input", "status", "task_runs", "done", "additional_metadata") + __slots__ = ("input", "status", "task_runs", "done", "additional_metadata", "is_evicted") class TaskRunsEntry(_message.Message): __slots__ = ("key", "value") KEY_FIELD_NUMBER: _ClassVar[int] @@ -317,9 +313,11 @@ class GetRunDetailsResponse(_message.Message): TASK_RUNS_FIELD_NUMBER: _ClassVar[int] DONE_FIELD_NUMBER: _ClassVar[int] ADDITIONAL_METADATA_FIELD_NUMBER: _ClassVar[int] + IS_EVICTED_FIELD_NUMBER: _ClassVar[int] input: bytes status: RunStatus task_runs: _containers.MessageMap[str, TaskRunDetail] done: bool additional_metadata: bytes - def __init__(self, input: _Optional[bytes] = ..., status: _Optional[_Union[RunStatus, str]] = ..., task_runs: _Optional[_Mapping[str, TaskRunDetail]] = ..., done: bool = ..., additional_metadata: _Optional[bytes] = ...) -> None: ... + is_evicted: bool + def __init__(self, input: _Optional[bytes] = ..., status: _Optional[_Union[RunStatus, str]] = ..., task_runs: _Optional[_Mapping[str, TaskRunDetail]] = ..., done: bool = ..., additional_metadata: _Optional[bytes] = ..., is_evicted: bool = ...) -> None: ... 
diff --git a/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2_grpc.py b/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2_grpc.py index 7229fe29b..f9e2e8d37 100644 --- a/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2_grpc.py +++ b/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2_grpc.py @@ -60,6 +60,11 @@ class AdminServiceStub(object): request_serializer=v1_dot_workflows__pb2.GetRunDetailsRequest.SerializeToString, response_deserializer=v1_dot_workflows__pb2.GetRunDetailsResponse.FromString, _registered_method=True) + self.BranchDurableTask = channel.unary_unary( + '/v1.AdminService/BranchDurableTask', + request_serializer=v1_dot_workflows__pb2.BranchDurableTaskRequest.SerializeToString, + response_deserializer=v1_dot_workflows__pb2.BranchDurableTaskResponse.FromString, + _registered_method=True) class AdminServiceServicer(object): @@ -96,6 +101,12 @@ class AdminServiceServicer(object): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def BranchDurableTask(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def add_AdminServiceServicer_to_server(servicer, server): rpc_method_handlers = { @@ -124,6 +135,11 @@ def add_AdminServiceServicer_to_server(servicer, server): request_deserializer=v1_dot_workflows__pb2.GetRunDetailsRequest.FromString, response_serializer=v1_dot_workflows__pb2.GetRunDetailsResponse.SerializeToString, ), + 'BranchDurableTask': grpc.unary_unary_rpc_method_handler( + servicer.BranchDurableTask, + request_deserializer=v1_dot_workflows__pb2.BranchDurableTaskRequest.FromString, + response_serializer=v1_dot_workflows__pb2.BranchDurableTaskResponse.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( 'v1.AdminService', rpc_method_handlers) @@ -270,3 +286,30 @@ class 
AdminService(object): timeout, metadata, _registered_method=True) + + @staticmethod + def BranchDurableTask(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/v1.AdminService/BranchDurableTask', + v1_dot_workflows__pb2.BranchDurableTaskRequest.SerializeToString, + v1_dot_workflows__pb2.BranchDurableTaskResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/sdks/python/hatchet_sdk/contracts/workflows_pb2.py b/sdks/python/hatchet_sdk/contracts/workflows_pb2.py index 1e9e5b474..84c39233f 100644 --- a/sdks/python/hatchet_sdk/contracts/workflows_pb2.py +++ b/sdks/python/hatchet_sdk/contracts/workflows_pb2.py @@ -23,9 +23,10 @@ _sym_db = _symbol_database.Default() from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 +from hatchet_sdk.contracts.v1.shared import trigger_pb2 as v1_dot_shared_dot_trigger__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0fworkflows.proto\x1a\x1fgoogle/protobuf/timestamp.proto\">\n\x12PutWorkflowRequest\x12(\n\x04opts\x18\x01 \x01(\x0b\x32\x1a.CreateWorkflowVersionOpts\"\xbf\x04\n\x19\x43reateWorkflowVersionOpts\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x0f\n\x07version\x18\x03 \x01(\t\x12\x16\n\x0e\x65vent_triggers\x18\x04 \x03(\t\x12\x15\n\rcron_triggers\x18\x05 \x03(\t\x12\x36\n\x12scheduled_triggers\x18\x06 \x03(\x0b\x32\x1a.google.protobuf.Timestamp\x12$\n\x04jobs\x18\x07 \x03(\x0b\x32\x16.CreateWorkflowJobOpts\x12-\n\x0b\x63oncurrency\x18\x08 \x01(\x0b\x32\x18.WorkflowConcurrencyOpts\x12\x1d\n\x10schedule_timeout\x18\t \x01(\tH\x00\x88\x01\x01\x12\x17\n\ncron_input\x18\n 
\x01(\tH\x01\x88\x01\x01\x12\x33\n\x0eon_failure_job\x18\x0b \x01(\x0b\x32\x16.CreateWorkflowJobOptsH\x02\x88\x01\x01\x12$\n\x06sticky\x18\x0c \x01(\x0e\x32\x0f.StickyStrategyH\x03\x88\x01\x01\x12 \n\x04kind\x18\r \x01(\x0e\x32\r.WorkflowKindH\x04\x88\x01\x01\x12\x1d\n\x10\x64\x65\x66\x61ult_priority\x18\x0e \x01(\x05H\x05\x88\x01\x01\x42\x13\n\x11_schedule_timeoutB\r\n\x0b_cron_inputB\x11\n\x0f_on_failure_jobB\t\n\x07_stickyB\x07\n\x05_kindB\x13\n\x11_default_priority\"\xd0\x01\n\x17WorkflowConcurrencyOpts\x12\x13\n\x06\x61\x63tion\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08max_runs\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x36\n\x0elimit_strategy\x18\x03 \x01(\x0e\x32\x19.ConcurrencyLimitStrategyH\x02\x88\x01\x01\x12\x17\n\nexpression\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\t\n\x07_actionB\x0b\n\t_max_runsB\x11\n\x0f_limit_strategyB\r\n\x0b_expression\"h\n\x15\x43reateWorkflowJobOpts\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12&\n\x05steps\x18\x04 \x03(\x0b\x32\x17.CreateWorkflowStepOptsJ\x04\x08\x03\x10\x04\"\xe5\x01\n\x13\x44\x65siredWorkerLabels\x12\x16\n\tstr_value\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x16\n\tint_value\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x15\n\x08required\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12/\n\ncomparator\x18\x04 \x01(\x0e\x32\x16.WorkerLabelComparatorH\x03\x88\x01\x01\x12\x13\n\x06weight\x18\x05 \x01(\x05H\x04\x88\x01\x01\x42\x0c\n\n_str_valueB\x0c\n\n_int_valueB\x0b\n\t_requiredB\r\n\x0b_comparatorB\t\n\x07_weight\"\xb5\x03\n\x16\x43reateWorkflowStepOpts\x12\x13\n\x0breadable_id\x18\x01 \x01(\t\x12\x0e\n\x06\x61\x63tion\x18\x02 \x01(\t\x12\x0f\n\x07timeout\x18\x03 \x01(\t\x12\x0e\n\x06inputs\x18\x04 \x01(\t\x12\x0f\n\x07parents\x18\x05 \x03(\t\x12\x11\n\tuser_data\x18\x06 \x01(\t\x12\x0f\n\x07retries\x18\x07 \x01(\x05\x12)\n\x0brate_limits\x18\x08 \x03(\x0b\x32\x14.CreateStepRateLimit\x12@\n\rworker_labels\x18\t 
\x03(\x0b\x32).CreateWorkflowStepOpts.WorkerLabelsEntry\x12\x1b\n\x0e\x62\x61\x63koff_factor\x18\n \x01(\x02H\x00\x88\x01\x01\x12 \n\x13\x62\x61\x63koff_max_seconds\x18\x0b \x01(\x05H\x01\x88\x01\x01\x1aI\n\x11WorkerLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.DesiredWorkerLabels:\x02\x38\x01\x42\x11\n\x0f_backoff_factorB\x16\n\x14_backoff_max_seconds\"\xfa\x01\n\x13\x43reateStepRateLimit\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x12\n\x05units\x18\x02 \x01(\x05H\x00\x88\x01\x01\x12\x15\n\x08key_expr\x18\x03 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nunits_expr\x18\x04 \x01(\tH\x02\x88\x01\x01\x12\x1e\n\x11limit_values_expr\x18\x05 \x01(\tH\x03\x88\x01\x01\x12)\n\x08\x64uration\x18\x06 \x01(\x0e\x32\x12.RateLimitDurationH\x04\x88\x01\x01\x42\x08\n\x06_unitsB\x0b\n\t_key_exprB\r\n\x0b_units_exprB\x14\n\x12_limit_values_exprB\x0b\n\t_duration\"\x16\n\x14ListWorkflowsRequest\"\x83\x03\n\x17ScheduleWorkflowRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\tschedules\x18\x02 \x03(\x0b\x32\x1a.google.protobuf.Timestamp\x12\r\n\x05input\x18\x03 \x01(\t\x12\x16\n\tparent_id\x18\x04 \x01(\tH\x00\x88\x01\x01\x12(\n\x1bparent_task_run_external_id\x18\x05 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0b\x63hild_index\x18\x06 \x01(\x05H\x02\x88\x01\x01\x12\x16\n\tchild_key\x18\x07 \x01(\tH\x03\x88\x01\x01\x12 \n\x13\x61\x64\x64itional_metadata\x18\x08 \x01(\tH\x04\x88\x01\x01\x12\x15\n\x08priority\x18\t \x01(\x05H\x05\x88\x01\x01\x42\x0c\n\n_parent_idB\x1e\n\x1c_parent_task_run_external_idB\x0e\n\x0c_child_indexB\x0c\n\n_child_keyB\x16\n\x14_additional_metadataB\x0b\n\t_priority\"O\n\x11ScheduledWorkflow\x12\n\n\x02id\x18\x01 \x01(\t\x12.\n\ntrigger_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xe3\x01\n\x0fWorkflowVersion\x12\n\n\x02id\x18\x01 \x01(\t\x12.\n\ncreated_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12.\n\nupdated_at\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07version\x18\x05 
\x01(\t\x12\r\n\x05order\x18\x06 \x01(\x03\x12\x13\n\x0bworkflow_id\x18\x07 \x01(\t\x12/\n\x13scheduled_workflows\x18\x08 \x03(\x0b\x32\x12.ScheduledWorkflow\"?\n\x17WorkflowTriggerEventRef\x12\x11\n\tparent_id\x18\x01 \x01(\t\x12\x11\n\tevent_key\x18\x02 \x01(\t\"9\n\x16WorkflowTriggerCronRef\x12\x11\n\tparent_id\x18\x01 \x01(\t\x12\x0c\n\x04\x63ron\x18\x02 \x01(\t\"H\n\x1a\x42ulkTriggerWorkflowRequest\x12*\n\tworkflows\x18\x01 \x03(\x0b\x32\x17.TriggerWorkflowRequest\"7\n\x1b\x42ulkTriggerWorkflowResponse\x12\x18\n\x10workflow_run_ids\x18\x01 \x03(\t\"\xac\x04\n\x16TriggerWorkflowRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x02 \x01(\t\x12\x16\n\tparent_id\x18\x03 \x01(\tH\x00\x88\x01\x01\x12(\n\x1bparent_task_run_external_id\x18\x04 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0b\x63hild_index\x18\x05 \x01(\x05H\x02\x88\x01\x01\x12\x16\n\tchild_key\x18\x06 \x01(\tH\x03\x88\x01\x01\x12 \n\x13\x61\x64\x64itional_metadata\x18\x07 \x01(\tH\x04\x88\x01\x01\x12\x1e\n\x11\x64\x65sired_worker_id\x18\x08 \x01(\tH\x05\x88\x01\x01\x12\x15\n\x08priority\x18\t \x01(\x05H\x06\x88\x01\x01\x12O\n\x15\x64\x65sired_worker_labels\x18\n \x03(\x0b\x32\x30.TriggerWorkflowRequest.DesiredWorkerLabelsEntry\x1aP\n\x18\x44\x65siredWorkerLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.DesiredWorkerLabels:\x02\x38\x01\x42\x0c\n\n_parent_idB\x1e\n\x1c_parent_task_run_external_idB\x0e\n\x0c_child_indexB\x0c\n\n_child_keyB\x16\n\x14_additional_metadataB\x14\n\x12_desired_worker_idB\x0b\n\t_priority\"2\n\x17TriggerWorkflowResponse\x12\x17\n\x0fworkflow_run_id\x18\x01 \x01(\t\"W\n\x13PutRateLimitRequest\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05limit\x18\x02 \x01(\x05\x12$\n\x08\x64uration\x18\x03 
\x01(\x0e\x32\x12.RateLimitDuration\"\x16\n\x14PutRateLimitResponse*$\n\x0eStickyStrategy\x12\x08\n\x04SOFT\x10\x00\x12\x08\n\x04HARD\x10\x01*2\n\x0cWorkflowKind\x12\x0c\n\x08\x46UNCTION\x10\x00\x12\x0b\n\x07\x44URABLE\x10\x01\x12\x07\n\x03\x44\x41G\x10\x02*\x7f\n\x18\x43oncurrencyLimitStrategy\x12\x16\n\x12\x43\x41NCEL_IN_PROGRESS\x10\x00\x12\x0f\n\x0b\x44ROP_NEWEST\x10\x01\x12\x10\n\x0cQUEUE_NEWEST\x10\x02\x12\x15\n\x11GROUP_ROUND_ROBIN\x10\x03\x12\x11\n\rCANCEL_NEWEST\x10\x04*\x85\x01\n\x15WorkerLabelComparator\x12\t\n\x05\x45QUAL\x10\x00\x12\r\n\tNOT_EQUAL\x10\x01\x12\x10\n\x0cGREATER_THAN\x10\x02\x12\x19\n\x15GREATER_THAN_OR_EQUAL\x10\x03\x12\r\n\tLESS_THAN\x10\x04\x12\x16\n\x12LESS_THAN_OR_EQUAL\x10\x05*]\n\x11RateLimitDuration\x12\n\n\x06SECOND\x10\x00\x12\n\n\x06MINUTE\x10\x01\x12\x08\n\x04HOUR\x10\x02\x12\x07\n\x03\x44\x41Y\x10\x03\x12\x08\n\x04WEEK\x10\x04\x12\t\n\x05MONTH\x10\x05\x12\x08\n\x04YEAR\x10\x06\x32\xdc\x02\n\x0fWorkflowService\x12\x34\n\x0bPutWorkflow\x12\x13.PutWorkflowRequest\x1a\x10.WorkflowVersion\x12>\n\x10ScheduleWorkflow\x12\x18.ScheduleWorkflowRequest\x1a\x10.WorkflowVersion\x12\x44\n\x0fTriggerWorkflow\x12\x17.TriggerWorkflowRequest\x1a\x18.TriggerWorkflowResponse\x12P\n\x13\x42ulkTriggerWorkflow\x12\x1b.BulkTriggerWorkflowRequest\x1a\x1c.BulkTriggerWorkflowResponse\x12;\n\x0cPutRateLimit\x12\x14.PutRateLimitRequest\x1a\x15.PutRateLimitResponseBBZ@github.com/hatchet-dev/hatchet/internal/services/admin/contractsb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0fworkflows.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17v1/shared/trigger.proto\">\n\x12PutWorkflowRequest\x12(\n\x04opts\x18\x01 \x01(\x0b\x32\x1a.CreateWorkflowVersionOpts\"\xbf\x04\n\x19\x43reateWorkflowVersionOpts\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x0f\n\x07version\x18\x03 \x01(\t\x12\x16\n\x0e\x65vent_triggers\x18\x04 \x03(\t\x12\x15\n\rcron_triggers\x18\x05 
\x03(\t\x12\x36\n\x12scheduled_triggers\x18\x06 \x03(\x0b\x32\x1a.google.protobuf.Timestamp\x12$\n\x04jobs\x18\x07 \x03(\x0b\x32\x16.CreateWorkflowJobOpts\x12-\n\x0b\x63oncurrency\x18\x08 \x01(\x0b\x32\x18.WorkflowConcurrencyOpts\x12\x1d\n\x10schedule_timeout\x18\t \x01(\tH\x00\x88\x01\x01\x12\x17\n\ncron_input\x18\n \x01(\tH\x01\x88\x01\x01\x12\x33\n\x0eon_failure_job\x18\x0b \x01(\x0b\x32\x16.CreateWorkflowJobOptsH\x02\x88\x01\x01\x12$\n\x06sticky\x18\x0c \x01(\x0e\x32\x0f.StickyStrategyH\x03\x88\x01\x01\x12 \n\x04kind\x18\r \x01(\x0e\x32\r.WorkflowKindH\x04\x88\x01\x01\x12\x1d\n\x10\x64\x65\x66\x61ult_priority\x18\x0e \x01(\x05H\x05\x88\x01\x01\x42\x13\n\x11_schedule_timeoutB\r\n\x0b_cron_inputB\x11\n\x0f_on_failure_jobB\t\n\x07_stickyB\x07\n\x05_kindB\x13\n\x11_default_priority\"\xd0\x01\n\x17WorkflowConcurrencyOpts\x12\x13\n\x06\x61\x63tion\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08max_runs\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x36\n\x0elimit_strategy\x18\x03 \x01(\x0e\x32\x19.ConcurrencyLimitStrategyH\x02\x88\x01\x01\x12\x17\n\nexpression\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\t\n\x07_actionB\x0b\n\t_max_runsB\x11\n\x0f_limit_strategyB\r\n\x0b_expression\"h\n\x15\x43reateWorkflowJobOpts\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12&\n\x05steps\x18\x04 \x03(\x0b\x32\x17.CreateWorkflowStepOptsJ\x04\x08\x03\x10\x04\"\xb8\x03\n\x16\x43reateWorkflowStepOpts\x12\x13\n\x0breadable_id\x18\x01 \x01(\t\x12\x0e\n\x06\x61\x63tion\x18\x02 \x01(\t\x12\x0f\n\x07timeout\x18\x03 \x01(\t\x12\x0e\n\x06inputs\x18\x04 \x01(\t\x12\x0f\n\x07parents\x18\x05 \x03(\t\x12\x11\n\tuser_data\x18\x06 \x01(\t\x12\x0f\n\x07retries\x18\x07 \x01(\x05\x12)\n\x0brate_limits\x18\x08 \x03(\x0b\x32\x14.CreateStepRateLimit\x12@\n\rworker_labels\x18\t \x03(\x0b\x32).CreateWorkflowStepOpts.WorkerLabelsEntry\x12\x1b\n\x0e\x62\x61\x63koff_factor\x18\n \x01(\x02H\x00\x88\x01\x01\x12 \n\x13\x62\x61\x63koff_max_seconds\x18\x0b 
\x01(\x05H\x01\x88\x01\x01\x1aL\n\x11WorkerLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.v1.DesiredWorkerLabels:\x02\x38\x01\x42\x11\n\x0f_backoff_factorB\x16\n\x14_backoff_max_seconds\"\xfa\x01\n\x13\x43reateStepRateLimit\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x12\n\x05units\x18\x02 \x01(\x05H\x00\x88\x01\x01\x12\x15\n\x08key_expr\x18\x03 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nunits_expr\x18\x04 \x01(\tH\x02\x88\x01\x01\x12\x1e\n\x11limit_values_expr\x18\x05 \x01(\tH\x03\x88\x01\x01\x12)\n\x08\x64uration\x18\x06 \x01(\x0e\x32\x12.RateLimitDurationH\x04\x88\x01\x01\x42\x08\n\x06_unitsB\x0b\n\t_key_exprB\r\n\x0b_units_exprB\x14\n\x12_limit_values_exprB\x0b\n\t_duration\"\x16\n\x14ListWorkflowsRequest\"\x83\x03\n\x17ScheduleWorkflowRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\tschedules\x18\x02 \x03(\x0b\x32\x1a.google.protobuf.Timestamp\x12\r\n\x05input\x18\x03 \x01(\t\x12\x16\n\tparent_id\x18\x04 \x01(\tH\x00\x88\x01\x01\x12(\n\x1bparent_task_run_external_id\x18\x05 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0b\x63hild_index\x18\x06 \x01(\x05H\x02\x88\x01\x01\x12\x16\n\tchild_key\x18\x07 \x01(\tH\x03\x88\x01\x01\x12 \n\x13\x61\x64\x64itional_metadata\x18\x08 \x01(\tH\x04\x88\x01\x01\x12\x15\n\x08priority\x18\t \x01(\x05H\x05\x88\x01\x01\x42\x0c\n\n_parent_idB\x1e\n\x1c_parent_task_run_external_idB\x0e\n\x0c_child_indexB\x0c\n\n_child_keyB\x16\n\x14_additional_metadataB\x0b\n\t_priority\"O\n\x11ScheduledWorkflow\x12\n\n\x02id\x18\x01 \x01(\t\x12.\n\ntrigger_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xe3\x01\n\x0fWorkflowVersion\x12\n\n\x02id\x18\x01 \x01(\t\x12.\n\ncreated_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12.\n\nupdated_at\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07version\x18\x05 \x01(\t\x12\r\n\x05order\x18\x06 \x01(\x03\x12\x13\n\x0bworkflow_id\x18\x07 \x01(\t\x12/\n\x13scheduled_workflows\x18\x08 
\x03(\x0b\x32\x12.ScheduledWorkflow\"?\n\x17WorkflowTriggerEventRef\x12\x11\n\tparent_id\x18\x01 \x01(\t\x12\x11\n\tevent_key\x18\x02 \x01(\t\"9\n\x16WorkflowTriggerCronRef\x12\x11\n\tparent_id\x18\x01 \x01(\t\x12\x0c\n\x04\x63ron\x18\x02 \x01(\t\"K\n\x1a\x42ulkTriggerWorkflowRequest\x12-\n\tworkflows\x18\x01 \x03(\x0b\x32\x1a.v1.TriggerWorkflowRequest\"7\n\x1b\x42ulkTriggerWorkflowResponse\x12\x18\n\x10workflow_run_ids\x18\x01 \x03(\t\"2\n\x17TriggerWorkflowResponse\x12\x17\n\x0fworkflow_run_id\x18\x01 \x01(\t\"W\n\x13PutRateLimitRequest\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05limit\x18\x02 \x01(\x05\x12$\n\x08\x64uration\x18\x03 \x01(\x0e\x32\x12.RateLimitDuration\"\x16\n\x14PutRateLimitResponse*$\n\x0eStickyStrategy\x12\x08\n\x04SOFT\x10\x00\x12\x08\n\x04HARD\x10\x01*2\n\x0cWorkflowKind\x12\x0c\n\x08\x46UNCTION\x10\x00\x12\x0b\n\x07\x44URABLE\x10\x01\x12\x07\n\x03\x44\x41G\x10\x02*\x7f\n\x18\x43oncurrencyLimitStrategy\x12\x16\n\x12\x43\x41NCEL_IN_PROGRESS\x10\x00\x12\x0f\n\x0b\x44ROP_NEWEST\x10\x01\x12\x10\n\x0cQUEUE_NEWEST\x10\x02\x12\x15\n\x11GROUP_ROUND_ROBIN\x10\x03\x12\x11\n\rCANCEL_NEWEST\x10\x04*]\n\x11RateLimitDuration\x12\n\n\x06SECOND\x10\x00\x12\n\n\x06MINUTE\x10\x01\x12\x08\n\x04HOUR\x10\x02\x12\x07\n\x03\x44\x41Y\x10\x03\x12\x08\n\x04WEEK\x10\x04\x12\t\n\x05MONTH\x10\x05\x12\x08\n\x04YEAR\x10\x06\x32\xdf\x02\n\x0fWorkflowService\x12\x34\n\x0bPutWorkflow\x12\x13.PutWorkflowRequest\x1a\x10.WorkflowVersion\x12>\n\x10ScheduleWorkflow\x12\x18.ScheduleWorkflowRequest\x1a\x10.WorkflowVersion\x12G\n\x0fTriggerWorkflow\x12\x1a.v1.TriggerWorkflowRequest\x1a\x18.TriggerWorkflowResponse\x12P\n\x13\x42ulkTriggerWorkflow\x12\x1b.BulkTriggerWorkflowRequest\x1a\x1c.BulkTriggerWorkflowResponse\x12;\n\x0cPutRateLimit\x12\x14.PutRateLimitRequest\x1a\x15.PutRateLimitResponseBBZ@github.com/hatchet-dev/hatchet/internal/services/admin/contractsb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -35,60 +36,55 @@ if not 
_descriptor._USE_C_DESCRIPTORS: _globals['DESCRIPTOR']._serialized_options = b'Z@github.com/hatchet-dev/hatchet/internal/services/admin/contracts' _globals['_CREATEWORKFLOWSTEPOPTS_WORKERLABELSENTRY']._loaded_options = None _globals['_CREATEWORKFLOWSTEPOPTS_WORKERLABELSENTRY']._serialized_options = b'8\001' - _globals['_TRIGGERWORKFLOWREQUEST_DESIREDWORKERLABELSENTRY']._loaded_options = None - _globals['_TRIGGERWORKFLOWREQUEST_DESIREDWORKERLABELSENTRY']._serialized_options = b'8\001' - _globals['_STICKYSTRATEGY']._serialized_start=3640 - _globals['_STICKYSTRATEGY']._serialized_end=3676 - _globals['_WORKFLOWKIND']._serialized_start=3678 - _globals['_WORKFLOWKIND']._serialized_end=3728 - _globals['_CONCURRENCYLIMITSTRATEGY']._serialized_start=3730 - _globals['_CONCURRENCYLIMITSTRATEGY']._serialized_end=3857 - _globals['_WORKERLABELCOMPARATOR']._serialized_start=3860 - _globals['_WORKERLABELCOMPARATOR']._serialized_end=3993 - _globals['_RATELIMITDURATION']._serialized_start=3995 - _globals['_RATELIMITDURATION']._serialized_end=4088 - _globals['_PUTWORKFLOWREQUEST']._serialized_start=52 - _globals['_PUTWORKFLOWREQUEST']._serialized_end=114 - _globals['_CREATEWORKFLOWVERSIONOPTS']._serialized_start=117 - _globals['_CREATEWORKFLOWVERSIONOPTS']._serialized_end=692 - _globals['_WORKFLOWCONCURRENCYOPTS']._serialized_start=695 - _globals['_WORKFLOWCONCURRENCYOPTS']._serialized_end=903 - _globals['_CREATEWORKFLOWJOBOPTS']._serialized_start=905 - _globals['_CREATEWORKFLOWJOBOPTS']._serialized_end=1009 - _globals['_DESIREDWORKERLABELS']._serialized_start=1012 - _globals['_DESIREDWORKERLABELS']._serialized_end=1241 - _globals['_CREATEWORKFLOWSTEPOPTS']._serialized_start=1244 - _globals['_CREATEWORKFLOWSTEPOPTS']._serialized_end=1681 - _globals['_CREATEWORKFLOWSTEPOPTS_WORKERLABELSENTRY']._serialized_start=1565 - _globals['_CREATEWORKFLOWSTEPOPTS_WORKERLABELSENTRY']._serialized_end=1638 - _globals['_CREATESTEPRATELIMIT']._serialized_start=1684 - 
_globals['_CREATESTEPRATELIMIT']._serialized_end=1934 - _globals['_LISTWORKFLOWSREQUEST']._serialized_start=1936 - _globals['_LISTWORKFLOWSREQUEST']._serialized_end=1958 - _globals['_SCHEDULEWORKFLOWREQUEST']._serialized_start=1961 - _globals['_SCHEDULEWORKFLOWREQUEST']._serialized_end=2348 - _globals['_SCHEDULEDWORKFLOW']._serialized_start=2350 - _globals['_SCHEDULEDWORKFLOW']._serialized_end=2429 - _globals['_WORKFLOWVERSION']._serialized_start=2432 - _globals['_WORKFLOWVERSION']._serialized_end=2659 - _globals['_WORKFLOWTRIGGEREVENTREF']._serialized_start=2661 - _globals['_WORKFLOWTRIGGEREVENTREF']._serialized_end=2724 - _globals['_WORKFLOWTRIGGERCRONREF']._serialized_start=2726 - _globals['_WORKFLOWTRIGGERCRONREF']._serialized_end=2783 - _globals['_BULKTRIGGERWORKFLOWREQUEST']._serialized_start=2785 - _globals['_BULKTRIGGERWORKFLOWREQUEST']._serialized_end=2857 - _globals['_BULKTRIGGERWORKFLOWRESPONSE']._serialized_start=2859 - _globals['_BULKTRIGGERWORKFLOWRESPONSE']._serialized_end=2914 - _globals['_TRIGGERWORKFLOWREQUEST']._serialized_start=2917 - _globals['_TRIGGERWORKFLOWREQUEST']._serialized_end=3473 - _globals['_TRIGGERWORKFLOWREQUEST_DESIREDWORKERLABELSENTRY']._serialized_start=3258 - _globals['_TRIGGERWORKFLOWREQUEST_DESIREDWORKERLABELSENTRY']._serialized_end=3338 - _globals['_TRIGGERWORKFLOWRESPONSE']._serialized_start=3475 - _globals['_TRIGGERWORKFLOWRESPONSE']._serialized_end=3525 - _globals['_PUTRATELIMITREQUEST']._serialized_start=3527 - _globals['_PUTRATELIMITREQUEST']._serialized_end=3614 - _globals['_PUTRATELIMITRESPONSE']._serialized_start=3616 - _globals['_PUTRATELIMITRESPONSE']._serialized_end=3638 - _globals['_WORKFLOWSERVICE']._serialized_start=4091 - _globals['_WORKFLOWSERVICE']._serialized_end=4439 + _globals['_STICKYSTRATEGY']._serialized_start=2880 + _globals['_STICKYSTRATEGY']._serialized_end=2916 + _globals['_WORKFLOWKIND']._serialized_start=2918 + _globals['_WORKFLOWKIND']._serialized_end=2968 + 
_globals['_CONCURRENCYLIMITSTRATEGY']._serialized_start=2970 + _globals['_CONCURRENCYLIMITSTRATEGY']._serialized_end=3097 + _globals['_RATELIMITDURATION']._serialized_start=3099 + _globals['_RATELIMITDURATION']._serialized_end=3192 + _globals['_PUTWORKFLOWREQUEST']._serialized_start=77 + _globals['_PUTWORKFLOWREQUEST']._serialized_end=139 + _globals['_CREATEWORKFLOWVERSIONOPTS']._serialized_start=142 + _globals['_CREATEWORKFLOWVERSIONOPTS']._serialized_end=717 + _globals['_WORKFLOWCONCURRENCYOPTS']._serialized_start=720 + _globals['_WORKFLOWCONCURRENCYOPTS']._serialized_end=928 + _globals['_CREATEWORKFLOWJOBOPTS']._serialized_start=930 + _globals['_CREATEWORKFLOWJOBOPTS']._serialized_end=1034 + _globals['_CREATEWORKFLOWSTEPOPTS']._serialized_start=1037 + _globals['_CREATEWORKFLOWSTEPOPTS']._serialized_end=1477 + _globals['_CREATEWORKFLOWSTEPOPTS_WORKERLABELSENTRY']._serialized_start=1358 + _globals['_CREATEWORKFLOWSTEPOPTS_WORKERLABELSENTRY']._serialized_end=1434 + _globals['_CREATESTEPRATELIMIT']._serialized_start=1480 + _globals['_CREATESTEPRATELIMIT']._serialized_end=1730 + _globals['_LISTWORKFLOWSREQUEST']._serialized_start=1732 + _globals['_LISTWORKFLOWSREQUEST']._serialized_end=1754 + _globals['_SCHEDULEWORKFLOWREQUEST']._serialized_start=1757 + _globals['_SCHEDULEWORKFLOWREQUEST']._serialized_end=2144 + _globals['_SCHEDULEDWORKFLOW']._serialized_start=2146 + _globals['_SCHEDULEDWORKFLOW']._serialized_end=2225 + _globals['_WORKFLOWVERSION']._serialized_start=2228 + _globals['_WORKFLOWVERSION']._serialized_end=2455 + _globals['_WORKFLOWTRIGGEREVENTREF']._serialized_start=2457 + _globals['_WORKFLOWTRIGGEREVENTREF']._serialized_end=2520 + _globals['_WORKFLOWTRIGGERCRONREF']._serialized_start=2522 + _globals['_WORKFLOWTRIGGERCRONREF']._serialized_end=2579 + _globals['_BULKTRIGGERWORKFLOWREQUEST']._serialized_start=2581 + _globals['_BULKTRIGGERWORKFLOWREQUEST']._serialized_end=2656 + _globals['_BULKTRIGGERWORKFLOWRESPONSE']._serialized_start=2658 + 
_globals['_BULKTRIGGERWORKFLOWRESPONSE']._serialized_end=2713 + _globals['_TRIGGERWORKFLOWRESPONSE']._serialized_start=2715 + _globals['_TRIGGERWORKFLOWRESPONSE']._serialized_end=2765 + _globals['_PUTRATELIMITREQUEST']._serialized_start=2767 + _globals['_PUTRATELIMITREQUEST']._serialized_end=2854 + _globals['_PUTRATELIMITRESPONSE']._serialized_start=2856 + _globals['_PUTRATELIMITRESPONSE']._serialized_end=2878 + _globals['_WORKFLOWSERVICE']._serialized_start=3195 + _globals['_WORKFLOWSERVICE']._serialized_end=3546 # @@protoc_insertion_point(module_scope) + +# Re-export for backwards compatibility +from hatchet_sdk.contracts.v1.shared.trigger_pb2 import TriggerWorkflowRequest as TriggerWorkflowRequest # noqa: F401 +from hatchet_sdk.contracts.v1.shared.trigger_pb2 import DesiredWorkerLabels as DesiredWorkerLabels # noqa: F401 +from hatchet_sdk.contracts.v1.shared.trigger_pb2 import WorkerLabelComparator as WorkerLabelComparator # noqa: F401 diff --git a/sdks/python/hatchet_sdk/contracts/workflows_pb2.pyi b/sdks/python/hatchet_sdk/contracts/workflows_pb2.pyi index 0728a4380..ad6a31d56 100644 --- a/sdks/python/hatchet_sdk/contracts/workflows_pb2.pyi +++ b/sdks/python/hatchet_sdk/contracts/workflows_pb2.pyi @@ -1,6 +1,7 @@ import datetime from google.protobuf import timestamp_pb2 as _timestamp_pb2 +from hatchet_sdk.contracts.v1.shared import trigger_pb2 as _trigger_pb2 from google.protobuf.internal import containers as _containers from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper from google.protobuf import descriptor as _descriptor @@ -29,15 +30,6 @@ class ConcurrencyLimitStrategy(int, metaclass=_enum_type_wrapper.EnumTypeWrapper GROUP_ROUND_ROBIN: _ClassVar[ConcurrencyLimitStrategy] CANCEL_NEWEST: _ClassVar[ConcurrencyLimitStrategy] -class WorkerLabelComparator(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = () - EQUAL: _ClassVar[WorkerLabelComparator] - NOT_EQUAL: _ClassVar[WorkerLabelComparator] - GREATER_THAN: 
_ClassVar[WorkerLabelComparator] - GREATER_THAN_OR_EQUAL: _ClassVar[WorkerLabelComparator] - LESS_THAN: _ClassVar[WorkerLabelComparator] - LESS_THAN_OR_EQUAL: _ClassVar[WorkerLabelComparator] - class RateLimitDuration(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): __slots__ = () SECOND: _ClassVar[RateLimitDuration] @@ -57,12 +49,6 @@ DROP_NEWEST: ConcurrencyLimitStrategy QUEUE_NEWEST: ConcurrencyLimitStrategy GROUP_ROUND_ROBIN: ConcurrencyLimitStrategy CANCEL_NEWEST: ConcurrencyLimitStrategy -EQUAL: WorkerLabelComparator -NOT_EQUAL: WorkerLabelComparator -GREATER_THAN: WorkerLabelComparator -GREATER_THAN_OR_EQUAL: WorkerLabelComparator -LESS_THAN: WorkerLabelComparator -LESS_THAN_OR_EQUAL: WorkerLabelComparator SECOND: RateLimitDuration MINUTE: RateLimitDuration HOUR: RateLimitDuration @@ -131,20 +117,6 @@ class CreateWorkflowJobOpts(_message.Message): steps: _containers.RepeatedCompositeFieldContainer[CreateWorkflowStepOpts] def __init__(self, name: _Optional[str] = ..., description: _Optional[str] = ..., steps: _Optional[_Iterable[_Union[CreateWorkflowStepOpts, _Mapping]]] = ...) -> None: ... -class DesiredWorkerLabels(_message.Message): - __slots__ = ("str_value", "int_value", "required", "comparator", "weight") - STR_VALUE_FIELD_NUMBER: _ClassVar[int] - INT_VALUE_FIELD_NUMBER: _ClassVar[int] - REQUIRED_FIELD_NUMBER: _ClassVar[int] - COMPARATOR_FIELD_NUMBER: _ClassVar[int] - WEIGHT_FIELD_NUMBER: _ClassVar[int] - str_value: str - int_value: int - required: bool - comparator: WorkerLabelComparator - weight: int - def __init__(self, str_value: _Optional[str] = ..., int_value: _Optional[int] = ..., required: bool = ..., comparator: _Optional[_Union[WorkerLabelComparator, str]] = ..., weight: _Optional[int] = ...) -> None: ... 
- class CreateWorkflowStepOpts(_message.Message): __slots__ = ("readable_id", "action", "timeout", "inputs", "parents", "user_data", "retries", "rate_limits", "worker_labels", "backoff_factor", "backoff_max_seconds") class WorkerLabelsEntry(_message.Message): @@ -152,8 +124,8 @@ class CreateWorkflowStepOpts(_message.Message): KEY_FIELD_NUMBER: _ClassVar[int] VALUE_FIELD_NUMBER: _ClassVar[int] key: str - value: DesiredWorkerLabels - def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[DesiredWorkerLabels, _Mapping]] = ...) -> None: ... + value: _trigger_pb2.DesiredWorkerLabels + def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[_trigger_pb2.DesiredWorkerLabels, _Mapping]] = ...) -> None: ... READABLE_ID_FIELD_NUMBER: _ClassVar[int] ACTION_FIELD_NUMBER: _ClassVar[int] TIMEOUT_FIELD_NUMBER: _ClassVar[int] @@ -173,10 +145,10 @@ class CreateWorkflowStepOpts(_message.Message): user_data: str retries: int rate_limits: _containers.RepeatedCompositeFieldContainer[CreateStepRateLimit] - worker_labels: _containers.MessageMap[str, DesiredWorkerLabels] + worker_labels: _containers.MessageMap[str, _trigger_pb2.DesiredWorkerLabels] backoff_factor: float backoff_max_seconds: int - def __init__(self, readable_id: _Optional[str] = ..., action: _Optional[str] = ..., timeout: _Optional[str] = ..., inputs: _Optional[str] = ..., parents: _Optional[_Iterable[str]] = ..., user_data: _Optional[str] = ..., retries: _Optional[int] = ..., rate_limits: _Optional[_Iterable[_Union[CreateStepRateLimit, _Mapping]]] = ..., worker_labels: _Optional[_Mapping[str, DesiredWorkerLabels]] = ..., backoff_factor: _Optional[float] = ..., backoff_max_seconds: _Optional[int] = ...) -> None: ... 
+ def __init__(self, readable_id: _Optional[str] = ..., action: _Optional[str] = ..., timeout: _Optional[str] = ..., inputs: _Optional[str] = ..., parents: _Optional[_Iterable[str]] = ..., user_data: _Optional[str] = ..., retries: _Optional[int] = ..., rate_limits: _Optional[_Iterable[_Union[CreateStepRateLimit, _Mapping]]] = ..., worker_labels: _Optional[_Mapping[str, _trigger_pb2.DesiredWorkerLabels]] = ..., backoff_factor: _Optional[float] = ..., backoff_max_seconds: _Optional[int] = ...) -> None: ... class CreateStepRateLimit(_message.Message): __slots__ = ("key", "units", "key_expr", "units_expr", "limit_values_expr", "duration") @@ -265,8 +237,8 @@ class WorkflowTriggerCronRef(_message.Message): class BulkTriggerWorkflowRequest(_message.Message): __slots__ = ("workflows",) WORKFLOWS_FIELD_NUMBER: _ClassVar[int] - workflows: _containers.RepeatedCompositeFieldContainer[TriggerWorkflowRequest] - def __init__(self, workflows: _Optional[_Iterable[_Union[TriggerWorkflowRequest, _Mapping]]] = ...) -> None: ... + workflows: _containers.RepeatedCompositeFieldContainer[_trigger_pb2.TriggerWorkflowRequest] + def __init__(self, workflows: _Optional[_Iterable[_Union[_trigger_pb2.TriggerWorkflowRequest, _Mapping]]] = ...) -> None: ... class BulkTriggerWorkflowResponse(_message.Message): __slots__ = ("workflow_run_ids",) @@ -274,37 +246,6 @@ class BulkTriggerWorkflowResponse(_message.Message): workflow_run_ids: _containers.RepeatedScalarFieldContainer[str] def __init__(self, workflow_run_ids: _Optional[_Iterable[str]] = ...) -> None: ... 
-class TriggerWorkflowRequest(_message.Message): - __slots__ = ("name", "input", "parent_id", "parent_task_run_external_id", "child_index", "child_key", "additional_metadata", "desired_worker_id", "priority", "desired_worker_labels") - class DesiredWorkerLabelsEntry(_message.Message): - __slots__ = ("key", "value") - KEY_FIELD_NUMBER: _ClassVar[int] - VALUE_FIELD_NUMBER: _ClassVar[int] - key: str - value: DesiredWorkerLabels - def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[DesiredWorkerLabels, _Mapping]] = ...) -> None: ... - NAME_FIELD_NUMBER: _ClassVar[int] - INPUT_FIELD_NUMBER: _ClassVar[int] - PARENT_ID_FIELD_NUMBER: _ClassVar[int] - PARENT_TASK_RUN_EXTERNAL_ID_FIELD_NUMBER: _ClassVar[int] - CHILD_INDEX_FIELD_NUMBER: _ClassVar[int] - CHILD_KEY_FIELD_NUMBER: _ClassVar[int] - ADDITIONAL_METADATA_FIELD_NUMBER: _ClassVar[int] - DESIRED_WORKER_ID_FIELD_NUMBER: _ClassVar[int] - PRIORITY_FIELD_NUMBER: _ClassVar[int] - DESIRED_WORKER_LABELS_FIELD_NUMBER: _ClassVar[int] - name: str - input: str - parent_id: str - parent_task_run_external_id: str - child_index: int - child_key: str - additional_metadata: str - desired_worker_id: str - priority: int - desired_worker_labels: _containers.MessageMap[str, DesiredWorkerLabels] - def __init__(self, name: _Optional[str] = ..., input: _Optional[str] = ..., parent_id: _Optional[str] = ..., parent_task_run_external_id: _Optional[str] = ..., child_index: _Optional[int] = ..., child_key: _Optional[str] = ..., additional_metadata: _Optional[str] = ..., desired_worker_id: _Optional[str] = ..., priority: _Optional[int] = ..., desired_worker_labels: _Optional[_Mapping[str, DesiredWorkerLabels]] = ...) -> None: ... - class TriggerWorkflowResponse(_message.Message): __slots__ = ("workflow_run_id",) WORKFLOW_RUN_ID_FIELD_NUMBER: _ClassVar[int] @@ -324,3 +265,8 @@ class PutRateLimitRequest(_message.Message): class PutRateLimitResponse(_message.Message): __slots__ = () def __init__(self) -> None: ... 
+ +# Re-export for backwards compatibility +from hatchet_sdk.contracts.v1.shared.trigger_pb2 import TriggerWorkflowRequest as TriggerWorkflowRequest +from hatchet_sdk.contracts.v1.shared.trigger_pb2 import DesiredWorkerLabels as DesiredWorkerLabels +from hatchet_sdk.contracts.v1.shared.trigger_pb2 import WorkerLabelComparator as WorkerLabelComparator diff --git a/sdks/python/hatchet_sdk/contracts/workflows_pb2_grpc.py b/sdks/python/hatchet_sdk/contracts/workflows_pb2_grpc.py index aa944e5df..a474d36bf 100644 --- a/sdks/python/hatchet_sdk/contracts/workflows_pb2_grpc.py +++ b/sdks/python/hatchet_sdk/contracts/workflows_pb2_grpc.py @@ -3,6 +3,7 @@ import grpc import warnings +from hatchet_sdk.contracts.v1.shared import trigger_pb2 as v1_dot_shared_dot_trigger__pb2 from hatchet_sdk.contracts import workflows_pb2 as workflows__pb2 GRPC_GENERATED_VERSION = '1.76.0' @@ -47,7 +48,7 @@ class WorkflowServiceStub(object): _registered_method=True) self.TriggerWorkflow = channel.unary_unary( '/WorkflowService/TriggerWorkflow', - request_serializer=workflows__pb2.TriggerWorkflowRequest.SerializeToString, + request_serializer=v1_dot_shared_dot_trigger__pb2.TriggerWorkflowRequest.SerializeToString, response_deserializer=workflows__pb2.TriggerWorkflowResponse.FromString, _registered_method=True) self.BulkTriggerWorkflow = channel.unary_unary( @@ -111,7 +112,7 @@ def add_WorkflowServiceServicer_to_server(servicer, server): ), 'TriggerWorkflow': grpc.unary_unary_rpc_method_handler( servicer.TriggerWorkflow, - request_deserializer=workflows__pb2.TriggerWorkflowRequest.FromString, + request_deserializer=v1_dot_shared_dot_trigger__pb2.TriggerWorkflowRequest.FromString, response_serializer=workflows__pb2.TriggerWorkflowResponse.SerializeToString, ), 'BulkTriggerWorkflow': grpc.unary_unary_rpc_method_handler( @@ -205,7 +206,7 @@ class WorkflowService(object): request, target, '/WorkflowService/TriggerWorkflow', - workflows__pb2.TriggerWorkflowRequest.SerializeToString, + 
v1_dot_shared_dot_trigger__pb2.TriggerWorkflowRequest.SerializeToString, workflows__pb2.TriggerWorkflowResponse.FromString, options, channel_credentials, diff --git a/sdks/python/hatchet_sdk/deprecated/deprecation.py b/sdks/python/hatchet_sdk/deprecated/deprecation.py index a91071b14..1e549b692 100644 --- a/sdks/python/hatchet_sdk/deprecated/deprecation.py +++ b/sdks/python/hatchet_sdk/deprecated/deprecation.py @@ -53,19 +53,13 @@ def emit_deprecation_notice( ) -> None: """Emit a time-aware deprecation notice. - Args: - feature: A short identifier for the deprecated feature (used for - deduplication so each feature only logs once per process). - message: The human-readable deprecation message. - start: The UTC datetime when the deprecation window began. - warn_days: Days after *start* during which a warning is logged (default 90). - error_days: Days after *start* during which an error is logged. - After this window, calls have a 20% chance of raising. - If None (default), the error/raise phase is never reached — - the notice stays at error-level logging indefinitely. + :param feature: A short identifier for the deprecated feature (used for deduplication so each feature only logs once per process). + :param message: The human-readable deprecation message. + :param start: The UTC datetime when the deprecation window began. + :param warn_days: Days after *start* during which a warning is logged (default 90). + :param error_days: Days after *start* during which an error is logged. After this window, calls have a 20% chance of raising. If None (default), the error/raise phase is never reached — the notice stays at error-level logging indefinitely. - Raises: - DeprecationError: After the error_days window, raised ~20% of the time. + :raises: DeprecationError: After the error_days window, raised ~20% of the time. 
""" now = datetime.now(tz=timezone.utc) days_since = (now - start).days @@ -80,7 +74,7 @@ def emit_deprecation_notice( # Phase 2: error-level log (indefinite when error_days is None) if feature not in _already_logged: logger.error( - f"{message} " "This fallback will be removed soon. Upgrade immediately." + f"{message} This fallback will be removed soon. Upgrade immediately." ) _already_logged.add(feature) diff --git a/sdks/python/hatchet_sdk/deprecated/worker.py b/sdks/python/hatchet_sdk/deprecated/worker.py index 4dce590ca..3e63f301b 100644 --- a/sdks/python/hatchet_sdk/deprecated/worker.py +++ b/sdks/python/hatchet_sdk/deprecated/worker.py @@ -205,6 +205,7 @@ def _legacy_run_action_runner( worker.name + name_suffix, action_registry, max_runs, + max_runs, worker.config, action_queue, event_queue, diff --git a/sdks/python/hatchet_sdk/engine_version.py b/sdks/python/hatchet_sdk/engine_version.py new file mode 100644 index 000000000..4de283082 --- /dev/null +++ b/sdks/python/hatchet_sdk/engine_version.py @@ -0,0 +1,8 @@ +from enum import Enum + + +class MinEngineVersion(str, Enum): + """Minimum engine version required for a given feature.""" + + SLOT_CONFIG = "v0.78.23" + DURABLE_EVICTION = "v0.80.0" diff --git a/sdks/python/hatchet_sdk/exceptions.py b/sdks/python/hatchet_sdk/exceptions.py index 3ecc0c3e6..b3ee2349a 100644 --- a/sdks/python/hatchet_sdk/exceptions.py +++ b/sdks/python/hatchet_sdk/exceptions.py @@ -2,6 +2,28 @@ import json import traceback from typing import cast +from hatchet_sdk.engine_version import MinEngineVersion + + +class NonDeterminismError(Exception): + def __init__( + self, task_external_id: str, invocation_count: int, message: str, node_id: int + ) -> None: + self.task_external_id = task_external_id + self.invocation_count = invocation_count + self.message = message + self.node_id = node_id + + detail = ( + message + if message + else f"Non-determinism detected in task {task_external_id} on invocation {invocation_count} at node {node_id}" 
+ ) + + super().__init__( + f"{detail}\nCheck out our documentation for more details on expectations of durable tasks: https://docs.hatchet.run/v1/patterns/mixing-patterns" + ) + class InvalidDependencyError(Exception): pass @@ -170,3 +192,15 @@ class IllegalTaskOutputError(Exception): class LifespanSetupError(Exception): pass + + +class EvictionNotSupportedError(NonRetryableException): + """Raised when an eviction policy is configured against an engine version + that does not support durable-task eviction.""" + + def __init__(self, engine_version: str | None = None) -> None: + version_info = f" (engine version: {engine_version})" if engine_version else "" + super().__init__( + f"Eviction policies require engine >= {MinEngineVersion.DURABLE_EVICTION}{version_info}. " + "Please upgrade your Hatchet engine or remove the eviction policy from your task." + ) diff --git a/sdks/python/hatchet_sdk/features/runs.py b/sdks/python/hatchet_sdk/features/runs.py index aa4983af6..86c32c2ff 100644 --- a/sdks/python/hatchet_sdk/features/runs.py +++ b/sdks/python/hatchet_sdk/features/runs.py @@ -16,6 +16,12 @@ from hatchet_sdk.clients.listeners.workflow_listener import PooledWorkflowRunLis from hatchet_sdk.clients.rest.api.task_api import TaskApi from hatchet_sdk.clients.rest.api.workflow_runs_api import WorkflowRunsApi from hatchet_sdk.clients.rest.api_client import ApiClient +from hatchet_sdk.clients.rest.models.v1_branch_durable_task_request import ( + V1BranchDurableTaskRequest, +) +from hatchet_sdk.clients.rest.models.v1_branch_durable_task_response import ( + V1BranchDurableTaskResponse, +) from hatchet_sdk.clients.rest.models.v1_cancel_task_request import V1CancelTaskRequest from hatchet_sdk.clients.rest.models.v1_replay_task_request import V1ReplayTaskRequest from hatchet_sdk.clients.rest.models.v1_task_filter import V1TaskFilter @@ -856,6 +862,40 @@ class RunsClient(BaseRestClient): if chunk.type == StepRunEventType.STEP_RUN_EVENT_TYPE_STREAM: yield chunk.payload + def 
reset_durable_task( + self, task_external_id: str, node_id: int, branch_id: int + ) -> V1BranchDurableTaskResponse: + """ + Reset a durable task from a specific node id, creating a new branch. + + :param task_external_id: The external ID (UUID) of the durable task to reset. + :param node_id: The node ID to replay from. + :param branch_id: The branch ID to replay from. + :return: The reset response containing the new node_id and branch_id. + """ + with self.client() as client: + return self._wra(client).v1_durable_task_branch( + tenant=self.client_config.tenant_id, + v1_branch_durable_task_request=V1BranchDurableTaskRequest( + taskExternalId=task_external_id, nodeId=node_id, branchId=branch_id + ), + ) + + async def aio_reset_durable_task( + self, task_external_id: str, node_id: int, branch_id: int + ) -> V1BranchDurableTaskResponse: + """ + Reset a durable task from a specific node id, creating a new branch. + + :param task_external_id: The external ID (UUID) of the durable task to reset. + :param node_id: The node ID to replay from. + :param branch_id: The branch ID to replay from. + :return: The reset response containing the new node_id and branch_id. 
+ """ + return await asyncio.to_thread( + self.reset_durable_task, task_external_id, node_id, branch_id + ) + def get_details(self, external_id: str) -> WorkflowRunDetail: return self.admin_client.get_details(external_id=external_id) diff --git a/sdks/python/hatchet_sdk/hatchet.py b/sdks/python/hatchet_sdk/hatchet.py index eba5a1528..bbde5aa68 100644 --- a/sdks/python/hatchet_sdk/hatchet.py +++ b/sdks/python/hatchet_sdk/hatchet.py @@ -28,6 +28,10 @@ from hatchet_sdk.labels import DesiredWorkerLabel from hatchet_sdk.logger import logger from hatchet_sdk.rate_limit import RateLimit from hatchet_sdk.runnables.contextvars import ctx_hatchet_context +from hatchet_sdk.runnables.eviction import ( + DEFAULT_DURABLE_TASK_EVICTION_POLICY, + EvictionPolicy, +) from hatchet_sdk.runnables.types import ( ConcurrencyExpression, DefaultFilter, @@ -185,6 +189,14 @@ class Hatchet: """ return self._client.config.namespace + async def aio_get_engine_version(self) -> str | None: + """Fetch the engine version via the dispatcher's GetVersion RPC. + + :return: The engine version string, or ``None`` if the engine is too old + to support GetVersion. 
+ """ + return await self._client.dispatcher.get_version() + def worker( self, name: str, @@ -552,6 +564,7 @@ class Hatchet: backoff_max_seconds: int | None = None, default_filters: list[DefaultFilter] | None = None, default_additional_metadata: JSONSerializableMapping | None = None, + eviction_policy: EvictionPolicy | None = DEFAULT_DURABLE_TASK_EVICTION_POLICY, ) -> Callable[ [Callable[Concatenate[EmptyModel, DurableContext, P], R | CoroutineLike[R]]], Standalone[EmptyModel, R], @@ -581,6 +594,7 @@ class Hatchet: backoff_max_seconds: int | None = None, default_filters: list[DefaultFilter] | None = None, default_additional_metadata: JSONSerializableMapping | None = None, + eviction_policy: EvictionPolicy | None = DEFAULT_DURABLE_TASK_EVICTION_POLICY, ) -> Callable[ [ Callable[ @@ -613,6 +627,7 @@ class Hatchet: backoff_max_seconds: int | None = None, default_filters: list[DefaultFilter] | None = None, default_additional_metadata: JSONSerializableMapping | None = None, + eviction_policy: EvictionPolicy | None = DEFAULT_DURABLE_TASK_EVICTION_POLICY, ) -> ( Callable[ [ @@ -670,6 +685,8 @@ class Hatchet: :param default_additional_metadata: A dictionary of additional metadata to attach to each run of this task by default. + :param eviction_policy: An optional eviction policy controlling when idle durable tasks are evicted from workers. + :returns: A decorator which creates a `Standalone` task object. 
""" @@ -715,6 +732,7 @@ class Hatchet: backoff_factor=backoff_factor, backoff_max_seconds=backoff_max_seconds, concurrency=_concurrency, + eviction_policy=eviction_policy, ) return Standalone[TWorkflowInput, R]( diff --git a/sdks/python/hatchet_sdk/labels.py b/sdks/python/hatchet_sdk/labels.py index 882fe855b..544431d7c 100644 --- a/sdks/python/hatchet_sdk/labels.py +++ b/sdks/python/hatchet_sdk/labels.py @@ -1,6 +1,6 @@ from pydantic import BaseModel -from hatchet_sdk.contracts.v1.workflows_pb2 import DesiredWorkerLabels +from hatchet_sdk.contracts.v1.shared.trigger_pb2 import DesiredWorkerLabels class DesiredWorkerLabel(BaseModel): diff --git a/sdks/python/hatchet_sdk/opentelemetry/instrumentor.py b/sdks/python/hatchet_sdk/opentelemetry/instrumentor.py index 75a923713..e41d1773e 100644 --- a/sdks/python/hatchet_sdk/opentelemetry/instrumentor.py +++ b/sdks/python/hatchet_sdk/opentelemetry/instrumentor.py @@ -49,6 +49,7 @@ from hatchet_sdk.clients.events import ( EventClient, PushEventOptions, ) +from hatchet_sdk.context.context import DurableContext from hatchet_sdk.contracts.events_pb2 import Event from hatchet_sdk.logger import logger from hatchet_sdk.runnables.action import Action @@ -268,6 +269,24 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc] self._wrap_async_run_workflows, ) + wrap_function_wrapper( + hatchet_sdk, + "context.context.DurableContext.aio_wait_for", + self._wrap_aio_wait_for, + ) + + wrap_function_wrapper( + hatchet_sdk, + "context.context.DurableContext._spawn_children_no_wait", + self._wrap_spawn_children_no_wait, + ) + + wrap_function_wrapper( + hatchet_sdk, + "context.context.DurableContext.aio_memo", + self._wrap_aio_memo, + ) + def extract_bound_args( self, wrapped_func: Callable[..., Any], @@ -724,6 +743,141 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc] return await wrapped(workflow_run_configs_with_meta) + ## IMPORTANT: Keep these types in sync with the wrapped method's signature + async def 
_wrap_aio_wait_for( + self, + wrapped: Callable[..., Coroutine[None, None, dict[str, Any]]], + instance: DurableContext, + args: tuple[Any, ...], + kwargs: dict[str, Any], + ) -> dict[str, Any]: + params = self.extract_bound_args(wrapped, args, kwargs) + + signal_key = cast(str, params[0]) + conditions = params[1:] + + traceparent = _parse_carrier_from_metadata(instance.action.additional_metadata) + + attributes: dict[OTelAttribute, str | int | None] = { + OTelAttribute.SIGNAL_KEY: signal_key, + OTelAttribute.NUM_CONDITIONS: len(conditions), + OTelAttribute.STEP_RUN_ID: instance.step_run_id, + } + + with self._tracer.start_as_current_span( + "hatchet.durable.wait_for", + attributes={ + f"hatchet.{k.value}": v + for k, v in attributes.items() + if v is not None and k not in self.config.otel.excluded_attributes + }, + context=traceparent, + kind=SpanKind.INTERNAL, + ) as span: + try: + return await wrapped(*args, **kwargs) + except Exception as e: + span.set_status(StatusCode.ERROR, str(e)) + raise + + ## IMPORTANT: Keep these types in sync with the wrapped method's signature + async def _wrap_spawn_children_no_wait( + self, + wrapped: Callable[..., Coroutine[None, None, list[tuple[int, int, str]]]], + instance: DurableContext, + args: tuple[Any, ...], + kwargs: dict[str, Any], + ) -> list[tuple[int, int, str]]: + params = self.extract_bound_args(wrapped, args, kwargs) + + configs = cast(list[WorkflowRunTriggerConfig], params[0]) + + traceparent = _parse_carrier_from_metadata(instance.action.additional_metadata) + + if len(configs) == 1: + config = configs[0] + span_name = "hatchet.run_workflow" + span_attributes = { + f"hatchet.{k.value}": v + for k, v in { + OTelAttribute.WORKFLOW_NAME: config.workflow_name, + OTelAttribute.ACTION_PAYLOAD: config.input, + OTelAttribute.PARENT_ID: config.options.parent_id, + OTelAttribute.PARENT_STEP_RUN_ID: config.options.parent_step_run_id, + OTelAttribute.CHILD_INDEX: config.options.child_index, + OTelAttribute.CHILD_KEY: 
config.options.child_key, + OTelAttribute.NAMESPACE: config.options.namespace, + OTelAttribute.ADDITIONAL_METADATA: json.dumps( + config.options.additional_metadata, default=str + ), + OTelAttribute.PRIORITY: config.options.priority, + OTelAttribute.DESIRED_WORKER_ID: config.options.desired_worker_id, + OTelAttribute.STICKY: config.options.sticky, + OTelAttribute.KEY: config.options.key, + }.items() + if v + and k not in self.config.otel.excluded_attributes + and v != "{}" + and v != "[]" + } + else: + unique_workflow_names = {c.workflow_name for c in configs} + span_name = "hatchet.run_workflows" + span_attributes = { + "hatchet.num_workflows": len(configs), + "hatchet.unique_workflow_names": json.dumps( + unique_workflow_names, default=str + ), + } + + with self._tracer.start_as_current_span( + span_name, + attributes=span_attributes, + context=traceparent, + kind=SpanKind.PRODUCER, + ) as span: + try: + return await wrapped(*args, **kwargs) + except Exception as e: + span.set_status(StatusCode.ERROR, str(e)) + raise + + ## IMPORTANT: Keep these types in sync with the wrapped method's signature + async def _wrap_aio_memo( + self, + wrapped: Callable[..., Coroutine[None, None, Any]], + instance: DurableContext, + args: tuple[Any, ...], + kwargs: dict[str, Any], + ) -> Any: + params = self.extract_bound_args(wrapped, args, kwargs) + + fn = params[0] + fn_name = getattr(fn, "__name__", str(fn)) + + traceparent = _parse_carrier_from_metadata(instance.action.additional_metadata) + + attributes = { + OTelAttribute.MEMO_FN_NAME: fn_name, + OTelAttribute.STEP_RUN_ID: instance.step_run_id, + } + + with self._tracer.start_as_current_span( + f"hatchet.durable.memo.{fn_name}", + attributes={ + f"hatchet.{k.value}": v + for k, v in attributes.items() + if v is not None and k not in self.config.otel.excluded_attributes + }, + context=traceparent, + kind=SpanKind.INTERNAL, + ) as span: + try: + return await wrapped(*args, **kwargs) + except Exception as e: + 
span.set_status(StatusCode.ERROR, str(e)) + raise + def _uninstrument(self, **kwargs: InstrumentKwargs) -> None: self.tracer_provider = NoOpTracerProvider() self.meter_provider = NoOpMeterProvider() @@ -737,3 +891,6 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc] unwrap(hatchet_sdk, "clients.admin.AdminClient.schedule_workflow") unwrap(hatchet_sdk, "clients.admin.AdminClient.run_workflows") unwrap(hatchet_sdk, "clients.admin.AdminClient.aio_run_workflows") + unwrap(hatchet_sdk, "context.context.DurableContext.aio_wait_for") + unwrap(hatchet_sdk, "context.context.DurableContext._spawn_children_no_wait") + unwrap(hatchet_sdk, "context.context.DurableContext.aio_memo") diff --git a/sdks/python/hatchet_sdk/runnables/action.py b/sdks/python/hatchet_sdk/runnables/action.py index 544665bdb..4adf7c20a 100644 --- a/sdks/python/hatchet_sdk/runnables/action.py +++ b/sdks/python/hatchet_sdk/runnables/action.py @@ -1,16 +1,14 @@ import json from dataclasses import field from enum import Enum -from typing import TYPE_CHECKING, Any +from typing import Any from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator +from hatchet_sdk.config import ClientConfig from hatchet_sdk.utils.opentelemetry import OTelAttribute from hatchet_sdk.utils.typing import JSONSerializableMapping -if TYPE_CHECKING: - from hatchet_sdk.config import ClientConfig - ActionKey = str @@ -75,6 +73,7 @@ class Action(BaseModel): parent_workflow_run_id: str | None = None priority: int | None = None + durable_task_invocation_count: int | None = None def get_otel_attributes(self, config: "ClientConfig") -> dict[str, str | int]: try: diff --git a/sdks/python/hatchet_sdk/runnables/contextvars.py b/sdks/python/hatchet_sdk/runnables/contextvars.py index ea09ce8a9..44ac16ff4 100644 --- a/sdks/python/hatchet_sdk/runnables/contextvars.py +++ b/sdks/python/hatchet_sdk/runnables/contextvars.py @@ -10,7 +10,7 @@ from hatchet_sdk.runnables.action import ActionKey from 
hatchet_sdk.utils.typing import JSONSerializableMapping if TYPE_CHECKING: - from hatchet_sdk.context.context import Context + from hatchet_sdk.context.context import Context, DurableContext ctx_workflow_run_id: ContextVar[str | None] = ContextVar( "ctx_workflow_run_id", default=None @@ -26,6 +26,9 @@ ctx_additional_metadata: ContextVar[JSONSerializableMapping | None] = ContextVar ctx_task_retry_count: ContextVar[int | None] = ContextVar( "ctx_task_retry_count", default=0 ) +ctx_durable_context: ContextVar[DurableContext | None] = ContextVar( + "ctx_durable_context", default=None +) workflow_spawn_indices = Counter[ActionKey]() diff --git a/sdks/python/hatchet_sdk/runnables/eviction.py b/sdks/python/hatchet_sdk/runnables/eviction.py new file mode 100644 index 000000000..5e6347d20 --- /dev/null +++ b/sdks/python/hatchet_sdk/runnables/eviction.py @@ -0,0 +1,47 @@ +from __future__ import annotations + +from datetime import timedelta + +from pydantic import BaseModel, ConfigDict + + +class EvictionPolicy(BaseModel): + """ + Task-scoped eviction parameters for *durable* tasks. + + :ivar ttl: Maximum continuous waiting duration before TTL-eligible eviction. + Applies to time spent in SDK-instrumented "waiting" states (e.g. + `ctx.aio_wait_for(...)`, waiting for a workflow run result). + :ivar allow_capacity_eviction: Whether this task may be evicted under durable-slot pressure. + :ivar priority: Lower values are evicted first when multiple candidates exist. + + Setting the durable task's eviction params to `None` means the task run is + never eligible for eviction. + + **Example** + ```python + EvictionPolicy( + ttl=timedelta(minutes=10), + allow_capacity_eviction=True, + priority=0, + ) + ``` + """ + + model_config = ConfigDict(frozen=True) + + ttl: timedelta | None + + allow_capacity_eviction: bool = True + + priority: int = 0 + + +# Shared sensible defaults (single source of truth). 
+# NOTE: When changing these values, update the :param durable_run_eviction: / :param eviction_policy: +# docstrings in workflow.Workflow.durable_task and hatchet.Hatchet.durable_task to match. +DEFAULT_DURABLE_TASK_EVICTION_POLICY = EvictionPolicy( + ttl=timedelta(minutes=15), + allow_capacity_eviction=True, + priority=0, +) diff --git a/sdks/python/hatchet_sdk/runnables/task.py b/sdks/python/hatchet_sdk/runnables/task.py index bfd52a6d2..9c2d7ca90 100644 --- a/sdks/python/hatchet_sdk/runnables/task.py +++ b/sdks/python/hatchet_sdk/runnables/task.py @@ -38,12 +38,14 @@ from hatchet_sdk.conditions import ( from hatchet_sdk.context.context import Context, DurableContext from hatchet_sdk.context.worker_context import WorkerContext from hatchet_sdk.contracts.v1.shared.condition_pb2 import TaskConditions +from hatchet_sdk.contracts.v1.shared.trigger_pb2 import DesiredWorkerLabels from hatchet_sdk.contracts.v1.workflows_pb2 import ( CreateTaskOpts, CreateTaskRateLimit, - DesiredWorkerLabels, ) from hatchet_sdk.exceptions import InvalidDependencyError +from hatchet_sdk.logger import logger +from hatchet_sdk.runnables.eviction import EvictionPolicy from hatchet_sdk.runnables.types import ( ConcurrencyExpression, R, @@ -151,8 +153,10 @@ class Task(Generic[TWorkflowInput, R]): skip_if: list[Condition | OrGroup] | None, cancel_if: list[Condition | OrGroup] | None, slot_requests: dict[str, int] | None = None, + durable_eviction: EvictionPolicy | None = None, ) -> None: self.is_durable = is_durable + self.durable_eviction = durable_eviction if slot_requests is None: slot_requests = {"durable": 1} if is_durable else {"default": 1} self.slot_requests = slot_requests @@ -185,6 +189,11 @@ class Task(Generic[TWorkflowInput, R]): step_output=TypeAdapter(normalize_validator(return_type)), ) + if not self.is_async_function and self.is_durable: + logger.warning( + f"{self.fn.__name__} is defined as a synchronous, durable task. in the future, durable tasks will only support `async`. 
please update this durable task to be async, or make it non-durable." + ) + async def _parse_maybe_cm_param( self, parsed: DependencyToInject, diff --git a/sdks/python/hatchet_sdk/runnables/types.py b/sdks/python/hatchet_sdk/runnables/types.py index fb1b94e41..068a75275 100644 --- a/sdks/python/hatchet_sdk/runnables/types.py +++ b/sdks/python/hatchet_sdk/runnables/types.py @@ -2,11 +2,18 @@ import inspect import json from collections.abc import Callable, Mapping from enum import Enum -from typing import Any, ParamSpec, TypeAlias, TypeGuard, TypeVar, overload +from typing import ( + TYPE_CHECKING, + Any, + ParamSpec, + TypeAlias, + TypeGuard, + TypeVar, + overload, +) from pydantic import BaseModel, ConfigDict, Field, TypeAdapter -from hatchet_sdk.context.context import Context, DurableContext from hatchet_sdk.contracts.v1.workflows_pb2 import Concurrency from hatchet_sdk.contracts.v1.workflows_pb2 import DefaultFilter as DefaultFilterProto from hatchet_sdk.utils.timedelta_to_expression import Duration @@ -16,6 +23,10 @@ from hatchet_sdk.utils.typing import ( JSONSerializableMapping, ) +if TYPE_CHECKING: + from hatchet_sdk.context.context import Context, DurableContext + + ValidTaskReturnType = BaseModel | Mapping[str, Any] | DataclassInstance | None R = TypeVar("R", bound=ValidTaskReturnType) @@ -40,12 +51,20 @@ class ConcurrencyLimitStrategy(str, Enum): class ConcurrencyExpression(BaseModel): """ Defines concurrency limits for a workflow using a CEL expression. - Args: - expression (str): CEL expression to determine concurrency grouping. (i.e. "input.user_id") - max_runs (int): Maximum number of concurrent workflow runs. - limit_strategy (ConcurrencyLimitStrategy): Strategy for handling limit violations. - Example: - ConcurrencyExpression("input.user_id", 5, ConcurrencyLimitStrategy.CANCEL_IN_PROGRESS) + + :ivar expression: CEL expression to determine concurrency grouping. (i.e. "input.user_id") + :ivar max_runs: Maximum number of concurrent workflow runs. 
+ :ivar limit_strategy: Strategy for handling limit violations. + + + **Example** + ```python + ConcurrencyExpression( + "input.user_id", + 5, + ConcurrencyLimitStrategy.CANCEL_IN_PROGRESS + ) + ``` """ expression: str @@ -139,8 +158,8 @@ class StepType(str, Enum): ON_SUCCESS = "on_success" -AsyncFunc = Callable[[TWorkflowInput, Context], AwaitableLike[R]] -SyncFunc = Callable[[TWorkflowInput, Context], R] +AsyncFunc = Callable[[TWorkflowInput, "Context"], AwaitableLike[R]] +SyncFunc = Callable[[TWorkflowInput, "Context"], R] TaskFunc = AsyncFunc[TWorkflowInput, R] | SyncFunc[TWorkflowInput, R] @@ -156,8 +175,8 @@ def is_sync_fn( return not inspect.iscoroutinefunction(fn) -DurableAsyncFunc = Callable[[TWorkflowInput, DurableContext], AwaitableLike[R]] -DurableSyncFunc = Callable[[TWorkflowInput, DurableContext], R] +DurableAsyncFunc = Callable[[TWorkflowInput, "DurableContext"], AwaitableLike[R]] +DurableSyncFunc = Callable[[TWorkflowInput, "DurableContext"], R] DurableTaskFunc = ( DurableAsyncFunc[TWorkflowInput, R] | DurableSyncFunc[TWorkflowInput, R] ) diff --git a/sdks/python/hatchet_sdk/runnables/workflow.py b/sdks/python/hatchet_sdk/runnables/workflow.py index cbc3e9baa..70e9f5784 100644 --- a/sdks/python/hatchet_sdk/runnables/workflow.py +++ b/sdks/python/hatchet_sdk/runnables/workflow.py @@ -38,6 +38,13 @@ from hatchet_sdk.contracts.v1.workflows_pb2 import StickyStrategy as StickyStrat from hatchet_sdk.contracts.workflows_pb2 import WorkflowVersion from hatchet_sdk.labels import DesiredWorkerLabel, transform_desired_worker_label from hatchet_sdk.rate_limit import RateLimit +from hatchet_sdk.runnables.contextvars import ( + ctx_durable_context, +) +from hatchet_sdk.runnables.eviction import ( + DEFAULT_DURABLE_TASK_EVICTION_POLICY, + EvictionPolicy, +) from hatchet_sdk.runnables.task import Task from hatchet_sdk.runnables.types import ( ConcurrencyExpression, @@ -679,13 +686,7 @@ class Workflow(BaseWorkflow[TWorkflowInput]): :returns: The result of the 
workflow execution as a dictionary. """ - - ref = self.client._client.admin.run_workflow( - workflow_name=self.config.name, - input=self._serialize_input(input, target="string"), - options=self._create_options_with_combined_additional_meta(options), - ) - + ref = self.run_no_wait(input, options) return ref.result() async def aio_run_no_wait( @@ -702,7 +703,6 @@ class Workflow(BaseWorkflow[TWorkflowInput]): :returns: A `WorkflowRunRef` object representing the reference to the workflow run. """ - return await self.client._client.admin.aio_run_workflow( workflow_name=self.config.name, input=self._serialize_input(input, target="string"), @@ -723,13 +723,29 @@ class Workflow(BaseWorkflow[TWorkflowInput]): :param options: Additional options for workflow execution like metadata and parent workflow ID. :returns: The result of the workflow execution as a dictionary. - """ - ref = await self.client._client.admin.aio_run_workflow( - workflow_name=self.config.name, - input=self._serialize_input(input, target="string"), - options=self._create_options_with_combined_additional_meta(options), - ) + :raises RuntimeError: If durable child workflow spawning returns no run references. 
+ """ + durable_ctx = ctx_durable_context.get() + if durable_ctx is not None and durable_ctx._supports_durable_eviction: + config = WorkflowRunTriggerConfig( + workflow_name=self.config.name, + input=self._serialize_input(input, target="string"), + options=self._create_options_with_combined_additional_meta(options), + ) + refs = await durable_ctx._spawn_children_no_wait([config]) + if not refs: + raise RuntimeError( + "Failed to spawn durable child workflow: no run references returned" + ) + node_id, branch_id, workflow_name = refs[0] + return await durable_ctx._aio_result_for_spawned_child( + node_id=node_id, + branch_id=branch_id, + workflow_name=workflow_name, + ) + + ref = await self.aio_run_no_wait(input, options) return await ref.aio_result() def _get_result( @@ -769,10 +785,7 @@ class Workflow(BaseWorkflow[TWorkflowInput]): :param return_exceptions: If `True`, exceptions will be returned as part of the results instead of raising them. :returns: A list of results for each workflow run. """ - refs = self.client._client.admin.run_workflows( - workflows=workflows, - ) - + refs = self.run_many_no_wait(workflows) return [self._get_result(ref, return_exceptions) for ref in refs] @overload @@ -802,12 +815,25 @@ class Workflow(BaseWorkflow[TWorkflowInput]): :param return_exceptions: If `True`, exceptions will be returned as part of the results instead of raising them. :returns: A list of results for each workflow run. 
""" - refs = await self.client._client.admin.aio_run_workflows( - workflows=workflows, - ) + durable_ctx = ctx_durable_context.get() + if durable_ctx is not None and durable_ctx._supports_durable_eviction: + spawned_refs = await durable_ctx._spawn_children_no_wait(workflows) + return await asyncio.gather( + *[ + durable_ctx._aio_result_for_spawned_child( + node_id=node_id, + branch_id=branch_id, + workflow_name=workflow_name, + ) + for node_id, branch_id, workflow_name in spawned_refs + ], + return_exceptions=return_exceptions, + ) + workflow_refs = await self.aio_run_many_no_wait(workflows) return await asyncio.gather( - *[ref.aio_result() for ref in refs], return_exceptions=return_exceptions + *[ref.aio_result() for ref in workflow_refs], + return_exceptions=return_exceptions, ) def run_many_no_wait( @@ -839,9 +865,7 @@ class Workflow(BaseWorkflow[TWorkflowInput]): :returns: A list of `WorkflowRunRef` objects, each representing a reference to a workflow run. """ - return await self.client._client.admin.aio_run_workflows( - workflows=workflows, - ) + return await self.client._client.admin.aio_run_workflows(workflows=workflows) def _parse_task_name( self, @@ -960,6 +984,7 @@ class Workflow(BaseWorkflow[TWorkflowInput]): wait_for: list[Condition | OrGroup] | None = None, skip_if: list[Condition | OrGroup] | None = None, cancel_if: list[Condition | OrGroup] | None = None, + eviction_policy: EvictionPolicy | None = DEFAULT_DURABLE_TASK_EVICTION_POLICY, ) -> Callable[ [ Callable[ @@ -1001,6 +1026,8 @@ class Workflow(BaseWorkflow[TWorkflowInput]): :param cancel_if: A list of conditions that, if met, will cause the task to be canceled. + :param eviction_policy: An optional eviction policy controlling when this durable task can be evicted from a worker slot while waiting. + :returns: A decorator which creates a `Task` object. 
""" @@ -1039,6 +1066,7 @@ class Workflow(BaseWorkflow[TWorkflowInput]): wait_for=wait_for, skip_if=skip_if, cancel_if=cancel_if, + durable_eviction=eviction_policy, ) self._durable_tasks.append(task) @@ -1238,9 +1266,7 @@ class TaskRunRef(Generic[TWorkflowInput, R]): return self.workflow_run_id async def aio_result(self) -> R: - result = await self._wrr.workflow_run_listener.aio_result( - self._wrr.workflow_run_id - ) + result = await self._wrr.aio_result() return self._s._extract_result(result) def result(self) -> R: @@ -1285,11 +1311,14 @@ class Standalone(BaseWorkflow[TWorkflowInput], Generic[TWorkflowInput, R]): if isinstance(result, BaseException): return result - ## if a task is cancelled, we can get `None` back here + # if a task is cancelled, we can get `None` back here ## this is a bit of an edge case since both `None` and an empty dict ## would cause Pydantic validation errors, but if you were expecting a `dict` ## return, then the empty dict would not error and would work correctly - output = result.get(self._task.name) or {} + + # Durable child callbacks can return the task payload directly, while + # non-durable child runs typically return {task_name: payload}. 
+ output = result.get(self._task.name) or result or {} return cast( R, diff --git a/sdks/python/hatchet_sdk/utils/cache.py b/sdks/python/hatchet_sdk/utils/cache.py index d979ffe51..dd8fa70f1 100644 --- a/sdks/python/hatchet_sdk/utils/cache.py +++ b/sdks/python/hatchet_sdk/utils/cache.py @@ -1,5 +1,9 @@ +import asyncio from collections import OrderedDict -from typing import TypeVar +from collections.abc import Iterator +from dataclasses import dataclass +from datetime import UTC, datetime, timedelta +from typing import Generic, TypeVar K = TypeVar("K") V = TypeVar("V") @@ -18,3 +22,53 @@ class BoundedDict(OrderedDict[K, V]): if len(self) > self.maxsize: self.popitem(last=False) + + +@dataclass +class TTLCacheEntry(Generic[V]): + value: V + expires_at: datetime + + +class TTLCache(Generic[K, V]): + def __init__(self, ttl: timedelta) -> None: + self.ttl = ttl + self.cache: dict[K, TTLCacheEntry[V]] = {} + + self.eviction_job = asyncio.create_task(self._start_eviction_job()) + + def __setitem__(self, key: K, value: V) -> None: + self.cache[key] = TTLCacheEntry( + value=value, expires_at=datetime.now(tz=UTC) + self.ttl + ) + + def __getitem__(self, key: K) -> V: + return self.cache[key].value + + def __contains__(self, key: object) -> bool: + return key in self.cache + + def __delitem__(self, key: K) -> None: + del self.cache[key] + + def __iter__(self) -> Iterator[K]: + return iter(self.cache) + + def pop(self, key: K) -> V: + return self.cache.pop(key).value + + def clear(self) -> None: + self.cache.clear() + + def stop_eviction_job(self) -> None: + self.eviction_job.cancel() + + async def _start_eviction_job(self) -> None: + while True: + await asyncio.sleep(self.ttl.total_seconds()) + + now = datetime.now(tz=UTC) + expired = [k for k, entry in self.cache.items() if entry.expires_at <= now] + + for key in expired: + del self.cache[key] diff --git a/sdks/python/hatchet_sdk/utils/opentelemetry.py b/sdks/python/hatchet_sdk/utils/opentelemetry.py index 
83e1a677d..b8da86bfe 100644 --- a/sdks/python/hatchet_sdk/utils/opentelemetry.py +++ b/sdks/python/hatchet_sdk/utils/opentelemetry.py @@ -41,3 +41,8 @@ class OTelAttribute(str, Enum): ## Schedule Workflow RUN_AT_TIMESTAMPS = "run_at_timestamps" + + ## Durable Context + SIGNAL_KEY = "signal_key" + NUM_CONDITIONS = "num_conditions" + MEMO_FN_NAME = "memo_fn_name" diff --git a/sdks/python/hatchet_sdk/utils/timedelta_to_expression.py b/sdks/python/hatchet_sdk/utils/timedelta_to_expression.py index f2cedddcb..10401d2f1 100644 --- a/sdks/python/hatchet_sdk/utils/timedelta_to_expression.py +++ b/sdks/python/hatchet_sdk/utils/timedelta_to_expression.py @@ -21,3 +21,19 @@ def timedelta_to_expr(td: Duration) -> str: if seconds % MINUTE == 0: return f"{seconds // MINUTE}m" return f"{seconds}s" + + +def expr_to_timedelta(expr: str) -> timedelta: + unit = expr[-1] + value = int(expr[:-1]) + + if unit == "d": + return timedelta(days=value) + if unit == "h": + return timedelta(hours=value) + if unit == "m": + return timedelta(minutes=value) + if unit == "s": + return timedelta(seconds=value) + + raise ValueError(f"Invalid time expression: {expr}") diff --git a/sdks/python/hatchet_sdk/worker/durable_eviction/__init__.py b/sdks/python/hatchet_sdk/worker/durable_eviction/__init__.py new file mode 100644 index 000000000..063dc5342 --- /dev/null +++ b/sdks/python/hatchet_sdk/worker/durable_eviction/__init__.py @@ -0,0 +1,13 @@ +from hatchet_sdk.worker.durable_eviction.cache import DurableEvictionCache +from hatchet_sdk.worker.durable_eviction.manager import ( + DEFAULT_DURABLE_EVICTION_CONFIG, + DurableEvictionConfig, + DurableEvictionManager, +) + +__all__ = [ + "DEFAULT_DURABLE_EVICTION_CONFIG", + "DurableEvictionCache", + "DurableEvictionConfig", + "DurableEvictionManager", +] diff --git a/sdks/python/hatchet_sdk/worker/durable_eviction/cache.py b/sdks/python/hatchet_sdk/worker/durable_eviction/cache.py new file mode 100644 index 000000000..a38e0b89a --- /dev/null +++ 
b/sdks/python/hatchet_sdk/worker/durable_eviction/cache.py @@ -0,0 +1,218 @@ +from __future__ import annotations + +from datetime import datetime, timedelta +from enum import Enum + +from pydantic import BaseModel +from typing_extensions import assert_never + +from hatchet_sdk.logger import logger +from hatchet_sdk.runnables.action import ActionKey +from hatchet_sdk.runnables.eviction import EvictionPolicy + + +class EvictionCause(str, Enum): + TTL_EXCEEDED = "ttl_exceeded" + CAPACITY_PRESSURE = "capacity_pressure" + WORKER_SHUTDOWN = "worker_shutdown" + + +class DurableRunRecord(BaseModel): + key: ActionKey + step_run_id: str + invocation_count: int + eviction_policy: EvictionPolicy | None + registered_at: datetime + + # Waiting state -- ref-counted so concurrent waits (e.g. asyncio.gather + # over multiple child results) don't prematurely clear the waiting flag + # when one child completes before the others. + waiting_since: datetime | None = None + wait_kind: str | None = None + wait_resource_id: str | None = None + _wait_count: int = 0 + + # Set by the eviction manager before requesting eviction + eviction_reason: str | None = None + + @property + def is_waiting(self) -> bool: + return self._wait_count > 0 + + +class DurableEvictionCache: + def __init__(self) -> None: + self._runs: dict[ActionKey, DurableRunRecord] = {} + + def register_run( + self, + key: ActionKey, + step_run_id: str, + invocation_count: int, + now: datetime, + eviction_policy: EvictionPolicy | None, + ) -> None: + self._runs[key] = DurableRunRecord( + key=key, + step_run_id=step_run_id, + invocation_count=invocation_count, + eviction_policy=eviction_policy, + registered_at=now, + ) + + def unregister_run(self, key: ActionKey) -> None: + self._runs.pop(key, None) + + def get(self, key: ActionKey) -> DurableRunRecord | None: + return self._runs.get(key) + + def get_all_waiting(self) -> list[DurableRunRecord]: + return [r for r in self._runs.values() if r.is_waiting] + + def 
find_key_by_step_run_id(self, step_run_id: str) -> ActionKey | None: + for key, rec in self._runs.items(): + if rec.step_run_id == step_run_id: + return key + return None + + def mark_waiting( + self, + key: ActionKey, + now: datetime, + wait_kind: str, + resource_id: str, + ) -> None: + rec = self._runs.get(key) + if not rec: + return + + rec._wait_count += 1 + if rec._wait_count == 1: + rec.waiting_since = now + rec.wait_kind = wait_kind + rec.wait_resource_id = resource_id + + def mark_active(self, key: ActionKey, now: datetime) -> None: + rec = self._runs.get(key) + if not rec: + return + + rec._wait_count = max(0, rec._wait_count - 1) + if rec._wait_count == 0: + rec.waiting_since = None + rec.wait_kind = None + rec.wait_resource_id = None + + def _capacity_pressure( + self, durable_slots: int, reserve_slots: int, waiting_count: int + ) -> bool: + if durable_slots <= 0: + return False + + max_waiting = durable_slots - reserve_slots + if max_waiting <= 0: + return False + + return waiting_count >= max_waiting + + def select_eviction_candidate( + self, + now: datetime, + durable_slots: int, + reserve_slots: int, + min_wait_for_capacity_eviction: timedelta, + ) -> ActionKey | None: + waiting: list[DurableRunRecord] = [ + r + for r in self._runs.values() + if r.is_waiting and r.eviction_policy is not None + ] + + if not waiting: + return None + + # Prefer TTL-eligible candidates first. 
+ ttl_eligible: list[DurableRunRecord] = [ + r + for r in waiting + if r.eviction_policy is not None + and r.eviction_policy.ttl is not None + and r.waiting_since is not None + and (now - r.waiting_since) >= r.eviction_policy.ttl + ] + + if ttl_eligible: + ttl_eligible.sort( + key=lambda r: ( + r.eviction_policy.priority if r.eviction_policy else 0, + r.waiting_since or now, + ) + ) + chosen = ttl_eligible[0] + ttl = chosen.eviction_policy.ttl if chosen.eviction_policy else None + chosen.eviction_reason = _build_eviction_reason( + EvictionCause.TTL_EXCEEDED, chosen, ttl=ttl + ) + logger.debug( + "DurableEvictionCache: TTL eviction candidate selected " + f"step_run_id={chosen.step_run_id} kind={chosen.wait_kind}" + ) + return chosen.key + + # Capacity eviction: only if we're above waiting capacity and run allows it. + capacity_pressure = self._capacity_pressure( + durable_slots=durable_slots, + reserve_slots=reserve_slots, + waiting_count=len(waiting), + ) + if not capacity_pressure: + return None + + capacity_candidates: list[DurableRunRecord] = [ + r + for r in waiting + if r.eviction_policy + and r.eviction_policy.allow_capacity_eviction + and r.waiting_since is not None + and (now - r.waiting_since) >= min_wait_for_capacity_eviction + ] + + if not capacity_candidates: + return None + + capacity_candidates.sort( + key=lambda r: ( + r.eviction_policy.priority if r.eviction_policy else 0, + r.waiting_since or now, + ) + ) + chosen = capacity_candidates[0] + chosen.eviction_reason = _build_eviction_reason( + EvictionCause.CAPACITY_PRESSURE, chosen + ) + logger.debug( + "DurableEvictionCache: capacity eviction candidate selected " + f"step_run_id={chosen.step_run_id} kind={chosen.wait_kind}" + ) + return chosen.key + + +def _build_eviction_reason( + cause: EvictionCause, + rec: DurableRunRecord, + ttl: timedelta | None = None, +) -> str: + wait_desc = rec.wait_kind or "unknown" + if rec.wait_resource_id: + wait_desc = f"{wait_desc}({rec.wait_resource_id})" + + match 
cause: + case EvictionCause.TTL_EXCEEDED: + ttl_str = f" ({ttl})" if ttl else "" + return f"Wait TTL{ttl_str} exceeded while waiting on {wait_desc}" + case EvictionCause.CAPACITY_PRESSURE: + return f"Worker at capacity while waiting on {wait_desc}" + case EvictionCause.WORKER_SHUTDOWN: + return f"Worker shutdown while waiting on {wait_desc}" + case _ as unreachable: + assert_never(unreachable) diff --git a/sdks/python/hatchet_sdk/worker/durable_eviction/instrumentation.py b/sdks/python/hatchet_sdk/worker/durable_eviction/instrumentation.py new file mode 100644 index 000000000..f8f4cf40f --- /dev/null +++ b/sdks/python/hatchet_sdk/worker/durable_eviction/instrumentation.py @@ -0,0 +1,32 @@ +from __future__ import annotations + +from collections.abc import AsyncIterator +from contextlib import asynccontextmanager + +from hatchet_sdk.runnables.action import ActionKey +from hatchet_sdk.worker.durable_eviction.manager import DurableEvictionManager + + +@asynccontextmanager +async def aio_durable_eviction_wait( + wait_kind: str, + resource_id: str, + action_key: ActionKey | None = None, + eviction_manager: DurableEvictionManager | None = None, +) -> AsyncIterator[None]: + """ + Mark an SDK-managed wait for the current durable run (if applicable). + + If action_key or eviction_manager is None, this is a no-op. 
+ """ + + if action_key and eviction_manager is not None: + eviction_manager.mark_waiting( + action_key, wait_kind=wait_kind, resource_id=resource_id + ) + + try: + yield + finally: + if action_key and eviction_manager is not None: + eviction_manager.mark_active(action_key) diff --git a/sdks/python/hatchet_sdk/worker/durable_eviction/manager.py b/sdks/python/hatchet_sdk/worker/durable_eviction/manager.py new file mode 100644 index 000000000..d2c2d26a1 --- /dev/null +++ b/sdks/python/hatchet_sdk/worker/durable_eviction/manager.py @@ -0,0 +1,218 @@ +from __future__ import annotations + +import asyncio +from collections.abc import Awaitable, Callable +from datetime import datetime, timedelta, timezone + +from pydantic import BaseModel, ConfigDict + +from hatchet_sdk.logger import logger +from hatchet_sdk.runnables.action import ActionKey +from hatchet_sdk.runnables.eviction import EvictionPolicy +from hatchet_sdk.worker.durable_eviction.cache import ( + DurableEvictionCache, + DurableRunRecord, + EvictionCause, + _build_eviction_reason, +) + + +class DurableEvictionConfig(BaseModel): + model_config = ConfigDict(frozen=True) + + check_interval: timedelta = timedelta(seconds=1) + """How often we try selecting an eviction candidate.""" + + reserve_slots: int = 0 + """How many slots to reserve from capacity-based eviction decisions.""" + + min_wait_for_capacity_eviction: timedelta = timedelta(seconds=10) + """Avoid immediately evicting runs that have just entered a wait.""" + + +DEFAULT_DURABLE_EVICTION_CONFIG = DurableEvictionConfig() + + +class DurableEvictionManager: + def __init__( + self, + *, + durable_slots: int, + cancel_local: Callable[[ActionKey], None], + request_eviction_with_ack: Callable[ + [ActionKey, DurableRunRecord], Awaitable[None] + ], + config: DurableEvictionConfig = DEFAULT_DURABLE_EVICTION_CONFIG, + cache: DurableEvictionCache | None = None, + ) -> None: + self._durable_slots = durable_slots + self._cancel_local = cancel_local + 
self._request_eviction_with_ack = request_eviction_with_ack + self._config = config + self._cache = cache or DurableEvictionCache() + + self._task: asyncio.Task[None] | None = None + self._lock = asyncio.Lock() + + @property + def cache(self) -> DurableEvictionCache: + return self._cache + + def start(self) -> None: + # Lazy start (requires an event loop) + if self._task and not self._task.done(): + return + self._task = asyncio.create_task(self._run_loop()) + + def stop(self) -> None: + if self._task and not self._task.done(): + self._task.cancel() + + def register_run( + self, + key: ActionKey, + *, + step_run_id: str, + invocation_count: int, + eviction_policy: EvictionPolicy | None, + ) -> None: + self._cache.register_run( + key, + step_run_id, + invocation_count=invocation_count, + now=self._now(), + eviction_policy=eviction_policy, + ) + + def unregister_run(self, key: ActionKey) -> None: + self._cache.unregister_run(key) + + def mark_waiting( + self, + key: ActionKey, + *, + wait_kind: str, + resource_id: str, + ) -> None: + self._cache.mark_waiting( + key, + now=self._now(), + wait_kind=wait_kind, + resource_id=resource_id, + ) + + def mark_active(self, key: ActionKey) -> None: + self._cache.mark_active(key, now=self._now()) + + def _evict_run(self, key: ActionKey) -> None: + self._cancel_local(key) + self.unregister_run(key) + + async def _run_loop(self) -> None: + interval = self._config.check_interval.total_seconds() + + try: + while True: + await asyncio.sleep(interval) + await self._tick_safe() + except asyncio.CancelledError: + return + + async def _tick_safe(self) -> None: + try: + await self._tick() + except Exception: + logger.exception("DurableEvictionManager: error in eviction loop") + + async def _tick(self) -> None: + # Only one eviction *cycle* at a time. 
+ # + # Within a tick we drain all currently-eligible candidates + async with self._lock: + evicted_this_tick: set[ActionKey] = set() + + while True: + key = self._cache.select_eviction_candidate( + now=self._now(), + durable_slots=self._durable_slots, + reserve_slots=self._config.reserve_slots, + min_wait_for_capacity_eviction=self._config.min_wait_for_capacity_eviction, + ) + if key is None: + return + + # Safety: avoid infinite loops if cache repeatedly returns same key. + if key in evicted_this_tick: + return + evicted_this_tick.add(key) + + rec = self._cache.get(key) + if rec is None: + continue + + if rec.eviction_policy is None: + continue + + logger.debug( + "DurableEvictionManager: evicting durable run " + f"task_run_external_id={rec.step_run_id} wait_kind={rec.wait_kind} " + f"resource_id={rec.wait_resource_id} ttl={rec.eviction_policy.ttl} " + f"capacity_allowed={rec.eviction_policy.allow_capacity_eviction}" + ) + + await self._request_eviction_with_ack(key, rec) + self._evict_run(key) + + def handle_server_eviction(self, step_run_id: str, invocation_count: int) -> None: + """Handle a server-initiated eviction notification for a stale invocation.""" + key = self._cache.find_key_by_step_run_id(step_run_id) + if key is None: + return + + rec = self._cache.get(key) + if rec is not None and rec.invocation_count != invocation_count: + return + + logger.info( + "DurableEvictionManager: server-initiated eviction for " + "step_run_id=%s invocation_count=%d", + step_run_id, + invocation_count, + ) + self._evict_run(key) + + async def evict_all_waiting(self) -> int: + """Evict every currently-waiting durable run. 
Used during graceful shutdown.""" + self.stop() + + waiting = self._cache.get_all_waiting() + evicted = 0 + + for rec in waiting: + rec.eviction_reason = _build_eviction_reason( + EvictionCause.WORKER_SHUTDOWN, rec + ) + + logger.debug( + "DurableEvictionManager: shutdown-evicting durable run " + f"task_run_external_id={rec.step_run_id} wait_kind={rec.wait_kind} " + f"resource_id={rec.wait_resource_id}" + ) + + try: + await self._request_eviction_with_ack(rec.key, rec) + except Exception: + logger.exception( + f"DurableEvictionManager: failed to send eviction for " + f"step_run_id={rec.step_run_id}" + ) + + # Always cancel locally even if the server ACK failed, so the + # future settles and exit_gracefully doesn't hang. + self._evict_run(rec.key) + evicted += 1 + + return evicted + + def _now(self) -> datetime: + return datetime.now(timezone.utc) diff --git a/sdks/python/hatchet_sdk/worker/runner/run_loop_manager.py b/sdks/python/hatchet_sdk/worker/runner/run_loop_manager.py index 154b5f22c..e856e5dee 100644 --- a/sdks/python/hatchet_sdk/worker/runner/run_loop_manager.py +++ b/sdks/python/hatchet_sdk/worker/runner/run_loop_manager.py @@ -22,6 +22,7 @@ class WorkerActionRunLoopManager: name: str, action_registry: dict[str, Task[Any, Any]], slots: int, + durable_slots: int, config: ClientConfig, action_queue: "Queue[Action | STOP_LOOP_TYPE]", event_queue: "Queue[ActionEvent]", @@ -30,10 +31,12 @@ class WorkerActionRunLoopManager: debug: bool, labels: dict[str, str | int] | None, lifespan_context: Any | None, + engine_version: str | None = None, ) -> None: self.name = name self.action_registry = action_registry self.slots = slots + self.durable_slots = durable_slots self.config = config self.action_queue = action_queue self.event_queue = event_queue @@ -42,6 +45,7 @@ class WorkerActionRunLoopManager: self.debug = debug self.labels = labels self.lifespan_context = lifespan_context + self.engine_version = engine_version if self.debug: logger.setLevel(logging.DEBUG) @@ 
-82,6 +86,10 @@ class WorkerActionRunLoopManager: self.action_queue.put(STOP_LOOP) self.log_sender.publish(STOP_LOOP) + async def evict_all_waiting_durable_runs(self) -> None: + if self.runner: + await self.runner.evict_all_waiting_durable_runs() + async def wait_for_tasks(self) -> None: if self.runner: await self.runner.wait_for_tasks() @@ -91,11 +99,13 @@ class WorkerActionRunLoopManager: self.event_queue, self.config, self.slots, + self.durable_slots, self.handle_kill, self.action_registry, self.labels, self.lifespan_context, self.log_sender, + engine_version=self.engine_version, ) logger.debug(f"'{self.name}' waiting for {list(self.action_registry.keys())}") diff --git a/sdks/python/hatchet_sdk/worker/runner/runner.py b/sdks/python/hatchet_sdk/worker/runner/runner.py index da3f2df8f..9fc1cd247 100644 --- a/sdks/python/hatchet_sdk/worker/runner/runner.py +++ b/sdks/python/hatchet_sdk/worker/runner/runner.py @@ -17,7 +17,12 @@ from hatchet_sdk.client import Client from hatchet_sdk.clients.admin import AdminClient from hatchet_sdk.clients.dispatcher.dispatcher import DispatcherClient from hatchet_sdk.clients.events import EventClient -from hatchet_sdk.clients.listeners.durable_event_listener import DurableEventListener +from hatchet_sdk.clients.listeners.durable_event_listener import ( + DurableEventListener, +) +from hatchet_sdk.clients.listeners.legacy.pre_eviction_durable_event_listener import ( + PreEvictionDurableEventListener, +) from hatchet_sdk.clients.listeners.run_event_listener import RunEventListenerClient from hatchet_sdk.clients.listeners.workflow_listener import PooledWorkflowRunListener from hatchet_sdk.config import ClientConfig @@ -28,6 +33,8 @@ from hatchet_sdk.contracts.dispatcher_pb2 import ( STEP_EVENT_TYPE_FAILED, STEP_EVENT_TYPE_STARTED, ) +from hatchet_sdk.deprecated.deprecation import semver_less_than +from hatchet_sdk.engine_version import MinEngineVersion from hatchet_sdk.exceptions import ( IllegalTaskOutputError, 
NonRetryableException, @@ -39,6 +46,7 @@ from hatchet_sdk.runnables.action import Action, ActionKey, ActionType from hatchet_sdk.runnables.contextvars import ( ctx_action_key, ctx_additional_metadata, + ctx_durable_context, ctx_hatchet_context, ctx_step_run_id, ctx_task_retry_count, @@ -54,6 +62,8 @@ from hatchet_sdk.serde import HATCHET_PYDANTIC_SENTINEL from hatchet_sdk.utils.cache import BoundedDict from hatchet_sdk.utils.serde import remove_null_unicode_character from hatchet_sdk.worker.action_listener_process import ActionEvent +from hatchet_sdk.worker.durable_eviction.cache import DurableRunRecord +from hatchet_sdk.worker.durable_eviction.manager import DurableEvictionManager from hatchet_sdk.worker.runner.utils.capture_logs import ( AsyncLogSender, ContextVarToCopy, @@ -78,16 +88,19 @@ class Runner: event_queue: "Queue[ActionEvent]", config: ClientConfig, slots: int, + durable_slots: int, handle_kill: bool, action_registry: dict[str, Task[TWorkflowInput, R]], labels: dict[str, str | int] | None, lifespan_context: Any | None, log_sender: AsyncLogSender, + engine_version: str | None = None, ): - # We store the config so we can dynamically create clients for the dispatcher client. 
self.config = config + self.engine_version = engine_version self.slots = slots + self.durable_slots = durable_slots self.tasks: dict[ActionKey, asyncio.Task[Any]] = {} # Store run ids and futures self.contexts: dict[ActionKey, Context] = {} # Store run ids and contexts self.cancellations = BoundedDict[str, bool](maxsize=1000) @@ -119,7 +132,30 @@ class Runner: admin_client=self.admin_client, ) self.event_client = EventClient(self.config) - self.durable_event_listener = DurableEventListener(self.config) + + has_durable_tasks = any( + task.is_durable for task in self.action_registry.values() + ) + self._supports_durable_eviction = bool( + engine_version + and not semver_less_than(engine_version, MinEngineVersion.DURABLE_EVICTION) + ) + + self.durable_event_listener: ( + DurableEventListener | PreEvictionDurableEventListener | None + ) + if has_durable_tasks and self._supports_durable_eviction: + self.durable_event_listener = DurableEventListener( + self.config, + admin_client=self.admin_client, + on_server_evict=self._server_evict_callback, + ) + elif has_durable_tasks: + self.durable_event_listener = PreEvictionDurableEventListener(self.config) + else: + self.durable_event_listener = None + + self.durable_eviction_manager: DurableEvictionManager | None = None self.worker_context = WorkerContext( labels=labels or {}, client=Client(config=config).dispatcher @@ -138,6 +174,17 @@ class Runner: if self.worker_context.id() is None: self.worker_context._worker_id = action.worker_id + if isinstance(self.durable_event_listener, DurableEventListener): + self.durable_event_listener_task = asyncio.create_task( + self.durable_event_listener.ensure_started(action.worker_id) + ) + self.durable_eviction_manager = DurableEvictionManager( + durable_slots=self.durable_slots, + cancel_local=self._eviction_cancel_callback, + request_eviction_with_ack=self._eviction_request, + ) + self.durable_eviction_manager.start() + t: asyncio.Task[Exception | None] | None = None match 
action.action_type: case ActionType.START_STEP_RUN: @@ -156,6 +203,45 @@ class Runner: self.running_tasks.add(t) t.add_done_callback(lambda task: self.running_tasks.discard(task)) + def _eviction_cancel_callback(self, key: ActionKey) -> None: + """Called from DurableEvictionManager when it evicts a run.""" + if key in self.contexts: + ctx = self.contexts[key] + ctx._set_cancellation_flag() + if isinstance( + self.durable_event_listener, DurableEventListener + ) and isinstance(ctx, DurableContext): + self.durable_event_listener.cleanup_task_state( + ctx.step_run_id, ctx.invocation_count + ) + self.cancellations[key] = True + if key in self.tasks: + self.tasks[key].cancel() + + def _server_evict_callback( + self, durable_task_external_id: str, invocation_count: int + ) -> None: + """Called from DurableEventListener when the server notifies a stale invocation.""" + if self.durable_eviction_manager is not None: + self.durable_eviction_manager.handle_server_eviction( + durable_task_external_id, invocation_count + ) + + async def _eviction_request(self, key: ActionKey, rec: DurableRunRecord) -> None: + """Called from DurableEvictionManager when it needs to request eviction from the server.""" + if not isinstance(self.durable_event_listener, DurableEventListener): + return + invocation_count = 1 + if key in self.contexts: + ctx = self.contexts[key] + if isinstance(ctx, DurableContext): + invocation_count = ctx.invocation_count + await self.durable_event_listener.send_evict_invocation( + durable_task_external_id=rec.step_run_id, + invocation_count=invocation_count, + reason=rec.eviction_reason, + ) + def step_run_callback( self, action: Action, t: Task[TWorkflowInput, R] ) -> Callable[[asyncio.Task[Any]], None]: @@ -252,6 +338,9 @@ class Runner: ctx_action_key.set(action.key) ctx_additional_metadata.set(action.additional_metadata) ctx_task_retry_count.set(action.retry_count) + ctx_durable_context.set( + ctx if isinstance(ctx, DurableContext) and task.is_durable else None + 
) async with task._unpack_dependencies_with_cleanup(ctx) as dependencies: try: @@ -367,11 +456,20 @@ class Runner: del self.threads[key] if key in self.contexts: - if self.contexts[key].exit_flag: + ctx = self.contexts[key] + if ctx.exit_flag: self.cancellations[key] = True - + if isinstance( + self.durable_event_listener, DurableEventListener + ) and isinstance(ctx, DurableContext): + self.durable_event_listener.cleanup_task_state( + ctx.step_run_id, ctx.invocation_count + ) del self.contexts[key] + if self.durable_eviction_manager is not None: + self.durable_eviction_manager.unregister_run(key) + @overload def create_context( self, action: Action, task: Task[Any, Any], is_durable: Literal[True] = True @@ -388,21 +486,38 @@ class Runner: task: Task[Any, Any], is_durable: bool = True, ) -> Context | DurableContext: - constructor = DurableContext if is_durable else Context - - ctx = constructor( - action=action, - dispatcher_client=self.dispatcher_client, - admin_client=self.admin_client, - event_client=self.event_client, - durable_event_listener=self.durable_event_listener, - worker=self.worker_context, - runs_client=self.runs_client, - lifespan_context=self.lifespan_context, - log_sender=self.log_sender, - max_attempts=task.retries + 1, - task_name=task.name, - workflow_name=task.workflow.name, + ctx = ( + DurableContext( + action=action, + dispatcher_client=self.dispatcher_client, + admin_client=self.admin_client, + event_client=self.event_client, + durable_event_listener=self.durable_event_listener, + worker=self.worker_context, + runs_client=self.runs_client, + lifespan_context=self.lifespan_context, + log_sender=self.log_sender, + max_attempts=task.retries + 1, + task_name=task.name, + workflow_name=task.workflow.name, + durable_eviction_manager=self.durable_eviction_manager, + engine_version=self.engine_version, + ) + if is_durable + else Context( + action=action, + dispatcher_client=self.dispatcher_client, + admin_client=self.admin_client, + 
event_client=self.event_client, + durable_event_listener=self.durable_event_listener, + worker=self.worker_context, + runs_client=self.runs_client, + lifespan_context=self.lifespan_context, + log_sender=self.log_sender, + max_attempts=task.retries + 1, + task_name=task.name, + workflow_name=task.workflow.name, + ) ) ctx_hatchet_context.set(ctx) @@ -433,6 +548,14 @@ class Runner: ) ) + if action_func.is_durable and self.durable_eviction_manager is not None: + self.durable_eviction_manager.register_run( + action.key, + step_run_id=action.step_run_id, + invocation_count=action.durable_task_invocation_count or 1, + eviction_policy=action_func.durable_eviction, + ) + loop = asyncio.get_event_loop() task = loop.create_task( self.async_wrapped_action_func(context, action_func, action) @@ -553,6 +676,15 @@ class Runner: return serialized_output + async def evict_all_waiting_durable_runs(self) -> None: + """Evict all waiting durable runs so the worker can drain cleanly.""" + if self.durable_eviction_manager is None: + return + + evicted = await self.durable_eviction_manager.evict_all_waiting() + if evicted: + logger.info(f"evicted {evicted} waiting durable run(s) during shutdown") + async def wait_for_tasks(self) -> None: running = len(self.tasks.keys()) while running > 0: diff --git a/sdks/python/hatchet_sdk/worker/worker.py b/sdks/python/hatchet_sdk/worker/worker.py index b76795e21..e387d5766 100644 --- a/sdks/python/hatchet_sdk/worker/worker.py +++ b/sdks/python/hatchet_sdk/worker/worker.py @@ -7,6 +7,7 @@ import sys from collections.abc import AsyncGenerator, Callable from contextlib import AsyncExitStack, asynccontextmanager, suppress from dataclasses import dataclass, field +from datetime import datetime, timezone from enum import Enum from multiprocessing import Queue from multiprocessing.process import BaseProcess @@ -14,14 +15,16 @@ from types import FrameType from typing import Any, TypeVar from warnings import warn -import grpc - from hatchet_sdk.client import 
Client from hatchet_sdk.config import ClientConfig from hatchet_sdk.contracts.v1.workflows_pb2 import CreateWorkflowVersionRequest -from hatchet_sdk.deprecated.deprecation import semver_less_than +from hatchet_sdk.deprecated.deprecation import emit_deprecation_notice, semver_less_than from hatchet_sdk.deprecated.worker import legacy_aio_start -from hatchet_sdk.exceptions import LifespanSetupError, LoopAlreadyRunningError +from hatchet_sdk.engine_version import MinEngineVersion +from hatchet_sdk.exceptions import ( + LifespanSetupError, + LoopAlreadyRunningError, +) from hatchet_sdk.logger import logger from hatchet_sdk.runnables.action import Action from hatchet_sdk.runnables.contextvars import task_count @@ -33,6 +36,7 @@ from hatchet_sdk.worker.action_listener_process import ( worker_action_listener_process, ) from hatchet_sdk.worker.runner.run_loop_manager import WorkerActionRunLoopManager +from hatchet_sdk.worker.slot_types import SlotType T = TypeVar("T") @@ -99,6 +103,7 @@ class Worker: self.action_runner: WorkerActionRunLoopManager | None = None self._legacy_durable_action_runner: WorkerActionRunLoopManager | None = None + self._engine_version: str | None = None self.ctx = multiprocessing.get_context("spawn") @@ -208,14 +213,7 @@ class Worker: if self.handle_kill: sys.exit(0) - # Minimum engine version that supports multiple slot types. 
- _MIN_SLOT_CONFIG_VERSION = "v0.78.23" - - def _emit_legacy_deprecation(self) -> None: - from datetime import datetime, timezone - - from hatchet_sdk.deprecated.deprecation import emit_deprecation_notice - + def _emit_legacy_slot_deprecation(self) -> None: emit_deprecation_notice( feature="legacy-engine", message=( @@ -227,6 +225,32 @@ class Worker: error_days=180, ) + def _check_eviction_support(self, engine_version: str) -> None: + """Warn and strip eviction policies if the engine is too old to support them.""" + if not semver_less_than(engine_version, MinEngineVersion.DURABLE_EVICTION): + return + + tasks_with_eviction = [ + task + for task in self.action_registry.values() + if task.durable_eviction is not None + ] + if not tasks_with_eviction: + return + + names = ", ".join(t.name for t in tasks_with_eviction) + emit_deprecation_notice( + feature="pre-eviction-engine", + message=( + f"Engine {engine_version} does not support durable eviction " + f"(requires >= {MinEngineVersion.DURABLE_EVICTION}). " + f"Eviction policies will be ignored for tasks: {names}. " + "Please upgrade your Hatchet engine." + ), + start=datetime(2026, 3, 3, tzinfo=timezone.utc), + error_days=180, + ) + async def _check_engine_version(self) -> str | None: """Returns the engine version string, or None if engine is legacy (pre-slot-config). @@ -234,21 +258,13 @@ class Worker: version for slot_config support. Returns the version string for modern engines so callers can branch on specific versions. 
""" + version = await self.client.dispatcher.get_version() - try: - version = await self.client.dispatcher.get_version() + if not version or semver_less_than(version, MinEngineVersion.SLOT_CONFIG): + self._emit_legacy_slot_deprecation() + return None - # Empty version or older than minimum → legacy - if not version or semver_less_than(version, self._MIN_SLOT_CONFIG_VERSION): - self._emit_legacy_deprecation() - return None - - return version # new engine - except grpc.RpcError as e: - if e.code() == grpc.StatusCode.UNIMPLEMENTED: - self._emit_legacy_deprecation() - return None # old engine - raise + return version async def _aio_start(self) -> None: main_pid = os.getpid() @@ -270,6 +286,9 @@ class Worker: await legacy_aio_start(self) return + self._engine_version = engine_version + self._check_eviction_support(engine_version) + lifespan_context = None if self.lifespan: try: @@ -317,6 +336,7 @@ class Worker: self.name, self.action_registry, sum(self.slot_config.values()), + self.slot_config.get(SlotType.DURABLE.value, 0), self.config, self.action_queue, self.event_queue, @@ -325,6 +345,7 @@ class Worker: self.client.debug, self.labels, lifespan_context, + engine_version=self._engine_version, ) raise RuntimeError("event loop not set, cannot start action runner") @@ -503,11 +524,14 @@ class Worker: self.killing = True if self.action_runner: + # TODO-DURABLE: we need to ensure that the worker is paused before calling this in all SDKs + await self.action_runner.evict_all_waiting_durable_runs() await self.action_runner.wait_for_tasks() await self.action_runner.exit_gracefully() # Also clean up the durable action runner (legacy mode) if self._legacy_durable_action_runner: + await self._legacy_durable_action_runner.evict_all_waiting_durable_runs() await self._legacy_durable_action_runner.wait_for_tasks() await self._legacy_durable_action_runner.exit_gracefully() diff --git a/sdks/python/poetry.lock index 5f7dee12b..1ca7288e5 100644 --- 
a/sdks/python/poetry.lock +++ b/sdks/python/poetry.lock @@ -228,6 +228,19 @@ files = [ {file = "attrs-25.4.0.tar.gz", hash = "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11"}, ] +[[package]] +name = "backports-asyncio-runner" +version = "1.2.0" +description = "Backport of asyncio.Runner, a context manager that controls event loop life cycle." +optional = false +python-versions = "<3.11,>=3.8" +groups = ["test"] +markers = "python_version < \"3.11\"" +files = [ + {file = "backports_asyncio_runner-1.2.0-py3-none-any.whl", hash = "sha256:0da0a936a8aeb554eccb426dc55af3ba63bcdc69fa1a600b5bb305413a4477b5"}, + {file = "backports_asyncio_runner-1.2.0.tar.gz", hash = "sha256:a5aa7b2b7d8f8bfcaa2b57313f70792df84e32a2a746f585213373f900b42162"}, +] + [[package]] name = "beautifulsoup4" version = "4.14.3" @@ -2554,18 +2567,20 @@ dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests [[package]] name = "pytest-asyncio" -version = "0.25.3" +version = "1.3.0" description = "Pytest support for asyncio" optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" groups = ["test"] files = [ - {file = "pytest_asyncio-0.25.3-py3-none-any.whl", hash = "sha256:9e89518e0f9bd08928f97a3482fdc4e244df17529460bc038291ccaf8f85c7c3"}, - {file = "pytest_asyncio-0.25.3.tar.gz", hash = "sha256:fc1da2cf9f125ada7e710b4ddad05518d4cee187ae9412e9ac9271003497f07a"}, + {file = "pytest_asyncio-1.3.0-py3-none-any.whl", hash = "sha256:611e26147c7f77640e6d0a92a38ed17c3e9848063698d5c93d5aa7aa11cebff5"}, + {file = "pytest_asyncio-1.3.0.tar.gz", hash = "sha256:d7f52f36d231b80ee124cd216ffb19369aa168fc10095013c6b014a34d3ee9e5"}, ] [package.dependencies] -pytest = ">=8.2,<9" +backports-asyncio-runner = {version = ">=1.1,<2", markers = "python_version < \"3.11\""} +pytest = ">=8.2,<10" +typing-extensions = {version = ">=4.12", markers = "python_version < \"3.13\""} [package.extras] docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1)"] @@ -3512,4 +3527,4 @@ 
v0-sdk = [] [metadata] lock-version = "2.1" python-versions = "^3.10" -content-hash = "24f9e18c1ee8105148a3982817ee30b4610b487fd9d59644d176bebca4c2cdaa" +content-hash = "0709f06a9061501aa9add9f63db9ba2bbae68c5351a152ef76f29551ead2bbbd" diff --git a/sdks/python/pyproject.toml b/sdks/python/pyproject.toml index c44fc15c4..b2d79afb4 100644 --- a/sdks/python/pyproject.toml +++ b/sdks/python/pyproject.toml @@ -47,7 +47,7 @@ types-grpcio = "^1.0.0" [tool.poetry.group.test.dependencies] pytest = "^8.3.5" -pytest-asyncio = "^0.25.3" +pytest-asyncio = "^1.3.0" pytest-env = "^1.1.5" pytest-retry = "^1.7.0" psycopg = { extras = ["pool"], version = "^3.2.6" } @@ -102,6 +102,7 @@ exclude = [ "hatchet_sdk/clients/rest/rest.py", "hatchet_sdk/v0/*", "site/*", + "examples/dependency_injection/dependency_annotations312.py", ] strict = true enable_error_code = [ diff --git a/sdks/python/tests/test_durable_eviction_cache.py b/sdks/python/tests/test_durable_eviction_cache.py new file mode 100644 index 000000000..745f597c3 --- /dev/null +++ b/sdks/python/tests/test_durable_eviction_cache.py @@ -0,0 +1,226 @@ +from __future__ import annotations + +from datetime import datetime, timedelta, timezone + +from hatchet_sdk.runnables.eviction import EvictionPolicy +from hatchet_sdk.worker.durable_eviction.cache import DurableEvictionCache + + +def dt(seconds: int) -> datetime: + return datetime(2026, 1, 1, 0, 0, 0, tzinfo=timezone.utc) + timedelta( + seconds=seconds + ) + + +def test_ttl_eviction_prefers_oldest_waiting_and_priority() -> None: + cache = DurableEvictionCache() + + key1 = "run-1/0" + key2 = "run-2/0" + + eviction_low_prio = EvictionPolicy(ttl=timedelta(seconds=10), priority=0) + eviction_high_prio = EvictionPolicy(ttl=timedelta(seconds=10), priority=10) + + cache.register_run( + key1, "run-1", invocation_count=1, now=dt(0), eviction_policy=eviction_high_prio + ) + cache.register_run( + key2, "run-2", invocation_count=1, now=dt(0), eviction_policy=eviction_low_prio + ) + + 
cache.mark_waiting( + key1, now=dt(0), wait_kind="workflow_run_result", resource_id="wf1" + ) + cache.mark_waiting( + key2, now=dt(5), wait_kind="workflow_run_result", resource_id="wf2" + ) + + # Both are past TTL at now=20, but low priority should be evicted first. + chosen = cache.select_eviction_candidate( + now=dt(20), + durable_slots=100, + reserve_slots=0, + min_wait_for_capacity_eviction=timedelta(seconds=0), + ) + assert chosen == key2 + + +def test_none_eviction_params_never_selected() -> None: + cache = DurableEvictionCache() + + key_no = "run-no/0" + key_yes = "run-yes/0" + + cache.register_run( + key_no, "run-no", invocation_count=1, now=dt(0), eviction_policy=None + ) + cache.register_run( + key_yes, + "run-yes", + invocation_count=1, + now=dt(0), + eviction_policy=EvictionPolicy(ttl=timedelta(seconds=1)), + ) + + cache.mark_waiting(key_no, now=dt(0), wait_kind="durable_event", resource_id="x") + cache.mark_waiting(key_yes, now=dt(0), wait_kind="durable_event", resource_id="y") + + chosen = cache.select_eviction_candidate( + now=dt(10), + durable_slots=100, + reserve_slots=0, + min_wait_for_capacity_eviction=timedelta(seconds=0), + ) + assert chosen == key_yes + + +def test_capacity_eviction_respects_allow_capacity_and_min_wait() -> None: + cache = DurableEvictionCache() + + key_blocked = "run-blocked/0" + key_ok = "run-ok/0" + + cache.register_run( + key_blocked, + "run-blocked", + invocation_count=1, + now=dt(0), + eviction_policy=EvictionPolicy( + ttl=timedelta(hours=1), allow_capacity_eviction=False, priority=0 + ), + ) + cache.register_run( + key_ok, + "run-ok", + invocation_count=1, + now=dt(0), + eviction_policy=EvictionPolicy( + ttl=timedelta(hours=1), allow_capacity_eviction=True, priority=0 + ), + ) + + cache.mark_waiting( + key_blocked, now=dt(0), wait_kind="durable_event", resource_id="x" + ) + cache.mark_waiting(key_ok, now=dt(0), wait_kind="durable_event", resource_id="y") + + # Capacity pressure because waiting_count==durable_slots==2, 
but enforce min-wait. + chosen_too_soon = cache.select_eviction_candidate( + now=dt(5), + durable_slots=2, + reserve_slots=0, + min_wait_for_capacity_eviction=timedelta(seconds=10), + ) + assert chosen_too_soon is None + + # Now past min wait: only key_ok is eligible for capacity eviction. + chosen = cache.select_eviction_candidate( + now=dt(15), + durable_slots=2, + reserve_slots=0, + min_wait_for_capacity_eviction=timedelta(seconds=10), + ) + assert chosen == key_ok + + +def test_concurrent_waits_keep_waiting_until_all_resolved() -> None: + """Simulates asyncio.gather over 3 child aio_result() calls on the same run. + + When one child completes (mark_active), the run must remain in waiting + state until *all* concurrent waits have resolved. + """ + cache = DurableEvictionCache() + key = "run-bulk/0" + policy = EvictionPolicy(ttl=timedelta(seconds=5), priority=0) + + cache.register_run( + key, "run-bulk", invocation_count=1, now=dt(0), eviction_policy=policy + ) + + cache.mark_waiting(key, now=dt(1), wait_kind="spawn_child", resource_id="child0") + cache.mark_waiting(key, now=dt(1), wait_kind="spawn_child", resource_id="child1") + cache.mark_waiting(key, now=dt(1), wait_kind="spawn_child", resource_id="child2") + + rec = cache.get(key) + assert rec is not None + assert rec.is_waiting + assert rec._wait_count == 3 + + # child0 completes -- run should still be waiting + cache.mark_active(key, now=dt(2)) + assert rec.is_waiting + assert rec._wait_count == 2 + assert rec.waiting_since == dt(1) + + # TTL still fires while 2 children are pending + chosen = cache.select_eviction_candidate( + now=dt(10), + durable_slots=100, + reserve_slots=0, + min_wait_for_capacity_eviction=timedelta(seconds=0), + ) + assert chosen == key + + # child1 completes + cache.mark_active(key, now=dt(11)) + assert rec.is_waiting + assert rec._wait_count == 1 + + # child2 completes -- now the run is truly active + cache.mark_active(key, now=dt(12)) + assert not rec.is_waiting + assert 
rec._wait_count == 0 + assert rec.waiting_since is None + + +def test_find_key_by_step_run_id_returns_matching_key() -> None: + cache = DurableEvictionCache() + cache.register_run( + "run-a/0", "ext-a", invocation_count=1, now=dt(0), eviction_policy=None + ) + cache.register_run( + "run-b/0", "ext-b", invocation_count=1, now=dt(0), eviction_policy=None + ) + + assert cache.find_key_by_step_run_id("ext-a") == "run-a/0" + assert cache.find_key_by_step_run_id("ext-b") == "run-b/0" + + +def test_find_key_by_step_run_id_returns_none_for_unknown() -> None: + cache = DurableEvictionCache() + cache.register_run( + "run-a/0", "ext-a", invocation_count=1, now=dt(0), eviction_policy=None + ) + + assert cache.find_key_by_step_run_id("no-such-id") is None + + +def test_find_key_by_step_run_id_returns_none_after_unregister() -> None: + cache = DurableEvictionCache() + cache.register_run( + "run-a/0", "ext-a", invocation_count=1, now=dt(0), eviction_policy=None + ) + + assert cache.find_key_by_step_run_id("ext-a") == "run-a/0" + cache.unregister_run("run-a/0") + assert cache.find_key_by_step_run_id("ext-a") is None + + +def test_mark_active_floors_at_zero() -> None: + """Extra mark_active calls (defensive) should not go negative.""" + cache = DurableEvictionCache() + key = "run-extra/0" + policy = EvictionPolicy(ttl=timedelta(seconds=5), priority=0) + + cache.register_run( + key, "run-extra", invocation_count=1, now=dt(0), eviction_policy=policy + ) + cache.mark_waiting(key, now=dt(0), wait_kind="sleep", resource_id="s") + + cache.mark_active(key, now=dt(1)) + cache.mark_active(key, now=dt(2)) # extra call + + rec = cache.get(key) + assert rec is not None + assert rec._wait_count == 0 + assert not rec.is_waiting diff --git a/sdks/python/tests/test_durable_eviction_manager.py b/sdks/python/tests/test_durable_eviction_manager.py new file mode 100644 index 000000000..e7425cd96 --- /dev/null +++ b/sdks/python/tests/test_durable_eviction_manager.py @@ -0,0 +1,115 @@ +from __future__ 
import annotations + +from datetime import timedelta +from unittest.mock import AsyncMock, MagicMock + +from hatchet_sdk.runnables.eviction import EvictionPolicy +from hatchet_sdk.worker.durable_eviction.manager import ( + DurableEvictionConfig, + DurableEvictionManager, +) + + +def _make_manager( + cancel_local: MagicMock | None = None, +) -> tuple[DurableEvictionManager, MagicMock]: + cancel = cancel_local or MagicMock() + request_eviction = AsyncMock() + + mgr = DurableEvictionManager( + durable_slots=10, + cancel_local=cancel, + request_eviction_with_ack=request_eviction, + config=DurableEvictionConfig(check_interval=timedelta(hours=1)), + ) + return mgr, cancel + + +def test_handle_server_eviction_cancels_and_unregisters() -> None: + mgr, cancel = _make_manager() + + key = "run-1/0" + mgr.register_run( + key, + step_run_id="ext-1", + invocation_count=2, + eviction_policy=EvictionPolicy(ttl=timedelta(seconds=30)), + ) + mgr.mark_waiting(key, wait_kind="sleep", resource_id="s1") + + mgr.handle_server_eviction("ext-1", 2) + + cancel.assert_called_once_with(key) + assert mgr.cache.get(key) is None + + +def test_handle_server_eviction_unknown_id_is_noop() -> None: + mgr, cancel = _make_manager() + + mgr.register_run( + "run-1/0", step_run_id="ext-1", invocation_count=1, eviction_policy=None + ) + + mgr.handle_server_eviction("no-such-id", 1) + + cancel.assert_not_called() + assert mgr.cache.get("run-1/0") is not None + + +def test_handle_server_eviction_only_evicts_matching_run() -> None: + mgr, cancel = _make_manager() + + mgr.register_run( + "run-1/0", + step_run_id="ext-1", + invocation_count=1, + eviction_policy=EvictionPolicy(ttl=timedelta(seconds=30)), + ) + mgr.register_run( + "run-2/0", + step_run_id="ext-2", + invocation_count=1, + eviction_policy=EvictionPolicy(ttl=timedelta(seconds=30)), + ) + mgr.mark_waiting("run-1/0", wait_kind="sleep", resource_id="s1") + mgr.mark_waiting("run-2/0", wait_kind="sleep", resource_id="s2") + + 
mgr.handle_server_eviction("ext-1", 1) + + cancel.assert_called_once_with("run-1/0") + assert mgr.cache.get("run-1/0") is None + assert mgr.cache.get("run-2/0") is not None + + +def test_handle_server_eviction_skips_newer_invocation() -> None: + mgr, cancel = _make_manager() + + mgr.register_run( + "run-1/0", + step_run_id="ext-1", + invocation_count=3, + eviction_policy=EvictionPolicy(ttl=timedelta(seconds=30)), + ) + mgr.mark_waiting("run-1/0", wait_kind="sleep", resource_id="s1") + + mgr.handle_server_eviction("ext-1", 2) + + cancel.assert_not_called() + assert mgr.cache.get("run-1/0") is not None + + +def test_handle_server_eviction_evicts_exact_invocation_match() -> None: + mgr, cancel = _make_manager() + + mgr.register_run( + "run-1/0", + step_run_id="ext-1", + invocation_count=5, + eviction_policy=EvictionPolicy(ttl=timedelta(seconds=30)), + ) + mgr.mark_waiting("run-1/0", wait_kind="sleep", resource_id="s1") + + mgr.handle_server_eviction("ext-1", 5) + + cancel.assert_called_once_with("run-1/0") + assert mgr.cache.get("run-1/0") is None diff --git a/sdks/python/tests/test_durations.py b/sdks/python/tests/test_durations.py index 82083b36c..0dbaf6229 100644 --- a/sdks/python/tests/test_durations.py +++ b/sdks/python/tests/test_durations.py @@ -1,6 +1,9 @@ from datetime import timedelta -from hatchet_sdk.utils.timedelta_to_expression import timedelta_to_expr +from hatchet_sdk.utils.timedelta_to_expression import ( + timedelta_to_expr, + expr_to_timedelta, +) def test_timedelta_to_expr() -> None: @@ -10,3 +13,13 @@ def test_timedelta_to_expr() -> None: assert timedelta_to_expr(timedelta(seconds=3661)) == "3661s" assert timedelta_to_expr(timedelta(hours=96)) == "96h" assert timedelta_to_expr(timedelta(hours=96, seconds=1)) == "345601s" + + +def test_expr_to_timedelta() -> None: + assert expr_to_timedelta("1h") == timedelta(hours=1) + assert expr_to_timedelta("1m") == timedelta(minutes=1) + assert expr_to_timedelta("1s") == timedelta(seconds=1) + assert 
expr_to_timedelta("3661s") == timedelta(seconds=3661) + assert expr_to_timedelta("96h") == timedelta(hours=96) + assert expr_to_timedelta("2d") == timedelta(days=2) + assert expr_to_timedelta("2d") == timedelta(hours=48) diff --git a/sdks/python/tests/unit/__init__.py b/sdks/python/tests/unit/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/sdks/python/tests/unit/conftest.py b/sdks/python/tests/unit/conftest.py new file mode 100644 index 000000000..452176e06 --- /dev/null +++ b/sdks/python/tests/unit/conftest.py @@ -0,0 +1,13 @@ +"""Override the session-scoped autouse worker fixture from the root conftest +so that pure unit tests can run without a live Hatchet server.""" + +from __future__ import annotations + +from collections.abc import Iterator + +import pytest + + +@pytest.fixture(scope="session", autouse=True) +def worker() -> Iterator[None]: + yield None diff --git a/sdks/python/tests/unit/test_durable_event_listener.py b/sdks/python/tests/unit/test_durable_event_listener.py new file mode 100644 index 000000000..812603808 --- /dev/null +++ b/sdks/python/tests/unit/test_durable_event_listener.py @@ -0,0 +1,402 @@ +"""Tests for DurableEventListener reconnection logic.""" + +from __future__ import annotations + +import asyncio +from collections.abc import AsyncIterator +from typing import Any +from unittest.mock import AsyncMock, MagicMock, patch + +import grpc +import grpc.aio +import pytest + +from hatchet_sdk.clients.listeners.durable_event_listener import ( + DEFAULT_RECONNECT_INTERVAL, + DurableEventListener, +) + +_MODULE = "hatchet_sdk.clients.listeners.durable_event_listener" + + +class ControllableStream: + """Async iterator whose lifetime can be controlled from tests.""" + + def __init__(self) -> None: + self._queue: asyncio.Queue[tuple[str, object]] = asyncio.Queue() + + def push(self, response: object) -> None: + self._queue.put_nowait(("response", response)) + + def end(self) -> None: + self._queue.put_nowait(("end", None)) + 
+ def fail(self, error: BaseException) -> None: + self._queue.put_nowait(("error", error)) + + def __aiter__(self) -> ControllableStream: + return self + + async def __anext__(self) -> object: + kind, value = await self._queue.get() + if kind == "end": + raise StopAsyncIteration + if kind == "error": + raise value # type: ignore[misc] + return value + + +def _make_grpc_error(code: grpc.StatusCode, details: str = "") -> grpc.aio.AioRpcError: + empty: grpc.aio.Metadata = grpc.aio.Metadata() + return grpc.aio.AioRpcError(code, empty, empty, details) + + +class _Harness: + """Sets up a DurableEventListener with fully mocked gRPC dependencies.""" + + def __init__(self) -> None: + config = MagicMock() + config.token = "test-token" + admin_client = MagicMock() + self.listener = DurableEventListener(config, admin_client) + + self.streams: list[ControllableStream] = [] + self.call_count = 0 + + self._mock_conn = MagicMock() + self._mock_conn.close = AsyncMock() + + self._patches: list[Any] = [ + patch(f"{_MODULE}.new_conn", return_value=self._mock_conn), + patch(f"{_MODULE}.V1DispatcherStub", side_effect=self._make_stub), + patch(f"{_MODULE}.get_metadata", return_value=[]), + patch(f"{_MODULE}.DEFAULT_RECONNECT_INTERVAL", 0.01), + ] + for p in self._patches: + p.start() + + def _make_stub(self, _channel: object) -> MagicMock: + stub = MagicMock() + stub.DurableTask.side_effect = self._next_stream + return stub + + def _next_stream(self, *_a: object, **_kw: object) -> ControllableStream: + idx = min(self.call_count, len(self.streams) - 1) + self.call_count += 1 + return self.streams[idx] + + def add_eof_stream(self) -> ControllableStream: + s = ControllableStream() + s.end() + self.streams.append(s) + return s + + def add_hanging_stream(self) -> ControllableStream: + s = ControllableStream() + self.streams.append(s) + return s + + def add_error_stream(self, error: BaseException) -> ControllableStream: + s = ControllableStream() + s.fail(error) + self.streams.append(s) + 
return s + + async def start(self, worker_id: str = "w1") -> None: + await self.listener.start(worker_id) + + async def teardown(self) -> None: + try: + await self.listener.stop() + except Exception: + pass + for s in self.streams: + try: + s.end() + except Exception: + pass + for p in self._patches: + p.stop() + + +@pytest.fixture +async def harness() -> AsyncIterator[_Harness]: + h = _Harness() + yield h + await h.teardown() + + +# ── reconnection on stream EOF ── + + +async def test_opens_new_stream_after_eof(harness: _Harness) -> None: + harness.add_eof_stream() + harness.add_hanging_stream() + + await harness.start() + await asyncio.sleep(0.15) + + assert harness.call_count >= 2 + + +async def test_multiple_eof_reconnects(harness: _Harness) -> None: + for _ in range(3): + harness.add_eof_stream() + harness.add_hanging_stream() + + await harness.start() + await asyncio.sleep(0.3) + + assert harness.call_count >= 4 + + +# ── reconnection on gRPC error ── + + +async def test_reconnects_on_unavailable(harness: _Harness) -> None: + err = _make_grpc_error(grpc.StatusCode.UNAVAILABLE, "server unavailable") + harness.add_error_stream(err) + harness.add_hanging_stream() + + await harness.start() + await asyncio.sleep(0.15) + + assert harness.call_count >= 2 + + +async def test_reconnects_on_internal_error(harness: _Harness) -> None: + err = _make_grpc_error(grpc.StatusCode.INTERNAL, "internal") + harness.add_error_stream(err) + harness.add_hanging_stream() + + await harness.start() + await asyncio.sleep(0.15) + + assert harness.call_count >= 2 + + +async def test_reconnects_on_generic_exception(harness: _Harness) -> None: + s = ControllableStream() + harness.streams.append(s) + harness.add_hanging_stream() + + await harness.start() + s.fail(RuntimeError("unexpected")) + await asyncio.sleep(0.15) + + assert harness.call_count >= 2 + + +# ── does NOT reconnect on CANCELLED ── + + +async def test_breaks_out_on_grpc_cancelled(harness: _Harness) -> None: + err = 
_make_grpc_error(grpc.StatusCode.CANCELLED, "cancelled") + harness.add_error_stream(err) + harness.add_hanging_stream() + + await harness.start() + await asyncio.sleep(0.15) + + assert harness.call_count == 1 + + +# ── does NOT reconnect after stop ── + + +async def test_no_reconnect_after_stop(harness: _Harness) -> None: + harness.add_hanging_stream() + + await harness.start() + await harness.listener.stop() + await asyncio.sleep(0.15) + + assert harness.call_count == 1 + + +# ── _fail_pending_acks correctness ── + + +async def test_fail_pending_acks_clears_event_acks(harness: _Harness) -> None: + harness.add_hanging_stream() + await harness.start() + + future: asyncio.Future[object] = asyncio.get_event_loop().create_future() + harness.listener._pending_event_acks[("task1", 1)] = future # type: ignore[assignment] + + harness.listener._fail_pending_acks(ConnectionResetError("disconnected")) + + assert len(harness.listener._pending_event_acks) == 0 + with pytest.raises(ConnectionResetError, match="disconnected"): + future.result() + + +async def test_pending_callbacks_survive_disconnect( + harness: _Harness, +) -> None: + """Pending callbacks should survive a disconnect. + + Callbacks represent server-side durable event log entries that persist + across connections. After reconnection, _poll_worker_status re-reports + them and GetSatisfiedDurableEvents delivers completions on the new stream. 
+ """ + harness.add_eof_stream() + harness.add_hanging_stream() + + await harness.start() + + future: asyncio.Future[object] = asyncio.get_event_loop().create_future() + # Swallow the exception that stop() will set during teardown + future.add_done_callback( + lambda f: f.exception() if f.done() and not f.cancelled() else None + ) + harness.listener._pending_callbacks[("task1", 1, 0, 1)] = future # type: ignore[assignment] + + await asyncio.sleep(0.15) + + assert not future.done(), ( + "_pending_callbacks were failed on disconnect — " + "callbacks should survive reconnection so the polling path can deliver them" + ) + assert ("task1", 1, 0, 1) in harness.listener._pending_callbacks + + +async def test_fail_pending_acks_clears_eviction_acks_on_disconnect( + harness: _Harness, +) -> None: + """Pending eviction acks should be failed on disconnect. + + If _fail_pending_acks does not clear _pending_eviction_acks, eviction + acknowledgments will hang indefinitely after a reconnection. + """ + harness.add_eof_stream() + harness.add_hanging_stream() + + await harness.start() + + future: asyncio.Future[None] = asyncio.get_event_loop().create_future() + harness.listener._pending_eviction_acks[("task1", 1)] = future + + await asyncio.sleep(0.15) + + assert future.done(), ( + "_pending_eviction_acks were not failed on disconnect — " + "eviction acks will hang forever after reconnection" + ) + + +# ── pending event acks rejected on EOF (integration) ── + + +async def test_event_acks_rejected_when_stream_ends( + harness: _Harness, +) -> None: + stream1 = ControllableStream() + harness.streams.append(stream1) + harness.add_hanging_stream() + + await harness.start() + await asyncio.sleep(0.05) + + future: asyncio.Future[object] = asyncio.get_event_loop().create_future() + harness.listener._pending_event_acks[("task1", 1)] = future # type: ignore[assignment] + + stream1.end() + await asyncio.sleep(0.15) + + assert future.done() + with pytest.raises(ConnectionResetError): + 
future.result() + + +async def test_event_acks_rejected_when_stream_errors( + harness: _Harness, +) -> None: + stream1 = ControllableStream() + harness.streams.append(stream1) + harness.add_hanging_stream() + + await harness.start() + await asyncio.sleep(0.05) + + future: asyncio.Future[object] = asyncio.get_event_loop().create_future() + harness.listener._pending_event_acks[("task1", 1)] = future # type: ignore[assignment] + + stream1.fail(_make_grpc_error(grpc.StatusCode.UNAVAILABLE, "gone")) + await asyncio.sleep(0.15) + + assert future.done() + with pytest.raises(ConnectionResetError): + future.result() + + +# ── worker re-registration ── + + +async def test_request_queue_exists_after_each_connect( + harness: _Harness, +) -> None: + harness.add_eof_stream() + harness.add_hanging_stream() + + await harness.start() + await asyncio.sleep(0.15) + + assert harness.call_count >= 2 + assert harness.listener._request_queue is not None + + +# ── connect failure during reconnect ── + + +async def test_survives_connect_failure_and_keeps_running( + harness: _Harness, +) -> None: + """When _connect() fails during reconnection, the receive loop should + not crash. 
It should continue running and try reconnecting again.""" + stream1 = ControllableStream() + harness.streams.append(stream1) + harness.add_hanging_stream() + + await harness.start() + await asyncio.sleep(0.05) + + import hatchet_sdk.clients.listeners.durable_event_listener as mod + + original_new_conn = getattr(mod, "new_conn") + setattr(mod, "new_conn", MagicMock(side_effect=ConnectionError("network down"))) + + stream1.end() + await asyncio.sleep(0.3) + + setattr(mod, "new_conn", original_new_conn) + + assert harness.listener._running is True + + +# ── listener state after reconnect ── + + +async def test_still_running_after_reconnect(harness: _Harness) -> None: + harness.add_eof_stream() + harness.add_hanging_stream() + + await harness.start() + await asyncio.sleep(0.15) + + assert harness.listener._running is True + + +async def test_has_new_stream_after_reconnect(harness: _Harness) -> None: + s1 = ControllableStream() + harness.streams.append(s1) + harness.add_hanging_stream() + + await harness.start() + await asyncio.sleep(0.05) + + old_stream = harness.listener._stream + s1.end() + await asyncio.sleep(0.15) + + assert harness.listener._stream is not old_stream diff --git a/sdks/typescript/README.md b/sdks/typescript/README.md index 88dbde5f9..27e6b031e 100644 --- a/sdks/typescript/README.md +++ b/sdks/typescript/README.md @@ -63,9 +63,21 @@ pnpm install pnpm build ``` -3. Run tests: +3. Run unit tests: ```bash -pnpm test +pnpm test:unit +``` + +4. 
Run e2e tests (requires a running Hatchet engine): +```bash +# Run all e2e tests +pnpm test:e2e + +# Run a specific e2e test file +pnpm test:e2e durable.e2e.ts + +# Run a specific test by name +pnpm test:e2e durable.e2e.ts -t "durable replay reset" ``` ## Contributing diff --git a/sdks/typescript/src/clients/admin/admin-client.ts b/sdks/typescript/src/clients/admin/admin-client.ts index 103c7bda9..6d4183177 100644 --- a/sdks/typescript/src/clients/admin/admin-client.ts +++ b/sdks/typescript/src/clients/admin/admin-client.ts @@ -2,9 +2,7 @@ import { Channel, ClientFactory } from 'nice-grpc'; import { BulkTriggerWorkflowRequest, CreateWorkflowVersionOpts, - DesiredWorkerLabels, RateLimitDuration, - WorkerLabelComparator, WorkflowServiceClient, WorkflowServiceDefinition, } from '@hatchet/protoc/workflows'; @@ -19,8 +17,9 @@ import { AdminServiceDefinition, CreateWorkflowVersionRequest, } from '@hatchet/protoc/v1/workflows'; -import { Priority, RunsClient } from '@hatchet/v1'; +import { Priority, RunsClient, WorkerLabelComparator } from '@hatchet/v1'; import { applyNamespace } from '@hatchet/util/apply-namespace'; +import { DesiredWorkerLabels } from '@hatchet/protoc/v1/shared/trigger'; import { Api } from '../rest'; import { WebhookWorkerCreateRequest, diff --git a/sdks/typescript/src/clients/dispatcher/action-listener.ts b/sdks/typescript/src/clients/dispatcher/action-listener.ts index 4bd9777b4..4d0cc3f0b 100644 --- a/sdks/typescript/src/clients/dispatcher/action-listener.ts +++ b/sdks/typescript/src/clients/dispatcher/action-listener.ts @@ -1,8 +1,4 @@ -import { - DispatcherClient as PbDispatcherClient, - AssignedAction, - ActionType, -} from '@hatchet/protoc/dispatcher'; +import { DispatcherClient as PbDispatcherClient, AssignedAction } from '@hatchet/protoc/dispatcher'; import { Status } from 'nice-grpc'; import { getGrpcErrorCode } from '@util/grpc-error'; @@ -23,28 +19,20 @@ enum ListenStrategy { LISTEN_STRATEGY_V2 = 2, } -export type Action = AssignedAction & 
{ - /** @deprecated use taskRunId */ - stepRunId?: string; - /** @deprecated use taskId */ - stepId?: string; -}; +export type ActionKey = `${string}/${number}`; -export type ActionKey = string; +export type Action = AssignedAction & { readonly key: ActionKey }; -export function createActionKey(action: Action): ActionKey { - switch (action.actionType) { - case ActionType.START_GET_GROUP_KEY: - return `${action.getGroupKeyRunId}/${action.retryCount}`; - case ActionType.CANCEL_STEP_RUN: - case ActionType.START_STEP_RUN: - case ActionType.UNRECOGNIZED: - return `${action.taskRunExternalId}/${action.retryCount}`; - default: - // eslint-disable-next-line no-case-declarations - const exhaustivenessCheck: never = action.actionType; - throw new Error(`Unhandled action type: ${exhaustivenessCheck}`); - } +export function createAction(assignedAction: AssignedAction): Action { + const action = assignedAction as Action; + Object.defineProperty(action, 'key', { + get(): ActionKey { + return `${this.taskRunExternalId}/${this.retryCount}`; + }, + enumerable: true, + configurable: true, + }); + return action; } export class ActionListener { @@ -87,13 +75,7 @@ export class ActionListener { const listenClient = await client.getListenClient(); for await (const assignedAction of listenClient) { - const action: Action = { - ...assignedAction, - stepRunId: assignedAction.taskRunExternalId, - stepId: assignedAction.taskId, - }; - - yield action; + yield createAction(assignedAction); } } catch (e: unknown) { // If the stream was aborted (e.g., during worker shutdown), exit gracefully diff --git a/sdks/typescript/src/clients/listeners/durable-listener/durable-listener-client.test.ts b/sdks/typescript/src/clients/listeners/durable-listener/durable-listener-client.test.ts new file mode 100644 index 000000000..b74c6e4c7 --- /dev/null +++ b/sdks/typescript/src/clients/listeners/durable-listener/durable-listener-client.test.ts @@ -0,0 +1,479 @@ +/* eslint-disable require-yield */ +import sleep 
from '@hatchet/util/sleep';
+import { DurableListenerClient } from './durable-listener-client';
+
+jest.mock('@hatchet/util/sleep', () => ({
+  __esModule: true,
+  default: jest.fn(() => Promise.resolve()),
+}));
+
+const mockedSleep = jest.mocked(sleep);
+
+function noopLogger() {
+  return { info: jest.fn(), warn: jest.fn(), error: jest.fn(), debug: jest.fn() };
+}
+
+function mockConfig(): any {
+  return { logger: () => noopLogger(), log_level: 'OFF' };
+}
+
+async function settle(ms = 50): Promise<void> {
+  await new Promise((r) => {
+    setTimeout(r, ms);
+  });
+}
+
+function emptyStream(): AsyncIterable<any> {
+  return (async function* empty() {})();
+}
+
+function errorStream(err: Error): AsyncIterable<any> {
+  return (async function* throwErr() {
+    throw err;
+  })();
+}
+
+function hangingStream(): { stream: AsyncIterable<any>; end: () => void } {
+  let resolver!: () => void;
+  const gate = new Promise<void>((r) => {
+    resolver = r;
+  });
+  const stream = (async function* hang() {
+    await gate;
+  })();
+  return { stream, end: () => resolver() };
+}
+
+function controllableStream() {
+  const buffer: any[] = [];
+  let waiter: ((v: { response?: any; done?: boolean; error?: Error }) => void) | null = null;
+  let ended = false;
+
+  return {
+    push(response: any) {
+      if (waiter) {
+        const w = waiter;
+        waiter = null;
+        w({ response });
+      } else {
+        buffer.push(response);
+      }
+    },
+    end() {
+      ended = true;
+      if (waiter) {
+        const w = waiter;
+        waiter = null;
+        w({ done: true });
+      }
+    },
+    error(err: Error) {
+      if (waiter) {
+        const w = waiter;
+        waiter = null;
+        w({ error: err });
+      }
+    },
+    stream: {
+      async *[Symbol.asyncIterator]() {
+        while (true) {
+          if (buffer.length > 0) {
+            yield buffer.shift()!;
+
+            continue;
+          }
+          if (ended) return;
+          const result = await new Promise<{ response?: any; done?: boolean; error?: Error }>(
+            (r) => {
+              waiter = r;
+            }
+          );
+          if (result.error) throw result.error;
+          if (result.done) return;
+          if (result.response !== undefined) yield result.response;
+        }
+      },
+    },
+  };
+}
+
+function makeDeferred<T>() {
+  let resolve!: (v: T) => void;
+  let reject!: (r: any) => void;
+  const promise = new Promise<T>((res, rej) => {
+    resolve = res;
+    reject = rej;
+  });
+  return { promise, resolve, reject };
+}
+
+describe('DurableListenerClient reconnection', () => {
+  let grpcClient: any;
+  let listener: DurableListenerClient;
+  const openStreams: { end: () => void }[] = [];
+
+  function tracked(s: ReturnType<typeof hangingStream>) {
+    openStreams.push(s);
+    return s;
+  }
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+    grpcClient = { durableTask: jest.fn() };
+    const factory = { create: jest.fn(() => grpcClient) };
+    listener = new DurableListenerClient(mockConfig(), {} as any, factory as any);
+  });
+
+  afterEach(async () => {
+    await listener.stop();
+    for (const s of openStreams) s.end();
+    openStreams.length = 0;
+    await settle(10);
+  });
+
+  // ── reconnection on stream EOF ──
+
+  describe('reconnects on stream EOF', () => {
+    it('opens a new stream after the first stream ends', async () => {
+      const h = tracked(hangingStream());
+      let call = 0;
+      grpcClient.durableTask.mockImplementation(() => (++call === 1 ? emptyStream() : h.stream));
+
+      await listener.start('w1');
+      await settle();
+
+      expect(grpcClient.durableTask).toHaveBeenCalledTimes(2);
+    });
+
+    it('sleeps DEFAULT_RECONNECT_INTERVAL before reconnecting', async () => {
+      const h = tracked(hangingStream());
+      let call = 0;
+      grpcClient.durableTask.mockImplementation(() => (++call === 1 ? emptyStream() : h.stream));
+
+      await listener.start('w1');
+      await settle();
+
+      expect(mockedSleep).toHaveBeenCalledWith(3000);
+    });
+  });
+
+  // ── reconnection on stream error ──
+
+  describe('reconnects on stream error', () => {
+    it('opens a new stream after a non-abort error', async () => {
+      const h = tracked(hangingStream());
+      let call = 0;
+      grpcClient.durableTask.mockImplementation(() =>
+        ++call === 1 ?
errorStream(new Error('network reset')) : h.stream + ); + + await listener.start('w1'); + await settle(); + + expect(grpcClient.durableTask).toHaveBeenCalledTimes(2); + }); + + it('sleeps before reconnecting on error', async () => { + const h = tracked(hangingStream()); + let call = 0; + grpcClient.durableTask.mockImplementation(() => + ++call === 1 ? errorStream(new Error('fail')) : h.stream + ); + + await listener.start('w1'); + await settle(); + + expect(mockedSleep).toHaveBeenCalledWith(3000); + }); + }); + + // ── no reconnect when stopped ── + + describe('does not reconnect when stopped', () => { + it('does not open a new stream after stop()', async () => { + const h = tracked(hangingStream()); + grpcClient.durableTask.mockReturnValue(h.stream); + + await listener.start('w1'); + await listener.stop(); + await settle(); + + expect(grpcClient.durableTask).toHaveBeenCalledTimes(1); + }); + }); + + // ── multiple sequential reconnects ── + + describe('multiple sequential reconnects', () => { + it('recovers through several consecutive EOFs', async () => { + const h = tracked(hangingStream()); + let call = 0; + grpcClient.durableTask.mockImplementation(() => (++call <= 3 ? emptyStream() : h.stream)); + + await listener.start('w1'); + await settle(150); + + expect(grpcClient.durableTask.mock.calls.length).toBeGreaterThanOrEqual(4); + }); + + it('recovers through several consecutive errors', async () => { + const h = tracked(hangingStream()); + let call = 0; + grpcClient.durableTask.mockImplementation(() => + ++call <= 3 ? 
errorStream(new Error(`err-${call}`)) : h.stream + ); + + await listener.start('w1'); + await settle(150); + + expect(grpcClient.durableTask.mock.calls.length).toBeGreaterThanOrEqual(4); + }); + }); + + // ── worker re-registration ── + + describe('worker re-registration on reconnect', () => { + it('enqueues a registerWorker request for each connection', async () => { + const registrations: any[] = []; + const h = tracked(hangingStream()); + let call = 0; + + grpcClient.durableTask.mockImplementation((reqIter: AsyncIterable) => { + call++; + (async () => { + const iter = reqIter[Symbol.asyncIterator](); + const first = await iter.next(); + if (!first.done) registrations.push(first.value); + })(); + return call === 1 ? emptyStream() : h.stream; + }); + + await listener.start('w1'); + await settle(100); + + expect(registrations.length).toBeGreaterThanOrEqual(2); + for (const reg of registrations) { + expect(reg).toHaveProperty('registerWorker'); + expect(reg.registerWorker.workerId).toBe('w1'); + } + }); + }); + + // ── _failPendingAcks correctness ── + + describe('_failPendingAcks', () => { + beforeEach(async () => { + const h = tracked(hangingStream()); + grpcClient.durableTask.mockReturnValue(h.stream); + await listener.start('w1'); + }); + + it('rejects all pending event acks and clears the map', () => { + const l = listener as any; + const d = makeDeferred(); + l._pendingEventAcks.set('task:1', d); + + l._failPendingAcks(new Error('disconnected')); + + expect(l._pendingEventAcks.size).toBe(0); + return expect(d.promise).rejects.toThrow('disconnected'); + }); + + it('preserves pending callbacks (server-side state survives reconnection)', () => { + const l = listener as any; + const d = makeDeferred(); + // Swallow the rejection that stop() will produce in afterEach + d.promise.catch(() => {}); + l._pendingCallbacks.set('task:1:0:1', d); + + l._failPendingAcks(new Error('disconnected')); + + expect(l._pendingCallbacks.size).toBe(1); + 
expect(l._pendingCallbacks.get('task:1:0:1')).toBe(d); + }); + + it('rejects all pending eviction acks and clears the map', () => { + const l = listener as any; + const d = makeDeferred(); + l._pendingEvictionAcks.set('task:1', d); + + l._failPendingAcks(new Error('disconnected')); + + expect(l._pendingEvictionAcks.size).toBe(0); + return expect(d.promise).rejects.toThrow('disconnected'); + }); + + it('preserves buffered completions (server-side state survives reconnection)', () => { + const l = listener as any; + const completion = { + durableTaskExternalId: 'task', + nodeId: 1, + payload: {}, + }; + l._bufferedCompletions.set('task:1:0:1', completion); + + l._failPendingAcks(new Error('disconnected')); + + expect(l._bufferedCompletions.size).toBe(1); + expect(l._bufferedCompletions.get('task:1:0:1')).toBe(completion); + }); + }); + + // ── _failAllPending correctness (used on stop) ── + + describe('_failAllPending', () => { + beforeEach(async () => { + const h = tracked(hangingStream()); + grpcClient.durableTask.mockReturnValue(h.stream); + await listener.start('w1'); + }); + + it('rejects pending callbacks and clears the map', () => { + const l = listener as any; + const d = makeDeferred(); + l._pendingCallbacks.set('task:1:0:1', d); + + l._failAllPending(new Error('stopped')); + + expect(l._pendingCallbacks.size).toBe(0); + return expect(d.promise).rejects.toThrow('stopped'); + }); + + it('clears buffered completions', () => { + const l = listener as any; + l._bufferedCompletions.set('task:1:0:1', { + durableTaskExternalId: 'task', + nodeId: 1, + payload: {}, + }); + + l._failAllPending(new Error('stopped')); + + expect(l._bufferedCompletions.size).toBe(0); + }); + + it('also rejects pending event acks and eviction acks', () => { + const l = listener as any; + const ackD = makeDeferred(); + const evD = makeDeferred(); + l._pendingEventAcks.set('task:1', ackD); + l._pendingEvictionAcks.set('task:1', evD); + + l._failAllPending(new Error('stopped')); + + 
expect(l._pendingEventAcks.size).toBe(0); + expect(l._pendingEvictionAcks.size).toBe(0); + return Promise.all([ + expect(ackD.promise).rejects.toThrow('stopped'), + expect(evD.promise).rejects.toThrow('stopped'), + ]); + }); + }); + + // ── pending state rejected on stream disconnect ── + + describe('pending state is rejected on stream disconnect', () => { + it('rejects pending event acks when stream ends (EOF)', async () => { + const ctrl = controllableStream(); + const h = tracked(hangingStream()); + let call = 0; + grpcClient.durableTask.mockImplementation(() => (++call === 1 ? ctrl.stream : h.stream)); + + await listener.start('w1'); + await settle(); + + const d = makeDeferred(); + (listener as any)._pendingEventAcks.set('task:1', d); + + const assertion = expect(d.promise).rejects.toThrow('durable stream disconnected'); + ctrl.end(); + await settle(); + await assertion; + }); + + it('preserves pending callbacks when stream ends (EOF)', async () => { + const ctrl = controllableStream(); + const h = tracked(hangingStream()); + let call = 0; + grpcClient.durableTask.mockImplementation(() => (++call === 1 ? ctrl.stream : h.stream)); + + await listener.start('w1'); + await settle(); + + const d = makeDeferred(); + // Swallow the rejection that stop() will produce in afterEach + d.promise.catch(() => {}); + (listener as any)._pendingCallbacks.set('task:1:0:1', d); + + ctrl.end(); + await settle(); + + expect((listener as any)._pendingCallbacks.size).toBe(1); + expect((listener as any)._pendingCallbacks.get('task:1:0:1')).toBe(d); + }); + + it('rejects pending eviction acks when stream ends (EOF)', async () => { + const ctrl = controllableStream(); + const h = tracked(hangingStream()); + let call = 0; + grpcClient.durableTask.mockImplementation(() => (++call === 1 ? 
ctrl.stream : h.stream)); + + await listener.start('w1'); + await settle(); + + const d = makeDeferred(); + (listener as any)._pendingEvictionAcks.set('task:1', d); + + const assertion = expect(d.promise).rejects.toThrow('durable stream disconnected'); + ctrl.end(); + await settle(); + await assertion; + }); + + it('rejects pending event acks when stream errors', async () => { + const ctrl = controllableStream(); + const h = tracked(hangingStream()); + let call = 0; + grpcClient.durableTask.mockImplementation(() => (++call === 1 ? ctrl.stream : h.stream)); + + await listener.start('w1'); + await settle(); + + const d = makeDeferred(); + (listener as any)._pendingEventAcks.set('task:1', d); + + const assertion = expect(d.promise).rejects.toThrow('durable stream error'); + ctrl.error(new Error('transport failure')); + await settle(); + await assertion; + }); + }); + + // ── listener remains operational after reconnect ── + + describe('listener state after reconnect', () => { + it('is still running after reconnect', async () => { + const h = tracked(hangingStream()); + let call = 0; + grpcClient.durableTask.mockImplementation(() => (++call === 1 ? emptyStream() : h.stream)); + + await listener.start('w1'); + await settle(); + + expect((listener as any)._running).toBe(true); + }); + + it('creates a fresh AbortController for the new stream', async () => { + const h = tracked(hangingStream()); + let call = 0; + grpcClient.durableTask.mockImplementation(() => (++call === 1 ? 
emptyStream() : h.stream)); + + await listener.start('w1'); + await settle(); + + const abort = (listener as any)._receiveAbort as AbortController; + expect(abort).toBeDefined(); + expect(abort.signal.aborted).toBe(false); + }); + }); +}); diff --git a/sdks/typescript/src/clients/listeners/durable-listener/durable-listener-client.ts b/sdks/typescript/src/clients/listeners/durable-listener/durable-listener-client.ts index dceb9888b..e5272f007 100644 --- a/sdks/typescript/src/clients/listeners/durable-listener/durable-listener-client.ts +++ b/sdks/typescript/src/clients/listeners/durable-listener/durable-listener-client.ts @@ -1,59 +1,964 @@ +import { EventEmitter, on } from 'events'; import { Channel, ClientFactory } from 'nice-grpc'; +import { isAbortError } from 'abort-controller-x'; +import { getErrorMessage } from '@hatchet/util/errors/hatchet-error'; import { ClientConfig } from '@clients/hatchet-client/client-config'; import { Logger } from '@hatchet/util/logger'; -import { V1DispatcherClient, V1DispatcherDefinition } from '@hatchet/protoc/v1/dispatcher'; -import { SleepMatchCondition, UserEventMatchCondition } from '@hatchet/protoc/v1/shared/condition'; -import { Api } from '../../rest'; -import { DurableEventGrpcPooledListener } from './pooled-durable-listener-client'; +import { + V1DispatcherClient, + V1DispatcherDefinition, + DurableTaskRequest, + DurableTaskResponse, + DurableTaskEventLogEntryCompletedResponse, + DurableTaskErrorType, + DurableTaskRequestRegisterWorker, + DurableTaskWorkerStatusRequest, + DurableTaskAwaitedCompletedEntry, + DurableTaskEvictInvocationRequest, + DurableTaskCompleteMemoRequest, + DurableEvent, + RegisterDurableEventResponse, + ListenForDurableEventRequest, + DurableTaskMemoRequest, + DurableTaskTriggerRunsRequest, + DurableTaskWaitForRequest, + DurableEventLogEntryRef, +} from '@hatchet/protoc/v1/dispatcher'; +import { + DurableEventListenerConditions, + SleepMatchCondition, + UserEventMatchCondition, +} from 
'@hatchet/protoc/v1/shared/condition';
+import { TriggerWorkflowRequest } from '@hatchet/protoc/v1/shared/trigger';
+import { NonDeterminismError } from '@hatchet/util/errors/non-determinism-error';
+import { createAbortError, bindAbortSignalHandler } from '@hatchet/util/abort-error';
+import sleep from '@hatchet/util/sleep';
+
+class TTLMap<K, V> {
+  private cache = new Map<K, { value: V; expiresAt: number }>();
+  private timer: ReturnType<typeof setInterval>;
+
+  constructor(private ttlMs: number) {
+    this.timer = setInterval(() => this.evict(), ttlMs);
+  }
+
+  set(key: K, value: V): void {
+    this.cache.set(key, { value, expiresAt: Date.now() + this.ttlMs });
+  }
+
+  get(key: K): V | undefined {
+    return this.cache.get(key)?.value;
+  }
+
+  get size(): number {
+    return this.cache.size;
+  }
+
+  has(key: K): boolean {
+    return this.cache.has(key);
+  }
+
+  delete(key: K): boolean {
+    return this.cache.delete(key);
+  }
+
+  keys(): IterableIterator<K> {
+    return this.cache.keys();
+  }
+
+  pop(key: K): V | undefined {
+    const entry = this.cache.get(key);
+    if (entry) {
+      this.cache.delete(key);
+      return entry.value;
+    }
+    return undefined;
+  }
+
+  clear(): void {
+    this.cache.clear();
+  }
+
+  destroy(): void {
+    clearInterval(this.timer);
+    this.cache.clear();
+  }
+
+  private evict(): void {
+    const now = Date.now();
+    for (const [key, entry] of this.cache) {
+      if (entry.expiresAt <= now) {
+        this.cache.delete(key);
+      }
+    }
+  }
+}
+
+const DEFAULT_RECONNECT_INTERVAL = 3000;
+const EVICTION_ACK_TIMEOUT_MS = 30_000;
+const WORKER_STATUS_POLL_INTERVAL_MS = 1000;
+
+export interface DurableTaskRunAckEntryResult {
+  nodeId: number;
+  branchId: number;
+}
+
+export interface DurableTaskEventRunAck {
+  ackType: 'run';
+  invocationCount: number;
+  durableTaskExternalId: string;
+  runEntries: DurableTaskRunAckEntryResult[];
+}
+
+export interface DurableTaskEventMemoAck {
+  ackType: 'memo';
+  invocationCount: number;
+  durableTaskExternalId: string;
+  branchId: number;
+  nodeId: number;
+  memoAlreadyExisted: boolean;
+  memoResultPayload?: Uint8Array;
+}
+
+export interface DurableTaskEventWaitForAck {
+  ackType: 'waitFor';
+  invocationCount: number;
+  durableTaskExternalId: string;
+  branchId: number;
+  nodeId: number;
+}
+
+export type DurableTaskEventAck =
+  | DurableTaskEventRunAck
+  | DurableTaskEventMemoAck
+  | DurableTaskEventWaitForAck;
+
+export interface DurableTaskEventLogEntryResult {
+  durableTaskExternalId: string;
+  nodeId: number;
+  payload: Record<string, unknown> | undefined;
+}
+
+function eventLogEntryResultFromProto(
+  proto: DurableTaskEventLogEntryCompletedResponse
+): DurableTaskEventLogEntryResult {
+  let payload: Record<string, unknown> | undefined;
+  if (proto.payload && proto.payload.length > 0) {
+    payload = JSON.parse(new TextDecoder().decode(proto.payload));
+  }
+  return {
+    durableTaskExternalId: proto.ref?.durableTaskExternalId ?? '',
+    nodeId: proto.ref?.nodeId ?? 0,
+    payload,
+  };
+}
+
+export interface WaitForEvent {
+  kind: 'waitFor';
+  waitForConditions: DurableEventListenerConditions;
+}
+
+export interface RunChildrenEvent {
+  kind: 'runChildren';
+  triggerOpts: TriggerWorkflowRequest[];
+}
+
+export interface MemoEvent {
+  kind: 'memo';
+  memoKey: Uint8Array;
+  payload?: Uint8Array;
+}
+
+export type DurableTaskSendEvent = WaitForEvent | RunChildrenEvent | MemoEvent;
+
+type TaskExternalId = string;
+type InvocationCount = number;
+type BranchId = number;
+type NodeId = number;
+
+type PendingEventAckKey = `${TaskExternalId}:${InvocationCount}`;
+type PendingCallbackKey = `${TaskExternalId}:${InvocationCount}:${BranchId}:${NodeId}`;
+type PendingEvictionAckKey = `${TaskExternalId}:${InvocationCount}`;
+
+function ackKey(taskExtId: string, invocationCount: number): PendingEventAckKey {
+  return `${taskExtId}:${invocationCount}`;
+}
+function callbackKey(
+  taskExtId: string,
+  invocationCount: number,
+  branchId: number,
+  nodeId: number
+): PendingCallbackKey {
+  return `${taskExtId}:${invocationCount}:${branchId}:${nodeId}`;
+}
+function evictionKey(taskExtId: string, invocationCount:
number): PendingEvictionAckKey {
+  return `${taskExtId}:${invocationCount}`;
+}
+
+interface Deferred<T> {
+  promise: Promise<T>;
+  resolve: (value: T) => void;
+  reject: (reason: unknown) => void;
+}
+
+function deferred<T>(): Deferred<T> {
+  let resolve!: (value: T) => void;
+  let reject!: (reason: unknown) => void;
+  const promise = new Promise<T>((res, rej) => {
+    resolve = res;
+    reject = rej;
+  });
+  return { promise, resolve, reject };
+}
 
 export class DurableListenerClient {
   config: ClientConfig;
   client: V1DispatcherClient;
   logger: Logger;
-  api: Api;
-  pooledListener: DurableEventGrpcPooledListener | undefined;
+  private _workerId: string | undefined;
+  private _running = false;
+  private _requestQueue: DurableTaskRequest[] = [];
+  private _requestNotify: (() => void) | undefined;
 
-  constructor(config: ClientConfig, channel: Channel, factory: ClientFactory, api: Api) {
+  private _pendingEventAcks = new Map<PendingEventAckKey, Deferred<DurableTaskEventAck>>();
+  private _pendingCallbacks = new Map<
+    PendingCallbackKey,
+    Deferred<DurableTaskEventLogEntryResult>
+  >();
+  // Completions that arrived before waitForCallback() registered a deferred
+  // in _pendingCallbacks. This happens when the server delivers an
+  // entryCompleted between the event ack and the waitForCallback call
+  // (e.g. an already-satisfied sleep delivered via polling).
+  private _bufferedCompletions = new TTLMap<PendingCallbackKey, DurableTaskEventLogEntryResult>(
+    10_000
+  );
+  private _pendingEvictionAcks = new Map<PendingEvictionAckKey, Deferred<void>>();
+
+  private _receiveAbort: AbortController | undefined;
+  private _statusInterval: ReturnType<typeof setInterval> | undefined;
+  private _startLock: Promise<void> | undefined;
+
+  onServerEvict: ((durableTaskExternalId: string, invocationCount: number) => void) | undefined;
+
+  constructor(config: ClientConfig, channel: Channel, factory: ClientFactory) {
     this.config = config;
     this.client = factory.create(V1DispatcherDefinition, channel);
-    this.logger = config.logger(`Listener`, config.log_level);
-    this.api = api;
+    this.logger = config.logger(`DurableListener`, config.log_level);
   }
 
-  subscribe(request: { taskId: string; signalKey: string }) {
-    if (!this.pooledListener) {
-      this.pooledListener = new DurableEventGrpcPooledListener(this, () => {
-        this.pooledListener = undefined;
+  get workerId(): string | undefined {
+    return this._workerId;
+  }
+
+  async start(workerId: string): Promise<void> {
+    if (this._startLock) {
+      await this._startLock;
+      return;
+    }
+    this._startLock = this._doStart(workerId);
+    await this._startLock;
+  }
+
+  private async _doStart(workerId: string): Promise<void> {
+    if (this._running) return;
+    this._workerId = workerId;
+    this._running = true;
+    await this._connect();
+    this._startStatusPolling();
+  }
+
+  async ensureStarted(workerId: string): Promise<void> {
+    if (!this._running) {
+      await this.start(workerId);
+    }
+  }
+
+  async stop(): Promise<void> {
+    this._running = false;
+    this._startLock = undefined;
+    if (this._statusInterval) {
+      clearInterval(this._statusInterval);
+      this._statusInterval = undefined;
+    }
+    if (this._receiveAbort) {
+      this._receiveAbort.abort();
+    }
+    this._failPendingAcks(new Error('DurableListener stopped'));
+    this._bufferedCompletions.destroy();
+  }
+
+  private async _connect(): Promise<void> {
+    this.logger.info('durable event listener connecting...');
+
+    this._requestQueue = [];
+
+    this._receiveAbort = new AbortController();
+
+    this._enqueueRequest({
+      registerWorker: { workerId: this._workerId! } as DurableTaskRequestRegisterWorker,
+    });
+
+    this._pollWorkerStatus();
+
+    void this._streamLoop();
+
+    this.logger.info('durable event listener connected');
+  }
+
+  private async _streamLoop(): Promise<void> {
+    while (this._running) {
+      try {
+        const stream = this.client.durableTask(this._requestIterator(), {
+          signal: this._receiveAbort?.signal,
+        });
+
+        for await (const response of stream) {
+          this._handleResponse(response);
+        }
+
+        if (this._running) {
+          this.logger.warn(
+            `durable event listener disconnected (EOF), reconnecting in ${DEFAULT_RECONNECT_INTERVAL}ms...`
+          );
+          this._failPendingAcks(new Error('durable stream disconnected'));
+          await sleep(DEFAULT_RECONNECT_INTERVAL);
+          await this._connect();
+          return;
+        }
+      } catch (e: unknown) {
+        if (isAbortError(e)) {
+          this.logger.debug('durable event listener aborted');
+          return;
+        }
+        this.logger.error(`error in durable event listener: ${getErrorMessage(e)}`);
+        if (this._running) {
+          this._failPendingAcks(new Error(`durable stream error: ${getErrorMessage(e)}`));
+          await sleep(DEFAULT_RECONNECT_INTERVAL);
+          await this._connect();
+          return;
+        }
+      }
+    }
+  }
+
+  private async *_requestIterator(): AsyncIterable<DurableTaskRequest> {
+    while (this._running) {
+      while (this._requestQueue.length > 0) {
+        yield this._requestQueue.shift()!;
+      }
+
+      await new Promise<void>((resolve) => {
+        this._requestNotify = resolve;
+      });
+      this._requestNotify = undefined;
+    }
+  }
+
+  private _enqueueRequest(request: DurableTaskRequest): void {
+    this._requestQueue.push(request);
+    if (this._requestNotify) {
+      this._requestNotify();
+    }
+  }
+
+  private _startStatusPolling(): void {
+    if (this._statusInterval) {
+      clearInterval(this._statusInterval);
+    }
+    this._statusInterval = setInterval(() => {
+      this._pollWorkerStatus();
+    }, WORKER_STATUS_POLL_INTERVAL_MS);
+  }
+
+  private _pollWorkerStatus(): void {
+    if (!this._workerId || this._pendingCallbacks.size === 0) return;
+
+    const waitingEntries:
DurableTaskAwaitedCompletedEntry[] = []; + for (const key of this._pendingCallbacks.keys()) { + const parts = key.split(':'); + waitingEntries.push({ + durableTaskExternalId: parts[0], + invocationCount: parseInt(parts[1], 10), + branchId: parseInt(parts[2], 10), + nodeId: parseInt(parts[3], 10), }); } - return this.pooledListener.subscribe(request); + this._enqueueRequest({ + workerStatus: { + workerId: this._workerId, + waitingEntries, + } as DurableTaskWorkerStatusRequest, + }); } - result(request: { taskId: string; signalKey: string }, opts?: { signal?: AbortSignal }) { - if (!this.pooledListener) { - this.pooledListener = new DurableEventGrpcPooledListener(this, () => { - this.pooledListener = undefined; - }); + private _failPendingAcks(exc: Error): void { + for (const d of this._pendingEventAcks.values()) { + d.reject(exc); + } + this._pendingEventAcks.clear(); + + for (const d of this._pendingEvictionAcks.values()) { + d.reject(exc); + } + this._pendingEvictionAcks.clear(); + } + + private _failAllPending(exc: Error): void { + this._failPendingAcks(exc); + + for (const d of this._pendingCallbacks.values()) { + d.reject(exc); + } + this._pendingCallbacks.clear(); + this._bufferedCompletions.clear(); + } + + private _handleResponse(response: DurableTaskResponse): void { + if (response.registerWorker) { + // registration acknowledged + } else if (response.triggerRunsAck) { + const ack = response.triggerRunsAck; + const key = ackKey(ack.durableTaskExternalId, ack.invocationCount); + const pending = this._pendingEventAcks.get(key); + if (pending) { + pending.resolve({ + ackType: 'run', + invocationCount: ack.invocationCount, + durableTaskExternalId: ack.durableTaskExternalId, + runEntries: (ack.runEntries || []).map((e) => ({ + nodeId: e.nodeId, + branchId: e.branchId, + })), + }); + this._pendingEventAcks.delete(key); + } + } else if (response.memoAck) { + const ack = response.memoAck; + const { ref } = ack; + const key = ackKey(ref?.durableTaskExternalId ?? 
'', ref?.invocationCount ?? 0); + const pending = this._pendingEventAcks.get(key); + if (pending) { + pending.resolve({ + ackType: 'memo', + invocationCount: ref?.invocationCount ?? 0, + durableTaskExternalId: ref?.durableTaskExternalId ?? '', + branchId: ref?.branchId ?? 0, + nodeId: ref?.nodeId ?? 0, + memoAlreadyExisted: ack.memoAlreadyExisted, + memoResultPayload: ack.memoResultPayload, + }); + this._pendingEventAcks.delete(key); + } + } else if (response.waitForAck) { + const ack = response.waitForAck; + const { ref } = ack; + const key = ackKey(ref?.durableTaskExternalId ?? '', ref?.invocationCount ?? 0); + const pending = this._pendingEventAcks.get(key); + if (pending) { + pending.resolve({ + ackType: 'waitFor', + invocationCount: ref?.invocationCount ?? 0, + durableTaskExternalId: ref?.durableTaskExternalId ?? '', + branchId: ref?.branchId ?? 0, + nodeId: ref?.nodeId ?? 0, + }); + this._pendingEventAcks.delete(key); + } + } else if (response.entryCompleted) { + const completed = response.entryCompleted; + const { ref } = completed; + const key = callbackKey( + ref?.durableTaskExternalId ?? '', + ref?.invocationCount ?? 0, + ref?.branchId ?? 0, + ref?.nodeId ?? 
0 + ); + const result = eventLogEntryResultFromProto(completed); + const pending = this._pendingCallbacks.get(key); + if (pending) { + pending.resolve(result); + this._pendingCallbacks.delete(key); + } else { + this._bufferedCompletions.set(key, result); + } + } else if (response.evictionAck) { + const ack = response.evictionAck; + const key = evictionKey(ack.durableTaskExternalId, ack.invocationCount); + const pending = this._pendingEvictionAcks.get(key); + if (pending) { + pending.resolve(); + this._pendingEvictionAcks.delete(key); + } + } else if (response.serverEvict) { + const evict = response.serverEvict; + this.logger.info( + `received server eviction notification for task ${evict.durableTaskExternalId} ` + + `invocation ${evict.invocationCount}: ${evict.reason}` + ); + this.cleanupTaskState(evict.durableTaskExternalId, evict.invocationCount); + if (this.onServerEvict) { + this.onServerEvict(evict.durableTaskExternalId, evict.invocationCount); + } + } else if (response.error) { + const { error } = response; + const { ref } = error; + let exc: Error; + + if (error.errorType === DurableTaskErrorType.DURABLE_TASK_ERROR_TYPE_NONDETERMINISM) { + exc = new NonDeterminismError( + ref?.durableTaskExternalId ?? '', + ref?.invocationCount ?? 0, + ref?.nodeId ?? 0, + error.errorMessage + ); + } else { + exc = new Error( + `Unspecified durable task error: ${error.errorMessage} (type: ${error.errorType})` + ); + } + + const eAckKey = ackKey(ref?.durableTaskExternalId ?? '', ref?.invocationCount ?? 0); + const pendingAck = this._pendingEventAcks.get(eAckKey); + if (pendingAck) { + pendingAck.reject(exc); + this._pendingEventAcks.delete(eAckKey); + } + + const eCbKey = callbackKey( + ref?.durableTaskExternalId ?? '', + ref?.invocationCount ?? 0, + ref?.branchId ?? 0, + ref?.nodeId ?? 
0 + ); + const pendingCb = this._pendingCallbacks.get(eCbKey); + if (pendingCb) { + pendingCb.reject(exc); + this._pendingCallbacks.delete(eCbKey); + } + + const eEvKey = evictionKey(ref?.durableTaskExternalId ?? '', ref?.invocationCount ?? 0); + const pendingEv = this._pendingEvictionAcks.get(eEvKey); + if (pendingEv) { + pendingEv.reject(exc); + this._pendingEvictionAcks.delete(eEvKey); + } + } + } + + async sendEvent( + durableTaskExternalId: string, + invocationCount: number, + event: RunChildrenEvent + ): Promise; + async sendEvent( + durableTaskExternalId: string, + invocationCount: number, + event: WaitForEvent + ): Promise; + async sendEvent( + durableTaskExternalId: string, + invocationCount: number, + event: MemoEvent + ): Promise; + async sendEvent( + durableTaskExternalId: string, + invocationCount: number, + event: DurableTaskSendEvent + ): Promise { + const key = ackKey(durableTaskExternalId, invocationCount); + const d = deferred(); + this._pendingEventAcks.set(key, d); + + let request: DurableTaskRequest; + + switch (event.kind) { + case 'runChildren': { + const triggerRunsReq: DurableTaskTriggerRunsRequest = { + invocationCount, + durableTaskExternalId, + triggerOpts: event.triggerOpts, + }; + request = { triggerRuns: triggerRunsReq }; + break; + } + + case 'waitFor': { + const waitForReq: DurableTaskWaitForRequest = { + invocationCount, + durableTaskExternalId, + waitForConditions: event.waitForConditions, + }; + request = { waitFor: waitForReq }; + break; + } + + case 'memo': { + const memoReq: DurableTaskMemoRequest = { + invocationCount, + durableTaskExternalId, + key: event.memoKey, + payload: event.payload, + }; + request = { memo: memoReq }; + break; + } + + default: { + const _: never = event; + throw new Error(`Unknown durable task send event: ${_}`); + } } - return this.pooledListener.result(request, opts); + this._enqueueRequest(request); + return d.promise; } - registerDurableEvent(request: { + async waitForCallback( + 
durableTaskExternalId: string, + invocationCount: number, + branchId: number, + nodeId: number, + opts?: { signal?: AbortSignal } + ): Promise { + const key = callbackKey(durableTaskExternalId, invocationCount, branchId, nodeId); + + const early = this._bufferedCompletions.get(key); + if (early) { + this._bufferedCompletions.delete(key); + return early; + } + + if (!this._pendingCallbacks.has(key)) { + this._pendingCallbacks.set(key, deferred()); + this._pollWorkerStatus(); + } + + const d = this._pendingCallbacks.get(key)!; + const signal = opts?.signal; + + if (!signal) { + return d.promise; + } + + if (signal.aborted) { + return Promise.reject(createAbortError('Operation cancelled by AbortSignal')); + } + + return new Promise((resolve, reject) => { + let settled = false; + + const onAbort = () => { + if (settled) return; + settled = true; + reject(createAbortError('Operation cancelled by AbortSignal')); + }; + + bindAbortSignalHandler(signal, onAbort); + + d.promise.then( + (value) => { + if (settled) return; + settled = true; + signal.removeEventListener('abort', onAbort); + resolve(value); + }, + (err) => { + if (settled) return; + settled = true; + signal.removeEventListener('abort', onAbort); + reject(err); + } + ); + }); + } + + cleanupTaskState(durableTaskExternalId: string, invocationCount: number): void { + for (const [k, d] of this._pendingCallbacks) { + const parts = k.split(':'); + if (parts[0] === durableTaskExternalId && parseInt(parts[1], 10) <= invocationCount) { + d.reject(new Error('task state cleaned up')); + this._pendingCallbacks.delete(k); + } + } + + for (const [k, d] of this._pendingEventAcks) { + const parts = k.split(':'); + if (parts[0] === durableTaskExternalId && parseInt(parts[1], 10) <= invocationCount) { + d.reject(new Error('task state cleaned up')); + this._pendingEventAcks.delete(k); + } + } + + for (const k of this._bufferedCompletions.keys()) { + const parts = k.split(':'); + if (parts[0] === durableTaskExternalId && 
parseInt(parts[1], 10) <= invocationCount) { + this._bufferedCompletions.delete(k); + } + } + } + + async sendEvictInvocation( + durableTaskExternalId: string, + invocationCount: number, + reason?: string + ): Promise { + const key = evictionKey(durableTaskExternalId, invocationCount); + const d = deferred(); + this._pendingEvictionAcks.set(key, d); + + const req: DurableTaskEvictInvocationRequest = { + invocationCount, + durableTaskExternalId, + reason, + }; + + this._enqueueRequest({ evictInvocation: req }); + + const timeout = sleep(EVICTION_ACK_TIMEOUT_MS).then(() => { + throw new Error( + `Eviction ack timed out after ${EVICTION_ACK_TIMEOUT_MS}ms for task ${durableTaskExternalId} invocation ${invocationCount}` + ); + }); + + try { + await Promise.race([d.promise, timeout]); + } catch (err) { + this._pendingEvictionAcks.delete(key); + throw err; + } + } + + async sendMemoCompletedNotification( + durableTaskExternalId: string, + nodeId: number, + branchId: number, + invocationCount: number, + memoKey: Uint8Array, + memoResultPayload?: Uint8Array + ): Promise { + const ref: DurableEventLogEntryRef = { + durableTaskExternalId, + invocationCount, + branchId, + nodeId, + }; + + const req: DurableTaskCompleteMemoRequest = { + ref, + payload: memoResultPayload ?? new Uint8Array(), + memoKey, + }; + + this._enqueueRequest({ completeMemo: req }); + } + + /** + * @deprecated Legacy backward-compat: uses the old unary RegisterDurableEvent RPC. 
+ */ + async registerDurableEvent(request: { taskId: string; signalKey: string; sleepConditions: Array; userEventConditions: Array; - }) { - if (!this.pooledListener) { - this.pooledListener = new DurableEventGrpcPooledListener(this, () => { - this.pooledListener = undefined; - }); - } + }): Promise { + return this.client.registerDurableEvent({ + taskId: request.taskId, + signalKey: request.signalKey, + conditions: { + sleepConditions: request.sleepConditions, + userEventConditions: request.userEventConditions, + }, + }); + } - return this.pooledListener.registerDurableEvent(request); + /** + * @deprecated Legacy backward-compat: uses the old streaming ListenForDurableEvent RPC. + */ + subscribe(request: { taskId: string; signalKey: string }): LegacyDurableEventStreamable { + if (!this._legacyPooledListener) { + this._legacyPooledListener = new LegacyPooledListener(this); + } + return this._legacyPooledListener.subscribe(request); + } + + /** + * @deprecated Legacy backward-compat: subscribes and waits for a single result. + */ + async result( + request: { taskId: string; signalKey: string }, + opts?: { signal?: AbortSignal } + ): Promise { + const subscriber = this.subscribe(request); + return subscriber.get({ signal: opts?.signal }); + } + + private _legacyPooledListener: LegacyPooledListener | undefined; +} + +/** + * @deprecated Legacy support for the old streaming ListenForDurableEvent RPC. 
+ */ +export class LegacyDurableEventStreamable { + responseEmitter = new EventEmitter(); + private _onCleanup: () => void; + + constructor(onCleanup: () => void) { + this._onCleanup = onCleanup; + } + + async get(opts?: { signal?: AbortSignal }): Promise { + const signal = opts?.signal; + + return new Promise((resolve, reject) => { + let cleanedUp = false; + + const cleanup = () => { + if (cleanedUp) return; + cleanedUp = true; + this.responseEmitter.removeListener('response', onResponse); + if (signal) { + signal.removeEventListener('abort', onAbort); + } + this._onCleanup(); + }; + + const onResponse = (event: DurableEvent) => { + cleanup(); + resolve(event); + }; + + const onAbort = () => { + cleanup(); + reject(createAbortError('Operation cancelled by AbortSignal')); + }; + + if (signal?.aborted) { + onAbort(); + return; + } + + this.responseEmitter.once('response', onResponse); + if (signal) { + bindAbortSignalHandler(signal, onAbort); + } + }); + } +} + +/** + * @deprecated Legacy pooled listener for old ListenForDurableEvent streaming RPC. 
+ */ +class LegacyPooledListener { + private client: DurableListenerClient; + private requestEmitter = new EventEmitter(); + private signal = new AbortController(); + private listener: AsyncIterable | undefined; + private subscribers: Record = {}; + private taskSignalKeyToSubscriptionIds: Record = {}; + private subscriptionCounter = 0; + private currRequester = 0; + + constructor(client: DurableListenerClient) { + this.client = client; + this.init(); + } + + private async init(retries = 0) { + const MAX_RETRY_INTERVAL = 5000; + const BASE_RETRY_INTERVAL = 100; + const MAX_RETRY_COUNT = 5; + + if (retries > 0) { + const backoffTime = Math.min(BASE_RETRY_INTERVAL * 2 ** (retries - 1), MAX_RETRY_INTERVAL); + await sleep(backoffTime); + } + + if (retries > MAX_RETRY_COUNT) return; + + try { + this.signal = new AbortController(); + this.currRequester++; + + this.listener = this.client.client.listenForDurableEvent(this.request(), { + signal: this.signal.signal, + }); + + for await (const event of this.listener) { + const subscriptionKey = `${event.taskId}|${event.signalKey}`; + const subscriptionIds = this.taskSignalKeyToSubscriptionIds[subscriptionKey] || []; + for (const subId of subscriptionIds) { + const emitter = this.subscribers[subId]; + if (emitter) { + emitter.responseEmitter.emit('response', event); + this.cleanupSubscription(subId); + } + } + } + } catch (e: unknown) { + if (isAbortError(e)) return; + } finally { + if (Object.keys(this.subscribers).length > 0) { + this.init(retries + 1); + } + } + } + + private cleanupSubscription(subscriptionId: string) { + const emitter = this.subscribers[subscriptionId]; + if (!emitter) return; + const key = Object.entries(this.taskSignalKeyToSubscriptionIds).find(([, ids]) => + ids.includes(subscriptionId) + )?.[0]; + delete this.subscribers[subscriptionId]; + if (key && this.taskSignalKeyToSubscriptionIds[key]) { + this.taskSignalKeyToSubscriptionIds[key] = this.taskSignalKeyToSubscriptionIds[key].filter( + (id) => id !== 
subscriptionId + ); + if (this.taskSignalKeyToSubscriptionIds[key].length === 0) { + delete this.taskSignalKeyToSubscriptionIds[key]; + } + } + } + + subscribe(request: { taskId: string; signalKey: string }): LegacyDurableEventStreamable { + const subscriptionId = (this.subscriptionCounter++).toString(); + const subscriber = new LegacyDurableEventStreamable(() => + this.cleanupSubscription(subscriptionId) + ); + this.subscribers[subscriptionId] = subscriber; + + const key = `${request.taskId}|${request.signalKey}`; + if (!this.taskSignalKeyToSubscriptionIds[key]) { + this.taskSignalKeyToSubscriptionIds[key] = []; + } + this.taskSignalKeyToSubscriptionIds[key].push(subscriptionId); + this.requestEmitter.emit('subscribe', request); + return subscriber; + } + + private async *request(): AsyncIterable { + const { currRequester } = this; + const existing = new Set(); + + for (const key in this.taskSignalKeyToSubscriptionIds) { + if (this.taskSignalKeyToSubscriptionIds[key].length > 0) { + const [taskId, signalKey] = key.split('|'); + existing.add(key); + yield { taskId, signalKey }; + } + } + + for await (const e of on(this.requestEmitter, 'subscribe')) { + if (currRequester !== this.currRequester) break; + const request = e[0] as ListenForDurableEventRequest; + const key = `${request.taskId}|${request.signalKey}`; + if (!existing.has(key)) { + existing.add(key); + yield request; + } + } } } diff --git a/sdks/typescript/src/clients/listeners/durable-listener/pooled-durable-listener-client.ts b/sdks/typescript/src/clients/listeners/durable-listener/pooled-durable-listener-client.ts index a1ee40348..45f4b88fa 100644 --- a/sdks/typescript/src/clients/listeners/durable-listener/pooled-durable-listener-client.ts +++ b/sdks/typescript/src/clients/listeners/durable-listener/pooled-durable-listener-client.ts @@ -1,4 +1,4 @@ -import { EventEmitter, getMaxListeners, on, setMaxListeners } from 'events'; +import { EventEmitter, on } from 'events'; import { DurableEvent, 
ListenForDurableEventRequest, @@ -8,7 +8,7 @@ import { import { isAbortError } from 'abort-controller-x'; import { getErrorMessage } from '@util/errors/hatchet-error'; import sleep from '@hatchet/util/sleep'; -import { createAbortError } from '@hatchet/util/abort-error'; +import { createAbortError, bindAbortSignalHandler } from '@hatchet/util/abort-error'; import { DurableEventListenerConditions, SleepMatchCondition, @@ -74,20 +74,7 @@ export class DurableEventStreamable { this.responseEmitter.once('response', onResponse); if (signal) { - /** - * Node defaults AbortSignal max listeners to 10, which is easy to exceed with - * legitimate high-concurrency waits (e.g. multiple concurrent `ctx.waitFor(...)` - * calls in the same task). - * - * If the signal is still at the default cap, bump it to a reasonable level - * to avoid noisy `MaxListenersExceededWarning` while still keeping protection - * against true leaks in unusual cases. - */ - const max = getMaxListeners(signal); - if (max !== 0 && max < 50) { - setMaxListeners(50, signal); - } - signal.addEventListener('abort', onAbort, { once: true }); + bindAbortSignalHandler(signal, onAbort); } }); } diff --git a/sdks/typescript/src/clients/listeners/run-listener/pooled-child-listener-client.ts b/sdks/typescript/src/clients/listeners/run-listener/pooled-child-listener-client.ts index e9838b52f..0630e3d83 100644 --- a/sdks/typescript/src/clients/listeners/run-listener/pooled-child-listener-client.ts +++ b/sdks/typescript/src/clients/listeners/run-listener/pooled-child-listener-client.ts @@ -1,4 +1,4 @@ -import { EventEmitter, getMaxListeners, on, setMaxListeners } from 'events'; +import { EventEmitter, on } from 'events'; import { WorkflowRunEvent, SubscribeToWorkflowRunsRequest, @@ -7,7 +7,7 @@ import { import { isAbortError } from 'abort-controller-x'; import { getErrorMessage } from '@util/errors/hatchet-error'; import sleep from '@hatchet/util/sleep'; -import { createAbortError } from '@hatchet/util/abort-error'; 
+import { createAbortError, bindAbortSignalHandler } from '@hatchet/util/abort-error'; import { RunListenerClient } from './child-listener-client'; export class Streamable { @@ -61,20 +61,7 @@ export class Streamable { this.responseEmitter.once('response', onResponse); if (signal) { - /** - * Node defaults AbortSignal max listeners to 10, which is easy to exceed with - * legitimate high-concurrency waits (e.g. a cancelled parent task fanning out - * to many child `.result()` waits). - * - * If the signal is still at the default cap, bump it to a reasonable level - * to avoid noisy `MaxListenersExceededWarning` while still keeping protection - * against true leaks in unusual cases. - */ - const max = getMaxListeners(signal); - if (max !== 0 && max < 50) { - setMaxListeners(50, signal); - } - signal.addEventListener('abort', onAbort, { once: true }); + bindAbortSignalHandler(signal, onAbort); } }); } diff --git a/sdks/typescript/src/clients/rest/generated/Api.ts b/sdks/typescript/src/clients/rest/generated/Api.ts index 116895e1c..fcfb9b1d9 100644 --- a/sdks/typescript/src/clients/rest/generated/Api.ts +++ b/sdks/typescript/src/clients/rest/generated/Api.ts @@ -111,6 +111,7 @@ import { V1ReplayTaskRequest, V1ReplayedTasks, V1RestoreTaskResponse, + V1RunningFilter, V1TaskEventList, V1TaskPointMetrics, V1TaskRunMetrics, @@ -408,6 +409,8 @@ export class Api extends HttpClient @@ -472,6 +475,8 @@ export class Api extends HttpClient diff --git a/sdks/typescript/src/clients/rest/generated/data-contracts.ts b/sdks/typescript/src/clients/rest/generated/data-contracts.ts index 566a86e3a..61d3f5de7 100644 --- a/sdks/typescript/src/clients/rest/generated/data-contracts.ts +++ b/sdks/typescript/src/clients/rest/generated/data-contracts.ts @@ -13,7 +13,6 @@ export enum V1TaskRunStatus { PENDING = 'PENDING', RUNNING = 'RUNNING', - EVICTED = 'EVICTED', COMPLETED = 'COMPLETED', FAILED = 'FAILED', CANCELLED = 'CANCELLED', @@ -257,6 +256,12 @@ export enum TenantVersion { V1 = 'V1', } 
+export enum V1RunningFilter { + ALL = 'ALL', + EVICTED = 'EVICTED', + ON_WORKER = 'ON_WORKER', +} + export enum V1LogLineOrderByDirection { ASC = 'ASC', DESC = 'DESC', @@ -303,7 +308,6 @@ export enum V1WorkflowType { export enum V1TaskStatus { QUEUED = 'QUEUED', RUNNING = 'RUNNING', - EVICTED = 'EVICTED', COMPLETED = 'COMPLETED', CANCELLED = 'CANCELLED', FAILED = 'FAILED', @@ -366,6 +370,8 @@ export interface V1TaskSummary { /** The output of the task run (for the latest run) */ output: object; status: V1TaskStatus; + /** Whether the task has been evicted from a worker (still counts as RUNNING). */ + isEvicted?: boolean; /** * The timestamp the task run started. * @format date-time @@ -717,6 +723,8 @@ export interface V1TaskTiming { /** The depth of the task in the waterfall. */ depth: number; status: V1TaskStatus; + /** Whether the task has been evicted from a worker (still counts as RUNNING). */ + isEvicted?: boolean; /** The display name of the task run. */ taskDisplayName: string; /** @@ -780,9 +788,17 @@ export interface V1TaskTimingList { rows: V1TaskTiming[]; } +export interface V1RunningDetailCount { + /** The number of evicted tasks within the RUNNING status bucket. */ + evicted: number; + /** The number of tasks currently on a worker within the RUNNING status bucket. 
*/ + onWorker: number; +} + export interface V1TaskRunMetric { status: V1TaskStatus; count: number; + runningDetailCount?: V1RunningDetailCount; } export type V1TaskRunMetrics = V1TaskRunMetric[]; diff --git a/sdks/typescript/src/legacy/examples/affinity-workers.ts b/sdks/typescript/src/legacy/examples/affinity-workers.ts index 8152cc906..48372092b 100644 --- a/sdks/typescript/src/legacy/examples/affinity-workers.ts +++ b/sdks/typescript/src/legacy/examples/affinity-workers.ts @@ -1,4 +1,4 @@ -import { WorkerLabelComparator } from '@hatchet/protoc/workflows'; +import { WorkerLabelComparator } from '@hatchet/v1'; import Hatchet from '../../sdk'; import { Workflow } from '../../workflow'; diff --git a/sdks/typescript/src/legacy/legacy-client.ts b/sdks/typescript/src/legacy/legacy-client.ts index 02d4d9a50..3a5bcc770 100644 --- a/sdks/typescript/src/legacy/legacy-client.ts +++ b/sdks/typescript/src/legacy/legacy-client.ts @@ -122,8 +122,7 @@ export class LegacyHatchetClient { new DurableListenerClient( this.config, channelFactory(this.config, this.credentials), - clientFactory, - this.api + clientFactory ); this.logger = logger || this.config.logger('HatchetClient', this.config.log_level); diff --git a/sdks/typescript/src/legacy/step.ts b/sdks/typescript/src/legacy/step.ts index 34244ca90..b5c5e8991 100644 --- a/sdks/typescript/src/legacy/step.ts +++ b/sdks/typescript/src/legacy/step.ts @@ -8,8 +8,8 @@ import { Logger } from '../util/logger'; import { parseJSON } from '../util/parse'; import WorkflowRunRef from '../util/workflow-run-ref'; import { WorkerLabels } from '../clients/dispatcher/dispatcher-client'; -import { CreateStepRateLimit, RateLimitDuration, WorkerLabelComparator } from '../protoc/workflows'; -import { CreateWorkflowTaskOpts, Priority } from '../v1'; +import { CreateStepRateLimit, RateLimitDuration } from '../protoc/workflows'; +import { CreateWorkflowTaskOpts, Priority, WorkerLabelComparator } from '../v1'; import { RunOpts, TaskWorkflowDeclaration, 
@@ -18,7 +18,7 @@ import { import { Conditions, Render } from '../v1/conditions'; import { Action as ConditionAction } from '../protoc/v1/shared/condition'; import { conditionsToPb } from '../v1/conditions/transformer'; -import { Duration } from '../v1/client/duration'; +import { Duration, durationToString } from '../v1/client/duration'; import { JsonValue, OutputType } from '../v1/types'; import { InternalWorker } from '../v1/client/worker/worker-internal'; import { LegacyHatchetClient } from './legacy-client'; @@ -351,7 +351,7 @@ export class V0Context { return; } - await this.v0.dispatcher.refreshTimeout(incrementBy, taskRunExternalId); + await this.v0.dispatcher.refreshTimeout(durationToString(incrementBy), taskRunExternalId); } /** diff --git a/sdks/typescript/src/protoc/dispatcher/dispatcher.ts b/sdks/typescript/src/protoc/dispatcher/dispatcher.ts index f933ce3bb..51dda71dd 100644 --- a/sdks/typescript/src/protoc/dispatcher/dispatcher.ts +++ b/sdks/typescript/src/protoc/dispatcher/dispatcher.ts @@ -441,6 +441,8 @@ export interface AssignedAction { workflowId?: string | undefined; /** (optional) the workflow version id */ workflowVersionId?: string | undefined; + /** (optional) the invocation count for durable task events (required for durable events, otherwise null) */ + durableTaskInvocationCount?: number | undefined; } export interface WorkerListenRequest { @@ -596,6 +598,14 @@ export interface ReleaseSlotRequest { export interface ReleaseSlotResponse {} +export interface RestoreEvictedTaskRequest { + taskRunExternalId: string; +} + +export interface RestoreEvictedTaskResponse { + requeued: boolean; +} + export interface GetVersionRequest {} export interface GetVersionResponse { @@ -1615,6 +1625,7 @@ function createBaseAssignedAction(): AssignedAction { priority: 0, workflowId: undefined, workflowVersionId: undefined, + durableTaskInvocationCount: undefined, }; } @@ -1680,6 +1691,9 @@ export const AssignedAction: MessageFns = { if (message.workflowVersionId 
!== undefined) { writer.uint32(162).string(message.workflowVersionId); } + if (message.durableTaskInvocationCount !== undefined) { + writer.uint32(168).int32(message.durableTaskInvocationCount); + } return writer; }, @@ -1850,6 +1864,14 @@ export const AssignedAction: MessageFns = { message.workflowVersionId = reader.string(); continue; } + case 21: { + if (tag !== 168) { + break; + } + + message.durableTaskInvocationCount = reader.int32(); + continue; + } } if ((tag & 7) === 4 || tag === 0) { break; @@ -1895,6 +1917,9 @@ export const AssignedAction: MessageFns = { workflowVersionId: isSet(object.workflowVersionId) ? globalThis.String(object.workflowVersionId) : undefined, + durableTaskInvocationCount: isSet(object.durableTaskInvocationCount) + ? globalThis.Number(object.durableTaskInvocationCount) + : undefined, }; }, @@ -1960,6 +1985,9 @@ export const AssignedAction: MessageFns = { if (message.workflowVersionId !== undefined) { obj.workflowVersionId = message.workflowVersionId; } + if (message.durableTaskInvocationCount !== undefined) { + obj.durableTaskInvocationCount = Math.round(message.durableTaskInvocationCount); + } return obj; }, @@ -1988,6 +2016,7 @@ export const AssignedAction: MessageFns = { message.priority = object.priority ?? 0; message.workflowId = object.workflowId ?? undefined; message.workflowVersionId = object.workflowVersionId ?? undefined; + message.durableTaskInvocationCount = object.durableTaskInvocationCount ?? 
undefined; return message; }, }; @@ -3808,6 +3837,132 @@ export const ReleaseSlotResponse: MessageFns = { }, }; +function createBaseRestoreEvictedTaskRequest(): RestoreEvictedTaskRequest { + return { taskRunExternalId: '' }; +} + +export const RestoreEvictedTaskRequest: MessageFns = { + encode( + message: RestoreEvictedTaskRequest, + writer: BinaryWriter = new BinaryWriter() + ): BinaryWriter { + if (message.taskRunExternalId !== '') { + writer.uint32(10).string(message.taskRunExternalId); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): RestoreEvictedTaskRequest { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseRestoreEvictedTaskRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 10) { + break; + } + + message.taskRunExternalId = reader.string(); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): RestoreEvictedTaskRequest { + return { + taskRunExternalId: isSet(object.taskRunExternalId) + ? globalThis.String(object.taskRunExternalId) + : '', + }; + }, + + toJSON(message: RestoreEvictedTaskRequest): unknown { + const obj: any = {}; + if (message.taskRunExternalId !== '') { + obj.taskRunExternalId = message.taskRunExternalId; + } + return obj; + }, + + create(base?: DeepPartial): RestoreEvictedTaskRequest { + return RestoreEvictedTaskRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): RestoreEvictedTaskRequest { + const message = createBaseRestoreEvictedTaskRequest(); + message.taskRunExternalId = object.taskRunExternalId ?? 
''; + return message; + }, +}; + +function createBaseRestoreEvictedTaskResponse(): RestoreEvictedTaskResponse { + return { requeued: false }; +} + +export const RestoreEvictedTaskResponse: MessageFns = { + encode( + message: RestoreEvictedTaskResponse, + writer: BinaryWriter = new BinaryWriter() + ): BinaryWriter { + if (message.requeued !== false) { + writer.uint32(8).bool(message.requeued); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): RestoreEvictedTaskResponse { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseRestoreEvictedTaskResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 8) { + break; + } + + message.requeued = reader.bool(); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): RestoreEvictedTaskResponse { + return { requeued: isSet(object.requeued) ? globalThis.Boolean(object.requeued) : false }; + }, + + toJSON(message: RestoreEvictedTaskResponse): unknown { + const obj: any = {}; + if (message.requeued !== false) { + obj.requeued = message.requeued; + } + return obj; + }, + + create(base?: DeepPartial): RestoreEvictedTaskResponse { + return RestoreEvictedTaskResponse.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): RestoreEvictedTaskResponse { + const message = createBaseRestoreEvictedTaskResponse(); + message.requeued = object.requeued ?? 
false; + return message; + }, +}; + function createBaseGetVersionRequest(): GetVersionRequest { return {}; } @@ -4015,6 +4170,14 @@ export const DispatcherDefinition = { responseStream: false, options: {}, }, + restoreEvictedTask: { + name: 'RestoreEvictedTask', + requestType: RestoreEvictedTaskRequest, + requestStream: false, + responseType: RestoreEvictedTaskResponse, + responseStream: false, + options: {}, + }, upsertWorkerLabels: { name: 'UpsertWorkerLabels', requestType: UpsertWorkerLabelsRequest, @@ -4093,6 +4256,10 @@ export interface DispatcherServiceImplementation { request: ReleaseSlotRequest, context: CallContext & CallContextExt ): Promise>; + restoreEvictedTask( + request: RestoreEvictedTaskRequest, + context: CallContext & CallContextExt + ): Promise>; upsertWorkerLabels( request: UpsertWorkerLabelsRequest, context: CallContext & CallContextExt @@ -4162,6 +4329,10 @@ export interface DispatcherClient { request: DeepPartial, options?: CallOptions & CallOptionsExt ): Promise; + restoreEvictedTask( + request: DeepPartial, + options?: CallOptions & CallOptionsExt + ): Promise; upsertWorkerLabels( request: DeepPartial, options?: CallOptions & CallOptionsExt diff --git a/sdks/typescript/src/protoc/v1/dispatcher.ts b/sdks/typescript/src/protoc/v1/dispatcher.ts index 8df1d1e8e..823e41327 100644 --- a/sdks/typescript/src/protoc/v1/dispatcher.ts +++ b/sdks/typescript/src/protoc/v1/dispatcher.ts @@ -8,9 +8,190 @@ import { BinaryReader, BinaryWriter } from '@bufbuild/protobuf/wire'; import type { CallContext, CallOptions } from 'nice-grpc-common'; import { DurableEventListenerConditions } from './shared/condition'; +import { TriggerWorkflowRequest } from './shared/trigger'; export const protobufPackage = 'v1'; +export enum DurableTaskErrorType { + DURABLE_TASK_ERROR_TYPE_UNSPECIFIED = 0, + DURABLE_TASK_ERROR_TYPE_NONDETERMINISM = 1, + UNRECOGNIZED = -1, +} + +export function durableTaskErrorTypeFromJSON(object: any): DurableTaskErrorType { + switch (object) { + 
case 0: + case 'DURABLE_TASK_ERROR_TYPE_UNSPECIFIED': + return DurableTaskErrorType.DURABLE_TASK_ERROR_TYPE_UNSPECIFIED; + case 1: + case 'DURABLE_TASK_ERROR_TYPE_NONDETERMINISM': + return DurableTaskErrorType.DURABLE_TASK_ERROR_TYPE_NONDETERMINISM; + case -1: + case 'UNRECOGNIZED': + default: + return DurableTaskErrorType.UNRECOGNIZED; + } +} + +export function durableTaskErrorTypeToJSON(object: DurableTaskErrorType): string { + switch (object) { + case DurableTaskErrorType.DURABLE_TASK_ERROR_TYPE_UNSPECIFIED: + return 'DURABLE_TASK_ERROR_TYPE_UNSPECIFIED'; + case DurableTaskErrorType.DURABLE_TASK_ERROR_TYPE_NONDETERMINISM: + return 'DURABLE_TASK_ERROR_TYPE_NONDETERMINISM'; + case DurableTaskErrorType.UNRECOGNIZED: + default: + return 'UNRECOGNIZED'; + } +} + +export interface DurableTaskRequestRegisterWorker { + workerId: string; +} + +export interface DurableTaskResponseRegisterWorker { + workerId: string; +} + +export interface DurableEventLogEntryRef { + durableTaskExternalId: string; + invocationCount: number; + branchId: number; + nodeId: number; +} + +export interface DurableTaskRunAckEntry { + nodeId: number; + branchId: number; +} + +export interface DurableTaskEventMemoAckResponse { + ref: DurableEventLogEntryRef | undefined; + memoAlreadyExisted: boolean; + memoResultPayload?: Uint8Array | undefined; +} + +export interface DurableTaskEventTriggerRunsAckResponse { + durableTaskExternalId: string; + invocationCount: number; + runEntries: DurableTaskRunAckEntry[]; +} + +export interface DurableTaskEventWaitForAckResponse { + ref: DurableEventLogEntryRef | undefined; +} + +export interface DurableTaskEventLogEntryCompletedResponse { + ref: DurableEventLogEntryRef | undefined; + payload: Uint8Array; +} + +export interface DurableTaskEvictInvocationRequest { + invocationCount: number; + durableTaskExternalId: string; + reason?: string | undefined; +} + +/** Sent by the server after recording eviction for an evict_invocation request. 
*/ +export interface DurableTaskEvictionAckResponse { + invocationCount: number; + durableTaskExternalId: string; +} + +export interface DurableTaskAwaitedCompletedEntry { + durableTaskExternalId: string; + branchId: number; + nodeId: number; + invocationCount: number; +} + +/** Sent by the server to notify a worker that its invocation is stale and should be cancelled. */ +export interface DurableTaskServerEvictNotice { + durableTaskExternalId: string; + invocationCount: number; + reason: string; +} + +export interface DurableTaskWorkerStatusRequest { + workerId: string; + waitingEntries: DurableTaskAwaitedCompletedEntry[]; +} + +export interface DurableTaskCompleteMemoRequest { + ref: DurableEventLogEntryRef | undefined; + payload: Uint8Array; + memoKey: Uint8Array; +} + +export interface DurableTaskMemoRequest { + /** + * The invocation_count is a monotonically increasing count that uniquely identifies an "attempt" + * at running a durable task. Each time the task is started, it gets a new invocation count (which has) + * incremented by one since the previous invocation. This allows the server (and the worker) to have a way of + * differentiating between different attempts of the same task running in different places, to prevent race conditions + * and other problems from duplication. It also allows for older invocations to be evicted cleanly + */ + invocationCount: number; + durableTaskExternalId: string; + key: Uint8Array; + /** optional payload because we can send a memo request to check if a memo already exists */ + payload?: Uint8Array | undefined; +} + +export interface DurableTaskTriggerRunsRequest { + /** + * The invocation_count is a monotonically increasing count that uniquely identifies an "attempt" + * at running a durable task. Each time the task is started, it gets a new invocation count (which has) + * incremented by one since the previous invocation. 
This allows the server (and the worker) to have a way of + * differentiating between different attempts of the same task running in different places, to prevent race conditions + * and other problems from duplication. It also allows for older invocations to be evicted cleanly + */ + invocationCount: number; + durableTaskExternalId: string; + triggerOpts: TriggerWorkflowRequest[]; +} + +export interface DurableTaskWaitForRequest { + /** + * The invocation_count is a monotonically increasing count that uniquely identifies an "attempt" + * at running a durable task. Each time the task is started, it gets a new invocation count (which has) + * incremented by one since the previous invocation. This allows the server (and the worker) to have a way of + * differentiating between different attempts of the same task running in different places, to prevent race conditions + * and other problems from duplication. It also allows for older invocations to be evicted cleanly + */ + invocationCount: number; + durableTaskExternalId: string; + /** Fields for DURABLE_TASK_TRIGGER_KIND_WAIT_FOR */ + waitForConditions?: DurableEventListenerConditions | undefined; +} + +export interface DurableTaskRequest { + registerWorker?: DurableTaskRequestRegisterWorker | undefined; + memo?: DurableTaskMemoRequest | undefined; + triggerRuns?: DurableTaskTriggerRunsRequest | undefined; + waitFor?: DurableTaskWaitForRequest | undefined; + evictInvocation?: DurableTaskEvictInvocationRequest | undefined; + workerStatus?: DurableTaskWorkerStatusRequest | undefined; + completeMemo?: DurableTaskCompleteMemoRequest | undefined; +} + +export interface DurableTaskErrorResponse { + ref: DurableEventLogEntryRef | undefined; + errorType: DurableTaskErrorType; + errorMessage: string; +} + +export interface DurableTaskResponse { + registerWorker?: DurableTaskResponseRegisterWorker | undefined; + memoAck?: DurableTaskEventMemoAckResponse | undefined; + triggerRunsAck?: DurableTaskEventTriggerRunsAckResponse | 
undefined; + waitForAck?: DurableTaskEventWaitForAckResponse | undefined; + entryCompleted?: DurableTaskEventLogEntryCompletedResponse | undefined; + error?: DurableTaskErrorResponse | undefined; + evictionAck?: DurableTaskEvictionAckResponse | undefined; + serverEvict?: DurableTaskServerEvictNotice | undefined; +} + export interface RegisterDurableEventRequest { /** external uuid for the task run */ taskId: string; @@ -36,6 +217,2152 @@ export interface DurableEvent { data: Uint8Array; } +function createBaseDurableTaskRequestRegisterWorker(): DurableTaskRequestRegisterWorker { + return { workerId: '' }; +} + +export const DurableTaskRequestRegisterWorker: MessageFns = { + encode( + message: DurableTaskRequestRegisterWorker, + writer: BinaryWriter = new BinaryWriter() + ): BinaryWriter { + if (message.workerId !== '') { + writer.uint32(10).string(message.workerId); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): DurableTaskRequestRegisterWorker { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseDurableTaskRequestRegisterWorker(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 10) { + break; + } + + message.workerId = reader.string(); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DurableTaskRequestRegisterWorker { + return { workerId: isSet(object.workerId) ? globalThis.String(object.workerId) : '' }; + }, + + toJSON(message: DurableTaskRequestRegisterWorker): unknown { + const obj: any = {}; + if (message.workerId !== '') { + obj.workerId = message.workerId; + } + return obj; + }, + + create(base?: DeepPartial): DurableTaskRequestRegisterWorker { + return DurableTaskRequestRegisterWorker.fromPartial(base ?? 
{}); + }, + fromPartial( + object: DeepPartial + ): DurableTaskRequestRegisterWorker { + const message = createBaseDurableTaskRequestRegisterWorker(); + message.workerId = object.workerId ?? ''; + return message; + }, +}; + +function createBaseDurableTaskResponseRegisterWorker(): DurableTaskResponseRegisterWorker { + return { workerId: '' }; +} + +export const DurableTaskResponseRegisterWorker: MessageFns = { + encode( + message: DurableTaskResponseRegisterWorker, + writer: BinaryWriter = new BinaryWriter() + ): BinaryWriter { + if (message.workerId !== '') { + writer.uint32(10).string(message.workerId); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): DurableTaskResponseRegisterWorker { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseDurableTaskResponseRegisterWorker(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 10) { + break; + } + + message.workerId = reader.string(); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DurableTaskResponseRegisterWorker { + return { workerId: isSet(object.workerId) ? globalThis.String(object.workerId) : '' }; + }, + + toJSON(message: DurableTaskResponseRegisterWorker): unknown { + const obj: any = {}; + if (message.workerId !== '') { + obj.workerId = message.workerId; + } + return obj; + }, + + create(base?: DeepPartial): DurableTaskResponseRegisterWorker { + return DurableTaskResponseRegisterWorker.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial + ): DurableTaskResponseRegisterWorker { + const message = createBaseDurableTaskResponseRegisterWorker(); + message.workerId = object.workerId ?? 
''; + return message; + }, +}; + +function createBaseDurableEventLogEntryRef(): DurableEventLogEntryRef { + return { durableTaskExternalId: '', invocationCount: 0, branchId: 0, nodeId: 0 }; +} + +export const DurableEventLogEntryRef: MessageFns = { + encode( + message: DurableEventLogEntryRef, + writer: BinaryWriter = new BinaryWriter() + ): BinaryWriter { + if (message.durableTaskExternalId !== '') { + writer.uint32(10).string(message.durableTaskExternalId); + } + if (message.invocationCount !== 0) { + writer.uint32(16).int32(message.invocationCount); + } + if (message.branchId !== 0) { + writer.uint32(24).int64(message.branchId); + } + if (message.nodeId !== 0) { + writer.uint32(32).int64(message.nodeId); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): DurableEventLogEntryRef { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseDurableEventLogEntryRef(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 10) { + break; + } + + message.durableTaskExternalId = reader.string(); + continue; + } + case 2: { + if (tag !== 16) { + break; + } + + message.invocationCount = reader.int32(); + continue; + } + case 3: { + if (tag !== 24) { + break; + } + + message.branchId = longToNumber(reader.int64()); + continue; + } + case 4: { + if (tag !== 32) { + break; + } + + message.nodeId = longToNumber(reader.int64()); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DurableEventLogEntryRef { + return { + durableTaskExternalId: isSet(object.durableTaskExternalId) + ? globalThis.String(object.durableTaskExternalId) + : '', + invocationCount: isSet(object.invocationCount) + ? globalThis.Number(object.invocationCount) + : 0, + branchId: isSet(object.branchId) ? 
globalThis.Number(object.branchId) : 0, + nodeId: isSet(object.nodeId) ? globalThis.Number(object.nodeId) : 0, + }; + }, + + toJSON(message: DurableEventLogEntryRef): unknown { + const obj: any = {}; + if (message.durableTaskExternalId !== '') { + obj.durableTaskExternalId = message.durableTaskExternalId; + } + if (message.invocationCount !== 0) { + obj.invocationCount = Math.round(message.invocationCount); + } + if (message.branchId !== 0) { + obj.branchId = Math.round(message.branchId); + } + if (message.nodeId !== 0) { + obj.nodeId = Math.round(message.nodeId); + } + return obj; + }, + + create(base?: DeepPartial): DurableEventLogEntryRef { + return DurableEventLogEntryRef.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): DurableEventLogEntryRef { + const message = createBaseDurableEventLogEntryRef(); + message.durableTaskExternalId = object.durableTaskExternalId ?? ''; + message.invocationCount = object.invocationCount ?? 0; + message.branchId = object.branchId ?? 0; + message.nodeId = object.nodeId ?? 0; + return message; + }, +}; + +function createBaseDurableTaskRunAckEntry(): DurableTaskRunAckEntry { + return { nodeId: 0, branchId: 0 }; +} + +export const DurableTaskRunAckEntry: MessageFns = { + encode(message: DurableTaskRunAckEntry, writer: BinaryWriter = new BinaryWriter()): BinaryWriter { + if (message.nodeId !== 0) { + writer.uint32(8).int64(message.nodeId); + } + if (message.branchId !== 0) { + writer.uint32(16).int64(message.branchId); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): DurableTaskRunAckEntry { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseDurableTaskRunAckEntry(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 8) { + break; + } + + message.nodeId = longToNumber(reader.int64()); + continue; + } + case 2: { + if (tag !== 16) { + break; + } + + message.branchId = longToNumber(reader.int64()); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DurableTaskRunAckEntry { + return { + nodeId: isSet(object.nodeId) ? globalThis.Number(object.nodeId) : 0, + branchId: isSet(object.branchId) ? globalThis.Number(object.branchId) : 0, + }; + }, + + toJSON(message: DurableTaskRunAckEntry): unknown { + const obj: any = {}; + if (message.nodeId !== 0) { + obj.nodeId = Math.round(message.nodeId); + } + if (message.branchId !== 0) { + obj.branchId = Math.round(message.branchId); + } + return obj; + }, + + create(base?: DeepPartial): DurableTaskRunAckEntry { + return DurableTaskRunAckEntry.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): DurableTaskRunAckEntry { + const message = createBaseDurableTaskRunAckEntry(); + message.nodeId = object.nodeId ?? 0; + message.branchId = object.branchId ?? 
0; + return message; + }, +}; + +function createBaseDurableTaskEventMemoAckResponse(): DurableTaskEventMemoAckResponse { + return { ref: undefined, memoAlreadyExisted: false, memoResultPayload: undefined }; +} + +export const DurableTaskEventMemoAckResponse: MessageFns = { + encode( + message: DurableTaskEventMemoAckResponse, + writer: BinaryWriter = new BinaryWriter() + ): BinaryWriter { + if (message.ref !== undefined) { + DurableEventLogEntryRef.encode(message.ref, writer.uint32(10).fork()).join(); + } + if (message.memoAlreadyExisted !== false) { + writer.uint32(16).bool(message.memoAlreadyExisted); + } + if (message.memoResultPayload !== undefined) { + writer.uint32(26).bytes(message.memoResultPayload); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): DurableTaskEventMemoAckResponse { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseDurableTaskEventMemoAckResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 10) { + break; + } + + message.ref = DurableEventLogEntryRef.decode(reader, reader.uint32()); + continue; + } + case 2: { + if (tag !== 16) { + break; + } + + message.memoAlreadyExisted = reader.bool(); + continue; + } + case 3: { + if (tag !== 26) { + break; + } + + message.memoResultPayload = reader.bytes(); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DurableTaskEventMemoAckResponse { + return { + ref: isSet(object.ref) ? DurableEventLogEntryRef.fromJSON(object.ref) : undefined, + memoAlreadyExisted: isSet(object.memoAlreadyExisted) + ? globalThis.Boolean(object.memoAlreadyExisted) + : false, + memoResultPayload: isSet(object.memoResultPayload) + ? 
bytesFromBase64(object.memoResultPayload) + : undefined, + }; + }, + + toJSON(message: DurableTaskEventMemoAckResponse): unknown { + const obj: any = {}; + if (message.ref !== undefined) { + obj.ref = DurableEventLogEntryRef.toJSON(message.ref); + } + if (message.memoAlreadyExisted !== false) { + obj.memoAlreadyExisted = message.memoAlreadyExisted; + } + if (message.memoResultPayload !== undefined) { + obj.memoResultPayload = base64FromBytes(message.memoResultPayload); + } + return obj; + }, + + create(base?: DeepPartial): DurableTaskEventMemoAckResponse { + return DurableTaskEventMemoAckResponse.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial + ): DurableTaskEventMemoAckResponse { + const message = createBaseDurableTaskEventMemoAckResponse(); + message.ref = + object.ref !== undefined && object.ref !== null + ? DurableEventLogEntryRef.fromPartial(object.ref) + : undefined; + message.memoAlreadyExisted = object.memoAlreadyExisted ?? false; + message.memoResultPayload = object.memoResultPayload ?? undefined; + return message; + }, +}; + +function createBaseDurableTaskEventTriggerRunsAckResponse(): DurableTaskEventTriggerRunsAckResponse { + return { durableTaskExternalId: '', invocationCount: 0, runEntries: [] }; +} + +export const DurableTaskEventTriggerRunsAckResponse: MessageFns = + { + encode( + message: DurableTaskEventTriggerRunsAckResponse, + writer: BinaryWriter = new BinaryWriter() + ): BinaryWriter { + if (message.durableTaskExternalId !== '') { + writer.uint32(10).string(message.durableTaskExternalId); + } + if (message.invocationCount !== 0) { + writer.uint32(16).int32(message.invocationCount); + } + for (const v of message.runEntries) { + DurableTaskRunAckEntry.encode(v!, writer.uint32(26).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number + ): DurableTaskEventTriggerRunsAckResponse { + const reader = input instanceof BinaryReader ? 
input : new BinaryReader(input); + const end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseDurableTaskEventTriggerRunsAckResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 10) { + break; + } + + message.durableTaskExternalId = reader.string(); + continue; + } + case 2: { + if (tag !== 16) { + break; + } + + message.invocationCount = reader.int32(); + continue; + } + case 3: { + if (tag !== 26) { + break; + } + + message.runEntries.push(DurableTaskRunAckEntry.decode(reader, reader.uint32())); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DurableTaskEventTriggerRunsAckResponse { + return { + durableTaskExternalId: isSet(object.durableTaskExternalId) + ? globalThis.String(object.durableTaskExternalId) + : '', + invocationCount: isSet(object.invocationCount) + ? globalThis.Number(object.invocationCount) + : 0, + runEntries: globalThis.Array.isArray(object?.runEntries) + ? object.runEntries.map((e: any) => DurableTaskRunAckEntry.fromJSON(e)) + : [], + }; + }, + + toJSON(message: DurableTaskEventTriggerRunsAckResponse): unknown { + const obj: any = {}; + if (message.durableTaskExternalId !== '') { + obj.durableTaskExternalId = message.durableTaskExternalId; + } + if (message.invocationCount !== 0) { + obj.invocationCount = Math.round(message.invocationCount); + } + if (message.runEntries?.length) { + obj.runEntries = message.runEntries.map((e) => DurableTaskRunAckEntry.toJSON(e)); + } + return obj; + }, + + create( + base?: DeepPartial + ): DurableTaskEventTriggerRunsAckResponse { + return DurableTaskEventTriggerRunsAckResponse.fromPartial(base ?? 
{}); + }, + fromPartial( + object: DeepPartial + ): DurableTaskEventTriggerRunsAckResponse { + const message = createBaseDurableTaskEventTriggerRunsAckResponse(); + message.durableTaskExternalId = object.durableTaskExternalId ?? ''; + message.invocationCount = object.invocationCount ?? 0; + message.runEntries = + object.runEntries?.map((e) => DurableTaskRunAckEntry.fromPartial(e)) || []; + return message; + }, + }; + +function createBaseDurableTaskEventWaitForAckResponse(): DurableTaskEventWaitForAckResponse { + return { ref: undefined }; +} + +export const DurableTaskEventWaitForAckResponse: MessageFns = { + encode( + message: DurableTaskEventWaitForAckResponse, + writer: BinaryWriter = new BinaryWriter() + ): BinaryWriter { + if (message.ref !== undefined) { + DurableEventLogEntryRef.encode(message.ref, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): DurableTaskEventWaitForAckResponse { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseDurableTaskEventWaitForAckResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 10) { + break; + } + + message.ref = DurableEventLogEntryRef.decode(reader, reader.uint32()); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DurableTaskEventWaitForAckResponse { + return { ref: isSet(object.ref) ? 
DurableEventLogEntryRef.fromJSON(object.ref) : undefined }; + }, + + toJSON(message: DurableTaskEventWaitForAckResponse): unknown { + const obj: any = {}; + if (message.ref !== undefined) { + obj.ref = DurableEventLogEntryRef.toJSON(message.ref); + } + return obj; + }, + + create( + base?: DeepPartial + ): DurableTaskEventWaitForAckResponse { + return DurableTaskEventWaitForAckResponse.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial + ): DurableTaskEventWaitForAckResponse { + const message = createBaseDurableTaskEventWaitForAckResponse(); + message.ref = + object.ref !== undefined && object.ref !== null + ? DurableEventLogEntryRef.fromPartial(object.ref) + : undefined; + return message; + }, +}; + +function createBaseDurableTaskEventLogEntryCompletedResponse(): DurableTaskEventLogEntryCompletedResponse { + return { ref: undefined, payload: new Uint8Array(0) }; +} + +export const DurableTaskEventLogEntryCompletedResponse: MessageFns = + { + encode( + message: DurableTaskEventLogEntryCompletedResponse, + writer: BinaryWriter = new BinaryWriter() + ): BinaryWriter { + if (message.ref !== undefined) { + DurableEventLogEntryRef.encode(message.ref, writer.uint32(10).fork()).join(); + } + if (message.payload.length !== 0) { + writer.uint32(18).bytes(message.payload); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number + ): DurableTaskEventLogEntryCompletedResponse { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseDurableTaskEventLogEntryCompletedResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 10) { + break; + } + + message.ref = DurableEventLogEntryRef.decode(reader, reader.uint32()); + continue; + } + case 2: { + if (tag !== 18) { + break; + } + + message.payload = reader.bytes(); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DurableTaskEventLogEntryCompletedResponse { + return { + ref: isSet(object.ref) ? DurableEventLogEntryRef.fromJSON(object.ref) : undefined, + payload: isSet(object.payload) ? bytesFromBase64(object.payload) : new Uint8Array(0), + }; + }, + + toJSON(message: DurableTaskEventLogEntryCompletedResponse): unknown { + const obj: any = {}; + if (message.ref !== undefined) { + obj.ref = DurableEventLogEntryRef.toJSON(message.ref); + } + if (message.payload.length !== 0) { + obj.payload = base64FromBytes(message.payload); + } + return obj; + }, + + create( + base?: DeepPartial + ): DurableTaskEventLogEntryCompletedResponse { + return DurableTaskEventLogEntryCompletedResponse.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial + ): DurableTaskEventLogEntryCompletedResponse { + const message = createBaseDurableTaskEventLogEntryCompletedResponse(); + message.ref = + object.ref !== undefined && object.ref !== null + ? DurableEventLogEntryRef.fromPartial(object.ref) + : undefined; + message.payload = object.payload ?? 
new Uint8Array(0); + return message; + }, + }; + +function createBaseDurableTaskEvictInvocationRequest(): DurableTaskEvictInvocationRequest { + return { invocationCount: 0, durableTaskExternalId: '', reason: undefined }; +} + +export const DurableTaskEvictInvocationRequest: MessageFns = { + encode( + message: DurableTaskEvictInvocationRequest, + writer: BinaryWriter = new BinaryWriter() + ): BinaryWriter { + if (message.invocationCount !== 0) { + writer.uint32(8).int32(message.invocationCount); + } + if (message.durableTaskExternalId !== '') { + writer.uint32(18).string(message.durableTaskExternalId); + } + if (message.reason !== undefined) { + writer.uint32(26).string(message.reason); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): DurableTaskEvictInvocationRequest { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseDurableTaskEvictInvocationRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 8) { + break; + } + + message.invocationCount = reader.int32(); + continue; + } + case 2: { + if (tag !== 18) { + break; + } + + message.durableTaskExternalId = reader.string(); + continue; + } + case 3: { + if (tag !== 26) { + break; + } + + message.reason = reader.string(); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DurableTaskEvictInvocationRequest { + return { + invocationCount: isSet(object.invocationCount) + ? globalThis.Number(object.invocationCount) + : 0, + durableTaskExternalId: isSet(object.durableTaskExternalId) + ? globalThis.String(object.durableTaskExternalId) + : '', + reason: isSet(object.reason) ? 
globalThis.String(object.reason) : undefined, + }; + }, + + toJSON(message: DurableTaskEvictInvocationRequest): unknown { + const obj: any = {}; + if (message.invocationCount !== 0) { + obj.invocationCount = Math.round(message.invocationCount); + } + if (message.durableTaskExternalId !== '') { + obj.durableTaskExternalId = message.durableTaskExternalId; + } + if (message.reason !== undefined) { + obj.reason = message.reason; + } + return obj; + }, + + create(base?: DeepPartial): DurableTaskEvictInvocationRequest { + return DurableTaskEvictInvocationRequest.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial + ): DurableTaskEvictInvocationRequest { + const message = createBaseDurableTaskEvictInvocationRequest(); + message.invocationCount = object.invocationCount ?? 0; + message.durableTaskExternalId = object.durableTaskExternalId ?? ''; + message.reason = object.reason ?? undefined; + return message; + }, +}; + +function createBaseDurableTaskEvictionAckResponse(): DurableTaskEvictionAckResponse { + return { invocationCount: 0, durableTaskExternalId: '' }; +} + +export const DurableTaskEvictionAckResponse: MessageFns = { + encode( + message: DurableTaskEvictionAckResponse, + writer: BinaryWriter = new BinaryWriter() + ): BinaryWriter { + if (message.invocationCount !== 0) { + writer.uint32(8).int32(message.invocationCount); + } + if (message.durableTaskExternalId !== '') { + writer.uint32(18).string(message.durableTaskExternalId); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): DurableTaskEvictionAckResponse { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseDurableTaskEvictionAckResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 8) { + break; + } + + message.invocationCount = reader.int32(); + continue; + } + case 2: { + if (tag !== 18) { + break; + } + + message.durableTaskExternalId = reader.string(); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DurableTaskEvictionAckResponse { + return { + invocationCount: isSet(object.invocationCount) + ? globalThis.Number(object.invocationCount) + : 0, + durableTaskExternalId: isSet(object.durableTaskExternalId) + ? globalThis.String(object.durableTaskExternalId) + : '', + }; + }, + + toJSON(message: DurableTaskEvictionAckResponse): unknown { + const obj: any = {}; + if (message.invocationCount !== 0) { + obj.invocationCount = Math.round(message.invocationCount); + } + if (message.durableTaskExternalId !== '') { + obj.durableTaskExternalId = message.durableTaskExternalId; + } + return obj; + }, + + create(base?: DeepPartial): DurableTaskEvictionAckResponse { + return DurableTaskEvictionAckResponse.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): DurableTaskEvictionAckResponse { + const message = createBaseDurableTaskEvictionAckResponse(); + message.invocationCount = object.invocationCount ?? 0; + message.durableTaskExternalId = object.durableTaskExternalId ?? 
''; + return message; + }, +}; + +function createBaseDurableTaskAwaitedCompletedEntry(): DurableTaskAwaitedCompletedEntry { + return { durableTaskExternalId: '', branchId: 0, nodeId: 0, invocationCount: 0 }; +} + +export const DurableTaskAwaitedCompletedEntry: MessageFns = { + encode( + message: DurableTaskAwaitedCompletedEntry, + writer: BinaryWriter = new BinaryWriter() + ): BinaryWriter { + if (message.durableTaskExternalId !== '') { + writer.uint32(10).string(message.durableTaskExternalId); + } + if (message.branchId !== 0) { + writer.uint32(16).int64(message.branchId); + } + if (message.nodeId !== 0) { + writer.uint32(24).int64(message.nodeId); + } + if (message.invocationCount !== 0) { + writer.uint32(32).int32(message.invocationCount); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): DurableTaskAwaitedCompletedEntry { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseDurableTaskAwaitedCompletedEntry(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 10) { + break; + } + + message.durableTaskExternalId = reader.string(); + continue; + } + case 2: { + if (tag !== 16) { + break; + } + + message.branchId = longToNumber(reader.int64()); + continue; + } + case 3: { + if (tag !== 24) { + break; + } + + message.nodeId = longToNumber(reader.int64()); + continue; + } + case 4: { + if (tag !== 32) { + break; + } + + message.invocationCount = reader.int32(); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DurableTaskAwaitedCompletedEntry { + return { + durableTaskExternalId: isSet(object.durableTaskExternalId) + ? globalThis.String(object.durableTaskExternalId) + : '', + branchId: isSet(object.branchId) ? 
globalThis.Number(object.branchId) : 0, + nodeId: isSet(object.nodeId) ? globalThis.Number(object.nodeId) : 0, + invocationCount: isSet(object.invocationCount) + ? globalThis.Number(object.invocationCount) + : 0, + }; + }, + + toJSON(message: DurableTaskAwaitedCompletedEntry): unknown { + const obj: any = {}; + if (message.durableTaskExternalId !== '') { + obj.durableTaskExternalId = message.durableTaskExternalId; + } + if (message.branchId !== 0) { + obj.branchId = Math.round(message.branchId); + } + if (message.nodeId !== 0) { + obj.nodeId = Math.round(message.nodeId); + } + if (message.invocationCount !== 0) { + obj.invocationCount = Math.round(message.invocationCount); + } + return obj; + }, + + create(base?: DeepPartial): DurableTaskAwaitedCompletedEntry { + return DurableTaskAwaitedCompletedEntry.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial + ): DurableTaskAwaitedCompletedEntry { + const message = createBaseDurableTaskAwaitedCompletedEntry(); + message.durableTaskExternalId = object.durableTaskExternalId ?? ''; + message.branchId = object.branchId ?? 0; + message.nodeId = object.nodeId ?? 0; + message.invocationCount = object.invocationCount ?? 
0; + return message; + }, +}; + +function createBaseDurableTaskServerEvictNotice(): DurableTaskServerEvictNotice { + return { durableTaskExternalId: '', invocationCount: 0, reason: '' }; +} + +export const DurableTaskServerEvictNotice: MessageFns = { + encode( + message: DurableTaskServerEvictNotice, + writer: BinaryWriter = new BinaryWriter() + ): BinaryWriter { + if (message.durableTaskExternalId !== '') { + writer.uint32(10).string(message.durableTaskExternalId); + } + if (message.invocationCount !== 0) { + writer.uint32(16).int32(message.invocationCount); + } + if (message.reason !== '') { + writer.uint32(26).string(message.reason); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): DurableTaskServerEvictNotice { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseDurableTaskServerEvictNotice(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 10) { + break; + } + + message.durableTaskExternalId = reader.string(); + continue; + } + case 2: { + if (tag !== 16) { + break; + } + + message.invocationCount = reader.int32(); + continue; + } + case 3: { + if (tag !== 26) { + break; + } + + message.reason = reader.string(); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DurableTaskServerEvictNotice { + return { + durableTaskExternalId: isSet(object.durableTaskExternalId) + ? globalThis.String(object.durableTaskExternalId) + : '', + invocationCount: isSet(object.invocationCount) + ? globalThis.Number(object.invocationCount) + : 0, + reason: isSet(object.reason) ? 
globalThis.String(object.reason) : '', + }; + }, + + toJSON(message: DurableTaskServerEvictNotice): unknown { + const obj: any = {}; + if (message.durableTaskExternalId !== '') { + obj.durableTaskExternalId = message.durableTaskExternalId; + } + if (message.invocationCount !== 0) { + obj.invocationCount = Math.round(message.invocationCount); + } + if (message.reason !== '') { + obj.reason = message.reason; + } + return obj; + }, + + create(base?: DeepPartial): DurableTaskServerEvictNotice { + return DurableTaskServerEvictNotice.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): DurableTaskServerEvictNotice { + const message = createBaseDurableTaskServerEvictNotice(); + message.durableTaskExternalId = object.durableTaskExternalId ?? ''; + message.invocationCount = object.invocationCount ?? 0; + message.reason = object.reason ?? ''; + return message; + }, +}; + +function createBaseDurableTaskWorkerStatusRequest(): DurableTaskWorkerStatusRequest { + return { workerId: '', waitingEntries: [] }; +} + +export const DurableTaskWorkerStatusRequest: MessageFns = { + encode( + message: DurableTaskWorkerStatusRequest, + writer: BinaryWriter = new BinaryWriter() + ): BinaryWriter { + if (message.workerId !== '') { + writer.uint32(10).string(message.workerId); + } + for (const v of message.waitingEntries) { + DurableTaskAwaitedCompletedEntry.encode(v!, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): DurableTaskWorkerStatusRequest { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseDurableTaskWorkerStatusRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 10) { + break; + } + + message.workerId = reader.string(); + continue; + } + case 2: { + if (tag !== 18) { + break; + } + + message.waitingEntries.push( + DurableTaskAwaitedCompletedEntry.decode(reader, reader.uint32()) + ); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DurableTaskWorkerStatusRequest { + return { + workerId: isSet(object.workerId) ? globalThis.String(object.workerId) : '', + waitingEntries: globalThis.Array.isArray(object?.waitingEntries) + ? object.waitingEntries.map((e: any) => DurableTaskAwaitedCompletedEntry.fromJSON(e)) + : [], + }; + }, + + toJSON(message: DurableTaskWorkerStatusRequest): unknown { + const obj: any = {}; + if (message.workerId !== '') { + obj.workerId = message.workerId; + } + if (message.waitingEntries?.length) { + obj.waitingEntries = message.waitingEntries.map((e) => + DurableTaskAwaitedCompletedEntry.toJSON(e) + ); + } + return obj; + }, + + create(base?: DeepPartial): DurableTaskWorkerStatusRequest { + return DurableTaskWorkerStatusRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): DurableTaskWorkerStatusRequest { + const message = createBaseDurableTaskWorkerStatusRequest(); + message.workerId = object.workerId ?? 
''; + message.waitingEntries = + object.waitingEntries?.map((e) => DurableTaskAwaitedCompletedEntry.fromPartial(e)) || []; + return message; + }, +}; + +function createBaseDurableTaskCompleteMemoRequest(): DurableTaskCompleteMemoRequest { + return { ref: undefined, payload: new Uint8Array(0), memoKey: new Uint8Array(0) }; +} + +export const DurableTaskCompleteMemoRequest: MessageFns = { + encode( + message: DurableTaskCompleteMemoRequest, + writer: BinaryWriter = new BinaryWriter() + ): BinaryWriter { + if (message.ref !== undefined) { + DurableEventLogEntryRef.encode(message.ref, writer.uint32(10).fork()).join(); + } + if (message.payload.length !== 0) { + writer.uint32(18).bytes(message.payload); + } + if (message.memoKey.length !== 0) { + writer.uint32(26).bytes(message.memoKey); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): DurableTaskCompleteMemoRequest { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseDurableTaskCompleteMemoRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 10) { + break; + } + + message.ref = DurableEventLogEntryRef.decode(reader, reader.uint32()); + continue; + } + case 2: { + if (tag !== 18) { + break; + } + + message.payload = reader.bytes(); + continue; + } + case 3: { + if (tag !== 26) { + break; + } + + message.memoKey = reader.bytes(); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DurableTaskCompleteMemoRequest { + return { + ref: isSet(object.ref) ? DurableEventLogEntryRef.fromJSON(object.ref) : undefined, + payload: isSet(object.payload) ? bytesFromBase64(object.payload) : new Uint8Array(0), + memoKey: isSet(object.memoKey) ? 
bytesFromBase64(object.memoKey) : new Uint8Array(0), + }; + }, + + toJSON(message: DurableTaskCompleteMemoRequest): unknown { + const obj: any = {}; + if (message.ref !== undefined) { + obj.ref = DurableEventLogEntryRef.toJSON(message.ref); + } + if (message.payload.length !== 0) { + obj.payload = base64FromBytes(message.payload); + } + if (message.memoKey.length !== 0) { + obj.memoKey = base64FromBytes(message.memoKey); + } + return obj; + }, + + create(base?: DeepPartial): DurableTaskCompleteMemoRequest { + return DurableTaskCompleteMemoRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): DurableTaskCompleteMemoRequest { + const message = createBaseDurableTaskCompleteMemoRequest(); + message.ref = + object.ref !== undefined && object.ref !== null + ? DurableEventLogEntryRef.fromPartial(object.ref) + : undefined; + message.payload = object.payload ?? new Uint8Array(0); + message.memoKey = object.memoKey ?? new Uint8Array(0); + return message; + }, +}; + +function createBaseDurableTaskMemoRequest(): DurableTaskMemoRequest { + return { + invocationCount: 0, + durableTaskExternalId: '', + key: new Uint8Array(0), + payload: undefined, + }; +} + +export const DurableTaskMemoRequest: MessageFns = { + encode(message: DurableTaskMemoRequest, writer: BinaryWriter = new BinaryWriter()): BinaryWriter { + if (message.invocationCount !== 0) { + writer.uint32(8).int32(message.invocationCount); + } + if (message.durableTaskExternalId !== '') { + writer.uint32(18).string(message.durableTaskExternalId); + } + if (message.key.length !== 0) { + writer.uint32(26).bytes(message.key); + } + if (message.payload !== undefined) { + writer.uint32(34).bytes(message.payload); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): DurableTaskMemoRequest { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseDurableTaskMemoRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 8) { + break; + } + + message.invocationCount = reader.int32(); + continue; + } + case 2: { + if (tag !== 18) { + break; + } + + message.durableTaskExternalId = reader.string(); + continue; + } + case 3: { + if (tag !== 26) { + break; + } + + message.key = reader.bytes(); + continue; + } + case 4: { + if (tag !== 34) { + break; + } + + message.payload = reader.bytes(); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DurableTaskMemoRequest { + return { + invocationCount: isSet(object.invocationCount) + ? globalThis.Number(object.invocationCount) + : 0, + durableTaskExternalId: isSet(object.durableTaskExternalId) + ? globalThis.String(object.durableTaskExternalId) + : '', + key: isSet(object.key) ? bytesFromBase64(object.key) : new Uint8Array(0), + payload: isSet(object.payload) ? bytesFromBase64(object.payload) : undefined, + }; + }, + + toJSON(message: DurableTaskMemoRequest): unknown { + const obj: any = {}; + if (message.invocationCount !== 0) { + obj.invocationCount = Math.round(message.invocationCount); + } + if (message.durableTaskExternalId !== '') { + obj.durableTaskExternalId = message.durableTaskExternalId; + } + if (message.key.length !== 0) { + obj.key = base64FromBytes(message.key); + } + if (message.payload !== undefined) { + obj.payload = base64FromBytes(message.payload); + } + return obj; + }, + + create(base?: DeepPartial): DurableTaskMemoRequest { + return DurableTaskMemoRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): DurableTaskMemoRequest { + const message = createBaseDurableTaskMemoRequest(); + message.invocationCount = object.invocationCount ?? 0; + message.durableTaskExternalId = object.durableTaskExternalId ?? 
''; + message.key = object.key ?? new Uint8Array(0); + message.payload = object.payload ?? undefined; + return message; + }, +}; + +function createBaseDurableTaskTriggerRunsRequest(): DurableTaskTriggerRunsRequest { + return { invocationCount: 0, durableTaskExternalId: '', triggerOpts: [] }; +} + +export const DurableTaskTriggerRunsRequest: MessageFns = { + encode( + message: DurableTaskTriggerRunsRequest, + writer: BinaryWriter = new BinaryWriter() + ): BinaryWriter { + if (message.invocationCount !== 0) { + writer.uint32(8).int32(message.invocationCount); + } + if (message.durableTaskExternalId !== '') { + writer.uint32(18).string(message.durableTaskExternalId); + } + for (const v of message.triggerOpts) { + TriggerWorkflowRequest.encode(v!, writer.uint32(26).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): DurableTaskTriggerRunsRequest { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseDurableTaskTriggerRunsRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 8) { + break; + } + + message.invocationCount = reader.int32(); + continue; + } + case 2: { + if (tag !== 18) { + break; + } + + message.durableTaskExternalId = reader.string(); + continue; + } + case 3: { + if (tag !== 26) { + break; + } + + message.triggerOpts.push(TriggerWorkflowRequest.decode(reader, reader.uint32())); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DurableTaskTriggerRunsRequest { + return { + invocationCount: isSet(object.invocationCount) + ? globalThis.Number(object.invocationCount) + : 0, + durableTaskExternalId: isSet(object.durableTaskExternalId) + ? 
globalThis.String(object.durableTaskExternalId) + : '', + triggerOpts: globalThis.Array.isArray(object?.triggerOpts) + ? object.triggerOpts.map((e: any) => TriggerWorkflowRequest.fromJSON(e)) + : [], + }; + }, + + toJSON(message: DurableTaskTriggerRunsRequest): unknown { + const obj: any = {}; + if (message.invocationCount !== 0) { + obj.invocationCount = Math.round(message.invocationCount); + } + if (message.durableTaskExternalId !== '') { + obj.durableTaskExternalId = message.durableTaskExternalId; + } + if (message.triggerOpts?.length) { + obj.triggerOpts = message.triggerOpts.map((e) => TriggerWorkflowRequest.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): DurableTaskTriggerRunsRequest { + return DurableTaskTriggerRunsRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): DurableTaskTriggerRunsRequest { + const message = createBaseDurableTaskTriggerRunsRequest(); + message.invocationCount = object.invocationCount ?? 0; + message.durableTaskExternalId = object.durableTaskExternalId ?? 
''; + message.triggerOpts = + object.triggerOpts?.map((e) => TriggerWorkflowRequest.fromPartial(e)) || []; + return message; + }, +}; + +function createBaseDurableTaskWaitForRequest(): DurableTaskWaitForRequest { + return { invocationCount: 0, durableTaskExternalId: '', waitForConditions: undefined }; +} + +export const DurableTaskWaitForRequest: MessageFns = { + encode( + message: DurableTaskWaitForRequest, + writer: BinaryWriter = new BinaryWriter() + ): BinaryWriter { + if (message.invocationCount !== 0) { + writer.uint32(8).int32(message.invocationCount); + } + if (message.durableTaskExternalId !== '') { + writer.uint32(18).string(message.durableTaskExternalId); + } + if (message.waitForConditions !== undefined) { + DurableEventListenerConditions.encode( + message.waitForConditions, + writer.uint32(26).fork() + ).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): DurableTaskWaitForRequest { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseDurableTaskWaitForRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 8) { + break; + } + + message.invocationCount = reader.int32(); + continue; + } + case 2: { + if (tag !== 18) { + break; + } + + message.durableTaskExternalId = reader.string(); + continue; + } + case 3: { + if (tag !== 26) { + break; + } + + message.waitForConditions = DurableEventListenerConditions.decode( + reader, + reader.uint32() + ); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DurableTaskWaitForRequest { + return { + invocationCount: isSet(object.invocationCount) + ? globalThis.Number(object.invocationCount) + : 0, + durableTaskExternalId: isSet(object.durableTaskExternalId) + ? 
globalThis.String(object.durableTaskExternalId) + : '', + waitForConditions: isSet(object.waitForConditions) + ? DurableEventListenerConditions.fromJSON(object.waitForConditions) + : undefined, + }; + }, + + toJSON(message: DurableTaskWaitForRequest): unknown { + const obj: any = {}; + if (message.invocationCount !== 0) { + obj.invocationCount = Math.round(message.invocationCount); + } + if (message.durableTaskExternalId !== '') { + obj.durableTaskExternalId = message.durableTaskExternalId; + } + if (message.waitForConditions !== undefined) { + obj.waitForConditions = DurableEventListenerConditions.toJSON(message.waitForConditions); + } + return obj; + }, + + create(base?: DeepPartial): DurableTaskWaitForRequest { + return DurableTaskWaitForRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): DurableTaskWaitForRequest { + const message = createBaseDurableTaskWaitForRequest(); + message.invocationCount = object.invocationCount ?? 0; + message.durableTaskExternalId = object.durableTaskExternalId ?? ''; + message.waitForConditions = + object.waitForConditions !== undefined && object.waitForConditions !== null + ? 
DurableEventListenerConditions.fromPartial(object.waitForConditions) + : undefined; + return message; + }, +}; + +function createBaseDurableTaskRequest(): DurableTaskRequest { + return { + registerWorker: undefined, + memo: undefined, + triggerRuns: undefined, + waitFor: undefined, + evictInvocation: undefined, + workerStatus: undefined, + completeMemo: undefined, + }; +} + +export const DurableTaskRequest: MessageFns = { + encode(message: DurableTaskRequest, writer: BinaryWriter = new BinaryWriter()): BinaryWriter { + if (message.registerWorker !== undefined) { + DurableTaskRequestRegisterWorker.encode( + message.registerWorker, + writer.uint32(10).fork() + ).join(); + } + if (message.memo !== undefined) { + DurableTaskMemoRequest.encode(message.memo, writer.uint32(18).fork()).join(); + } + if (message.triggerRuns !== undefined) { + DurableTaskTriggerRunsRequest.encode(message.triggerRuns, writer.uint32(26).fork()).join(); + } + if (message.waitFor !== undefined) { + DurableTaskWaitForRequest.encode(message.waitFor, writer.uint32(34).fork()).join(); + } + if (message.evictInvocation !== undefined) { + DurableTaskEvictInvocationRequest.encode( + message.evictInvocation, + writer.uint32(42).fork() + ).join(); + } + if (message.workerStatus !== undefined) { + DurableTaskWorkerStatusRequest.encode(message.workerStatus, writer.uint32(50).fork()).join(); + } + if (message.completeMemo !== undefined) { + DurableTaskCompleteMemoRequest.encode(message.completeMemo, writer.uint32(58).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): DurableTaskRequest { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseDurableTaskRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 10) { + break; + } + + message.registerWorker = DurableTaskRequestRegisterWorker.decode(reader, reader.uint32()); + continue; + } + case 2: { + if (tag !== 18) { + break; + } + + message.memo = DurableTaskMemoRequest.decode(reader, reader.uint32()); + continue; + } + case 3: { + if (tag !== 26) { + break; + } + + message.triggerRuns = DurableTaskTriggerRunsRequest.decode(reader, reader.uint32()); + continue; + } + case 4: { + if (tag !== 34) { + break; + } + + message.waitFor = DurableTaskWaitForRequest.decode(reader, reader.uint32()); + continue; + } + case 5: { + if (tag !== 42) { + break; + } + + message.evictInvocation = DurableTaskEvictInvocationRequest.decode( + reader, + reader.uint32() + ); + continue; + } + case 6: { + if (tag !== 50) { + break; + } + + message.workerStatus = DurableTaskWorkerStatusRequest.decode(reader, reader.uint32()); + continue; + } + case 7: { + if (tag !== 58) { + break; + } + + message.completeMemo = DurableTaskCompleteMemoRequest.decode(reader, reader.uint32()); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DurableTaskRequest { + return { + registerWorker: isSet(object.registerWorker) + ? DurableTaskRequestRegisterWorker.fromJSON(object.registerWorker) + : undefined, + memo: isSet(object.memo) ? DurableTaskMemoRequest.fromJSON(object.memo) : undefined, + triggerRuns: isSet(object.triggerRuns) + ? DurableTaskTriggerRunsRequest.fromJSON(object.triggerRuns) + : undefined, + waitFor: isSet(object.waitFor) + ? DurableTaskWaitForRequest.fromJSON(object.waitFor) + : undefined, + evictInvocation: isSet(object.evictInvocation) + ? 
DurableTaskEvictInvocationRequest.fromJSON(object.evictInvocation) + : undefined, + workerStatus: isSet(object.workerStatus) + ? DurableTaskWorkerStatusRequest.fromJSON(object.workerStatus) + : undefined, + completeMemo: isSet(object.completeMemo) + ? DurableTaskCompleteMemoRequest.fromJSON(object.completeMemo) + : undefined, + }; + }, + + toJSON(message: DurableTaskRequest): unknown { + const obj: any = {}; + if (message.registerWorker !== undefined) { + obj.registerWorker = DurableTaskRequestRegisterWorker.toJSON(message.registerWorker); + } + if (message.memo !== undefined) { + obj.memo = DurableTaskMemoRequest.toJSON(message.memo); + } + if (message.triggerRuns !== undefined) { + obj.triggerRuns = DurableTaskTriggerRunsRequest.toJSON(message.triggerRuns); + } + if (message.waitFor !== undefined) { + obj.waitFor = DurableTaskWaitForRequest.toJSON(message.waitFor); + } + if (message.evictInvocation !== undefined) { + obj.evictInvocation = DurableTaskEvictInvocationRequest.toJSON(message.evictInvocation); + } + if (message.workerStatus !== undefined) { + obj.workerStatus = DurableTaskWorkerStatusRequest.toJSON(message.workerStatus); + } + if (message.completeMemo !== undefined) { + obj.completeMemo = DurableTaskCompleteMemoRequest.toJSON(message.completeMemo); + } + return obj; + }, + + create(base?: DeepPartial): DurableTaskRequest { + return DurableTaskRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): DurableTaskRequest { + const message = createBaseDurableTaskRequest(); + message.registerWorker = + object.registerWorker !== undefined && object.registerWorker !== null + ? DurableTaskRequestRegisterWorker.fromPartial(object.registerWorker) + : undefined; + message.memo = + object.memo !== undefined && object.memo !== null + ? DurableTaskMemoRequest.fromPartial(object.memo) + : undefined; + message.triggerRuns = + object.triggerRuns !== undefined && object.triggerRuns !== null + ? 
DurableTaskTriggerRunsRequest.fromPartial(object.triggerRuns) + : undefined; + message.waitFor = + object.waitFor !== undefined && object.waitFor !== null + ? DurableTaskWaitForRequest.fromPartial(object.waitFor) + : undefined; + message.evictInvocation = + object.evictInvocation !== undefined && object.evictInvocation !== null + ? DurableTaskEvictInvocationRequest.fromPartial(object.evictInvocation) + : undefined; + message.workerStatus = + object.workerStatus !== undefined && object.workerStatus !== null + ? DurableTaskWorkerStatusRequest.fromPartial(object.workerStatus) + : undefined; + message.completeMemo = + object.completeMemo !== undefined && object.completeMemo !== null + ? DurableTaskCompleteMemoRequest.fromPartial(object.completeMemo) + : undefined; + return message; + }, +}; + +function createBaseDurableTaskErrorResponse(): DurableTaskErrorResponse { + return { ref: undefined, errorType: 0, errorMessage: '' }; +} + +export const DurableTaskErrorResponse: MessageFns = { + encode( + message: DurableTaskErrorResponse, + writer: BinaryWriter = new BinaryWriter() + ): BinaryWriter { + if (message.ref !== undefined) { + DurableEventLogEntryRef.encode(message.ref, writer.uint32(10).fork()).join(); + } + if (message.errorType !== 0) { + writer.uint32(16).int32(message.errorType); + } + if (message.errorMessage !== '') { + writer.uint32(26).string(message.errorMessage); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): DurableTaskErrorResponse { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseDurableTaskErrorResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 10) { + break; + } + + message.ref = DurableEventLogEntryRef.decode(reader, reader.uint32()); + continue; + } + case 2: { + if (tag !== 16) { + break; + } + + message.errorType = reader.int32() as any; + continue; + } + case 3: { + if (tag !== 26) { + break; + } + + message.errorMessage = reader.string(); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DurableTaskErrorResponse { + return { + ref: isSet(object.ref) ? DurableEventLogEntryRef.fromJSON(object.ref) : undefined, + errorType: isSet(object.errorType) ? durableTaskErrorTypeFromJSON(object.errorType) : 0, + errorMessage: isSet(object.errorMessage) ? globalThis.String(object.errorMessage) : '', + }; + }, + + toJSON(message: DurableTaskErrorResponse): unknown { + const obj: any = {}; + if (message.ref !== undefined) { + obj.ref = DurableEventLogEntryRef.toJSON(message.ref); + } + if (message.errorType !== 0) { + obj.errorType = durableTaskErrorTypeToJSON(message.errorType); + } + if (message.errorMessage !== '') { + obj.errorMessage = message.errorMessage; + } + return obj; + }, + + create(base?: DeepPartial): DurableTaskErrorResponse { + return DurableTaskErrorResponse.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): DurableTaskErrorResponse { + const message = createBaseDurableTaskErrorResponse(); + message.ref = + object.ref !== undefined && object.ref !== null + ? DurableEventLogEntryRef.fromPartial(object.ref) + : undefined; + message.errorType = object.errorType ?? 0; + message.errorMessage = object.errorMessage ?? 
''; + return message; + }, +}; + +function createBaseDurableTaskResponse(): DurableTaskResponse { + return { + registerWorker: undefined, + memoAck: undefined, + triggerRunsAck: undefined, + waitForAck: undefined, + entryCompleted: undefined, + error: undefined, + evictionAck: undefined, + serverEvict: undefined, + }; +} + +export const DurableTaskResponse: MessageFns = { + encode(message: DurableTaskResponse, writer: BinaryWriter = new BinaryWriter()): BinaryWriter { + if (message.registerWorker !== undefined) { + DurableTaskResponseRegisterWorker.encode( + message.registerWorker, + writer.uint32(10).fork() + ).join(); + } + if (message.memoAck !== undefined) { + DurableTaskEventMemoAckResponse.encode(message.memoAck, writer.uint32(18).fork()).join(); + } + if (message.triggerRunsAck !== undefined) { + DurableTaskEventTriggerRunsAckResponse.encode( + message.triggerRunsAck, + writer.uint32(26).fork() + ).join(); + } + if (message.waitForAck !== undefined) { + DurableTaskEventWaitForAckResponse.encode( + message.waitForAck, + writer.uint32(34).fork() + ).join(); + } + if (message.entryCompleted !== undefined) { + DurableTaskEventLogEntryCompletedResponse.encode( + message.entryCompleted, + writer.uint32(42).fork() + ).join(); + } + if (message.error !== undefined) { + DurableTaskErrorResponse.encode(message.error, writer.uint32(50).fork()).join(); + } + if (message.evictionAck !== undefined) { + DurableTaskEvictionAckResponse.encode(message.evictionAck, writer.uint32(58).fork()).join(); + } + if (message.serverEvict !== undefined) { + DurableTaskServerEvictNotice.encode(message.serverEvict, writer.uint32(66).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): DurableTaskResponse { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseDurableTaskResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 10) { + break; + } + + message.registerWorker = DurableTaskResponseRegisterWorker.decode( + reader, + reader.uint32() + ); + continue; + } + case 2: { + if (tag !== 18) { + break; + } + + message.memoAck = DurableTaskEventMemoAckResponse.decode(reader, reader.uint32()); + continue; + } + case 3: { + if (tag !== 26) { + break; + } + + message.triggerRunsAck = DurableTaskEventTriggerRunsAckResponse.decode( + reader, + reader.uint32() + ); + continue; + } + case 4: { + if (tag !== 34) { + break; + } + + message.waitForAck = DurableTaskEventWaitForAckResponse.decode(reader, reader.uint32()); + continue; + } + case 5: { + if (tag !== 42) { + break; + } + + message.entryCompleted = DurableTaskEventLogEntryCompletedResponse.decode( + reader, + reader.uint32() + ); + continue; + } + case 6: { + if (tag !== 50) { + break; + } + + message.error = DurableTaskErrorResponse.decode(reader, reader.uint32()); + continue; + } + case 7: { + if (tag !== 58) { + break; + } + + message.evictionAck = DurableTaskEvictionAckResponse.decode(reader, reader.uint32()); + continue; + } + case 8: { + if (tag !== 66) { + break; + } + + message.serverEvict = DurableTaskServerEvictNotice.decode(reader, reader.uint32()); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DurableTaskResponse { + return { + registerWorker: isSet(object.registerWorker) + ? DurableTaskResponseRegisterWorker.fromJSON(object.registerWorker) + : undefined, + memoAck: isSet(object.memoAck) + ? DurableTaskEventMemoAckResponse.fromJSON(object.memoAck) + : undefined, + triggerRunsAck: isSet(object.triggerRunsAck) + ? 
DurableTaskEventTriggerRunsAckResponse.fromJSON(object.triggerRunsAck) + : undefined, + waitForAck: isSet(object.waitForAck) + ? DurableTaskEventWaitForAckResponse.fromJSON(object.waitForAck) + : undefined, + entryCompleted: isSet(object.entryCompleted) + ? DurableTaskEventLogEntryCompletedResponse.fromJSON(object.entryCompleted) + : undefined, + error: isSet(object.error) ? DurableTaskErrorResponse.fromJSON(object.error) : undefined, + evictionAck: isSet(object.evictionAck) + ? DurableTaskEvictionAckResponse.fromJSON(object.evictionAck) + : undefined, + serverEvict: isSet(object.serverEvict) + ? DurableTaskServerEvictNotice.fromJSON(object.serverEvict) + : undefined, + }; + }, + + toJSON(message: DurableTaskResponse): unknown { + const obj: any = {}; + if (message.registerWorker !== undefined) { + obj.registerWorker = DurableTaskResponseRegisterWorker.toJSON(message.registerWorker); + } + if (message.memoAck !== undefined) { + obj.memoAck = DurableTaskEventMemoAckResponse.toJSON(message.memoAck); + } + if (message.triggerRunsAck !== undefined) { + obj.triggerRunsAck = DurableTaskEventTriggerRunsAckResponse.toJSON(message.triggerRunsAck); + } + if (message.waitForAck !== undefined) { + obj.waitForAck = DurableTaskEventWaitForAckResponse.toJSON(message.waitForAck); + } + if (message.entryCompleted !== undefined) { + obj.entryCompleted = DurableTaskEventLogEntryCompletedResponse.toJSON(message.entryCompleted); + } + if (message.error !== undefined) { + obj.error = DurableTaskErrorResponse.toJSON(message.error); + } + if (message.evictionAck !== undefined) { + obj.evictionAck = DurableTaskEvictionAckResponse.toJSON(message.evictionAck); + } + if (message.serverEvict !== undefined) { + obj.serverEvict = DurableTaskServerEvictNotice.toJSON(message.serverEvict); + } + return obj; + }, + + create(base?: DeepPartial): DurableTaskResponse { + return DurableTaskResponse.fromPartial(base ?? 
{}); + }, + fromPartial(object: DeepPartial): DurableTaskResponse { + const message = createBaseDurableTaskResponse(); + message.registerWorker = + object.registerWorker !== undefined && object.registerWorker !== null + ? DurableTaskResponseRegisterWorker.fromPartial(object.registerWorker) + : undefined; + message.memoAck = + object.memoAck !== undefined && object.memoAck !== null + ? DurableTaskEventMemoAckResponse.fromPartial(object.memoAck) + : undefined; + message.triggerRunsAck = + object.triggerRunsAck !== undefined && object.triggerRunsAck !== null + ? DurableTaskEventTriggerRunsAckResponse.fromPartial(object.triggerRunsAck) + : undefined; + message.waitForAck = + object.waitForAck !== undefined && object.waitForAck !== null + ? DurableTaskEventWaitForAckResponse.fromPartial(object.waitForAck) + : undefined; + message.entryCompleted = + object.entryCompleted !== undefined && object.entryCompleted !== null + ? DurableTaskEventLogEntryCompletedResponse.fromPartial(object.entryCompleted) + : undefined; + message.error = + object.error !== undefined && object.error !== null + ? DurableTaskErrorResponse.fromPartial(object.error) + : undefined; + message.evictionAck = + object.evictionAck !== undefined && object.evictionAck !== null + ? DurableTaskEvictionAckResponse.fromPartial(object.evictionAck) + : undefined; + message.serverEvict = + object.serverEvict !== undefined && object.serverEvict !== null + ? 
DurableTaskServerEvictNotice.fromPartial(object.serverEvict) + : undefined; + return message; + }, +}; + function createBaseRegisterDurableEventRequest(): RegisterDurableEventRequest { return { taskId: '', signalKey: '', conditions: undefined }; } @@ -355,6 +2682,15 @@ export const V1DispatcherDefinition = { name: 'V1Dispatcher', fullName: 'v1.V1Dispatcher', methods: { + durableTask: { + name: 'DurableTask', + requestType: DurableTaskRequest, + requestStream: true, + responseType: DurableTaskResponse, + responseStream: true, + options: {}, + }, + /** NOTE: deprecated after DurableEventLog is implemented */ registerDurableEvent: { name: 'RegisterDurableEvent', requestType: RegisterDurableEventRequest, @@ -375,6 +2711,11 @@ export const V1DispatcherDefinition = { } as const; export interface V1DispatcherServiceImplementation { + durableTask( + request: AsyncIterable, + context: CallContext & CallContextExt + ): ServerStreamingMethodResult>; + /** NOTE: deprecated after DurableEventLog is implemented */ registerDurableEvent( request: RegisterDurableEventRequest, context: CallContext & CallContextExt @@ -386,6 +2727,11 @@ export interface V1DispatcherServiceImplementation { } export interface V1DispatcherClient { + durableTask( + request: AsyncIterable>, + options?: CallOptions & CallOptionsExt + ): AsyncIterable; + /** NOTE: deprecated after DurableEventLog is implemented */ registerDurableEvent( request: DeepPartial, options?: CallOptions & CallOptionsExt @@ -433,6 +2779,17 @@ export type DeepPartial = T extends Builtin ? 
{ [K in keyof T]?: DeepPartial } : Partial; +function longToNumber(int64: { toString(): string }): number { + const num = globalThis.Number(int64.toString()); + if (num > globalThis.Number.MAX_SAFE_INTEGER) { + throw new globalThis.Error('Value is larger than Number.MAX_SAFE_INTEGER'); + } + if (num < globalThis.Number.MIN_SAFE_INTEGER) { + throw new globalThis.Error('Value is smaller than Number.MIN_SAFE_INTEGER'); + } + return num; +} + function isSet(value: any): boolean { return value !== null && value !== undefined; } diff --git a/sdks/typescript/src/protoc/v1/shared/trigger.ts b/sdks/typescript/src/protoc/v1/shared/trigger.ts new file mode 100644 index 000000000..7ff3170e3 --- /dev/null +++ b/sdks/typescript/src/protoc/v1/shared/trigger.ts @@ -0,0 +1,630 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.7.7 +// protoc v3.19.1 +// source: v1/shared/trigger.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from '@bufbuild/protobuf/wire'; + +export const protobufPackage = 'v1'; + +export enum WorkerLabelComparator { + EQUAL = 0, + NOT_EQUAL = 1, + GREATER_THAN = 2, + GREATER_THAN_OR_EQUAL = 3, + LESS_THAN = 4, + LESS_THAN_OR_EQUAL = 5, + UNRECOGNIZED = -1, +} + +export function workerLabelComparatorFromJSON(object: any): WorkerLabelComparator { + switch (object) { + case 0: + case 'EQUAL': + return WorkerLabelComparator.EQUAL; + case 1: + case 'NOT_EQUAL': + return WorkerLabelComparator.NOT_EQUAL; + case 2: + case 'GREATER_THAN': + return WorkerLabelComparator.GREATER_THAN; + case 3: + case 'GREATER_THAN_OR_EQUAL': + return WorkerLabelComparator.GREATER_THAN_OR_EQUAL; + case 4: + case 'LESS_THAN': + return WorkerLabelComparator.LESS_THAN; + case 5: + case 'LESS_THAN_OR_EQUAL': + return WorkerLabelComparator.LESS_THAN_OR_EQUAL; + case -1: + case 'UNRECOGNIZED': + default: + return WorkerLabelComparator.UNRECOGNIZED; + } +} + +export function workerLabelComparatorToJSON(object: 
WorkerLabelComparator): string { + switch (object) { + case WorkerLabelComparator.EQUAL: + return 'EQUAL'; + case WorkerLabelComparator.NOT_EQUAL: + return 'NOT_EQUAL'; + case WorkerLabelComparator.GREATER_THAN: + return 'GREATER_THAN'; + case WorkerLabelComparator.GREATER_THAN_OR_EQUAL: + return 'GREATER_THAN_OR_EQUAL'; + case WorkerLabelComparator.LESS_THAN: + return 'LESS_THAN'; + case WorkerLabelComparator.LESS_THAN_OR_EQUAL: + return 'LESS_THAN_OR_EQUAL'; + case WorkerLabelComparator.UNRECOGNIZED: + default: + return 'UNRECOGNIZED'; + } +} + +export interface DesiredWorkerLabels { + /** value of the affinity */ + strValue?: string | undefined; + intValue?: number | undefined; + /** + * (optional) Specifies whether the affinity setting is required. + * If required, the worker will not accept actions that do not have a truthy affinity setting. + * + * Defaults to false. + */ + required?: boolean | undefined; + /** + * (optional) Specifies the comparator for the affinity setting. + * If not set, the default is EQUAL. + */ + comparator?: WorkerLabelComparator | undefined; + /** + * (optional) Specifies the weight of the affinity setting. + * If not set, the default is 100. + */ + weight?: number | undefined; +} + +export interface TriggerWorkflowRequest { + name: string; + /** (optional) the input data for the workflow */ + input: string; + /** (optional) the parent workflow run id */ + parentId?: string | undefined; + /** (optional) the parent task external run id */ + parentTaskRunExternalId?: string | undefined; + /** + * (optional) the index of the child workflow. if this is set, matches on the index or the + * child key will return an existing workflow run if the parent id, parent task run id, and + * child index/key match an existing workflow run. + */ + childIndex?: number | undefined; + /** + * (optional) the key for the child. 
if this is set, matches on the index or the + * child key will return an existing workflow run if the parent id, parent task run id, and + * child index/key match an existing workflow run. + */ + childKey?: string | undefined; + /** (optional) additional metadata for the workflow */ + additionalMetadata?: string | undefined; + /** + * (optional) desired worker id for the workflow run, + * requires the workflow definition to have a sticky strategy + */ + desiredWorkerId?: string | undefined; + /** (optional) override for the priority of the workflow tasks, will set all tasks to this priority */ + priority?: number | undefined; + /** (optional) the desired worker labels for the workflow run, which will be used to determine which workers can pick up the workflow's tasks. if not set, defaults to an empty set of labels, which means any worker can pick up the tasks. */ + desiredWorkerLabels: { [key: string]: DesiredWorkerLabels }; +} + +export interface TriggerWorkflowRequest_DesiredWorkerLabelsEntry { + key: string; + value: DesiredWorkerLabels | undefined; +} + +function createBaseDesiredWorkerLabels(): DesiredWorkerLabels { + return { + strValue: undefined, + intValue: undefined, + required: undefined, + comparator: undefined, + weight: undefined, + }; +} + +export const DesiredWorkerLabels: MessageFns = { + encode(message: DesiredWorkerLabels, writer: BinaryWriter = new BinaryWriter()): BinaryWriter { + if (message.strValue !== undefined) { + writer.uint32(10).string(message.strValue); + } + if (message.intValue !== undefined) { + writer.uint32(16).int32(message.intValue); + } + if (message.required !== undefined) { + writer.uint32(24).bool(message.required); + } + if (message.comparator !== undefined) { + writer.uint32(32).int32(message.comparator); + } + if (message.weight !== undefined) { + writer.uint32(40).int32(message.weight); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): DesiredWorkerLabels { + const reader = input 
instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseDesiredWorkerLabels(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 10) { + break; + } + + message.strValue = reader.string(); + continue; + } + case 2: { + if (tag !== 16) { + break; + } + + message.intValue = reader.int32(); + continue; + } + case 3: { + if (tag !== 24) { + break; + } + + message.required = reader.bool(); + continue; + } + case 4: { + if (tag !== 32) { + break; + } + + message.comparator = reader.int32() as any; + continue; + } + case 5: { + if (tag !== 40) { + break; + } + + message.weight = reader.int32(); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DesiredWorkerLabels { + return { + strValue: isSet(object.strValue) ? globalThis.String(object.strValue) : undefined, + intValue: isSet(object.intValue) ? globalThis.Number(object.intValue) : undefined, + required: isSet(object.required) ? globalThis.Boolean(object.required) : undefined, + comparator: isSet(object.comparator) + ? workerLabelComparatorFromJSON(object.comparator) + : undefined, + weight: isSet(object.weight) ? 
globalThis.Number(object.weight) : undefined, + }; + }, + + toJSON(message: DesiredWorkerLabels): unknown { + const obj: any = {}; + if (message.strValue !== undefined) { + obj.strValue = message.strValue; + } + if (message.intValue !== undefined) { + obj.intValue = Math.round(message.intValue); + } + if (message.required !== undefined) { + obj.required = message.required; + } + if (message.comparator !== undefined) { + obj.comparator = workerLabelComparatorToJSON(message.comparator); + } + if (message.weight !== undefined) { + obj.weight = Math.round(message.weight); + } + return obj; + }, + + create(base?: DeepPartial): DesiredWorkerLabels { + return DesiredWorkerLabels.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): DesiredWorkerLabels { + const message = createBaseDesiredWorkerLabels(); + message.strValue = object.strValue ?? undefined; + message.intValue = object.intValue ?? undefined; + message.required = object.required ?? undefined; + message.comparator = object.comparator ?? undefined; + message.weight = object.weight ?? 
undefined; + return message; + }, +}; + +function createBaseTriggerWorkflowRequest(): TriggerWorkflowRequest { + return { + name: '', + input: '', + parentId: undefined, + parentTaskRunExternalId: undefined, + childIndex: undefined, + childKey: undefined, + additionalMetadata: undefined, + desiredWorkerId: undefined, + priority: undefined, + desiredWorkerLabels: {}, + }; +} + +export const TriggerWorkflowRequest: MessageFns = { + encode(message: TriggerWorkflowRequest, writer: BinaryWriter = new BinaryWriter()): BinaryWriter { + if (message.name !== '') { + writer.uint32(10).string(message.name); + } + if (message.input !== '') { + writer.uint32(18).string(message.input); + } + if (message.parentId !== undefined) { + writer.uint32(26).string(message.parentId); + } + if (message.parentTaskRunExternalId !== undefined) { + writer.uint32(34).string(message.parentTaskRunExternalId); + } + if (message.childIndex !== undefined) { + writer.uint32(40).int32(message.childIndex); + } + if (message.childKey !== undefined) { + writer.uint32(50).string(message.childKey); + } + if (message.additionalMetadata !== undefined) { + writer.uint32(58).string(message.additionalMetadata); + } + if (message.desiredWorkerId !== undefined) { + writer.uint32(66).string(message.desiredWorkerId); + } + if (message.priority !== undefined) { + writer.uint32(72).int32(message.priority); + } + Object.entries(message.desiredWorkerLabels).forEach(([key, value]) => { + TriggerWorkflowRequest_DesiredWorkerLabelsEntry.encode( + { key: key as any, value }, + writer.uint32(82).fork() + ).join(); + }); + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): TriggerWorkflowRequest { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseTriggerWorkflowRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 10) { + break; + } + + message.name = reader.string(); + continue; + } + case 2: { + if (tag !== 18) { + break; + } + + message.input = reader.string(); + continue; + } + case 3: { + if (tag !== 26) { + break; + } + + message.parentId = reader.string(); + continue; + } + case 4: { + if (tag !== 34) { + break; + } + + message.parentTaskRunExternalId = reader.string(); + continue; + } + case 5: { + if (tag !== 40) { + break; + } + + message.childIndex = reader.int32(); + continue; + } + case 6: { + if (tag !== 50) { + break; + } + + message.childKey = reader.string(); + continue; + } + case 7: { + if (tag !== 58) { + break; + } + + message.additionalMetadata = reader.string(); + continue; + } + case 8: { + if (tag !== 66) { + break; + } + + message.desiredWorkerId = reader.string(); + continue; + } + case 9: { + if (tag !== 72) { + break; + } + + message.priority = reader.int32(); + continue; + } + case 10: { + if (tag !== 82) { + break; + } + + const entry10 = TriggerWorkflowRequest_DesiredWorkerLabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry10.value !== undefined) { + message.desiredWorkerLabels[entry10.key] = entry10.value; + } + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): TriggerWorkflowRequest { + return { + name: isSet(object.name) ? globalThis.String(object.name) : '', + input: isSet(object.input) ? globalThis.String(object.input) : '', + parentId: isSet(object.parentId) ? globalThis.String(object.parentId) : undefined, + parentTaskRunExternalId: isSet(object.parentTaskRunExternalId) + ? globalThis.String(object.parentTaskRunExternalId) + : undefined, + childIndex: isSet(object.childIndex) ? 
globalThis.Number(object.childIndex) : undefined, + childKey: isSet(object.childKey) ? globalThis.String(object.childKey) : undefined, + additionalMetadata: isSet(object.additionalMetadata) + ? globalThis.String(object.additionalMetadata) + : undefined, + desiredWorkerId: isSet(object.desiredWorkerId) + ? globalThis.String(object.desiredWorkerId) + : undefined, + priority: isSet(object.priority) ? globalThis.Number(object.priority) : undefined, + desiredWorkerLabels: isObject(object.desiredWorkerLabels) + ? Object.entries(object.desiredWorkerLabels).reduce<{ [key: string]: DesiredWorkerLabels }>( + (acc, [key, value]) => { + acc[key] = DesiredWorkerLabels.fromJSON(value); + return acc; + }, + {} + ) + : {}, + }; + }, + + toJSON(message: TriggerWorkflowRequest): unknown { + const obj: any = {}; + if (message.name !== '') { + obj.name = message.name; + } + if (message.input !== '') { + obj.input = message.input; + } + if (message.parentId !== undefined) { + obj.parentId = message.parentId; + } + if (message.parentTaskRunExternalId !== undefined) { + obj.parentTaskRunExternalId = message.parentTaskRunExternalId; + } + if (message.childIndex !== undefined) { + obj.childIndex = Math.round(message.childIndex); + } + if (message.childKey !== undefined) { + obj.childKey = message.childKey; + } + if (message.additionalMetadata !== undefined) { + obj.additionalMetadata = message.additionalMetadata; + } + if (message.desiredWorkerId !== undefined) { + obj.desiredWorkerId = message.desiredWorkerId; + } + if (message.priority !== undefined) { + obj.priority = Math.round(message.priority); + } + if (message.desiredWorkerLabels) { + const entries = Object.entries(message.desiredWorkerLabels); + if (entries.length > 0) { + obj.desiredWorkerLabels = {}; + entries.forEach(([k, v]) => { + obj.desiredWorkerLabels[k] = DesiredWorkerLabels.toJSON(v); + }); + } + } + return obj; + }, + + create(base?: DeepPartial): TriggerWorkflowRequest { + return TriggerWorkflowRequest.fromPartial(base 
?? {}); + }, + fromPartial(object: DeepPartial): TriggerWorkflowRequest { + const message = createBaseTriggerWorkflowRequest(); + message.name = object.name ?? ''; + message.input = object.input ?? ''; + message.parentId = object.parentId ?? undefined; + message.parentTaskRunExternalId = object.parentTaskRunExternalId ?? undefined; + message.childIndex = object.childIndex ?? undefined; + message.childKey = object.childKey ?? undefined; + message.additionalMetadata = object.additionalMetadata ?? undefined; + message.desiredWorkerId = object.desiredWorkerId ?? undefined; + message.priority = object.priority ?? undefined; + message.desiredWorkerLabels = Object.entries(object.desiredWorkerLabels ?? {}).reduce<{ + [key: string]: DesiredWorkerLabels; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = DesiredWorkerLabels.fromPartial(value); + } + return acc; + }, {}); + return message; + }, +}; + +function createBaseTriggerWorkflowRequest_DesiredWorkerLabelsEntry(): TriggerWorkflowRequest_DesiredWorkerLabelsEntry { + return { key: '', value: undefined }; +} + +export const TriggerWorkflowRequest_DesiredWorkerLabelsEntry: MessageFns = + { + encode( + message: TriggerWorkflowRequest_DesiredWorkerLabelsEntry, + writer: BinaryWriter = new BinaryWriter() + ): BinaryWriter { + if (message.key !== '') { + writer.uint32(10).string(message.key); + } + if (message.value !== undefined) { + DesiredWorkerLabels.encode(message.value, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number + ): TriggerWorkflowRequest_DesiredWorkerLabelsEntry { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseTriggerWorkflowRequest_DesiredWorkerLabelsEntry(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 10) { + break; + } + + message.key = reader.string(); + continue; + } + case 2: { + if (tag !== 18) { + break; + } + + message.value = DesiredWorkerLabels.decode(reader, reader.uint32()); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): TriggerWorkflowRequest_DesiredWorkerLabelsEntry { + return { + key: isSet(object.key) ? globalThis.String(object.key) : '', + value: isSet(object.value) ? DesiredWorkerLabels.fromJSON(object.value) : undefined, + }; + }, + + toJSON(message: TriggerWorkflowRequest_DesiredWorkerLabelsEntry): unknown { + const obj: any = {}; + if (message.key !== '') { + obj.key = message.key; + } + if (message.value !== undefined) { + obj.value = DesiredWorkerLabels.toJSON(message.value); + } + return obj; + }, + + create( + base?: DeepPartial + ): TriggerWorkflowRequest_DesiredWorkerLabelsEntry { + return TriggerWorkflowRequest_DesiredWorkerLabelsEntry.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial + ): TriggerWorkflowRequest_DesiredWorkerLabelsEntry { + const message = createBaseTriggerWorkflowRequest_DesiredWorkerLabelsEntry(); + message.key = object.key ?? ''; + message.value = + object.value !== undefined && object.value !== null + ? DesiredWorkerLabels.fromPartial(object.value) + : undefined; + return message; + }, + }; + +type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in keyof T]?: DeepPartial } + : Partial; + +function isObject(value: any): boolean { + return typeof value === 'object' && value !== null; +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +export interface MessageFns { + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/sdks/typescript/src/protoc/v1/workflows.ts b/sdks/typescript/src/protoc/v1/workflows.ts index 14569df29..edf329c29 100644 --- a/sdks/typescript/src/protoc/v1/workflows.ts +++ b/sdks/typescript/src/protoc/v1/workflows.ts @@ -9,6 +9,7 @@ import { BinaryReader, BinaryWriter } from '@bufbuild/protobuf/wire'; import type { CallContext, CallOptions } from 'nice-grpc-common'; import { Timestamp } from '../google/protobuf/timestamp'; import { TaskConditions } from './shared/condition'; +import { DesiredWorkerLabels } from './shared/trigger'; export const protobufPackage = 'v1'; @@ -114,6 +115,7 @@ export enum RunStatus { COMPLETED = 2, FAILED = 3, CANCELLED = 4, + EVICTED = 5, UNRECOGNIZED = -1, } @@ -134,6 +136,9 @@ export function runStatusFromJSON(object: any): RunStatus { case 4: case 'CANCELLED': return RunStatus.CANCELLED; + case 5: + case 'EVICTED': + return RunStatus.EVICTED; case -1: case 'UNRECOGNIZED': default: @@ -153,6 +158,8 @@ export function runStatusToJSON(object: RunStatus): string { return 'FAILED'; case RunStatus.CANCELLED: return 'CANCELLED'; + case RunStatus.EVICTED: + return 'EVICTED'; case RunStatus.UNRECOGNIZED: default: return 'UNRECOGNIZED'; @@ -212,63 +219,6 @@ export function concurrencyLimitStrategyToJSON(object: ConcurrencyLimitStrategy) } } -export enum WorkerLabelComparator { - EQUAL = 0, - NOT_EQUAL = 1, - GREATER_THAN = 2, - GREATER_THAN_OR_EQUAL = 3, - LESS_THAN = 4, - LESS_THAN_OR_EQUAL = 5, - 
UNRECOGNIZED = -1, -} - -export function workerLabelComparatorFromJSON(object: any): WorkerLabelComparator { - switch (object) { - case 0: - case 'EQUAL': - return WorkerLabelComparator.EQUAL; - case 1: - case 'NOT_EQUAL': - return WorkerLabelComparator.NOT_EQUAL; - case 2: - case 'GREATER_THAN': - return WorkerLabelComparator.GREATER_THAN; - case 3: - case 'GREATER_THAN_OR_EQUAL': - return WorkerLabelComparator.GREATER_THAN_OR_EQUAL; - case 4: - case 'LESS_THAN': - return WorkerLabelComparator.LESS_THAN; - case 5: - case 'LESS_THAN_OR_EQUAL': - return WorkerLabelComparator.LESS_THAN_OR_EQUAL; - case -1: - case 'UNRECOGNIZED': - default: - return WorkerLabelComparator.UNRECOGNIZED; - } -} - -export function workerLabelComparatorToJSON(object: WorkerLabelComparator): string { - switch (object) { - case WorkerLabelComparator.EQUAL: - return 'EQUAL'; - case WorkerLabelComparator.NOT_EQUAL: - return 'NOT_EQUAL'; - case WorkerLabelComparator.GREATER_THAN: - return 'GREATER_THAN'; - case WorkerLabelComparator.GREATER_THAN_OR_EQUAL: - return 'GREATER_THAN_OR_EQUAL'; - case WorkerLabelComparator.LESS_THAN: - return 'LESS_THAN'; - case WorkerLabelComparator.LESS_THAN_OR_EQUAL: - return 'LESS_THAN_OR_EQUAL'; - case WorkerLabelComparator.UNRECOGNIZED: - default: - return 'UNRECOGNIZED'; - } -} - export interface CancelTasksRequest { /** a list of external UUIDs */ externalIds: string[]; @@ -314,6 +264,24 @@ export interface TriggerWorkflowRunResponse { externalId: string; } +export interface BranchDurableTaskRequest { + /** (required) the external id (uuid) of the durable task */ + taskExternalId: string; + /** (required) the node id to branch from */ + nodeId: number; + /** (required) the branch id to branch from */ + branchId: number; +} + +export interface BranchDurableTaskResponse { + /** the external id of the durable task */ + taskExternalId: string; + /** the node id of the new entry */ + nodeId: number; + /** the branch id of the new entry */ + branchId: number; +} + 
/** CreateWorkflowVersionRequest represents options to create a workflow version. */ export interface CreateWorkflowVersionRequest { /** (required) the workflow name */ @@ -364,29 +332,6 @@ export interface Concurrency { limitStrategy?: ConcurrencyLimitStrategy | undefined; } -export interface DesiredWorkerLabels { - /** value of the affinity */ - strValue?: string | undefined; - intValue?: number | undefined; - /** - * (optional) Specifies whether the affinity setting is required. - * If required, the worker will not accept actions that do not have a truthy affinity setting. - * - * Defaults to false. - */ - required?: boolean | undefined; - /** - * (optional) Specifies the comparator for the affinity setting. - * If not set, the default is EQUAL. - */ - comparator?: WorkerLabelComparator | undefined; - /** - * (optional) Specifies the weight of the affinity setting. - * If not set, the default is 100. - */ - weight?: number | undefined; -} - /** CreateTaskOpts represents options to create a task. 
*/ export interface CreateTaskOpts { /** (required) the task name */ @@ -468,6 +413,8 @@ export interface TaskRunDetail { output?: Uint8Array | undefined; /** the readable id of the task */ readableId: string; + /** whether the task has been evicted from a worker (status will be RUNNING) */ + isEvicted: boolean; } export interface GetRunDetailsResponse { @@ -481,6 +428,8 @@ export interface GetRunDetailsResponse { done: boolean; /** (optional) additional metadata for the workflow run */ additionalMetadata: Uint8Array; + /** whether any task in this run has been evicted */ + isEvicted: boolean; } export interface GetRunDetailsResponse_TaskRunsEntry { @@ -1226,6 +1175,196 @@ export const TriggerWorkflowRunResponse: MessageFns }, }; +function createBaseBranchDurableTaskRequest(): BranchDurableTaskRequest { + return { taskExternalId: '', nodeId: 0, branchId: 0 }; +} + +export const BranchDurableTaskRequest: MessageFns = { + encode( + message: BranchDurableTaskRequest, + writer: BinaryWriter = new BinaryWriter() + ): BinaryWriter { + if (message.taskExternalId !== '') { + writer.uint32(10).string(message.taskExternalId); + } + if (message.nodeId !== 0) { + writer.uint32(16).int64(message.nodeId); + } + if (message.branchId !== 0) { + writer.uint32(24).int64(message.branchId); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): BranchDurableTaskRequest { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseBranchDurableTaskRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 10) { + break; + } + + message.taskExternalId = reader.string(); + continue; + } + case 2: { + if (tag !== 16) { + break; + } + + message.nodeId = longToNumber(reader.int64()); + continue; + } + case 3: { + if (tag !== 24) { + break; + } + + message.branchId = longToNumber(reader.int64()); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): BranchDurableTaskRequest { + return { + taskExternalId: isSet(object.taskExternalId) ? globalThis.String(object.taskExternalId) : '', + nodeId: isSet(object.nodeId) ? globalThis.Number(object.nodeId) : 0, + branchId: isSet(object.branchId) ? globalThis.Number(object.branchId) : 0, + }; + }, + + toJSON(message: BranchDurableTaskRequest): unknown { + const obj: any = {}; + if (message.taskExternalId !== '') { + obj.taskExternalId = message.taskExternalId; + } + if (message.nodeId !== 0) { + obj.nodeId = Math.round(message.nodeId); + } + if (message.branchId !== 0) { + obj.branchId = Math.round(message.branchId); + } + return obj; + }, + + create(base?: DeepPartial): BranchDurableTaskRequest { + return BranchDurableTaskRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): BranchDurableTaskRequest { + const message = createBaseBranchDurableTaskRequest(); + message.taskExternalId = object.taskExternalId ?? ''; + message.nodeId = object.nodeId ?? 0; + message.branchId = object.branchId ?? 
0; + return message; + }, +}; + +function createBaseBranchDurableTaskResponse(): BranchDurableTaskResponse { + return { taskExternalId: '', nodeId: 0, branchId: 0 }; +} + +export const BranchDurableTaskResponse: MessageFns = { + encode( + message: BranchDurableTaskResponse, + writer: BinaryWriter = new BinaryWriter() + ): BinaryWriter { + if (message.taskExternalId !== '') { + writer.uint32(10).string(message.taskExternalId); + } + if (message.nodeId !== 0) { + writer.uint32(16).int64(message.nodeId); + } + if (message.branchId !== 0) { + writer.uint32(24).int64(message.branchId); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): BranchDurableTaskResponse { + const reader = input instanceof BinaryReader ? input : new BinaryReader(input); + const end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseBranchDurableTaskResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (tag !== 10) { + break; + } + + message.taskExternalId = reader.string(); + continue; + } + case 2: { + if (tag !== 16) { + break; + } + + message.nodeId = longToNumber(reader.int64()); + continue; + } + case 3: { + if (tag !== 24) { + break; + } + + message.branchId = longToNumber(reader.int64()); + continue; + } + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): BranchDurableTaskResponse { + return { + taskExternalId: isSet(object.taskExternalId) ? globalThis.String(object.taskExternalId) : '', + nodeId: isSet(object.nodeId) ? globalThis.Number(object.nodeId) : 0, + branchId: isSet(object.branchId) ? 
globalThis.Number(object.branchId) : 0, + }; + }, + + toJSON(message: BranchDurableTaskResponse): unknown { + const obj: any = {}; + if (message.taskExternalId !== '') { + obj.taskExternalId = message.taskExternalId; + } + if (message.nodeId !== 0) { + obj.nodeId = Math.round(message.nodeId); + } + if (message.branchId !== 0) { + obj.branchId = Math.round(message.branchId); + } + return obj; + }, + + create(base?: DeepPartial): BranchDurableTaskResponse { + return BranchDurableTaskResponse.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): BranchDurableTaskResponse { + const message = createBaseBranchDurableTaskResponse(); + message.taskExternalId = object.taskExternalId ?? ''; + message.nodeId = object.nodeId ?? 0; + message.branchId = object.branchId ?? 0; + return message; + }, +}; + function createBaseCreateWorkflowVersionRequest(): CreateWorkflowVersionRequest { return { name: '', @@ -1720,138 +1859,6 @@ export const Concurrency: MessageFns = { }, }; -function createBaseDesiredWorkerLabels(): DesiredWorkerLabels { - return { - strValue: undefined, - intValue: undefined, - required: undefined, - comparator: undefined, - weight: undefined, - }; -} - -export const DesiredWorkerLabels: MessageFns = { - encode(message: DesiredWorkerLabels, writer: BinaryWriter = new BinaryWriter()): BinaryWriter { - if (message.strValue !== undefined) { - writer.uint32(10).string(message.strValue); - } - if (message.intValue !== undefined) { - writer.uint32(16).int32(message.intValue); - } - if (message.required !== undefined) { - writer.uint32(24).bool(message.required); - } - if (message.comparator !== undefined) { - writer.uint32(32).int32(message.comparator); - } - if (message.weight !== undefined) { - writer.uint32(40).int32(message.weight); - } - return writer; - }, - - decode(input: BinaryReader | Uint8Array, length?: number): DesiredWorkerLabels { - const reader = input instanceof BinaryReader ? 
input : new BinaryReader(input); - const end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseDesiredWorkerLabels(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - if (tag !== 10) { - break; - } - - message.strValue = reader.string(); - continue; - } - case 2: { - if (tag !== 16) { - break; - } - - message.intValue = reader.int32(); - continue; - } - case 3: { - if (tag !== 24) { - break; - } - - message.required = reader.bool(); - continue; - } - case 4: { - if (tag !== 32) { - break; - } - - message.comparator = reader.int32() as any; - continue; - } - case 5: { - if (tag !== 40) { - break; - } - - message.weight = reader.int32(); - continue; - } - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skip(tag & 7); - } - return message; - }, - - fromJSON(object: any): DesiredWorkerLabels { - return { - strValue: isSet(object.strValue) ? globalThis.String(object.strValue) : undefined, - intValue: isSet(object.intValue) ? globalThis.Number(object.intValue) : undefined, - required: isSet(object.required) ? globalThis.Boolean(object.required) : undefined, - comparator: isSet(object.comparator) - ? workerLabelComparatorFromJSON(object.comparator) - : undefined, - weight: isSet(object.weight) ? 
globalThis.Number(object.weight) : undefined, - }; - }, - - toJSON(message: DesiredWorkerLabels): unknown { - const obj: any = {}; - if (message.strValue !== undefined) { - obj.strValue = message.strValue; - } - if (message.intValue !== undefined) { - obj.intValue = Math.round(message.intValue); - } - if (message.required !== undefined) { - obj.required = message.required; - } - if (message.comparator !== undefined) { - obj.comparator = workerLabelComparatorToJSON(message.comparator); - } - if (message.weight !== undefined) { - obj.weight = Math.round(message.weight); - } - return obj; - }, - - create(base?: DeepPartial): DesiredWorkerLabels { - return DesiredWorkerLabels.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): DesiredWorkerLabels { - const message = createBaseDesiredWorkerLabels(); - message.strValue = object.strValue ?? undefined; - message.intValue = object.intValue ?? undefined; - message.required = object.required ?? undefined; - message.comparator = object.comparator ?? undefined; - message.weight = object.weight ?? 
undefined; - return message; - }, -}; - function createBaseCreateTaskOpts(): CreateTaskOpts { return { readableId: '', @@ -2673,7 +2680,14 @@ export const GetRunDetailsRequest: MessageFns = { }; function createBaseTaskRunDetail(): TaskRunDetail { - return { externalId: '', status: 0, error: undefined, output: undefined, readableId: '' }; + return { + externalId: '', + status: 0, + error: undefined, + output: undefined, + readableId: '', + isEvicted: false, + }; } export const TaskRunDetail: MessageFns = { @@ -2693,6 +2707,9 @@ export const TaskRunDetail: MessageFns = { if (message.readableId !== '') { writer.uint32(42).string(message.readableId); } + if (message.isEvicted !== false) { + writer.uint32(48).bool(message.isEvicted); + } return writer; }, @@ -2743,6 +2760,14 @@ export const TaskRunDetail: MessageFns = { message.readableId = reader.string(); continue; } + case 6: { + if (tag !== 48) { + break; + } + + message.isEvicted = reader.bool(); + continue; + } } if ((tag & 7) === 4 || tag === 0) { break; @@ -2759,6 +2784,7 @@ export const TaskRunDetail: MessageFns = { error: isSet(object.error) ? globalThis.String(object.error) : undefined, output: isSet(object.output) ? bytesFromBase64(object.output) : undefined, readableId: isSet(object.readableId) ? globalThis.String(object.readableId) : '', + isEvicted: isSet(object.isEvicted) ? globalThis.Boolean(object.isEvicted) : false, }; }, @@ -2779,6 +2805,9 @@ export const TaskRunDetail: MessageFns = { if (message.readableId !== '') { obj.readableId = message.readableId; } + if (message.isEvicted !== false) { + obj.isEvicted = message.isEvicted; + } return obj; }, @@ -2792,6 +2821,7 @@ export const TaskRunDetail: MessageFns = { message.error = object.error ?? undefined; message.output = object.output ?? undefined; message.readableId = object.readableId ?? ''; + message.isEvicted = object.isEvicted ?? 
false; return message; }, }; @@ -2803,6 +2833,7 @@ function createBaseGetRunDetailsResponse(): GetRunDetailsResponse { taskRuns: {}, done: false, additionalMetadata: new Uint8Array(0), + isEvicted: false, }; } @@ -2826,6 +2857,9 @@ export const GetRunDetailsResponse: MessageFns = { if (message.additionalMetadata.length !== 0) { writer.uint32(42).bytes(message.additionalMetadata); } + if (message.isEvicted !== false) { + writer.uint32(48).bool(message.isEvicted); + } return writer; }, @@ -2879,6 +2913,14 @@ export const GetRunDetailsResponse: MessageFns = { message.additionalMetadata = reader.bytes(); continue; } + case 6: { + if (tag !== 48) { + break; + } + + message.isEvicted = reader.bool(); + continue; + } } if ((tag & 7) === 4 || tag === 0) { break; @@ -2902,6 +2944,7 @@ export const GetRunDetailsResponse: MessageFns = { ) : {}, done: isSet(object.done) ? globalThis.Boolean(object.done) : false, + isEvicted: isSet(object.isEvicted) ? globalThis.Boolean(object.isEvicted) : false, additionalMetadata: isSet(object.additionalMetadata) ? bytesFromBase64(object.additionalMetadata) : new Uint8Array(0), @@ -2928,6 +2971,9 @@ export const GetRunDetailsResponse: MessageFns = { if (message.done !== false) { obj.done = message.done; } + if (message.isEvicted !== false) { + obj.isEvicted = message.isEvicted; + } if (message.additionalMetadata.length !== 0) { obj.additionalMetadata = base64FromBytes(message.additionalMetadata); } @@ -2950,6 +2996,7 @@ export const GetRunDetailsResponse: MessageFns = { return acc; }, {}); message.done = object.done ?? false; + message.isEvicted = object.isEvicted ?? false; message.additionalMetadata = object.additionalMetadata ?? 
new Uint8Array(0); return message; }, @@ -3088,6 +3135,14 @@ export const AdminServiceDefinition = { responseStream: false, options: {}, }, + branchDurableTask: { + name: 'BranchDurableTask', + requestType: BranchDurableTaskRequest, + requestStream: false, + responseType: BranchDurableTaskResponse, + responseStream: false, + options: {}, + }, }, } as const; @@ -3112,6 +3167,10 @@ export interface AdminServiceImplementation { request: GetRunDetailsRequest, context: CallContext & CallContextExt ): Promise>; + branchDurableTask( + request: BranchDurableTaskRequest, + context: CallContext & CallContextExt + ): Promise>; } export interface AdminServiceClient { @@ -3135,6 +3194,10 @@ export interface AdminServiceClient { request: DeepPartial, options?: CallOptions & CallOptionsExt ): Promise; + branchDurableTask( + request: DeepPartial, + options?: CallOptions & CallOptionsExt + ): Promise; } function bytesFromBase64(b64: string): Uint8Array { @@ -3196,6 +3259,17 @@ function fromJsonTimestamp(o: any): Date { } } +function longToNumber(int64: { toString(): string }): number { + const num = globalThis.Number(int64.toString()); + if (num > globalThis.Number.MAX_SAFE_INTEGER) { + throw new globalThis.Error('Value is larger than Number.MAX_SAFE_INTEGER'); + } + if (num < globalThis.Number.MIN_SAFE_INTEGER) { + throw new globalThis.Error('Value is smaller than Number.MIN_SAFE_INTEGER'); + } + return num; +} + function isObject(value: any): boolean { return typeof value === 'object' && value !== null; } diff --git a/sdks/typescript/src/protoc/workflows/workflows.ts b/sdks/typescript/src/protoc/workflows/workflows.ts index e9966df4a..69bc61309 100644 --- a/sdks/typescript/src/protoc/workflows/workflows.ts +++ b/sdks/typescript/src/protoc/workflows/workflows.ts @@ -8,6 +8,7 @@ import { BinaryReader, BinaryWriter } from '@bufbuild/protobuf/wire'; import type { CallContext, CallOptions } from 'nice-grpc-common'; import { Timestamp } from '../google/protobuf/timestamp'; +import { 
DesiredWorkerLabels, TriggerWorkflowRequest } from '../v1/shared/trigger'; export const protobufPackage = ''; @@ -136,63 +137,6 @@ export function concurrencyLimitStrategyToJSON(object: ConcurrencyLimitStrategy) } } -export enum WorkerLabelComparator { - EQUAL = 0, - NOT_EQUAL = 1, - GREATER_THAN = 2, - GREATER_THAN_OR_EQUAL = 3, - LESS_THAN = 4, - LESS_THAN_OR_EQUAL = 5, - UNRECOGNIZED = -1, -} - -export function workerLabelComparatorFromJSON(object: any): WorkerLabelComparator { - switch (object) { - case 0: - case 'EQUAL': - return WorkerLabelComparator.EQUAL; - case 1: - case 'NOT_EQUAL': - return WorkerLabelComparator.NOT_EQUAL; - case 2: - case 'GREATER_THAN': - return WorkerLabelComparator.GREATER_THAN; - case 3: - case 'GREATER_THAN_OR_EQUAL': - return WorkerLabelComparator.GREATER_THAN_OR_EQUAL; - case 4: - case 'LESS_THAN': - return WorkerLabelComparator.LESS_THAN; - case 5: - case 'LESS_THAN_OR_EQUAL': - return WorkerLabelComparator.LESS_THAN_OR_EQUAL; - case -1: - case 'UNRECOGNIZED': - default: - return WorkerLabelComparator.UNRECOGNIZED; - } -} - -export function workerLabelComparatorToJSON(object: WorkerLabelComparator): string { - switch (object) { - case WorkerLabelComparator.EQUAL: - return 'EQUAL'; - case WorkerLabelComparator.NOT_EQUAL: - return 'NOT_EQUAL'; - case WorkerLabelComparator.GREATER_THAN: - return 'GREATER_THAN'; - case WorkerLabelComparator.GREATER_THAN_OR_EQUAL: - return 'GREATER_THAN_OR_EQUAL'; - case WorkerLabelComparator.LESS_THAN: - return 'LESS_THAN'; - case WorkerLabelComparator.LESS_THAN_OR_EQUAL: - return 'LESS_THAN_OR_EQUAL'; - case WorkerLabelComparator.UNRECOGNIZED: - default: - return 'UNRECOGNIZED'; - } -} - export enum RateLimitDuration { SECOND = 0, MINUTE = 1, @@ -313,29 +257,6 @@ export interface CreateWorkflowJobOpts { steps: CreateWorkflowStepOpts[]; } -export interface DesiredWorkerLabels { - /** value of the affinity */ - strValue?: string | undefined; - intValue?: number | undefined; - /** - * (optional) 
Specifies whether the affinity setting is required. - * If required, the worker will not accept actions that do not have a truthy affinity setting. - * - * Defaults to false. - */ - required?: boolean | undefined; - /** - * (optional) Specifies the comparator for the affinity setting. - * If not set, the default is EQUAL. - */ - comparator?: WorkerLabelComparator | undefined; - /** - * (optional) Specifies the weight of the affinity setting. - * If not set, the default is 100. - */ - weight?: number | undefined; -} - /** CreateWorkflowStepOpts represents options to create a workflow task. */ export interface CreateWorkflowStepOpts { /** (required) the task name */ @@ -447,44 +368,6 @@ export interface BulkTriggerWorkflowResponse { workflowRunIds: string[]; } -export interface TriggerWorkflowRequest { - name: string; - /** (optional) the input data for the workflow */ - input: string; - /** (optional) the parent workflow run id */ - parentId?: string | undefined; - /** (optional) the parent task external run id */ - parentTaskRunExternalId?: string | undefined; - /** - * (optional) the index of the child workflow. if this is set, matches on the index or the - * child key will return an existing workflow run if the parent id, parent task run id, and - * child index/key match an existing workflow run. - */ - childIndex?: number | undefined; - /** - * (optional) the key for the child. if this is set, matches on the index or the - * child key will return an existing workflow run if the parent id, parent task run id, and - * child index/key match an existing workflow run. 
- */ - childKey?: string | undefined; - /** (optional) additional metadata for the workflow */ - additionalMetadata?: string | undefined; - /** - * (optional) desired worker id for the workflow run, - * requires the workflow definition to have a sticky strategy - */ - desiredWorkerId?: string | undefined; - /** (optional) override for the priority of the workflow tasks, will set all tasks to this priority */ - priority?: number | undefined; - /** (optional) override for the desired worker labels for the workflow tasks, used for routing to specific workers (or worker pools) */ - desiredWorkerLabels: { [key: string]: DesiredWorkerLabels }; -} - -export interface TriggerWorkflowRequest_DesiredWorkerLabelsEntry { - key: string; - value: DesiredWorkerLabels | undefined; -} - export interface TriggerWorkflowResponse { workflowRunId: string; } @@ -1078,138 +961,6 @@ export const CreateWorkflowJobOpts: MessageFns = { }, }; -function createBaseDesiredWorkerLabels(): DesiredWorkerLabels { - return { - strValue: undefined, - intValue: undefined, - required: undefined, - comparator: undefined, - weight: undefined, - }; -} - -export const DesiredWorkerLabels: MessageFns = { - encode(message: DesiredWorkerLabels, writer: BinaryWriter = new BinaryWriter()): BinaryWriter { - if (message.strValue !== undefined) { - writer.uint32(10).string(message.strValue); - } - if (message.intValue !== undefined) { - writer.uint32(16).int32(message.intValue); - } - if (message.required !== undefined) { - writer.uint32(24).bool(message.required); - } - if (message.comparator !== undefined) { - writer.uint32(32).int32(message.comparator); - } - if (message.weight !== undefined) { - writer.uint32(40).int32(message.weight); - } - return writer; - }, - - decode(input: BinaryReader | Uint8Array, length?: number): DesiredWorkerLabels { - const reader = input instanceof BinaryReader ? input : new BinaryReader(input); - const end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseDesiredWorkerLabels(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - if (tag !== 10) { - break; - } - - message.strValue = reader.string(); - continue; - } - case 2: { - if (tag !== 16) { - break; - } - - message.intValue = reader.int32(); - continue; - } - case 3: { - if (tag !== 24) { - break; - } - - message.required = reader.bool(); - continue; - } - case 4: { - if (tag !== 32) { - break; - } - - message.comparator = reader.int32() as any; - continue; - } - case 5: { - if (tag !== 40) { - break; - } - - message.weight = reader.int32(); - continue; - } - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skip(tag & 7); - } - return message; - }, - - fromJSON(object: any): DesiredWorkerLabels { - return { - strValue: isSet(object.strValue) ? globalThis.String(object.strValue) : undefined, - intValue: isSet(object.intValue) ? globalThis.Number(object.intValue) : undefined, - required: isSet(object.required) ? globalThis.Boolean(object.required) : undefined, - comparator: isSet(object.comparator) - ? workerLabelComparatorFromJSON(object.comparator) - : undefined, - weight: isSet(object.weight) ? globalThis.Number(object.weight) : undefined, - }; - }, - - toJSON(message: DesiredWorkerLabels): unknown { - const obj: any = {}; - if (message.strValue !== undefined) { - obj.strValue = message.strValue; - } - if (message.intValue !== undefined) { - obj.intValue = Math.round(message.intValue); - } - if (message.required !== undefined) { - obj.required = message.required; - } - if (message.comparator !== undefined) { - obj.comparator = workerLabelComparatorToJSON(message.comparator); - } - if (message.weight !== undefined) { - obj.weight = Math.round(message.weight); - } - return obj; - }, - - create(base?: DeepPartial): DesiredWorkerLabels { - return DesiredWorkerLabels.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): DesiredWorkerLabels { - const message = createBaseDesiredWorkerLabels(); - message.strValue = object.strValue ?? undefined; - message.intValue = object.intValue ?? undefined; - message.required = object.required ?? undefined; - message.comparator = object.comparator ?? undefined; - message.weight = object.weight ?? undefined; - return message; - }, -}; - function createBaseCreateWorkflowStepOpts(): CreateWorkflowStepOpts { return { readableId: '', @@ -2494,347 +2245,6 @@ export const BulkTriggerWorkflowResponse: MessageFns = { - encode(message: TriggerWorkflowRequest, writer: BinaryWriter = new BinaryWriter()): BinaryWriter { - if (message.name !== '') { - writer.uint32(10).string(message.name); - } - if (message.input !== '') { - writer.uint32(18).string(message.input); - } - if (message.parentId !== undefined) { - writer.uint32(26).string(message.parentId); - } - if (message.parentTaskRunExternalId !== undefined) { - writer.uint32(34).string(message.parentTaskRunExternalId); - } - if (message.childIndex !== undefined) { - writer.uint32(40).int32(message.childIndex); - } - if (message.childKey !== undefined) { - writer.uint32(50).string(message.childKey); - } - if (message.additionalMetadata !== undefined) { - writer.uint32(58).string(message.additionalMetadata); - } - if (message.desiredWorkerId !== undefined) { - writer.uint32(66).string(message.desiredWorkerId); - } - if (message.priority !== undefined) { - writer.uint32(72).int32(message.priority); - } - Object.entries(message.desiredWorkerLabels).forEach(([key, value]) => { - TriggerWorkflowRequest_DesiredWorkerLabelsEntry.encode( - { key: key as any, value }, - writer.uint32(82).fork() - ).join(); - }); - return writer; - }, - - decode(input: BinaryReader | Uint8Array, length?: number): TriggerWorkflowRequest { - const reader = input instanceof BinaryReader ? input : new BinaryReader(input); - const end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseTriggerWorkflowRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - } - case 2: { - if (tag !== 18) { - break; - } - - message.input = reader.string(); - continue; - } - case 3: { - if (tag !== 26) { - break; - } - - message.parentId = reader.string(); - continue; - } - case 4: { - if (tag !== 34) { - break; - } - - message.parentTaskRunExternalId = reader.string(); - continue; - } - case 5: { - if (tag !== 40) { - break; - } - - message.childIndex = reader.int32(); - continue; - } - case 6: { - if (tag !== 50) { - break; - } - - message.childKey = reader.string(); - continue; - } - case 7: { - if (tag !== 58) { - break; - } - - message.additionalMetadata = reader.string(); - continue; - } - case 8: { - if (tag !== 66) { - break; - } - - message.desiredWorkerId = reader.string(); - continue; - } - case 9: { - if (tag !== 72) { - break; - } - - message.priority = reader.int32(); - continue; - } - case 10: { - if (tag !== 82) { - break; - } - - const entry10 = TriggerWorkflowRequest_DesiredWorkerLabelsEntry.decode( - reader, - reader.uint32() - ); - if (entry10.value !== undefined) { - message.desiredWorkerLabels[entry10.key] = entry10.value; - } - continue; - } - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skip(tag & 7); - } - return message; - }, - - fromJSON(object: any): TriggerWorkflowRequest { - return { - name: isSet(object.name) ? globalThis.String(object.name) : '', - input: isSet(object.input) ? globalThis.String(object.input) : '', - parentId: isSet(object.parentId) ? globalThis.String(object.parentId) : undefined, - parentTaskRunExternalId: isSet(object.parentTaskRunExternalId) - ? globalThis.String(object.parentTaskRunExternalId) - : undefined, - childIndex: isSet(object.childIndex) ? 
globalThis.Number(object.childIndex) : undefined, - childKey: isSet(object.childKey) ? globalThis.String(object.childKey) : undefined, - additionalMetadata: isSet(object.additionalMetadata) - ? globalThis.String(object.additionalMetadata) - : undefined, - desiredWorkerId: isSet(object.desiredWorkerId) - ? globalThis.String(object.desiredWorkerId) - : undefined, - priority: isSet(object.priority) ? globalThis.Number(object.priority) : undefined, - desiredWorkerLabels: isObject(object.desiredWorkerLabels) - ? Object.entries(object.desiredWorkerLabels).reduce<{ [key: string]: DesiredWorkerLabels }>( - (acc, [key, value]) => { - acc[key] = DesiredWorkerLabels.fromJSON(value); - return acc; - }, - {} - ) - : {}, - }; - }, - - toJSON(message: TriggerWorkflowRequest): unknown { - const obj: any = {}; - if (message.name !== '') { - obj.name = message.name; - } - if (message.input !== '') { - obj.input = message.input; - } - if (message.parentId !== undefined) { - obj.parentId = message.parentId; - } - if (message.parentTaskRunExternalId !== undefined) { - obj.parentTaskRunExternalId = message.parentTaskRunExternalId; - } - if (message.childIndex !== undefined) { - obj.childIndex = Math.round(message.childIndex); - } - if (message.childKey !== undefined) { - obj.childKey = message.childKey; - } - if (message.additionalMetadata !== undefined) { - obj.additionalMetadata = message.additionalMetadata; - } - if (message.desiredWorkerId !== undefined) { - obj.desiredWorkerId = message.desiredWorkerId; - } - if (message.priority !== undefined) { - obj.priority = Math.round(message.priority); - } - if (message.desiredWorkerLabels) { - const entries = Object.entries(message.desiredWorkerLabels); - if (entries.length > 0) { - obj.desiredWorkerLabels = {}; - entries.forEach(([k, v]) => { - obj.desiredWorkerLabels[k] = DesiredWorkerLabels.toJSON(v); - }); - } - } - return obj; - }, - - create(base?: DeepPartial): TriggerWorkflowRequest { - return TriggerWorkflowRequest.fromPartial(base 
?? {}); - }, - fromPartial(object: DeepPartial): TriggerWorkflowRequest { - const message = createBaseTriggerWorkflowRequest(); - message.name = object.name ?? ''; - message.input = object.input ?? ''; - message.parentId = object.parentId ?? undefined; - message.parentTaskRunExternalId = object.parentTaskRunExternalId ?? undefined; - message.childIndex = object.childIndex ?? undefined; - message.childKey = object.childKey ?? undefined; - message.additionalMetadata = object.additionalMetadata ?? undefined; - message.desiredWorkerId = object.desiredWorkerId ?? undefined; - message.priority = object.priority ?? undefined; - message.desiredWorkerLabels = Object.entries(object.desiredWorkerLabels ?? {}).reduce<{ - [key: string]: DesiredWorkerLabels; - }>((acc, [key, value]) => { - if (value !== undefined) { - acc[key] = DesiredWorkerLabels.fromPartial(value); - } - return acc; - }, {}); - return message; - }, -}; - -function createBaseTriggerWorkflowRequest_DesiredWorkerLabelsEntry(): TriggerWorkflowRequest_DesiredWorkerLabelsEntry { - return { key: '', value: undefined }; -} - -export const TriggerWorkflowRequest_DesiredWorkerLabelsEntry: MessageFns = - { - encode( - message: TriggerWorkflowRequest_DesiredWorkerLabelsEntry, - writer: BinaryWriter = new BinaryWriter() - ): BinaryWriter { - if (message.key !== '') { - writer.uint32(10).string(message.key); - } - if (message.value !== undefined) { - DesiredWorkerLabels.encode(message.value, writer.uint32(18).fork()).join(); - } - return writer; - }, - - decode( - input: BinaryReader | Uint8Array, - length?: number - ): TriggerWorkflowRequest_DesiredWorkerLabelsEntry { - const reader = input instanceof BinaryReader ? input : new BinaryReader(input); - const end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseTriggerWorkflowRequest_DesiredWorkerLabelsEntry(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - if (tag !== 10) { - break; - } - - message.key = reader.string(); - continue; - } - case 2: { - if (tag !== 18) { - break; - } - - message.value = DesiredWorkerLabels.decode(reader, reader.uint32()); - continue; - } - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skip(tag & 7); - } - return message; - }, - - fromJSON(object: any): TriggerWorkflowRequest_DesiredWorkerLabelsEntry { - return { - key: isSet(object.key) ? globalThis.String(object.key) : '', - value: isSet(object.value) ? DesiredWorkerLabels.fromJSON(object.value) : undefined, - }; - }, - - toJSON(message: TriggerWorkflowRequest_DesiredWorkerLabelsEntry): unknown { - const obj: any = {}; - if (message.key !== '') { - obj.key = message.key; - } - if (message.value !== undefined) { - obj.value = DesiredWorkerLabels.toJSON(message.value); - } - return obj; - }, - - create( - base?: DeepPartial - ): TriggerWorkflowRequest_DesiredWorkerLabelsEntry { - return TriggerWorkflowRequest_DesiredWorkerLabelsEntry.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial - ): TriggerWorkflowRequest_DesiredWorkerLabelsEntry { - const message = createBaseTriggerWorkflowRequest_DesiredWorkerLabelsEntry(); - message.key = object.key ?? ''; - message.value = - object.value !== undefined && object.value !== null - ? 
DesiredWorkerLabels.fromPartial(object.value) - : undefined; - return message; - }, - }; - function createBaseTriggerWorkflowResponse(): TriggerWorkflowResponse { return { workflowRunId: '' }; } diff --git a/sdks/typescript/src/util/abort-error.ts b/sdks/typescript/src/util/abort-error.ts index 8b56ecce4..2a34178f2 100644 --- a/sdks/typescript/src/util/abort-error.ts +++ b/sdks/typescript/src/util/abort-error.ts @@ -1,3 +1,5 @@ +import { setMaxListeners } from 'events'; + export class AbortError extends Error { readonly code = 'ABORT_ERR'; @@ -35,6 +37,20 @@ export function rethrowIfAborted(err: unknown): void { } } +/** + * Attach an `abort` listener to a signal, disabling the Node.js + * `MaxListenersExceededWarning` first. + * + * A single durable task can attach many concurrent listeners to the same signal + * (fan-out children, parallel waitFor calls, etc.), easily exceeding the default + * cap of 10. Setting max to 0 (unlimited) is safe here because every listener is + * removed on settlement. + */ +export function bindAbortSignalHandler(signal: AbortSignal, handler: () => void): void { + setMaxListeners(0, signal); + signal.addEventListener('abort', handler, { once: true }); +} + export type ThrowIfAbortedOpts = { /** * Optional: called before throwing when the signal is aborted. diff --git a/sdks/typescript/src/util/errors/eviction-not-supported-error.ts b/sdks/typescript/src/util/errors/eviction-not-supported-error.ts new file mode 100644 index 000000000..feafd2ea9 --- /dev/null +++ b/sdks/typescript/src/util/errors/eviction-not-supported-error.ts @@ -0,0 +1,16 @@ +import { MinEngineVersion } from '@hatchet/v1'; +import HatchetError from './hatchet-error'; + +export class EvictionNotSupportedError extends HatchetError { + engineVersion: string | undefined; + + constructor(engineVersion?: string) { + const versionInfo = engineVersion ? ` (engine ${engineVersion})` : ''; + super( + `Durable eviction is not supported by the connected engine${versionInfo}. 
` + + `Please upgrade your Hatchet engine to ${MinEngineVersion.DURABLE_EVICTION} or later.` + ); + this.name = 'EvictionNotSupportedError'; + this.engineVersion = engineVersion; + } +} diff --git a/sdks/typescript/src/util/errors/non-determinism-error.ts b/sdks/typescript/src/util/errors/non-determinism-error.ts new file mode 100644 index 000000000..812509924 --- /dev/null +++ b/sdks/typescript/src/util/errors/non-determinism-error.ts @@ -0,0 +1,21 @@ +import HatchetError from './hatchet-error'; + +export class NonDeterminismError extends HatchetError { + taskExternalId: string; + invocationCount: number; + nodeId: number; + + constructor(taskExternalId: string, invocationCount: number, nodeId: number, message: string) { + const detail = message + ? message + : `Non-determinism detected in task ${taskExternalId} on invocation ${invocationCount} at node ${nodeId}`; + super( + `${detail}\n` + + `Check out our documentation for more details on expectations of durable tasks: https://docs.hatchet.run/v1/patterns/mixing-patterns` + ); + this.name = 'NonDeterminismError'; + this.taskExternalId = taskExternalId; + this.invocationCount = invocationCount; + this.nodeId = nodeId; + } +} diff --git a/sdks/typescript/src/util/errors/task-run-terminated-error.ts b/sdks/typescript/src/util/errors/task-run-terminated-error.ts new file mode 100644 index 000000000..5068d7a3d --- /dev/null +++ b/sdks/typescript/src/util/errors/task-run-terminated-error.ts @@ -0,0 +1,15 @@ +export type TaskRunTerminationReason = 'cancelled' | 'evicted'; + +export class TaskRunTerminatedError extends Error { + readonly reason: TaskRunTerminationReason; + + constructor(reason: TaskRunTerminationReason, message?: string) { + super(message ?? 
reason); + this.name = 'TaskRunTerminatedError'; + this.reason = reason; + } +} + +export function isTaskRunTerminatedError(err: unknown): err is TaskRunTerminatedError { + return err instanceof TaskRunTerminatedError; +} diff --git a/sdks/typescript/src/util/hatchet-promise/hatchet-promise.test.ts b/sdks/typescript/src/util/hatchet-promise/hatchet-promise.test.ts index b5dfb10c4..2cfaeb6ac 100644 --- a/sdks/typescript/src/util/hatchet-promise/hatchet-promise.test.ts +++ b/sdks/typescript/src/util/hatchet-promise/hatchet-promise.test.ts @@ -1,4 +1,8 @@ -import HatchetPromise from './hatchet-promise'; +import HatchetPromise, { CancellationReason } from './hatchet-promise'; +import { + TaskRunTerminatedError, + isTaskRunTerminatedError, +} from '@util/errors/task-run-terminated-error'; describe('HatchetPromise', () => { it('should resolve the original promise if not canceled', async () => { @@ -10,7 +14,7 @@ describe('HatchetPromise', () => { const result = await hatchetPromise.promise; expect(result).toEqual('RESOLVED'); }); - it('should resolve the cancel promise if canceled', async () => { + it('should reject with a TaskRunTerminatedError when canceled', async () => { const hatchetPromise = new HatchetPromise( new Promise((resolve) => { setTimeout(() => resolve('RESOLVED'), 500); @@ -24,9 +28,30 @@ describe('HatchetPromise', () => { try { await result; - expect(true).toEqual(false); // this should not be reached + expect(true).toEqual(false); } catch (e) { - expect(e).toEqual(undefined); + expect(isTaskRunTerminatedError(e)).toBe(true); + expect((e as TaskRunTerminatedError).reason).toBe('cancelled'); + } + }); + it('should use evicted reason when cancelled with EVICTED_BY_WORKER', async () => { + const hatchetPromise = new HatchetPromise( + new Promise((resolve) => { + setTimeout(() => resolve('RESOLVED'), 500); + }) + ); + + const result = hatchetPromise.promise; + setTimeout(() => { + hatchetPromise.cancel(CancellationReason.EVICTED_BY_WORKER); + }, 100); + + 
try { + await result; + expect(true).toEqual(false); + } catch (e) { + expect(isTaskRunTerminatedError(e)).toBe(true); + expect((e as TaskRunTerminatedError).reason).toBe('evicted'); } }); }); diff --git a/sdks/typescript/src/util/hatchet-promise/hatchet-promise.ts b/sdks/typescript/src/util/hatchet-promise/hatchet-promise.ts index 84edb8eb0..fa55c85bf 100644 --- a/sdks/typescript/src/util/hatchet-promise/hatchet-promise.ts +++ b/sdks/typescript/src/util/hatchet-promise/hatchet-promise.ts @@ -1,8 +1,16 @@ +import { TaskRunTerminatedError } from '@util/errors/task-run-terminated-error'; + /** Canonical reasons when cancelling a HatchetPromise (e.g. worker shutdown). */ export enum CancellationReason { CANCELLED_BY_WORKER = 'Cancelled by worker', + EVICTED_BY_WORKER = 'Evicted by worker', } +const reasonToTermination: Record = { + [CancellationReason.CANCELLED_BY_WORKER]: 'cancelled', + [CancellationReason.EVICTED_BY_WORKER]: 'evicted', +}; + class HatchetPromise { cancel: (reason?: CancellationReason) => void = (_reason?: CancellationReason) => {}; promise: Promise; @@ -17,7 +25,10 @@ class HatchetPromise { constructor(promise: Promise) { this.inner = Promise.resolve(promise) as Promise; this.promise = new Promise((resolve, reject) => { - this.cancel = reject; + this.cancel = (reason?: CancellationReason) => { + const termination = reason ? reasonToTermination[reason] : 'cancelled'; + reject(new TaskRunTerminatedError(termination, reason)); + }; this.inner.then(resolve).catch(reject); }); } diff --git a/sdks/typescript/src/util/sleep.ts b/sdks/typescript/src/util/sleep.ts index 70412ea5e..d6ea9fbea 100644 --- a/sdks/typescript/src/util/sleep.ts +++ b/sdks/typescript/src/util/sleep.ts @@ -1,18 +1,21 @@ +import { Duration, durationToMs } from '../v1/client/duration'; + /** * Sleeps for a given number of milliseconds without blocking the event loop * * WARNING: This is not a durable sleep. It will not be honored if the worker is * restarted or crashes. 
* - * @param ms - The number of milliseconds to sleep + * @param duration - The number of milliseconds to sleep, or a Duration (e.g. "5s", \{ seconds: 5 \}) * @param signal - Optional AbortSignal; if aborted, the promise rejects with Error('Cancelled'). * Use in task handlers so cancellation can interrupt long sleeps. * @returns A promise that resolves after the given number of milliseconds (or rejects on abort) */ -function sleep(ms: number, signal?: AbortSignal): Promise { +function sleep(duration: number | Duration, signal?: AbortSignal): Promise { + const timeout = typeof duration === 'number' ? duration : durationToMs(duration); if (!signal) { return new Promise((resolve) => { - setTimeout(resolve, ms); + setTimeout(resolve, timeout); }); } @@ -20,7 +23,7 @@ function sleep(ms: number, signal?: AbortSignal): Promise { const timer = setTimeout(() => { signal.removeEventListener('abort', onAbort); resolve(); - }, ms); + }, timeout); const onAbort = () => { clearTimeout(timer); diff --git a/sdks/typescript/src/v1/client/admin.ts b/sdks/typescript/src/v1/client/admin.ts index 1c6ffc1d9..f3f33e17c 100644 --- a/sdks/typescript/src/v1/client/admin.ts +++ b/sdks/typescript/src/v1/client/admin.ts @@ -2,14 +2,12 @@ import HatchetError from '@util/errors/hatchet-error'; import { ClientConfig } from '@clients/hatchet-client/client-config'; import WorkflowRunRef from '@hatchet/util/workflow-run-ref'; -import { Priority, RateLimitDuration, RunsClient } from '@hatchet/v1'; +import { Priority, RateLimitDuration, RunsClient, WorkerLabelComparator } from '@hatchet/v1'; import { createGrpcClient } from '@hatchet/util/grpc-helpers'; import { RunListenerClient } from '@hatchet/clients/listeners/run-listener/child-listener-client'; import { Api } from '@hatchet/clients/rest/generated/Api'; import { BulkTriggerWorkflowRequest, - DesiredWorkerLabels, - WorkerLabelComparator, WorkflowServiceClient, WorkflowServiceDefinition, } from '@hatchet/protoc/workflows'; @@ -22,6 +20,7 @@ import 
{ Logger } from '@hatchet/util/logger'; import { retrier } from '@hatchet/util/retrier'; import { batch } from '@hatchet/util/batch'; import { applyNamespace } from '@hatchet/util/apply-namespace'; +import { DesiredWorkerLabels } from '@hatchet-dev/typescript-sdk/protoc/v1/shared/trigger'; type DesiredWorkerLabelOpt = { value: string | number; diff --git a/sdks/typescript/src/v1/client/client.ts b/sdks/typescript/src/v1/client/client.ts index ff3ec0a5d..a89248843 100644 --- a/sdks/typescript/src/v1/client/client.ts +++ b/sdks/typescript/src/v1/client/client.ts @@ -545,8 +545,7 @@ export class HatchetClient< this._durableListener = new DurableListenerClient( this._config, channelFactory(this._config, this._credentials), - this._clientFactory, - this.api + this._clientFactory ); } return this._durableListener; diff --git a/sdks/typescript/src/v1/client/duration.test.ts b/sdks/typescript/src/v1/client/duration.test.ts new file mode 100644 index 000000000..54b508842 --- /dev/null +++ b/sdks/typescript/src/v1/client/duration.test.ts @@ -0,0 +1,82 @@ +import { durationToString, durationToMs, Duration } from './duration'; + +describe('durationToString', () => { + it('passes through a duration string as-is', () => { + expect(durationToString('1h30m5s')).toBe('1h30m5s'); + expect(durationToString('10m')).toBe('10m'); + expect(durationToString('30s')).toBe('30s'); + }); + + it('converts a DurationObject to a string', () => { + expect(durationToString({ hours: 1, minutes: 30, seconds: 5 })).toBe('1h30m5s'); + expect(durationToString({ minutes: 10 })).toBe('10m'); + expect(durationToString({ seconds: 45 })).toBe('45s'); + expect(durationToString({ hours: 2 })).toBe('2h'); + }); + + it('returns "0s" for an empty DurationObject', () => { + expect(durationToString({})).toBe('0s'); + }); + + it('converts milliseconds to a string', () => { + expect(durationToString(0)).toBe('0s'); + expect(durationToString(5000)).toBe('5s'); + expect(durationToString(60_000)).toBe('1m'); + 
expect(durationToString(3_600_000)).toBe('1h'); + expect(durationToString(5_405_000)).toBe('1h30m5s'); + }); + + it('truncates sub-second remainders from milliseconds', () => { + expect(durationToString(1500)).toBe('1s'); + expect(durationToString(999)).toBe('0s'); + }); +}); + +describe('durationToMs', () => { + it('parses a seconds-only string', () => { + expect(durationToMs('30s')).toBe(30_000); + }); + + it('parses a minutes-only string', () => { + expect(durationToMs('10m')).toBe(600_000); + }); + + it('parses an hours-only string', () => { + expect(durationToMs('2h')).toBe(7_200_000); + }); + + it('parses a multi-unit string', () => { + expect(durationToMs('1h30m5s')).toBe(5_405_000); + }); + + it('converts a DurationObject', () => { + expect(durationToMs({ hours: 1, minutes: 30, seconds: 5 })).toBe(5_405_000); + expect(durationToMs({ seconds: 10 })).toBe(10_000); + expect(durationToMs({})).toBe(0); + }); + + it('returns a number (milliseconds) as-is', () => { + expect(durationToMs(42_000)).toBe(42_000); + expect(durationToMs(0)).toBe(0); + }); + + it('throws on an invalid string', () => { + expect(() => durationToMs('bad' as Duration)).toThrow(/Invalid duration string/); + }); +}); + +describe('round-trip: durationToMs → durationToString', () => { + const cases: [Duration, number, string][] = [ + ['1h30m5s', 5_405_000, '1h30m5s'], + ['10m', 600_000, '10m'], + ['30s', 30_000, '30s'], + [{ hours: 2, minutes: 15 }, 8_100_000, '2h15m'], + [60_000, 60_000, '1m'], + ]; + + it.each(cases)('input %j → %d ms → "%s"', (input, expectedMs, expectedStr) => { + const ms = durationToMs(input); + expect(ms).toBe(expectedMs); + expect(durationToString(ms)).toBe(expectedStr); + }); +}); diff --git a/sdks/typescript/src/v1/client/duration.ts b/sdks/typescript/src/v1/client/duration.ts index 6429d34ae..9920a3f6e 100644 --- a/sdks/typescript/src/v1/client/duration.ts +++ b/sdks/typescript/src/v1/client/duration.ts @@ -1,15 +1,63 @@ -// Single unit durations type SecondsDuration 
= `${number}s`; type MinutesDuration = `${number}m`; type HoursDuration = `${number}h`; - -// Combined durations type TwoUnitDurations = `${number}h${number}m` | `${number}h${number}s` | `${number}m${number}s`; type ThreeUnitDurations = `${number}h${number}m${number}s`; -export type Duration = +type DurationString = | SecondsDuration | MinutesDuration | HoursDuration | TwoUnitDurations | ThreeUnitDurations; + +export interface DurationObject { + hours?: number; + minutes?: number; + seconds?: number; +} + +/** A number is treated as milliseconds. */ +export type Duration = DurationString | DurationObject | number; + +const DURATION_RE = /^(?:(\d+)h)?(?:(\d+)m)?(?:(\d+)s)?$/; + +/** Normalizes a Duration to Go-style string format (e.g. "1h30m5s"). */ +export function durationToString(d: Duration): string { + if (typeof d === 'string') return d; + if (typeof d === 'number') { + const totalSeconds = Math.floor(d / 1000); + const h = Math.floor(totalSeconds / 3600); + const m = Math.floor((totalSeconds % 3600) / 60); + const s = totalSeconds % 60; + let out = ''; + if (h) out += `${h}h`; + if (m) out += `${m}m`; + if (s || !out) out += `${s}s`; + return out; + } + let s = ''; + if (d.hours) s += `${d.hours}h`; + if (d.minutes) s += `${d.minutes}m`; + if (d.seconds) s += `${d.seconds}s`; + return s || '0s'; +} + +export function durationToMs(d: Duration): number { + if (typeof d === 'number') return d; + if (typeof d === 'object') { + return ((d.hours ?? 0) * 3600 + (d.minutes ?? 0) * 60 + (d.seconds ?? 0)) * 1000; + } + + const match = (d as string).match(DURATION_RE); + if (!match) { + throw new Error( + `Invalid duration string: "${d}". Expected format like "1h30m5s", "10m", "30s".` + ); + } + + const [, h, m, s] = match; + return ( + (parseInt(h ?? '0', 10) * 3600 + parseInt(m ?? '0', 10) * 60 + parseInt(s ?? 
'0', 10)) * 1000 + ); +} diff --git a/sdks/typescript/src/v1/client/features/runs.ts b/sdks/typescript/src/v1/client/features/runs.ts index dcca4a682..80fc25364 100644 --- a/sdks/typescript/src/v1/client/features/runs.ts +++ b/sdks/typescript/src/v1/client/features/runs.ts @@ -212,10 +212,43 @@ export class RunsClient { } /** - * Creates a run reference for a task or workflow run by its ID. - * @param id - The ID of the run to create a reference for. - * @returns A promise that resolves to the run reference. + * Restore an evicted durable task so it can resume execution. + * @param taskExternalId - The external ID of the evicted task. */ + async restoreTask(taskExternalId: string) { + return this.api.v1TaskRestore(taskExternalId); + } + + /** + * Fork (reset) a durable task from a specific node, triggering re-execution from that point. + * @param taskExternalId - The external ID of the durable task to reset. + * @param nodeId - The node ID to replay from. + */ + async branchDurableTask(taskExternalId: string, nodeId: number, branchId: number = 0) { + return this.api.v1DurableTaskBranch(this.tenantId, { + taskExternalId, + nodeId, + branchId, + }); + } + + /** + * Resolve the task external ID for a workflow run. For runs with multiple tasks, + * returns the first task's external ID. + * @param workflowRunId - The workflow run ID to look up. + * @returns The task external ID. 
+ */ + async getTaskExternalId(workflowRunId: string): Promise { + const run = await this.get(workflowRunId); + const tasks = run?.tasks; + + if (Array.isArray(tasks) && tasks.length > 0 && tasks[0]?.taskExternalId) { + return tasks[0].taskExternalId; + } + + throw new Error(`Could not find task external ID for workflow run ${workflowRunId}`); + } + runRef = any>(id: string): WorkflowRunRef { return new WorkflowRunRef(id, this.listener, this); } diff --git a/sdks/typescript/src/v1/client/worker/context.ts b/sdks/typescript/src/v1/client/worker/context.ts index 98fb32908..f949d9891 100644 --- a/sdks/typescript/src/v1/client/worker/context.ts +++ b/sdks/typescript/src/v1/client/worker/context.ts @@ -15,7 +15,6 @@ import { BaseWorkflowDeclaration as WorkflowV1, } from '@hatchet/v1/declaration'; import HatchetError from '@util/errors/hatchet-error'; -import { JsonObject } from '@bufbuild/protobuf'; import { Action } from '@hatchet/clients/dispatcher/action-listener'; import { Logger, LogLevel } from '@hatchet/util/logger'; import { parseJSON } from '@hatchet/util/parse'; @@ -23,21 +22,33 @@ import WorkflowRunRef from '@hatchet/util/workflow-run-ref'; import { Conditions, Render } from '@hatchet/v1/conditions'; import { conditionsToPb } from '@hatchet/v1/conditions/transformer'; import { CreateWorkflowDurableTaskOpts, CreateWorkflowTaskOpts } from '@hatchet/v1/task'; -import { OutputType } from '@hatchet/v1/types'; +import { JsonObject, OutputType } from '@hatchet/v1/types'; import { Action as ConditionAction } from '@hatchet/protoc/v1/shared/condition'; import { HatchetClient } from '@hatchet/v1'; import { applyNamespace } from '@hatchet/util/apply-namespace'; import { createAbortError, rethrowIfAborted } from '@hatchet/util/abort-error'; import { WorkerLabels } from '@hatchet/clients/dispatcher/dispatcher-client'; -import { NextStep } from '@hatchet/legacy/step'; +import { NextStep } from '@hatchet-dev/typescript-sdk/legacy/step'; +import { DurableListenerClient } 
from '@hatchet/clients/listeners/durable-listener/durable-listener-client'; +import { createHash } from 'crypto'; +import { z } from 'zod'; import { InternalWorker } from './worker-internal'; -import { Duration } from '../duration'; +import { Duration, durationToMs, durationToString } from '../duration'; +import { DurableEvictionManager } from './eviction/eviction-manager'; +import { ActionKey } from './eviction/eviction-cache'; +import { supportsEviction } from './engine-version'; +import { waitForPreEviction } from './deprecated/pre-eviction'; // TODO remove this once we have a proper next step type type TriggerData = Record>; type ChildRunOpts = RunOpts & { key?: string; sticky?: boolean }; +export interface SleepResult { + /** The sleep duration in milliseconds. */ + durationMs: number; +} + type LogExtra = { extra?: any; error?: Error; @@ -411,7 +422,7 @@ export class Context { return; } - await this.v1.dispatcher.refreshTimeout(incrementBy, taskRunExternalId); + await this.v1.dispatcher.refreshTimeout(durationToString(incrementBy), taskRunExternalId); } /** @@ -444,7 +455,7 @@ export class Context { await this.v1.events.putStream(taskRunExternalId, data, index); } - private spawnOptions(workflow: string | WorkflowV1, options?: ChildRunOpts) { + protected spawnOptions(workflow: string | WorkflowV1, options?: ChildRunOpts) { this.throwIfCancelled(); let workflowName: string; @@ -820,16 +831,89 @@ export class Context { * It extends the Context class and includes additional methods for durable execution like sleepFor and waitFor. 
*/ export class DurableContext extends Context { - waitKey: number = 0; + private _durableListener: DurableListenerClient; + private _evictionManager: DurableEvictionManager | undefined; + private _engineVersion: string | undefined; + private _waitKey: number = 0; + + constructor( + action: Action, + v1: HatchetClient, + worker: InternalWorker, + durableListener: DurableListenerClient, + evictionManager?: DurableEvictionManager, + engineVersion?: string + ) { + super(action, v1, worker); + this._durableListener = durableListener; + this._evictionManager = evictionManager; + this._engineVersion = engineVersion; + } + + get supportsEviction(): boolean { + return supportsEviction(this._engineVersion); + } + + get durableListener(): DurableListenerClient { + return this._durableListener; + } + + /** + * The invocation count for the current durable task. Used for deduplication across replays. + */ + get invocationCount(): number { + return this.action.durableTaskInvocationCount ?? 1; + } + + private get _actionKey(): ActionKey { + return this.action.key; + } + + private async withEvictionWait( + waitKind: string, + resourceId: string, + fn: () => Promise + ): Promise { + this._evictionManager?.markWaiting(this._actionKey, waitKind, resourceId); + try { + return await fn(); + } finally { + this._evictionManager?.markActive(this._actionKey); + } + } /** * Pauses execution for the specified duration. * Duration is "global" meaning it will wait in real time regardless of transient failures like worker restarts. * @param duration - The duration to sleep for. - * @returns A promise that resolves when the sleep duration has elapsed. + * @returns A promise that resolves with a SleepResult when the sleep duration has elapsed. 
*/ - async sleepFor(duration: Duration, readableDataKey?: string) { - return this.waitFor({ sleepFor: duration, readableDataKey }); + async sleepFor(duration: Duration, readableDataKey?: string): Promise { + const res = await this.waitFor({ sleepFor: duration, readableDataKey }); + + const matches: Record = res['CREATE'] || {}; + const [firstMatch] = Object.values(matches); + + if (!firstMatch || firstMatch.length === 0) { + return { durationMs: durationToMs(duration) }; + } + + const [sleep] = firstMatch; + const sleepDuration: string | undefined = sleep?.sleep_duration; + + if (sleepDuration) { + const DURATION_RE = /^(?:(\d+)h)?(?:(\d+)m)?(?:(\d+)s)?$/; + const match = sleepDuration.match(DURATION_RE); + if (match) { + const [, h, m, s] = match; + const ms = + (parseInt(h ?? '0', 10) * 3600 + parseInt(m ?? '0', 10) * 60 + parseInt(s ?? '0', 10)) * + 1000; + return { durationMs: ms }; + } + } + + return { durationMs: durationToMs(duration) }; } /** @@ -840,31 +924,295 @@ export class DurableContext extends Context { */ async waitFor(conditions: Conditions | Conditions[]): Promise> { this.throwIfCancelled(); - const pbConditions = conditionsToPb( - Render(ConditionAction.CREATE, conditions), - this.v1.config.namespace - ); - const key = `waitFor-${this.waitKey++}`; - await this.v1.durableListener.registerDurableEvent({ - taskId: this.action.taskRunExternalId, - signalKey: key, - sleepConditions: pbConditions.sleepConditions, - userEventConditions: pbConditions.userEventConditions, - }); - const event = await this.v1.durableListener.result( + if (!this.supportsEviction) { + return this._waitForPreEviction(conditions); + } + + const rendered = Render(ConditionAction.CREATE, conditions); + const pbConditions = conditionsToPb(rendered, this.v1.config.namespace); + + const ack = await this._durableListener.sendEvent( + this.action.taskRunExternalId, + this.invocationCount, { - taskId: this.action.taskRunExternalId, - signalKey: key, - }, - { signal: 
this.abortController.signal } + kind: 'waitFor', + waitForConditions: { + sleepConditions: pbConditions.sleepConditions, + userEventConditions: pbConditions.userEventConditions, + }, + } ); - // Convert event.data from Uint8Array to string if needed - const eventData = - event.data instanceof Uint8Array ? new TextDecoder().decode(event.data) : event.data; + const resourceId = + rendered + .map((c) => c.base.readableDataKey) + .filter(Boolean) + .join(',') || `node:${ack.nodeId}`; - const res = JSON.parse(eventData) as Record>; - return res.CREATE; + return this.withEvictionWait('waitFor', resourceId, async () => { + const result = await this._durableListener.waitForCallback( + this.action.taskRunExternalId, + this.invocationCount, + ack.branchId, + ack.nodeId, + { signal: this.abortController.signal } + ); + return result.payload || {}; + }); + } + + /** + * Lightweight wrapper for waiting for a user event. Allows for shorthand usage of + * `ctx.waitFor` when specifying a user event condition. + * + * For more complicated conditions, use `ctx.waitFor` directly. + * + * @param key - The event key to wait for. + * @param expression - An optional CEL expression to filter events. + * @param payloadSchema - An optional Zod schema to validate and parse the event payload. + * @returns The event payload, validated against the schema if provided. + */ + async waitForEvent( + key: string, + expression?: string, + payloadSchema?: T + ): Promise>; + async waitForEvent(key: string, expression?: string): Promise>; + async waitForEvent( + key: string, + expression?: string, + payloadSchema?: z.ZodTypeAny + ): Promise { + const res = await this.waitFor({ eventKey: key, expression }); + + // The engine returns an object like: + // {"CREATE": {"signal_key_1": [{"id": ..., "data": {...}}]}} + // Since we have a single match, the list will only have one item. 
+ const matches: Record = res['CREATE'] || {}; + const [firstMatch] = Object.values(matches); + + if (!firstMatch || firstMatch.length === 0) { + if (payloadSchema) { + return payloadSchema.parse({}); + } + return {}; + } + + const [rawPayload] = firstMatch; + + if (payloadSchema) { + return payloadSchema.parse(rawPayload); + } + + return rawPayload; + } + + /** + * Durably sleep until a specific timestamp. + * Uses the memoized `now()` to compute the remaining duration, then delegates to `sleepFor`. + * + * @param wakeAt - The timestamp to sleep until. + * @returns A SleepResult containing the actual duration slept. + */ + async sleepUntil(wakeAt: Date): Promise { + const now = await this.now(); + const remainingMs = wakeAt.getTime() - now.getTime(); + return this.sleepFor(`${Math.max(0, Math.ceil(remainingMs / 1000))}s`); + } + + /** + * Get the current timestamp, memoized across replays. Returns the same Date on every replay of the same task run. + * @returns The memoized current timestamp. 
+ */ + async now(): Promise { + const result = await this.memo(async () => { + return { ts: new Date().toISOString() }; + }, ['now']); + return new Date(result.ts); + } + + private async _waitForPreEviction( + conditions: Conditions | Conditions[] + ): Promise> { + const { result, nextWaitKey } = await waitForPreEviction( + this._durableListener, + this.action.taskRunExternalId, + this._waitKey, + conditions, + this.v1.config.namespace, + this.abortController.signal + ); + this._waitKey = nextWaitKey; + return result; + } + + private _buildTriggerOpts( + workflow: string | WorkflowV1 | TaskWorkflowDeclaration, + input?: Q, + options?: ChildRunOpts + ) { + let workflowName: string; + if (typeof workflow === 'string') { + workflowName = workflow; + } else { + workflowName = workflow.name; + } + + workflowName = applyNamespace(workflowName, this.v1.config.namespace).toLowerCase(); + + const triggerOpts = { + name: workflowName, + input: JSON.stringify(input || {}), + parentId: this.action.workflowRunId, + parentTaskRunExternalId: this.action.taskRunExternalId, + childIndex: this.spawnIndex, + childKey: options?.key, + additionalMetadata: options?.additionalMetadata + ? JSON.stringify(options.additionalMetadata) + : undefined, + desiredWorkerId: options?.sticky ? this.worker.id() : undefined, + priority: options?.priority, + desiredWorkerLabels: {}, + }; + + this.spawnIndex += 1; + + return { workflowName, triggerOpts }; + } + + /** + * Spawns a child workflow through the durable event log, waits for the child to complete. + * @param workflow - The workflow to spawn. + * @param input - The input data for the child workflow. + * @param options - Options for spawning the child workflow. + * @returns The result of the child workflow. + */ + async spawnChild( + workflow: string | WorkflowV1 | TaskWorkflowDeclaration, + input?: Q, + options?: ChildRunOpts + ): Promise

{ + if (!this.supportsEviction) { + const { workflowName, opts } = this.spawnOptions(workflow, options); + const ref = await this.v1.admin.runWorkflow(workflowName, (input || {}) as Q, opts); + ref.defaultSignal = this.abortController.signal; + return ref.output as Promise

; + } + + const results = await this.spawnChildren([ + { workflow, input: (input || {}) as Q, options }, + ]); + return results[0]; + } + + /** + * Spawns multiple child workflows through the durable event log, waits for all to complete. + * @param children - An array of objects containing the workflow, input, and options for each child. + * @returns A list of results from the child workflows. + */ + async spawnChildren( + children: Array<{ + workflow: string | WorkflowV1 | TaskWorkflowDeclaration; + input: Q; + options?: ChildRunOpts; + }> + ): Promise { + this.throwIfCancelled(); + + if (!this.supportsEviction) { + const workflows = children.map((c) => { + const { workflowName, opts } = this.spawnOptions(c.workflow, c.options); + return { workflowName, input: c.input, options: opts }; + }); + const refs = await this.v1.admin.runWorkflows(workflows); + for (const r of refs) { + r.defaultSignal = this.abortController.signal; + } + return Promise.all(refs.map((r) => r.output)) as Promise; + } + + const triggerOptsList = children.map((child) => { + const { triggerOpts } = this._buildTriggerOpts(child.workflow, child.input, child.options); + return triggerOpts; + }); + + const ack = await this._durableListener.sendEvent( + this.action.taskRunExternalId, + this.invocationCount, + { + kind: 'runChildren', + triggerOpts: triggerOptsList, + } + ); + + const results = await Promise.all( + ack.runEntries.map((entry) => + this.withEvictionWait('runChild', `workflow:bulk-child`, async () => { + const result = await this._durableListener.waitForCallback( + this.action.taskRunExternalId, + this.invocationCount, + entry.branchId, + entry.nodeId, + { signal: this.abortController.signal } + ); + return (result.payload || {}) as P; + }) + ) + ); + + return results; + } + + /** + * Memoize a function by storing its result in durable storage. Avoids recomputation on replay. + * + * @param fn - The async function to compute the value. 
+ * @param deps - Dependency values that form the memoization key. + * @returns The memoized value, either from durable storage or freshly computed. + */ + private async memo(fn: () => Promise, deps: readonly unknown[]): Promise { + this.throwIfCancelled(); + + if (!this.supportsEviction) { + return fn(); + } + + const memoKey = computeMemoKey(this.action.taskRunExternalId, deps); + + const ack = await this._durableListener.sendEvent( + this.action.taskRunExternalId, + this.invocationCount, + { + kind: 'memo', + memoKey, + } + ); + + if (ack.memoAlreadyExisted && ack.memoResultPayload && ack.memoResultPayload.length > 0) { + const serialized = new TextDecoder().decode(ack.memoResultPayload); + return JSON.parse(serialized) as R; + } + + const result = await fn(); + const serializedResult = new TextEncoder().encode(JSON.stringify(result)); + + await this._durableListener.sendMemoCompletedNotification( + this.action.taskRunExternalId, + ack.nodeId, + ack.branchId, + this.invocationCount, + memoKey, + serializedResult + ); + + return result; } } + +function computeMemoKey(taskRunExternalId: string, args: readonly unknown[]): Uint8Array { + const h = createHash('sha256'); + h.update(taskRunExternalId); + h.update(JSON.stringify(args)); + return new Uint8Array(h.digest()); +} diff --git a/sdks/typescript/src/v1/client/worker/deprecated/index.ts b/sdks/typescript/src/v1/client/worker/deprecated/index.ts index cc28a8e35..7155a1fd6 100644 --- a/sdks/typescript/src/v1/client/worker/deprecated/index.ts +++ b/sdks/typescript/src/v1/client/worker/deprecated/index.ts @@ -1,4 +1,4 @@ -export { isLegacyEngine, LegacyDualWorker } from './legacy-worker'; +export { isLegacyEngine, fetchEngineVersion, LegacyDualWorker } from './legacy-worker'; export { LegacyV1Worker } from './legacy-v1-worker'; export { legacyGetActionListener } from './legacy-registration'; export { diff --git a/sdks/typescript/src/v1/client/worker/deprecated/legacy-worker.ts 
b/sdks/typescript/src/v1/client/worker/deprecated/legacy-worker.ts index 11cb7d4ab..90ccc0f89 100644 --- a/sdks/typescript/src/v1/client/worker/deprecated/legacy-worker.ts +++ b/sdks/typescript/src/v1/client/worker/deprecated/legacy-worker.ts @@ -7,7 +7,6 @@ */ import { Status } from 'nice-grpc'; -import { getGrpcErrorCode } from '@util/grpc-error'; import { BaseWorkflowDeclaration } from '../../../declaration'; import { HatchetClient } from '../../..'; import { CreateWorkerOpts } from '../worker'; @@ -15,20 +14,36 @@ import { LegacyV1Worker } from './legacy-v1-worker'; import { emitDeprecationNotice, semverLessThan } from './deprecation'; import { transformLegacyWorkflow } from '../../../../legacy/legacy-transformer'; +import { MinEngineVersion } from '../engine-version'; +import { getGrpcErrorCode } from '@hatchet-dev/typescript-sdk/util/grpc-error'; + const DEFAULT_DEFAULT_SLOTS = 100; const DEFAULT_DURABLE_SLOTS = 1_000; /** The date when slot_config support was released. */ const LEGACY_ENGINE_START = new Date('2026-02-12T00:00:00Z'); -/** Minimum engine version that supports multiple slot types. */ -const MIN_SLOT_CONFIG_VERSION = 'v0.78.23'; - const LEGACY_ENGINE_MESSAGE = 'Connected to an older Hatchet engine that does not support multiple slot types. ' + 'Falling back to legacy worker registration. ' + 'Please upgrade your Hatchet engine to the latest version.'; +/** + * Fetches the engine version from the dispatcher. + * Returns the semver string, or undefined if the engine is too old to support GetVersion. + */ +export async function fetchEngineVersion(v1: HatchetClient): Promise { + try { + const version = await v1.dispatcher.getVersion(); + return version || undefined; + } catch (e: unknown) { + if (getGrpcErrorCode(e) == Status.UNIMPLEMENTED) { + return undefined; + } + throw e; + } +} + /** * Checks if the connected engine is legacy by comparing its semantic version * against the minimum required version for slot_config support. 
@@ -36,30 +51,22 @@ const LEGACY_ENGINE_MESSAGE = * Emits a time-aware deprecation notice when a legacy engine is detected. */ export async function isLegacyEngine(v1: HatchetClient): Promise { - try { - const version = await v1.dispatcher.getVersion(); - - // If the version is empty or older than the minimum, treat as legacy - if (!version || semverLessThan(version, MIN_SLOT_CONFIG_VERSION)) { - const logger = v1.config.logger('Worker', v1.config.log_level); - emitDeprecationNotice('legacy-engine', LEGACY_ENGINE_MESSAGE, LEGACY_ENGINE_START, logger, { - errorDays: 180, - }); - return true; - } - - return false; - } catch (e: unknown) { + const version = await fetchEngineVersion(v1).catch((e) => { if (getGrpcErrorCode(e) === Status.UNIMPLEMENTED) { - const logger = v1.config.logger('Worker', v1.config.log_level); - emitDeprecationNotice('legacy-engine', LEGACY_ENGINE_MESSAGE, LEGACY_ENGINE_START, logger, { - errorDays: 180, - }); - return true; + return undefined; } - // For other errors, assume new engine and let registration fail naturally - return false; + throw e; + }); + + if (!version || semverLessThan(version, MinEngineVersion.SLOT_CONFIG)) { + const logger = v1.config.logger('Worker', v1.config.log_level); + emitDeprecationNotice('legacy-engine', LEGACY_ENGINE_MESSAGE, LEGACY_ENGINE_START, logger, { + errorDays: 180, + }); + return true; } + + return false; } /** diff --git a/sdks/typescript/src/v1/client/worker/deprecated/pre-eviction.ts b/sdks/typescript/src/v1/client/worker/deprecated/pre-eviction.ts new file mode 100644 index 000000000..ad4d622e5 --- /dev/null +++ b/sdks/typescript/src/v1/client/worker/deprecated/pre-eviction.ts @@ -0,0 +1,39 @@ +/** + * Pre-eviction fallback for DurableContext. + * + * Supports engines older than MinEngineVersion.DURABLE_EVICTION. + * Remove this module when support for those engines is dropped. 
+ */ +import { Conditions, Render } from '@hatchet/v1/conditions'; +import { conditionsToPb } from '@hatchet/v1/conditions/transformer'; +import { Action as ConditionAction } from '@hatchet/protoc/v1/shared/condition'; +import { DurableListenerClient } from '@hatchet/clients/listeners/durable-listener/durable-listener-client'; + +export async function waitForPreEviction( + durableListener: DurableListenerClient, + taskRunExternalId: string, + waitKey: number, + conditions: Conditions | Conditions[], + namespace?: string, + signal?: AbortSignal +): Promise<{ result: Record; nextWaitKey: number }> { + const pbConditions = conditionsToPb(Render(ConditionAction.CREATE, conditions), namespace); + const key = `waitFor-${waitKey}`; + + await durableListener.registerDurableEvent({ + taskId: taskRunExternalId, + signalKey: key, + sleepConditions: pbConditions.sleepConditions, + userEventConditions: pbConditions.userEventConditions, + }); + + const event = await durableListener.result( + { taskId: taskRunExternalId, signalKey: key }, + { signal } + ); + + const eventData = + event.data instanceof Uint8Array ? 
new TextDecoder().decode(event.data) : event.data; + const res = JSON.parse(eventData) as Record<string, Record<string, unknown>>; + return { result: res.CREATE, nextWaitKey: waitKey + 1 }; +} diff --git a/sdks/typescript/src/v1/client/worker/engine-version.ts b/sdks/typescript/src/v1/client/worker/engine-version.ts new file mode 100644 index 000000000..f0cfadc91 --- /dev/null +++ b/sdks/typescript/src/v1/client/worker/engine-version.ts @@ -0,0 +1,11 @@ +import { semverLessThan } from './deprecated/deprecation'; + +export const MinEngineVersion = { + SLOT_CONFIG: 'v0.78.23', + DURABLE_EVICTION: 'v0.80.0', +} as const; + +export function supportsEviction(engineVersion: string | undefined): boolean { + if (!engineVersion) return false; + return !semverLessThan(engineVersion, MinEngineVersion.DURABLE_EVICTION); +} diff --git a/sdks/typescript/src/v1/client/worker/eviction/eviction-cache.test.ts b/sdks/typescript/src/v1/client/worker/eviction/eviction-cache.test.ts new file mode 100644 index 000000000..94a0a8cfb --- /dev/null +++ b/sdks/typescript/src/v1/client/worker/eviction/eviction-cache.test.ts @@ -0,0 +1,512 @@ +import { + DurableEvictionCache, + DurableRunRecord, + EvictionCause, + buildEvictionReason, +} from './eviction-cache'; +import { EvictionPolicy } from './eviction-policy'; + +function makePolicy(overrides: Partial<EvictionPolicy> = {}): EvictionPolicy { + return { ttl: undefined, allowCapacityEviction: true, priority: 0, ...overrides }; +} + +const T0 = 1_000_000; +const ONE_SEC = 1_000; +const ONE_MIN = 60_000; + +describe('DurableEvictionCache', () => { + let cache: DurableEvictionCache; + + beforeEach(() => { + cache = new DurableEvictionCache(); + }); + + // ------- basic bookkeeping ------- + + describe('registerRun / unregisterRun / get', () => { + it('registers and retrieves a run', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy()); + const rec = cache.get('k1/0'); + expect(rec).toBeDefined(); + expect(rec!.taskRunExternalId).toBe('ext-1'); + 
expect(rec!.invocationCount).toBe(1); + expect(rec!.registeredAt).toBe(T0); + expect(rec!.waitingSince).toBeUndefined(); + }); + + it('returns undefined for unknown key', () => { + expect(cache.get('nope/0')).toBeUndefined(); + }); + + it('unregisters a run', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy()); + cache.unregisterRun('k1/0'); + expect(cache.get('k1/0')).toBeUndefined(); + }); + }); + + // ------- waiting state ------- + + describe('markWaiting / markActive / getAllWaiting', () => { + it('markWaiting sets wait fields', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy()); + cache.markWaiting('k1/0', T0 + ONE_SEC, 'sleep', 'res-1'); + const rec = cache.get('k1/0')!; + expect(rec.waitingSince).toBe(T0 + ONE_SEC); + expect(rec.waitKind).toBe('sleep'); + expect(rec.waitResourceId).toBe('res-1'); + }); + + it('markActive clears wait fields', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy()); + cache.markWaiting('k1/0', T0 + ONE_SEC, 'sleep', 'res-1'); + cache.markActive('k1/0'); + const rec = cache.get('k1/0')!; + expect(rec.waitingSince).toBeUndefined(); + expect(rec.waitKind).toBeUndefined(); + expect(rec.waitResourceId).toBeUndefined(); + }); + + it('ref-counts concurrent waits so one markActive does not clear waiting', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy()); + cache.markWaiting('k1/0', T0, 'runChild', 'child-0'); + cache.markWaiting('k1/0', T0, 'runChild', 'child-1'); + cache.markWaiting('k1/0', T0, 'runChild', 'child-2'); + + cache.markActive('k1/0'); + const rec = cache.get('k1/0')!; + expect(rec._waitCount).toBe(2); + expect(rec.waitingSince).toBe(T0); + + cache.markActive('k1/0'); + expect(rec._waitCount).toBe(1); + expect(rec.waitingSince).toBe(T0); + + cache.markActive('k1/0'); + expect(rec._waitCount).toBe(0); + expect(rec.waitingSince).toBeUndefined(); + }); + + it('waitingSince is set only on the first markWaiting call', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, 
makePolicy()); + cache.markWaiting('k1/0', T0, 'runChild', 'child-0'); + cache.markWaiting('k1/0', T0 + ONE_SEC, 'runChild', 'child-1'); + const rec = cache.get('k1/0')!; + expect(rec.waitingSince).toBe(T0); + }); + + it('markActive never goes below zero', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy()); + cache.markWaiting('k1/0', T0, 'sleep', 'r'); + cache.markActive('k1/0'); + cache.markActive('k1/0'); + expect(cache.get('k1/0')!._waitCount).toBe(0); + }); + + it('getAllWaiting returns only waiting runs', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy()); + cache.registerRun('k2/0', 'ext-2', 1, T0, makePolicy()); + cache.markWaiting('k1/0', T0, 'sleep', 'r1'); + expect(cache.getAllWaiting()).toHaveLength(1); + expect(cache.getAllWaiting()[0].key).toBe('k1/0'); + }); + + it('markWaiting on unknown key is a no-op', () => { + cache.markWaiting('unknown/0', T0, 'sleep', 'r'); + expect(cache.get('unknown/0')).toBeUndefined(); + }); + + it('markActive on unknown key is a no-op', () => { + expect(() => cache.markActive('unknown/0')).not.toThrow(); + }); + }); + + // ------- findKeyByTaskRunExternalId ------- + + describe('findKeyByTaskRunExternalId', () => { + it('returns the matching key', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy()); + cache.registerRun('k2/0', 'ext-2', 1, T0, makePolicy()); + + expect(cache.findKeyByTaskRunExternalId('ext-1')).toBe('k1/0'); + expect(cache.findKeyByTaskRunExternalId('ext-2')).toBe('k2/0'); + }); + + it('returns undefined for unknown taskRunExternalId', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy()); + expect(cache.findKeyByTaskRunExternalId('no-such-id')).toBeUndefined(); + }); + + it('returns undefined after unregister', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy()); + expect(cache.findKeyByTaskRunExternalId('ext-1')).toBe('k1/0'); + cache.unregisterRun('k1/0'); + expect(cache.findKeyByTaskRunExternalId('ext-1')).toBeUndefined(); + }); + 
}); + + // ------- selectEvictionCandidate ------- + + describe('selectEvictionCandidate', () => { + const DURABLE_SLOTS = 4; + const RESERVE_SLOTS = 0; + const MIN_WAIT_MS = 5_000; + + it('returns undefined when no runs are registered', () => { + expect( + cache.selectEvictionCandidate(T0, DURABLE_SLOTS, RESERVE_SLOTS, MIN_WAIT_MS) + ).toBeUndefined(); + }); + + it('returns undefined when runs exist but none are waiting', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy({ ttl: '1m' })); + expect( + cache.selectEvictionCandidate(T0 + ONE_MIN * 5, DURABLE_SLOTS, RESERVE_SLOTS, MIN_WAIT_MS) + ).toBeUndefined(); + }); + + it('returns undefined when waiting runs have no eviction policy', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, undefined); + cache.markWaiting('k1/0', T0, 'sleep', 'r'); + expect( + cache.selectEvictionCandidate(T0 + ONE_MIN * 5, DURABLE_SLOTS, RESERVE_SLOTS, MIN_WAIT_MS) + ).toBeUndefined(); + }); + + // ------- TTL eviction ------- + + describe('TTL-based eviction', () => { + it('evicts a run whose TTL has been exceeded', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy({ ttl: '1m' })); + cache.markWaiting('k1/0', T0, 'sleep', 'r'); + const result = cache.selectEvictionCandidate( + T0 + ONE_MIN + 1, + DURABLE_SLOTS, + RESERVE_SLOTS, + MIN_WAIT_MS + ); + expect(result).toBe('k1/0'); + }); + + it('does not evict when TTL has not been exceeded', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy({ ttl: '5m' })); + cache.markWaiting('k1/0', T0, 'sleep', 'r'); + expect( + cache.selectEvictionCandidate(T0 + ONE_MIN, DURABLE_SLOTS, RESERVE_SLOTS, MIN_WAIT_MS) + ).toBeUndefined(); + }); + + it('evicts regardless of capacity when TTL is exceeded', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy({ ttl: '1s' })); + cache.markWaiting('k1/0', T0, 'sleep', 'r'); + const noCapacityPressureSlots = 100; + expect( + cache.selectEvictionCandidate(T0 + 2 * ONE_SEC, noCapacityPressureSlots, 0, MIN_WAIT_MS) + 
).toBe('k1/0'); + }); + + it('picks lowest priority among TTL-eligible candidates', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy({ ttl: '1s', priority: 5 })); + cache.registerRun('k2/0', 'ext-2', 1, T0, makePolicy({ ttl: '1s', priority: 1 })); + cache.markWaiting('k1/0', T0, 'sleep', 'r'); + cache.markWaiting('k2/0', T0, 'sleep', 'r'); + expect( + cache.selectEvictionCandidate(T0 + 2 * ONE_SEC, DURABLE_SLOTS, RESERVE_SLOTS, MIN_WAIT_MS) + ).toBe('k2/0'); + }); + + it('breaks priority ties by longest waiting (earliest waitingSince)', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy({ ttl: '1s', priority: 0 })); + cache.registerRun('k2/0', 'ext-2', 1, T0, makePolicy({ ttl: '1s', priority: 0 })); + cache.markWaiting('k1/0', T0 + 100, 'sleep', 'r'); + cache.markWaiting('k2/0', T0, 'sleep', 'r'); + expect( + cache.selectEvictionCandidate(T0 + 2 * ONE_SEC, DURABLE_SLOTS, RESERVE_SLOTS, MIN_WAIT_MS) + ).toBe('k2/0'); + }); + + it('uses DurationObject TTL', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy({ ttl: { seconds: 30 } })); + cache.markWaiting('k1/0', T0, 'sleep', 'r'); + expect( + cache.selectEvictionCandidate(T0 + 29_000, DURABLE_SLOTS, RESERVE_SLOTS, MIN_WAIT_MS) + ).toBeUndefined(); + expect( + cache.selectEvictionCandidate(T0 + 31_000, DURABLE_SLOTS, RESERVE_SLOTS, MIN_WAIT_MS) + ).toBe('k1/0'); + }); + }); + + // ------- Capacity-based eviction ------- + + describe('capacity-based eviction', () => { + it('evicts under capacity pressure when min wait is met', () => { + for (let i = 0; i < DURABLE_SLOTS; i += 1) { + cache.registerRun(`k${i}/0`, `ext-${i}`, 1, T0, makePolicy()); + cache.markWaiting(`k${i}/0`, T0, 'sleep', 'r'); + } + const result = cache.selectEvictionCandidate( + T0 + MIN_WAIT_MS + 1, + DURABLE_SLOTS, + RESERVE_SLOTS, + MIN_WAIT_MS + ); + expect(result).toBeDefined(); + }); + + it('does not evict when there is no capacity pressure', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy()); 
+ cache.markWaiting('k1/0', T0, 'sleep', 'r'); + expect( + cache.selectEvictionCandidate( + T0 + MIN_WAIT_MS + 1, + DURABLE_SLOTS, + RESERVE_SLOTS, + MIN_WAIT_MS + ) + ).toBeUndefined(); + }); + + it('does not evict when min wait threshold has not been met', () => { + for (let i = 0; i < DURABLE_SLOTS; i += 1) { + cache.registerRun(`k${i}/0`, `ext-${i}`, 1, T0, makePolicy()); + cache.markWaiting(`k${i}/0`, T0, 'sleep', 'r'); + } + expect( + cache.selectEvictionCandidate( + T0 + MIN_WAIT_MS - 1, + DURABLE_SLOTS, + RESERVE_SLOTS, + MIN_WAIT_MS + ) + ).toBeUndefined(); + }); + + it('respects allowCapacityEviction=false', () => { + for (let i = 0; i < DURABLE_SLOTS; i += 1) { + cache.registerRun( + `k${i}/0`, + `ext-${i}`, + 1, + T0, + makePolicy({ allowCapacityEviction: false }) + ); + cache.markWaiting(`k${i}/0`, T0, 'sleep', 'r'); + } + expect( + cache.selectEvictionCandidate( + T0 + MIN_WAIT_MS + 1, + DURABLE_SLOTS, + RESERVE_SLOTS, + MIN_WAIT_MS + ) + ).toBeUndefined(); + }); + + it('skips allowCapacityEviction=false but evicts others', () => { + cache.registerRun( + 'protected/0', + 'ext-p', + 1, + T0, + makePolicy({ allowCapacityEviction: false }) + ); + cache.markWaiting('protected/0', T0, 'sleep', 'r'); + for (let i = 1; i < DURABLE_SLOTS; i += 1) { + cache.registerRun(`k${i}/0`, `ext-${i}`, 1, T0, makePolicy()); + cache.markWaiting(`k${i}/0`, T0, 'sleep', 'r'); + } + const result = cache.selectEvictionCandidate( + T0 + MIN_WAIT_MS + 1, + DURABLE_SLOTS, + RESERVE_SLOTS, + MIN_WAIT_MS + ); + expect(result).toBeDefined(); + expect(result).not.toBe('protected/0'); + }); + + it('picks lowest priority among capacity candidates', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy({ priority: 10 })); + cache.registerRun('k2/0', 'ext-2', 1, T0, makePolicy({ priority: 2 })); + cache.registerRun('k3/0', 'ext-3', 1, T0, makePolicy({ priority: 5 })); + cache.registerRun('k4/0', 'ext-4', 1, T0, makePolicy({ priority: 7 })); + for (const k of ['k1/0', 'k2/0', 
'k3/0', 'k4/0'] as const) { + cache.markWaiting(k, T0, 'sleep', 'r'); + } + expect( + cache.selectEvictionCandidate(T0 + MIN_WAIT_MS + 1, 4, RESERVE_SLOTS, MIN_WAIT_MS) + ).toBe('k2/0'); + }); + + it('breaks capacity priority ties by longest waiting', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy({ priority: 0 })); + cache.registerRun('k2/0', 'ext-2', 1, T0, makePolicy({ priority: 0 })); + cache.registerRun('k3/0', 'ext-3', 1, T0, makePolicy({ priority: 0 })); + cache.registerRun('k4/0', 'ext-4', 1, T0, makePolicy({ priority: 0 })); + cache.markWaiting('k1/0', T0 + 200, 'sleep', 'r'); + cache.markWaiting('k2/0', T0, 'sleep', 'r'); + cache.markWaiting('k3/0', T0 + 100, 'sleep', 'r'); + cache.markWaiting('k4/0', T0 + 300, 'sleep', 'r'); + expect( + cache.selectEvictionCandidate(T0 + MIN_WAIT_MS + 1000, 4, RESERVE_SLOTS, MIN_WAIT_MS) + ).toBe('k2/0'); + }); + }); + + // ------- reserveSlots ------- + + describe('reserveSlots', () => { + it('reserve slots reduce the effective capacity threshold', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy()); + cache.registerRun('k2/0', 'ext-2', 1, T0, makePolicy()); + cache.registerRun('k3/0', 'ext-3', 1, T0, makePolicy()); + for (const k of ['k1/0', 'k2/0', 'k3/0'] as const) { + cache.markWaiting(k, T0, 'sleep', 'r'); + } + + // 4 slots - 2 reserved = 2 effective; 3 waiting >= 2 → pressure + expect( + cache.selectEvictionCandidate(T0 + MIN_WAIT_MS + 1, 4, 2, MIN_WAIT_MS) + ).toBeDefined(); + }); + + it('no pressure when waiting count is below effective threshold', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy()); + cache.markWaiting('k1/0', T0, 'sleep', 'r'); + + // 4 slots - 0 reserved = 4 effective; 1 waiting < 4 → no pressure + expect( + cache.selectEvictionCandidate(T0 + MIN_WAIT_MS + 1, 4, 0, MIN_WAIT_MS) + ).toBeUndefined(); + }); + + it('reserveSlots >= durableSlots means no capacity eviction', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy()); + 
cache.markWaiting('k1/0', T0, 'sleep', 'r'); + expect( + cache.selectEvictionCandidate(T0 + MIN_WAIT_MS + 1, 4, 4, MIN_WAIT_MS) + ).toBeUndefined(); + expect( + cache.selectEvictionCandidate(T0 + MIN_WAIT_MS + 1, 4, 5, MIN_WAIT_MS) + ).toBeUndefined(); + }); + }); + + // ------- durableSlots edge cases ------- + + describe('durableSlots edge cases', () => { + it('durableSlots=0 means no capacity eviction', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy()); + cache.markWaiting('k1/0', T0, 'sleep', 'r'); + expect( + cache.selectEvictionCandidate(T0 + MIN_WAIT_MS + 1, 0, 0, MIN_WAIT_MS) + ).toBeUndefined(); + }); + + it('durableSlots negative means no capacity eviction', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy()); + cache.markWaiting('k1/0', T0, 'sleep', 'r'); + expect( + cache.selectEvictionCandidate(T0 + MIN_WAIT_MS + 1, -1, 0, MIN_WAIT_MS) + ).toBeUndefined(); + }); + }); + + // ------- TTL priority over capacity ------- + + describe('TTL takes precedence over capacity', () => { + it('selects TTL-eligible candidate even when capacity candidates also exist', () => { + cache.registerRun('ttl-run/0', 'ext-t', 1, T0, makePolicy({ ttl: '1s', priority: 10 })); + cache.registerRun('cap-run/0', 'ext-c', 1, T0, makePolicy({ priority: 0 })); + cache.markWaiting('ttl-run/0', T0, 'sleep', 'r'); + cache.markWaiting('cap-run/0', T0, 'sleep', 'r'); + + const result = cache.selectEvictionCandidate(T0 + 2 * ONE_SEC, 2, 0, MIN_WAIT_MS); + expect(result).toBe('ttl-run/0'); + }); + }); + + // ------- evictionReason side-effect ------- + + describe('evictionReason is set on the chosen record', () => { + it('sets TTL reason on TTL eviction', () => { + cache.registerRun('k1/0', 'ext-1', 1, T0, makePolicy({ ttl: '30s' })); + cache.markWaiting('k1/0', T0, 'sleep', 'res-1'); + cache.selectEvictionCandidate(T0 + 31_000, DURABLE_SLOTS, RESERVE_SLOTS, MIN_WAIT_MS); + expect(cache.get('k1/0')!.evictionReason).toMatch(/TTL.*exceeded/); + }); + + it('sets 
capacity reason on capacity eviction', () => { + for (let i = 0; i < DURABLE_SLOTS; i += 1) { + cache.registerRun(`k${i}/0`, `ext-${i}`, 1, T0, makePolicy()); + cache.markWaiting(`k${i}/0`, T0, 'sleep', 'res'); + } + const key = cache.selectEvictionCandidate( + T0 + MIN_WAIT_MS + 1, + DURABLE_SLOTS, + RESERVE_SLOTS, + MIN_WAIT_MS + )!; + expect(cache.get(key)!.evictionReason).toMatch(/capacity/i); + }); + }); + }); +}); + +describe('buildEvictionReason', () => { + function makeRecord(overrides: Partial = {}): DurableRunRecord { + return { + key: 'k1/0', + taskRunExternalId: 'ext-1', + invocationCount: 1, + evictionPolicy: { ttl: '30s', allowCapacityEviction: true, priority: 0 }, + registeredAt: T0, + waitingSince: T0, + waitKind: 'sleep', + waitResourceId: 'res-1', + _waitCount: 1, + evictionReason: undefined, + ...overrides, + }; + } + + it('formats TTL_EXCEEDED with ttl and resource', () => { + const reason = buildEvictionReason(EvictionCause.TTL_EXCEEDED, makeRecord()); + expect(reason).toBe('Wait TTL (30s) exceeded while waiting on sleep(res-1)'); + }); + + it('formats TTL_EXCEEDED without ttl', () => { + const reason = buildEvictionReason( + EvictionCause.TTL_EXCEEDED, + makeRecord({ evictionPolicy: { allowCapacityEviction: true } }) + ); + expect(reason).toBe('Wait TTL exceeded while waiting on sleep(res-1)'); + }); + + it('formats CAPACITY_PRESSURE', () => { + const reason = buildEvictionReason(EvictionCause.CAPACITY_PRESSURE, makeRecord()); + expect(reason).toBe('Worker at capacity while waiting on sleep(res-1)'); + }); + + it('formats WORKER_SHUTDOWN', () => { + const reason = buildEvictionReason(EvictionCause.WORKER_SHUTDOWN, makeRecord()); + expect(reason).toBe('Worker shutdown while waiting on sleep(res-1)'); + }); + + it('handles missing waitKind', () => { + const reason = buildEvictionReason( + EvictionCause.CAPACITY_PRESSURE, + makeRecord({ waitKind: undefined, waitResourceId: undefined }) + ); + expect(reason).toBe('Worker at capacity while waiting 
on unknown'); + }); + + it('handles waitKind without resourceId', () => { + const reason = buildEvictionReason( + EvictionCause.TTL_EXCEEDED, + makeRecord({ waitResourceId: undefined }) + ); + expect(reason).toBe('Wait TTL (30s) exceeded while waiting on sleep'); + }); +}); diff --git a/sdks/typescript/src/v1/client/worker/eviction/eviction-cache.ts b/sdks/typescript/src/v1/client/worker/eviction/eviction-cache.ts new file mode 100644 index 000000000..d02cca59c --- /dev/null +++ b/sdks/typescript/src/v1/client/worker/eviction/eviction-cache.ts @@ -0,0 +1,180 @@ +import { ActionKey } from '@hatchet/clients/dispatcher/action-listener'; +import { EvictionPolicy } from './eviction-policy'; +import { durationToMs } from '../../duration'; + +export type { ActionKey }; + +export enum EvictionCause { + TTL_EXCEEDED = 'ttl_exceeded', + CAPACITY_PRESSURE = 'capacity_pressure', + WORKER_SHUTDOWN = 'worker_shutdown', +} + +export interface DurableRunRecord { + key: ActionKey; + taskRunExternalId: string; + invocationCount: number; + evictionPolicy: EvictionPolicy | undefined; + registeredAt: number; + + waitingSince: number | undefined; + waitKind: string | undefined; + waitResourceId: string | undefined; + // Ref-counted so concurrent waits (e.g. multiple child results via + // Promise.all) don't prematurely clear the waiting flag when one + // child completes before the others. 
+ _waitCount: number; + + evictionReason: string | undefined; +} + +export class DurableEvictionCache { + private _runs = new Map<ActionKey, DurableRunRecord>(); + + registerRun( + key: ActionKey, + taskRunExternalId: string, + invocationCount: number, + now: number, + evictionPolicy: EvictionPolicy | undefined + ): void { + this._runs.set(key, { + key, + taskRunExternalId, + invocationCount, + evictionPolicy, + registeredAt: now, + waitingSince: undefined, + waitKind: undefined, + waitResourceId: undefined, + _waitCount: 0, + evictionReason: undefined, + }); + } + + unregisterRun(key: ActionKey): void { + this._runs.delete(key); + } + + get(key: ActionKey): DurableRunRecord | undefined { + return this._runs.get(key); + } + + getAllWaiting(): DurableRunRecord[] { + return [...this._runs.values()].filter((r) => r._waitCount > 0); + } + + findKeyByTaskRunExternalId(taskRunExternalId: string): ActionKey | undefined { + for (const [key, rec] of this._runs) { + if (rec.taskRunExternalId === taskRunExternalId) return key; + } + return undefined; + } + + markWaiting(key: ActionKey, now: number, waitKind: string, resourceId: string): void { + const rec = this._runs.get(key); + if (!rec) return; + rec._waitCount += 1; + if (rec._waitCount === 1) { + rec.waitingSince = now; + } + rec.waitKind = waitKind; + rec.waitResourceId = resourceId; + } + + markActive(key: ActionKey): void { + const rec = this._runs.get(key); + if (!rec) return; + rec._waitCount = Math.max(0, rec._waitCount - 1); + if (rec._waitCount === 0) { + rec.waitingSince = undefined; + rec.waitKind = undefined; + rec.waitResourceId = undefined; + } + } + + selectEvictionCandidate( + now: number, + durableSlots: number, + reserveSlots: number, + minWaitForCapacityEvictionMs: number + ): ActionKey | undefined { + const waiting = [...this._runs.values()].filter( + (r) => r._waitCount > 0 && r.evictionPolicy !== undefined + ); + + if (waiting.length === 0) return undefined; + + const ttlEligible = waiting.filter((r) => { + const ttl = 
r.evictionPolicy?.ttl; + if (!ttl || !r.waitingSince) return false; + return now - r.waitingSince >= durationToMs(ttl); + }); + + if (ttlEligible.length > 0) { + ttlEligible.sort( + (a, b) => + (a.evictionPolicy?.priority ?? 0) - (b.evictionPolicy?.priority ?? 0) || + (a.waitingSince ?? now) - (b.waitingSince ?? now) + ); + const [chosen] = ttlEligible; + chosen.evictionReason = buildEvictionReason(EvictionCause.TTL_EXCEEDED, chosen); + return chosen.key; + } + + if (!this._hasCapacityPressure(durableSlots, reserveSlots, waiting.length)) { + return undefined; + } + + const capacityCandidates = waiting.filter( + (r) => + r.evictionPolicy?.allowCapacityEviction !== false && + r.waitingSince !== undefined && + now - r.waitingSince >= minWaitForCapacityEvictionMs + ); + + if (capacityCandidates.length === 0) return undefined; + + capacityCandidates.sort( + (a, b) => + (a.evictionPolicy?.priority ?? 0) - (b.evictionPolicy?.priority ?? 0) || + (a.waitingSince ?? now) - (b.waitingSince ?? now) + ); + const [chosen] = capacityCandidates; + chosen.evictionReason = buildEvictionReason(EvictionCause.CAPACITY_PRESSURE, chosen); + return chosen.key; + } + + private _hasCapacityPressure( + durableSlots: number, + reserveSlots: number, + waitingCount: number + ): boolean { + if (durableSlots <= 0) return false; + const maxWaiting = durableSlots - reserveSlots; + if (maxWaiting <= 0) return false; + return waitingCount >= maxWaiting; + } +} + +export function buildEvictionReason(cause: EvictionCause, rec: DurableRunRecord): string { + let waitDesc = rec.waitKind || 'unknown'; + if (rec.waitResourceId) { + waitDesc = `${waitDesc}(${rec.waitResourceId})`; + } + + switch (cause) { + case EvictionCause.TTL_EXCEEDED: { + const ttlStr = rec.evictionPolicy?.ttl ? 
` (${rec.evictionPolicy.ttl})` : ''; + return `Wait TTL${ttlStr} exceeded while waiting on ${waitDesc}`; + } + case EvictionCause.CAPACITY_PRESSURE: + return `Worker at capacity while waiting on ${waitDesc}`; + case EvictionCause.WORKER_SHUTDOWN: + return `Worker shutdown while waiting on ${waitDesc}`; + default: { + const _exhaustive: never = cause; + throw new Error(`Unknown eviction cause: ${_exhaustive}`); + } + } +} diff --git a/sdks/typescript/src/v1/client/worker/eviction/eviction-manager.test.ts b/sdks/typescript/src/v1/client/worker/eviction/eviction-manager.test.ts new file mode 100644 index 000000000..36adcccd4 --- /dev/null +++ b/sdks/typescript/src/v1/client/worker/eviction/eviction-manager.test.ts @@ -0,0 +1,116 @@ +import { Logger } from '@hatchet/util/logger'; +import { DurableEvictionManager } from './eviction-manager'; +import { DurableEvictionCache } from './eviction-cache'; + +class NoopLogger extends Logger { + debug() {} + info() {} + green() {} + warn() {} + error() {} + util() {} +} + +function makeManager() { + const cancelLocal = jest.fn(); + const requestEvictionWithAck = jest.fn().mockResolvedValue(undefined); + const cache = new DurableEvictionCache(); + + const manager = new DurableEvictionManager({ + durableSlots: 10, + cancelLocal, + requestEvictionWithAck, + config: { checkIntervalMs: 3_600_000 }, + cache, + logger: new NoopLogger(), + }); + + return { manager, cancelLocal, requestEvictionWithAck, cache }; +} + +describe('DurableEvictionManager', () => { + describe('handleServerEviction', () => { + it('cancels and unregisters the matching run when invocationCount matches', () => { + const { manager, cancelLocal } = makeManager(); + + manager.registerRun('run-1/0', 'ext-1', 2, { + ttl: '30s', + allowCapacityEviction: true, + priority: 0, + }); + manager.markWaiting('run-1/0', 'sleep', 's1'); + + manager.handleServerEviction('ext-1', 2); + + expect(cancelLocal).toHaveBeenCalledWith('run-1/0'); + 
expect(manager.cache.get('run-1/0')).toBeUndefined(); + }); + + it('is a no-op for unknown taskRunExternalId', () => { + const { manager, cancelLocal } = makeManager(); + + manager.registerRun('run-1/0', 'ext-1', 1, undefined); + + manager.handleServerEviction('no-such-id', 1); + + expect(cancelLocal).not.toHaveBeenCalled(); + expect(manager.cache.get('run-1/0')).toBeDefined(); + }); + + it('only evicts the matching run, not others', () => { + const { manager, cancelLocal } = makeManager(); + + manager.registerRun('run-1/0', 'ext-1', 1, { + ttl: '30s', + allowCapacityEviction: true, + priority: 0, + }); + manager.registerRun('run-2/0', 'ext-2', 1, { + ttl: '30s', + allowCapacityEviction: true, + priority: 0, + }); + manager.markWaiting('run-1/0', 'sleep', 's1'); + manager.markWaiting('run-2/0', 'sleep', 's2'); + + manager.handleServerEviction('ext-1', 1); + + expect(cancelLocal).toHaveBeenCalledTimes(1); + expect(cancelLocal).toHaveBeenCalledWith('run-1/0'); + expect(manager.cache.get('run-1/0')).toBeUndefined(); + expect(manager.cache.get('run-2/0')).toBeDefined(); + }); + + it('does not evict when invocationCount does not match (newer invocation)', () => { + const { manager, cancelLocal } = makeManager(); + + manager.registerRun('run-1/0', 'ext-1', 3, { + ttl: '30s', + allowCapacityEviction: true, + priority: 0, + }); + manager.markWaiting('run-1/0', 'sleep', 's1'); + + manager.handleServerEviction('ext-1', 2); + + expect(cancelLocal).not.toHaveBeenCalled(); + expect(manager.cache.get('run-1/0')).toBeDefined(); + }); + + it('evicts when invocationCount matches exactly', () => { + const { manager, cancelLocal } = makeManager(); + + manager.registerRun('run-1/0', 'ext-1', 5, { + ttl: '30s', + allowCapacityEviction: true, + priority: 0, + }); + manager.markWaiting('run-1/0', 'sleep', 's1'); + + manager.handleServerEviction('ext-1', 5); + + expect(cancelLocal).toHaveBeenCalledWith('run-1/0'); + expect(manager.cache.get('run-1/0')).toBeUndefined(); + }); + }); +}); 
diff --git a/sdks/typescript/src/v1/client/worker/eviction/eviction-manager.ts b/sdks/typescript/src/v1/client/worker/eviction/eviction-manager.ts new file mode 100644 index 000000000..e2ab9fa1d --- /dev/null +++ b/sdks/typescript/src/v1/client/worker/eviction/eviction-manager.ts @@ -0,0 +1,181 @@ +import { Logger } from '@hatchet/util/logger'; +import { EvictionPolicy } from './eviction-policy'; +import { + ActionKey, + DurableEvictionCache, + DurableRunRecord, + EvictionCause, + buildEvictionReason, +} from './eviction-cache'; +import { getErrorMessage } from '@hatchet/util/errors/hatchet-error'; + +export interface DurableEvictionConfig { + /** How often we try selecting an eviction candidate. Default: 1000ms */ + checkIntervalMs?: number; + /** How many slots to reserve from capacity-based eviction decisions. Default: 0 */ + reserveSlots?: number; + /** Avoid immediately evicting runs that just entered a wait. Default: 10000ms */ + minWaitForCapacityEvictionMs?: number; +} + +export const DEFAULT_DURABLE_EVICTION_CONFIG: Required = { + checkIntervalMs: 1000, + reserveSlots: 0, + minWaitForCapacityEvictionMs: 10_000, +}; + +export class DurableEvictionManager { + private _durableSlots: number; + private _cancelLocal: (key: ActionKey) => void; + private _requestEvictionWithAck: (key: ActionKey, rec: DurableRunRecord) => Promise; + private _config: Required; + private _cache: DurableEvictionCache; + private _logger: Logger; + + private _timer: ReturnType | undefined; + private _ticking = false; + + constructor(opts: { + durableSlots: number; + cancelLocal: (key: ActionKey) => void; + requestEvictionWithAck: (key: ActionKey, rec: DurableRunRecord) => Promise; + config?: DurableEvictionConfig; + cache?: DurableEvictionCache; + logger: Logger; + }) { + this._durableSlots = opts.durableSlots; + this._cancelLocal = opts.cancelLocal; + this._requestEvictionWithAck = opts.requestEvictionWithAck; + this._config = { ...DEFAULT_DURABLE_EVICTION_CONFIG, ...opts.config }; + 
this._cache = opts.cache || new DurableEvictionCache(); + this._logger = opts.logger; + } + + get cache(): DurableEvictionCache { + return this._cache; + } + + start(): void { + if (this._timer) return; + this._timer = setInterval(() => this._tickSafe(), this._config.checkIntervalMs); + } + + stop(): void { + if (this._timer) { + clearInterval(this._timer); + this._timer = undefined; + } + } + + registerRun( + key: ActionKey, + taskRunExternalId: string, + invocationCount: number, + evictionPolicy: EvictionPolicy | undefined + ): void { + this._cache.registerRun(key, taskRunExternalId, invocationCount, Date.now(), evictionPolicy); + } + + unregisterRun(key: ActionKey): void { + this._cache.unregisterRun(key); + } + + markWaiting(key: ActionKey, waitKind: string, resourceId: string): void { + this._cache.markWaiting(key, Date.now(), waitKind, resourceId); + } + + markActive(key: ActionKey): void { + this._cache.markActive(key); + } + + private _evictRun(key: ActionKey): void { + this._cancelLocal(key); + this.unregisterRun(key); + } + + private async _tickSafe(): Promise { + if (this._ticking) return; + this._ticking = true; + try { + await this._tick(); + } catch (err: unknown) { + this._logger.error(`DurableEvictionManager: error in eviction loop: ${getErrorMessage(err)}`); + } finally { + this._ticking = false; + } + } + + private async _tick(): Promise { + const evictedThisTick = new Set(); + + while (true) { + const key = this._cache.selectEvictionCandidate( + Date.now(), + this._durableSlots, + this._config.reserveSlots, + this._config.minWaitForCapacityEvictionMs + ); + + if (!key) return; + if (evictedThisTick.has(key)) return; + evictedThisTick.add(key); + + const rec = this._cache.get(key); + if (!rec || !rec.evictionPolicy) continue; + + this._logger.debug( + `DurableEvictionManager: evicting task_run_external_id=${rec.taskRunExternalId} ` + + `wait_kind=${rec.waitKind} resource_id=${rec.waitResourceId}` + ); + + await this._requestEvictionWithAck(key, 
rec); + this._evictRun(key); + } + } + + handleServerEviction(taskRunExternalId: string, invocationCount: number): void { + const key = this._cache.findKeyByTaskRunExternalId(taskRunExternalId); + if (!key) return; + + const rec = this._cache.get(key); + if (rec && rec.invocationCount !== invocationCount) return; + + this._logger.info( + `DurableEvictionManager: server-initiated eviction for task_run_external_id=${taskRunExternalId} invocation_count=${invocationCount}` + ); + this._evictRun(key); + } + + async evictAllWaiting(): Promise { + this.stop(); + + const waiting = this._cache.getAllWaiting(); + let evicted = 0; + + for (const rec of waiting) { + rec.evictionReason = buildEvictionReason(EvictionCause.WORKER_SHUTDOWN, rec); + + this._logger.debug( + `DurableEvictionManager: shutdown-evicting task_run_external_id=${rec.taskRunExternalId} ` + + `wait_kind=${rec.waitKind}` + ); + + try { + await this._requestEvictionWithAck(rec.key, rec); + } catch (err: unknown) { + this._logger.error( + `DurableEvictionManager: failed to send eviction for ` + + `task_run_external_id=${rec.taskRunExternalId}: ${getErrorMessage(err)}` + ); + } + + // Always cancel locally even if the server ACK failed, so the + // future settles and exitGracefully doesn't hang. + // This will get resolved by the reassignment of the task. + this._evictRun(rec.key); + evicted++; + } + + return evicted; + } +} diff --git a/sdks/typescript/src/v1/client/worker/eviction/eviction-policy.ts b/sdks/typescript/src/v1/client/worker/eviction/eviction-policy.ts new file mode 100644 index 000000000..e43fb1a60 --- /dev/null +++ b/sdks/typescript/src/v1/client/worker/eviction/eviction-policy.ts @@ -0,0 +1,27 @@ +import { Duration } from '../../duration'; + +export type EvictionPolicy = { + /** + * Maximum continuous waiting duration before TTL-eligible eviction. + * `undefined` means no TTL-based eviction. + */ + ttl?: Duration; + + /** + * Whether this task may be evicted under durable-slot pressure. 
+ * @default true + */ + allowCapacityEviction?: boolean; + + /** + * Lower values are evicted first when multiple candidates exist. + * @default 0 + */ + priority?: number; +}; + +export const DEFAULT_DURABLE_TASK_EVICTION_POLICY: EvictionPolicy = { + ttl: '15m', + allowCapacityEviction: true, + priority: 0, +}; diff --git a/sdks/typescript/src/v1/client/worker/eviction/index.ts b/sdks/typescript/src/v1/client/worker/eviction/index.ts new file mode 100644 index 000000000..faa7fdbe2 --- /dev/null +++ b/sdks/typescript/src/v1/client/worker/eviction/index.ts @@ -0,0 +1,7 @@ +export { EvictionPolicy, DEFAULT_DURABLE_TASK_EVICTION_POLICY } from './eviction-policy'; +export { DurableEvictionCache, ActionKey, DurableRunRecord, EvictionCause } from './eviction-cache'; +export { + DurableEvictionManager, + DurableEvictionConfig, + DEFAULT_DURABLE_EVICTION_CONFIG, +} from './eviction-manager'; diff --git a/sdks/typescript/src/v1/client/worker/worker-cancel-supervision.test.ts b/sdks/typescript/src/v1/client/worker/worker-cancel-supervision.test.ts index 73dbc3ab1..1252d83b3 100644 --- a/sdks/typescript/src/v1/client/worker/worker-cancel-supervision.test.ts +++ b/sdks/typescript/src/v1/client/worker/worker-cancel-supervision.test.ts @@ -1,5 +1,7 @@ import { InternalWorker } from '@hatchet/v1/client/worker/worker-internal'; +import { createAction } from '@hatchet/clients/dispatcher/action-listener'; import HatchetPromise, { CancellationReason } from '@util/hatchet-promise/hatchet-promise'; +import { ActionType } from '@hatchet-dev/typescript-sdk/protoc/dispatcher'; describe('V1Worker handleCancelStepRun cancellation supervision', () => { beforeEach(() => { @@ -19,6 +21,8 @@ describe('V1Worker handleCancelStepRun cancellation supervision', () => { }; const taskExternalId = 'task-1'; + const retryCount = 0; + const actionKey = `${taskExternalId}/${retryCount}`; // Use the real HatchetPromise behavior: cancel rejects the wrapper immediately, // while the underlying work 
(`inner`) continues. @@ -43,11 +47,32 @@ describe('V1Worker handleCancelStepRun cancellation supervision', () => { }, }, cancellingTaskRuns: new Set(), - futures: { [taskExternalId]: future }, - contexts: { [taskExternalId]: ctx }, + evictionManager: undefined, + futures: { [actionKey]: future }, + contexts: { [actionKey]: ctx }, + cleanupRun(id: string) { + this.evictionManager?.unregisterRun(id); + delete this.futures[id]; + delete this.contexts[id]; + }, }; - const action: any = { taskRunExternalId: taskExternalId }; + const action = createAction({ + taskRunExternalId: taskExternalId, + retryCount, + tenantId: 'tenant-1', + workflowRunId: 'workflow-run-1', + getGroupKeyRunId: '', + jobId: 'job-1', + jobName: 'job-1', + jobRunId: 'job-run-1', + taskId: 'task-1', + actionId: 'action-1', + actionType: ActionType.START_STEP_RUN, + actionPayload: 'action-payload-1', + taskName: 'task-1', + priority: 1, + }); const p = InternalWorker.prototype.handleCancelStepRun.call(fakeThis, action); @@ -58,8 +83,8 @@ describe('V1Worker handleCancelStepRun cancellation supervision', () => { expect(cancelSpy).toHaveBeenCalled(); expect(logger.warn).toHaveBeenCalled(); - expect(fakeThis.futures[taskExternalId]).toBeUndefined(); - expect(fakeThis.contexts[taskExternalId]).toBeUndefined(); + expect(fakeThis.futures[actionKey]).toBeUndefined(); + expect(fakeThis.contexts[actionKey]).toBeUndefined(); }); it('suppresses "was cancelled" debug log when cancellation is supervised', async () => { diff --git a/sdks/typescript/src/v1/client/worker/worker-internal.ts b/sdks/typescript/src/v1/client/worker/worker-internal.ts index 985c17b72..6b21f192e 100644 --- a/sdks/typescript/src/v1/client/worker/worker-internal.ts +++ b/sdks/typescript/src/v1/client/worker/worker-internal.ts @@ -1,5 +1,9 @@ import HatchetError from '@util/errors/hatchet-error'; -import { Action, ActionListener } from '@clients/dispatcher/action-listener'; +import { + TaskRunTerminatedError, + isTaskRunTerminatedError, +} 
from '@util/errors/task-run-terminated-error'; +import { Action, ActionKey, ActionListener } from '@clients/dispatcher/action-listener'; import { StepActionEvent, StepActionEventType, @@ -9,11 +13,7 @@ import { actionTypeFromJSON, } from '@hatchet/protoc/dispatcher'; import HatchetPromise, { CancellationReason } from '@util/hatchet-promise/hatchet-promise'; -import { - CreateStepRateLimit, - DesiredWorkerLabels, - StickyStrategy, -} from '@hatchet/protoc/workflows'; +import { CreateStepRateLimit, StickyStrategy } from '@hatchet/protoc/workflows'; import { actionMap, Logger, taskRunLog } from '@hatchet/util/logger'; import { BaseWorkflowDeclaration, WorkflowDefinition, HatchetClient } from '@hatchet/v1'; import { CreateTaskOpts } from '@hatchet/protoc/v1/workflows'; @@ -31,10 +31,16 @@ import { WorkerLabels } from '@hatchet/clients/dispatcher/dispatcher-client'; import { applyNamespace } from '@hatchet/util/apply-namespace'; import sleep from '@hatchet/util/sleep'; import { throwIfAborted } from '@hatchet/util/abort-error'; +import { DesiredWorkerLabels } from '@hatchet-dev/typescript-sdk/protoc/v1/shared/trigger'; +import { Duration, durationToString } from '../duration'; import { Context, DurableContext } from './context'; import { parentRunContextManager } from '../../parent-run-context-vars'; import { HealthServer, workerStatus, type WorkerStatus } from './health-server'; import { SlotConfig } from '../../slot-types'; +import { DurableEvictionManager } from './eviction/eviction-manager'; +import { EvictionPolicy, DEFAULT_DURABLE_TASK_EVICTION_POLICY } from './eviction/eviction-policy'; +import { DurableRunRecord } from './eviction/eviction-cache'; +import { supportsEviction } from './engine-version'; // eslint-disable-next-line @typescript-eslint/no-unsafe-function-type export type ActionRegistry = Record; @@ -57,13 +63,17 @@ export class InternalWorker { handle_kill: boolean; action_registry: ActionRegistry; + durable_action_set: Set = new Set(); + 
eviction_policies: Map = new Map(); + evictionManager: DurableEvictionManager | undefined; workflow_registry: Array = []; listener: ActionListener | undefined; - futures: Record> = {}; - contexts: Record> = {}; + futures: Record> = {}; + contexts: Record> = {}; slots?: number; durableSlots?: number; slotConfig: SlotConfig; + engineVersion: string | undefined; logger: Logger; @@ -160,12 +170,19 @@ export class InternalWorker { const newActions = workflow._durableTasks .filter((task) => !!task.fn) .reduce((acc, task) => { - acc[ - `${applyNamespace( - workflow.name, - this.client.config.namespace - ).toLowerCase()}:${task.name.toLowerCase()}` - ] = (ctx: Context) => task.fn!(ctx.input, ctx as DurableContext); + const actionId = `${applyNamespace( + workflow.name, + this.client.config.namespace + ).toLowerCase()}:${task.name.toLowerCase()}`; + acc[actionId] = (ctx: Context) => + task.fn!(ctx.input, ctx as DurableContext); + this.durable_action_set.add(actionId); + this.eviction_policies.set( + actionId, + task.evictionPolicy !== undefined + ? task.evictionPolicy + : DEFAULT_DURABLE_TASK_EVICTION_POLICY + ); return acc; }, {}); @@ -239,12 +256,15 @@ export class InternalWorker { if (workflow.onFailure && typeof workflow.onFailure === 'object') { const onFailure = workflow.onFailure as CreateOnFailureTaskOpts; + const scheduleTimeout = onFailure.scheduleTimeout ?? workflow.taskDefaults?.scheduleTimeout; onFailureTask = { readableId: 'on-failure-task', action: onFailureTaskName(workflow), - timeout: onFailure.executionTimeout || workflow.taskDefaults?.executionTimeout || '60s', - scheduleTimeout: onFailure.scheduleTimeout || workflow.taskDefaults?.scheduleTimeout, + timeout: durationToString( + onFailure.executionTimeout || workflow.taskDefaults?.executionTimeout || '60s' + ), + scheduleTimeout: scheduleTimeout ? 
durationToString(scheduleTimeout) : undefined, inputs: '{}', parents: [], retries: onFailure.retries || workflow.taskDefaults?.retries || 0, @@ -368,12 +388,8 @@ export class InternalWorker { tasks: [...workflow._tasks, ...workflow._durableTasks].map((task) => ({ readableId: task.name, action: `${workflow.name}:${task.name}`, - timeout: - task.executionTimeout || - task.timeout || - workflow.taskDefaults?.executionTimeout || - '60s', - scheduleTimeout: task.scheduleTimeout || workflow.taskDefaults?.scheduleTimeout, + timeout: resolveExecutionTimeout(task, workflow.taskDefaults), + scheduleTimeout: resolveScheduleTimeout(task, workflow.taskDefaults), inputs: '{}', parents: task.parents?.map((p) => p.name) ?? [], userData: '{}', @@ -416,19 +432,109 @@ export class InternalWorker { this.registerActions(workflow); } + private ensureEvictionManager(): DurableEvictionManager { + if (this.evictionManager) return this.evictionManager; + + const totalDurableSlots = this.slotConfig?.durable ?? this.durableSlots ?? 0; + + this.evictionManager = new DurableEvictionManager({ + durableSlots: totalDurableSlots, + cancelLocal: (key: ActionKey) => { + const err = new TaskRunTerminatedError('evicted'); + const ctx = this.contexts[key] as DurableContext | undefined; + if (ctx) { + const invocationCount = ctx.invocationCount ?? 1; + this.client.durableListener.cleanupTaskState( + ctx.action.taskRunExternalId, + invocationCount + ); + if (ctx.abortController) { + ctx.abortController.abort(err); + } + } + const future = this.futures[key]; + if (future) { + future.promise.catch(() => undefined); + future.cancel(CancellationReason.EVICTED_BY_WORKER); + } + }, + requestEvictionWithAck: async (key: ActionKey, rec: DurableRunRecord) => { + const ctx = this.contexts[key] as DurableContext | undefined; + const invocationCount = ctx?.invocationCount ?? 
1; + await this.client.durableListener.sendEvictInvocation( + rec.taskRunExternalId, + invocationCount, + rec.evictionReason + ); + }, + logger: this.logger, + }); + + this.client.durableListener.onServerEvict = (durableTaskExternalId, invocationCount) => { + this.evictionManager?.handleServerEviction(durableTaskExternalId, invocationCount); + }; + + this.evictionManager.start(); + return this.evictionManager; + } + + private cleanupRun(key: ActionKey): void { + const ctx = this.contexts[key]; + if (ctx instanceof DurableContext) { + this.client.durableListener.cleanupTaskState( + ctx.action.taskRunExternalId, + ctx.invocationCount + ); + } + this.evictionManager?.unregisterRun(key); + delete this.futures[key]; + delete this.contexts[key]; + } + async handleStartStepRun(action: Action) { const { actionId, taskRunExternalId, taskName } = action; + const actionKey = action.key; try { - // Note: we always use a DurableContext since its a superset of the Context class - const context = new DurableContext(action, this.client, this); - this.contexts[taskRunExternalId] = context; + const isDurable = this.durable_action_set.has(actionId); + let context: Context; + + if (isDurable) { + const { durableListener } = this.client; + let mgr: DurableEvictionManager | undefined; + + if (supportsEviction(this.engineVersion)) { + await durableListener.ensureStarted(this.workerId || ''); + mgr = this.ensureEvictionManager(); + const evictionPolicy = this.eviction_policies.get(actionId); + mgr.registerRun( + actionKey, + taskRunExternalId, + action.durableTaskInvocationCount ?? 
1, + evictionPolicy + ); + } + + context = new DurableContext( + action, + this.client, + this, + durableListener, + mgr, + this.engineVersion + ); + } else { + context = new Context(action, this.client, this); + } + + this.contexts[actionKey] = context; const step = this.action_registry[actionId]; if (!step) { this.logger.error(`Registered actions: '${Object.keys(this.action_registry).join(', ')}'`); this.logger.error(`Could not find step '${actionId}'`); + this.cleanupRun(actionKey); return; } @@ -456,9 +562,9 @@ export class InternalWorker { childIndex: 0, desiredWorkerId: this.workerId || '', signal: context.abortController.signal, + durableContext: isDurable && context instanceof DurableContext ? context : undefined, }, () => { - // Precheck: if cancellation already happened, don't execute user code. throwIfAborted(context.abortController.signal); return step(context); } @@ -485,7 +591,6 @@ export class InternalWorker { this.logger.info(taskRunLog(taskName, taskRunExternalId, 'completed')); - // Send the action event to the dispatcher const event = this.getStepActionEvent( action, StepActionEventType.STEP_EVENT_TYPE_COMPLETED, @@ -499,7 +604,6 @@ export class InternalWorker { `Could not send completed action event: ${actionEventError.message || actionEventError}` ); - // send a failure event const failureEvent = this.getStepActionEvent( action, StepActionEventType.STEP_EVENT_TYPE_FAILED, @@ -520,9 +624,7 @@ export class InternalWorker { `Could not send action event: ${actionEventError.message || actionEventError}` ); } finally { - // delete the run from the futures - delete this.futures[taskRunExternalId]; - delete this.contexts[taskRunExternalId]; + this.cleanupRun(actionKey); } }; @@ -540,7 +642,6 @@ export class InternalWorker { this.logger.error(error.stack); } - // Send the action event to the dispatcher const event = this.getStepActionEvent( action, StepActionEventType.STEP_EVENT_TYPE_FAILED, @@ -555,9 +656,7 @@ export class InternalWorker { } catch (e: 
any) { this.logger.error(`Could not send action event: ${e.message}`); } finally { - // delete the run from the futures - delete this.futures[taskRunExternalId]; - delete this.contexts[taskRunExternalId]; + this.cleanupRun(actionKey); } }; @@ -586,7 +685,7 @@ export class InternalWorker { await success(result); })() ); - this.futures[taskRunExternalId] = future; + this.futures[actionKey] = future; // Send the action event to the dispatcher const event = this.getStepActionEvent( @@ -603,115 +702,22 @@ export class InternalWorker { try { await future.promise; } catch (e: any) { - const message = e?.message || String(e); - // TODO is this cased correctly... - if (!message.includes('Cancelled')) { + if (!isTaskRunTerminatedError(e)) { this.logger.error( `Could not wait for task run ${taskRunExternalId} to finish. ` + `See https://docs.hatchet.run/home/cancellation for best practices on handling cancellation: `, e ); } + } finally { + this.cleanupRun(actionKey); } } catch (e: any) { + this.cleanupRun(actionKey); this.logger.error('Could not send action event (outer): ', e); } } - async handleStartGroupKeyRun(action: Action) { - const { actionId, getGroupKeyRunId, taskRunExternalId, taskName } = action; - - this.logger.error( - 'Concurrency Key Functions have been deprecated and will be removed in a future release. Use Concurrency Expressions instead.' 
- ); - - try { - const context = new Context(action, this.client, this); - - const key = getGroupKeyRunId; - - if (!key) { - this.logger.error(`No group key run id provided for action ${actionId}`); - return; - } - - this.contexts[key] = context; - - this.logger.debug(`Starting group key run ${key}`); - - const step = this.action_registry[actionId]; - - if (!step) { - this.logger.error(`Could not find step '${actionId}'`); - return; - } - - const run = async () => { - return step(context); - }; - - const success = (result: any) => { - this.logger.info(taskRunLog(taskName, taskRunExternalId, 'completed')); - - try { - // Send the action event to the dispatcher - const event = this.getGroupKeyActionEvent( - action, - GroupKeyActionEventType.GROUP_KEY_EVENT_TYPE_COMPLETED, - result - ); - this.client.dispatcher.sendGroupKeyActionEvent(event).catch((e) => { - this.logger.error(`Could not send action event: ${e.message}`); - }); - } catch (e: any) { - this.logger.error(`Could not send action event: ${e.message}`); - } finally { - // delete the run from the futures - delete this.futures[key]; - delete this.contexts[key]; - } - }; - - const failure = (error: any) => { - this.logger.error(taskRunLog(taskName, taskRunExternalId, `failed: ${error.message}`)); - - try { - // Send the action event to the dispatcher - const event = this.getGroupKeyActionEvent( - action, - GroupKeyActionEventType.GROUP_KEY_EVENT_TYPE_FAILED, - error - ); - this.client.dispatcher.sendGroupKeyActionEvent(event).catch((e) => { - this.logger.error(`Could not send action event: ${e.message}`); - }); - } catch (e: any) { - this.logger.error(`Could not send action event: ${e.message}`); - } finally { - // delete the run from the futures - delete this.futures[key]; - delete this.contexts[key]; - } - }; - - const future = new HatchetPromise(run().then(success).catch(failure)); - this.futures[key] = future; - - // Send the action event to the dispatcher - const event = this.getGroupKeyActionEvent( - 
action, - GroupKeyActionEventType.GROUP_KEY_EVENT_TYPE_STARTED - ); - this.client.dispatcher.sendGroupKeyActionEvent(event).catch((e) => { - this.logger.error(`Could not send action event: ${e.message}`); - }); - - await future.promise; - } catch (e: any) { - this.logger.error(`Could not send action event: ${e.message}`); - } - } - getStepActionEvent( action: Action, eventType: StepActionEventType, @@ -755,13 +761,15 @@ export class InternalWorker { async handleCancelStepRun(action: Action) { const { taskRunExternalId, taskName } = action; + const actionKey = action.key; try { - const future = this.futures[taskRunExternalId]; - const context = this.contexts[taskRunExternalId]; + const future = this.futures[actionKey]; + const context = this.contexts[actionKey]; + const cancelErr = new TaskRunTerminatedError('cancelled', 'Cancelled by worker'); if (context && context.abortController) { - context.abortController.abort('Cancelled by worker'); // TODO this reason is nonsensical + context.abortController.abort(cancelErr); } if (future) { @@ -822,8 +830,7 @@ export class InternalWorker { `Cancellation: error while supervising cancellation for task run ${taskRunExternalId}: ${e?.message || e}` ); } finally { - delete this.futures[taskRunExternalId]; - delete this.contexts[taskRunExternalId]; + this.cleanupRun(actionKey); } } @@ -837,6 +844,33 @@ export class InternalWorker { this.logger.info('Starting to exit...'); + // Pause the worker on the server so it stops receiving new task assignments + // before we evict waiting durable runs, mirroring Python's pause_task_assignment(). 
+ if (this.workerId) { + try { + await this.client.workers.pause(this.workerId); + } catch (e: any) { + this.logger.error(`Could not pause worker: ${e.message}`); + } + } + + if (this.evictionManager) { + try { + const evicted = await this.evictionManager.evictAllWaiting(); + if (evicted > 0) { + this.logger.info(`Evicted ${evicted} waiting durable run(s) during shutdown`); + } + } catch (e: any) { + this.logger.error(`Could not evict waiting runs: ${e.message}`); + } + } + + try { + await this.client.durableListener.stop(); + } catch (e: any) { + this.logger.error(`Could not stop durable listener: ${e.message}`); + } + try { await this.listener?.unregister(); } catch (e: any) { @@ -927,17 +961,26 @@ export class InternalWorker { } async handleAction(action: Action) { - const type = action.actionType - ? actionTypeFromJSON(action.actionType) - : ActionType.START_STEP_RUN; - if (type === ActionType.START_STEP_RUN) { - await this.handleStartStepRun(action); - } else if (type === ActionType.CANCEL_STEP_RUN) { - await this.handleCancelStepRun(action); - } else if (type === ActionType.START_GET_GROUP_KEY) { - await this.handleStartGroupKeyRun(action); - } else { - this.logger.error(`Worker ${this.name} received unknown action type ${type}`); + const type = actionTypeFromJSON(action.actionType) || ActionType.START_STEP_RUN; + switch (type) { + case ActionType.START_STEP_RUN: + return this.handleStartStepRun(action); + case ActionType.CANCEL_STEP_RUN: + return this.handleCancelStepRun(action); + case ActionType.START_GET_GROUP_KEY: + this.logger.error( + `Worker ${this.name} received unsupported action type START_GET_GROUP_KEY, please upgrade to V1...` + ); + return Promise.resolve(); + case ActionType.UNRECOGNIZED: + this.logger.error( + `Worker ${this.name} received unrecognized action type ${action.actionType}` + ); + return Promise.resolve(); + default: { + const _: never = type; + throw new Error(`Unhandled action type: ${_}`); + } } } @@ -1105,3 +1148,20 @@ function 
validateCelExpression(_expr: string): boolean { // For now, we'll just return true to mimic the behavior. return true; } + +export function resolveExecutionTimeout( + task: { executionTimeout?: Duration; timeout?: Duration }, + workflowDefaults?: { executionTimeout?: Duration } +): string { + return durationToString( + task.executionTimeout || task.timeout || workflowDefaults?.executionTimeout || '60s' + ); +} + +export function resolveScheduleTimeout( + task: { scheduleTimeout?: Duration }, + workflowDefaults?: { scheduleTimeout?: Duration } +): string | undefined { + const value = task.scheduleTimeout || workflowDefaults?.scheduleTimeout; + return value ? durationToString(value) : undefined; +} diff --git a/sdks/typescript/src/v1/client/worker/worker-timeouts.test.ts b/sdks/typescript/src/v1/client/worker/worker-timeouts.test.ts new file mode 100644 index 000000000..5114009c0 --- /dev/null +++ b/sdks/typescript/src/v1/client/worker/worker-timeouts.test.ts @@ -0,0 +1,77 @@ +import { resolveExecutionTimeout, resolveScheduleTimeout } from './worker-internal'; + +describe('resolveExecutionTimeout', () => { + it('uses task.executionTimeout when set', () => { + expect(resolveExecutionTimeout({ executionTimeout: '30s' })).toBe('30s'); + }); + + it('falls back to deprecated task.timeout', () => { + expect(resolveExecutionTimeout({ timeout: '45s' })).toBe('45s'); + }); + + it('prefers executionTimeout over deprecated timeout', () => { + expect(resolveExecutionTimeout({ executionTimeout: '30s', timeout: '45s' })).toBe('30s'); + }); + + it('falls back to workflow taskDefaults.executionTimeout', () => { + expect(resolveExecutionTimeout({}, { executionTimeout: '2m' })).toBe('2m'); + }); + + it('task-level timeout beats workflow defaults', () => { + expect(resolveExecutionTimeout({ timeout: '45s' }, { executionTimeout: '2m' })).toBe('45s'); + }); + + it('task-level executionTimeout beats workflow defaults', () => { + expect(resolveExecutionTimeout({ executionTimeout: '30s' }, 
{ executionTimeout: '2m' })).toBe( + '30s' + ); + }); + + it('defaults to 60s when nothing is set', () => { + expect(resolveExecutionTimeout({})).toBe('60s'); + }); + + it('defaults to 60s when workflow defaults are empty', () => { + expect(resolveExecutionTimeout({}, {})).toBe('60s'); + }); + + it('handles DurationObject for executionTimeout', () => { + expect(resolveExecutionTimeout({ executionTimeout: { minutes: 5 } })).toBe('5m'); + }); + + it('handles DurationObject for deprecated timeout', () => { + expect(resolveExecutionTimeout({ timeout: { hours: 1, minutes: 30 } })).toBe('1h30m'); + }); + + it('handles DurationObject in workflow defaults', () => { + expect(resolveExecutionTimeout({}, { executionTimeout: { seconds: 90 } })).toBe('90s'); + }); +}); + +describe('resolveScheduleTimeout', () => { + it('uses task.scheduleTimeout when set', () => { + expect(resolveScheduleTimeout({ scheduleTimeout: '10m' })).toBe('10m'); + }); + + it('falls back to workflow taskDefaults.scheduleTimeout', () => { + expect(resolveScheduleTimeout({}, { scheduleTimeout: '15m' })).toBe('15m'); + }); + + it('task-level beats workflow defaults', () => { + expect(resolveScheduleTimeout({ scheduleTimeout: '10m' }, { scheduleTimeout: '15m' })).toBe( + '10m' + ); + }); + + it('returns undefined when nothing is set', () => { + expect(resolveScheduleTimeout({})).toBeUndefined(); + }); + + it('returns undefined when workflow defaults are empty', () => { + expect(resolveScheduleTimeout({}, {})).toBeUndefined(); + }); + + it('handles DurationObject for scheduleTimeout', () => { + expect(resolveScheduleTimeout({ scheduleTimeout: { minutes: 10 } })).toBe('10m'); + }); +}); diff --git a/sdks/typescript/src/v1/client/worker/worker.ts b/sdks/typescript/src/v1/client/worker/worker.ts index 879b35b3b..792c6338c 100644 --- a/sdks/typescript/src/v1/client/worker/worker.ts +++ b/sdks/typescript/src/v1/client/worker/worker.ts @@ -6,7 +6,13 @@ import { normalizeWorkflows } from 
'../../../legacy/legacy-transformer'; import { HatchetClient } from '../..'; import { InternalWorker } from './worker-internal'; import { resolveWorkerOptions, type WorkerSlotOptions } from './slot-utils'; -import { isLegacyEngine, LegacyDualWorker } from './deprecated'; +import { + isLegacyEngine, + fetchEngineVersion, + LegacyDualWorker, + emitDeprecationNotice, +} from './deprecated'; +import { MinEngineVersion, supportsEviction } from './engine-version'; /** * Options for creating a new hatchet worker @@ -125,9 +131,44 @@ export class Worker { this._legacyWorker = await LegacyDualWorker.create(this._v1, this.name, legacyConfig); return this._legacyWorker.start(); } + + const engineVersion = await fetchEngineVersion(this._v1).catch(() => undefined); + this._checkEvictionSupport(engineVersion); + this._internal.engineVersion = engineVersion; + return this._internal.start(); } + private _checkEvictionSupport(engineVersion: string | undefined): void { + if (supportsEviction(engineVersion)) return; + + const workflows = (this.config.workflows || []) as BaseWorkflowDeclaration[]; + const tasksWithEviction: string[] = []; + + for (const wf of workflows) { + if (!(wf instanceof BaseWorkflowDeclaration)) continue; + for (const task of wf.definition._durableTasks) { + if (task.evictionPolicy) { + tasksWithEviction.push(`${wf.definition.name}:${task.name}`); + } + } + } + + if (tasksWithEviction.length === 0) return; + + const names = tasksWithEviction.join(', '); + const logger = this._v1.config.logger('Worker', this._v1.config.log_level); + emitDeprecationNotice( + 'pre-eviction-engine', + `Engine ${engineVersion || 'unknown'} does not support durable eviction ` + + `(requires >= ${MinEngineVersion.DURABLE_EVICTION}). ` + + `Eviction policies will be ignored for tasks: ${names}. 
` + + `Please upgrade your Hatchet engine.`, + new Date('2026-03-01T00:00:00Z'), + logger + ); + } + /** * Stops the worker * @returns Promise that resolves when the worker stops @@ -193,9 +234,13 @@ export class Worker { const pollInterval = 200; const start = Date.now(); while (Date.now() - start < timeoutMs) { - if (this._internal?.workerId) { + // start() may asynchronously detect a legacy engine and set _legacyWorker + // after waitUntilReady has already entered this loop + if (this._legacyWorker) { + await sleep(2000); return; } + if (this._internal?.workerId) return; await sleep(pollInterval); } throw new Error(`Worker ${this.name} did not become ready within ${timeoutMs}ms`); diff --git a/sdks/typescript/src/v1/conditions/contitions.test.ts b/sdks/typescript/src/v1/conditions/contitions.test.ts index 34bcb6bc9..752ad8051 100644 --- a/sdks/typescript/src/v1/conditions/contitions.test.ts +++ b/sdks/typescript/src/v1/conditions/contitions.test.ts @@ -8,10 +8,11 @@ import { Sleep, UserEvent, } from './index'; +import { durationToString } from '../client/duration'; export function render(condition: Condition | OrCondition): string { if (condition instanceof SleepCondition) { - return `sleepFor: ${condition.sleepFor}`; + return `sleepFor: ${durationToString(condition.sleepFor)}`; } if (condition instanceof UserEventCondition) { return `event: ${condition.eventKey}${condition.expression ? 
`, expression: ${condition.expression}` : ''}`; diff --git a/sdks/typescript/src/v1/conditions/sleep-condition.ts b/sdks/typescript/src/v1/conditions/sleep-condition.ts index e5cfdd3fd..7d74ac0a9 100644 --- a/sdks/typescript/src/v1/conditions/sleep-condition.ts +++ b/sdks/typescript/src/v1/conditions/sleep-condition.ts @@ -1,4 +1,4 @@ -import { Duration } from '../client/duration'; +import { Duration, durationToString } from '../client/duration'; import { Condition, Action } from './base'; export interface Sleep { @@ -49,7 +49,7 @@ export class SleepCondition extends Condition { */ constructor(sleepFor: Duration, readableDataKey?: string, action?: Action) { super({ - readableDataKey: readableDataKey || `sleep-${sleepFor}`, + readableDataKey: readableDataKey || `sleep-${durationToString(sleepFor)}`, action, orGroupId: '', expression: '', diff --git a/sdks/typescript/src/v1/conditions/transformer.ts b/sdks/typescript/src/v1/conditions/transformer.ts index 1a9247690..9e3fe27cc 100644 --- a/sdks/typescript/src/v1/conditions/transformer.ts +++ b/sdks/typescript/src/v1/conditions/transformer.ts @@ -10,6 +10,7 @@ import { Render, SleepCondition, UserEventCondition, generateGroupId } from '.'; import { CreateWorkflowTaskOpts } from '../task'; import { Action, BaseCondition, Condition } from './base'; import { ParentCondition } from './parent-condition'; +import { durationToString } from '../client/duration'; export function taskConditionsToPb( task: Omit, 'fn'>, @@ -32,7 +33,7 @@ export function conditionsToPb(conditions: Condition[], namespace?: string): Tas if (condition instanceof SleepCondition) { sleepConditions.push({ base: baseToPb(condition.base), - sleepFor: condition.sleepFor, + sleepFor: durationToString(condition.sleepFor), }); } else if (condition instanceof UserEventCondition) { userEventConditions.push({ diff --git a/sdks/typescript/src/v1/declaration.ts b/sdks/typescript/src/v1/declaration.ts index 9c637bf3d..7bd61607c 100644 --- 
a/sdks/typescript/src/v1/declaration.ts +++ b/sdks/typescript/src/v1/declaration.ts @@ -14,7 +14,6 @@ import { } from '@hatchet/clients/rest/generated/data-contracts'; import { z } from 'zod'; import { throwIfAborted } from '@hatchet/util/abort-error'; -import { WorkerLabelComparator } from '@hatchet/protoc/v1/workflows'; import { IHatchetClient } from './client/client.interface'; import { CreateWorkflowTaskOpts, @@ -25,12 +24,14 @@ import { CreateOnSuccessTaskOpts, Concurrency, DurableTaskFn, + WorkerLabelComparator, } from './task'; import { Duration } from './client/duration'; import { MetricsClient } from './client/features/metrics'; import { InputType, OutputType, UnknownInputType, JsonObject, Resolved } from './types'; import { Context, DurableContext } from './client/worker/context'; import { parentRunContextManager } from './parent-run-context-vars'; +import { EvictionPolicy } from './client/worker/eviction/eviction-policy'; const UNBOUND_ERR = new Error('workflow unbound to hatchet client, hint: use client.run instead'); @@ -200,7 +201,10 @@ export type CreateTaskWorkflowOpts< export type CreateDurableTaskWorkflowOpts< I extends InputType = UnknownInputType, O extends OutputType = void, -> = CreateBaseWorkflowOpts & CreateBaseTaskOpts>; +> = CreateBaseWorkflowOpts & + CreateBaseTaskOpts> & { + evictionPolicy?: EvictionPolicy; + }; /** * Options for creating a new workflow. 
@@ -480,6 +484,16 @@ export class BaseWorkflowDeclaration< throw UNBOUND_ERR; } + const durableCtx = parentRunContextManager.getContext()?.durableContext; + if (durableCtx) { + if (Array.isArray(input)) { + return durableCtx.spawnChildren( + input.map((inp) => ({ workflow: this, input: inp, options })) + ); + } + return durableCtx.spawnChild(this, input, options); + } + if (Array.isArray(input)) { const refs = await this.runNoWait(input, options, _standaloneTaskName); if (options?.returnExceptions) { diff --git a/sdks/typescript/src/v1/examples/__e2e__/harness.ts b/sdks/typescript/src/v1/examples/__e2e__/harness.ts index 08e3eba17..d3483df44 100644 --- a/sdks/typescript/src/v1/examples/__e2e__/harness.ts +++ b/sdks/typescript/src/v1/examples/__e2e__/harness.ts @@ -3,6 +3,8 @@ import { randomUUID } from 'crypto'; import { HatchetClient } from '@hatchet/v1'; import type { BaseWorkflowDeclaration } from '@hatchet/v1'; import { Worker } from '../../client/worker/worker'; +import { supportsEviction } from '../../client/worker/engine-version'; +import { fetchEngineVersion } from '../../client/worker/deprecated/legacy-worker'; export function requireEnv(name: string): string { const value = process.env[name]; @@ -50,6 +52,15 @@ export async function stopWorker(worker: Worker | undefined) { await sleep(300); } +/** + * Checks whether the connected engine supports durable eviction. + * Call from beforeAll / beforeEach and skip tests when false. 
+ */ +export async function checkDurableEvictionSupport(client: HatchetClient): Promise { + const version = await fetchEngineVersion(client).catch(() => undefined); + return supportsEviction(version); +} + export async function poll( fn: () => Promise, { diff --git a/sdks/typescript/src/v1/examples/concurrency_workflow_level/workflow.ts b/sdks/typescript/src/v1/examples/concurrency_workflow_level/workflow.ts index 88e91c8a1..5dbec6994 100644 --- a/sdks/typescript/src/v1/examples/concurrency_workflow_level/workflow.ts +++ b/sdks/typescript/src/v1/examples/concurrency_workflow_level/workflow.ts @@ -7,7 +7,7 @@ const sleep = (ms: number) => setTimeout(resolve, ms); }); -export const SLEEP_TIME_MS = 500; +export const SLEEP_TIME_MS = 2000; export const DIGIT_MAX_RUNS = 8; export const NAME_MAX_RUNS = 3; diff --git a/sdks/typescript/src/v1/examples/durable-event/workflow.ts b/sdks/typescript/src/v1/examples/durable-event/workflow.ts index 436c2be89..22544543a 100644 --- a/sdks/typescript/src/v1/examples/durable-event/workflow.ts +++ b/sdks/typescript/src/v1/examples/durable-event/workflow.ts @@ -6,9 +6,7 @@ export const durableEvent = hatchet.durableTask({ name: 'durable-event', executionTimeout: '10m', fn: async (_, ctx) => { - const res = ctx.waitFor({ - eventKey: 'user:update', - }); + const res = await ctx.waitForEvent('user:update'); console.log('res', res); @@ -24,10 +22,7 @@ export const durableEventWithFilter = hatchet.durableTask({ executionTimeout: '10m', fn: async (_, ctx) => { // > Durable Event With Filter - const res = ctx.waitFor({ - eventKey: 'user:update', - expression: "input.userId == '1234'", - }); + const res = await ctx.waitForEvent('user:update', "input.userId == '1234'"); // !! 
console.log('res', res); diff --git a/sdks/typescript/src/v1/examples/durable/durable.e2e.ts b/sdks/typescript/src/v1/examples/durable/durable.e2e.ts index 121904892..9f3d08e4e 100644 --- a/sdks/typescript/src/v1/examples/durable/durable.e2e.ts +++ b/sdks/typescript/src/v1/examples/durable/durable.e2e.ts @@ -1,23 +1,44 @@ import sleep from '@hatchet/util/sleep'; -import { makeE2EClient } from '../__e2e__/harness'; -import { durableWorkflow, EVENT_KEY, SLEEP_TIME_SECONDS, waitForSleepTwice } from './workflow'; +import { makeE2EClient, checkDurableEvictionSupport } from '../__e2e__/harness'; +import { + durableWorkflow, + EVENT_KEY, + SLEEP_TIME_SECONDS, + REPLAY_RESET_SLEEP_SECONDS, + REPLAY_RESET_MEMOIZED_MAX_SECONDS, + waitForSleepTwice, + durableWithSpawn, + durableWithBulkSpawn, + durableSleepEventSpawn, + durableSpawnDag, + durableNonDeterminism, + durableReplayReset, +} from './workflow'; describe('durable-e2e', () => { const hatchet = makeE2EClient(); + let evictionSupported = false; + + beforeAll(async () => { + evictionSupported = await checkDurableEvictionSupport(hatchet); + }); + + function requireEviction() { + if (!evictionSupported) { + console.log('Skipping: engine does not support durable eviction'); + } + return !evictionSupported; + } it('durable workflow waits for sleep + event', async () => { const ref = await durableWorkflow.runNoWait({}); - // `runNoWait` returns before work starts; reliably getting an event to `durable_task` - // means we need to push events *until* the task is actually waiting. let finished = false; const resultPromise = ref.output.finally(() => { finished = true; }); const eventPusher = (async () => { - // Push a handful of events over time to handle single-consumer semantics. - // Delay pushing so `wait_for_or_group_1` resolves via its sleep condition. 
await sleep((SLEEP_TIME_SECONDS + 1) * 1000); for (let i = 0; i < 30 && !finished; i += 1) { await hatchet.events.push(EVENT_KEY, { test: 'test', i }); @@ -40,33 +61,161 @@ describe('durable-e2e', () => { const g1 = (result as any).wait_for_or_group_1; const g2 = (result as any).wait_for_or_group_2; - // runtime is rounded to seconds and can drift a bit under load expect(Math.abs(g1.runtime - SLEEP_TIME_SECONDS)).toBeLessThanOrEqual(5); expect(g1.key).toBe(g2.key); expect(g1.key).toBe('CREATE'); - // Backend may return condition index ('0'/'1') or readable key ('sleep'/'event') - // g1: 5s sleep resolves first -> '0' or 'sleep' - // g2: event or 30s sleep (event may be consumed by durable_task) -> '0'/'1' or 'sleep'/'event' - expect(['0', 'sleep']).toContain(`${g1.eventId}`); - expect(['0', '1', 'sleep', 'event']).toContain(`${g2.eventId}`); + expect(['sleep', 'event']).toContain(`${g1.eventId}`); + expect(['sleep', 'event']).toContain(`${g2.eventId}`); const multi = (result as any).wait_for_multi_sleep; expect(multi.runtime).toBeGreaterThan(3 * SLEEP_TIME_SECONDS); - }, 300_000); // durable + event flow is slow in CI + }, 300_000); it('durable sleep cancel + replay', async () => { + if (requireEviction()) return; const ref = await waitForSleepTwice.runNoWait({}); await sleep((SLEEP_TIME_SECONDS * 1000) / 2); await ref.cancel(); - // may resolve or reject depending on engine; we only need it to settle await ref.output.catch(() => undefined); await ref.replay(); const replayed = await ref.output; - // We've already slept a bit by the time the task is cancelled - expect(replayed.runtime).toBeLessThanOrEqual(SLEEP_TIME_SECONDS); - }, 300_000); // durable + event flow is slow in CI + expect(replayed.runtime).toBeLessThan(SLEEP_TIME_SECONDS + 3); + }, 300_000); + + it('durable child spawn', async () => { + const result = await durableWithSpawn.run({}); + expect(result.child_output).toEqual({ message: 'hello from child 1' }); + }, 300_000); + + it('durable child bulk 
spawn', async () => { + const n = 10; + const result = await durableWithBulkSpawn.run({ n }); + expect(result.child_outputs).toEqual( + Array.from({ length: n }, (_, i) => ({ message: `hello from child ${i}` })) + ); + }, 300_000); + + it('durable sleep + event + spawn replay', async () => { + if (requireEviction()) return; + const start = Date.now(); + const ref = await durableSleepEventSpawn.runNoWait({}); + + let finished = false; + const resultPromise = ref.output.finally(() => { + finished = true; + }); + + const eventPusher = (async () => { + await sleep((SLEEP_TIME_SECONDS + 1) * 1000); + for (let i = 0; i < 30 && !finished; i += 1) { + await hatchet.events.push(EVENT_KEY, { test: 'test', i }); + await sleep(200); + } + })(); + + const result = await resultPromise; + await eventPusher.catch(() => undefined); + const firstElapsed = (Date.now() - start) / 1000; + + expect(result.child_output).toEqual({ message: 'hello from child 1' }); + expect(firstElapsed).toBeGreaterThanOrEqual(SLEEP_TIME_SECONDS); + + const replayStart = Date.now(); + await ref.replay(); + const replayed = await ref.output; + const replayElapsed = (Date.now() - replayStart) / 1000; + + expect(replayed.child_output).toEqual({ message: 'hello from child 1' }); + expect(replayElapsed).toBeLessThan(SLEEP_TIME_SECONDS); + }, 300_000); + + it('durable completed replay', async () => { + if (requireEviction()) return; + const ref = await waitForSleepTwice.runNoWait({}); + + const start = Date.now(); + const firstResult = await ref.output; + const firstElapsed = (Date.now() - start) / 1000; + + expect(firstResult.runtime).toBeGreaterThanOrEqual(SLEEP_TIME_SECONDS); + expect(firstElapsed).toBeGreaterThanOrEqual(SLEEP_TIME_SECONDS); + + const replayStart = Date.now(); + await ref.replay(); + const replayed = await ref.output; + const replayElapsed = (Date.now() - replayStart) / 1000; + + expect(replayed.runtime).toBeLessThan(SLEEP_TIME_SECONDS); + 
expect(replayElapsed).toBeLessThan(SLEEP_TIME_SECONDS); + }, 300_000); + + it('durable spawn DAG', async () => { + const start = Date.now(); + const result = await durableSpawnDag.run({}); + const elapsed = (Date.now() - start) / 1000; + + expect(result.sleep_duration).toBeGreaterThanOrEqual(SLEEP_TIME_SECONDS); + expect(result.spawn_duration).toBeGreaterThanOrEqual(1); + expect(elapsed).toBeGreaterThanOrEqual(SLEEP_TIME_SECONDS); + expect(elapsed).toBeLessThanOrEqual(60); + }, 300_000); + + it('durable non-determinism', async () => { + if (requireEviction()) return; + const ref = await durableNonDeterminism.runNoWait({}); + const result = await ref.output; + + expect(result.non_determinism_detected).toBe(false); + + await ref.replay(); + const replayed = await ref.output; + + expect(replayed.non_determinism_detected).toBe(true); + expect(replayed.node_id).toBe(1); + expect(replayed.attempt_number).toBe(2); + }, 300_000); + + it.each([1, 2, 3])( + 'durable replay reset from node %i', + async (nodeId) => { + if (requireEviction()) return; + const ref = await durableReplayReset.runNoWait({}); + const result = await ref.output; + + expect(result.sleep_1_duration).toBeGreaterThanOrEqual(REPLAY_RESET_SLEEP_SECONDS); + expect(result.sleep_2_duration).toBeGreaterThanOrEqual(REPLAY_RESET_SLEEP_SECONDS); + expect(result.sleep_3_duration).toBeGreaterThanOrEqual(REPLAY_RESET_SLEEP_SECONDS); + + const runId = await ref.getWorkflowRunId(); + const taskExternalId = await hatchet.runs.getTaskExternalId(runId); + await hatchet.runs.branchDurableTask(taskExternalId, nodeId); + await sleep('1s'); + + const resetStart = Date.now(); + const resetResult = await ref.output; + const resetElapsed = (Date.now() - resetStart) / 1000; + + const durations = [ + resetResult.sleep_1_duration, + resetResult.sleep_2_duration, + resetResult.sleep_3_duration, + ]; + + for (let i = 0; i < durations.length; i += 1) { + if (i + 1 >= nodeId) { + 
expect(durations[i]).toBeGreaterThanOrEqual(REPLAY_RESET_SLEEP_SECONDS); + } else { + expect(durations[i]).toBeLessThan(REPLAY_RESET_MEMOIZED_MAX_SECONDS); + } + } + + const sleepsToDo = 3 - nodeId + 1; + expect(resetElapsed).toBeGreaterThanOrEqual(sleepsToDo * REPLAY_RESET_SLEEP_SECONDS); + }, + 300_000 + ); }); diff --git a/sdks/typescript/src/v1/examples/durable/workflow.ts b/sdks/typescript/src/v1/examples/durable/workflow.ts index 4bb3680d7..104b86dd9 100644 --- a/sdks/typescript/src/v1/examples/durable/workflow.ts +++ b/sdks/typescript/src/v1/examples/durable/workflow.ts @@ -1,4 +1,6 @@ import { Or, SleepCondition, UserEventCondition } from '@hatchet/v1/conditions'; +import { NonDeterminismError } from '@hatchet/util/errors/non-determinism-error'; +import sleep from '@hatchet/util/sleep'; import { hatchet } from '../hatchet-client'; export const EVENT_KEY = 'durable-example:event'; @@ -23,14 +25,18 @@ durableWorkflow.durableTask({ executionTimeout: '10m', fn: async (_input, ctx) => { console.log('Waiting for sleep'); - await ctx.sleepFor(SLEEP_TIME); + const sleepResult = await ctx.sleepFor(SLEEP_TIME); console.log('Sleep finished'); console.log('Waiting for event'); - await ctx.waitFor({ eventKey: EVENT_KEY }); + const event = await ctx.waitForEvent(EVENT_KEY, 'true'); console.log('Event received'); - return { status: 'success' }; + return { + status: 'success', + event: event, + sleep_duration_ms: sleepResult.durationMs, + }; }, }); @@ -41,10 +47,10 @@ function extractKeyAndEventId(waitResult: unknown): { key: string; eventId: stri if (obj && typeof obj === 'object') { const [key] = Object.keys(obj); const inner = obj[key]; - if (inner && typeof inner === 'object') { + if (inner && typeof inner === 'object' && !Array.isArray(inner)) { const [eventId] = Object.keys(inner); if (eventId) { - return { key: 'CREATE', eventId }; + return { key, eventId }; } } if (key) { @@ -115,8 +121,180 @@ export const waitForSleepTwice = hatchet.durableTask({ await 
ctx.sleepFor(SLEEP_TIME); return { runtime: Math.round((Date.now() - start) / 1000) }; } catch (e) { - // treat cancellation as a successful completion for parity with Python sample return { runtime: -1 }; } }, }); + +// --- Spawn child from durable task --- + +export const spawnChildTask = hatchet.task({ + name: 'spawn-child-task', + fn: async (input: { n?: number }) => { + return { message: `hello from child ${input.n ?? 1}` }; + }, +}); + +export const durableWithSpawn = hatchet.durableTask({ + name: 'durable-with-spawn', + executionTimeout: '10s', + fn: async (_input, ctx) => { + const childResult = await spawnChildTask.run({}); + return { child_output: childResult }; + }, +}); + +export const durableWithBulkSpawn = hatchet.durableTask({ + name: 'durable-with-bulk-spawn', + executionTimeout: '10m', + fn: async (input: { n?: number }, ctx) => { + const n = input.n ?? 10; + const inputs = Array.from({ length: n }, (_, i) => ({ n: i })); + const childResults = await spawnChildTask.run(inputs); + return { child_outputs: childResults }; + }, +}); + +export const durableSleepEventSpawn = hatchet.durableTask({ + name: 'durable-sleep-event-spawn', + executionTimeout: '10m', + fn: async (_input, ctx) => { + const start = Date.now(); + + await ctx.sleepFor(SLEEP_TIME); + + await ctx.waitForEvent(EVENT_KEY, 'true'); + + const childResult = await spawnChildTask.run({}); + + return { + runtime: (Date.now() - start) / 1000, + child_output: childResult, + }; + }, +}); + +// --- Spawn child using explicit ctx.spawnChild --- + +export const durableWithExplicitSpawn = hatchet.durableTask({ + name: 'durable-with-explicit-spawn', + executionTimeout: '10m', + fn: async (_input, ctx) => { + const childResult = await ctx.spawnChild(spawnChildTask, {}); + return { child_output: childResult }; + }, +}); + +// --- Non-determinism detection --- + +export const durableNonDeterminism = hatchet.durableTask({ + name: 'durable-non-determinism', + executionTimeout: '10s', + fn: async (_input, 
ctx) => { + const sleepTime = ctx.invocationCount * 2; + + try { + await ctx.sleepFor(`${sleepTime}s`); + } catch (e) { + if (e instanceof NonDeterminismError) { + return { + attempt_number: ctx.invocationCount, + sleep_time: sleepTime, + non_determinism_detected: true, + node_id: e.nodeId, + }; + } + throw e; + } + + return { + attempt_number: ctx.invocationCount, + sleep_time: sleepTime, + non_determinism_detected: false, + }; + }, +}); + +// --- Replay reset --- + +export const REPLAY_RESET_SLEEP_SECONDS = 3; +/** Max duration (seconds) for a replayed/memoized step; above this we treat it as a real sleep. */ +export const REPLAY_RESET_MEMOIZED_MAX_SECONDS = 5; +const REPLAY_RESET_SLEEP = `${REPLAY_RESET_SLEEP_SECONDS}s` as const; + +export const durableReplayReset = hatchet.durableTask({ + name: 'durable-replay-reset', + executionTimeout: '20s', + fn: async (_input, ctx) => { + let start = Date.now(); + await ctx.sleepFor(REPLAY_RESET_SLEEP); + const sleep1Duration = (Date.now() - start) / 1000; + + start = Date.now(); + await ctx.sleepFor(REPLAY_RESET_SLEEP); + const sleep2Duration = (Date.now() - start) / 1000; + + start = Date.now(); + await ctx.sleepFor(REPLAY_RESET_SLEEP); + const sleep3Duration = (Date.now() - start) / 1000; + + return { + sleep_1_duration: sleep1Duration, + sleep_2_duration: sleep2Duration, + sleep_3_duration: sleep3Duration, + }; + }, +}); + +export const memoNowCaching = hatchet.durableTask({ + name: 'memo-now-caching', + executionTimeout: '10m', + fn: async (_input, ctx) => { + const now = await ctx.now(); + return { start_time: now.toISOString() }; + }, +}); + +// --- Spawn DAG from durable task --- + +export const dagChildWorkflow = hatchet.workflow({ + name: 'dag-child-workflow-ts', +}); + +const dagChild1 = dagChildWorkflow.task({ + name: 'dag-child-1', + fn: async () => { + await sleep(1000); + return { result: 'child1' }; + }, +}); + +dagChildWorkflow.task({ + name: 'dag-child-2', + parents: [dagChild1], + fn: async () => { + 
await sleep(2000); + return { result: 'child2' }; + }, +}); + +export const durableSpawnDag = hatchet.durableTask({ + name: 'durable-spawn-dag', + executionTimeout: '10s', + fn: async (_input, ctx) => { + const sleepStart = Date.now(); + const sleepResult = await ctx.sleepFor(SLEEP_TIME); + const sleepDuration = (Date.now() - sleepStart) / 1000; + + const spawnStart = Date.now(); + const spawnResult = await dagChildWorkflow.run({}); + const spawnDuration = (Date.now() - spawnStart) / 1000; + + return { + sleep_duration: sleepDuration, + sleep_duration_ms: sleepResult.durationMs, + spawn_duration: spawnDuration, + spawn_result: spawnResult, + }; + }, +}); diff --git a/sdks/typescript/src/v1/examples/durable_event/durable_event.e2e.ts b/sdks/typescript/src/v1/examples/durable_event/durable_event.e2e.ts new file mode 100644 index 000000000..62907afa2 --- /dev/null +++ b/sdks/typescript/src/v1/examples/durable_event/durable_event.e2e.ts @@ -0,0 +1,51 @@ +import sleep from '@hatchet/util/sleep'; +import { makeE2EClient } from '../__e2e__/harness'; +import { durableEvent, durableEventWithFilter, EVENT_KEY } from './workflow'; + +describe('durable-event-e2e', () => { + const hatchet = makeE2EClient(); + + it('waits for a user event', async () => { + const ref = await durableEvent.runNoWait({}); + + let finished = false; + const resultPromise = ref.output.finally(() => { + finished = true; + }); + + const eventPusher = (async () => { + await sleep(2000); + for (let i = 0; i < 30 && !finished; i += 1) { + await hatchet.events.push(EVENT_KEY, { userId: '1234' }); + await sleep(200); + } + })(); + + const result = await resultPromise; + await eventPusher.catch(() => undefined); + + expect(result.Value).toBe('done'); + }, 120_000); + + it('waits for a user event with filter', async () => { + const ref = await durableEventWithFilter.runNoWait({}); + + let finished = false; + const resultPromise = ref.output.finally(() => { + finished = true; + }); + + const eventPusher = (async 
() => { + await sleep(2000); + for (let i = 0; i < 30 && !finished; i += 1) { + await hatchet.events.push(EVENT_KEY, { userId: '1234' }); + await sleep(200); + } + })(); + + const result = await resultPromise; + await eventPusher.catch(() => undefined); + + expect(result.Value).toBe('done'); + }, 120_000); +}); diff --git a/sdks/typescript/src/v1/examples/durable_event/workflow.ts b/sdks/typescript/src/v1/examples/durable_event/workflow.ts index 436c2be89..23dffec94 100644 --- a/sdks/typescript/src/v1/examples/durable_event/workflow.ts +++ b/sdks/typescript/src/v1/examples/durable_event/workflow.ts @@ -1,14 +1,13 @@ -// import sleep from '@hatchet/util/sleep'; import { hatchet } from '../hatchet-client'; +export const EVENT_KEY = 'user:update'; + // > Durable Event export const durableEvent = hatchet.durableTask({ name: 'durable-event', executionTimeout: '10m', fn: async (_, ctx) => { - const res = ctx.waitFor({ - eventKey: 'user:update', - }); + const res = await ctx.waitForEvent(EVENT_KEY); console.log('res', res); @@ -24,10 +23,7 @@ export const durableEventWithFilter = hatchet.durableTask({ executionTimeout: '10m', fn: async (_, ctx) => { // > Durable Event With Filter - const res = ctx.waitFor({ - eventKey: 'user:update', - expression: "input.userId == '1234'", - }); + const res = await ctx.waitForEvent(EVENT_KEY, "input.userId == '1234'"); // !! console.log('res', res); diff --git a/sdks/typescript/src/v1/examples/durable_eviction/capacity-worker.ts b/sdks/typescript/src/v1/examples/durable_eviction/capacity-worker.ts new file mode 100644 index 000000000..04360ab1d --- /dev/null +++ b/sdks/typescript/src/v1/examples/durable_eviction/capacity-worker.ts @@ -0,0 +1,21 @@ +/** + * Dedicated worker for capacity-eviction e2e tests. + * + * Runs with durableSlots=1 so that a single waiting durable task triggers + * capacity pressure and gets evicted (even with ttl=undefined). 
+ */ +import { hatchet } from '../hatchet-client'; +import { capacityEvictableSleep } from './workflow'; + +async function main() { + const worker = await hatchet.worker('capacity-eviction-worker', { + durableSlots: 1, + workflows: [capacityEvictableSleep], + }); + + await worker.start(); +} + +if (require.main === module) { + main(); +} diff --git a/sdks/typescript/src/v1/examples/durable_eviction/durable_eviction.e2e.ts b/sdks/typescript/src/v1/examples/durable_eviction/durable_eviction.e2e.ts new file mode 100644 index 000000000..4b54c0d36 --- /dev/null +++ b/sdks/typescript/src/v1/examples/durable_eviction/durable_eviction.e2e.ts @@ -0,0 +1,548 @@ +import sleep from '@hatchet/util/sleep'; +import { V1TaskStatus } from '@hatchet/clients/rest/generated/data-contracts'; +import Hatchet from '@hatchet/index'; +import { makeE2EClient, poll, checkDurableEvictionSupport } from '../__e2e__/harness'; +import { + evictableSleep, + evictableWaitForEvent, + evictableChildSpawn, + evictableChildBulkSpawn, + multipleEviction, + nonEvictableSleep, + capacityEvictableSleep, + LONG_SLEEP_SECONDS, + EVICTION_TTL_SECONDS, + EVENT_KEY, + evictableSleepForGracefulTermination, +} from './workflow'; + +function getTaskStatuses(details: any): V1TaskStatus[] { + return (details?.tasks || []).map((t: any) => t.status); +} + +function hasEvictedTask(details: any): boolean { + return (details?.tasks || []).some((t: any) => t.isEvicted === true); +} + +function getTaskExternalId(details: any): string | undefined { + const tasks = details?.tasks || []; + const [t] = tasks; + return t?.taskExternalId ?? 
t?.metadata?.id; +} + +describe('durable-eviction-e2e', () => { + const hatchet = makeE2EClient(); + let evictionSupported = false; + + beforeAll(async () => { + evictionSupported = await checkDurableEvictionSupport(hatchet); + }); + + function requireEviction() { + if (!evictionSupported) { + console.log('Skipping: engine does not support durable eviction'); + } + return !evictionSupported; + } + + async function pollUntilStatus( + runId: string, + targetStatus: V1TaskStatus, + maxPollsOverride?: number + ) { + const maxPolls = maxPollsOverride || 15; + const interval = 2000; + + return poll( + async () => { + try { + return await hatchet.runs.get(runId); + } catch (e: any) { + if (e?.response?.status === 404) return undefined; + throw e; + } + }, + { + timeoutMs: maxPolls * interval, + intervalMs: interval, + shouldStop: (details: any) => + details != null && getTaskStatuses(details).includes(targetStatus), + label: `status=${targetStatus}`, + } + ); + } + + async function pollUntilEvicted(runId: string, maxPollsOverride?: number) { + const maxPolls = maxPollsOverride || 15; + const interval = 2000; + + return poll( + async () => { + try { + return await hatchet.runs.get(runId); + } catch (e: any) { + if (e?.response?.status === 404) return undefined; + throw e; + } + }, + { + timeoutMs: maxPolls * interval, + intervalMs: interval, + shouldStop: (details: any) => details != null && hasEvictedTask(details), + label: 'isEvicted=true', + } + ); + } + + it('non-evictable task completes normally', async () => { + if (requireEviction()) return; + const start = Date.now(); + const result = await nonEvictableSleep.run({}); + const elapsed = (Date.now() - start) / 1000; + + expect(result.status).toBe('completed'); + expect(elapsed).toBeGreaterThanOrEqual(9); + }, 120_000); + + it('non-evictable task is never evicted past TTL', async () => { + if (requireEviction()) return; + const ref = await nonEvictableSleep.runNoWait({}); + const runId = await ref.getWorkflowRunId(); + 
+ await pollUntilStatus(runId, V1TaskStatus.RUNNING); + await sleep(7000); + const details = await hatchet.runs.get(runId); + + expect(hasEvictedTask(details)).toBe(false); + + const result = await ref.output; + expect(result.status).toBe('completed'); + }, 120_000); + + it('evictable task is evicted after TTL', async () => { + if (requireEviction()) return; + const ref = await evictableSleep.runNoWait({}); + const runId = await ref.getWorkflowRunId(); + + await pollUntilStatus(runId, V1TaskStatus.RUNNING); + const details = await pollUntilEvicted(runId); + + expect(hasEvictedTask(details)).toBe(true); + }, 120_000); + + it('evictable task restore re-enqueues the task', async () => { + if (requireEviction()) return; + const ref = await evictableSleep.runNoWait({}); + const runId = await ref.getWorkflowRunId(); + + await pollUntilStatus(runId, V1TaskStatus.RUNNING); + const details = await pollUntilEvicted(runId); + const taskId = getTaskExternalId(details); + expect(taskId).toBeDefined(); + + await hatchet.runs.restoreTask(taskId!); + + const restored = await pollUntilStatus(runId, V1TaskStatus.RUNNING); + const statuses = getTaskStatuses(restored); + expect(statuses).toContain(V1TaskStatus.RUNNING); + }, 120_000); + + it('evictable task restore completes', async () => { + if (requireEviction()) return; + const start = Date.now(); + const ref = await evictableSleep.runNoWait({}); + const runId = await ref.getWorkflowRunId(); + + await pollUntilStatus(runId, V1TaskStatus.RUNNING); + const details = await pollUntilEvicted(runId); + const taskId = getTaskExternalId(details); + expect(taskId).toBeDefined(); + + await hatchet.runs.restoreTask(taskId!); + + const result = await ref.output; + const elapsed = (Date.now() - start) / 1000; + expect(result.status).toBe('completed'); + expect(elapsed).toBeGreaterThanOrEqual(LONG_SLEEP_SECONDS); + }, 180_000); + + it('evictable wait-for-event is evicted after TTL', async () => { + if (requireEviction()) return; + const ref = 
await evictableWaitForEvent.runNoWait({}); + const runId = await ref.getWorkflowRunId(); + + await pollUntilStatus(runId, V1TaskStatus.RUNNING); + const details = await pollUntilEvicted(runId); + + expect(hasEvictedTask(details)).toBe(true); + }, 120_000); + + it('evictable wait-for-event restore + event completes', async () => { + if (requireEviction()) return; + const ref = await evictableWaitForEvent.runNoWait({}); + const runId = await ref.getWorkflowRunId(); + + await pollUntilStatus(runId, V1TaskStatus.RUNNING); + const details = await pollUntilEvicted(runId); + const taskId = getTaskExternalId(details); + expect(taskId).toBeDefined(); + + await hatchet.runs.restoreTask(taskId!); + await pollUntilStatus(runId, V1TaskStatus.RUNNING); + + await hatchet.events.push(EVENT_KEY, {}); + + const result = await ref.output; + expect(result.status).toBe('completed'); + }, 180_000); + + it('evictable child spawn is evicted after TTL', async () => { + if (requireEviction()) return; + const ref = await evictableChildSpawn.runNoWait({}); + const runId = await ref.getWorkflowRunId(); + + await pollUntilStatus(runId, V1TaskStatus.RUNNING); + const details = await pollUntilEvicted(runId); + + expect(hasEvictedTask(details)).toBe(true); + }, 120_000); + + it('evictable child spawn restore completes', async () => { + if (requireEviction()) return; + const ref = await evictableChildSpawn.runNoWait({}); + const runId = await ref.getWorkflowRunId(); + + await pollUntilStatus(runId, V1TaskStatus.RUNNING); + const details = await pollUntilEvicted(runId); + const taskId = getTaskExternalId(details); + expect(taskId).toBeDefined(); + + await hatchet.runs.restoreTask(taskId!); + + const result = await ref.output; + expect(result.status).toBe('completed'); + expect(result.child).toEqual({ child_status: 'completed' }); + }, 180_000); + + it('evictable child spawn restore re-enqueues', async () => { + if (requireEviction()) return; + const ref = await evictableChildSpawn.runNoWait({}); + 
const runId = await ref.getWorkflowRunId(); + + await pollUntilStatus(runId, V1TaskStatus.RUNNING); + const details = await pollUntilEvicted(runId); + const taskId = getTaskExternalId(details); + expect(taskId).toBeDefined(); + + await hatchet.runs.restoreTask(taskId!); + + const restored = await pollUntilStatus(runId, V1TaskStatus.RUNNING); + const statuses = getTaskStatuses(restored); + expect(statuses).toContain(V1TaskStatus.RUNNING); + }, 120_000); + + it('evictable child bulk spawn restore completes', async () => { + if (requireEviction()) return; + const ref = await evictableChildBulkSpawn.runNoWait({}); + const runId = await ref.getWorkflowRunId(); + + let evictionCount = 0; + for (let i = 0; i < 3; i += 1) { + await pollUntilStatus(runId, V1TaskStatus.RUNNING); + const details = await pollUntilEvicted(runId); + evictionCount += 1; + const taskId = getTaskExternalId(details)!; + await hatchet.runs.restoreTask(taskId); + } + + const result = await ref.output; + expect(evictionCount).toBe(3); + expect(result.status).toBe('completed'); + expect(result.child_results).toEqual( + Array.from({ length: 3 }, (_, i) => ({ + sleepSeconds: (EVICTION_TTL_SECONDS + 5) * (i + 1), + status: 'completed', + })) + ); + }, 300_000); + + it('multiple eviction cycles', async () => { + if (requireEviction()) return; + const start = Date.now(); + const ref = await multipleEviction.runNoWait({}); + const runId = await ref.getWorkflowRunId(); + + // First eviction cycle + await pollUntilStatus(runId, V1TaskStatus.RUNNING); + let details = await pollUntilEvicted(runId); + expect(hasEvictedTask(details)).toBe(true); + + let taskId = getTaskExternalId(details)!; + await hatchet.runs.restoreTask(taskId); + + // Second eviction cycle + await pollUntilStatus(runId, V1TaskStatus.RUNNING); + details = await pollUntilEvicted(runId); + expect(hasEvictedTask(details)).toBe(true); + + taskId = getTaskExternalId(details)!; + await hatchet.runs.restoreTask(taskId); + + const result = await 
ref.output; + const elapsed = (Date.now() - start) / 1000; + expect(result.status).toBe('completed'); + expect(elapsed).toBeGreaterThanOrEqual(2 * LONG_SLEEP_SECONDS); + }, 300_000); + + it('eviction plus replay completes', async () => { + if (requireEviction()) return; + const ref = await evictableSleep.runNoWait({}); + const runId = await ref.getWorkflowRunId(); + + await pollUntilStatus(runId, V1TaskStatus.RUNNING); + await pollUntilEvicted(runId); + + await hatchet.runs.replay({ ids: [runId] }); + + const result = await ref.output; + expect(result.status).toBe('completed'); + }, 180_000); + + it('cancel after eviction transitions to CANCELLED', async () => { + if (requireEviction()) return; + const ref = await evictableSleep.runNoWait({}); + const runId = await ref.getWorkflowRunId(); + + await pollUntilStatus(runId, V1TaskStatus.RUNNING); + const details = await pollUntilEvicted(runId); + expect(hasEvictedTask(details)).toBe(true); + + await hatchet.runs.cancel({ ids: [runId] }); + + const cancelled = await pollUntilStatus(runId, V1TaskStatus.CANCELLED, 30); + const cancelledStatuses = getTaskStatuses(cancelled); + expect(cancelledStatuses).toContain(V1TaskStatus.CANCELLED); + }, 120_000); + + it('restore idempotency - double restore completes once', async () => { + if (requireEviction()) return; + const ref = await evictableSleep.runNoWait({}); + const runId = await ref.getWorkflowRunId(); + + await pollUntilStatus(runId, V1TaskStatus.RUNNING); + const details = await pollUntilEvicted(runId); + const taskId = getTaskExternalId(details)!; + + await hatchet.runs.restoreTask(taskId); + await hatchet.runs.restoreTask(taskId); + + const result = await ref.output; + expect(result.status).toBe('completed'); + }, 180_000); + + it('capacity eviction fires with durable_slots=1 and ttl=undefined', async () => { + if (requireEviction()) return; + const { spawn } = await import('child_process'); + + const workerProc = spawn( + 'pnpm', + [ + 'exec', + 'ts-node', + '-r', + 
'tsconfig-paths/register', + '-P', + 'tsconfig.json', + 'src/v1/examples/durable_eviction/capacity-worker.ts', + ], + { + cwd: process.cwd(), + env: { + ...process.env, + HATCHET_CLIENT_WORKER_HEALTHCHECK_ENABLED: 'true', + HATCHET_CLIENT_WORKER_HEALTHCHECK_PORT: '8105', + }, + stdio: 'pipe', + } + ); + + workerProc.stdout?.on('data', () => {}); + workerProc.stderr?.on('data', () => {}); + + try { + await poll( + async () => { + try { + const resp = await fetch('http://localhost:8105/health'); + return resp.ok; + } catch { + return false; + } + }, + { + timeoutMs: 30_000, + intervalMs: 1000, + shouldStop: (healthy) => healthy === true, + label: 'capacity-worker-health', + } + ); + + const ref = await capacityEvictableSleep.runNoWait({}); + const runId = await ref.getWorkflowRunId(); + + await pollUntilStatus(runId, V1TaskStatus.RUNNING); + const details = await pollUntilEvicted(runId, 20); + + expect(hasEvictedTask(details)).toBe(true); + } finally { + try { + workerProc.kill('SIGKILL'); + } catch { + // ignore + } + } + }, 120_000); + + it('capacity eviction restore completes', async () => { + if (requireEviction()) return; + const { spawn } = await import('child_process'); + + const workerProc = spawn( + 'pnpm', + [ + 'exec', + 'ts-node', + '-r', + 'tsconfig-paths/register', + '-P', + 'tsconfig.json', + 'src/v1/examples/durable_eviction/capacity-worker.ts', + ], + { + cwd: process.cwd(), + env: { + ...process.env, + HATCHET_CLIENT_WORKER_HEALTHCHECK_ENABLED: 'true', + HATCHET_CLIENT_WORKER_HEALTHCHECK_PORT: '8106', + }, + stdio: 'pipe', + } + ); + + workerProc.stdout?.on('data', () => {}); + workerProc.stderr?.on('data', () => {}); + + try { + await poll( + async () => { + try { + const resp = await fetch('http://localhost:8106/health'); + return resp.ok; + } catch { + return false; + } + }, + { + timeoutMs: 30_000, + intervalMs: 1000, + shouldStop: (healthy) => healthy === true, + label: 'capacity-worker-health', + } + ); + + const ref = await 
capacityEvictableSleep.runNoWait({}); + const runId = await ref.getWorkflowRunId(); + + await pollUntilStatus(runId, V1TaskStatus.RUNNING); + const details = await pollUntilEvicted(runId, 20); + const taskId = getTaskExternalId(details)!; + + await hatchet.runs.restoreTask(taskId); + + const result = await ref.output; + expect(result.status).toBe('completed'); + } finally { + try { + workerProc.kill('SIGKILL'); + } catch { + // ignore + } + } + }, 180_000); + + it('graceful termination evicts waiting runs', async () => { + if (requireEviction()) return; + const { spawn } = await import('child_process'); + + const namespace = 'graceful-termination-evicts-waiting-runs'; + + const hatchetWithNamespace = Hatchet.init({ + namespace, + }); + + const workerProc = spawn( + 'pnpm', + [ + 'exec', + 'ts-node', + '-r', + 'tsconfig-paths/register', + '-P', + 'tsconfig.json', + 'src/v1/examples/durable_eviction/worker.ts', + ], + { + cwd: process.cwd(), + env: { + ...process.env, + HATCHET_CLIENT_WORKER_HEALTHCHECK_ENABLED: 'true', + HATCHET_CLIENT_WORKER_HEALTHCHECK_PORT: '8104', + HATCHET_CLIENT_NAMESPACE: 'graceful-termination-evicts-waiting-runs', + }, + stdio: 'pipe', + } + ); + + workerProc.stdout?.on('data', () => {}); + workerProc.stderr?.on('data', () => {}); + + try { + await poll( + async () => { + try { + const resp = await fetch('http://localhost:8104/health'); + return resp.ok; + } catch { + return false; + } + }, + { + timeoutMs: 30_000, + intervalMs: 1000, + shouldStop: (healthy) => healthy === true, + label: 'worker-health', + } + ); + + const ref = await hatchetWithNamespace.admin.runWorkflow( + evictableSleepForGracefulTermination.name, + {} + ); + + const runId = await ref.getWorkflowRunId(); + + await pollUntilStatus(runId, V1TaskStatus.RUNNING); + + workerProc.kill('SIGTERM'); + + const details = await pollUntilEvicted(runId); + expect(hasEvictedTask(details)).toBe(true); + } finally { + try { + workerProc.kill('SIGKILL'); + } catch { + // ignore + } + } + 
}, 120_000); +}); diff --git a/sdks/typescript/src/v1/examples/durable_eviction/worker.ts b/sdks/typescript/src/v1/examples/durable_eviction/worker.ts new file mode 100644 index 000000000..bd13f1170 --- /dev/null +++ b/sdks/typescript/src/v1/examples/durable_eviction/worker.ts @@ -0,0 +1,34 @@ +import { hatchet } from '../hatchet-client'; +import { + evictableSleep, + evictableWaitForEvent, + evictableChildSpawn, + evictableChildBulkSpawn, + multipleEviction, + nonEvictableSleep, + childTask, + bulkChildTask, + evictableSleepForGracefulTermination, +} from './workflow'; + +async function main() { + const worker = await hatchet.worker('eviction-worker', { + workflows: [ + evictableSleep, + evictableWaitForEvent, + evictableChildSpawn, + evictableChildBulkSpawn, + multipleEviction, + nonEvictableSleep, + childTask, + bulkChildTask, + evictableSleepForGracefulTermination, + ], + }); + + await worker.start(); +} + +if (require.main === module) { + main(); +} diff --git a/sdks/typescript/src/v1/examples/durable_eviction/workflow.ts b/sdks/typescript/src/v1/examples/durable_eviction/workflow.ts new file mode 100644 index 000000000..ed75f3768 --- /dev/null +++ b/sdks/typescript/src/v1/examples/durable_eviction/workflow.ts @@ -0,0 +1,128 @@ +import sleep from '@hatchet/util/sleep'; +import { EvictionPolicy } from '@hatchet/v1'; +import { hatchet } from '../hatchet-client'; + +export const EVICTION_TTL_SECONDS = 5; +export const LONG_SLEEP_SECONDS = 15; +export const EVENT_KEY = 'durable-eviction:event'; + +const EVICTION_POLICY: EvictionPolicy = { + ttl: `${EVICTION_TTL_SECONDS}s`, + allowCapacityEviction: true, + priority: 0, +}; + +export const childTask = hatchet.task({ + name: 'eviction-child-task', + fn: async () => { + await sleep(LONG_SLEEP_SECONDS * 1000); + return { child_status: 'completed' }; + }, +}); + +export const evictableSleep = hatchet.durableTask({ + name: 'evictable-sleep', + executionTimeout: '5m', + evictionPolicy: EVICTION_POLICY, + fn: async 
(_input, ctx) => { + await ctx.sleepFor(`${LONG_SLEEP_SECONDS}s`); + return { status: 'completed' }; + }, +}); + +// NOTE: DO NOT REGISTER ON E2E TEST WORKER +export const evictableSleepForGracefulTermination = hatchet.durableTask({ + name: 'evictable-sleep-for-graceful-termination', + executionTimeout: '5m', + evictionPolicy: { + ttl: `30m`, + allowCapacityEviction: true, + priority: 0, + }, + fn: async (_input, ctx) => { + await ctx.sleepFor(`5m`); + return { status: 'completed' }; + }, +}); + +export const evictableWaitForEvent = hatchet.durableTask({ + name: 'evictable-wait-for-event', + executionTimeout: '5m', + evictionPolicy: EVICTION_POLICY, + fn: async (_input, ctx) => { + await ctx.waitForEvent(EVENT_KEY, 'true'); + return { status: 'completed' }; + }, +}); + +export const evictableChildSpawn = hatchet.durableTask({ + name: 'evictable-child-spawn', + executionTimeout: '5m', + evictionPolicy: EVICTION_POLICY, + fn: async (_input, ctx) => { + const childResult = await childTask.run({}); + return { child: childResult, status: 'completed' }; + }, +}); + +export const multipleEviction = hatchet.durableTask({ + name: 'multiple-eviction', + executionTimeout: '5m', + evictionPolicy: EVICTION_POLICY, + fn: async (_input, ctx) => { + await ctx.sleepFor(`${LONG_SLEEP_SECONDS}s`); + await ctx.sleepFor(`${LONG_SLEEP_SECONDS}s`); + return { status: 'completed' }; + }, +}); + +export const bulkChildTask = hatchet.task({ + name: 'eviction-bulk-child-task', + fn: async (input: { sleepSeconds: number }) => { + await sleep(input.sleepSeconds * 1000); + return { sleepSeconds: input.sleepSeconds, status: 'completed' }; + }, +}); + +export const evictableChildBulkSpawn = hatchet.durableTask({ + name: 'evictable-child-bulk-spawn', + executionTimeout: '5m', + evictionPolicy: EVICTION_POLICY, + fn: async (_input, ctx) => { + const inputs = Array.from({ length: 3 }, (_, i) => ({ + sleepSeconds: (EVICTION_TTL_SECONDS + 5) * (i + 1), + })); + const childResults = await 
bulkChildTask.run(inputs); + return { child_results: childResults, status: 'completed' }; + }, +}); + +export const CAPACITY_SLEEP_SECONDS = 20; + +export const capacityEvictableSleep = hatchet.durableTask({ + name: 'capacity-evictable-sleep', + executionTimeout: '5m', + evictionPolicy: { + ttl: undefined, + allowCapacityEviction: true, + priority: 0, + }, + fn: async (_input, ctx) => { + await ctx.sleepFor(`${CAPACITY_SLEEP_SECONDS}s`); + return { status: 'completed' }; + }, +}); + +export const nonEvictableSleep = hatchet.durableTask({ + name: 'non-evictable-sleep', + executionTimeout: '5m', + evictionPolicy: { + ttl: undefined, + allowCapacityEviction: false, + priority: 0, + }, + fn: async (_input, ctx) => { + await ctx.sleepFor('10s'); + return { status: 'completed' }; + }, +}); diff --git a/sdks/typescript/src/v1/examples/durable_sleep/durable_sleep.e2e.ts b/sdks/typescript/src/v1/examples/durable_sleep/durable_sleep.e2e.ts new file mode 100644 index 000000000..5a29ee9b2 --- /dev/null +++ b/sdks/typescript/src/v1/examples/durable_sleep/durable_sleep.e2e.ts @@ -0,0 +1,19 @@ +import { durableSleep } from './workflow'; + +describe('durable-sleep-e2e', () => { + it('sleeps for 5s and completes', async () => { + const start = Date.now(); + const result = await durableSleep.run({}); + const elapsed = (Date.now() - start) / 1000; + + expect(result).toEqual( + expect.objectContaining({ + 'durable-sleep': expect.objectContaining({ + Value: 'done', + }), + }) + ); + expect(elapsed).toBeGreaterThanOrEqual(4); + expect(elapsed).toBeLessThanOrEqual(30); + }, 120_000); +}); diff --git a/sdks/typescript/src/v1/examples/e2e-worker.ts b/sdks/typescript/src/v1/examples/e2e-worker.ts index bcd1872e7..c7548a449 100644 --- a/sdks/typescript/src/v1/examples/e2e-worker.ts +++ b/sdks/typescript/src/v1/examples/e2e-worker.ts @@ -14,7 +14,31 @@ import { concurrencyCancelNewestWorkflow } from './concurrency_cancel_newest/wor import { concurrencyMultipleKeysWorkflow } from 
'./concurrency_multiple_keys/workflow'; import { concurrencyWorkflowLevelWorkflow } from './concurrency_workflow_level/workflow'; import { dag } from './dag/workflow'; -import { durableWorkflow, waitForSleepTwice } from './durable/workflow'; +import { + durableWorkflow, + waitForSleepTwice, + spawnChildTask, + durableWithSpawn, + durableWithBulkSpawn, + durableSleepEventSpawn, + durableWithExplicitSpawn, + durableNonDeterminism, + durableReplayReset, + dagChildWorkflow, + durableSpawnDag, +} from './durable/workflow'; +import { durableEvent, durableEventWithFilter } from './durable_event/workflow'; +import { + evictableSleep, + evictableWaitForEvent, + evictableChildSpawn, + multipleEviction, + nonEvictableSleep, + childTask as evictionChildTask, + bulkChildTask, + evictableChildBulkSpawn, +} from './durable_eviction/workflow'; +import { durableSleep } from './durable_sleep/workflow'; import { createLoggingWorkflow } from './logger/workflow'; import { nonRetryableWorkflow } from './non_retryable/workflow'; import { failureWorkflow } from './on_failure/workflow'; @@ -41,6 +65,26 @@ const workflows = [ dag, durableWorkflow, waitForSleepTwice, + spawnChildTask, + durableWithSpawn, + durableWithBulkSpawn, + durableSleepEventSpawn, + durableWithExplicitSpawn, + durableNonDeterminism, + durableReplayReset, + dagChildWorkflow, + durableSpawnDag, + durableEvent, + durableEventWithFilter, + durableSleep, + evictableSleep, + evictableWaitForEvent, + evictableChildSpawn, + multipleEviction, + nonEvictableSleep, + evictionChildTask, + bulkChildTask, + evictableChildBulkSpawn, createLoggingWorkflow(hatchet), nonRetryableWorkflow, failureWorkflow, diff --git a/sdks/typescript/src/v1/examples/runtime_affinity/runtime-affinity.e2e.ts b/sdks/typescript/src/v1/examples/runtime_affinity/runtime-affinity.e2e.ts index a88428e83..ce306e37c 100644 --- a/sdks/typescript/src/v1/examples/runtime_affinity/runtime-affinity.e2e.ts +++ 
b/sdks/typescript/src/v1/examples/runtime_affinity/runtime-affinity.e2e.ts @@ -1,6 +1,6 @@ import sleep from '@hatchet/util/sleep'; import { WorkerList } from '@hatchet/clients/rest/generated/data-contracts'; -import { stopWorker } from '../__e2e__/harness'; +import { checkDurableEvictionSupport, stopWorker } from '../__e2e__/harness'; import { Worker } from '../../client/worker/worker'; import { hatchet } from '../hatchet-client'; import { affinityExampleTask } from './workflow'; @@ -10,6 +10,11 @@ const labels = ['foo', 'bar'] as const; describe('runtime-affinity-e2e', () => { let workerA: Worker | undefined; let workerB: Worker | undefined; + let evictionSupported = false; + + beforeAll(async () => { + evictionSupported = await checkDurableEvictionSupport(hatchet); + }); afterAll(async () => { await stopWorker(workerA); @@ -17,6 +22,10 @@ describe('runtime-affinity-e2e', () => { }); it('routes runs to the correct worker based on desired labels', async () => { + if (!evictionSupported) { + return; + } + workerA = await hatchet.worker('runtime-affinity-worker', { workflows: [affinityExampleTask], labels: { affinity: labels[0] }, diff --git a/sdks/typescript/src/v1/index.ts b/sdks/typescript/src/v1/index.ts index 7167c5c06..bc351e43c 100644 --- a/sdks/typescript/src/v1/index.ts +++ b/sdks/typescript/src/v1/index.ts @@ -9,3 +9,11 @@ export * from './task'; export * from './client/worker/context'; export * from './slot-types'; export * from '../legacy/legacy-transformer'; +export { NonDeterminismError } from '../util/errors/non-determinism-error'; +export { EvictionNotSupportedError } from '../util/errors/eviction-not-supported-error'; +export { + EvictionPolicy, + DEFAULT_DURABLE_TASK_EVICTION_POLICY, +} from './client/worker/eviction/eviction-policy'; +export { DurableEvictionConfig } from './client/worker/eviction/eviction-manager'; +export { MinEngineVersion, supportsEviction } from './client/worker/engine-version'; diff --git 
a/sdks/typescript/src/v1/parent-run-context-vars.ts b/sdks/typescript/src/v1/parent-run-context-vars.ts index 8518d23f8..8ac1228d5 100644 --- a/sdks/typescript/src/v1/parent-run-context-vars.ts +++ b/sdks/typescript/src/v1/parent-run-context-vars.ts @@ -1,4 +1,5 @@ import { AsyncLocalStorage } from 'async_hooks'; +import { DurableContext } from './client/worker/context'; export interface ParentRunContext { parentId: string; @@ -14,6 +15,12 @@ export interface ParentRunContext { * Used to cancel local "wait for result" subscriptions when the parent task is cancelled. */ signal?: AbortSignal; + + /** + * Present when the current task is running in durable mode. + * Used by child `run()` calls to route through `spawnChild` instead of a fresh trigger. + */ + durableContext?: DurableContext; } export class ParentRunContextManager { diff --git a/sdks/typescript/src/v1/task.ts b/sdks/typescript/src/v1/task.ts index b0edb8de9..86c7695eb 100644 --- a/sdks/typescript/src/v1/task.ts +++ b/sdks/typescript/src/v1/task.ts @@ -1,12 +1,10 @@ -import { - ConcurrencyLimitStrategy, - RateLimitDuration, - WorkerLabelComparator, -} from '@hatchet/protoc/v1/workflows'; +import { ConcurrencyLimitStrategy, RateLimitDuration } from '@hatchet/protoc/v1/workflows'; import { Conditions } from './conditions'; import { Duration } from './client/duration'; import { InputType, OutputType, UnknownInputType } from './types'; import { Context, DurableContext } from './client/worker/context'; +import { EvictionPolicy } from './client/worker/eviction/eviction-policy'; +import { WorkerLabelComparator } from '../protoc/v1/shared/trigger'; export { ConcurrencyLimitStrategy, WorkerLabelComparator }; @@ -241,7 +239,13 @@ export type CreateWorkflowDurableTaskOpts< I extends InputType = UnknownInputType, O extends OutputType = void, C extends DurableTaskFn = DurableTaskFn, -> = CreateWorkflowTaskOpts; +> = CreateWorkflowTaskOpts & { + /** + * Eviction policy for the durable task. 
Controls TTL-based eviction and capacity-based eviction. + * Defaults to the built-in eviction policy when omitted or `undefined`. + */ + evictionPolicy?: EvictionPolicy; +}; /** * Options for configuring the onSuccess task that is invoked when a task succeeds. diff --git a/sql/schema/v0.sql b/sql/schema/v0.sql index 10b2939d1..4b2d657fb 100644 --- a/sql/schema/v0.sql +++ b/sql/schema/v0.sql @@ -863,6 +863,7 @@ CREATE TABLE "Worker" ( "os" TEXT, "runtimeExtra" TEXT, "sdkVersion" TEXT, + "durableTaskDispatcherId" UUID, CONSTRAINT "Worker_pkey" PRIMARY KEY ("id") ); diff --git a/sql/schema/v1-core.sql b/sql/schema/v1-core.sql index e2c1cd3ab..c69e21dec 100644 --- a/sql/schema/v1-core.sql +++ b/sql/schema/v1-core.sql @@ -40,7 +40,8 @@ $$; CREATE OR REPLACE FUNCTION create_v1_range_partition( targetTableName text, - targetDate date + targetDate date, + fillfactor integer DEFAULT 100 ) RETURNS integer LANGUAGE plpgsql AS $$ @@ -60,14 +61,15 @@ BEGIN EXECUTE format('CREATE TABLE %s (LIKE %s INCLUDING INDEXES INCLUDING CONSTRAINTS)', newTableName, targetTableName); EXECUTE - format('ALTER TABLE %s SET ( + format('ALTER TABLE %I SET ( + fillfactor = %s, autovacuum_vacuum_scale_factor = ''0.1'', autovacuum_analyze_scale_factor=''0.05'', autovacuum_vacuum_threshold=''25'', autovacuum_analyze_threshold=''25'', autovacuum_vacuum_cost_delay=''10'', autovacuum_vacuum_cost_limit=''1000'' - )', newTableName); + )', newTableName, fillfactor); EXECUTE format('ALTER TABLE %s ATTACH PARTITION %s FOR VALUES FROM (''%s'') TO (''%s'')', targetTableName, newTableName, targetDateStr, targetDatePlusOneDayStr); RETURN 1; @@ -303,6 +305,7 @@ CREATE TABLE v1_task ( concurrency_keys TEXT[], retry_backoff_factor DOUBLE PRECISION, retry_max_backoff INTEGER, + is_durable BOOLEAN, desired_worker_label JSONB, CONSTRAINT v1_task_pkey PRIMARY KEY (id, inserted_at) ) PARTITION BY RANGE(inserted_at); @@ -417,6 +420,7 @@ CREATE TABLE v1_task_runtime ( worker_id UUID, tenant_id UUID NOT NULL, timeout_at 
TIMESTAMP(3) NOT NULL, + evicted_at TIMESTAMPTZ DEFAULT NULL, CONSTRAINT v1_task_runtime_pkey PRIMARY KEY (task_id, task_inserted_at, retry_count) ); @@ -425,6 +429,8 @@ CREATE INDEX v1_task_runtime_tenantId_workerId_idx ON v1_task_runtime (tenant_id CREATE INDEX v1_task_runtime_tenantId_timeoutAt_idx ON v1_task_runtime (tenant_id ASC, timeout_at ASC); +CREATE INDEX v1_task_runtime_tenant_worker_not_evicted_idx ON v1_task_runtime (tenant_id, worker_id) WHERE evicted_at IS NULL; + alter table v1_task_runtime set ( autovacuum_vacuum_scale_factor = '0.1', autovacuum_analyze_scale_factor='0.05', @@ -531,6 +537,7 @@ CREATE TABLE v1_match ( existing_data JSONB, signal_task_id bigint, signal_task_inserted_at timestamptz, + signal_task_external_id UUID, signal_external_id UUID, signal_key TEXT, -- references the parent DAG for the task, which we can use to get input + additional metadata @@ -551,6 +558,8 @@ CREATE TABLE v1_match ( trigger_existing_task_id bigint, trigger_existing_task_inserted_at timestamptz, trigger_priority integer, + durable_event_log_entry_node_id bigint, + durable_event_log_entry_branch_id bigint, CONSTRAINT v1_match_pkey PRIMARY KEY (id) ); @@ -1705,7 +1714,7 @@ CREATE TABLE v1_durable_sleep ( PRIMARY KEY (tenant_id, sleep_until, id) ); -CREATE TYPE v1_payload_type AS ENUM ('TASK_INPUT', 'DAG_INPUT', 'TASK_OUTPUT', 'TASK_EVENT_DATA', 'USER_EVENT_INPUT'); +CREATE TYPE v1_payload_type AS ENUM ('TASK_INPUT', 'DAG_INPUT', 'TASK_OUTPUT', 'TASK_EVENT_DATA', 'USER_EVENT_INPUT', 'DURABLE_EVENT_LOG_ENTRY_DATA', 'DURABLE_EVENT_LOG_ENTRY_RESULT_DATA'); -- IMPORTANT: Keep these values in sync with `v1_payload_type_olap` in the OLAP db CREATE TYPE v1_payload_location AS ENUM ('INLINE', 'EXTERNAL'); @@ -2274,3 +2283,88 @@ CREATE TABLE v1_event_to_run ( PRIMARY KEY (event_id, event_seen_at, run_external_id) ) PARTITION BY RANGE(event_seen_at); + +-- v1_durable_event_log represents the log file for the durable event history +-- of a durable task. 
This table stores metadata like sequence values for entries. +-- +-- Important: writers to v1_durable_event_log_entry should lock this row to increment the sequence value. +CREATE TABLE v1_durable_event_log_file ( + tenant_id UUID NOT NULL, + + -- The id and inserted_at of the durable task which created this entry + durable_task_id BIGINT NOT NULL, + durable_task_inserted_at TIMESTAMPTZ NOT NULL, + + latest_invocation_count INTEGER NOT NULL, + + latest_inserted_at TIMESTAMPTZ NOT NULL, + -- A monotonically increasing node id for this durable event log scoped to the durable task. + -- Starts at 0 and increments by 1 for each new entry. + latest_node_id BIGINT NOT NULL, + -- The latest branch id. Branches represent different execution paths on a replay. + latest_branch_id BIGINT NOT NULL, + + CONSTRAINT v1_durable_event_log_file_pkey PRIMARY KEY (durable_task_id, durable_task_inserted_at) +) PARTITION BY RANGE(durable_task_inserted_at); + +CREATE TYPE v1_durable_event_log_kind AS ENUM ( + 'RUN', + 'WAIT_FOR', + 'MEMO' +); + +CREATE TABLE v1_durable_event_log_entry ( + tenant_id UUID NOT NULL, + + -- need an external id for consistency with the payload store logic (unfortunately) + external_id UUID NOT NULL, + -- The id and inserted_at of the durable task which created this entry + -- The inserted_at time of this event from a DB clock perspective. + -- Important: for consistency, this should always be auto-generated via the CURRENT_TIMESTAMP! + inserted_at TIMESTAMPTZ NOT NULL, + id BIGINT NOT NULL GENERATED ALWAYS AS IDENTITY, + + durable_task_id BIGINT NOT NULL, + durable_task_inserted_at TIMESTAMPTZ NOT NULL, + + kind v1_durable_event_log_kind NOT NULL, + -- The node number in the durable event log. This represents a monotonically increasing + -- sequence value generated from v1_durable_event_log_file.latest_node_id + node_id BIGINT NOT NULL, + -- The branch id when this event was first seen. A durable event log can be a part of many branches. 
+ branch_id BIGINT NOT NULL, + -- An idempotency key generated from the incoming data (using the type of event + wait for conditions or the trigger event payload + options) + -- to determine whether or not there's been a non-determinism error + idempotency_key BYTEA NOT NULL, + -- Access patterns: + -- Definite: we'll query directly for the node_id when a durable task is replaying its log + -- Possible: we may want to query a range of node_ids for a durable task + -- Possible: we may want to query a range of inserted_ats for a durable task + + -- Whether this callback has been seen by the engine or not. Note that is_satisfied _may_ change multiple + -- times through the lifecycle of a callback, and readers should not assume that once it's true it will always be true. + is_satisfied BOOLEAN NOT NULL DEFAULT FALSE, + + CONSTRAINT v1_durable_event_log_entry_pkey PRIMARY KEY (durable_task_id, durable_task_inserted_at, branch_id, node_id) +) PARTITION BY RANGE(durable_task_inserted_at); + + +CREATE TABLE v1_durable_event_log_branch_point ( + tenant_id UUID NOT NULL, + + id BIGINT NOT NULL GENERATED ALWAYS AS IDENTITY, + + inserted_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP, + + durable_task_id BIGINT NOT NULL, + + durable_task_inserted_at TIMESTAMPTZ NOT NULL, + + first_node_id_in_new_branch BIGINT NOT NULL, + + parent_branch_id BIGINT NOT NULL, + + next_branch_id BIGINT NOT NULL, + + CONSTRAINT v1_durable_event_log_branch_point_pkey PRIMARY KEY (durable_task_id, durable_task_inserted_at, parent_branch_id, first_node_id_in_new_branch, next_branch_id) +) PARTITION BY RANGE(durable_task_inserted_at); diff --git a/sql/schema/v1-olap.sql b/sql/schema/v1-olap.sql index 2b91ca1ba..8ac2764b0 100644 --- a/sql/schema/v1-olap.sql +++ b/sql/schema/v1-olap.sql @@ -5,9 +5,37 @@ CREATE TYPE v1_readable_status_olap AS ENUM ( 'RUNNING', 'CANCELLED', 'FAILED', - 'COMPLETED' + 'COMPLETED', + 'EVICTED' ); +-- NOTE: enum ordering puts EVICTED after COMPLETED, but logically EVICTED 
is +-- non-terminal and should rank below terminal statuses. These functions provide +-- the canonical priority ordering for aggregation and comparison. +CREATE OR REPLACE FUNCTION v1_status_to_priority(s v1_readable_status_olap) +RETURNS int IMMUTABLE LANGUAGE sql AS $$ + SELECT CASE s + WHEN 'QUEUED' THEN 1 + WHEN 'RUNNING' THEN 2 + WHEN 'EVICTED' THEN 3 + WHEN 'CANCELLED' THEN 4 + WHEN 'FAILED' THEN 5 + WHEN 'COMPLETED' THEN 6 + END; +$$; + +CREATE OR REPLACE FUNCTION v1_status_from_priority(p int) +RETURNS v1_readable_status_olap IMMUTABLE LANGUAGE sql AS $$ + SELECT CASE p + WHEN 1 THEN 'QUEUED' + WHEN 2 THEN 'RUNNING' + WHEN 3 THEN 'EVICTED' + WHEN 4 THEN 'CANCELLED' + WHEN 5 THEN 'FAILED' + WHEN 6 THEN 'COMPLETED' + END::v1_readable_status_olap; +$$; + -- HELPER FUNCTIONS FOR PARTITIONED TABLES -- CREATE OR REPLACE FUNCTION get_v1_partitions_before_date( targetTableName text, @@ -84,6 +112,7 @@ BEGIN PERFORM create_v1_partition_with_status(newTableName, 'COMPLETED'); PERFORM create_v1_partition_with_status(newTableName, 'CANCELLED'); PERFORM create_v1_partition_with_status(newTableName, 'FAILED'); + PERFORM create_v1_partition_with_status(newTableName, 'EVICTED'); -- If it's not already attached, attach the partition IF NOT EXISTS (SELECT 1 FROM pg_inherits WHERE inhrelid = newTableName::regclass) THEN @@ -272,7 +301,9 @@ CREATE TYPE v1_event_type_olap AS ENUM ( 'TIMED_OUT', 'RATE_LIMIT_ERROR', 'SKIPPED', - 'COULD_NOT_SEND_TO_WORKER' + 'COULD_NOT_SEND_TO_WORKER', + 'DURABLE_EVICTED', + 'DURABLE_RESTORING' ); -- this is a hash-partitioned table on the task_id, so that we can process batches of events in parallel @@ -333,6 +364,7 @@ CREATE TABLE v1_task_events_olap ( worker_id UUID, additional__event_data TEXT, additional__event_message TEXT, + durable_invocation_count INT NOT NULL DEFAULT 0, PRIMARY KEY (task_id, task_inserted_at, id) );