Merge branch 'main' into feat-durable-execution

This commit is contained in:
mrkaye97
2026-02-17 08:45:41 -05:00
200 changed files with 10451 additions and 2456 deletions
+1 -1
View File
@@ -9,7 +9,7 @@ on:
name: Release
jobs:
load:
runs-on: ubicloud-standard-4
runs-on: ubicloud-standard-8
timeout-minutes: 30
strategy:
matrix:
+211 -6
View File
@@ -54,7 +54,7 @@ jobs:
run: docker compose down
unit:
runs-on: ubicloud-standard-4
runs-on: ubicloud-standard-8
steps:
- uses: actions/checkout@v6
- name: Setup Go
@@ -72,7 +72,7 @@ jobs:
run: go test $(go list ./... | grep -v "quickstart") -v -failfast
integration:
runs-on: ubicloud-standard-4
runs-on: ubicloud-standard-8
env:
DATABASE_URL: postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet?sslmode=disable
@@ -117,7 +117,7 @@ jobs:
run: docker compose down
e2e:
runs-on: ubicloud-standard-4
runs-on: ubicloud-standard-8
timeout-minutes: 30
env:
DATABASE_URL: postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet?sslmode=disable
@@ -206,7 +206,7 @@ jobs:
run: docker compose down
e2e-pgmq:
runs-on: ubicloud-standard-4
runs-on: ubicloud-standard-8
timeout-minutes: 30
env:
DATABASE_URL: postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet?sslmode=disable
@@ -297,7 +297,7 @@ jobs:
run: docker compose down
load:
runs-on: ubicloud-standard-4
runs-on: ubicloud-standard-8
timeout-minutes: 30
strategy:
matrix:
@@ -339,8 +339,213 @@ jobs:
TESTING_MATRIX_PG_VERSION: ${{ matrix.pg-version }}
TESTING_MATRIX_OPTIMISTIC_SCHEDULING: ${{ matrix.optimistic-scheduling }}
load-online-migrate:
runs-on: ubicloud-standard-8
timeout-minutes: 30
env:
DATABASE_URL: postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet?sslmode=disable
steps:
- uses: actions/checkout@v6
with:
fetch-depth: 0
fetch-tags: true
- name: Setup Go
uses: actions/setup-go@v6
with:
go-version: "1.25"
- name: Compose
run: docker compose up -d
- name: Determine latest stable release tag
run: |
LATEST_TAG=$(git tag --sort=-v:refname | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+$' | head -1)
if [ -z "$LATEST_TAG" ]; then
echo "ERROR: No stable release tag found"
exit 1
fi
echo "Latest stable tag: $LATEST_TAG"
echo "LATEST_TAG=$LATEST_TAG" >> $GITHUB_ENV
- name: Pull old release images
run: |
docker pull ghcr.io/hatchet-dev/hatchet/hatchet-migrate:${{ env.LATEST_TAG }}
docker pull ghcr.io/hatchet-dev/hatchet/hatchet-admin:${{ env.LATEST_TAG }}
docker pull ghcr.io/hatchet-dev/hatchet/hatchet-engine:${{ env.LATEST_TAG }}
docker pull ghcr.io/hatchet-dev/hatchet/hatchet-loadtest:${{ env.LATEST_TAG }}
- name: Run old migrations
run: |
docker run --rm --network host \
-e DATABASE_URL="${{ env.DATABASE_URL }}" \
ghcr.io/hatchet-dev/hatchet/hatchet-migrate:${{ env.LATEST_TAG }}
- name: Setup config and seed database
run: |
mkdir -p generated
docker run --rm --network host \
-v ${{ github.workspace }}/generated:/hatchet/generated \
-e DATABASE_URL="${{ env.DATABASE_URL }}" \
-e SERVER_GRPC_PORT=7077 \
-e SERVER_GRPC_BROADCAST_ADDRESS=localhost:7077 \
-e SERVER_GRPC_INSECURE=true \
-e SERVER_AUTH_COOKIE_DOMAIN=localhost \
-e SERVER_AUTH_COOKIE_INSECURE=true \
ghcr.io/hatchet-dev/hatchet/hatchet-admin:${{ env.LATEST_TAG }} \
/hatchet/hatchet-admin quickstart --skip certs --generated-config-dir /hatchet/generated
- name: Generate API token
run: |
TOKEN=$(docker run --rm --network host \
-v ${{ github.workspace }}/generated:/hatchet/generated \
-e DATABASE_URL="${{ env.DATABASE_URL }}" \
-e SERVER_GRPC_PORT=7077 \
-e SERVER_GRPC_BROADCAST_ADDRESS=localhost:7077 \
-e SERVER_GRPC_INSECURE=true \
-e SERVER_AUTH_COOKIE_DOMAIN=localhost \
-e SERVER_AUTH_COOKIE_INSECURE=true \
ghcr.io/hatchet-dev/hatchet/hatchet-admin:${{ env.LATEST_TAG }} \
/hatchet/hatchet-admin token create --config /hatchet/generated)
echo "HATCHET_CLIENT_TOKEN=$TOKEN" >> $GITHUB_ENV
- name: Start old engine
run: |
docker run -d --name hatchet-engine --network host \
-v ${{ github.workspace }}/generated:/hatchet/generated \
-e DATABASE_URL="${{ env.DATABASE_URL }}" \
-e SERVER_GRPC_PORT=7077 \
-e SERVER_GRPC_BROADCAST_ADDRESS=localhost:7077 \
-e SERVER_GRPC_INSECURE=true \
-e SERVER_AUTH_COOKIE_DOMAIN=localhost \
-e SERVER_AUTH_COOKIE_INSECURE=true \
-e SERVER_MSGQUEUE_KIND=postgres \
-e SERVER_LOGGER_LEVEL=warn \
-e SERVER_LOGGER_FORMAT=console \
-e DATABASE_LOGGER_LEVEL=warn \
-e DATABASE_LOGGER_FORMAT=console \
ghcr.io/hatchet-dev/hatchet/hatchet-engine:${{ env.LATEST_TAG }} \
/hatchet/hatchet-engine --config /hatchet/generated
echo "Waiting 30s for engine to start..."
sleep 30
- name: Start old load test
run: |
docker run -d --name hatchet-loadtest --network host \
-e HATCHET_CLIENT_TOKEN="${{ env.HATCHET_CLIENT_TOKEN }}" \
-e HATCHET_CLIENT_TLS_STRATEGY=none \
-e HATCHET_CLIENT_HOST_PORT=localhost:7077 \
ghcr.io/hatchet-dev/hatchet/hatchet-loadtest:${{ env.LATEST_TAG }} \
/hatchet/hatchet-load-test loadtest -e 10 -d 240s -w 60s -s 100
- name: Wait then apply new migrations
run: |
echo "Waiting 30s for load test to get started..."
sleep 30
echo "Applying new migrations from current branch..."
go run ./cmd/hatchet-migrate
echo "New migrations applied successfully"
- name: Wait for load test to complete
run: |
echo "Waiting for load test container to finish..."
docker wait hatchet-loadtest
EXIT_CODE=$(docker inspect hatchet-loadtest --format='{{.State.ExitCode}}')
echo "Load test exited with code: $EXIT_CODE"
if [ "$EXIT_CODE" != "0" ]; then
echo "=== Load test logs ==="
docker logs hatchet-loadtest
echo "=== Engine logs ==="
docker logs hatchet-engine
exit 1
fi
echo "Load test passed"
- name: Teardown
if: always()
run: |
docker rm -f hatchet-loadtest hatchet-engine 2>/dev/null || true
docker compose down
load-deadlock:
runs-on: ubicloud-standard-8
timeout-minutes: 30
strategy:
matrix:
migrate-strategy: ["latest"]
rabbitmq-enabled: ["true"]
pg-version: ["17-alpine"]
optimistic-scheduling: ["true", "false"]
steps:
- uses: actions/checkout@v6
- name: Install Task
uses: arduino/setup-task@v2
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Setup Go
uses: actions/setup-go@v6
with:
go-version: "1.25"
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 10.16.1
run_install: false
- name: Go deps
run: go mod download
- name: Add go-deadlock dependency
run: go get github.com/sasha-s/go-deadlock@v0.3.6
- name: Patch sync imports to use go-deadlock (sed)
shell: bash
run: |
set -euo pipefail
# Replace ONLY the stdlib "sync" import with an alias that preserves `sync.X` call sites.
# - `import "sync"` -> `import sync "github.com/sasha-s/go-deadlock"`
# - within import blocks: `"sync"` -> `sync "github.com/sasha-s/go-deadlock"`
# NOTE: use `-i''` (no backup) for portability across GNU/BSD sed.
find . -name '*.go' -not -path './vendor/*' -print0 | xargs -0 sed -i'' -E \
-e 's/^([[:space:]]*)import[[:space:]]+"sync"[[:space:]]*$/\1import sync "github.com\/sasha-s\/go-deadlock"/' \
-e 's/^([[:space:]]*)"sync"[[:space:]]*$/\1sync "github.com\/sasha-s\/go-deadlock"/'
# Keep formatting/import grouping consistent after rewriting.
find . -name '*.go' -not -path './vendor/*' -print0 | xargs -0 gofmt -w
# Evidence in CI logs that rewriting happened (or not).
echo "Changed Go files (after patch):"
git diff --name-only -- '*.go' || true
echo ""
echo "Contents of pkg/scheduling/v1/scheduler.go after patch:"
echo "----"
cat pkg/scheduling/v1/scheduler.go
echo "----"
- name: Test (deadlock-instrumented)
run: |
# Disable gzip compression for load tests to reduce CPU overhead
# Compression adds overhead without benefit for 0kb payloads
HATCHET_CLIENT_DISABLE_GZIP_COMPRESSION=true go test -tags load ./... -p 5 -v -race -failfast -timeout 20m
env:
# This job adds go-deadlock + -race overhead; relax perf threshold to avoid flakes.
HATCHET_LOADTEST_AVERAGE_DURATION_THRESHOLD: 1s
# Give the engine a bit more time to come up under instrumentation.
HATCHET_LOADTEST_STARTUP_SLEEP: 30s
TESTING_MATRIX_MIGRATE: ${{ matrix.migrate-strategy }}
TESTING_MATRIX_RABBITMQ_ENABLED: ${{ matrix.rabbitmq-enabled }}
TESTING_MATRIX_PG_VERSION: ${{ matrix.pg-version }}
TESTING_MATRIX_OPTIMISTIC_SCHEDULING: ${{ matrix.optimistic-scheduling }}
rampup:
runs-on: ubicloud-standard-4
runs-on: ubicloud-standard-8
timeout-minutes: 30
strategy:
matrix:
+16 -1
View File
@@ -33,6 +33,11 @@ service Dispatcher {
rpc ReleaseSlot(ReleaseSlotRequest) returns (ReleaseSlotResponse) {}
rpc UpsertWorkerLabels(UpsertWorkerLabelsRequest) returns (UpsertWorkerLabelsResponse) {}
// GetVersion returns the dispatcher protocol version as a simple integer.
// SDKs use this to determine feature support (e.g. slot_config registration).
// Old engines that do not implement this RPC will return UNIMPLEMENTED.
rpc GetVersion(GetVersionRequest) returns (GetVersionResponse) {}
}
message WorkerLabels {
@@ -67,7 +72,8 @@ message WorkerRegisterRequest {
// (optional) the services for this worker
repeated string services = 3;
// (optional) the number of slots this worker can handle
// (optional) the number of default slots this worker can handle
// deprecated: use slot_config instead
optional int32 slots = 4;
// (optional) worker labels (i.e. state or other metadata)
@@ -79,6 +85,9 @@ message WorkerRegisterRequest {
// (optional) information regarding the runtime environment of the worker
optional RuntimeInfo runtime_info = 7;
// (optional) slot config for this worker (slot_type -> units)
map<string, int32> slot_config = 9;
}
message WorkerRegisterResponse {
@@ -403,3 +412,9 @@ message ReleaseSlotRequest {
}
message ReleaseSlotResponse {}
message GetVersionRequest {}
message GetVersionResponse {
string version = 1;
}
@@ -130,6 +130,8 @@ V1WebhookSourceName:
$ref: "./v1/webhook.yaml#/V1WebhookSourceName"
V1WebhookAuthType:
$ref: "./v1/webhook.yaml#/V1WebhookAuthType"
V1WebhookResponse:
$ref: "./v1/webhook.yaml#/V1WebhookResponse"
RateLimit:
$ref: "./rate_limits.yaml#/RateLimit"
RateLimitList:
@@ -212,3 +212,14 @@ V1UpdateWebhookRequest:
staticPayload:
type: object
description: The static payload to use for the webhook. This is used to send a static payload with the webhook.
V1WebhookResponse:
type: object
properties:
message:
type: string
description: The message for the webhook response
event:
$ref: "event.yaml#/V1Event"
challenge:
type: string
@@ -76,6 +76,19 @@ WorkerType:
- MANAGED
- WEBHOOK
WorkerSlotConfig:
type: object
description: Slot availability and limits for a slot type.
properties:
available:
type: integer
description: The number of available units for this slot type.
limit:
type: integer
description: The maximum number of units for this slot type.
required:
- limit
RegisteredWorkflow:
type: object
properties:
@@ -136,12 +149,11 @@ Worker:
- ACTIVE
- INACTIVE
- PAUSED
maxRuns:
type: integer
description: The maximum number of runs this worker can execute concurrently.
availableRuns:
type: integer
description: The number of runs this worker can execute concurrently.
slotConfig:
type: object
description: Slot availability and limits for this worker (slot_type -> { available, limit }).
additionalProperties:
$ref: "#/WorkerSlotConfig"
dispatcherId:
type: string
description: "the id of the assigned dispatcher, in UUID format"
@@ -281,6 +281,14 @@ Step:
timeout:
type: string
description: The timeout of the step.
isDurable:
type: boolean
description: Whether the step is durable.
slotRequests:
type: object
description: Slot requests for the step (slot_type -> units).
additionalProperties:
type: integer
children:
type: array
items:
@@ -124,8 +124,7 @@ V1WebhookGetDeleteReceiveUpdate:
content:
application/json:
schema:
type: object
additionalProperties: true
$ref: "../../../components/schemas/_index.yaml#/V1WebhookResponse"
"400":
content:
application/json:
+2
View File
@@ -168,6 +168,8 @@ message CreateTaskOpts {
repeated Concurrency concurrency = 11; // (optional) the task concurrency options
optional TaskConditions conditions = 12; // (optional) the task conditions for creating the task
optional string schedule_timeout = 13; // (optional) the timeout for the schedule
bool is_durable = 14; // (optional) whether the task is durable
map<string, int32> slot_requests = 15; // (optional) slot requests (slot_type -> units)
}
message CreateTaskRateLimit {
+2 -1
View File
@@ -5,12 +5,13 @@ import (
"fmt"
"github.com/google/uuid"
"github.com/labstack/echo/v4"
"github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors"
"github.com/hatchet-dev/hatchet/api/v1/server/oas/gen"
"github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers/v1"
v1 "github.com/hatchet-dev/hatchet/pkg/repository"
"github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
"github.com/labstack/echo/v4"
)
func (t *V1FiltersService) V1FilterCreate(ctx echo.Context, request gen.V1FilterCreateRequestObject) (gen.V1FilterCreateResponseObject, error) {
+2 -1
View File
@@ -2,12 +2,13 @@ package filtersv1
import (
"github.com/google/uuid"
"github.com/labstack/echo/v4"
"github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors"
"github.com/hatchet-dev/hatchet/api/v1/server/oas/gen"
"github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers/v1"
v1 "github.com/hatchet-dev/hatchet/pkg/repository"
"github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
"github.com/labstack/echo/v4"
)
func (t *V1FiltersService) V1FilterList(ctx echo.Context, request gen.V1FilterListRequestObject) (gen.V1FilterListResponseObject, error) {
+17 -9
View File
@@ -22,7 +22,9 @@ import (
"github.com/labstack/echo/v4"
"github.com/hatchet-dev/hatchet/api/v1/server/oas/gen"
"github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers/v1"
"github.com/hatchet-dev/hatchet/internal/cel"
"github.com/hatchet-dev/hatchet/pkg/repository"
"github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
)
@@ -79,7 +81,12 @@ func (w *V1WebhooksService) V1WebhookReceive(ctx echo.Context, request gen.V1Web
}
if isChallenge {
return gen.V1WebhookReceive200JSONResponse(challengeResponse), nil
res, err := transformers.ToV1WebhookResponse(nil, challengeResponse, nil)
if err != nil {
return nil, fmt.Errorf("failed to transform response: %w", err)
}
return gen.V1WebhookReceive200JSONResponse(*res), nil
}
ok, validationError := w.validateWebhook(rawBody, *webhook, *ctx.Request())
@@ -305,7 +312,7 @@ func (w *V1WebhooksService) V1WebhookReceive(ctx echo.Context, request gen.V1Web
}, nil
}
_, err = w.config.Ingestor.IngestEvent(
ev, err := w.config.Ingestor.IngestEvent(
ctx.Request().Context(),
tenant,
eventKey,
@@ -320,9 +327,12 @@ func (w *V1WebhooksService) V1WebhookReceive(ctx echo.Context, request gen.V1Web
return nil, fmt.Errorf("failed to ingest event")
}
return gen.V1WebhookReceive200JSONResponse(map[string]interface{}{
"message": "ok",
}), nil
res, err := transformers.ToV1WebhookResponse(repository.StringPtr("ok"), nil, ev)
if err != nil {
return nil, fmt.Errorf("failed to transform response: %w", err)
}
return gen.V1WebhookReceive200JSONResponse(*res), nil
}
func computeHMACSignature(payload []byte, secret []byte, algorithm sqlcv1.V1IncomingWebhookHmacAlgorithm, encoding sqlcv1.V1IncomingWebhookHmacEncoding) (string, error) {
@@ -389,7 +399,7 @@ func (vr ValidationError) ToResponse() (gen.V1WebhookReceiveResponseObject, erro
type IsValid bool
type IsChallenge bool
func (w *V1WebhooksService) performChallenge(webhookPayload []byte, webhook sqlcv1.V1IncomingWebhook, request http.Request) (IsChallenge, map[string]interface{}, error) {
func (w *V1WebhooksService) performChallenge(webhookPayload []byte, webhook sqlcv1.V1IncomingWebhook, request http.Request) (IsChallenge, *string, error) {
switch webhook.SourceName {
case sqlcv1.V1IncomingWebhookSourceNameSLACK:
/* Slack Events API URL verification challenges come as application/json with direct JSON payload
@@ -404,9 +414,7 @@ func (w *V1WebhooksService) performChallenge(webhookPayload []byte, webhook sqlc
}
if challenge, ok := payload["challenge"].(string); ok && challenge != "" {
return true, map[string]interface{}{
"challenge": challenge,
}, nil
return true, repository.StringPtr(challenge), nil
}
return false, nil, nil
+12 -15
View File
@@ -19,25 +19,17 @@ func (t *WorkerService) WorkerGet(ctx echo.Context, request gen.WorkerGetRequest
}
func (t *WorkerService) workerGetV1(ctx echo.Context, tenant *sqlcv1.Tenant, request gen.WorkerGetRequestObject) (gen.WorkerGetResponseObject, error) {
reqCtx := ctx.Request().Context()
workerV0 := ctx.Get("worker").(*sqlcv1.GetWorkerByIdRow)
worker, err := t.config.V1.Workers().GetWorkerById(workerV0.Worker.ID)
if err != nil {
return nil, err
}
slotState, err := t.config.V1.Workers().ListWorkerState(
worker.Worker.TenantId,
worker.Worker.ID,
int(worker.Worker.MaxRuns),
)
worker, err := t.config.V1.Workers().GetWorkerById(reqCtx, workerV0.Worker.ID)
if err != nil {
return nil, err
}
workerIdToActions, err := t.config.V1.Workers().GetWorkerActionsByWorkerId(
reqCtx,
worker.Worker.TenantId,
[]uuid.UUID{worker.Worker.ID},
)
@@ -46,7 +38,12 @@ func (t *WorkerService) workerGetV1(ctx echo.Context, tenant *sqlcv1.Tenant, req
return nil, err
}
workerWorkflows, err := t.config.V1.Workers().GetWorkerWorkflowsByWorkerId(tenant.ID, worker.Worker.ID)
workerSlotConfig, err := buildWorkerSlotConfig(ctx.Request().Context(), t.config.V1.Workers(), worker.Worker.TenantId, []uuid.UUID{worker.Worker.ID})
if err != nil {
return nil, err
}
workerWorkflows, err := t.config.V1.Workers().GetWorkerWorkflowsByWorkerId(reqCtx, tenant.ID, worker.Worker.ID)
if err != nil {
return nil, err
@@ -59,14 +56,14 @@ func (t *WorkerService) workerGetV1(ctx echo.Context, tenant *sqlcv1.Tenant, req
respStepRuns := make([]gen.RecentStepRuns, 0)
slots := int(worker.RemainingSlots)
slotConfig := workerSlotConfig[worker.Worker.ID]
workerResp := *transformersv1.ToWorkerSqlc(&worker.Worker, &slots, &worker.WebhookUrl.String, actions, &workerWorkflows)
workerResp := *transformersv1.ToWorkerSqlc(&worker.Worker, slotConfig, actions, &workerWorkflows)
workerResp.RecentStepRuns = &respStepRuns
workerResp.Slots = transformersv1.ToSlotState(slotState, slots)
affinity, err := t.config.V1.Workers().ListWorkerLabels(
reqCtx,
worker.Worker.TenantId,
worker.Worker.ID,
)
+23 -7
View File
@@ -46,7 +46,7 @@ func (t *WorkerService) workerListV0(ctx echo.Context, tenant *sqlcv1.Tenant, re
telemetry.AttributeKV{Key: "tenant.id", Value: tenant.ID},
)
workers, err := t.config.V1.Workers().ListWorkers(tenantId, opts)
workers, err := t.config.V1.Workers().ListWorkers(reqCtx, tenantId, opts)
if err != nil {
listSpan.RecordError(err)
@@ -58,12 +58,21 @@ func (t *WorkerService) workerListV0(ctx echo.Context, tenant *sqlcv1.Tenant, re
)
rows := make([]gen.Worker, len(workers))
workerIds := make([]uuid.UUID, 0, len(workers))
for _, worker := range workers {
workerIds = append(workerIds, worker.Worker.ID)
}
workerSlotConfig, err := buildWorkerSlotConfig(reqCtx, t.config.V1.Workers(), tenantId, workerIds)
if err != nil {
listSpan.RecordError(err)
return nil, err
}
for i, worker := range workers {
workerCp := worker
slots := int(worker.RemainingSlots)
rows[i] = *transformers.ToWorkerSqlc(&workerCp.Worker, &slots, &workerCp.WebhookUrl.String, nil)
slotConfig := workerSlotConfig[workerCp.Worker.ID]
rows[i] = *transformers.ToWorkerSqlc(&workerCp.Worker, slotConfig, nil)
}
return gen.WorkerList200JSONResponse(
@@ -90,7 +99,7 @@ func (t *WorkerService) workerListV1(ctx echo.Context, tenant *sqlcv1.Tenant, re
telemetry.AttributeKV{Key: "tenant.id", Value: tenant.ID},
)
workers, err := t.config.V1.Workers().ListWorkers(tenantId, opts)
workers, err := t.config.V1.Workers().ListWorkers(listCtx, tenantId, opts)
if err != nil {
listSpan.RecordError(err)
@@ -120,6 +129,7 @@ func (t *WorkerService) workerListV1(ctx echo.Context, tenant *sqlcv1.Tenant, re
)
workerIdToActionIds, err := t.config.V1.Workers().GetWorkerActionsByWorkerId(
listCtx,
tenant.ID,
workerIds,
)
@@ -129,6 +139,12 @@ func (t *WorkerService) workerListV1(ctx echo.Context, tenant *sqlcv1.Tenant, re
return nil, err
}
workerSlotConfig, err := buildWorkerSlotConfig(listCtx, t.config.V1.Workers(), tenant.ID, workerIds)
if err != nil {
actionsSpan.RecordError(err)
return nil, err
}
telemetry.WithAttributes(actionsSpan,
telemetry.AttributeKV{Key: "worker_actions.mappings.count", Value: len(workerIdToActionIds)},
)
@@ -137,10 +153,10 @@ func (t *WorkerService) workerListV1(ctx echo.Context, tenant *sqlcv1.Tenant, re
for i, worker := range workers {
workerCp := worker
slots := int(worker.RemainingSlots)
actions := workerIdToActionIds[workerCp.Worker.ID.String()]
slotConfig := workerSlotConfig[workerCp.Worker.ID]
rows[i] = *transformersv1.ToWorkerSqlc(&workerCp.Worker, &slots, &workerCp.WebhookUrl.String, actions, nil)
rows[i] = *transformersv1.ToWorkerSqlc(&workerCp.Worker, slotConfig, actions, nil)
}
return gen.WorkerList200JSONResponse(
@@ -0,0 +1,66 @@
package workers
import (
"context"
"fmt"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/api/v1/server/oas/gen"
)
type slotAvailabilityRepository interface {
ListWorkerSlotConfigs(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) (map[uuid.UUID]map[string]int32, error)
ListAvailableSlotsForWorkers(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID, slotType string) (map[uuid.UUID]int32, error)
ListAvailableSlotsForWorkersAndTypes(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID, slotTypes []string) (map[uuid.UUID]map[string]int32, error)
}
func buildWorkerSlotConfig(ctx context.Context, repo slotAvailabilityRepository, tenantId uuid.UUID, workerIds []uuid.UUID) (map[uuid.UUID]map[string]gen.WorkerSlotConfig, error) {
if len(workerIds) == 0 {
return map[uuid.UUID]map[string]gen.WorkerSlotConfig{}, nil
}
slotConfigByWorker, err := repo.ListWorkerSlotConfigs(ctx, tenantId, workerIds)
if err != nil {
return nil, fmt.Errorf("could not list worker slot config: %w", err)
}
slotTypes := make(map[string]struct{})
slotTypesArr := make([]string, 0)
for _, config := range slotConfigByWorker {
for slotType := range config {
if _, ok := slotTypes[slotType]; ok {
continue
}
slotTypes[slotType] = struct{}{}
slotTypesArr = append(slotTypesArr, slotType)
}
}
availableByWorker, err := repo.ListAvailableSlotsForWorkersAndTypes(ctx, tenantId, workerIds, slotTypesArr)
if err != nil {
return nil, fmt.Errorf("could not list available slots for workers and types: %w", err)
}
result := make(map[uuid.UUID]map[string]gen.WorkerSlotConfig, len(slotConfigByWorker))
for workerId, config := range slotConfigByWorker {
workerSlots := make(map[string]gen.WorkerSlotConfig, len(config))
for slotType, limit := range config {
available := 0
if workerAvailability, ok := availableByWorker[workerId]; ok {
if value, ok := workerAvailability[slotType]; ok {
available = int(value)
}
}
workerSlots[slotType] = gen.WorkerSlotConfig{
Available: &available,
Limit: int(limit),
}
}
result[workerId] = workerSlots
}
return result, nil
}
+9 -1
View File
@@ -1,6 +1,7 @@
package workers
import (
"github.com/google/uuid"
"github.com/labstack/echo/v4"
"github.com/hatchet-dev/hatchet/api/v1/server/oas/gen"
@@ -36,5 +37,12 @@ func (t *WorkerService) WorkerUpdate(ctx echo.Context, request gen.WorkerUpdateR
return nil, err
}
return gen.WorkerUpdate200JSONResponse(*transformers.ToWorkerSqlc(updatedWorker, nil, nil, nil)), nil
workerSlotConfig, err := buildWorkerSlotConfig(ctx.Request().Context(), t.config.V1.Workers(), worker.Worker.TenantId, []uuid.UUID{updatedWorker.ID})
if err != nil {
return nil, err
}
slotConfig := workerSlotConfig[updatedWorker.ID]
return gen.WorkerUpdate200JSONResponse(*transformers.ToWorkerSqlc(updatedWorker, slotConfig, nil)), nil
}
+337 -313
View File
@@ -999,15 +999,21 @@ type SlackWebhook struct {
// Step defines model for Step.
type Step struct {
Action string `json:"action"`
Children *[]string `json:"children,omitempty"`
JobId string `json:"jobId"`
Metadata APIResourceMeta `json:"metadata"`
Parents *[]string `json:"parents,omitempty"`
Action string `json:"action"`
Children *[]string `json:"children,omitempty"`
// IsDurable Whether the step is durable.
IsDurable *bool `json:"isDurable,omitempty"`
JobId string `json:"jobId"`
Metadata APIResourceMeta `json:"metadata"`
Parents *[]string `json:"parents,omitempty"`
// ReadableId The readable id of the step.
ReadableId string `json:"readableId"`
TenantId string `json:"tenantId"`
// SlotRequests Slot requests for the step (slot_type -> units).
SlotRequests *map[string]int `json:"slotRequests,omitempty"`
TenantId string `json:"tenantId"`
// Timeout The timeout of the step.
Timeout *string `json:"timeout,omitempty"`
@@ -1973,6 +1979,15 @@ type V1WebhookList struct {
Rows *[]V1Webhook `json:"rows,omitempty"`
}
// V1WebhookResponse defines model for V1WebhookResponse.
type V1WebhookResponse struct {
Challenge *string `json:"challenge,omitempty"`
Event *V1Event `json:"event,omitempty"`
// Message The message for the webhook response
Message *string `json:"message,omitempty"`
}
// V1WebhookSourceName defines model for V1WebhookSourceName.
type V1WebhookSourceName string
@@ -2114,9 +2129,6 @@ type Worker struct {
// Actions The actions this worker can perform.
Actions *[]string `json:"actions,omitempty"`
// AvailableRuns The number of runs this worker can execute concurrently.
AvailableRuns *int `json:"availableRuns,omitempty"`
// DispatcherId the id of the assigned dispatcher, in UUID format
DispatcherId *openapi_types.UUID `json:"dispatcherId,omitempty"`
@@ -2127,11 +2139,8 @@ type Worker struct {
LastHeartbeatAt *time.Time `json:"lastHeartbeatAt,omitempty"`
// LastListenerEstablished The time this worker last sent a heartbeat.
LastListenerEstablished *time.Time `json:"lastListenerEstablished,omitempty"`
// MaxRuns The maximum number of runs this worker can execute concurrently.
MaxRuns *int `json:"maxRuns,omitempty"`
Metadata APIResourceMeta `json:"metadata"`
LastListenerEstablished *time.Time `json:"lastListenerEstablished,omitempty"`
Metadata APIResourceMeta `json:"metadata"`
// Name The name of the worker.
Name string `json:"name"`
@@ -2143,6 +2152,9 @@ type Worker struct {
RegisteredWorkflows *[]RegisteredWorkflow `json:"registeredWorkflows,omitempty"`
RuntimeInfo *WorkerRuntimeInfo `json:"runtimeInfo,omitempty"`
// SlotConfig Slot availability and limits for this worker (slot_type -> { available, limit }).
SlotConfig *map[string]WorkerSlotConfig `json:"slotConfig,omitempty"`
// Slots The semaphore slot state for the worker.
Slots *[]SemaphoreSlots `json:"slots,omitempty"`
@@ -2188,6 +2200,15 @@ type WorkerRuntimeInfo struct {
// WorkerRuntimeSDKs defines model for WorkerRuntimeSDKs.
type WorkerRuntimeSDKs string
// WorkerSlotConfig Slot availability and limits for a slot type.
type WorkerSlotConfig struct {
// Available The number of available units for this slot type.
Available *int `json:"available,omitempty"`
// Limit The maximum number of units for this slot type.
Limit int `json:"limit"`
}
// WorkerType defines model for WorkerType.
type WorkerType string
@@ -8798,7 +8819,7 @@ type V1WebhookReceiveResponseObject interface {
VisitV1WebhookReceiveResponse(w http.ResponseWriter) error
}
type V1WebhookReceive200JSONResponse map[string]interface{}
type V1WebhookReceive200JSONResponse V1WebhookResponse
func (response V1WebhookReceive200JSONResponse) VisitV1WebhookReceiveResponse(w http.ResponseWriter) error {
w.Header().Set("Content-Type", "application/json")
@@ -16139,7 +16160,7 @@ func (sh *strictHandler) WorkflowVersionGet(ctx echo.Context, workflow openapi_t
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
"H4sIAAAAAAAC/+y9e3PbOLIo/lVY+v2q7kyV5Fcmc+ak6v6h2EqiiWN7JTm5e+ekvBAJSxhTJJcA7WhT",
"H4sIAAAAAAAC/+y9e3PbOLIo/lVY+v2q7kyV5Fcmc+ak6v6h2EqiiWP7SHJy986mvBAJSxhTJJcA7WhT",
"/u638CJBEiBBvSwlrNracUQ8Go3uRqPRj+8dN1xEYQADgjtvvnewO4cLwP7s3wwHcRzG9O8oDiMYEwTZ",
"Fzf0IP2vB7Ebo4igMOi86QDHTTAJF84HQNw5JA6kvR3WuNuB38Ai8mHnzelvJyfdzn0YLwDpvOkkKCC/",
"/9bpdsgygp03HRQQOINx57mbH748m/Jv5z6MHTJHmM+pTtfpZw0foYBpATEGM5jNikmMghmbNHTxnY+C",
@@ -16159,303 +16180,306 @@ var swaggerSpec = []string{
"rsNhCls3FS8pMrRIdF0YEa4jjOC/E8iFSR6fXCHgmF2POhcoMBNrt/OtF4II9ehlYQaDHvxGYtAjYMag",
"eAQ+ovvSeZOuuJskyOs8lwiJw6tb79vEf+A62OARBsS4ZPgo70JW+qpmyFrNlc/w9bnbOafnkG8B0NDL",
"g9R4O7ILV8K4rcn2WC2IQsiWFAZuEscwcJeXaIHImMSAwNmSn97JgnY471+dDy7vhld3N6Pr96PBeNzp",
"di5G1zd3V4Mvg/Gk0+3843ZwO8j++X50fXtzN7q+vbq4G12/HV4pe5xBqcw9dsMIqnN+uR59fHd5/aXT",
"7Uz644+1/SEh9FediIkhxtpLKWVnNxvDydp2qRLoUW16BgNIMeIAemQ693G4cAjADw4KooTgriMZuetA",
"4h7pxJBfxGslgZr245kRwSgJsH4hC/ANLZKFEySLKdXX77OlEecpjB/u/fDJiZMgL0BRQF6dae/zWG6J",
"Jbh8C2lHAqMRBB7VlnRKN4U2Ft/TwxY6tBvF+NMcuXN+yKmbg/kO83sxPwVqJKzAVnEDuipNyGXqRJC6",
"NgJIHW2V9v0BLrnu53mILh34N7nu6h4YbColmPgP3230Mi7q5OFrllf82Bka+MNL4sxkIrcGihMZYYcJ",
"+/Jm2B8R4QKRAPldORFbjP747fPDl9841zp92fhfLZCGozDAsIw1IhWaMsZyYFWDwUcxw3Eeh8EXwbqT",
"GM1mMDbuY0ZlnxS1pzSwG4fBoJpuaZMrsQFlpZmKPe3IUYzCGJFlkbSZeBHSqfPmFTu8+N+nZZIvKQh0",
"tq5ucQqcpVV9TTFYfVbrcVYgurRNKupTCmQnqbLNGTL0YzGGshvgQXeJo/3ZKWTonm2TuhnlMeRXKXrT",
"cZocC+Vh2ScGHBvQuUc+gRSiek7g11GGtWzzxldjxbpg3EUSRsjtxyZ2XID/hIEjFXyHUozzS3909atc",
"/fhq7LAx1hFjqaa7QMH/Pu0uwLf/ffb697LKmwJr5npudOz7MCaDBUD++zhMIrP8pk2wTlj6CBO6Rt5C",
"mrZieiJa2n1WWL6HHmGXzVheuwC1buU1lxw+uHav2Se5rXStVJ/gl4yN7K1cV7cTh36tbsRX8wlSfWxE",
"22vx0RGD1WHFjI9ghgL4GcZSoNfDJBs/dzsweERxGCwgN3PX9x0oHawvytwWvok9YEgMg2kIYg8Fswsh",
"Z/U6Fjc/G+V5NgyXyiR0MAljyB5h9HBne4P9ZGYQg34y2/zCu+LNiZ14zwYTJQNKT0mZJoFtD8IqpGoV",
"C61EUWzAZfttqk40mmsNw84Cknno1ZsJFHR94l0UYq88blfWfbodTi1DTzuHvMPVfDZqbrKBYH7tMGYj",
"VQqabqDC7DlYBWVkdJDuQS2dXiKdvIvADAXpe0PVLt6kLVNFnonupyb2IpVvrN5FdLSjGDYuBu/6t5eT",
"DrOL6s0a6gDXsQfjt8t38lVZDhNIxReWLK/ZSEz73aXau6bWugZfk/Sltv4IK7JaGdzhRV6AF1/oxfu9",
"cSGS/kdJME4WCxDX2n3YVn0pd6tgSa4zpwv5Kjdcnon5TW9yI3F++XN8feVMlwTiX+uV91RtZ9N/XI8G",
"5Bh7wPzpcsp8LwHdFygrQBQS5ALF0JUgSSkCsNvhCpJZfpgkkIXoGUMQu3PtaWSi9/LrIbO5ax+RmZaZ",
"mTtlQ62R02BguwfIYmjeqsm4EQw8YY+uGlg0azLyvxOY1EPMWzUZN06CwAJi0azJyDhxXQi9eqDThvaj",
"p1SOq56GNDdF9u1IvQqvwGNrnFhmsa68N/0ZTjWCvMrPjslzxdNOnGJ/h9OjLb2QlsbEBEb20mtMYKRD",
"bKUqTNAChgnRL198rFv647pq8KOi/srrF1u6Tq/9M5yOkqBCuvE3cLt37bRT6vBpbjKCABsuZvcoQHje",
"bOq/OUVW7SglWt7SsHtrEF0MceLrzc+YgJg0WwwmgCTYYj30fOJt5fOWeIazJnG6+c2p3H2AcTULNFmu",
"opTWgawczIWe618b+SCSQNJdMHPNON0mqXrcDK4uhlfvO93O6Pbqiv81vj0/HwwuBhedbuddf3jJ/uAv",
"1/zvt/3zj9fv3mm1FarG6f3ZbL1gi101my0mYS9L2Py0tFPlMfXN0eqPFOK8ER6/MLx5aGpdHRTYxEQ6",
"MmPL9IH78AVO52H48OKLVGDZ1BLD2SUKYCPnPOYeQT9TRYJKFnmk+uHM8VEAm3hicQ9+7Rx0ONGgVkkx",
"9eYtNDaJArZUr7UsrCCd4WuGqkv4CP284ebtLRU0w6t3151u50t/dNXpdgaj0fVIL1OUcdLLk9X+5yDQ",
"CRLx/eXvnpKs9NKDf1zj/pkfoeENVHSuuINqEKD6an3vCPeZu4jR7lm3E8Bv8l+vup0gWbB/4M6b0xNm",
"Bc5xVq6zzqVTeudEnArTic+srlUKLFr/Z/itPPIru5GzdWk9UUMCfPUSS5syy46PMOGvG1n80InNLU4j",
"sf5Bb7CfIImRq5HHQbK4sbtiMzqWF+0j03r/YXWr5mMh7pjKrtjGAUd212k+orhUH3VqHSIyUHOzdFWE",
"6OT/CBDI/MnKqLSy2TIXOOZHpXdxA5iM4D3yDQ+zzEFZeDCrgzHv5Zh1hMyLaAtu3myiz8BPoK3jXMyf",
"WrHDImOEyVfs+hMKvPBJv+2bsCnXIPrRvA4pTTTrWAAP2i6Cf9NPwb+xZdC9RIHiEZahmcdw3IexCz1b",
"zw/lnqDsl1xvClWO0r6qdL0Hh2HGY9rjMP28xoFYHKN0JHJsSqwpqNSOBl0YkLFyny28EzHwTPTMvzo6",
"7z/VANHkhrqKRWINa8LWTAYCpZnNoHSBLvp3V/NIuhFd9W4tYCmOrhX/cIYwgTH05M1eEzxg2OfUdxh5",
"TpyOw6OzEGafYXyk8VkvIc/OVyTzVa6azCJCxejkOYL0r58nlmIEIx8sf6iwBb4kxUyFjSvLccfLrk9p",
"/vrkpGa9BbhNqzaZkZTu9kdYwe5nC5+ELqYyj4m+CrbS+w9rHX/pqAWLj2bAGcTkNjZonrejS+bXBQOP",
"OXqKSz92SLgdFwTTcZkE6N9UN/JgQNA9gnGqWwt1UMT2cX9UNSR2Cv0wmEmIa6XsFt1h7Qy9lS6uY3cO",
"vcSHCqWt69K+ZZf0bodw13t7PaGJF3s2+FcFPd7m7N4sNIv+MT7/MLi4pT/qlMF05u26Ce6pw1959ZnX",
"3y6c+xqT2Ob8AUdJcK4agRs/JnEAdn2WKgDYLHFspbh/KXV4ScfJjCgqfSbLtPs28R8uoA8JfMdCMFZ0",
"AUwjCFIPwAe4dNjl0okA4ulGeJCHM13mc008wOXpG9b0lLuqnfF/nTVJO9HtRCDOrqj6q1NDuuEjfqm7",
"kK1IjRsY7LnhFhuPz/t075tJvhL1sKifQiuNNr2+cjzkQzHdeIEC8c9TG6/bagyZlGSPffc2vJIiETfM",
"qaRfil2aJWVB3aqcS1Vz6HNC6aMBN0LupTxRDUC+ZaknKKWYLBrrbua6yl9ekjddmZG7ec6NdalKQV9T",
"FlQXKYFpvjoTZ26TZ9KEJVvley2KVuHMPTBtay4Hz6tJ5VUCIsqjmMzfqsZU/To8hgsQzcMYjv2QbNj2",
"nbMr6x0UuTkT+yF/AhM97B0qVrRDY1WR0kSuERg5cSIXVm9qUJ3Q6heKfF96Z9qvtHTRqLBQW4Ne4M0M",
"LV3V1l6wq1OqUT1zyr40cxAE0DeBKT47yNM//WE6uPPER9c/qvARrox2dDkFs6evOMlaFjCwMK2efltj",
"6bS7ed1s8HUWvRe2OzvrmkREiu48XXQVMtSeLwRGJnGndyWeI9+LYd4bslbn3Yr7L7+74WaQxKuloqkl",
"k7W80g0zmClAWUWOHKQXrdhA7hZUsfVb8ELvk0EU5lysFKvXhnzVGRF+MT1p1NJArjs+D5OA6ME1X3VW",
"eZvO+lRgqGi+zjnbW/hqi9CCtP3m2S5MiAnEFTmS+U7174V5wg6ZG/f9510qdmYNJcs27IW2NYkTC1nT",
"ZMVpl4oV8/f11S2cKQWmK6v07xeo68fuHD3Cg5RLzS3neyViQnqR0neq4PoYknhZIUW3xo/K7WU3LFFx",
"UVCQIPGov3Sa6H0f7vV5BtT6rYk2hlwCrpkKzA+2nr6DEiWgITnJgxbrEa4urAelG/gI5QOebe+x7GNF",
"d+9QjMkYciXZnvYuQdNeDSOx+C0jB2Bh5hSzCprU0Ai+vxXEvC9h8DkyrSXkTKRL09FowB/K766u775c",
"jz4ORp1u9uOoPxncXQ4/DSfZQ/rw6v3dZPhpcHF3fcvMV+Px8P0Vf2qf9EcT9lf//OPV9ZfLwcV7/kI/",
"vBqOP+Qf60eDyeif/DFffbenQ1/fTu5Gg3ejgegzGiiTqHOPL69py8tBf5yOORxc3L39593tmC1FJlS9",
"G91e3fH8rB8H/7xT3QcMTQSgWiuajmMUpCqxMmKBo+FkeN6/rBqtyu9B/HXH0fBpcFVAfAO/CPE3b10V",
"HDgB+EGfADSLxa9MOiL6J5iNko+1b9JRZ2GVbSozjdpM0qkaXUCgkf5pilT7lDqFtKqaC0Loe+LNw04q",
"sn3YfK7VkADfqrMWdWlCmmJRExiLpHADQ+q+L7I4Quiw1tLKtGC91NS3akmNAPhLglx8HZHrhFSMmpmt",
"5gA7YUSg5wjTRDqIfo51k8VtPSG7Kd3a2vnasoQBDTPs1aZ9Z3Blo381klIhIeRuM0FuKdWFOSGkds17",
"oGbo90KXOHMW9jjRdkbsvew5vyoUzETScrw7IcFzsA2+RYjuMov8ZsBUj8978Wmw88QqM7AgdgfE0AFR",
"FIfAnaNgxks0MARXzS8TWnIiYfEsK0LBlyxrYZThYQEwlbhQbIrvAPKTGFqAwryJVUBy+dVZuiD9nD7A",
"fKnm18EsVA4EYmfZC2ExQ291UAz4JonsHbO2iQNaG/3m3MsmDiAyoktQ1WZfiMySQAuwWS4M8geR1BP9",
"0AU+i596hH4Ysc8sLNdL3EIxNEW9U5LObi/b7HNa4KPyrVSWdxGlvXZZ8mS1lLZ1T2eCRU0Pf/KzGWu8",
"RdXTHxshlxneeIrXHEUyF2+2V2p+PSM1ctrZm8NJkHKzM4nvaRn+FyMo+1SOlPXqWt9iGPMeN8nUR24V",
"KbDxKrIyqzDvzaaL/Vtl00din6QUvf5yxSwG/YtPw6tOt/Np8OntYFQhO6tD9esvZ03uYlWYyMGhGMtW",
"vRoXxyuGLKUIkJRfLGCTGl4Go7vx5fWk0+0MPnObxaQ//ng3ur1iNpHrKyU8g6UYOb/+NLx6f/dl8PbD",
"9fXHCtzntCidIgniRUXwO/suXLq1ApqH6ZPQeQIxSydXUq94b30webO8APqUAJuJ8udjm5eoh3+9VGUp",
"TdSzb0pBdjH+dRvWPLR/AQmMZYC/PEf5WM4v6AgeOaeOB5Zd59R5gvCB/ncRBmT+64puLCl6tAH/ZrEr",
"EXUT+sjVpAvlGn/VJTitnMebapSGBmI3z351PqACOPPqhAXUVqAaBZJSDUDKo88nnW7n86lelHC3yR3E",
"5BnDPLk/cJOCNxV55Z/TAceauAVzIZE1Xb2rvbw5QD9jcQ915TVR9xupq2HU3FRARP+XB8TMagdtKW4N",
"TS9paNqiAWgrpd0aGPJXtsMbuPAL83kyJyzANyDButxgKptwxykHYSdirR0QeI4LgiAkDmClW1lNeJnX",
"unRg6aDDuvt4rT0KeF4MMVbtUjktWho6yuYp+uEDwHPdcTMHeK4O+b9wYTpxAHFFlJdUH/Pq5M75nBVM",
"1k/4GcboHtWhl1nXqAx6FM1FWf8cDHpOmAN8I4v/280BnEh0cDAkBv7axkuWh3Dkg2WOEeT+NTZk5bH7",
"1UBg53MQzKBEkJEJAvhkRiLjXfiUYU1q1HrYV9A75Mhs3VElICkQlfhbD4ZSBlbxpZvDkwnll+EMBatX",
"N1uNv9cqdrZ3GJdrjOpwLfNeHRS67U5Ig2DYw92S9c1tN01Vq/EcRfhQjawlo/MOT/NtnDJ8Mt22fT49",
"H1xewGky23St1a7QRzFaJD4gEGfJKNhrmRsmvudMIXsg5doHCEQVozB2QE5j1pWsqisQfj64VAqDs/vB",
"I/ATSv1ad2yfwPgGLP0QGDhQJMyIeJvy+oD8RLUPJwzoDzF8RGGCe8K9WIzRqcqvU56YfSrPR0oRlCJd",
"UbXpJldEW9hxaiijMtbbwAX0k0za5SBZrpZtAKtKzev3aHYic1/XhXzhxE9jsQo7nI3epROyojQY3ye+",
"VhG0ixEpY0GGi5QczI3BEsYxDKG89Ftuiem6WDE+bhVkXpKsnr85kfnn03MWCTEB+KGijDiBcQB8EVJv",
"NFeJZs7wAktSdEHgxPBeXL4RV8gBfqD8myNMtbNq59pwug67vCmfTyk+ZIaUZ/2GyQgS2hTrElRg02sF",
"RxdDQ7ps5GEu9J5gDLN6V1tDxTNfBJM5fKFVVeQrpajCX/J2UJRhqgZTIT6lcDQNo0SF1VWq1r6f8PGO",
"nFt6i6eT4GSKuaMWRbnHFB/RCjuAqNLILlVXZYrVdRNjGZI08kgshpDckWcQNGzLRZy7sudhAK/vO2/+",
"qhV2mv5vAUZuPyHzznN3lf79myEvxbdK5w+f+ued56/GxYnBmdHVX2eJkAFY0HzoomuliRiKQyLwxLpO",
"liYqpjvHbLEJmcOAIFdQYcgsMZJBRNi7IvT7N8O7j4N/aoR9Me+wnJ5DoqEWM0oZMvRpZj/C5aCx1qUu",
"iat3D3B55EyYtxR2mNGNhLxgCcy3cu7jcKHiQgqRozWyFKdYLXsaUzZbb4FsiPLihOrIakeEcQxdkokO",
"Ejri/Unv/sxsUNKNyooUx1kXoeggt1Kz5U1SCa2nw/KquJG72DutwV1O22DQSKVDdQZ2V0dv9iIvE1l7",
"IBhU+bklufC2Px6eb1cqMEG8B9ikcGwXmWylG8PlBZidK3k4inlnNBk66nXXtMRwWQX2wMw2T72Gl36i",
"stNWqmp4nwGgXnqm0AHB0vlzfH3VwzBGwEf/YU+PfGVHKym1FZMVjpEwdlxA4CyM0X/UiqjlswPCoCrH",
"EyZgEYmH0vTc5U7rMLB34dqvEt7iMGWZmE1FZJX7qJyMvctml7R0FGe6LMxoyamMmSYKMNo6kvw7CmZC",
"vl01UWKE33kKagYns4CAKPKRSwlzQ7XOxaLWqnaunfdrJn72wGYsBaHhXl3eWEMG13oKT/XCwjYyctTs",
"YW1iN4ukawrxp4yWTh0nwdGWbrLmIidmsvpBioq3pb8rUnPEaSm0f8sCadns6Z4oWV2EtDBlyG5g0aqy",
"I+m4C+EL6PogBkRkvTE7JQjORtjxsi7OLyRO4K/0AI/icBaDxYJdnX65Bz6Gv27aYcGo4yjKmlR1mMJW",
"xsdhmOk2oVBUbHsDK2Dd2JsUrNW56k2Gw4wsVDbai1M3y1pen6D286mxMi4gBC4ig9orPioSrFgYV5Ny",
"aieldn1Zt7YaScUasy9XobeYTkr3XEfipcMy0dhgunnJ3wI61ij6m420D5xQWZ43/VxVj7A/Pu90OxeD",
"8blhubwkVfs02PRpkONtOy+DsRh7yw+DFHSTqae57KQL0stN5gPwqSI3GLuqSgte/cYM0uYr5iKzzoxX",
"qU1DEiOI65dPv1xwnx1joRvaxspgx7N/MYNNs6Rj8hraLIUxb8KBU6dW9yzDtf5Sl27ZXojUjOjruEIS",
"5JYTjDXMKCbHymUSK2YP06ceK2YUGw+uJncTdTHpGu74CVlKf3Y+GvQnhapkH4c3N/zj9e0lxc7kbjy4",
"ulBG1p88ioy1NDXb57rBKOCRm02y8cOmdJSljC3VpwgI8lcpBVZdz6JZxQqOBDNT3oQoIDxKsbwDgha1",
"sjVLzaYP/kYLuGoEHm+kyf1mtQzNQcxdxZrurIoay3sIU6GSwIRPtzLbqpULmkpyerezqnSPBQibYiRb",
"mobcc7ApIjMVEllav/PrTzeXg0kpm19FksL8a9dqlT6Uy3/+oM6mWfd5i2l0wnBawv5GFSr1vdCsYcpW",
"bCBs/2BR87RYcwvO3pNSnDwBLNw6GuQD8PIak50btGYLlBGTrAStZjjxtThU10GBs0C+jzB0w8DDdjpu",
"nSdsYRbnlzRkHxCICf3t1/oK61bop8PLbvb4r/NDrkC5oHrhVS9/jGAAInR0FQZXie+DqQ//HLO8GWmr",
"HlpEYcwmFa745cYRoFeczgyReTI9csPF8RwQdw5Jz4OP8u9jEKHjx9NjDONHGB+HgJ3R33qBGKvzhhla",
"1wwDSxbjCDwF0DuvZEfFRs6blxmzKnd3eUD+rSEFHdCe8JIETA1PDQ/WT1i8cyo7axWoLdz3LKpHaTh0",
"SxWkiopqVq/AUD2qfFCua3lYbSM3OLvFg0Dl5X0YYBg3P/KQ6NbUgcL2/eJILdi60wL/tT5aMgOIsNHI",
"6815GNyjmTbfSHX91bWKI69AfIWYI2twchWGyzOJyHfNROvUllLt46rW1OXWGxkNpDmv0nOmm10gCuyq",
"Wn/yrJAvasUNQblHJ/0WfC0q9Nu1ClUbYDelFZcCilPgBSTmC9kELcTT/RYtsB6MyNyg99JPOWUCcS+w",
"J0BgfA98Xz/kzhTRtcuNbUeTaCg4uU9DQ2TRU4R3tEfXz6bQaKzrG7grtkrLD6S0rOYMp+oAa9V+5MK3",
"cMRe5A7qVQ7dr4Uj5CXPUUpNLBF6o+NUHH0bO013lgSv24liFMpaKRq/cfHVREr6um2qPlvj9ita10f8",
"58btKun6Pp/y5EltWOjK/mb6ZwCRk6oUenlwYXT7GQW3l0FsGjIw1phWg5XsAkplh+fuIZDN1kvEtIGb",
"L0/zm3G1NdfztvJwtQwXVeIDv6qsqQRql5k0Qh9NIWn9myHjCgXJ+dBCHRHMIfBgbHe687bFTRTT1uJK",
"makr1/G1SkT1FYGUDyTtpoHmXVM0pDJOLti2qIRaZbSiS53SURhCdWhMMFWRTUiUX2sHKqAsHbUmt1U+",
"+NSfUR1vvlDxNv7QP+106X/OXv/O/3h9etbpdj5dvK7GXhrPqskiq0xkHxub9mIJTN3Qs6hXlxthIDsx",
"d5pZAEgSww9r0zEd2knH0wpMNAtYcSU3hobLK2bfGBumsgzNAqsJigG8KaIUPOlXXAStlkYGCt7TsOLB",
"/2HlCscDFhTD/7gdXVaTx164zkmdxtIhpnyYKWh4P7gajJiMeT+cfLh9y1ziRsObAfNm659/7HQ7l8Or",
"QZ85qn0e/h8TfrLb3uYDlis9Opr7QUiTXusL0fpC/Fi+EK27QvmRYU2j5X4b3Q/G5tvwPbnmAVdjHRZv",
"umtZiFnrzDycXXHyz7m519X05Va16Smn4QUksuRAwSE2Cexf8EW6AjwH9RYLNW6btn8Xxhp45OMKy1Zh",
"EyLDGmZ32fzL/PpO/xwcvLnUK7XODuW4504OJxLdErLy1ubVgfz2ejWRJluo/ahOWQXsS71QqNpRgycK",
"A8Y39VzxReceIVFkXsyOAuUKTjxqRGX/vSh/plXJhdbPizJstBxhI/OgKOigVX6T2JCyWfZNYr+RUUoY",
"D+i4ur3OoYQnxDIn6t/UIrHd9ZnKVZENnJ5izvDeCULiRHH4iDzodR3gxCDwwoXs9IR835lCZwYDGMtr",
"jEpdZ1vDeHM0e/tJgKvtza5JOYWzFtlUapmzQ+/USpEXP1aWilwXI2OKS/sdMOwbez4EgZdVQ4z5UKtd",
"+ReQzEOv0WoF6J94z1S3Pw89A9V+mExuZJ5pN/RSCo4F8u1j8+8AD85nM+cm/mqJ8GoSEqisOeclzcvW",
"1km6tBSwMu18SrcuM3ZNOt3OzfWY/ed2wrQk0wnJA5ZwVTQTFu8nvGaRCwIngjGlqyP76nHP3Q54BIhd",
"Zs0Z0nLJg8rTwm/QTQh03DAQlS39pcEDEeGI3ay12bAo1aE0zx7AGM0C6DlZJ2Z5ur0dXjiCfXZ/o/TB",
"FPq4uqwna8NYKuerwY8BO1LkApWOo9syH2DyAYKYTCEgVbaB3FaxKq2svgJw5rJ3/lZ+dnJ21js9652+",
"mpy+fnPy+5vf/jj6448/Xr3+o3fy+s3JiX36EsCZmaoHA0zA1GfGtj2EdAG+mQl/Ab6hRbLYHANsX+8w",
"6xsxdGFamxSbcrTQNjwGhNemC+NVCHiUn0tDw7Go6ZPV9MS1+ZSwk/VywkDdhwaQFefVQpcElGCGwX1o",
"x6sjpQM9dP3QdE5huADRPIyhQxsJMbEimsdyrDGbTxcCb11cIps6zRpzPhl+5oWs0z9v+rdjQ4CuTVQI",
"R1YaEcLPTWMmLXGSc3lfALLemMd739bpxrejS83wTVVl1l6r5iiivHTKV2a8lTmSaNdNO9lUFKfmRalr",
"Jq9O8FmBh5d/wTReClIgR3nmL1SmBsEsEU9a1mJhfPER82ORd1YKO5fT0ujVNiGRBt9IDLQNsPdgHra0",
"OAaRqpxeX/ZZZP7NPycf2APJ5J83g/H5aHjD8ozcvv2n3tCTMbTqATG4fPfheszj+z/1r/o8b0hVHfwv",
"SpX/gi1TJVF98Er6i4VvbLdBLVB+7shqoPoakn+HU4N8pV90AFmR6Z/hVCfPd6JAGDEnK8dpdDgwW32t",
"qZERaG8o1e9MwrcsuzhUrkA81DQTF8qbkERmpfFXczyk/voG0SjeCrj6qKuIP4NE+c7qjWvcGAKZ6YIn",
"XptBgoW7YtrVmdG+6ZGnGKC1CGMlkcckBgTOalNCKxBe5vo1V7QzXTpfLbmYVPbVWb19Qk5dXE1Xi9Wq",
"LRpe6HLfpQAOL7Q4lL0/oiBnEXh3e3U+GTJpe3E76r+9pBrWRf99pYCkg8hjtBEFs9k17CW/68/mtQLv",
"dnys67X554r9NCYMYkzyEVbF0JGQAF9HsSmPPcClwTlGDk/J0i5MT17GgIMj6KJ75GaTOL9EAGPoOY8I",
"CCflX/VcYUREA8+p7NcbpTWJE6gZv+4hUnVBSm/3pycnJ0aXIu0weSeghv48jRb0dziVYsz2HDdk+187",
"pJWfiLu2gPG5xdX+ZUDIecVs0sNFdV7QurmY60u8XTYYfKL0KvudNFRJjJ4r6ySMzgZSfVIUsL9WC5M9",
"uegp3iv2h8IoCdZIplse5R2Cfu7cV3M1ZLSck2KKZKyZZCy9clrZ3cruVna/lOw2zPEDivYKt74VRDMb",
"bUjgwuwoaLiv1Hc2llgbs/xX1VlW13SdylJsbTxz1gYGNMj0Yh7WYkICsahuCZHKqHXUU0oPejO4uuBZ",
"QbP8oJrUr/lEoWlO0bf984/X797VnpJs2pXuzXmBYibGSV6cFB1HwuBGkfwlWGmDsTuHXuJXpEc3dF77",
"OPpSTI5hKWBqNhvz0tlGd5pcTo4tsmNVESpcuwijkYCl2W1CR3Koc96xTgstNC/NnzGENqNwVfJmyXTa",
"j4K5tN8kjzZPCV212AmY6dDrc5VxfZN/sOGMGsKsyyGsoh8hFM5jepG518sFLUtzvrxDBm6sm5B5kWtn",
"ZHLkTrw8bnparF9hc82ggDeN5IVp7MAqA6f42axyz9UtPfoyDexOvEI0RzPPK2KUp5t82aoCQ9Fmiyyb",
"e8Kw2RD11YMlqbsHiU9uKlPriEbGFDtWjwTiFvkn5gfvwlD36M/x9ZXDgS7Hn7ARtIGh8lnwhR77wtjj",
"boUWaMBC7ZigBQwNFVEwQe7D0uRHQr85WDyr2L0kKvKiAdsyHezxtPBSZoVjpc+YZ/zRofwxo2xz5k6b",
"BT4p79m27xaNM6RaXwPlsiRh5Ab6Ws/pjKw2+TbUhD73Yk92hXDuUJE9ChVKycaQ+Xqdm+tILMC3mhZP",
"zZR9UzEJHsKQUPnL5CeHcApBDGOZxIJhlB0r7OdsU+aEROzaE4YPCMrmiO4q/0m+nb/piFjcrK/IZ0J7",
"J5iEC8vJnpnE5z49Gjd4PovTvxmyGkeE2cTyv6aE2Dk9Ojk6YXTMo5E7bzqvjk6PTkRgMcMECx72RW3Q",
"mS7S4718nqetAoixk9pj6KYDWdGicym+v2dokJ75bJazk5PywB8g8Mmcoeg1/+6GARE1vkQRYdr0+G/M",
"+QqnB2ANHw/iOKRS+Jkd1eqcVyFJ15Ejjs6bv752O1gW7qCrzhpKn5K/BMzuHLoPna+0P8NfDIG3rEcg",
"bYaqMDiSDfYdhWzBDgkd4LowIg6Jwf09cmsxmmKgFqWPp8fApyIlmPXgAiC/xx6S8fF39rP62zPHiw+J",
"5vZ0wX7HDkjTO9HuDuvO36ZLu9CnLQa0AXO14CMwnonBAhKmD/xV4eRTmsERya07b3hAfyo0SkvpqEKN",
"vw9kO7ZeIdavJXr6rYytceK6EOP7xPeXDkepl8uNVULec7fz264or+8sgE+xAD2HpU3yZPwMB+PVxsHQ",
"QfEujKfI8yC/fWT0zemkiswkxU9YE3pYfevFQuVgH3jfTldDGF/ZtZe4mtTY/Lq1DonzEX4MEmf08Dbk",
"8ngjxMCxwzetgLg0AKtMJpXYIqGTSJznsfGsF/sbWYh2CTrYc2KAA9qKAUsxwKlle2JAPSAj1CPhAwzo",
"qSj/ZqdhFOpi80fwMXyADghYhj7WWnhrpTMWxESEJrSVNOjQ7jZSIh3eIBMkrHt13MVseYLOGXQ/NlHj",
"JlQtSIdu7ETsnCTj7LcqSk63PEfBrh8m3rF6Qzdr0KWUZ/LawwZxUIAJCFxYIuJz+lm6l5gV6+3jlgHi",
"JEEay7o3BFajtXMEq+/1Yus/KS9s33pyiF4YcWcXcaIp+83N4cff2X+fq/abSinW6qi0ocwqzjeyVhLx",
"zMAm5YR93akQ2txmixxBNYc3L5zxKMQaxwbbsVa25UhcwUxG3hzFFVKN089XM4Uf14k1ti2pVKuh+YtU",
"gP3sdH/BSLil/f2i/QVc+Qw3nt67O7hF6rAmNJUeiQdykG/iCKdjHDM7Pd8lbNzxS4TpBch3cq1NG0xb",
"D/MNt7bbdC6x48qUDTdfpnLJrW6fCCHderYRhU0o739uk8MAkZBK8+PvnOOfj6M4nELz5VK+fTogV26A",
"2XV5uYJcIL+Z4dOpb0JMRklww+a1t02ZDr1Ucu341KsgKJGSg9MTw+/RTk+Fq5CwtPNhjP7DU5OL5Dw8",
"aQWP0iyZOQlAPvQcbrd32PY474Q8H2bbqj84cmSGfeA+HH9n/7Gw4jtj2lCpqpGnHPZVZDmyN9rnxjQS",
"DwNxL63zeZzsk2pzuhswboOMhPnEr3czMU+exXIQAt8Pn+j0uheBItVK0ct+r1KxONHlOSbAx99xgK24",
"5WqsSv0yvwS4AZvkBzMziji5945NCshoGWUPGaVEsCmrXI0rGSXAGjaRiotibdKrLnReeSUusUjjt7EX",
"0z+6ZkMAr8azkiVAgeHs9escEKeb0IGiOKT/gF57hu0Ra5oukawQgQOiSFJ7+VjjbQr8SMDUh8cemOHj",
"NIe58dKI2a2RtXPIHBBnCv0wmKlZBdJ82WBWvlJ+Pr0ArMboRNTNrjeXyUzVWYIWnjuascy/ExgvM57x",
"wOwOedXH3LYiRKzkTgHel7r4WFPvxgqfX4BZWjBemzqrQg7RKeXrH5v157YSdjuvdyX86C0ULSIfLmBA",
"SroBM15IOkifzgF+0EoY1vD4O/1PzfMSL9kwXXK+KQoQOoGlqZ0Xojcd+hTQHR/5+Yr7BqEga/arsJRi",
"obZpxy8Up2hkemNY/dn58zd+99n+rBO16DrVFO7DhCdp2hMRkfFzSUSY7wzERoQc++GsTlfxw5njowDK",
"zEcCjqJEuQxnlyjghUUOUaqILE8kdFh6Mme6NEgW9rmjhQYFhFUSLAddGjK/xkTkXQ6dGSQU1QzLhpkx",
"4pZHzcwVqRsM96Y0Pb7V1ElAkL+BqfsOlXc9Ar8RB0MQu3OHzaQU9q1YP+ugE+nVa2UUDB+h/wv+lU6E",
"AtdPPGjaX9oSd7TabrXAlyxAB7BVbj2Z3IYCxqJUzJTHPt9Nl3dppxyUVsCVcupYHbJW27MHR64qhBoo",
"xCKKtX03z2ulqeRXjp3LcLb+qUP/v5eFDptfV5WKY8aDJy0o9gMcPfgBRSbmv7/HcCPnzlZPuu2r1Nle",
"r+Ag0157W7U6J+N0EmZ9FZu1UEz0LvSPPThNZmYj/eAR+AkrXOScDy4dmFbNd8AMoABnhcBEoVsPEHCk",
"kYfn0L9gUx2KS8HmI1o+n54PLhkSagJYGCYxFYWs8C0VE3rk7zSORQVfpl2sEXVQUI+nWUOr16gvcdNk",
"VmIxhefPB5dmlrfidQu9hj8A5EVPWp63yM/NdJt9fKP7kfQbzY1WGvMf4BIrFyXjtLRd8+slIwMRcV93",
"sTwPA4zoVVKQGHtkCl2WecNzwD1hOWgQdsS1fZvGhmpYpvA+jGEtMJsyP7zjW0PCHDQgZoXOQhcxCfqE",
"yFx9iyvWOdbAl6WVMOzslp/J7NeVS+PvLABx54g9PbowJgAFWeh+1TrTbHxwJUNJoYK59eLSLRGrnC7p",
"cYdihz9X6iAWCftedFumSyfLkJv5iLPaWem9xGBTKScQ1i5EU8xBTvMAlz1eTigCKMbOLx5kgo9y39IB",
"zr/e/OvXotiqdIKwM2xhN4yglTzkLW3XxVqvB+9276j299PWAlVngUp5wzJso4GCdsyOYUstjZ/tVpra",
"R7g8FGVt62FMEhdNGYGhu2UGHTM4QnvcAkN8fzztNQhcZb4FBOv9C5rEsO6xX6EJJompA2VOsT/tAbWR",
"0ELcJKwwpRwrzuQ6js0xJVrWnlFcJW3NCftqTigV27VQoGtvn5VTlK6I7DLO5zxav9REs7sCTqYYEscF",
"gYdYnhlJ1xu9PVSt2LnF0GNsxGEh9HpchgcQaXNlb/eGohk7vXgorN1AsEsR00r2vLYl8ZLJdo7fKl2r",
"a3jbOWelhhzgBPBJDGwUzbztz/14w1DA0WHzgMPeb1JSdlhhJ27V3+WbjSCPOtYTZacUgNsn6V09SV9l",
"r9A5hk/5M+VNe5631+LYBYv/bRPeCOokRePEnfulxgluRSwu25Nr0V+2Ukwc5m3LUjTIWM5WLLykWLBl",
"/a5CmPTorwjFSBV4s8GEz3bIFpOUn39yLp6FpD3cjRaTFc7YIqNVpgmuPzYPPOA5d2ymSXZfkuG2cQXg",
"m7TyFeAFkg9byweZb7iVD4d3ylso+8y3fZEVq6tQC4RklIHATpwEjuhZnbeYe1BcIky4F4WsjXeoMq0c",
"CaWgocY/yQLQtYOj6qHZlINS0TbLrL+Bx711zNOnNc3QC3m6ULh51TpGyv8LqykHDECLKne0/Z1sfcda",
"b5XYshQI/I2PuUqllXez+FtDsgHeEAWzO17Db0eQ9zUORA+9R+HTY/FIkHkS3S0qXYle1ohNBdsoCaRE",
"ax43rUrRNsfB/gQws71ZpAeVXYyF/YkbhSgglufuAgUJgfQ6Lv+KIXjwwqcgPYobHMPvIbmhkx/6IcwO",
"POkbrITuCIN1p6tUqT87OTvtndD/TU5O3rD//V+D3BHd+/f8JrKJA5JBmnoOq6CGFL41gL1HAcJz6L1l",
"gzcHd/uyMUdqK0hHxietfNxT+ZjfnY1LSXzsslLg5ig0Xio8zUejk3e8yc/9QMlQwFSVmgJJPMdX6LgS",
"aTuNImOT+tDjecJqXyZl8zZJVBstW5JRBcmwcckUw8gHy6riTvR7pWTiTX5qycRR0EQyxRJpu5RMHExb",
"wRSL1q1cauVSSS4V5MIG5ZJI/WnjfSvTq9d534rs7a377T6733JyceiwdvFrrP0Vbb5KMKSgiXE6iq29",
"VRKdNaCiQwWk1ZO8uIeryj4NXFxTRm7f4vM+riliMrkpULy2l6upiEW6ia2fq/BzFfho8sotmfKFPF0l",
"jTRxdd3H5Oc/t69rObO5Be83UJuYu6v4h52/a63MOHCPVzq5fHuULFzv+5phxQzsbu3Qtvwv/Vlb3t8L",
"V5da9u6q5Fbj0irpV/i0CvXQwLeH7NZaUIB/NB6V3qotjxrcVWuOSRjQU7AXAwJ77AZKN1fsvSWX1fmz",
"1h6LB+7Rul0O25536o+ruEsX1VYw7JHirpEHq5/s+hv8TYhZfg8UuOECBbOUXhcQYzCrOOFH0IXosZVB",
"TWRQkPh+ifKDpROBpR8Cz0GBA4KlI1bb7RD4jRxHPkAFSitOua4MyTwFb2K63QTRcfhCxVzh9G/oVtng",
"cji6Bz6GrWJhqDnGmU7Daqtyt80dXfgL9+IkqHvfyGcMrH3hyDIEtq8c+5+zFIssjlbvHDvL+Mh88EHs",
"I4hZnmtoBd4WAwJ8QJqAsrFqKXvj9G2Zp+ZAIhUoEGkMnU12HRhv2b3/yxySORcAojqNc9F/j+npFQb+",
"Uv09rRmoE0iBv7yTDWqVlGkY+hAEFvEcuQKSFjh7odAOTZlLY4yHRVbfF4v1cO59MGNH7ZOgizBmzhcq",
"GaR3SxB4TpgQ+qdQHTHVHWkDqQceORfwHiQ+z3X/L0oP/3LQvZMEGLJjXLd8MdOdHLRTSUI7q6XX9PW3",
"dRjat5obOY1SVXTl7yP6+5qvUKqGe+whHPlg2WOuEjX6rmhLhxWuFeF9hRJcrQNf8MGYy8VB68OKaMXp",
"G1YOKSJWUqBPoM6sCCiy9EVKDW/Z/K4lgVZ0taKrqeiSfNKjfFItuXI8yrQHfbL/LLVdheQaiMGG3uEK",
"rvae295zf5J77s6Os0wutKfZj3Sa5U6PnZxs4nptDvmZ8AbSozR/Ya84ulrX0lOBOgUpNc/UOVIgofDd",
"3PX7tKI1QwKQj5v5mKoU0r43FV0+Cwy0AQbP8zPz91R+qSkjkSc5EHjMkSw9/0mYXiVFoaT/6XiMKP6n",
"40SGx+iMfixdznIwcNvmjPU0vAAryzvYPIYrcFl7iu/xKV4MfbNk6G6JoFdg8WNRLq6K0wnP8EUSZjjK",
"8/1RLRePZT26FXlZnV5R139M1lavny1L76mD13mY+B6PpaUXSZ3mskd5SXJclRaHfBFZwxI9WZTXZSG5",
"PMCdW+rtrw5pgXlro9fPU40mE6taA8iPK1FXqujYCtVWTyrKLoIWKJjVa0uiXWPp9R6SiZjiYO8+Whnk",
"wYjMebYSntHMcefI92Joct1gHRpKv+0LEr45rSQ5eElSxZ+bFi8wEjJF/vl8DGJ3jh5hnRYkWgkwaXet",
"CBkTGAl33b4c2EJ8yPGM1lMJb+u6u7pGtk2ZJPZd7LmVVMonlGxrgu4+F1PKdYV8TGUhlWN/hfmlfKLb",
"T2VTlWhKWbheJtncy0TZfnt5NJD1VVtp9JNII/u7ViuLDkcWKYy/fUnkh7M6Tyk/nDk+Ckq6UdkcfRnO",
"LlEAba1BrRh62XgmHz5C38pliLfMzVzFDJIOaK93CPqeMXscpAevw2ZT4KgoZMI6NAVkzHtpQ0kACxQI",
"Y69q/ezz2yVfS8PJr9W+Bjzw6T0UQ1dEuldAcaE0WwWSrP92DylVGrTF89dNP5dKYeUsuAxnzY8B4WhU",
"kdaceUBg4UlkcNyfsJ/PVceXTTvm8MH5RHUJerlr0su44nAIGznfCKT+2DS+gtdNSmxpZlrhT1Mkch1F",
"p65ztSZj7hojXtgrCbxpMqY0sEPMYHzy2Y233MtSvEyX1FL7bm8bnBi9EPKLBvzGT+BSEQ1bZstlM63O",
"vxTw2VAwq+arw8nCtCWvU46AJodblOYXyRUzbc+5QzrnBJ+swHoV590x8ClhBLMeXADk92ZxmESVD6dU",
"uZO3QEFebAyHDeCIAYqs26dNBrTFe9rgUCKdtn8S6hDTsNyUcRNa3sm/JlZQa6NzzPrqU56rjjF++pAK",
"9eZWwI3dWVdCeaOr3el22XuFE1BDQy1fa+9+Wm7b7Cl5jCEhda5FmO2e7OLILtXZDBRyQcFsLPocSELf",
"HR2TCmLWOCPVPWlZSXOt06BpY3wUoR4JH2BNMjynfzN0eLtqrulHaEKbtfokPmZ+RTdDhg88ErM05BPp",
"H9Xa0IvKI6VIjlqFGdIf1ynjEmTUbkfsrY7IECBpXVELt2nCKE7a8teGw2YzZmrIYFUHjoW3FK8sl3OZ",
"MqVdzZxm2nSre+2e8ACXVs4JtF3z9DOMDD7CpU1ekwym1H15eIFt82FyWdEYQOkSPbxYEcQsBm2NVD42",
"EI6SgMdRCsPXi7h6sP18GUcPNvUeuHmocKhOHhXEkmUQgkvnEfgJ1OcRgt/AIvIhFdkPcHn6hjU97XTp",
"v874v86oeK/ON/Rps+mGsmXwxKVpxqFqOmeNh4efaWilSLvWuyYw+1wqSgtD7vomZDauQQdprwAMAQwX",
"NWZhkZj4Rdx7OCU0sflC3uNn964+++/dzDoS/CnUU/jNhdCDhlKOfG8a8Hn9xeR4mvgPZne6t4kvahhB",
"nMkEXCkUaJ+fWDDQ5TcUDvglpQNuLh7a6Is9kw+MTVUhgTcsJVwQuNCvcLtl37khQ0mcnVNxTVKDu5Xw",
"EX5mhYIhwF6hEBeGGEY+WG5cbERKQajvqSVglARDnpx4W0U8rOtOCdHEkAazHCWtkNpbITVilLod+cTM",
"aJY2Vm6bs7CzfoTL9lkvMzaudFtnyG5v7LobuyNsv5vkA3EaGM9pzoO42dE8kkfMz3o0cwTsy9G8GbMa",
"B67V6n/SA/M7+2/vCZF5T35i1u3a8CNAAD88g0oD4QUg4D0kXxCZTyTb18oPyT568VECeddvlz/8KU83",
"bZV0DIwq2lM+78umYMaad7saIq/mZxQ8IgKbBkzIXnon0CH72uq+0vdTwcdKXp8S262vpy4cIqPFLcVA",
"8Akqab19zlKiHjhK7IIdOG5fNMKBg7tKYIMgjJ89tvfsbEdaLyB271xFvtXJBRiAqQ97MSCwx8ak7CF4",
"bRW9WEgh+UOP//uZixgfElgWNhfsd5yakWwEDe9zsN57ea6vhq2XouPQT/5a2cIpZJ9lS47NOBFm5GrS",
"RfP7WBtB34wTDieK/lA4YbuB/qtpBS8W6m/JuRy+g+FcEYLfmHOrTr4FXEwZ8zW6Qcpeehb/xL62N0hJ",
"jQo+VrpBSmy3N0jdDTKjxc0ECYrxjr/zPyyUQAcIIJz7OFzUBdlyavgxVEGxbBNs/PNOefe3rfDuKjrg",
"z8G1e5Sr9sqQmjZl0tzGNJAXXUnIFmmkSpOYRcCPoQPvhQjYrvLLt8tO+RXo2JOUV5bSS6MHi31rhdcL",
"Cy+jXFlBeFVpPVEcLiCZwwT3FlQHdevLF2VdHNEl9cGry0x5k3b9JCb7IS4KBH4jx5EPUIEqiiM1uQOU",
"sdwy5UszJeUAzb5s6gby7wQm0JoNWevGHPgP2uuAmO+wI5sPKVh1+/aQHO2tlsHCeYQxRmHQysR9konp",
"7pQlouScVWVi9tRn4+odp4+Ndb7eI0DgJW3Y5tXY5+q0m8jBUIvJbWZaSOlsD7ItFGHZVVmNPK81CCZQ",
"2Ln1MyxYwVXcZOKWeVtc8l9XlbiiRy8KfeQu61NOyg4O72CTcFK6Qt+wHm26yWMdWlZ7NCrsRvt4tPOs",
"rdgH7kN1oskxbeI8wek8DB/Kz6ns8xf+tX1O5TkmVZw0uT0UUL1P7LCjise3AUjIPIzRf6DHJ369m4k/",
"QTIPPVbRA/h++KSvtsw3iOmBnAXU84x9XIsRjzEBMTGy45h+5efYdT8hc4ddVooMeYvlsw0D6JoilPU8",
"RM58dXKmwYPKPQxl4ljJYWUOgSe8RvyQE0yNxZNtOHSTGJElw48bhg8I0kFZUaSvKj0wlOZnlIRAd2Bl",
"OqjL+zu+GhcJsCCQA9zKYSGHr8ZDFVUNJHERy60s3jtZXGaEVBJfjddIN1wYWMdgbTQGQ0CevyqzDG+O",
"ZvOTWkdVFHe1Zeg9Ymgj51lydOWJKup09nbxZCVKhx/ay9X2zQU6xDSzGaT1rHM70z6q7MOjSro3m35m",
"1lVVr2TdrIC6M11yhiqc3pwQD8SO193Xyu7blBhii1aUD61E2FkpVJUWnwCvh1onItRDnf5EN3rVKtvV",
"cqI2J2CfELiIRHJL1lYRHybBcWjJAFsJUuUSjzDzlRYihBOBv38XhBd+xKtjlF0xdAxpx4rcYSzJoi0P",
"s+YtC+9jNrM4CcRW1Xi0oyBKmD8Ef9zVLfd5LzSVNpdZhXxhG/4SAiVbU6UtgDcTzgJ1wuU9JGM+bCta",
"Xk47aJal12BpEMO1F4p9vlDIXdqK1CAAP/QwAaTGYAjwA6sGJSyFNVbCCcAPYzaovYgYXvyItsEUEQ04",
"VIvrlkf3wAxoYoNdpEcSXjO9pzB+qEoWkTlgG12aWm+mLJiEo+ILQypFSFVVT4qMNOCFd3TkdrTPbfv2",
"fq6Q/+pJDMUgJhb66d/Jc/zDsbGjYryamb1GKQjl1racu38P5SrjrXRYMqqofkijJyQX3tVe8tnZ8NMf",
"lhkm2prXG8lQLbWHfIze6t6VEtHcENS8FoVa/VdTkkIp2dsWplAKUyh4wTUG3Vx95ZcrU6GD27qcvWLr",
"zRFMe0ndy/IV+T0qhwNXm5KaCJzv6j/r/FhynFB7AgsyPWS3lgLr60FTMXjAaoLYrlUzC7RuLua4/vwL",
"Un1MfzdPU6vz8zF7jKx9TOJPlpyhVaCPavh6yEZvmfvlmTvLYnKjFKHkMK7z7pTHEdvu1qy9I7P2FxX3",
"gU3+kGyTmqoMm5M4eA4iuCU9YszGbuXNwSgTfMNajeIH0ijS2BXhM1QZGSoqtTMW9/30fRxrdI0q1meB",
"k9yVZSAL+7UyYOMAXgJMnOEFS1g/h44P5A6a0hQBTIaeMU/RqzNdnqId+Ng2KehZKsvXmkT2z7dmBVli",
"73hjJwux1csEa2mn0fyUidM8eA8Sn3TenHRzomIXKdTSuV+vMvmYZ1KbLh02gX5S8cmcz2EXalf72LN5",
"fWuTKRnTMWuDgc5lXMMUEHdeeuyp0pgOJxhoW14OyjsJR4at276IJik/lWz6sSdSLDXfU6VvlARDD+dS",
"z66F4HK+3YYGIRGB1L4e1aRH42Szi5cbfOzGYVCvkdBWzt/hNAOKxGg2q3WfOI/D4KdWUw4mv2u6scij",
"084gSVXio5o03qaL2xbuunTmpuBd1alS2ikZxTeZjnZoPtVhZiivyJk7XTr3Ii/vxlL3qlIE26fvnS63",
"l8FXUQp2nMM3h4w1NPT22NVo6aVzbkvqOj10j7/T//Tkr3Zl7soHsfXDByWcAy96l67eBFYOo7sve2dZ",
"n067iW1+4GK9OD2amr1V5Ani63O36jFxTeY6ZPekPeasLR2d7bF5CIb9Rof1RuRDXXlJNms6o7VwOPBa",
"k/slH7ZVbVIVEBNu4LCy9VEq4CUcbWx7daqCWgyyVRWq5YBgy22IAjtVnh0Htg966itjvZtSazDbZ4MZ",
"e0RuYC1j7XdoKttHO14EYoo0g+tKASze+Iv6mLEj+DQpYrSwCSeR7cLV18ZnsUQECYZW9RZl21WsW2PW",
"V9iZbIB7QIFnBRVr2Bikjyjw6qE5eGMqQQvogHsKaMl5+glgGcusLqFzdnJ22juh/5ucnLxh//u/RmM1",
"696nE+iJlx6rPQpFx7YaOYV4Cu/DGG4T5Ldshk3CXIHlexQgPF8dZtl/p3jeFNAbxfT2HgfKlvif9mmg",
"qDu2Fo6tuEtv502AeUjb5O8HjgCNHnR59lcT+lsGQhxyBepWDW/V8N2r4a1u2eqWLxIChdes2M4EUFtZ",
"pP5830L19Oycp6B6iU+PxxqrYdpyFfvhWHZurYj7bEXc3r0oJYCD8pxqlalWmToYZSpbRiaqN2KbTUGy",
"YvDUSquBeasxkiUJ01odNquVGDSA7eolx9PEf+hlnoj6iKK3if8gnNo2pKjQEQ/HP3FLfghlnsrQYht2",
"NK3fmt3WEalckznxnEpicdqulRBSQry12uetSwrurlIjKXgj55cYyt6/blBsHI5z1U7FhkzT2UBsiH3a",
"X7Eh11QjNsQ6WrFhEBu1+7xNsfE9/bNXyhlZGwGhB7mh0DjwOAgNDozVjLSo3tvQCP3utg6PxdgIA56a",
"eTwaaKMmSmIjDHjQFYoPivu2eSC3d/1Dj6HYthypjqbIXQc2JFkOPNBi74XLtmIvStKlQX3UjIzKeR9f",
"9spSKyHVYI+fUvk5gOpvt1WXpU3JSrtLVJpC8znL3FJVxsoBTgCfzPlb7NO3iHiowyl6VZ9JpDpnZiVo",
"OxKNHNurhqWJytHGzd+pbGwWfKvW6jLD30rG3UvGvSt0IgRdFZVvJ3WWIotzTj16eSx1AyGR7TVcnWLU",
"SuFdSmG5AytophVq3Z4rpqoEbhXTVvyaxK9QSOp04o2LXF49r+eGSUBq4iVYG5mLXJZ9BI8A+WDqQyZ9",
"FXGjty+8h4RX58PnbMaDF711KeMPvGREbrNWNFNyUuHk074gGhymc0harZBEnv0TDGN87CZxDKs5G/Pb",
"AW/o0G4l7r3FMH4PybkYbIt0R2dqSGcM4rYA8csXIIZuEiOyZGLcDcMHBPsJlV1/faWiqpB0KE9uktzZ",
"9mvIeIbIPJkeu8D3p8B9MJLzebiIfEggp+lrOr+jPY/oRNwe9Z4NfU1xeS6HLxD4q5OzmrdXV8zrleed",
"Q+Cxw+17xw/5ZuT3oSjWnwvIzOFOLjA/hyX6MAGxWRSM6dfVEMe6Nscag2f7OGPQNURYGM58uB16Y0P/",
"4PTG0bdhessQ98PRGwoeEYHVtZswi2aS2jDvwJRuq+ObjjBhfYdiri2e4upEVs7sPsJyY/ILbPVF62OV",
"1eQpYC+jvInmhpijvWPgujAiZstbn33HqYVNTFKiNnXzeZ/OduxJfHA+kWJIMhiAKqiPr1xHf63HVEpe",
"HNulvbenrxiy6hYVlfTp92b0xft0tlWXng6+AfriK2/pq5K+OLZXoC8/nKHATFaX4Qw7KHAAOxuPKhSM",
"SzbQlpwz6BFMx68npN3do/1wNoOeg4L2+vzC1+du57ezs12tO4pDSgPMaDsICCJLp+c8Ah95bDK6KaIJ",
"CmYOlCOZFV5G2PqrfLfzrQcDOlUvBgT2mA2c6tD8rUbHzGFCarg5TIgdO4fJyxurBJOFe1aouzVS1WjT",
"jHps7VMLuJjCGM9R1OAOp3Syu8fxM/BT1k0kpdgqgesnbX6hU1HUXupWudSpGKwnyQhg/BTGFa4UaS52",
"2sGR7atE6o0cc3tK0vkcBLN0on3SllwGmZciqhXnrdLUTGmqZnVO+XlmXFufiuGMSuK46trNW+BKlSr1",
"lNoW30sw9onjJfLah8aW6TdzU5JUvpnLEvaB+7CVR6oxHXmP36hqJGnDR6tHGGMBgtH9ia5BtJMuUBjG",
"jxotfRjch+8h+SwG3WhNYgXSLEPj6dHJ0YkuB6TiefRX2vWrRbnhScViC96WFcT+BToxJEkc5JBXuOlQ",
"MZsEAeWfdIpvPTlkL4x4yqkyCzzB6TwMH3rCEe34u/jBIvydHnWiddlRjf9uH9kuBjI7gqUT7dgPzDJU",
"XMLXHmwvb5wohqerZGr0/hItvloxx7HAs42ZQjYVfvU1HCMUN2ybKHNv+WYz/pMceu4+KVBDMVOVcYVi",
"Ja0DIrCTblfLnnvEnswqU9qipjya8ib747nG+5q30jpWM+dMK57jTqZVPsuaM/5wPJYb+46KFbf2yJJT",
"cingS15QzD7ITK2ur/xYScj2aQf2gpa3FcWfOzdMZ4XAQCJRtrs4KEteU4PyW04z1Fxch9kKp0kxuMcq",
"EVizGqwN7kV7GSHTJIlWCmAboPfCmSMEsSoUs2J8TLdOw7LnhAYq188QKLZicFjLWy/NW2oU2jqMZaP2",
"2XNXMz1wLxhs87pgHhm2sfIiJ2mOy3atHFpJhKJ62MoDo4K4HnPWqIlW5fLoJuXr4qWM95i+dBhPygbl",
"8faBnzUlKniBiQ3UD169erAesFkcJhGr+5GBIDfKCArr9BEuO7VpQLYsJNasxSUfldpyXHuoTaxU/6uR",
"4JKpiYzOLTKrRtNkQSvlCNpLyTXRsMuRM7xn1m2cUOqAXpdxlQ8IxCTlKYSde0jcOfRM1aEywb/nipQg",
"gxUTD71YuiEF3kZ5htrsQm12oS1kF2okmoVswBavWrmT3EosC9+aAzLB/AhyectSTjpMracKtvJur1TA",
"jBRXVQGLjn9TCGIYp45/Xa0rIPMk4/Igif3Om07n+evz/wsAAP//ral9pkI1AwA=",
"di5G1zd3V4Mvg/Gk0+38z+3gdpD98/3o+vbmbnR9e3VxN7p+O7xS9jiDUpl77IYRVOf8cj36+O7y+kun",
"25n0xx9r+0NC6K86ERNDjLWXUsrObjaGk7XtUiXQo9r0DAaQYsQB9Mh07uNw4RCAHxwURAnBXUcycteB",
"xD3SiSG/iNdKAjXtxzMjglESYP1CFuAbWiQLJ0gWU6qv32dLI85TGD/c++GTEydBXoCigLw6097nsdwS",
"S3D5FtKOBEYjCDyqLemUbgptLL6nhy10aDeK8ac5cuf8kFM3B/Md5vdifgrUSFiBreIGdFWakMvUiSB1",
"bQSQOtoq7fsDXHLdz/MQXTrwb3Ld1T0w2FRKMPEfvtvoZVzUycPXLK/4sTM08IeXxJnJRG4NFCcywg4T",
"9uXNsD8iwgUiAfK7ciK2GP3x2+eHL79xrnX6svG/WiANR2GAYRlrRCo0ZYzlwKoGg49ihuM8DoMvgnUn",
"MZrNYGzcx4zKPilqT2lgNw6DQTXd0iZXYgPKSjMVe9qRoxiFMSLLImkz8SKkU+fNK3Z48b9PyyRfUhDo",
"bF3d4hQ4S6v6mmKw+qzW46xAdGmbVNSnFMhOUmWbM2Tox2IMZTfAg+4SR/uzU8jQPdsmdTPKY8ivUvSm",
"4zQ5FsrDsk8MODagc498AilE9ZzAr6MMa9nmja/GinXBuIskjJDbj03suAD/CQNHKvgOpRjnl/7o6le5",
"+vHV2GFjrCPGUk13gYL/fdpdgG//++z172WVNwXWzPXc6Nj3YUwGC4D893GYRGb5TZtgnbD0ESZ0jbyF",
"NG3F9ES0tPussHwPPcIum7G8dgFq3cprLjl8cO1es09yW+laqT7BLxkb2Vu5rm4nDv1a3Yiv5hOk+tiI",
"ttfioyMGq8OKGR/BDAXwM4ylQK+HSTZ+7nZg8IjiMFhAbuau7ztQOlhflLktfBN7wJAYBtMQxB4KZhdC",
"zup1LG5+NsrzbBgulUnoYBLGkD3C6OHO9gb7ycwgBv1ktvmFd8WbEzvxng0mSgaUnpIyTQLbHoRVSNUq",
"FlqJotiAy/bbVJ1oNNcahp0FJPPQqzcTKOj6xLsoxF553K6s+3Q7nFqGnnYOeYer+WzU3GQDwfzaYcxG",
"qhQ03UCF2XOwCsrI6CDdg1o6vUQ6eReBGQrS94aqXbxJW6aKPBPdT03sRSrfWL2L6GhHMWxcDN71by8n",
"HWYX1Zs11AGuYw/Gb5fv5KuyHCaQii8sWV6zkZj2u0u1d02tdQ2+JulLbf0RVmS1MrjDi7wAL77Qi/d7",
"40Ik/Y+SYJwsFiCutfuwrfpS7lbBklxnThfyVW64PBPzm97kRuL88uf4+sqZLgnEv9Yr76nazqb/uB4N",
"yDH2gPnT5ZT5XgK6L1BWgCgkyAWKoStBklIEYLfDFSSz/DBJIAvRM4Ygdufa08hE7+XXQ2Zz1z4iMy0z",
"M3fKhlojp8HAdg+QxdC8VZNxIxh4wh5dNbBo1mTkfycwqYeYt2oybpwEgQXEolmTkXHiuhB69UCnDe1H",
"T6kcVz0NaW6K7NuRehVegcfWOLHMYl15b/oznGoEeZWfHZPniqedOMX+DqdHW3ohLY2JCYzspdeYwEiH",
"2EpVmKAFDBOiX774WLf0x3XV4EdF/ZXXL7Z0nV77ZzgdJUGFdONv4Hbv2mmn1OHT3GQEATZczO5RgPC8",
"2dR/c4qs2lFKtLylYffWILoY4sTXm58xATFpthhMAEmwxXro+cTbyuct8QxnTeJ085tTufsA42oWaLJc",
"RSmtA1k5mAs917828kEkgaS7YOaacbpNUvW4GVxdDK/ed7qd0e3VFf9rfHt+PhhcDC463c67/vCS/cFf",
"rvnfb/vnH6/fvdNqK1SN0/uz2XrBFrtqNltMwl6WsPlpaafKY+qbo9UfKcR5Izx+YXjz0NS6OiiwiYl0",
"ZMaW6QP34QuczsPw4cUXqcCyqSWGs0sUwEbOecw9gn6migSVLPJI9cOZ46MANvHE4h782jnocKJBrZJi",
"6s1baGwSBWypXmtZWEE6w9cMVZfwEfp5w83bWypohlfvrjvdzpf+6KrT7QxGo+uRXqYo46SXJ6v9z0Gg",
"EyTi+8vfPSVZ6aUH/7jG/TM/QsMbqOhccQfVIED11freEe4zdxGj3bNuJ4Df5L9edTtBsmD/wJ03pyfM",
"CpzjrFxnnUun9M6JOBWmE59ZXasUWLT+z/BbeeRXdiNn69J6ooYE+OolljZllh0fYcJfN7L4oRObW5xG",
"Yv0PvcF+giRGrkYeB8nixu6KzehYXrSPTOv9H6tbNR8LccdUdsU2Djiyu07zEcWl+qhT6xCRgZqbpasi",
"RCf/R4BA5k9WRqWVzZa5wDE/Kr2LG8BkBO+Rb3iYZQ7KwoNZHYx5L8esI2ReRFtw82YTfQZ+Am0d52L+",
"1IodFhkjTL5i159Q4IVP+m3fhE25BtGP5nVIaaJZxwJ40HYR/Jt+Cv6NLYPuJQoUj7AMzTyG4z6MXejZ",
"en4o9wRlv+R6U6hylPZVpes9OAwzHtMeh+nnNQ7E4hilI5FjU2JNQaV2NOjCgIyV+2zhnYiBZ6Jn/tXR",
"ef+pBogmN9RVLBJrWBO2ZjIQKM1sBqULdNG/u5pH0o3oqndrAUtxdK34hzOECYyhJ2/2muABwz6nvsPI",
"c+J0HB6dhTD7DOMjjc96CXl2viKZr3LVZBYRKkYnzxGkf/08sRQjGPlg+UOFLfAlKWYqbFxZjjtedn1K",
"89cnJzXrLcBtWrXJjKR0tz/CCnY/W/gkdDGVeUz0VbCV3n9Y6/hLRy1YfDQDziAmt7FB87wdXTK/Lhh4",
"zNFTXPqxQ8LtuCCYjsskQP+mupEHA4LuEYxT3VqogyK2j/ujqiGxU+iHwUxCXCtlt+gOa2forXRxHbtz",
"6CU+VChtXZf2LbukdzuEu97b6wlNvNizwb8q6PE2Z/dmoVn0j/H5h8HFLf1RpwymM2/XTXBPHf7Kq8+8",
"/nbh3NeYxDbnDzhKgnPVCNz4MYkDsOuzVAHAZoljK8X9S6nDSzpOZkRR6TNZpt23if9wAX1I4DsWgrGi",
"C2AaQZB6AD7ApcMul04EEE83woM8nOkyn2viAS5P37Cmp9xV7Yz/66xJ2oluJwJxdkXVX50a0g0f8Uvd",
"hWxFatzAYM8Nt9h4fN6ne99M8pWoh0X9FFpptOn1leMhH4rpxgsUiH+e2njdVmPIpCR77Lu34ZUUibhh",
"TiX9UuzSLCkL6lblXKqaQ58TSh8NuBFyL+WJagDyLUs9QSnFZNFYdzPXVf7ykrzpyozczXNurEtVCvqa",
"sqC6SAlM89WZOHObPJMmLNkq32tRtApn7oFpW3M5eF5NKq8SEFEexWT+VjWm6tfhMVyAaB7GcOyHZMO2",
"75xdWe+gyM2Z2A/5E5joYe9QsaIdGquKlCZyjcDIiRO5sHpTg+qEVr9Q5PvSO9N+paWLRoWF2hr0Am9m",
"aOmqtvaCXZ1SjeqZU/almYMggL4JTPHZQZ7+6Q/TwZ0nPrr+UYWPcGW0o8spmD19xUnWsoCBhWn19Nsa",
"S6fdzetmg6+z6L2w3dlZ1yQiUnTn6aKrkKH2fCEwMok7vSvxHPleDPPekLU6L8IXScyyl+pSe4ncf0Li",
"IMzSjEx91Z1CCRLdiisxvwfiZquKV0troz8l/FA+x1SmianJtNuhB1j21C8JkyH2FzrHHR3A6f0zOTl5",
"xUiZ5GK6lNwym3K5NyzZTN4KWnO0Ll2EBXVyn6cKut6Ci32fDKIw5z+mbMSGHPEZh30xvdfUEmWuOz4P",
"k4DowTXf41Z5eM/6VGCoaJvPRRJYOKKLuIm0/eblQJgQE4grigjmGNa/F7YXO2RuPLCBd6nYmTU0SNuY",
"HtrWJE4sZE2TFaddKlbMnQdWN9+mFJiurDJ4QaCuH7tz9AgPUi41fxbYKxET0luivlMF18eQxMsKKbo1",
"flSuZrthiYpbkIIEiUf9jdpE7/tgtMgzoNYpT7QxJEpwzVRgfo329B2UEAgNyUketFiP8ONhPSjdwEco",
"Xydte49lHyu6e4diTMaQ3wDsae8SNO3VMMyMX6FyABZmTjGroEmN++D7W0HM+xLjnyPTWkLORLq0i40G",
"3Avg7ur67sv16ONg1OlmP476k8Hd5fDTcJJ5CQyv3t9Nhp8GF3fXt8w2Nx4P319xP4JJfzRhf/XPP15d",
"f7kcXLzn7gfDq+H4Q94TYTSYjP7BPRVUpwQ69PXt5G40eDcaiD6jgTKJOvf48pq2vBz0x+mYw8HF3dt/",
"3N2O2VJktti70e3VHU8++3HwjzvVN8LQRACqNRHqOEZBqhIIJBY4Gk6G5/3LqtGqnDrEX3ccDZ8GVwXE",
"N3D6EH/z1lWRjxOAH/TZTbNEA5UZVUT/BLNR8okEmnTUmY9lm8r7sc0knarRBQQa6Z/mf7XPF1TIGau5",
"IIS+Jx507KQi24fNJ5INCfCtOmtRl2bbKVZsgbHIeDcw5CVMrT+hw1pLE9qC9cJ6CxAIgL8kyMXXEblO",
"SLVNSQw4B9gJIwI9R5gm0kH0c6ybCW/r2eZNueTWTkaXZUNomD6wNqc9gysb/auRlArZLneb5nJLeTzM",
"2S61a94DNUO/F7qsoLOwx4m2M2KPgc/5VaFgJjKy490JCZ5gbvAtQnSXWVg7A6Z6fN6LT4OdJ1Z2gkXo",
"OyCGDoiiOATuHAUzXn+CIbhqfpmtkxMJC9ZZEQq+ZFnoowwPi+6pxIViU3wHkJ/E0AIU5iqtApJLHs9y",
"Ienn9AHmSzU/fWZxgCAQO8ueP4vph6sjfsA3SWTvmLVNHNDa0D7nXjZxAJHhaoKqNvv8ZZYEWoDNcmGQ",
"P4iknuiHLvBZcNgj9MOIfWYxx17iFiq9KeqdklF3e6l0n9PqJZUPwbJ2jahbtst6Lqvl6617FxQsanrV",
"lJ/NWOMtqt412Qi5tPfGU7zmKJKJhrO9UpMHGqmR087eHE6ClJudSXxPy/C/GEHZ56mkrFfX+hbDmPe4",
"SaY+cqtIgY1XkXJahXlvNl3s3yqbPhL7JKXo9ZcrZjHoX3waXnW6nU+DT28HowrZWZ2HoP5y1uQuVoWJ",
"HByKsWzVq3FxvGI8VooASfnF6jyp4WUwuhtfXk863c7gM7dZTPrjj3ej2ytmE7m+UmJPWP6U8+tPw6v3",
"d18Gbz9cX3+swH1Oi9IpkiBeVET2s+/CX10roHkOAhI6TyBmufJK6hXvrY+Ub5b0QJ/vYDMpDPjY5iXq",
"4V8vD1tKE/Xsm1KQXQKDug1rnrdgAQmMZfYCeY7ysZxf0BE8ck4dDyy7zqnzBOED/e8iDMj81xV9dFL0",
"aLMZmMWuRNRN6CNXkwuVa/xVl+C0LCBvqlEaGojdPPvVObgK4MyrExZQW4FqFEhKqQMpjz6fdLqdz6d6",
"UcJ9QncQcGiMYeXOzk2q+VQkzX9OBxxrgjLMVVLW9GOvdmHnAP2MlUvUldekFNhI0RCj5qYCIvq/PCBm",
"VjtoS3FraHpJQ9MWDUBbqVvXwJC/sh3ewIVfmM+TORsDvgEJ1iU+U9mEO045CDsRa+2AwHNcEAQhcQCr",
"S8sK3suk3aUDSwcd1t3Ha+1RwPNiiLFql8pp0dLQUTZP0Q8fAJ7rjps5wHN1yP+FC9OJA4grorxe/JiX",
"XnfO56watH7CzzBG96gOvcy6RmXQo2hOf0VxHgY9J8wBvgEYP4Wx7RzAiUQHB0Ni4K9tvGR5CEc+WOYY",
"Qe5fY0NWHrtfDQR2PgfBDEoEGZkggE9mJDLehU8Z1qRGrYd9Bb1DjszWHVUCkgJRib/1YCillxVfujk8",
"mVB+Gc5QsHrpttX4e61KbnuHcbnGqA7XMqnXQaHb7oQ0CIY93C1ZvN1201S1Gs9RhA/VyFoyOu/wNN/G",
"KcMn023b59PzweUFnCazTReS7Qp9FKNF4gMCcZZpg72WuWHie84UsgdSrn2AQJRoCmMH5DRmXUhPXfXz",
"88GlUvWc3Q8egZ9Q6te6Y/sExjdg6YfAwIEiG0jE25TXB+Qnqn04YUB/iOEjChPcE+7FYoxOVfKg8sTs",
"U3k+UgoPFbmYqk03uQrhwo5TQxmVgewGLqCfZEYyB8lavGwDWMltXpxIsxOZ+7ouBg0nfhqLVdjhbPQu",
"nZBV3MH4PvG1iqBdjEgZCzJcpORgbgyWMI5hiFOm33JLTNfFKg1yqyDzkhyPK7O0fz49Z5EQE4AfKmqk",
"ExgHwBf5AozmKtHMGV5gSYouCJwY3ovLN+IKOcAPlH9zhKl2Vu1cG85FYpcU5vMpxYdM//Ks3zAZQUKb",
"Yl32DWx6reDoYmhIl408zIXeE4xhVsxra6h45otgMocvtKpEfqUUVfhL3g6KMkzVYCrEpxSOpmGUqLC6",
"Mtza9xM+3pFzS2/xdBKcTDF31KIo95jiI1phBxBVGtnlIavMH7tu1i9DBkoeicUQkjvyDIKGbbkI4lf2",
"PAzg9X3nzV+1wk7T/y3AyO0nZN557q7Sv38z5HUGV+n84VP/vPP81bg4MTgzuvrrLBEyAAuaD110rTQR",
"Q3FIBJ5Y18nSRMUsajm8d2grGBDkCioMmSVGMoiI6VeEfv9mePdx8A+NsC8mVZbTc0g01GJGKUOGPofu",
"R7gcNNa61CVx9e4BLo+cCfOWwg4zupGQV2OB+VbOfRwuVFxIIXK0RgrmFKtlT2PKZustkA1RXpxQHVlh",
"jDCOoUsy0UFCR7w/6d2fmQ1KulFZkeI46yIUHeRWara8SSqh9XRYXhU3chd7pwXGyzkpDBqpdKjOwO7q",
"6M1e5GUiaw8Egyo/tyQX3vbHw/PtSgUmiPcAmxSO7SKTrXRjuLwAs3MlyUgxqY4m/Ui97prWTy6rwB6Y",
"2Sbh1/DST1RT20pVDe8zANRLzxQ6IFg6f46vr3oYxgj46D/s6ZGv7GglpbZissIxEsaOCwichTH6j1ru",
"tXx2QBhUJbDCBCwi8VCanrvcaR0G9i5c+1WfXBymLM20qUKuch+Vk7F32eySlo7iTJeFGS05lTHTRAFG",
"WySTf0fBTMi3qyZKjPA7T0HN4GQWEBBFPnILuYfWKeQuFrVWKXftvF8z8bMHNmMpCA336vLGGtLT1lN4",
"qhcWtpGRo2YPa7PWWWSUU4g/ZbR06jgJjrZ0kzVXcDGT1Q9SMb2ta16RmiNO67z9W1Z/y2ZP90TJ6iKk",
"hSn9dwOLVpUdScddCF9A1wcxICLrjdkpQXA2wo6XdXF+IXECf6UHeBSHsxgsFuzq9Ms98DH8ddMOC0Yd",
"R1HWpKrDFLYyPg7DTLcJhaJi2xtYAevG3qRgrU7EbzIcZmShstFenLpZSvb67LufT41lfwEhcBEZ1F7x",
"UZFgxaq/mpRTO6kj7MuivNVIKhbQfbnyw8V0UrrnOhIvHZaJxgbTzesZF9CxRkXjbKR94ITK2sPp56pi",
"i/3xeafbuRiMzw3L5fW22qfBpk+DHG/beRmMxdhbfhikoJtMPc1lJ12QXm4yH4BPFbnB2FVVWvDqN2aQ",
"Nl8xF5l1ZrxKbRqSGEFcv3z65YL77Bir+NA2VgY7nv2LGWyaJR2T19Bm+Zl5Ew6cOrW6Zxmu9Ze6dMv2",
"QqRmRF/HFZIgt5xgrGFGMTlWLpNYMXuYPvVYMaPYeHA1uZuoi0nXcMdPyFL6s/PRoD8plFz7OLy54R+v",
"by8pdiZ348HVhTKy/uRRZKylqdk+1w1GAY/cbFJqADaloyxlbKn4RkCQv0qds+piHc3KcXAkmJnyJkQB",
"4VGK5R0QtKiVrVlqNn3wN1rAVSPweCNN7jerZWgOYu4q1nRnVdRY3kOYCpUEJny6ldlWrVzQVJLTu51V",
"pXssQNgUI9nSNOSeg00RmamQyNL6nV9/urkcTErZ/CqSFOZfu1YrY6Jc/vMHdTbNus9bTKMThtMS9jeq",
"UKnvhWYNU7ZiA2H7B4uap8WaW3D2npTi5Alg4dbRIB+Al9eY7NygNVugjJhk9XU1w4mvxaG6DgqcBfJ9",
"hKEbBh6203HrPGELszi/pCH7gEBM6G+/1pePt0I/HV52s8d/nR9yBcoF1QuvevljBAMQoaOrMLhKfB9M",
"ffjnmOXNSFv10CIKYzapcMUvN44AveJ0ZojMk+mRGy6O54C4c0h6HnyUfx+DCB0/nh5jGD/C+DgE7Iz+",
"1gvEWJ03zNC6ZhhYshhH4CmA3nklOyo2ct68zJhVubvLA/JvDSnogPaElyRganhqeLB+wuKdU9lZq0Bt",
"4b5nURpLw6FbKo9VVFSzegWG0ljlg3Jdy8NqG7nB2S0eBCov78MAw7j5kYdEt6YOFLbvF0dqNdodVSQm",
"VkaaNAOIsNHI6815GNyjmTbfSHVx2bUqP69AfIWYI2twcuWTyzOJyHfNROsUzlLt46rW1OXWGxkNpDmv",
"0nOmm10gCuyqWn/yrJCv2MUNQblHJ/0WfC0q9Nu1ClUbYDelFZcCilPgBSTmC9kELcTT/RYtsB6MyNyg",
"99JPOWUCcS+wJ0BgfA98Xz/kzhTRteufbUeTaCg4uU9DQ2TRU4R3tEfXz6bQaKzrG7grtkrLD6S0rOYM",
"p+oAaxW25MK3cMRe5A7qVQ7dr4Uj5CXPUUpNLBF6o+NUHH0bO013lgSv24liFMpaKRq/cfHVREr6um2q",
"Plvj9ita10f858btKun6Pp/y5EltWOjK/mb6ZwCRk6oUenlwYXT7GQW3l0FsGjIwFtBWg5XsAkplh+fu",
"IZDN1kvEtIGbL0/zm3G1NRcrt/JwtQwXVeIDv6qsqQRql5k0Qh9NIWn9myHjCgXJ+dBCHRHMIfBgbHe6",
"87bFTRTT1uJKmakr1/G1SkT1FYGUDyTtpoHmXVM0pDJOLti2qIRaZbSiS53SURhCdWhMMFWRTUiUX2sH",
"KqAsHbUmt1U++NSfUR1vvlDxNv7QP+106X/OXv/O/3h9etbpdj5dvK7GXhrPqskiq0xkHxub9mIJTN3Q",
"s6hXlxthIDsxd5pZAEgSww9r0zEd2knH0wpMNAtYcSU3hobLK2bfGBumsgzNAqsJigG8KaIUPOlXXASt",
"lkYGCt7TsOLB/2HlCscDFhTD/7gdXVaTx164zkmdxtIhJtWBTWmj3DnwfRhUOYU2CNGrdICXz+6FI9GJ",
"JXCW2n35gFa29v3gajBicvP9cPLh9i1z8xsNbwbMQ69//rHT7VwOrwZ95nz3efh/THue3WA3H4Rd6aXS",
"3LdDmilb/47Wv+PH8u9oXTDKDydrGmL3+yHhYOzYDd/Iax6lNRZv8U69ltWbtc5M3tm1Lf9EnXsxTl+j",
"VTulchpeQCLLKBScfJPA3itBpGDAc1BvhVFj0Wn7d2GsgUc+GLEMHDZhP6xhpozkvQ3WD2Tg4ODNpZOp",
"deAox3J3cjiR6JaQlbc2rw7kt9eriZ7ZQj1LdcoqYF/q1UXVjho8uxgwvqknmC86lw+JIvNidhT8V3BM",
"UqNE++9FSTetSi60fl5oYqMlFhuZPEWRCq3ym8SGNNSybxL7jQxtwiBCx9XtdQ4lPMmXufjAphaJ7UwC",
"VK6KDOf0FHOG904QEieKw0fkQa/rACcGgRcuZKcn5PvOFDozGMBYXmNU6jrbGsabo9nbTwJcbW92Tcop",
"nLXIplLLbLrYqeUlL36srC+5LkbGFJf2O2DYN/YkCgIvq/AY86FWu/IvIJmHXqPVCtA/8Z6pbn8eegaq",
"/TCZ3Mjc2W7opRQsDT32+QbuAE84wGbOTfzVEuHVJCRQWXPOZ4Yq3to68ZiWAlamnU/p1mXGrkmn27m5",
"HrP/3E6YlmQ6IXkQFq6K0MLiTYjXYXJB4EQwpnR1ZF8RT5iV2G1Xm3WLUgJK8/kBjNEsgJ6TdWLWoNvb",
"4YUjSHr3tzwfTKGPq8uHsjaMzHM+IVw025EHF3J0HB0afYDJBwhiMoWAVN3Xc7vGqsGyOg7Amcve+Zvy",
"2cnZWe/0rHf6anL6+s3J729+++Pojz/+ePX6j97J6zcnJ/ZpUgBnMHpkDzABU58ZwPYQ0u2fzuZTOYYu",
"TKuSYlN2FtqGR3/wqnRhvApJjfJzaagqFtV8smqeuDaTEnayXk4YqLvYALLivFrokoBu4TC4D+24Z6R0",
"oEeTH5LsgrxKvWo+7Dgbh7luq8ih3xzwCJAPpshHZMmO51wh3IzIf6EQ3bH8t71/Jicnr6DzXXb2YVeU",
"VH7+VZ+n1A9NZxOGCxDNwxg6tJEQQysSzViONWbz6UL5rYtkZFOn2W/OJ8PPvCB3+udN/3ZsCDS2iW7h",
"e5RGtvCz0pgRTJze/DwpAFlvwOO9b+v04dvRpWb4puoxa69VbZSjonSyV2bulbmeaNdNOwtVFNnmxbVr",
"Jq9OVFqBh5d/iTVeBFIgR3lRVqiwDYJZIp6xrIXc+OIj5scu76wUqC6n19GrakK+Dr6RGGgbYO/BPGxp",
"cQwiVSG9vuyzDAM3/5h8YI8ik3/cDMbno+ENy5dy+/YfeuNOUeiWaKpW6AIuCOnQlNIKuq8UuHXxGGlD",
"Jwly4jw3uKZGPytdbqoNixbJQpmkydC64ucVnFE0qo0Hl+8+XI95qodP/as+TyHzZfD2w/X1R+NesOO5",
"bAJW16aPY0p/sXCT7jYoC8sVEVkYVl9O9O9wajii6BcdQFac/mc41R2JO9EojZiTRQQ1ajaYrb7W1DYL",
"tBe76uc54WaY3e0qVyDet5pJXOUpTSKz0mauOWHT0A0DD4knFn7LczWpTmaQKN9Z6XmN90cgk57wHHwz",
"SLDwXE27OjPaN9UaFLu9FmGM9cckBgTOarODKxBe5vrxstbmi0hZWqUQk3zh7GJ+4Vdn9eJLTl1cTVeL",
"1aotGl7o0iCmAA4vtDiUvT+iIGdIeXd7dT4ZsgPr4nbUf3tJldSL/vtKAUkHkZpIIwpms2vYS37Xqzdr",
"xWDuWDPSX++eK/bTmDuKMclHWBVOSUICfB3Fpjz2AJcGnyI5PCVLu4hNeTsHDo6gi+6Rm03i/BIBjKHn",
"PCIg/NV/1XOFERENHM7011sSJ1Azft37req5lRpgTk9OToyeWNph8r5TDd2gGi3o73AqxZjtOW4o/LB2",
"dDM/EXdtpORzC1vPy4CQcybapGOQ6vOh9Q4ylxp5u2ww+ETpVXbXaaiSGB1+1skdng2kuvIoYH+tFiZ7",
"cldWnH7sD4VREqyRV7k8yjsE/dy5r6btyGg5J8UUyVgzyVg6M7Wyu5Xdrex+KdltmOMHFO0V3pAriGY2",
"2pDAhdm/0nBfqe9srLY3ZqnQqhPurulxlmVb23gStQ0MaJDpxZS8xdwUYlHdEiKVUeuop5Qp9mZwdcET",
"xGapYjVZgPM5Y9P0sm/75x+v372rPSXZtCvdm/MCxUyMk7w4KfrbhMGNIvlLsNIGY3cOvcSvCIoydF77",
"OPpSzJNiKWBqNhvzKupGL6RcepYtsmNVPTJcuwijkYBlXG5CR3Koc96xTgstNC/NnzGENrl0VR5vyXTa",
"j4K5tN8kjzbPDl612AmY6dDrc5VxfZN/sOHkKsKsyyGsoh8hFM5jepG518sFLUtzvrxDBm6sm5A532tn",
"ZHLkTjzebnparF9hc82ggDeN5IVpyMUqA6f42axyz9UtPfoyDexOvEI0RzNPMWOUp5t82aoCQ9Fmiyyb",
"e8Kw2RD11YM5vdyDxCc3lVmWRCNjtiWrRwJxi/wT84N3YSiB9ef4+srhQJfDdtgIWica+Sz4Qo99Yexx",
"b0wLNGChdkzQAoaG4jiYIPdhaXLFod8cLJ5V7F4SFXnRgG2ZDvZ4Wngps8Kx0mfMkz/pUP6YUbY5iavN",
"Ap+U92zbd4vGyXKtr4FyWZIwcgN9red0RlabfBtqQp97sSe7Qjh3qMgehQpVhWMIub+KsaTIAnyrafHU",
"TNk31RXhkR8Jlb9MfnIIpxDEMJb5TBhG2bHCfs42ZU5IxK49YfiAoGyO6K7yn+Tb+ZuOCGHO+orUNrR3",
"gkm4sJzsmUl87haliR7gszj9myErd0WYTSz/a0qIndOjk6MTRsc8iLvzpvPq6PToRMRjM0ywmGtflImd",
"6QJk3svnedoqgBg7qT2GbjqQxU06l+L7e4YGGdDAZjk7OSkP/AECn8wZil7z724YEJFUQ9STpk2P/8ac",
"r3B6ANbw8SCOQyqFn0v+qVchSdeRI47Om7++djtY1nChq84aSp+SvwTM7hy6D52vtD/DXwyBt6xHIG2G",
"qjA4kg32HYVswQ4JHeC6MCIOicH9PXJrMZpioBalj6fHwKciJZj14AIgv8cekvHxd/az+tszx4sPieb2",
"dMF+xw5IM33R7g7rzt+mS7vQpy0GtAFzteAjMJ6JwQISpg/8VeHkU5rBEXnOO294HoRUaJSW0lGFGn8f",
"yHZsvZq8X0v09JvGkzBxXYjxfeL7S4ej1MulSSsh77nb+W1XlNd3FsCnWICewzJoeTLsiIPxauNg6KB4",
"F8ZT5HmQ3z4y+uZ0UkVmkuInrAk9rL71YqFysA+8b6erIYyv7NpLXE2WdH7dWofE+Qg/Bokzengbcnm8",
"EWLg2OGbVkBcGrdWJpNKbJHQSSTO89h41ov9jSxEuwQd7DkxwAFtxYClGODUsj0xoB6QEeqR8AEG9FSU",
"f7PTMAp1KQ1G8DF8gA4IWLJG1lp4a6UzFsREhCa0lTTo0O42UiId3iATJKx7ddzFbHmCzhl0PzZR4yZU",
"LUiHbuxE7Jwk4+y3KkpOtzxHwa4fJt6xekM3a9ClTHHy2sMGcVCACQhcWCLic/pZupeYFevt45YB4iRB",
"FnCxLwRWo7VzBKvv9WLrPykvbN96coheGHFnF3GiKfvNzeHH39l/n6v2m0op1uqotKHMKs43slYS8STR",
"JuWEp3DcpRDa3GaL1Eo1hzevofIoxBrHBtuxVrblSFzBTEbeHMUVUo3Tz1czhR/XiTW2LalUq6H5i1SA",
"/ex0f8FIuKX9/aL9BVz5DDee3rs7uEXGtSY0lR6JB3KQb+IIp2McMzs93yVs3PFLhOkFyHdyrU0bTFsP",
"8w23ttt0LrHjypQNN19mwMmtbp8IId16thGFTSjvf26TwwCRkErz4++c45+PozicQvPlUr59OiBXeYLZ",
"dXnlilwuBDPDp1PfhJiMkuCGzWtvmzIdeqnk2vGpV0FQ8Bt0E2lbYfg92umpcBUSVoEgjNF/eJZ6kdOI",
"B1/zKM2SmZMA5EPP4XZ7h22P807I82G2rfqDI0dm2Afuw/F39h8LK74zpg2VAit5ymFfRXIoe6N9bkwj",
"8TAQ99I6n8fJPqk2p7sB4zbISJhP/Ho3E/OcYyx1I/D98IlOr3sRKFKtFL3s9yoVixNdnmMCfPwdB9iK",
"W67GqtQv80uAG7BJfjAzo4iTe+/YpICMllH2kFFKBJuyytW4klECrGETqbgo1ia96kLnlVfiEos0fht7",
"Mf2jazYE8MJMK1kCFBjOXr/OAXG6CR0oikP6D+i1Z9gesabpEsnqNzggiiS1l4813qbAjwRMfXjsgRk+",
"TlO/Gy+NmN0aWTuHzAFxptAPg5maVSBNMw5m5Svl59MLwMrNTkQJ9XpzmUzwnSVo4Sm3Gcv8O4HxMuMZ",
"D8zukFd9zG0rQsRK7hTgfamLjzX1bqwG/gWYnYuYL332sQo5RKeUr39s1p/bStjtvN6V8KO3ULSIfLiA",
"ASnpBsx4IekgfToH+EErYVjD4+/0PzXPS7zSxXTJ+aYoQOgElqZ2No7x0KeA7vjIB4TARUREXhaDUBCN",
"OiospViobdrxCzU9GpneGFZ/dv78jd99tj/rRK2/TzWF+zDhSZr2RERk/FwSEeY7A7ERIcd+OKvTVfxw",
"5vgogDLzkYCjKFEuw9klCng9lkOUKiLLEwlFWt7p0iBZeBpGLTQoIKyoZDno0pA8NyYiNXbozCChqGZY",
"NsyMEbc8amauSN1guDelVQWspk4CgvwNTN13qLzrEfiNOBiC2J07bCalxnPF+lkHnUivXiujYPgI/V/w",
"r3QiFLh+4kHT/tKWuKPVdqsFvmQBOoCtcuvJ5DYUMBalYqY89vluurxLO+WgtAKulFPH6pC12p49OHJV",
"IdRAIRZRrO27eV4rTSW/cuxchrP1Tx36/70sdNj8uqoUajMePGkdth/g6MEPKDIx//09hhs5d7Z60m1f",
"pc72egUHmfba26rVORmnkzDrq9ishWKid6F/7MFpMjMb6QePwE9YvSfnfHDpwG9RDDELqgUzgAKc1U8T",
"9YE9QMCRRh6eQ/+CTXUoLgWbj2j5fHo+uGRIqAlgYZjEVBSyesFUTOiRv9M4FhV8mXaxRtRBQT2eZg2t",
"XqO+xE2TWYnFFJ4/H1yaWd6K1y30Gv4AkBc9aVXjIj8302328Y3uR9JvNDdaacx/gEusXJSM09J2za+X",
"jAxExH3dxfI8DDCiV0lBYuyRKXRZ5g3PAfcEivIT4tq+TWNDNSxTeB/GsBaYTZkf3vGtIWEOGhCzWnSh",
"i5gEfUJkrr7FFctDa+DL0koYdnbLz2T268ql8XcWgLhzxJ4eXRgTgIIsdL9qnWk2PriSoaRQ+N16cemW",
"iFVOl/S4Q7HDnyt1EIuEfS+6LdOlk2XIzXzEWTG19F5isKmUEwhrF6Ip5iCneYDLHq/IFAEUY+cXDzLB",
"R7lv6QDnX2/+9WtRbFU6QdgZtrAbRtBKHvKWtutirdeDd7t3VPv7aWuBqrNApbxhGbbRQEE7ZsewpZbG",
"z3YrTe0jXB6Ksrb1MCaJi6aMwNDdMoOOGRyhPW6BIb4/nvYaBK4y3wKC9f4FTWJY99iv0ASTxNSBMqfY",
"n/aA2khoIW4SVphSjhVnch3H5pgSLWvPKK6StuaEfTUnlKovWyjQtbfPyilKV0R2GedzHq1faqLZXQEn",
"UwyJ44LAQyzPjKTrjd4eqlbs3GLoMTbisBB6PS7DA4i0ubK3e0PRjJ1ePBTWbiDYpYhpJXte25J4yWQ7",
"x2+VrtU1vO2cs1JDDnAC+CQGNopm3vbnfrxhKODosHnAYe83KSk7rLATt+rv8s1GkEcd64myUwrA7ZP0",
"rp6kr7JX6BzDp/yZ8qY9z9trceyCxf+2CW8EdZKiceLO/VLjBLciFpftybXoL1spJg7ztmUpGmQsZysW",
"XlIs2LJ+VyFMevRXhGKkCrzZYMJnO2SLScrPPzkXz0LSHu5Gi8kKZ2yR0SrTBNcfmwce8Jw7NtMkuy/J",
"cNu4AvBNWvkK8ALJh63lg8w33MqHwzvlLZR95tu+yIrVVagFQjLKQGAnTgJH9KzOW8w9KC4RJtyLQtbG",
"O1SZVo6EUtBQ459kAejawVH10GzKQalom2XW38Dj3jrm6dOaZuiFPF0o3LxqHSPl/4XVlAMGoEWVO9r+",
"Tra+Y623SmxZCgT+xsdcpdLKu1n8rSHZAG+Igtkdr+G3I8j7Ggeih96j8OmxeCTIPInuFpWuRC9rxKaC",
"bZQEUqI1j5tWpWib42B/ApjZ3izSg8ouxsL+xI1CFBDLc3eBgoRAeh2Xf8UQPHjhU5AexQ2O4feQ3NDJ",
"D/0QZgee9A1WQneEwbrTVarUn52cnfZO6P8mJydv2P/+r0HuiO79e34T2cQBySBNPYdVUEMK3xrA3qMA",
"4Tn03rLBm4O7fdmYI7UVpCPjk1Y+7ql8zO/OxqUkPnZZKXBzFBovFZ7mo9HJO97k536gZChgqkpNgSSe",
"4yt0XIm0nUaRsUl96PE8YbUvk7J5mySqjZYtyaiCZNi4ZIph5INlVXEn+r1SMvEmP7Vk4ihoIpliibRd",
"SiYOpq1gikXrVi61cqkklwpyYYNySaT+tPG+lenV67xvRfb21v12n91vObk4dFi7+DXW/oo2XyUYUtDE",
"OB3F1t4qic4aUNGhAtLqSV7cw1VlnwYurikjt2/xeR/XFDGZ3BQoXtvL1VTEIt3E1s9V+LkKfDR55ZZM",
"+UKerpJGmri67mPy85/b17Wc2dyC9xuoTczdVfzDzt+1VmYcuMcrnVy+PUoWrvd9zbBiBna3dmhb/pf+",
"rC3v74WrSy17d1Vyq3FplfQrfFqFemjg20N2ay0owD8aj0pv1ZZHDe6qNcckDOgp2IsBgT12A6WbK/be",
"ksvq/Flrj8UD92jdLodtzzv1x1XcpYtqKxj2SHHXyIPVT3b9Df4mxCy/BwrccIGCWUqvC4gxmFWc8CPo",
"QvTYyqAmMihIfL9E+cHSicDSD4HnoMABwdIRq+12CPxGjiMfoAKlFafciQyxyEyaw9M98DFslQtD3THO",
"eBp2W5XDbe7pwme4FydB3RtHPmtg7StHliWwfenY/7ylWGRytHrr2FnWR+aHD2IfQcxyXUMr8LYYFOAD",
"0gSUjVVM2RvHb8tcNQcSrUCBSOPobDLswHjLLv5f5pDMuQAQFWqci/57TE+vMPCX6u9p3UCdQAr85Z1s",
"UKuoTMPQhyCwiOnIFZG0wNkLhXdoSl0a4zwsMvu+WLyHc++DGTtqnwRdhDFzwFDJIL1fgsBzwoTQP4X6",
"iKn+SBtIXfDIuYD3IPF5vvt/UXr4l4PunSTAkB3juuWLme7koJ1KEtpZPb2mL8Ct09C+1d3IaZSqoit/",
"H9Hf13yJUjXcYw/hyAfLHnOXqNF3RVs6rHCvCO8rlOBqHfiCD8bcLg5aH1ZEK07fsXJIEfGSAn0CdWZF",
"QJGlL1JueMsmeC0JtKKrFV1NRZfkkx7lk2rJleNRpj3oE/5n6e0qJNdADDb0Dldwtffc9p77k9xzd3ac",
"ZXKhPc1+pNMsd3rs5GQT12tz2M+EN5BepfkLe8XR1bqXngrUKUipearOkQIJhf/mrt+oFa0ZEoB83MzP",
"VKWQ9r2p6PZZYKANMHien5nPp/JLTSmJPMmBwGPOZOn5T8L0KimKJf2z4zGi+GfHiQwP0hn9WLqd5WDg",
"ts0Z62l4BVaWd7C5DFfgsvYU3+NTvBj+ZsnQ3RJBr8Dix6JkXBWnE57liyTMcJTn+6NaLh7LmnQr8rI6",
"vaKu/5isrV4/W5beUyev8zDxPR5PSy+SOs1lj3KT5LgqLRD5IrKGJXuyKLHLwnJ5kDu31NtfHdIi89ZG",
"r5+nIk0mVrUGkB9Xoq5U1bEVqq2eVJRdBC1QMKvXlkS7xtLrPSQTMcXB3n20MsiDEZnzjCU8q5njzpHv",
"xdDkusE6NJR+2xckfHNaSXLwkqSKPzctXmAkZIr88/kYxO4cPcI6LUi0EmDS7loRMiYwEu66fTmwhfiQ",
"4xmtpxLe1nV3dY1smzJJ7LvYcyuplE8q2dYF3X0+ppTrCjmZykIqx/4K80v5RLefyqYq0ZSycL1MsrmX",
"idL99vJoIGusttLoJ5FG9netVhYdjixSGH/7ksgPZ3WeUn44c3wUlHSjsjn6MpxdogDaWoNaMfSy8Uw+",
"fIS+lcsQb5mbuYoZJB3QXu8Q9D1jBjlID16HzabAUVHMhHVoCsiY99KGkgAWKBDGXtX62ee3S76WhpNf",
"q30NeODTeyiGroh2r4DiQmm2CiRZ/+0eUqo0aAvor5uCLpXCyllwGc6aHwPC0agitTnzgMDCk8jguD9h",
"P5+rji+bdszhg/OJ6pL0ctekl3HF4RA2cr4RSP2xaXwFr5uU2NLstMKfpkjkOopOXedqTcbcNUa8sFcS",
"eNOETGlgh5jB+OSzG2+5l6V4mTKppfbd3jY4MXoh5BcN+I2fwKVCGrbMlstoWp2DKeCzoWBWzVeHk4lp",
"S16nHAFNDrcopogkiMdlvEDhzvacW/+cE3yyAutVnHfHwKeEEcx6cAGQ35vFYRJVPpxS5U7eAgV5sTEc",
"NoAjBiiybp82GdAW72mDQ4l02v5JqENMw5JTxk1oeSf/mlhBrY3OMeurT3muOsb46UMq1JtbATd2Z10J",
"5Y2udqfbZe8VTkANDbV8rb37ablts6fkMYaE1LkWYbZ7sosju1RnM1DIBQWzsehzIEl9d3RMKohZ44xU",
"96RlJc21ToOmjfFRhHokfIA1yfCc/s3Q4e2quaYfoQlt1uqT+Jj5Fd0MGT6wRepIHZ9I/6jWhl5UHilF",
"ctQqzJD+uE4plyCjdjtib3VEhgBJ64pauE0TRnHSlr82HDabMVNDBqs6cCy8pXh1uZzLlCntauY006Zb",
"3Wv3hAe4tHJOoO2ap59hZPARLm3ymmQwpe7Lwwtsmw+Ty4rGAEqX6OHFiiBmMWhrpPKxgXCUBDyOUhi+",
"XsTVg+3nyzh6sKn3wM1DhUN18qggliyDEFw6j8BPoD6PEPwGFpEPqch+gMvTN6zpaadL/3XG/3VGxXt1",
"vqFPm003lC2DJy5NMw5V0zlrPDz8TEMrRdq13jWB2edSUVoYctc3IbNxDTpIewVgCGC4qDELi8TEL+Le",
"wymhic0X8h4/u3f12X/vZtaR4E+hnsJvLoQeNJRz5HvTgM/rLybH08R/MLvTvU18UccI4kwm4EqhQPv8",
"xIKBLr+hcMAvKR1wc/HQRl/smXxgbKoKCbxhKeGCwIV+hdst+84NGUri7JyKa5Ia3K2Ej/AzKxQMAfYK",
"hbgwxDDywXLjYiNz2KL/esouy0OenHhbRTzkD+H0b+haaC4MaTDLUdIKqb0VUiNGqduRT8yMZmlj5bY5",
"CzvrR7hsn/UyY+NKt3WG7PbGrruxO8L2u0k+EKeB8ZzmPIibHc0jecT8rEczR8C+HM2bMatx4Fqt/ic9",
"ML+z//aeEJn35Cdm3a4NPwIE8MMzqDQQXgAC3kPyBZH5RLJ9rfyQ7KMXHyWQd/12+cOf8nTTVknHwKii",
"PeXzvmwKZqx5t6sh8mp+RsEjIrBpwITspXcCHbKvre4rfT8VfKzk9Smx3fp66sIhMlrcUgwEn6CS1tvn",
"LCXqgaPELtiB4/ZFIxw4uKsENgjC+Nlje8/OdqT1AmL3zlXkW51cgAGY+rAXAwJ7bEzKHoLXVtGLhRSS",
"P/T4v5+5iPEhgWVhc8F+x6kZyUbQ8D4H672X5/pq2HopOg795K+VLZxC9lm25NiME2FGriZdNL+PtRH0",
"zTjhcKLoD4UTthvov5pW8GKh/pacy+E7GM4VIfiNObfq5FvAxZQxX6MbpOylZ/FP7Gt7g5TUqOBjpRuk",
"xHZ7g9TdIDNa3EyQoBjv+Dv/w0IJdIAAwrmPw0VdkC2nhh9DFRTLNsHGP++Ud3/bCu+uogP+HFy7R7lq",
"rwypaVMmzW1MA3nRlYRskUaqNIlZBPwYOvBeiIDtKr98u+yUX4GOPUl5ZSm9NHqw2LdWeL2w8DLKlRWE",
"V5XWE8XhApI5THBvQXVQt758UdbFEV1SH7y6zJQ3addPYrIf4qJA4DdyHPkAFaiiOFKTO0AZyy1TvjRT",
"Ug7Q7MumbiD/TmACrdmQtW7Mgf9Dex0Q8x12ZPMhBatu3x6So73VMlg4jzDGKAxambhPMjHdnbJElJyz",
"qkzMnvpsXL3j9LGxztd7BAi8pA3bvBr7XJ12EzkYajG5zUwLKZ3tQbaFIiy7KquR57UGwQQKO7d+hgUr",
"uIqbTNwyb4tL/uuqElf06EWhj9xlfcpJ2cHhHWwSTkpX6BvWo003eaxDy2qPRoXdaB+Pdp61FfvAfahO",
"NDmmTZwnOJ2H4UP5OZV9/sK/ts+pPMekipMmt4cCqveJHXZU8fg2AAmZhzH6D/T4xK93M/EnSOahxyp6",
"AN8Pn/TVlvkGMT2Qs4B6nrGPazHiMSYgJkZ2HNOv/By77idk7rDLSpEhb7F8tmEAXVOEsp6HyJmvTs40",
"eFC5h6FMHCs5rMwh8ITXiB9ygqmxeLINh24SI7Jk+HHD8AFBOigrivRVpQeG0vyMkhDoDqxMB3V5f8dX",
"4yIBFgRygFs5LOTw1XiooqqBJC5iuZXFeyeLy4yQSuKr8RrphgsD6xisjcZgCMjzV2WW4c3RbH5S66iK",
"4q62DL1HDG3kPEuOrjxRRZ3O3i6erETp8EN7udq+uUCHmGY2g7SedW5n2keVfXhUSfdm08/Muqrqlayb",
"FVB3pkvOUIXTmxPigdjxuvta2X2bEkNs0YryoZUIOyuFqtLiE+D1UOtEhHqo05/oRq9aZbtaTtTmBOwT",
"AheRSG7J2iriwyQ4Di0ZYCtBqlziEWa+0kKEcCLw9++C8MKPeHWMsiuGjiHtWJE7jCVZtOVh1rxl4X3M",
"ZhYngdiqGo92FEQJ84fgj7u65T7vhabS5jKrkC9sw19CoGRrqrQF8GbCWaBOuLyHZMyHbUXLy2kHzbL0",
"GiwNYrj2QrHPFwq5S1uRGgTghx4mgNQYDAF+YNWghKWwxko4AfhhzAa1FxHDix/RNpgiogGHanHd8uge",
"mAFNbLCL9EjCa6b3FMYPVckiMgdso0tT682UBZNwVHxhSKUIqarqSZGRBrzwjo7cjva5bd/ezxXyXz2J",
"oRjExEI//Tt5jn84NnZUjFczs9coBaHc2pZz9++hXGW8lQ5LRhXVD2n0hOTCu9pLPjsbfvrDMsNEW/N6",
"IxmqpfaQj9Fb3btSIpobgprXolCr/2pKUigle9vCFEphCgUvuMagm6uv/HJlKnRwW5ezV2y9OYJpL6l7",
"Wb4iv0flcOBqU1ITgfNd/WedH0uOE2pPYEGmh+zWUmB9PWgqBg9YTRDbtWpmgdbNxRzXn39Bqo/p7+Zp",
"anV+PmaPkbWPSfzJkjO0CvRRDV8P2egtc788c2dZTG6UIpQcxnXenfI4YtvdmrV3ZNb+ouI+sMkfkm1S",
"U5VhcxIHz0EEt6RHjNnYrbw5GGWCb1irUfxAGkUauyJ8hiojQ0Wldsbivp++j2ONrlHF+ixwkruyDGRh",
"v1YGbBzAS4CJM7xgCevn0PGB3EFTmiKAydAz5il6dabLU7QDH9smBT1LZflak8j++dasIEvsHW/sZCG2",
"eplgLe00mp8ycZoH70Hik86bk25OVOwihVo69+tVJh/zTGrTpcMm0E8qPpnzOexC7Wofezavb20yJWM6",
"Zm0w0LmMa5gC4s5Ljz1VGtPhBANty8tBeSfhyLB12xfRJOWnkk0/9kSKpeZ7qvSNkmDo4Vzq2bUQXM63",
"29AgJCKQ2tejmvRonGx28XKDj904DOo1EtrK+TucZkCRGM1mte4T53EY/NRqysHkd003Fnl02hkkqUp8",
"VJPG23Rx28Jdl87cFLyrOlVKOyWj+CbT0Q7NpzrMDOUVOXOnS+de5OXdWOpeVYpg+/S90+X2MvgqSsGO",
"c/jmkLGGht4euxotvXTObUldp4fu8Xf6n5781a7MXfkgtn74oIRz4EXv0tWbwMphdPdl7yzr02k3sc0P",
"XKwXp0dTs7eKPEF8fe5WPSauyVyH7J60x5y1paOzPTYPwbDf6LDeiHyoKy/JZk1ntBYOB15rcr/kw7aq",
"TaoCYsINHFa2PkoFvISjjW2vTlVQi0G2qkK1HBBsuQ1RYKfKs+PA9kFPfWWsd1NqDWb7bDBjj8gNrGWs",
"/Q5NZftox4tATJFmcF0pgMUbf1EfM3YEnyZFjBY24SSyXbj62vgsloggwdCq3qJsu4p1a8z6CjuTDXAP",
"KPCsoGING4P0EQVePTQHb0wlaAEdcE8BLTlPPwEsY5nVJXTOTs5Oeyf0f5OTkzfsf//XaKxm3ft0Aj3x",
"0mO1R6Ho2FYjpxBP4X0Yw22C/JbNsEmYK7B8jwKE56vDLPvvFM+bAnqjmN7e40DZEv/TPg0UdcfWwrEV",
"d+ntvAkwD2mb/P3AEaDRgy7P/mpCf8tAiEOuQN2q4a0avns1vNUtW93yRUKg8JoV25kAaiuL1J/vW6ie",
"np3zFFQv8enxWGM1TFuuYj8cy86tFXGfrYjbuxelBHBQnlOtMtUqUwejTGXLyET1RmyzKUhWDJ5aaTUw",
"bzVGsiRhWqvDZrUSgwawXb3keJr4D73ME1EfUfQ28R+EU9uGFBU64uH4J27JD6HMUxlabMOOpvVbs9s6",
"IpVrMieeU0ksTtu1EkJKiLdW+7x1ScHdVWokBW/k/BJD2fvXDYqNw3Gu2qnYkGk6G4gNsU/7KzbkmmrE",
"hlhHKzYMYqN2n7cpNr6nf/ZKOSNrIyD0IDcUGgceB6HBgbGakRbVexsaod/d1uGxGBthwFMzj0cDbdRE",
"SWyEAQ+6QvFBcd82D+T2rn/oMRTbliPV0RS568CGJMuBB1rsvXDZVuxFSbo0qI+akVE57+PLXllqJaQa",
"7PFTKj8HUP3ttuqytClZaXeJSlNoPmeZW6rKWDnACeCTOX+LffoWEQ91OEWv6jOJVOfMrARtR6KRY3vV",
"sDRROdq4+TuVjc2Cb9VaXWb4W8m4e8m4d4VOhKCrovLtpM5SZHHOqUcvj6VuICSyvYarU4xaKbxLKSx3",
"YAXNtEKt23PFVJXArWLail+T+BUKSZ1OvHGRy6vn9dwwCUhNvARrI3ORy7KP4BEgH0x9yKSvIm709oX3",
"kPDqfPiczXjworcuZfyBl4zIbdaKZkpOKpx82hdEg8N0DkmrFZLIs3+CYYyP3SSOYTVnY3474A0d2q3E",
"vbcYxu8hOReDbZHu6EwN6YxB3BYgfvkCxNBNYkSWTIy7YfiAYD+hsuuvr1RUFZIO5clNkjvbfg0ZzxCZ",
"J9NjF/j+FLgPRnI+DxeRDwnkNH1N53e05xGdiNuj3rOhrykuz+XwBQJ/dXJW8/bqinm98rxzCDx2uH3v",
"+CHfjPw+FMX6cwGZOdzJBebnsEQfJiA2i4Ix/boa4ljX5lhj8GwfZwy6hggLw5kPt0NvbOgfnN44+jZM",
"bxnifjh6Q8EjIrC6dhNm0UxSG+YdmNJtdXzTESas71DMtcVTXJ3IypndR1huTH6Brb5ofayymjwF7GWU",
"N9HcEHO0dwxcF0bEbHnrs+84tbCJSUrUpm4+79PZjj2JD84nUgxJBgNQBfXxlevor/WYSsmLY7u09/b0",
"FUNW3aKikj793oy+eJ/OturS08E3QF985S19VdIXx/YK9OWHMxSYyeoynGEHBQ5gZ+NRhYJxyQbaknMG",
"PYLp+PWEtLt7tB/OZtBzUNBen1/4+tzt/HZ2tqt1R3FIaYAZbQcBQWTp9JxH4COPTUY3RTRBwcyBciSz",
"wssIW3+V73a+9WBAp+rFgMAes4FTHZq/1eiYOUxIDTeHCbFj5zB5eWOVYLJwzwp1t0aqGm2aUY+tfWoB",
"F1MY4zmKGtzhlE529zh+Bn7KuomkFFslcP2kzS90KoraS90qlzoVg/UkGQGMn8K4wpUizcVOOziyfZVI",
"vZFjbk9JOp+DYJZOtE/akssg81JEteK8VZqaKU3VrM4pP8+Ma+tTMZxRSRxXXbt5C1ypUqWeUtviewnG",
"PnG8RF770Ngy/WZuSpLKN3NZwj5wH7bySDWmI+/xG1WNJG34aPUIYyxAMLo/0TWIdtIFCsP4UaOlD4P7",
"8D0kn8WgG61JrECaZWg8PTo5OtHlgFQ8j/5Ku361KDc8qVhswduygti/QCeGJImDHPIKNx0qZpMgoPyT",
"TvGtJ4fshRFPOVVmgSc4nYfhQ084oh1/Fz9YhL/To060Ljuq8d/tI9vFQGZHsHSiHfuBWYaKS/jag+3l",
"jRPF8HSVTI3eX6LFVyvmOBZ4tjFTyKbCr76GY4Tihm0TZe4t32zGf5JDz90nBWooZqoyrlCspHVABHbS",
"7WrZc4/Yk1llSlvUlEdT3mR/PNd4X/NWWsdq5pxpxXPcybTKZ1lzxh+Ox3Jj31Gx4tYeWXJKLgV8yQuK",
"2QeZqdX1lR8rCdk+7cBe0PK2ovhz54bprBAYSCTKdhcHZclralB+y2mGmovrMFvhNCkG91glAmtWg7XB",
"vWgvI2SaJNFKAWwD9F44c4QgVoViVoyP6dZpWPac0EDl+hkCxVYMDmt566V5S41CW4exbNQ+e+5qpgfu",
"BYNtXhfMI8M2Vl7kJM1x2a6VQyuJUFQPW3lgVBDXY84aNdGqXB7dpHxdvJTxHtOXDuNJ2aA83j7ws6ZE",
"BS8wsYH6watXD9YDNovDJGJ1PzIQ5EYZQWGdPsJlpzYNyJaFxJq1uOSjUluOaw+1iZXqfzUSXDI1kdG5",
"RWbVaJosaKUcQXspuSYadjlyhvfMuo0TSh3Q6zKu8gGBmKQ8hbBzD4k7h56pOlQm+PdckRJksGLioRdL",
"N6TA2yjPUJtdqM0utIXsQo1Es5AN2OJVK3eSW4ll4VtzQCaYH0Eub1nKSYep9VTBVt7tlQqYkeKqKmDR",
"8W8KQQzj1PGvq3UFZJ5kXB4ksd950+k8f33+fwEAAP//y98GQS44AwA=",
}
// GetSwagger returns the content of the embedded swagger specification file
@@ -4,6 +4,7 @@ import (
"time"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/api/v1/server/oas/gen"
)
@@ -2,6 +2,7 @@ package transformers
import (
"encoding/json"
"fmt"
"github.com/google/uuid"
"github.com/rs/zerolog/log"
@@ -53,3 +54,48 @@ func ToV1WebhookList(webhooks []*sqlcv1.V1IncomingWebhook) gen.V1WebhookList {
Rows: &rows,
}
}
// ToV1WebhookResponse assembles the API response for an incoming webhook.
// message and challenge are passed through verbatim (either may be nil).
// When event is non-nil, it is transformed into a gen.V1Event and attached
// to the response; its AdditionalMetadata and Data payloads, when present,
// are decoded from raw JSON into generic maps. Returns an error if either
// JSON payload fails to unmarshal.
func ToV1WebhookResponse(message, challenge *string, event *sqlcv1.Event) (*gen.V1WebhookResponse, error) {
	resp := &gen.V1WebhookResponse{
		Message:   message,
		Challenge: challenge,
	}

	// No event to attach — return the bare message/challenge response.
	if event == nil {
		return resp, nil
	}

	apiEvent := &gen.V1Event{
		Metadata: gen.APIResourceMeta{
			Id:        event.ID.String(),
			CreatedAt: event.CreatedAt.Time,
			UpdatedAt: event.UpdatedAt.Time,
		},
		Key:      event.Key,
		TenantId: event.TenantId.String(),
	}

	// Only attempt decoding when the stored blob is non-empty; an absent
	// payload leaves the corresponding field nil.
	if len(event.AdditionalMetadata) > 0 {
		var meta map[string]interface{}
		if err := json.Unmarshal(event.AdditionalMetadata, &meta); err != nil {
			return nil, fmt.Errorf("failed to unmarshal additional metadata for event %s: %w", event.Key, err)
		}
		apiEvent.AdditionalMetadata = &meta
	}

	if len(event.Data) > 0 {
		var payload map[string]interface{}
		if err := json.Unmarshal(event.Data, &payload); err != nil {
			return nil, fmt.Errorf("failed to unmarshal data for event %s: %w", event.Key, err)
		}
		apiEvent.Payload = &payload
	}

	resp.Event = apiEvent

	return resp, nil
}
+15 -16
View File
@@ -59,12 +59,10 @@ func ToWorkerRuntimeInfo(worker *sqlcv1.Worker) *gen.WorkerRuntimeInfo {
return runtime
}
func ToWorkerSqlc(worker *sqlcv1.Worker, remainingSlots *int, webhookUrl *string, actions []string, workflows *[]*sqlcv1.Workflow) *gen.Worker {
func ToWorkerSqlc(worker *sqlcv1.Worker, slotConfig map[string]gen.WorkerSlotConfig, actions []string, workflows *[]*sqlcv1.Workflow) *gen.Worker {
dispatcherId := worker.DispatcherId
maxRuns := int(worker.MaxRuns)
status := gen.ACTIVE
if worker.IsPaused {
@@ -75,10 +73,13 @@ func ToWorkerSqlc(worker *sqlcv1.Worker, remainingSlots *int, webhookUrl *string
status = gen.INACTIVE
}
var availableRuns int
if remainingSlots != nil {
availableRuns = *remainingSlots
var slotConfigInt *map[string]gen.WorkerSlotConfig
if len(slotConfig) > 0 {
tmp := make(map[string]gen.WorkerSlotConfig, len(slotConfig))
for k, v := range slotConfig {
tmp[k] = v
}
slotConfigInt = &tmp
}
res := &gen.Worker{
@@ -87,15 +88,13 @@ func ToWorkerSqlc(worker *sqlcv1.Worker, remainingSlots *int, webhookUrl *string
CreatedAt: worker.CreatedAt.Time,
UpdatedAt: worker.UpdatedAt.Time,
},
Name: worker.Name,
Type: gen.WorkerType(worker.Type),
Status: &status,
DispatcherId: dispatcherId,
MaxRuns: &maxRuns,
AvailableRuns: &availableRuns,
WebhookUrl: webhookUrl,
RuntimeInfo: ToWorkerRuntimeInfo(worker),
WebhookId: worker.WebhookId,
Name: worker.Name,
Type: gen.WorkerType(worker.Type),
Status: &status,
DispatcherId: dispatcherId,
SlotConfig: slotConfigInt,
RuntimeInfo: ToWorkerRuntimeInfo(worker),
WebhookId: worker.WebhookId,
}
if !worker.LastHeartbeatAt.Time.IsZero() {
+16 -17
View File
@@ -55,12 +55,10 @@ func ToWorkerRuntimeInfo(worker *sqlcv1.Worker) *gen.WorkerRuntimeInfo {
return runtime
}
func ToWorkerSqlc(worker *sqlcv1.Worker, remainingSlots *int, webhookUrl *string, actions []string) *gen.Worker {
func ToWorkerSqlc(worker *sqlcv1.Worker, slotConfig map[string]gen.WorkerSlotConfig, actions []string) *gen.Worker {
dispatcherId := worker.DispatcherId
maxRuns := int(worker.MaxRuns)
status := gen.ACTIVE
if worker.IsPaused {
@@ -71,23 +69,24 @@ func ToWorkerSqlc(worker *sqlcv1.Worker, remainingSlots *int, webhookUrl *string
status = gen.INACTIVE
}
var availableRuns int
if remainingSlots != nil {
availableRuns = *remainingSlots
var slotConfigInt *map[string]gen.WorkerSlotConfig
if len(slotConfig) > 0 {
tmp := make(map[string]gen.WorkerSlotConfig, len(slotConfig))
for k, v := range slotConfig {
tmp[k] = v
}
slotConfigInt = &tmp
}
res := &gen.Worker{
Metadata: *toAPIMetadata(worker.ID, worker.CreatedAt.Time, worker.UpdatedAt.Time),
Name: worker.Name,
Type: gen.WorkerType(worker.Type),
Status: &status,
DispatcherId: dispatcherId,
MaxRuns: &maxRuns,
AvailableRuns: &availableRuns,
WebhookUrl: webhookUrl,
RuntimeInfo: ToWorkerRuntimeInfo(worker),
WebhookId: worker.WebhookId,
Metadata: *toAPIMetadata(worker.ID, worker.CreatedAt.Time, worker.UpdatedAt.Time),
Name: worker.Name,
Type: gen.WorkerType(worker.Type),
Status: &status,
DispatcherId: dispatcherId,
SlotConfig: slotConfigInt,
RuntimeInfo: ToWorkerRuntimeInfo(worker),
WebhookId: worker.WebhookId,
}
if !worker.LastHeartbeatAt.Time.IsZero() {
@@ -213,6 +213,7 @@ func ToJob(job *sqlcv1.Job, steps []*sqlcv1.GetStepsForJobsRow) *gen.Job {
}
func ToStep(step *sqlcv1.Step, parents []uuid.UUID) *gen.Step {
isDurable := step.IsDurable
res := &gen.Step{
Metadata: *toAPIMetadata(
step.ID,
@@ -224,6 +225,7 @@ func ToStep(step *sqlcv1.Step, parents []uuid.UUID) *gen.Step {
TenantId: step.TenantId.String(),
ReadableId: step.ReadableId.String,
Timeout: &step.Timeout.String,
IsDurable: &isDurable,
}
parentStr := make([]string, 0)
+4 -1
View File
@@ -510,13 +510,16 @@ func (t *APIServer) registerSpec(g *echo.Group, spec *openapi3.T) (*populator.Po
})
populatorMW.RegisterGetter("worker", func(config *server.ServerConfig, parentId, id string) (result interface{}, uniqueParentId string, err error) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
idUuid, err := uuid.Parse(id)
if err != nil {
return nil, "", echo.NewHTTPError(http.StatusBadRequest, "invalid worker id")
}
worker, err := config.V1.Workers().GetWorkerById(idUuid)
worker, err := config.V1.Workers().GetWorkerById(ctx, idUuid)
if err != nil {
return nil, "", err
+11 -3
View File
@@ -245,10 +245,18 @@ func (v *WorkerDetailsView) renderWorkerInfo() string {
b.WriteString(sectionStyle.Render(labelStyle.Render("Last Heartbeat: ") + lastHeartbeat))
b.WriteString("\n\n")
// Available Run Slots
// Available Run Slots - aggregate across all slot types
slotsStr := "N/A"
if v.worker.AvailableRuns != nil && v.worker.MaxRuns != nil {
slotsStr = fmt.Sprintf("%d / %d", *v.worker.AvailableRuns, *v.worker.MaxRuns)
if v.worker.SlotConfig != nil && len(*v.worker.SlotConfig) > 0 {
totalAvailable := 0
totalLimit := 0
for _, slotConfig := range *v.worker.SlotConfig {
if slotConfig.Available != nil {
totalAvailable += *slotConfig.Available
}
totalLimit += slotConfig.Limit
}
slotsStr = fmt.Sprintf("%d / %d", totalAvailable, totalLimit)
}
b.WriteString(sectionStyle.Render(labelStyle.Render("Available Run Slots: ") + slotsStr))
b.WriteString("\n\n")
+11 -3
View File
@@ -512,10 +512,18 @@ func (v *WorkersView) updateTableRows() {
// Started At
startedAt := formatRelativeTime(worker.Metadata.CreatedAt)
// Slots
// Slots - aggregate across all slot types
slots := "N/A"
if worker.AvailableRuns != nil && worker.MaxRuns != nil {
slots = fmt.Sprintf("%d / %d", *worker.AvailableRuns, *worker.MaxRuns)
if worker.SlotConfig != nil && len(*worker.SlotConfig) > 0 {
totalAvailable := 0
totalLimit := 0
for _, slotConfig := range *worker.SlotConfig {
if slotConfig.Available != nil {
totalAvailable += *slotConfig.Available
}
totalLimit += slotConfig.Limit
}
slots = fmt.Sprintf("%d / %d", totalAvailable, totalLimit)
}
// Last Seen
+2
View File
@@ -362,6 +362,7 @@ func runV0Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro
dispatcher.WithPayloadSizeThreshold(sc.Runtime.GRPCMaxMsgSize),
dispatcher.WithDefaultMaxWorkerBacklogSize(int64(sc.Runtime.GRPCWorkerStreamMaxBacklogSize)),
dispatcher.WithWorkflowRunBufferSize(sc.Runtime.WorkflowRunBufferSize),
dispatcher.WithVersion(sc.Version),
)
if err != nil {
@@ -802,6 +803,7 @@ func runV1Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro
dispatcher.WithPayloadSizeThreshold(sc.Runtime.GRPCMaxMsgSize),
dispatcher.WithDefaultMaxWorkerBacklogSize(int64(sc.Runtime.GRPCWorkerStreamMaxBacklogSize)),
dispatcher.WithWorkflowRunBufferSize(sc.Runtime.WorkflowRunBufferSize),
dispatcher.WithVersion(sc.Version),
)
if err != nil {
+24 -12
View File
@@ -15,15 +15,14 @@ type avgResult struct {
func do(config LoadTestConfig) error {
l.Info().Msgf("testing with duration=%s, eventsPerSecond=%d, delay=%s, wait=%s, concurrency=%d, averageDurationThreshold=%s", config.Duration, config.Events, config.Delay, config.Wait, config.Concurrency, config.AverageDurationThreshold)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
after := 10 * time.Second
go func() {
time.Sleep(config.Duration + after + config.Wait + 5*time.Second)
cancel()
}()
// The worker may intentionally be delayed (WorkerDelay) before it starts consuming tasks.
// The test timeout must include this delay, otherwise we can cancel while work is still expected to complete.
timeout := config.WorkerDelay + after + config.Duration + config.Wait + 30*time.Second
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
ch := make(chan int64, 2)
durations := make(chan time.Duration, config.Events)
@@ -98,7 +97,20 @@ func do(config LoadTestConfig) error {
finalDurationResult := <-durationsResult
finalScheduledResult := <-scheduledResult
log.Printf("️ emitted %d, executed %d, uniques %d, using %d events/s", emitted, executed, uniques, config.Events)
expected := int64(config.EventFanout) * emitted * int64(config.DagSteps)
// NOTE: `emit()` returns successfully pushed events (not merely generated IDs),
// so `emitted` here is effectively "pushed".
log.Printf(
"️ pushed %d, executed %d, uniques %d, using %d events/s (fanout=%d dagSteps=%d expected=%d)",
emitted,
executed,
uniques,
config.Events,
config.EventFanout,
config.DagSteps,
expected,
)
if executed == 0 {
return fmt.Errorf("❌ no events executed")
@@ -107,12 +119,12 @@ func do(config LoadTestConfig) error {
log.Printf("️ final average duration per executed event: %s", finalDurationResult.avg)
log.Printf("️ final average scheduling time per event: %s", finalScheduledResult.avg)
if int64(config.EventFanout)*emitted*int64(config.DagSteps) != executed {
log.Printf("⚠️ warning: emitted and executed counts do not match: %d != %d", int64(config.EventFanout)*emitted*int64(config.DagSteps), executed)
if expected != executed {
log.Printf("⚠️ warning: pushed and executed counts do not match: expected=%d got=%d", expected, executed)
}
if int64(config.EventFanout)*emitted*int64(config.DagSteps) != uniques {
return fmt.Errorf("❌ emitted and unique executed counts do not match: %d != %d", int64(config.EventFanout)*emitted, uniques)
if expected != uniques {
return fmt.Errorf("❌ pushed and unique executed counts do not match: expected=%d got=%d (fanout=%d pushed=%d dagSteps=%d)", expected, uniques, config.EventFanout, emitted, config.DagSteps)
}
// Add a small tolerance (1% or 1ms, whichever is smaller)
+28 -11
View File
@@ -53,6 +53,7 @@ func emit(ctx context.Context, namespace string, amountPerSecond int, duration t
}
var id int64
var pushed int64
// Precompute payload data.
payloadSize := parseSize(payloadArg)
@@ -68,18 +69,34 @@ func emit(ctx context.Context, namespace string, amountPerSecond int, duration t
wg.Add(1)
go func() {
defer wg.Done()
for ev := range jobCh {
l.Info().Msgf("pushing event %d", ev.ID)
for {
select {
case <-ctx.Done():
// Stop promptly on cancellation. Remaining buffered events (if any) are intentionally dropped.
return
case ev, ok := <-jobCh:
if !ok {
return
}
err := c.Events().Push(context.Background(), "load-test:event", ev, client.WithEventMetadata(map[string]string{
"event_id": fmt.Sprintf("%d", ev.ID),
}))
if err != nil {
panic(fmt.Errorf("error pushing event: %w", err))
l.Info().Msgf("pushing event %d", ev.ID)
err := c.Events().Push(ctx, "load-test:event", ev, client.WithEventMetadata(map[string]string{
"event_id": fmt.Sprintf("%d", ev.ID),
}))
if err != nil {
// If the test is shutting down, treat this as a clean stop rather than a correctness failure.
if ctx.Err() != nil {
return
}
panic(fmt.Errorf("error pushing event: %w", err))
}
atomic.AddInt64(&pushed, 1)
took := time.Since(ev.CreatedAt)
l.Info().Msgf("pushed event %d took %s", ev.ID, took)
scheduled <- took
}
took := time.Since(ev.CreatedAt)
l.Info().Msgf("pushed event %d took %s", ev.ID, took)
scheduled <- took
}
}()
}
@@ -115,5 +132,5 @@ loop:
close(jobCh)
wg.Wait()
return id
return atomic.LoadInt64(&pushed)
}
+26 -7
View File
@@ -4,6 +4,7 @@ package main
import (
"log"
"os"
"testing"
"time"
@@ -28,6 +29,24 @@ func TestLoadCLI(t *testing.T) {
"loadtest",
)
avgThreshold := 300 * time.Millisecond
if v := os.Getenv("HATCHET_LOADTEST_AVERAGE_DURATION_THRESHOLD"); v != "" {
if parsed, err := time.ParseDuration(v); err == nil {
avgThreshold = parsed
} else {
t.Fatalf("invalid HATCHET_LOADTEST_AVERAGE_DURATION_THRESHOLD=%q: %v", v, err)
}
}
startupSleep := 15 * time.Second
if v := os.Getenv("HATCHET_LOADTEST_STARTUP_SLEEP"); v != "" {
if parsed, err := time.ParseDuration(v); err == nil {
startupSleep = parsed
} else {
t.Fatalf("invalid HATCHET_LOADTEST_STARTUP_SLEEP=%q: %v", v, err)
}
}
tests := []struct {
name string
config LoadTestConfig
@@ -49,7 +68,7 @@ func TestLoadCLI(t *testing.T) {
RlKeys: 0,
RlLimit: 0,
RlDurationUnit: "",
AverageDurationThreshold: 300 * time.Millisecond,
AverageDurationThreshold: avgThreshold,
},
},
{
@@ -68,7 +87,7 @@ func TestLoadCLI(t *testing.T) {
RlKeys: 0,
RlLimit: 0,
RlDurationUnit: "",
AverageDurationThreshold: 300 * time.Millisecond,
AverageDurationThreshold: avgThreshold,
},
},
{
@@ -87,7 +106,7 @@ func TestLoadCLI(t *testing.T) {
RlKeys: 0,
RlLimit: 0,
RlDurationUnit: "",
AverageDurationThreshold: 300 * time.Millisecond,
AverageDurationThreshold: avgThreshold,
},
},
{
@@ -106,7 +125,7 @@ func TestLoadCLI(t *testing.T) {
RlKeys: 0,
RlLimit: 0,
RlDurationUnit: "",
AverageDurationThreshold: 300 * time.Millisecond,
AverageDurationThreshold: avgThreshold,
},
},
{
@@ -126,7 +145,7 @@ func TestLoadCLI(t *testing.T) {
RlKeys: 0,
RlLimit: 0,
RlDurationUnit: "",
AverageDurationThreshold: 300 * time.Millisecond,
AverageDurationThreshold: avgThreshold,
},
},
{
@@ -145,13 +164,13 @@ func TestLoadCLI(t *testing.T) {
RlKeys: 10,
RlLimit: 100,
RlDurationUnit: "second",
AverageDurationThreshold: 300 * time.Millisecond,
AverageDurationThreshold: avgThreshold,
},
},
}
// TODO instead of waiting, figure out when the engine setup is complete
time.Sleep(15 * time.Second)
time.Sleep(startupSleep)
for _, tt := range tests {
tt := tt // pin the loop variable
@@ -0,0 +1,177 @@
-- +goose Up
-- +goose StatementBegin

-- Mark which steps run durably. NOT NULL DEFAULT false keeps existing rows valid
-- without a table rewrite on modern PostgreSQL.
ALTER TABLE "Step"
ADD COLUMN IF NOT EXISTS "isDurable" BOOLEAN NOT NULL DEFAULT false;

-- Per-worker capacity keyed by slot type. Generalizes the single
-- "Worker"."maxRuns" number into one row per (worker, slot_type).
CREATE TABLE IF NOT EXISTS v1_worker_slot_config (
    tenant_id UUID NOT NULL,
    worker_id UUID NOT NULL,
    slot_type TEXT NOT NULL,
    max_units INTEGER NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    PRIMARY KEY (tenant_id, worker_id, slot_type)
);

-- How many units of each slot type a step consumes when it runs.
CREATE TABLE IF NOT EXISTS v1_step_slot_request (
    tenant_id UUID NOT NULL,
    step_id UUID NOT NULL,
    slot_type TEXT NOT NULL,
    units INTEGER NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    PRIMARY KEY (tenant_id, step_id, slot_type)
);

-- One row per slot currently held by a running task attempt.
CREATE TABLE IF NOT EXISTS v1_task_runtime_slot (
    tenant_id UUID NOT NULL,
    task_id BIGINT NOT NULL,
    task_inserted_at TIMESTAMPTZ NOT NULL,
    retry_count INTEGER NOT NULL,
    worker_id UUID NOT NULL,
    -- slot_type is user defined, we use default and durable internally as defaults
    slot_type TEXT NOT NULL,
    units INTEGER NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- NOTE: tenant_id and worker_id are intentionally not part of the key; a task
    -- attempt holds at most one row per slot_type.
    PRIMARY KEY (task_id, task_inserted_at, retry_count, slot_type)
);

-- Compatibility triggers for blue/green: keep new slot tables updated from the old write paths.
-- All triggers are statement-level with transition tables, so bulk inserts cost a
-- single extra statement rather than one per row. ON CONFLICT DO NOTHING makes
-- them safe to run alongside new services that already write these tables.

-- Mirror "Worker"."maxRuns" into v1_worker_slot_config under the 'default' slot type.
CREATE OR REPLACE FUNCTION v1_worker_slot_config_insert_function()
RETURNS TRIGGER AS
$$
BEGIN
    INSERT INTO v1_worker_slot_config (tenant_id, worker_id, slot_type, max_units)
    SELECT
        "tenantId",
        "id",
        'default'::text,
        "maxRuns"
    FROM new_rows
    -- Workers without a maxRuns limit have no 'default' capacity row.
    WHERE "maxRuns" IS NOT NULL
    ON CONFLICT (tenant_id, worker_id, slot_type) DO NOTHING;
    RETURN NULL;
END;
$$
LANGUAGE plpgsql;
DROP TRIGGER IF EXISTS v1_worker_slot_config_insert_trigger ON "Worker";
CREATE TRIGGER v1_worker_slot_config_insert_trigger
AFTER INSERT ON "Worker"
REFERENCING NEW TABLE AS new_rows
FOR EACH STATEMENT
EXECUTE FUNCTION v1_worker_slot_config_insert_function();

-- Mirror new steps into v1_step_slot_request: durable steps request a 'durable'
-- slot, everything else a 'default' slot, one unit each.
CREATE OR REPLACE FUNCTION v1_step_slot_request_insert_function()
RETURNS TRIGGER AS
$$
BEGIN
    INSERT INTO v1_step_slot_request (tenant_id, step_id, slot_type, units)
    SELECT
        "tenantId",
        "id",
        CASE WHEN "isDurable" THEN 'durable'::text ELSE 'default'::text END,
        1
    FROM new_rows
    ON CONFLICT (tenant_id, step_id, slot_type) DO NOTHING;
    RETURN NULL;
END;
$$
LANGUAGE plpgsql;
DROP TRIGGER IF EXISTS v1_step_slot_request_insert_trigger ON "Step";
CREATE TRIGGER v1_step_slot_request_insert_trigger
AFTER INSERT ON "Step"
REFERENCING NEW TABLE AS new_rows
FOR EACH STATEMENT
EXECUTE FUNCTION v1_step_slot_request_insert_function();

-- Mirror assigned task runtime rows into v1_task_runtime_slot.
-- NOTE(review): old write paths always produce a 'default' slot here, even for
-- durable steps — presumably acceptable during the blue/green window; confirm.
CREATE OR REPLACE FUNCTION v1_task_runtime_slot_insert_function()
RETURNS TRIGGER AS
$$
BEGIN
    INSERT INTO v1_task_runtime_slot (
        tenant_id,
        task_id,
        task_inserted_at,
        retry_count,
        worker_id,
        slot_type,
        units
    )
    SELECT
        tenant_id,
        task_id,
        task_inserted_at,
        retry_count,
        worker_id,
        'default'::text,
        1
    FROM new_rows
    -- Unassigned runtime rows hold no slots.
    WHERE worker_id IS NOT NULL
    ON CONFLICT (task_id, task_inserted_at, retry_count, slot_type) DO NOTHING;
    RETURN NULL;
END;
$$
LANGUAGE plpgsql;
DROP TRIGGER IF EXISTS v1_task_runtime_slot_insert_trigger ON v1_task_runtime;
CREATE TRIGGER v1_task_runtime_slot_insert_trigger
AFTER INSERT ON v1_task_runtime
REFERENCING NEW TABLE AS new_rows
FOR EACH STATEMENT
EXECUTE FUNCTION v1_task_runtime_slot_insert_function();

-- When a runtime row is deleted, release every slot row for that task attempt.
-- The join deliberately omits slot_type: all slot types for the attempt are freed.
CREATE OR REPLACE FUNCTION v1_task_runtime_slot_delete_function()
RETURNS TRIGGER AS
$$
BEGIN
    DELETE FROM v1_task_runtime_slot s
    USING deleted_rows d
    WHERE s.task_id = d.task_id
      AND s.task_inserted_at = d.task_inserted_at
      AND s.retry_count = d.retry_count;
    RETURN NULL;
END;
$$
LANGUAGE plpgsql;
DROP TRIGGER IF EXISTS v1_task_runtime_slot_delete_trigger ON v1_task_runtime;
CREATE TRIGGER v1_task_runtime_slot_delete_trigger
AFTER DELETE ON v1_task_runtime
REFERENCING OLD TABLE AS deleted_rows
FOR EACH STATEMENT
EXECUTE FUNCTION v1_task_runtime_slot_delete_function();
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
-- Reverse order of creation: triggers and functions first, then tables, then the column.
DROP TRIGGER IF EXISTS v1_worker_slot_config_insert_trigger ON "Worker";
DROP FUNCTION IF EXISTS v1_worker_slot_config_insert_function();
DROP TRIGGER IF EXISTS v1_step_slot_request_insert_trigger ON "Step";
DROP FUNCTION IF EXISTS v1_step_slot_request_insert_function();
DROP TRIGGER IF EXISTS v1_task_runtime_slot_insert_trigger ON v1_task_runtime;
DROP FUNCTION IF EXISTS v1_task_runtime_slot_insert_function();
DROP TRIGGER IF EXISTS v1_task_runtime_slot_delete_trigger ON v1_task_runtime;
DROP FUNCTION IF EXISTS v1_task_runtime_slot_delete_function();
DROP TABLE IF EXISTS v1_task_runtime_slot;
DROP TABLE IF EXISTS v1_step_slot_request;
DROP TABLE IF EXISTS v1_worker_slot_config;
ALTER TABLE "Step"
DROP COLUMN IF EXISTS "isDurable";
-- +goose StatementEnd
@@ -0,0 +1,12 @@
-- +goose Up
-- +goose NO TRANSACTION
-- CREATE INDEX CONCURRENTLY cannot run inside a transaction, hence NO TRANSACTION.
-- Serves lookups of a worker's held slots by (tenant, worker, slot_type).
CREATE INDEX CONCURRENTLY IF NOT EXISTS v1_task_runtime_slot_tenant_worker_type_idx
ON v1_task_runtime_slot (tenant_id ASC, worker_id ASC, slot_type ASC);
-- Serves lookups of a step's slot requests by step id.
CREATE INDEX CONCURRENTLY IF NOT EXISTS v1_step_slot_request_step_idx
ON v1_step_slot_request (step_id ASC);
-- +goose Down
-- Drop concurrently as well: a plain DROP INDEX takes an ACCESS EXCLUSIVE lock on
-- the table, which would block writers during an online rollback. The file runs
-- outside a transaction, so CONCURRENTLY is permitted here too.
DROP INDEX CONCURRENTLY IF EXISTS v1_task_runtime_slot_tenant_worker_type_idx;
DROP INDEX CONCURRENTLY IF EXISTS v1_step_slot_request_step_idx;
@@ -0,0 +1,218 @@
package migrations
import (
"context"
"database/sql"
"fmt"
"time"
"github.com/google/uuid"
"github.com/pressly/goose/v3"
)
// init registers the backfill as a no-transaction, context-aware goose migration:
// each batch commits independently, so an interrupted run can resume without
// redoing earlier batches.
func init() {
	goose.AddMigrationNoTxContext(up20260216000003, down20260216000003)
}

// backfillSlotsBatchSize bounds each keyset-paginated INSERT ... SELECT batch.
const backfillSlotsBatchSize = 10_000

// zeroUUID is the exclusive lower bound used to start UUID keyset pagination.
var zeroUUID = uuid.Nil
// up20260216000003 backfills the new v1 slot tables (worker slot configs, step
// slot requests, task runtime slots) from the legacy tables, in order, stopping
// at the first failure.
func up20260216000003(ctx context.Context, db *sql.DB) error {
	backfills := []func(context.Context, *sql.DB) error{
		backfillWorkerSlotConfigs,
		backfillStepSlotRequests,
		backfillTaskRuntimeSlots,
	}

	for _, backfill := range backfills {
		if err := backfill(ctx, db); err != nil {
			return err
		}
	}

	return nil
}
// down20260216000003 is intentionally a no-op.
//
// By the time this migration runs, new services may already be writing to these
// tables. Deleting rows here would risk removing valid post-cutover data.
// Rolling back therefore leaves the backfilled rows in place; they are harmless
// to the old code paths.
func down20260216000003(ctx context.Context, db *sql.DB) error {
	return nil
}
// backfillWorkerSlotConfigs copies "Worker"."maxRuns" into v1_worker_slot_config
// under the 'default' slot type, walking the table with keyset pagination on "id"
// so no single statement scans or locks the whole table. Rows already present
// (e.g. written by the compatibility triggers) are skipped via ON CONFLICT.
func backfillWorkerSlotConfigs(ctx context.Context, db *sql.DB) error {
	// Exclusive lower bound for the keyset; uuid.Nil sorts before any real id.
	// NOTE(review): a worker with the literal all-zero UUID would be skipped —
	// assumed impossible for generated ids; confirm.
	lastWorkerID := zeroUUID

	for {
		var (
			n            int
			nextWorkerID uuid.NullUUID
		)

		// Single round trip: the CTE inserts one batch and reports both the batch
		// size and the last key so the loop can advance.
		err := db.QueryRowContext(ctx, `
			WITH batch AS (
				SELECT
					"tenantId" AS tenant_id,
					"id" AS worker_id,
					"maxRuns" AS max_units
				FROM "Worker"
				WHERE "maxRuns" IS NOT NULL
				AND "id" > $1::uuid
				ORDER BY "id"
				LIMIT $2
			),
			ins AS (
				INSERT INTO v1_worker_slot_config (tenant_id, worker_id, slot_type, max_units)
				SELECT
					tenant_id,
					worker_id,
					'default'::text,
					max_units
				FROM batch
				ON CONFLICT (tenant_id, worker_id, slot_type) DO NOTHING
			)
			SELECT
				(SELECT COUNT(*) FROM batch) AS n,
				(SELECT worker_id FROM batch ORDER BY worker_id DESC LIMIT 1) AS last_worker_id;
		`, lastWorkerID, backfillSlotsBatchSize).Scan(&n, &nextWorkerID)
		if err != nil {
			return fmt.Errorf("backfill v1_worker_slot_config: %w", err)
		}

		// An empty batch means the keyset walked past the final row: done.
		if n == 0 {
			return nil
		}

		if !nextWorkerID.Valid {
			return fmt.Errorf("backfill v1_worker_slot_config: expected last keys for non-empty batch")
		}

		lastWorkerID = nextWorkerID.UUID
	}
}
// backfillStepSlotRequests seeds v1_step_slot_request from "Step": each step
// requests one unit of the 'durable' slot type when "isDurable" is set,
// otherwise one unit of 'default'. Uses keyset pagination on "id" and skips
// rows that already exist (ON CONFLICT DO NOTHING).
func backfillStepSlotRequests(ctx context.Context, db *sql.DB) error {
	// Exclusive lower bound for the keyset walk over "Step"."id".
	lastStepID := zeroUUID

	for {
		var (
			n        int
			nextStep uuid.NullUUID
		)

		// Insert one batch and report its size and last key in one round trip.
		err := db.QueryRowContext(ctx, `
			WITH batch AS (
				SELECT
					"tenantId" AS tenant_id,
					"id" AS step_id,
					"isDurable" AS is_durable
				FROM "Step"
				WHERE "id" > $1::uuid
				ORDER BY "id"
				LIMIT $2
			),
			ins AS (
				INSERT INTO v1_step_slot_request (tenant_id, step_id, slot_type, units)
				SELECT
					tenant_id,
					step_id,
					CASE WHEN is_durable THEN 'durable'::text ELSE 'default'::text END,
					1
				FROM batch
				ON CONFLICT (tenant_id, step_id, slot_type) DO NOTHING
			)
			SELECT
				(SELECT COUNT(*) FROM batch) AS n,
				(SELECT step_id FROM batch ORDER BY step_id DESC LIMIT 1) AS last_step_id;
		`, lastStepID, backfillSlotsBatchSize).Scan(&n, &nextStep)
		if err != nil {
			return fmt.Errorf("backfill v1_step_slot_request: %w", err)
		}

		// Empty batch: pagination is complete.
		if n == 0 {
			return nil
		}

		if !nextStep.Valid {
			return fmt.Errorf("backfill v1_step_slot_request: expected last keys for non-empty batch")
		}

		lastStepID = nextStep.UUID
	}
}
// backfillTaskRuntimeSlots seeds v1_task_runtime_slot from assigned rows of
// v1_task_runtime, giving each (task, attempt) one 'default' slot unit. The walk
// uses composite keyset pagination on (task_id, task_inserted_at, retry_count),
// matching the SQL tuple comparison, so every row is visited exactly once.
func backfillTaskRuntimeSlots(ctx context.Context, db *sql.DB) error {
	var (
		// Exclusive lower bound for the composite keyset. Unix epoch is assumed to
		// precede every real task_inserted_at value.
		lastTaskID         int64
		lastTaskInsertedAt = time.Unix(0, 0).UTC()
		lastRetryCount     int32
	)

	for {
		var (
			n              int
			nextTaskID     sql.NullInt64
			nextInsertedAt sql.NullTime
			nextRetry      sql.NullInt32
		)

		// Insert one batch and report its size plus the three components of the
		// last key. The three "last_*" subqueries share the same DESC ordering, so
		// they all describe the same (maximum) row of the batch.
		err := db.QueryRowContext(ctx, `
			WITH batch AS (
				SELECT
					tenant_id,
					task_id,
					task_inserted_at,
					retry_count,
					worker_id
				FROM v1_task_runtime
				WHERE worker_id IS NOT NULL
				AND (task_id, task_inserted_at, retry_count) > ($1::bigint, $2::timestamptz, $3::int)
				ORDER BY task_id, task_inserted_at, retry_count
				LIMIT $4
			),
			ins AS (
				INSERT INTO v1_task_runtime_slot (
					tenant_id,
					task_id,
					task_inserted_at,
					retry_count,
					worker_id,
					slot_type,
					units
				)
				SELECT
					tenant_id,
					task_id,
					task_inserted_at,
					retry_count,
					worker_id,
					'default'::text,
					1
				FROM batch
				ON CONFLICT (task_id, task_inserted_at, retry_count, slot_type) DO NOTHING
			)
			SELECT
				(SELECT COUNT(*) FROM batch) AS n,
				(SELECT task_id FROM batch ORDER BY task_id DESC, task_inserted_at DESC, retry_count DESC LIMIT 1) AS last_task_id,
				(SELECT task_inserted_at FROM batch ORDER BY task_id DESC, task_inserted_at DESC, retry_count DESC LIMIT 1) AS last_task_inserted_at,
				(SELECT retry_count FROM batch ORDER BY task_id DESC, task_inserted_at DESC, retry_count DESC LIMIT 1) AS last_retry_count;
		`, lastTaskID, lastTaskInsertedAt, lastRetryCount, backfillSlotsBatchSize).Scan(&n, &nextTaskID, &nextInsertedAt, &nextRetry)
		if err != nil {
			return fmt.Errorf("backfill v1_task_runtime_slot: %w", err)
		}

		// Empty batch: pagination is complete.
		if n == 0 {
			return nil
		}

		if !nextTaskID.Valid || !nextInsertedAt.Valid || !nextRetry.Valid {
			return fmt.Errorf("backfill v1_task_runtime_slot: expected last keys for non-empty batch")
		}

		lastTaskID = nextTaskID.Int64
		lastTaskInsertedAt = nextInsertedAt.Time
		lastRetryCount = nextRetry.Int32
	}
}
+1 -5
View File
@@ -27,15 +27,11 @@ async def test_durable(hatchet: Hatchet) -> None:
active_workers = [w for w in workers.rows if w.status == "ACTIVE"]
assert len(active_workers) == 2
assert len(active_workers) == 1
assert any(
w.name == hatchet.config.apply_namespace("e2e-test-worker")
for w in active_workers
)
assert any(
w.name == hatchet.config.apply_namespace("e2e-test-worker_durable")
for w in active_workers
)
assert result["durable_task"]["status"] == "success"
+155
View File
@@ -0,0 +1,155 @@
# > Simple
import argparse
import asyncio
import signal
import threading
import time
import traceback
from typing import Any
from datetime import datetime, timezone
from pathlib import Path
from hatchet_sdk import Context, EmptyModel, Hatchet
hatchet = Hatchet(debug=True)

# All failures observed during a chaos run are appended here for later inspection.
FAILURE_LOG = Path(__file__).parent / "failures.log"

# Track the current worker so we can clean up on Ctrl+C.
_current_worker = None
_current_thread = None

# Usage: poetry run python ./simple/worker_test.py --suffix new
def log_failure(phase: str, error: Exception) -> None:
    """Report a failure both loudly on stdout/stderr and durably in FAILURE_LOG."""
    when = datetime.now(timezone.utc).isoformat()
    trace = "".join(
        traceback.format_exception(type(error), error, error.__traceback__)
    )
    entry = f"[{when}] FAILURE during {phase}: {error}\n{trace}"

    # Make the failure impossible to miss in console output.
    banner = "!" * 60
    print("\n" + banner, flush=True)
    print(f"!!! FAILURE: {phase} !!!", flush=True)
    print(entry, flush=True)
    print(banner + "\n", flush=True)

    # Persist the record; each entry ends with a dashed separator line.
    with open(FAILURE_LOG, "a") as log:
        log.write(entry)
        log.write("-" * 60 + "\n")
@hatchet.task()
def simple(input: EmptyModel, ctx: Context) -> dict[str, str]:
    # Minimal non-durable task: prints a marker and returns a static payload.
    print("Executing simple task!")
    return {"result": "Hello, world!"}
@hatchet.durable_task()
def simple_durable(input: EmptyModel, ctx: Context) -> dict[str, str]:
    # Minimal durable task: prints a marker and returns a static payload.
    print("Executing durable task!")
    return {"result": "Hello from durable!"}
def _force_stop_worker(worker: Any, thread: threading.Thread) -> None:
    """Forcefully terminate the worker and its child processes.

    NOTE(review): relies on private worker internals (_terminate_processes,
    _close_queues) — confirm these survive SDK upgrades.
    """
    worker.killing = True
    worker._terminate_processes()
    worker._close_queues()
    # Stop the worker's event loop from this thread, if it is still running.
    if worker.loop and worker.loop.is_running():
        worker.loop.call_soon_threadsafe(worker.loop.stop)
    # Bounded join so a wedged worker cannot hang the caller indefinitely.
    thread.join(timeout=5)
def start_worker(suffix: str = "") -> tuple[Any, threading.Thread]:
    """Create and start a worker in a background thread.

    Returns the worker and the daemon thread running it; stop it with
    stop_worker() or, failing that, _force_stop_worker().
    """
    name = f"test-worker-{suffix}" if suffix else "test-worker"
    worker = hatchet.worker(
        name,
        workflows=[simple, simple_durable],
        slots=10,
    )
    worker.handle_kill = False  # Prevent sys.exit on shutdown
    # Restore default signal handlers so Ctrl+C raises KeyboardInterrupt
    signal.signal(signal.SIGINT, signal.default_int_handler)
    signal.signal(signal.SIGTERM, signal.SIG_DFL)
    thread = threading.Thread(target=worker.start, daemon=True)
    thread.start()
    # Give the worker a moment to initialize.
    # NOTE(review): fixed sleep is a startup race — there is no readiness signal here.
    time.sleep(2)
    print("Worker connected.")
    return worker, thread
def stop_worker(worker: Any, thread: threading.Thread) -> None:
    """Stop the worker gracefully, escalating to a forced stop on timeout."""
    try:
        # Ask the worker's own loop to shut down cleanly.
        if worker.loop and worker.loop.is_running():
            asyncio.run_coroutine_threadsafe(worker.exit_gracefully(), worker.loop)
        thread.join(timeout=10)
        # Graceful shutdown did not finish in time: force it.
        if thread.is_alive():
            _force_stop_worker(worker, thread)
        print("Worker disconnected.")
    except Exception as e:
        # Shutdown failures are recorded, not raised: the chaos loop must continue.
        log_failure("worker disconnect", e)
def main() -> None:
    """Chaos loop: repeatedly connect a worker, trigger tasks, and disconnect.

    Runs until Ctrl+C. Every failure (connect, trigger, disconnect) is recorded
    via log_failure so the run can be audited afterwards.
    """
    global _current_worker, _current_thread
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--suffix",
        default="",
        help="Suffix to append to the worker name (e.g. 'old' or 'new')",
    )
    args = parser.parse_args()
    try:
        while True:
            # --- Connect the worker ---
            print("\n=== Connecting worker ===")
            try:
                worker, thread = start_worker(args.suffix)
                _current_worker, _current_thread = worker, thread
            except Exception as e:
                # Connection failed: record it, back off, and retry the loop.
                log_failure("worker connect", e)
                time.sleep(5)
                continue
            # --- Trigger tasks every 1 second for 5 seconds ---
            for tick in range(5):
                time.sleep(1)
                print(f"\n--- Triggering tasks (tick {tick + 1}/5) ---")
                try:
                    ref = simple.run_no_wait()
                    print(f"Task triggered: {ref}")
                except Exception as e:
                    log_failure(f"task trigger (tick {tick + 1}/5)", e)
                try:
                    ref = simple_durable.run_no_wait()
                    print(f"Durable task triggered: {ref}")
                except Exception as e:
                    log_failure(f"durable task trigger (tick {tick + 1}/5)", e)
            # --- Disconnect the worker ---
            print("\n=== Disconnecting worker ===")
            stop_worker(worker, thread)
            _current_worker, _current_thread = None, None
    except KeyboardInterrupt:
        # Ctrl+C: force-stop whatever worker is still running, then exit.
        print("\n\nCtrl+C received, shutting down...")
        if _current_worker and _current_thread:
            _force_stop_worker(_current_worker, _current_thread)
        print("Bye!")


if __name__ == "__main__":
    main()
+154
View File
@@ -0,0 +1,154 @@
# This is a worker script that will introduce chaos to test
# complex deployments and migrations.
import argparse
import asyncio
import signal
import threading
import time
import traceback
from datetime import datetime, timezone
from pathlib import Path
from hatchet_sdk import Context, EmptyModel, Hatchet
hatchet = Hatchet(debug=True)
FAILURE_LOG = Path(__file__).parent / "failures.log"
# Track the current worker so we can clean up on Ctrl+C
_current_worker = None
_current_thread = None
# poetry run python ./simple/worker_test.py --suffix new
def log_failure(phase: str, error: Exception) -> None:
    """Log a failure loudly to stderr and append to the failures log file."""
    timestamp = datetime.now(timezone.utc).isoformat()
    tb = traceback.format_exception(type(error), error, error.__traceback__)
    tb_str = "".join(tb)
    msg = f"[{timestamp}] FAILURE during {phase}: {error}\n{tb_str}"
    # Loud stderr output so the failure cannot be missed in console scrollback.
    print(f"\n{'!' * 60}", flush=True)
    print(f"!!! FAILURE: {phase} !!!", flush=True)
    print(msg, flush=True)
    print(f"{'!' * 60}\n", flush=True)
    # Append to log file; each record ends with a dashed separator.
    with open(FAILURE_LOG, "a") as f:
        f.write(msg)
        f.write("-" * 60 + "\n")
@hatchet.task()
def simple(input: EmptyModel, ctx: Context) -> dict[str, str]:
    # Trivial non-durable task: emits a marker and returns a fixed payload.
    print("Executing simple task!")
    return {"result": "Hello, world!"}
@hatchet.durable_task()
def simple_durable(input: EmptyModel, ctx: Context) -> dict[str, str]:
    # Trivial durable task: emits a marker and returns a fixed payload.
    print("Executing durable task!")
    return {"result": "Hello from durable!"}
def _force_stop_worker(worker, thread: threading.Thread) -> None:
    """Forcefully terminate the worker and its child processes.

    NOTE(review): touches private worker members (_terminate_processes,
    _close_queues) — verify against the SDK version in use.
    """
    worker.killing = True
    worker._terminate_processes()
    worker._close_queues()
    # Stop the worker's event loop from this thread, if still running.
    if worker.loop and worker.loop.is_running():
        worker.loop.call_soon_threadsafe(worker.loop.stop)
    # Bounded join so a wedged worker cannot hang the caller.
    thread.join(timeout=5)
def start_worker(suffix: str = "") -> tuple:
    """Create and start a worker in a background thread.

    Returns (worker, thread); callers stop the pair with stop_worker().
    """
    name = f"test-worker-{suffix}" if suffix else "test-worker"
    worker = hatchet.worker(
        name,
        workflows=[simple, simple_durable],
        slots=10,
    )
    worker.handle_kill = False  # Prevent sys.exit on shutdown
    # Restore default signal handlers so Ctrl+C raises KeyboardInterrupt
    signal.signal(signal.SIGINT, signal.default_int_handler)
    signal.signal(signal.SIGTERM, signal.SIG_DFL)
    thread = threading.Thread(target=worker.start, daemon=True)
    thread.start()
    # Give the worker a moment to initialize.
    # NOTE(review): fixed sleep, not a readiness signal — connection may race.
    time.sleep(2)
    print("Worker connected.")
    return worker, thread
def stop_worker(worker, thread: threading.Thread) -> None:
    """Stop the worker gracefully; escalate to a forced stop if it hangs."""
    try:
        # Request a clean shutdown on the worker's own event loop.
        if worker.loop and worker.loop.is_running():
            asyncio.run_coroutine_threadsafe(worker.exit_gracefully(), worker.loop)
        thread.join(timeout=10)
        # Still alive after the grace period: terminate forcefully.
        if thread.is_alive():
            _force_stop_worker(worker, thread)
        print("Worker disconnected.")
    except Exception as e:
        # Record rather than raise so the chaos loop keeps running.
        log_failure("worker disconnect", e)
def main() -> None:
    """Chaos driver: connect a worker, trigger tasks for 5 ticks, disconnect, repeat.

    Runs until interrupted with Ctrl+C; every failure is captured via log_failure.
    """
    global _current_worker, _current_thread
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--suffix",
        default="",
        help="Suffix to append to the worker name (e.g. 'old' or 'new')",
    )
    args = parser.parse_args()
    try:
        while True:
            # --- Connect the worker ---
            print("\n=== Connecting worker ===")
            try:
                worker, thread = start_worker(args.suffix)
                _current_worker, _current_thread = worker, thread
            except Exception as e:
                # Record the failure, back off briefly, then retry the loop.
                log_failure("worker connect", e)
                time.sleep(5)
                continue
            # --- Trigger tasks every 1 second for 5 seconds ---
            for tick in range(5):
                time.sleep(1)
                print(f"\n--- Triggering tasks (tick {tick + 1}/5) ---")
                try:
                    ref = simple.run_no_wait()
                    print(f"Task triggered: {ref}")
                except Exception as e:
                    log_failure(f"task trigger (tick {tick + 1}/5)", e)
                try:
                    ref = simple_durable.run_no_wait()
                    print(f"Durable task triggered: {ref}")
                except Exception as e:
                    log_failure(f"durable task trigger (tick {tick + 1}/5)", e)
            # --- Disconnect the worker ---
            print("\n=== Disconnecting worker ===")
            stop_worker(worker, thread)
            _current_worker, _current_thread = None, None
    except KeyboardInterrupt:
        # Ctrl+C: force-stop any live worker before exiting.
        print("\n\nCtrl+C received, shutting down...")
        if _current_worker and _current_thread:
            _force_stop_worker(_current_worker, _current_thread)
        print("Bye!")


if __name__ == "__main__":
    main()
+4 -2
View File
@@ -1,5 +1,4 @@
# > Simple
from hatchet_sdk import Context, EmptyModel, Hatchet
hatchet = Hatchet(debug=True)
@@ -17,7 +16,10 @@ async def simple_durable(input: EmptyModel, ctx: Context) -> dict[str, str]:
def main() -> None:
worker = hatchet.worker("test-worker", workflows=[simple, simple_durable])
worker = hatchet.worker(
"test-worker",
workflows=[simple, simple_durable],
)
worker.start()
+10 -5
View File
@@ -332,7 +332,8 @@ async def test_basic_auth_success(
) as response:
assert response.status == 200
data = await response.json()
assert data == {"message": "ok"}
assert data["message"] == "ok"
assert data["event"]["metadata"]["id"]
await assert_has_runs(
hatchet,
@@ -413,7 +414,8 @@ async def test_api_key_success(
) as response:
assert response.status == 200
data = await response.json()
assert data == {"message": "ok"}
assert data["message"] == "ok"
assert data["event"]["metadata"]["id"]
await assert_has_runs(
hatchet,
@@ -496,7 +498,8 @@ async def test_hmac_success(
) as response:
assert response.status == 200
data = await response.json()
assert data == {"message": "ok"}
assert data["message"] == "ok"
assert data["event"]["metadata"]["id"]
await assert_has_runs(
hatchet,
@@ -540,7 +543,8 @@ async def test_hmac_different_algorithms_and_encodings(
) as response:
assert response.status == 200
data = await response.json()
assert data == {"message": "ok"}
assert data["message"] == "ok"
assert data["event"]["metadata"]["id"]
await assert_has_runs(
hatchet,
@@ -633,7 +637,8 @@ async def test_different_source_types(
) as response:
assert response.status == 200
data = await response.json()
assert data == {"message": "ok"}
assert data["message"] == "ok"
assert data["event"]["metadata"]["id"]
await assert_has_runs(
hatchet,
+2 -1
View File
@@ -120,6 +120,7 @@ import {
V1UpdateWebhookRequest,
V1Webhook,
V1WebhookList,
V1WebhookResponse,
V1WebhookSourceName,
V1WorkflowRunDetails,
V1WorkflowRunDisplayNameList,
@@ -1007,7 +1008,7 @@ export class Api<
data?: any,
params: RequestParams = {},
) =>
this.request<Record<string, any>, APIErrors>({
this.request<V1WebhookResponse, APIErrors>({
path: `/api/v1/stable/tenants/${tenant}/webhooks/${v1Webhook}`,
method: "POST",
body: data,
@@ -985,6 +985,13 @@ export type V1CreateWebhookRequest =
| V1CreateWebhookRequestAPIKey
| V1CreateWebhookRequestHMAC;
export interface V1WebhookResponse {
  /** The message for the webhook response */
  message?: string;
  /** The event created from the webhook payload, when one was created */
  event?: V1Event;
  /** Echoed challenge value — presumably for webhook source verification handshakes; confirm against the server handler */
  challenge?: string;
}
export interface V1UpdateWebhookRequest {
/** The CEL expression to use for the event key. This is used to create the event key from the webhook payload. */
eventKeyExpression?: string;
@@ -1570,6 +1577,10 @@ export interface Step {
action: string;
/** The timeout of the step. */
timeout?: string;
/** Whether the step is durable. */
isDurable?: boolean;
/** Slot requests for the step (slot_type -> units). */
slotRequests?: Record<string, number>;
children?: string[];
parents?: string[];
}
@@ -2148,6 +2159,14 @@ export interface RecentStepRuns {
workflowRunId: string;
}
/** Slot availability and limits for a slot type. */
export interface WorkerSlotConfig {
/** The number of available units for this slot type. */
available?: number;
/** The maximum number of units for this slot type. */
limit: number;
}
export interface WorkerLabel {
metadata: APIResourceMeta;
/** The key of the label. */
@@ -2191,10 +2210,8 @@ export interface Worker {
recentStepRuns?: RecentStepRuns[];
/** The status of the worker. */
status?: "ACTIVE" | "INACTIVE" | "PAUSED";
/** The maximum number of runs this worker can execute concurrently. */
maxRuns?: number;
/** The number of runs this worker can execute concurrently. */
availableRuns?: number;
/** Slot availability and limits for this worker (slot_type -> { available, limit }). */
slotConfig?: Record<string, WorkerSlotConfig>;
/**
* the id of the assigned dispatcher, in UUID format
* @format uuid
@@ -177,11 +177,9 @@ export default function WorkerDetail() {
return <Loading />;
}
const availableSlots = worker.availableRuns ?? 0;
const maxSlots = worker.maxRuns ?? 0;
const usedSlots = maxSlots - availableSlots;
const usedPercentage =
maxSlots > 0 ? Math.round((usedSlots / maxSlots) * 100) : 0;
const slotCapacityEntries = Object.entries(worker.slotConfig || {}).sort(
([a], [b]) => a.localeCompare(b),
);
// dynamically set the max columns in the grid based on the presence of runtime info and labels
const maxCols =
@@ -276,30 +274,54 @@ export default function WorkerDetail() {
className="h-52 overflow-y-auto bg-background border-none"
>
<CardHeader>
<CardTitle>Available Run Slots</CardTitle>
<CardTitle>Slots</CardTitle>
</CardHeader>
<CardContent className="space-y-2">
<div className="flex items-baseline gap-2">
<span className="text-3xl font-bold text-gray-900 dark:text-gray-100">
{maxSlots > 0 ? availableSlots : '∞'}
</span>
{maxSlots > 0 && (
<span className="text-sm text-gray-500 dark:text-gray-400">
/ {maxSlots} total
</span>
)}
</div>
{maxSlots > 0 && (
<div className="space-y-1">
<div className="h-2 w-full overflow-hidden rounded-full bg-gray-600/40 dark:bg-gray-500/50 ">
<div
className="h-full bg-emerald-300 dark:bg-emerald-500 transition-all"
style={{ width: `${usedPercentage}%` }}
/>
</div>
<div className="text-xs text-gray-500 dark:text-gray-400">
{usedSlots} used, {availableSlots} available
</div>
<CardContent className="space-y-3">
{slotCapacityEntries.length === 0 ? (
<div className="text-sm text-gray-500 dark:text-gray-400">
No slots
</div>
) : (
<div className="space-y-3">
{slotCapacityEntries.map(([slotType, capacity]) => {
const available = capacity?.available;
const limit = capacity?.limit ?? 0;
const showAvailability = available !== undefined;
const used = showAvailability ? limit - available : 0;
const usedPercentage =
showAvailability && limit > 0
? Math.round((used / limit) * 100)
: 0;
const label = showAvailability
? `${available} / ${limit}`
: `${limit}`;
return (
<div key={slotType} className="space-y-1">
<div className="flex items-center justify-between text-sm">
<span className="text-gray-500 dark:text-gray-400">
{slotType}
</span>
<span className="font-medium text-gray-900 dark:text-gray-100">
{label}
</span>
</div>
{showAvailability && limit > 0 && (
<div className="space-y-1">
<div className="h-2 w-full overflow-hidden rounded-full bg-gray-600/40 dark:bg-gray-500/50">
<div
className="h-full bg-emerald-300 dark:bg-emerald-500 transition-all"
style={{ width: `${usedPercentage}%` }}
/>
</div>
<div className="text-xs text-gray-500 dark:text-gray-400">
{used} used, {available} available
</div>
</div>
)}
</div>
);
})}
</div>
)}
<p className="text-xs text-gray-500 dark:text-gray-400">
@@ -13,7 +13,7 @@ export const WorkerColumn = {
name: 'Name',
type: 'Type',
startedAt: 'Started at',
slots: 'Available Slots',
slots: 'Slots',
lastHeartbeatAt: 'Last seen',
runtime: 'SDK Version',
} as const;
@@ -181,11 +181,34 @@ export const columns: (tenantId: string) => ColumnDef<Worker>[] = (
header: ({ column }) => (
<DataTableColumnHeader column={column} title={WorkerColumn.slots} />
),
cell: ({ row }) => (
<div>
{row.original.availableRuns} / {row.original.maxRuns}
</div>
),
cell: ({ row }) => {
const slotConfig = row.original.slotConfig || {};
const entries = Object.entries(slotConfig).sort(([a], [b]) =>
a.localeCompare(b),
);
if (entries.length === 0) {
return <div className="text-xs text-muted-foreground">No slots</div>;
}
return (
<div className="space-y-1">
{entries.map(([slotType, capacity]) => {
const available = capacity?.available;
const limit = capacity?.limit;
const label =
available !== undefined ? `${available} / ${limit}` : `${limit}`;
return (
<div key={slotType} className="text-xs text-muted-foreground">
<span className="font-medium text-foreground">{slotType}</span>:{' '}
{label}
</div>
);
})}
</div>
);
},
enableSorting: false,
enableHiding: true,
},
@@ -22,13 +22,13 @@ This is especially useful in cases such as:
## How Hatchet Runs Durable Tasks
When you register a durable task, Hatchet will start a second worker in the background for running durable tasks. If you don't register any durable workflows, the durable worker will not be started. Similarly, if you start a worker with _only_ durable workflows, the "main" worker will not start, and _only_ the durable worker will run. The durable worker will show up as a second worker in the Hatchet Dashboard.
Durable tasks run on the same worker process as regular tasks, but they consume a separate slot type so they do not compete with regular tasks for slots. This pattern prevents deadlock scenarios where durable tasks would starve child tasks of the slots needed for the parent durable task to complete.
Tasks that are declared as being durable (using `durable_task` instead of `task`) will receive a `DurableContext` object instead of a normal `Context`, which extends the `Context` by providing some additional tools for working with durable execution features.
## Example Task
Now that we know a bit about how Hatchet handles durable execution, let's build a task. We'll start by declaring a task that will run durably, on the "durable worker".
Now that we know a bit about how Hatchet handles durable execution, let's build a task. We'll start by declaring a task that will run durably.
<Snippet src={snippets.python.durable.worker.create_a_durable_workflow} />
+8 -8
View File
@@ -73,14 +73,14 @@ Create a Hatchet worker on which to run workflows.
Parameters:
| Name | Type | Description | Default |
| --------------- | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------- |
| `name` | `str` | The name of the worker. | _required_ |
| `slots` | `int` | The number of workflow slots on the worker. In other words, the number of concurrent tasks the worker can run at any point in time | `100` |
| `durable_slots` | `int` | The number of durable workflow slots on the worker. In other words, the number of concurrent tasks the worker can run at any point in time that are durable. | `1000` |
| `labels` | `dict[str, str \| int] \| None` | A dictionary of labels to assign to the worker. For more details, view examples on affinity and worker labels. | `None` |
| `workflows` | `list[BaseWorkflow[Any]] \| None` | A list of workflows to register on the worker, as a shorthand for calling `register_workflow` on each or `register_workflows` on all of them. | `None` |
| `lifespan` | `LifespanFn \| None` | A lifespan function to run on the worker. This function will be called when the worker is started, and can be used to perform any setup or teardown tasks. | `None` |
| Name | Type | Description | Default |
| --------------- | --------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- |
| `name` | `str` | The name of the worker. | _required_ |
| `slots` | `int` | Maximum number of concurrent runs. | `100` |
| `durable_slots` | `int` | Maximum number of concurrent durable tasks. | `1000` |
| `labels` | `dict[str, str \| int] \| None` | A dictionary of labels to assign to the worker. For more details, view examples on affinity and worker labels. | `None` |
| `workflows` | `list[BaseWorkflow[Any]] \| None` | A list of workflows to register on the worker, as a shorthand for calling `register_workflow` on each or `register_workflows` on all of them. | `None` |
| `lifespan` | `LifespanFn \| None` | A lifespan function to run on the worker. This function will be called when the worker is started, and can be used to perform any setup or teardown tasks. | `None` |
Returns:
+1
View File
@@ -5,6 +5,7 @@ import (
"fmt"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/internal/datautils"
)
+1
View File
@@ -5,6 +5,7 @@ import (
"fmt"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/pkg/random"
)
+1
View File
@@ -7,6 +7,7 @@ import (
"golang.org/x/sync/errgroup"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/internal/msgqueue"
)
+1 -1
View File
@@ -72,7 +72,7 @@ func TestInterval_RunInterval_WithJitter(t *testing.T) {
assert.GreaterOrEqual(t, len(timings), 2, "Should have at least 2 timing measurements")
for _, timing := range timings {
assert.GreaterOrEqual(t, timing, 50*time.Millisecond, "Timing should be at least the base interval")
assert.LessOrEqual(t, timing, 75*time.Millisecond, "Timing should include jitter but not exceed base + max jitter + buffer")
assert.LessOrEqual(t, timing, 85*time.Millisecond, "Timing should include jitter but not exceed base + max jitter + buffer")
}
return
case <-ch:
+6
View File
@@ -917,6 +917,12 @@ func getCreateTaskOpts(tasks []*contracts.CreateTaskOpts, kind string) ([]v1.Cre
TriggerConditions: make([]v1.CreateStepMatchConditionOpt, 0),
RateLimits: make([]v1.CreateWorkflowStepRateLimitOpts, 0), // Initialize to avoid nil
ScheduleTimeout: stepCp.ScheduleTimeout,
IsDurable: stepCp.IsDurable,
SlotRequests: nil,
}
if stepCp.SlotRequests != nil {
steps[j].SlotRequests = stepCp.SlotRequests
}
// Safely set Parents
@@ -457,23 +457,42 @@ func (mc *MetricsCollectorImpl) collectWorkerMetrics(ctx context.Context) func()
mc.l.Debug().Msg("collecting worker metrics")
// Count active slots per tenant
activeSlots, err := mc.repo.Workers().CountActiveSlotsPerTenant()
// Count active slots per tenant (total)
activeSlotsTotal, err := mc.repo.Workers().ListTotalActiveSlotsPerTenant(ctx)
switch {
case err != nil:
mc.l.Error().Err(err).Msg("failed to count active slots per tenant")
case len(activeSlots) == 0:
mc.l.Error().Err(err).Msg("failed to list total active slots per tenant")
case len(activeSlotsTotal) == 0:
mc.l.Debug().Msg("no active worker slots found")
default:
mc.l.Info().Int("tenant_count", len(activeSlots)).Msg("recording active slots metrics")
for tenantId, count := range activeSlots {
mc.l.Info().Int("tenant_count", len(activeSlotsTotal)).Msg("recording active slots metrics")
for tenantId, count := range activeSlotsTotal {
mc.recorder.RecordActiveSlots(ctx, tenantId, count)
mc.l.Debug().Str("tenant_id", tenantId.String()).Int64("count", count).Msg("recorded active slots metric")
}
}
// Count active slots per tenant and slot key
activeSlotsByKey, err := mc.repo.Workers().ListActiveSlotsPerTenantAndSlotType(ctx)
switch {
case err != nil:
mc.l.Error().Err(err).Msg("failed to list active slots per tenant and slot key")
case len(activeSlotsByKey) == 0:
mc.l.Debug().Msg("no active worker slots by key found")
default:
mc.l.Info().Int("slot_count", len(activeSlotsByKey)).Msg("recording active slots by key metrics")
for tuple, count := range activeSlotsByKey {
mc.recorder.RecordActiveSlotsByKey(ctx, tuple.TenantId, tuple.SlotType, count)
mc.l.Debug().
Str("tenant_id", tuple.TenantId.String()).
Str("slot_key", tuple.SlotType).
Int64("count", count).
Msg("recorded active slots by key metric")
}
}
// Count active workers per tenant
activeWorkers, err := mc.repo.Workers().CountActiveWorkersPerTenant()
activeWorkers, err := mc.repo.Workers().CountActiveWorkersPerTenant(ctx)
switch {
case err != nil:
mc.l.Error().Err(err).Msg("failed to count active workers per tenant")
@@ -488,7 +507,7 @@ func (mc *MetricsCollectorImpl) collectWorkerMetrics(ctx context.Context) func()
}
// Count active SDKs per tenant
activeSDKs, err := mc.repo.Workers().ListActiveSDKsPerTenant()
activeSDKs, err := mc.repo.Workers().ListActiveSDKsPerTenant(ctx)
switch {
case err != nil:
@@ -6,6 +6,7 @@ import (
"time"
"github.com/google/uuid"
v1 "github.com/hatchet-dev/hatchet/pkg/repository"
"github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
"github.com/hatchet-dev/hatchet/pkg/telemetry"
@@ -5,6 +5,7 @@ import (
"time"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/internal/msgqueue"
tasktypes "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes/v1"
v1 "github.com/hatchet-dev/hatchet/pkg/repository"
@@ -5,6 +5,7 @@ import (
"time"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/internal/msgqueue"
tasktypes "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes/v1"
v1 "github.com/hatchet-dev/hatchet/pkg/repository"
@@ -5,6 +5,7 @@ import (
"fmt"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/pkg/telemetry"
)
@@ -6,6 +6,7 @@ import (
"time"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/internal/msgqueue"
tasktypes "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes/v1"
"github.com/hatchet-dev/hatchet/pkg/integrations/metrics/prometheus"
@@ -5,6 +5,7 @@ import (
"fmt"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/pkg/telemetry"
)
@@ -6,6 +6,7 @@ import (
"time"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/internal/msgqueue"
tasktypes "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes/v1"
"github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
File diff suppressed because it is too large Load Diff
@@ -38,6 +38,10 @@ type DispatcherClient interface {
RefreshTimeout(ctx context.Context, in *RefreshTimeoutRequest, opts ...grpc.CallOption) (*RefreshTimeoutResponse, error)
ReleaseSlot(ctx context.Context, in *ReleaseSlotRequest, opts ...grpc.CallOption) (*ReleaseSlotResponse, error)
UpsertWorkerLabels(ctx context.Context, in *UpsertWorkerLabelsRequest, opts ...grpc.CallOption) (*UpsertWorkerLabelsResponse, error)
// GetVersion returns the dispatcher protocol version as a simple integer.
// SDKs use this to determine feature support (e.g. slot_config registration).
// Old engines that do not implement this RPC will return UNIMPLEMENTED.
GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*GetVersionResponse, error)
}
type dispatcherClient struct {
@@ -256,6 +260,15 @@ func (c *dispatcherClient) UpsertWorkerLabels(ctx context.Context, in *UpsertWor
return out, nil
}
// GetVersion invokes the unary /Dispatcher/GetVersion RPC over the client
// connection and returns the engine's reported protocol version.
// Transport and server errors (including codes.Unimplemented from engines
// built before this RPC existed) are returned to the caller unchanged.
func (c *dispatcherClient) GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*GetVersionResponse, error) {
	out := new(GetVersionResponse)
	err := c.cc.Invoke(ctx, "/Dispatcher/GetVersion", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
// DispatcherServer is the server API for Dispatcher service.
// All implementations must embed UnimplementedDispatcherServer
// for forward compatibility
@@ -276,6 +289,10 @@ type DispatcherServer interface {
RefreshTimeout(context.Context, *RefreshTimeoutRequest) (*RefreshTimeoutResponse, error)
ReleaseSlot(context.Context, *ReleaseSlotRequest) (*ReleaseSlotResponse, error)
UpsertWorkerLabels(context.Context, *UpsertWorkerLabelsRequest) (*UpsertWorkerLabelsResponse, error)
// GetVersion returns the dispatcher protocol version as a simple integer.
// SDKs use this to determine feature support (e.g. slot_config registration).
// Old engines that do not implement this RPC will return UNIMPLEMENTED.
GetVersion(context.Context, *GetVersionRequest) (*GetVersionResponse, error)
mustEmbedUnimplementedDispatcherServer()
}
@@ -322,6 +339,9 @@ func (UnimplementedDispatcherServer) ReleaseSlot(context.Context, *ReleaseSlotRe
func (UnimplementedDispatcherServer) UpsertWorkerLabels(context.Context, *UpsertWorkerLabelsRequest) (*UpsertWorkerLabelsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpsertWorkerLabels not implemented")
}
// GetVersion is the forward-compatibility stub on UnimplementedDispatcherServer.
// Servers that embed the unimplemented base but predate this RPC answer with
// codes.Unimplemented, which clients can use to detect the missing feature.
func (UnimplementedDispatcherServer) GetVersion(context.Context, *GetVersionRequest) (*GetVersionResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetVersion not implemented")
}
func (UnimplementedDispatcherServer) mustEmbedUnimplementedDispatcherServer() {}
// UnsafeDispatcherServer may be embedded to opt out of forward compatibility for this service.
@@ -586,6 +606,24 @@ func _Dispatcher_UpsertWorkerLabels_Handler(srv interface{}, ctx context.Context
return interceptor(ctx, in, info, handler)
}
// _Dispatcher_GetVersion_Handler is the grpc-go server-side shim for the
// /Dispatcher/GetVersion unary RPC: it decodes the wire request into a
// GetVersionRequest, then either dispatches directly to the service
// implementation or routes the call through the configured unary interceptor.
func _Dispatcher_GetVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetVersionRequest)
	if err := dec(in); err != nil {
		// decoding failure: surface the error before touching the service
		return nil, err
	}
	if interceptor == nil {
		return srv.(DispatcherServer).GetVersion(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/Dispatcher/GetVersion",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(DispatcherServer).GetVersion(ctx, req.(*GetVersionRequest))
	}
	return interceptor(ctx, in, info, handler)
}
// Dispatcher_ServiceDesc is the grpc.ServiceDesc for Dispatcher service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -629,6 +667,10 @@ var Dispatcher_ServiceDesc = grpc.ServiceDesc{
MethodName: "UpsertWorkerLabels",
Handler: _Dispatcher_UpsertWorkerLabels_Handler,
},
{
MethodName: "GetVersion",
Handler: _Dispatcher_GetVersion_Handler,
},
},
Streams: []grpc.StreamDesc{
{
@@ -51,6 +51,7 @@ type DispatcherImpl struct {
a *hatcheterrors.Wrapped
durableCallbackFn func(taskExternalId uuid.UUID, nodeId int64, payload []byte) error
version string
}
var ErrWorkerNotFound = fmt.Errorf("worker not found")
@@ -127,6 +128,7 @@ type DispatcherOpts struct {
payloadSizeThreshold int
defaultMaxWorkerBacklogSize int64
workflowRunBufferSize int
version string
}
func defaultDispatcherOpts() *DispatcherOpts {
@@ -204,6 +206,12 @@ func WithWorkflowRunBufferSize(size int) DispatcherOpt {
}
}
// WithVersion returns a DispatcherOpt that records the version string the
// dispatcher reports to callers of the GetVersion RPC.
func WithVersion(version string) DispatcherOpt {
	return func(o *DispatcherOpts) { o.version = version }
}
func New(fs ...DispatcherOpt) (*DispatcherImpl, error) {
opts := defaultDispatcherOpts()
@@ -253,6 +261,7 @@ func New(fs ...DispatcherOpt) (*DispatcherImpl, error) {
payloadSizeThreshold: opts.payloadSizeThreshold,
defaultMaxWorkerBacklogSize: opts.defaultMaxWorkerBacklogSize,
workflowRunBufferSize: opts.workflowRunBufferSize,
version: opts.version,
}, nil
}
+19 -2
View File
@@ -53,9 +53,20 @@ func (s *DispatcherImpl) Register(ctx context.Context, request *contracts.Worker
}
}
if len(request.SlotConfig) > 0 {
opts.SlotConfig = request.SlotConfig
} else {
// default to 100 slots
opts.SlotConfig = map[string]int32{v1.SlotTypeDefault: 100}
}
// fixme: deprecated remove in a future release feb6 2026
if request.Slots != nil {
mr := int(*request.Slots)
opts.MaxRuns = &mr
if len(request.SlotConfig) > 0 {
return nil, status.Errorf(codes.InvalidArgument, "either slot_config or slots (deprecated) must be provided, not both")
}
opts.SlotConfig = map[string]int32{v1.SlotTypeDefault: *request.Slots}
}
if apiErrors, err := s.v.ValidateAPI(opts); err != nil {
@@ -640,3 +651,9 @@ func UnmarshalPayload[T any](payload interface{}) (T, error) {
return result, nil
}
// GetVersion implements the Dispatcher GetVersion RPC. It always succeeds,
// answering with the version string this dispatcher was configured with.
func (s *DispatcherImpl) GetVersion(ctx context.Context, req *contracts.GetVersionRequest) (*contracts.GetVersionResponse, error) {
	resp := &contracts.GetVersionResponse{}
	resp.Version = s.version
	return resp, nil
}
@@ -4,6 +4,7 @@ import (
"sync"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/internal/msgqueue"
"github.com/hatchet-dev/hatchet/internal/services/dispatcher/contracts"
)
@@ -10,6 +10,7 @@ import (
"google.golang.org/grpc"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/internal/msgqueue"
"github.com/hatchet-dev/hatchet/internal/services/dispatcher/contracts"
tasktypesv1 "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes/v1"
+1
View File
@@ -8,6 +8,7 @@ import (
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/internal/datautils"
"github.com/hatchet-dev/hatchet/internal/services/ingestor/contracts"
"github.com/hatchet-dev/hatchet/pkg/constants"
+1
View File
@@ -6,6 +6,7 @@ import (
"time"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/internal/msgqueue"
"github.com/hatchet-dev/hatchet/internal/services/ingestor/contracts"
tasktypes "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes/v1"
+199 -170
View File
@@ -1073,19 +1073,21 @@ type CreateTaskOpts struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
ReadableId string `protobuf:"bytes,1,opt,name=readable_id,json=readableId,proto3" json:"readable_id,omitempty"` // (required) the task name
Action string `protobuf:"bytes,2,opt,name=action,proto3" json:"action,omitempty"` // (required) the task action id
Timeout string `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"` // (optional) the task timeout
Inputs string `protobuf:"bytes,4,opt,name=inputs,proto3" json:"inputs,omitempty"` // (optional) the task inputs, assuming string representation of JSON
Parents []string `protobuf:"bytes,5,rep,name=parents,proto3" json:"parents,omitempty"` // (optional) the task parents. if none are passed in, this is a root task
Retries int32 `protobuf:"varint,6,opt,name=retries,proto3" json:"retries,omitempty"` // (optional) the number of retries for the task, default 0
RateLimits []*CreateTaskRateLimit `protobuf:"bytes,7,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits,omitempty"` // (optional) the rate limits for the task
WorkerLabels map[string]*DesiredWorkerLabels `protobuf:"bytes,8,rep,name=worker_labels,json=workerLabels,proto3" json:"worker_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // (optional) the desired worker affinity state for the task
BackoffFactor *float32 `protobuf:"fixed32,9,opt,name=backoff_factor,json=backoffFactor,proto3,oneof" json:"backoff_factor,omitempty"` // (optional) the retry backoff factor for the task
BackoffMaxSeconds *int32 `protobuf:"varint,10,opt,name=backoff_max_seconds,json=backoffMaxSeconds,proto3,oneof" json:"backoff_max_seconds,omitempty"` // (optional) the maximum backoff time for the task
Concurrency []*Concurrency `protobuf:"bytes,11,rep,name=concurrency,proto3" json:"concurrency,omitempty"` // (optional) the task concurrency options
Conditions *TaskConditions `protobuf:"bytes,12,opt,name=conditions,proto3,oneof" json:"conditions,omitempty"` // (optional) the task conditions for creating the task
ScheduleTimeout *string `protobuf:"bytes,13,opt,name=schedule_timeout,json=scheduleTimeout,proto3,oneof" json:"schedule_timeout,omitempty"` // (optional) the timeout for the schedule
ReadableId string `protobuf:"bytes,1,opt,name=readable_id,json=readableId,proto3" json:"readable_id,omitempty"` // (required) the task name
Action string `protobuf:"bytes,2,opt,name=action,proto3" json:"action,omitempty"` // (required) the task action id
Timeout string `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"` // (optional) the task timeout
Inputs string `protobuf:"bytes,4,opt,name=inputs,proto3" json:"inputs,omitempty"` // (optional) the task inputs, assuming string representation of JSON
Parents []string `protobuf:"bytes,5,rep,name=parents,proto3" json:"parents,omitempty"` // (optional) the task parents. if none are passed in, this is a root task
Retries int32 `protobuf:"varint,6,opt,name=retries,proto3" json:"retries,omitempty"` // (optional) the number of retries for the task, default 0
RateLimits []*CreateTaskRateLimit `protobuf:"bytes,7,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits,omitempty"` // (optional) the rate limits for the task
WorkerLabels map[string]*DesiredWorkerLabels `protobuf:"bytes,8,rep,name=worker_labels,json=workerLabels,proto3" json:"worker_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // (optional) the desired worker affinity state for the task
BackoffFactor *float32 `protobuf:"fixed32,9,opt,name=backoff_factor,json=backoffFactor,proto3,oneof" json:"backoff_factor,omitempty"` // (optional) the retry backoff factor for the task
BackoffMaxSeconds *int32 `protobuf:"varint,10,opt,name=backoff_max_seconds,json=backoffMaxSeconds,proto3,oneof" json:"backoff_max_seconds,omitempty"` // (optional) the maximum backoff time for the task
Concurrency []*Concurrency `protobuf:"bytes,11,rep,name=concurrency,proto3" json:"concurrency,omitempty"` // (optional) the task concurrency options
Conditions *TaskConditions `protobuf:"bytes,12,opt,name=conditions,proto3,oneof" json:"conditions,omitempty"` // (optional) the task conditions for creating the task
ScheduleTimeout *string `protobuf:"bytes,13,opt,name=schedule_timeout,json=scheduleTimeout,proto3,oneof" json:"schedule_timeout,omitempty"` // (optional) the timeout for the schedule
IsDurable bool `protobuf:"varint,14,opt,name=is_durable,json=isDurable,proto3" json:"is_durable,omitempty"` // (optional) whether the task is durable
SlotRequests map[string]int32 `protobuf:"bytes,15,rep,name=slot_requests,json=slotRequests,proto3" json:"slot_requests,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` // (optional) slot requests (slot_type -> units)
}
func (x *CreateTaskOpts) Reset() {
@@ -1211,6 +1213,20 @@ func (x *CreateTaskOpts) GetScheduleTimeout() string {
return ""
}
// GetIsDurable reports whether the task was created as durable
// (proto field 14). Nil-safe in the generated-getter style: a nil
// receiver yields the zero value, false.
func (x *CreateTaskOpts) GetIsDurable() bool {
	if x != nil {
		return x.IsDurable
	}
	return false
}
// GetSlotRequests returns the task's slot requests map (slot_type -> units,
// proto field 15). Nil-safe in the generated-getter style: a nil receiver
// yields nil.
func (x *CreateTaskOpts) GetSlotRequests() map[string]int32 {
	if x != nil {
		return x.SlotRequests
	}
	return nil
}
type CreateTaskRateLimit struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -1702,7 +1718,7 @@ var file_v1_workflows_proto_rawDesc = []byte{
0x61, 0x6c, 0x75, 0x65, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x42,
0x0d, 0x0a, 0x0b, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x42, 0x09,
0x0a, 0x07, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0xda, 0x05, 0x0a, 0x0e, 0x43, 0x72,
0x0a, 0x07, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x85, 0x07, 0x0a, 0x0e, 0x43, 0x72,
0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x4f, 0x70, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b,
0x72, 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a,
@@ -1737,136 +1753,147 @@ var file_v1_workflows_proto_rawDesc = []byte{
0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x2e, 0x0a,
0x10, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75,
0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, 0x0f, 0x73, 0x63, 0x68, 0x65, 0x64,
0x75, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x88, 0x01, 0x01, 0x1a, 0x58, 0x0a,
0x11, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74,
0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64,
0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x05, 0x76, 0x61,
0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x62, 0x61, 0x63, 0x6b,
0x6f, 0x66, 0x66, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x62,
0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e,
0x64, 0x73, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
0x73, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x74,
0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0xb8, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74,
0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x10,
0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
0x12, 0x19, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x48,
0x00, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x88, 0x01, 0x01, 0x12, 0x1e, 0x0a, 0x08, 0x6b,
0x65, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52,
0x07, 0x6b, 0x65, 0x79, 0x45, 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 0x0a, 0x75,
0x6e, 0x69, 0x74, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48,
0x02, 0x52, 0x09, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x45, 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12,
0x2f, 0x0a, 0x11, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x5f,
0x65, 0x78, 0x70, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, 0x0f, 0x6c, 0x69,
0x6d, 0x69, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x45, 0x78, 0x70, 0x72, 0x88, 0x01, 0x01,
0x12, 0x36, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01,
0x28, 0x0e, 0x32, 0x15, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69,
0x74, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x04, 0x52, 0x08, 0x64, 0x75, 0x72,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x75, 0x6e, 0x69,
0x74, 0x73, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42,
0x0d, 0x0a, 0x0b, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42, 0x14,
0x0a, 0x12, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x5f,
0x65, 0x78, 0x70, 0x72, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x22, 0x50, 0x0a, 0x1d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66,
0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02,
0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69,
0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f,
0x77, 0x49, 0x64, 0x22, 0x37, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74,
0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x65,
0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x22, 0xc5, 0x01, 0x0a,
0x0d, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x1f,
0x0a, 0x0b, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12,
0x75, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x88, 0x01, 0x01, 0x12, 0x1d, 0x0a,
0x0a, 0x69, 0x73, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28,
0x08, 0x52, 0x09, 0x69, 0x73, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x49, 0x0a, 0x0d,
0x73, 0x6c, 0x6f, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x0f, 0x20,
0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54,
0x61, 0x73, 0x6b, 0x4f, 0x70, 0x74, 0x73, 0x2e, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x6c, 0x6f, 0x74, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x1a, 0x58, 0x0a, 0x11, 0x57, 0x6f, 0x72, 0x6b, 0x65,
0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d,
0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e,
0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72,
0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
0x01, 0x1a, 0x3f, 0x0a, 0x11, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
0x38, 0x01, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x66,
0x61, 0x63, 0x74, 0x6f, 0x72, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66,
0x66, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x42, 0x0d, 0x0a,
0x0b, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x13, 0x0a, 0x11,
0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75,
0x74, 0x22, 0xb8, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b,
0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x19, 0x0a, 0x05, 0x75,
0x6e, 0x69, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x05, 0x75, 0x6e,
0x69, 0x74, 0x73, 0x88, 0x01, 0x01, 0x12, 0x1e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x65, 0x78,
0x70, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x45,
0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 0x0a, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x5f,
0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, 0x52, 0x09, 0x75, 0x6e,
0x69, 0x74, 0x73, 0x45, 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x11, 0x6c, 0x69,
0x6d, 0x69, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18,
0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, 0x0f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x56, 0x61,
0x6c, 0x75, 0x65, 0x73, 0x45, 0x78, 0x70, 0x72, 0x88, 0x01, 0x01, 0x12, 0x36, 0x0a, 0x08, 0x64,
0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e,
0x76, 0x31, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x44, 0x75, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x48, 0x04, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x42, 0x0b, 0x0a,
0x09, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x75,
0x6e, 0x69, 0x74, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x6c, 0x69,
0x6d, 0x69, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x42,
0x0b, 0x0a, 0x09, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x1d,
0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65,
0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a,
0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a,
0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x22, 0x37,
0x0a, 0x14, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e,
0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x78, 0x74,
0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x22, 0xc5, 0x01, 0x0a, 0x0d, 0x54, 0x61, 0x73, 0x6b,
0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x74,
0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x73, 0x74,
0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x76, 0x31, 0x2e,
0x52, 0x75, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
0x73, 0x12, 0x19, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06,
0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x06,
0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x88, 0x01, 0x01, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61,
0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
0x72, 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x64, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x65,
0x72, 0x72, 0x6f, 0x72, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22,
0xaf, 0x02, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c,
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70,
0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12,
0x25, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32,
0x0d, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06,
0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18,
0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x88, 0x01,
0x01, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28,
0x0c, 0x48, 0x01, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x88, 0x01, 0x01, 0x12, 0x1f,
0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20,
0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x64, 0x42,
0x08, 0x0a, 0x06, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x6f, 0x75,
0x74, 0x70, 0x75, 0x74, 0x22, 0xaf, 0x02, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44,
0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14,
0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x69,
0x6e, 0x70, 0x75, 0x74, 0x12, 0x25, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x53, 0x74, 0x61,
0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x44, 0x0a, 0x09, 0x74,
0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27,
0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c,
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75,
0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e,
0x73, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52,
0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x2f, 0x0a, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f,
0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x12, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65,
0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x4e, 0x0a, 0x0d, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75,
0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61,
0x73, 0x6b, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x52, 0x05, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x24, 0x0a, 0x0e, 0x53, 0x74, 0x69, 0x63, 0x6b, 0x79,
0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x4f, 0x46, 0x54,
0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x41, 0x52, 0x44, 0x10, 0x01, 0x2a, 0x5d, 0x0a, 0x11,
0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a,
0x06, 0x4d, 0x49, 0x4e, 0x55, 0x54, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x4f, 0x55,
0x52, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x41, 0x59, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04,
0x57, 0x45, 0x45, 0x4b, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x4f, 0x4e, 0x54, 0x48, 0x10,
0x05, 0x12, 0x08, 0x0a, 0x04, 0x59, 0x45, 0x41, 0x52, 0x10, 0x06, 0x2a, 0x4e, 0x0a, 0x09, 0x52,
0x75, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0a, 0x0a, 0x06, 0x51, 0x55, 0x45, 0x55,
0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10,
0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x02,
0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09,
0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x2a, 0x7f, 0x0a, 0x18, 0x43,
0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53,
0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x16, 0x0a, 0x12, 0x43, 0x41, 0x4e, 0x43, 0x45,
0x4c, 0x5f, 0x49, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12,
0x0f, 0x0a, 0x0b, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x4e, 0x45, 0x57, 0x45, 0x53, 0x54, 0x10, 0x01,
0x12, 0x10, 0x0a, 0x0c, 0x51, 0x55, 0x45, 0x55, 0x45, 0x5f, 0x4e, 0x45, 0x57, 0x45, 0x53, 0x54,
0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x52, 0x4f, 0x55, 0x4e,
0x44, 0x5f, 0x52, 0x4f, 0x42, 0x49, 0x4e, 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x41, 0x4e,
0x43, 0x45, 0x4c, 0x5f, 0x4e, 0x45, 0x57, 0x45, 0x53, 0x54, 0x10, 0x04, 0x2a, 0x85, 0x01, 0x0a,
0x15, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x43, 0x6f, 0x6d, 0x70,
0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10,
0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x01,
0x12, 0x10, 0x0a, 0x0c, 0x47, 0x52, 0x45, 0x41, 0x54, 0x45, 0x52, 0x5f, 0x54, 0x48, 0x41, 0x4e,
0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, 0x47, 0x52, 0x45, 0x41, 0x54, 0x45, 0x52, 0x5f, 0x54, 0x48,
0x41, 0x4e, 0x5f, 0x4f, 0x52, 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x03, 0x12, 0x0d, 0x0a,
0x09, 0x4c, 0x45, 0x53, 0x53, 0x5f, 0x54, 0x48, 0x41, 0x4e, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12,
0x4c, 0x45, 0x53, 0x53, 0x5f, 0x54, 0x48, 0x41, 0x4e, 0x5f, 0x4f, 0x52, 0x5f, 0x45, 0x51, 0x55,
0x41, 0x4c, 0x10, 0x05, 0x32, 0xfd, 0x02, 0x0a, 0x0c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x53, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x52, 0x0a, 0x0b, 0x50, 0x75, 0x74, 0x57, 0x6f, 0x72, 0x6b,
0x66, 0x6c, 0x6f, 0x77, 0x12, 0x20, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61,
0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x43, 0x61, 0x6e,
0x63, 0x65, 0x6c, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61,
0x6e, 0x63, 0x65, 0x6c, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x17, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x61, 0x73, 0x6b,
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x52, 0x65, 0x70,
0x6c, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65,
0x70, 0x6c, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x17, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b,
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x12, 0x54, 0x72, 0x69,
0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x12,
0x1d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b,
0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e,
0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66,
0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44,
0x0a, 0x0d, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12,
0x18, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69,
0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x76, 0x31, 0x2e, 0x47,
0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x44, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72,
0x75, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x76, 0x31, 0x2e, 0x47,
0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x68,
0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f,
0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2f,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x73, 0x45, 0x6e, 0x74,
0x72, 0x79, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04,
0x64, 0x6f, 0x6e, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65,
0x12, 0x2f, 0x0a, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d,
0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x61,
0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
0x61, 0x1a, 0x4e, 0x0a, 0x0d, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x73, 0x45, 0x6e, 0x74,
0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e,
0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
0x01, 0x2a, 0x24, 0x0a, 0x0e, 0x53, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x53, 0x74, 0x72, 0x61, 0x74,
0x65, 0x67, 0x79, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x4f, 0x46, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a,
0x04, 0x48, 0x41, 0x52, 0x44, 0x10, 0x01, 0x2a, 0x5d, 0x0a, 0x11, 0x52, 0x61, 0x74, 0x65, 0x4c,
0x69, 0x6d, 0x69, 0x74, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06,
0x53, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x49, 0x4e, 0x55,
0x54, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x4f, 0x55, 0x52, 0x10, 0x02, 0x12, 0x07,
0x0a, 0x03, 0x44, 0x41, 0x59, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x57, 0x45, 0x45, 0x4b, 0x10,
0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x4f, 0x4e, 0x54, 0x48, 0x10, 0x05, 0x12, 0x08, 0x0a, 0x04,
0x59, 0x45, 0x41, 0x52, 0x10, 0x06, 0x2a, 0x4e, 0x0a, 0x09, 0x52, 0x75, 0x6e, 0x53, 0x74, 0x61,
0x74, 0x75, 0x73, 0x12, 0x0a, 0x0a, 0x06, 0x51, 0x55, 0x45, 0x55, 0x45, 0x44, 0x10, 0x00, 0x12,
0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09,
0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x46,
0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x41, 0x4e, 0x43, 0x45,
0x4c, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x2a, 0x7f, 0x0a, 0x18, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72,
0x72, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65,
0x67, 0x79, 0x12, 0x16, 0x0a, 0x12, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x5f, 0x49, 0x4e, 0x5f,
0x50, 0x52, 0x4f, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x52,
0x4f, 0x50, 0x5f, 0x4e, 0x45, 0x57, 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x51,
0x55, 0x45, 0x55, 0x45, 0x5f, 0x4e, 0x45, 0x57, 0x45, 0x53, 0x54, 0x10, 0x02, 0x12, 0x15, 0x0a,
0x11, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x52, 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x52, 0x4f, 0x42,
0x49, 0x4e, 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x5f, 0x4e,
0x45, 0x57, 0x45, 0x53, 0x54, 0x10, 0x04, 0x2a, 0x85, 0x01, 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b,
0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f,
0x72, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09,
0x4e, 0x4f, 0x54, 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x47,
0x52, 0x45, 0x41, 0x54, 0x45, 0x52, 0x5f, 0x54, 0x48, 0x41, 0x4e, 0x10, 0x02, 0x12, 0x19, 0x0a,
0x15, 0x47, 0x52, 0x45, 0x41, 0x54, 0x45, 0x52, 0x5f, 0x54, 0x48, 0x41, 0x4e, 0x5f, 0x4f, 0x52,
0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x4c, 0x45, 0x53, 0x53,
0x5f, 0x54, 0x48, 0x41, 0x4e, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x53, 0x53, 0x5f,
0x54, 0x48, 0x41, 0x4e, 0x5f, 0x4f, 0x52, 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x05, 0x32,
0xfd, 0x02, 0x0a, 0x0c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x12, 0x52, 0x0a, 0x0b, 0x50, 0x75, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12,
0x20, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66,
0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x21, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72,
0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x61,
0x73, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54,
0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x76, 0x31,
0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x54, 0x61,
0x73, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x54,
0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x76, 0x31,
0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x12, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57,
0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x12, 0x1d, 0x2e, 0x76, 0x31, 0x2e,
0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52,
0x75, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x31, 0x2e, 0x54,
0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75,
0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x0d, 0x47, 0x65, 0x74,
0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x18, 0x2e, 0x76, 0x31, 0x2e,
0x47, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6e,
0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42,
0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61,
0x74, 0x63, 0x68, 0x65, 0x74, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65,
0x74, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x73, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -1882,7 +1909,7 @@ func file_v1_workflows_proto_rawDescGZIP() []byte {
}
var file_v1_workflows_proto_enumTypes = make([]protoimpl.EnumInfo, 5)
var file_v1_workflows_proto_msgTypes = make([]protoimpl.MessageInfo, 19)
var file_v1_workflows_proto_msgTypes = make([]protoimpl.MessageInfo, 20)
var file_v1_workflows_proto_goTypes = []interface{}{
(StickyStrategy)(0), // 0: v1.StickyStrategy
(RateLimitDuration)(0), // 1: v1.RateLimitDuration
@@ -1907,15 +1934,16 @@ var file_v1_workflows_proto_goTypes = []interface{}{
(*TaskRunDetail)(nil), // 20: v1.TaskRunDetail
(*GetRunDetailsResponse)(nil), // 21: v1.GetRunDetailsResponse
nil, // 22: v1.CreateTaskOpts.WorkerLabelsEntry
nil, // 23: v1.GetRunDetailsResponse.TaskRunsEntry
(*timestamppb.Timestamp)(nil), // 24: google.protobuf.Timestamp
(*TaskConditions)(nil), // 25: v1.TaskConditions
nil, // 23: v1.CreateTaskOpts.SlotRequestsEntry
nil, // 24: v1.GetRunDetailsResponse.TaskRunsEntry
(*timestamppb.Timestamp)(nil), // 25: google.protobuf.Timestamp
(*TaskConditions)(nil), // 26: v1.TaskConditions
}
var file_v1_workflows_proto_depIdxs = []int32{
7, // 0: v1.CancelTasksRequest.filter:type_name -> v1.TasksFilter
7, // 1: v1.ReplayTasksRequest.filter:type_name -> v1.TasksFilter
24, // 2: v1.TasksFilter.since:type_name -> google.protobuf.Timestamp
24, // 3: v1.TasksFilter.until:type_name -> google.protobuf.Timestamp
25, // 2: v1.TasksFilter.since:type_name -> google.protobuf.Timestamp
25, // 3: v1.TasksFilter.until:type_name -> google.protobuf.Timestamp
16, // 4: v1.CreateWorkflowVersionRequest.tasks:type_name -> v1.CreateTaskOpts
14, // 5: v1.CreateWorkflowVersionRequest.concurrency:type_name -> v1.Concurrency
16, // 6: v1.CreateWorkflowVersionRequest.on_failure_task:type_name -> v1.CreateTaskOpts
@@ -1927,28 +1955,29 @@ var file_v1_workflows_proto_depIdxs = []int32{
17, // 12: v1.CreateTaskOpts.rate_limits:type_name -> v1.CreateTaskRateLimit
22, // 13: v1.CreateTaskOpts.worker_labels:type_name -> v1.CreateTaskOpts.WorkerLabelsEntry
14, // 14: v1.CreateTaskOpts.concurrency:type_name -> v1.Concurrency
25, // 15: v1.CreateTaskOpts.conditions:type_name -> v1.TaskConditions
1, // 16: v1.CreateTaskRateLimit.duration:type_name -> v1.RateLimitDuration
2, // 17: v1.TaskRunDetail.status:type_name -> v1.RunStatus
2, // 18: v1.GetRunDetailsResponse.status:type_name -> v1.RunStatus
23, // 19: v1.GetRunDetailsResponse.task_runs:type_name -> v1.GetRunDetailsResponse.TaskRunsEntry
15, // 20: v1.CreateTaskOpts.WorkerLabelsEntry.value:type_name -> v1.DesiredWorkerLabels
20, // 21: v1.GetRunDetailsResponse.TaskRunsEntry.value:type_name -> v1.TaskRunDetail
12, // 22: v1.AdminService.PutWorkflow:input_type -> v1.CreateWorkflowVersionRequest
5, // 23: v1.AdminService.CancelTasks:input_type -> v1.CancelTasksRequest
6, // 24: v1.AdminService.ReplayTasks:input_type -> v1.ReplayTasksRequest
10, // 25: v1.AdminService.TriggerWorkflowRun:input_type -> v1.TriggerWorkflowRunRequest
19, // 26: v1.AdminService.GetRunDetails:input_type -> v1.GetRunDetailsRequest
18, // 27: v1.AdminService.PutWorkflow:output_type -> v1.CreateWorkflowVersionResponse
8, // 28: v1.AdminService.CancelTasks:output_type -> v1.CancelTasksResponse
9, // 29: v1.AdminService.ReplayTasks:output_type -> v1.ReplayTasksResponse
11, // 30: v1.AdminService.TriggerWorkflowRun:output_type -> v1.TriggerWorkflowRunResponse
21, // 31: v1.AdminService.GetRunDetails:output_type -> v1.GetRunDetailsResponse
27, // [27:32] is the sub-list for method output_type
22, // [22:27] is the sub-list for method input_type
22, // [22:22] is the sub-list for extension type_name
22, // [22:22] is the sub-list for extension extendee
0, // [0:22] is the sub-list for field type_name
26, // 15: v1.CreateTaskOpts.conditions:type_name -> v1.TaskConditions
23, // 16: v1.CreateTaskOpts.slot_requests:type_name -> v1.CreateTaskOpts.SlotRequestsEntry
1, // 17: v1.CreateTaskRateLimit.duration:type_name -> v1.RateLimitDuration
2, // 18: v1.TaskRunDetail.status:type_name -> v1.RunStatus
2, // 19: v1.GetRunDetailsResponse.status:type_name -> v1.RunStatus
24, // 20: v1.GetRunDetailsResponse.task_runs:type_name -> v1.GetRunDetailsResponse.TaskRunsEntry
15, // 21: v1.CreateTaskOpts.WorkerLabelsEntry.value:type_name -> v1.DesiredWorkerLabels
20, // 22: v1.GetRunDetailsResponse.TaskRunsEntry.value:type_name -> v1.TaskRunDetail
12, // 23: v1.AdminService.PutWorkflow:input_type -> v1.CreateWorkflowVersionRequest
5, // 24: v1.AdminService.CancelTasks:input_type -> v1.CancelTasksRequest
6, // 25: v1.AdminService.ReplayTasks:input_type -> v1.ReplayTasksRequest
10, // 26: v1.AdminService.TriggerWorkflowRun:input_type -> v1.TriggerWorkflowRunRequest
19, // 27: v1.AdminService.GetRunDetails:input_type -> v1.GetRunDetailsRequest
18, // 28: v1.AdminService.PutWorkflow:output_type -> v1.CreateWorkflowVersionResponse
8, // 29: v1.AdminService.CancelTasks:output_type -> v1.CancelTasksResponse
9, // 30: v1.AdminService.ReplayTasks:output_type -> v1.ReplayTasksResponse
11, // 31: v1.AdminService.TriggerWorkflowRun:output_type -> v1.TriggerWorkflowRunResponse
21, // 32: v1.AdminService.GetRunDetails:output_type -> v1.GetRunDetailsResponse
28, // [28:33] is the sub-list for method output_type
23, // [23:28] is the sub-list for method input_type
23, // [23:23] is the sub-list for extension type_name
23, // [23:23] is the sub-list for extension extendee
0, // [0:23] is the sub-list for field type_name
}
func init() { file_v1_workflows_proto_init() }
@@ -2180,7 +2209,7 @@ func file_v1_workflows_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_v1_workflows_proto_rawDesc,
NumEnums: 5,
NumMessages: 19,
NumMessages: 20,
NumExtensions: 0,
NumServices: 1,
},
@@ -4,6 +4,7 @@ import (
"time"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/internal/msgqueue"
v1 "github.com/hatchet-dev/hatchet/pkg/repository"
)
@@ -2,6 +2,7 @@ package v1
import (
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/internal/msgqueue"
v1 "github.com/hatchet-dev/hatchet/pkg/repository"
"github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
@@ -6,6 +6,7 @@ import (
"time"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
)
+2 -1
View File
@@ -10,10 +10,11 @@ import (
"testing"
"github.com/google/uuid"
"github.com/jackc/pgx/v5"
"github.com/hatchet-dev/hatchet/pkg/config/loader"
"github.com/hatchet-dev/hatchet/pkg/config/server"
v1 "github.com/hatchet-dev/hatchet/pkg/repository"
"github.com/jackc/pgx/v5"
)
func Prepare(t *testing.T) {
+24 -4
View File
@@ -25,6 +25,10 @@ import (
type DispatcherClient interface {
GetActionListener(ctx context.Context, req *GetActionListenerRequest) (WorkerActionListener, *string, error)
// GetVersion calls the GetVersion RPC. Returns the engine semantic version string.
// Old engines that do not implement this will return codes.Unimplemented.
GetVersion(ctx context.Context) (string, error)
SendStepActionEvent(ctx context.Context, in *ActionEvent) (*ActionEventResponse, error)
SendGroupKeyActionEvent(ctx context.Context, in *ActionEvent) (*ActionEventResponse, error)
@@ -47,9 +51,14 @@ type GetActionListenerRequest struct {
WorkerName string
Services []string
Actions []string
Slots *int
SlotConfig map[string]int32
Labels map[string]interface{}
WebhookId *string
// LegacySlots, when non-nil, causes the registration to use the deprecated
// `slots` proto field instead of `slot_config`. This is for backward
// compatibility with engines that do not support multiple slot types.
LegacySlots *int32
}
// ActionPayload unmarshals the action payload into the target. It also validates the resulting target.
@@ -270,9 +279,12 @@ func (d *dispatcherClientImpl) newActionListener(ctx context.Context, req *GetAc
}
}
if req.Slots != nil {
mr := int32(*req.Slots) // nolint: gosec
registerReq.Slots = &mr
if req.LegacySlots != nil {
registerReq.Slots = req.LegacySlots
} else if len(req.SlotConfig) > 0 {
registerReq.SlotConfig = req.SlotConfig
} else {
return nil, nil, fmt.Errorf("slot config is required for worker registration")
}
// register the worker
@@ -534,6 +546,14 @@ func (a *actionListenerImpl) Unregister() error {
return nil
}
func (d *dispatcherClientImpl) GetVersion(ctx context.Context) (string, error) {
resp, err := d.client.GetVersion(d.ctx.newContext(ctx), &dispatchercontracts.GetVersionRequest{})
if err != nil {
return "", err
}
return resp.Version, nil
}
func (d *dispatcherClientImpl) GetActionListener(ctx context.Context, req *GetActionListenerRequest) (WorkerActionListener, *string, error) {
return d.newActionListener(ctx, req)
}
+37 -16
View File
@@ -996,15 +996,21 @@ type SlackWebhook struct {
// Step defines model for Step.
type Step struct {
Action string `json:"action"`
Children *[]string `json:"children,omitempty"`
JobId string `json:"jobId"`
Metadata APIResourceMeta `json:"metadata"`
Parents *[]string `json:"parents,omitempty"`
Action string `json:"action"`
Children *[]string `json:"children,omitempty"`
// IsDurable Whether the step is durable.
IsDurable *bool `json:"isDurable,omitempty"`
JobId string `json:"jobId"`
Metadata APIResourceMeta `json:"metadata"`
Parents *[]string `json:"parents,omitempty"`
// ReadableId The readable id of the step.
ReadableId string `json:"readableId"`
TenantId string `json:"tenantId"`
// SlotRequests Slot requests for the step (slot_type -> units).
SlotRequests *map[string]int `json:"slotRequests,omitempty"`
TenantId string `json:"tenantId"`
// Timeout The timeout of the step.
Timeout *string `json:"timeout,omitempty"`
@@ -1970,6 +1976,15 @@ type V1WebhookList struct {
Rows *[]V1Webhook `json:"rows,omitempty"`
}
// V1WebhookResponse defines model for V1WebhookResponse.
type V1WebhookResponse struct {
Challenge *string `json:"challenge,omitempty"`
Event *V1Event `json:"event,omitempty"`
// Message The message for the webhook response
Message *string `json:"message,omitempty"`
}
// V1WebhookSourceName defines model for V1WebhookSourceName.
type V1WebhookSourceName string
@@ -2111,9 +2126,6 @@ type Worker struct {
// Actions The actions this worker can perform.
Actions *[]string `json:"actions,omitempty"`
// AvailableRuns The number of runs this worker can execute concurrently.
AvailableRuns *int `json:"availableRuns,omitempty"`
// DispatcherId the id of the assigned dispatcher, in UUID format
DispatcherId *openapi_types.UUID `json:"dispatcherId,omitempty"`
@@ -2124,11 +2136,8 @@ type Worker struct {
LastHeartbeatAt *time.Time `json:"lastHeartbeatAt,omitempty"`
// LastListenerEstablished The time this worker last sent a heartbeat.
LastListenerEstablished *time.Time `json:"lastListenerEstablished,omitempty"`
// MaxRuns The maximum number of runs this worker can execute concurrently.
MaxRuns *int `json:"maxRuns,omitempty"`
Metadata APIResourceMeta `json:"metadata"`
LastListenerEstablished *time.Time `json:"lastListenerEstablished,omitempty"`
Metadata APIResourceMeta `json:"metadata"`
// Name The name of the worker.
Name string `json:"name"`
@@ -2140,6 +2149,9 @@ type Worker struct {
RegisteredWorkflows *[]RegisteredWorkflow `json:"registeredWorkflows,omitempty"`
RuntimeInfo *WorkerRuntimeInfo `json:"runtimeInfo,omitempty"`
// SlotConfig Slot availability and limits for this worker (slot_type -> { available, limit }).
SlotConfig *map[string]WorkerSlotConfig `json:"slotConfig,omitempty"`
// Slots The semaphore slot state for the worker.
Slots *[]SemaphoreSlots `json:"slots,omitempty"`
@@ -2185,6 +2197,15 @@ type WorkerRuntimeInfo struct {
// WorkerRuntimeSDKs defines model for WorkerRuntimeSDKs.
type WorkerRuntimeSDKs string
// WorkerSlotConfig Slot availability and limits for a slot type.
type WorkerSlotConfig struct {
// Available The number of available units for this slot type.
Available *int `json:"available,omitempty"`
// Limit The maximum number of units for this slot type.
Limit int `json:"limit"`
}
// WorkerType defines model for WorkerType.
type WorkerType string
@@ -14545,7 +14566,7 @@ func (r V1WebhookUpdateResponse) StatusCode() int {
type V1WebhookReceiveResponse struct {
Body []byte
HTTPResponse *http.Response
JSON200 *map[string]interface{}
JSON200 *V1WebhookResponse
JSON400 *APIErrors
JSON403 *APIErrors
}
@@ -19940,7 +19961,7 @@ func ParseV1WebhookReceiveResponse(rsp *http.Response) (*V1WebhookReceiveRespons
switch {
case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200:
var dest map[string]interface{}
var dest V1WebhookResponse
if err := json.Unmarshal(bodyBytes, &dest); err != nil {
return nil, err
}
+1
View File
@@ -5,6 +5,7 @@ import (
"time"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/pkg/repository/cache"
"github.com/hatchet-dev/hatchet/pkg/repository/sqlchelpers"
"github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
+1
View File
@@ -5,6 +5,7 @@ import (
"time"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/pkg/repository/sqlchelpers"
"github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
)
+1
View File
@@ -2,6 +2,7 @@ package repository
import (
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/internal/msgqueue"
)
+1
View File
@@ -5,6 +5,7 @@ import (
"fmt"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
)
+4
View File
@@ -4,6 +4,7 @@ import (
"context"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
)
@@ -38,12 +39,15 @@ type QueueRepository interface {
GetTaskRateLimits(ctx context.Context, tx *OptimisticTx, queueItems []*sqlcv1.V1QueueItem) (map[int64]map[string]int32, error)
RequeueRateLimitedItems(ctx context.Context, tenantId uuid.UUID, queueName string) ([]*sqlcv1.RequeueRateLimitedQueueItemsRow, error)
GetDesiredLabels(ctx context.Context, tx *OptimisticTx, stepIds []uuid.UUID) (map[uuid.UUID][]*sqlcv1.GetDesiredLabelsRow, error)
GetStepSlotRequests(ctx context.Context, tx *OptimisticTx, stepIds []uuid.UUID) (map[uuid.UUID]map[string]int32, error)
Cleanup()
}
type AssignmentRepository interface {
ListActionsForWorkers(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListActionsForWorkersRow, error)
ListAvailableSlotsForWorkers(ctx context.Context, tenantId uuid.UUID, params sqlcv1.ListAvailableSlotsForWorkersParams) ([]*sqlcv1.ListAvailableSlotsForWorkersRow, error)
ListAvailableSlotsForWorkersAndTypes(ctx context.Context, tenantId uuid.UUID, params sqlcv1.ListAvailableSlotsForWorkersAndTypesParams) ([]*sqlcv1.ListAvailableSlotsForWorkersAndTypesRow, error)
ListWorkerSlotConfigs(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListWorkerSlotConfigsRow, error)
}
type OptimisticSchedulingRepository interface {
+18
View File
@@ -4,6 +4,7 @@ import (
"context"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
"github.com/hatchet-dev/hatchet/pkg/telemetry"
)
@@ -34,3 +35,20 @@ func (d *assignmentRepository) ListAvailableSlotsForWorkers(ctx context.Context,
return d.queries.ListAvailableSlotsForWorkers(ctx, d.pool, params)
}
func (d *assignmentRepository) ListAvailableSlotsForWorkersAndTypes(ctx context.Context, tenantId uuid.UUID, params sqlcv1.ListAvailableSlotsForWorkersAndTypesParams) ([]*sqlcv1.ListAvailableSlotsForWorkersAndTypesRow, error) {
ctx, span := telemetry.NewSpan(ctx, "list-available-slots-for-workers-and-types")
defer span.End()
return d.queries.ListAvailableSlotsForWorkersAndTypes(ctx, d.pool, params)
}
func (d *assignmentRepository) ListWorkerSlotConfigs(ctx context.Context, tenantId uuid.UUID, workerIds []uuid.UUID) ([]*sqlcv1.ListWorkerSlotConfigsRow, error) {
ctx, span := telemetry.NewSpan(ctx, "list-worker-slot-configs")
defer span.End()
return d.queries.ListWorkerSlotConfigs(ctx, d.pool, sqlcv1.ListWorkerSlotConfigsParams{
Tenantid: tenantId,
Workerids: workerIds,
})
}
+1
View File
@@ -5,6 +5,7 @@ import (
"fmt"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/pkg/repository/sqlchelpers"
"github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
)
+9 -12
View File
@@ -13,10 +13,9 @@ import (
)
type ListActiveWorkersResult struct {
ID uuid.UUID
MaxRuns int
Name string
Labels []*sqlcv1.ListManyWorkerLabelsRow
ID uuid.UUID
Name string
Labels []*sqlcv1.ListManyWorkerLabelsRow
}
type leaseRepository struct {
@@ -149,10 +148,9 @@ func (d *leaseRepository) ListActiveWorkers(ctx context.Context, tenantId uuid.U
for _, worker := range activeWorkers {
res = append(res, &ListActiveWorkersResult{
ID: worker.ID,
MaxRuns: int(worker.MaxRuns),
Labels: workerIdsToLabels[worker.ID],
Name: worker.Name,
ID: worker.ID,
Labels: workerIdsToLabels[worker.ID],
Name: worker.Name,
})
}
@@ -189,10 +187,9 @@ func (d *leaseRepository) GetActiveWorker(ctx context.Context, tenantId, workerI
}
return &ListActiveWorkersResult{
ID: worker.Worker.ID,
MaxRuns: int(worker.Worker.MaxRuns),
Labels: workerIdsToLabels[worker.Worker.ID],
Name: worker.Worker.Name,
ID: worker.Worker.ID,
Labels: workerIdsToLabels[worker.Worker.ID],
Name: worker.Worker.Name,
}, nil
}
+57
View File
@@ -639,6 +639,63 @@ func (d *queueRepository) GetDesiredLabels(ctx context.Context, tx *OptimisticTx
return stepIdToLabels, nil
}
func (d *queueRepository) GetStepSlotRequests(ctx context.Context, tx *OptimisticTx, stepIds []uuid.UUID) (map[uuid.UUID]map[string]int32, error) {
ctx, span := telemetry.NewSpan(ctx, "get-step-slot-requests")
defer span.End()
uniqueStepIds := sqlchelpers.UniqueSet(stepIds)
stepIdsToLookup := make([]uuid.UUID, 0, len(uniqueStepIds))
stepIdToRequests := make(map[uuid.UUID]map[string]int32, len(uniqueStepIds))
for _, stepId := range uniqueStepIds {
if value, found := d.stepIdSlotRequestsCache.Get(stepId); found {
stepIdToRequests[stepId] = value
} else {
stepIdsToLookup = append(stepIdsToLookup, stepId)
}
}
if len(stepIdsToLookup) == 0 {
return stepIdToRequests, nil
}
var queryTx sqlcv1.DBTX
if tx != nil {
queryTx = tx.tx
} else {
queryTx = d.pool
}
rows, err := d.queries.GetStepSlotRequests(ctx, queryTx, sqlcv1.GetStepSlotRequestsParams{
Stepids: stepIdsToLookup,
Tenantid: d.tenantId,
})
if err != nil {
return nil, err
}
for _, row := range rows {
if _, ok := stepIdToRequests[row.StepID]; !ok {
stepIdToRequests[row.StepID] = make(map[string]int32)
}
stepIdToRequests[row.StepID][row.SlotType] = row.Units
}
// cache empty results so we skip DB lookups for steps without explicit slot requests
for _, stepId := range stepIdsToLookup {
if _, ok := stepIdToRequests[stepId]; !ok {
stepIdToRequests[stepId] = map[string]int32{}
}
d.stepIdSlotRequestsCache.Add(stepId, stepIdToRequests[stepId])
}
return stepIdToRequests, nil
}
func (d *queueRepository) RequeueRateLimitedItems(ctx context.Context, tenantId uuid.UUID, queueName string) ([]*sqlcv1.RequeueRateLimitedQueueItemsRow, error) {
tx, commit, rollback, err := sqlchelpers.PrepareTx(ctx, d.pool, d.l)
+3
View File
@@ -38,6 +38,7 @@ type sharedRepository struct {
tenantIdWorkflowNameCache *expirable.LRU[string, *sqlcv1.ListWorkflowsByNamesRow]
stepsInWorkflowVersionCache *expirable.LRU[uuid.UUID, []*sqlcv1.ListStepsByWorkflowVersionIdsRow]
stepIdLabelsCache *expirable.LRU[uuid.UUID, []*sqlcv1.GetDesiredLabelsRow]
stepIdSlotRequestsCache *expirable.LRU[uuid.UUID, map[string]int32]
celParser *cel.CELParser
env *celgo.Env
@@ -68,6 +69,7 @@ func newSharedRepository(
tenantIdWorkflowNameCache := expirable.NewLRU(10000, func(key string, value *sqlcv1.ListWorkflowsByNamesRow) {}, 5*time.Second)
stepsInWorkflowVersionCache := expirable.NewLRU(10000, func(key uuid.UUID, value []*sqlcv1.ListStepsByWorkflowVersionIdsRow) {}, 5*time.Minute)
stepIdLabelsCache := expirable.NewLRU(10000, func(key uuid.UUID, value []*sqlcv1.GetDesiredLabelsRow) {}, 5*time.Minute)
stepIdSlotRequestsCache := expirable.NewLRU(10000, func(key uuid.UUID, value map[string]int32) {}, 5*time.Minute)
celParser := cel.NewCELParser()
@@ -97,6 +99,7 @@ func newSharedRepository(
tenantIdWorkflowNameCache: tenantIdWorkflowNameCache,
stepsInWorkflowVersionCache: stepsInWorkflowVersionCache,
stepIdLabelsCache: stepIdLabelsCache,
stepIdSlotRequestsCache: stepIdSlotRequestsCache,
celParser: celParser,
env: env,
taskLookupCache: lookupCache,
+1
View File
@@ -4,6 +4,7 @@ import (
"context"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
)
+7
View File
@@ -0,0 +1,7 @@
package repository
// SlotType constants for worker slot configurations.
const (
SlotTypeDefault = "default"
SlotTypeDurable = "durable"
)
+1
View File
@@ -4,6 +4,7 @@ import (
"context"
"github.com/google/uuid"
"github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
)
+3 -2
View File
@@ -50,11 +50,12 @@ RETURNING l.*;
-- name: ListActiveWorkers :many
SELECT
w."id",
w."maxRuns",
DISTINCT w."id",
w."name"
FROM
"Worker" w
JOIN
v1_worker_slot_config wsc ON w."id" = wsc."worker_id"
WHERE
w."tenantId" = @tenantId::uuid
AND w."dispatcherId" IS NOT NULL
+6 -6
View File
@@ -106,11 +106,12 @@ func (q *Queries) GetLeasesToAcquire(ctx context.Context, db DBTX, arg GetLeases
const listActiveWorkers = `-- name: ListActiveWorkers :many
SELECT
w."id",
w."maxRuns",
DISTINCT w."id",
w."name"
FROM
"Worker" w
JOIN
v1_worker_slot_config wsc ON w."id" = wsc."worker_id"
WHERE
w."tenantId" = $1::uuid
AND w."dispatcherId" IS NOT NULL
@@ -120,9 +121,8 @@ WHERE
`
type ListActiveWorkersRow struct {
ID uuid.UUID `json:"id"`
MaxRuns int32 `json:"maxRuns"`
Name string `json:"name"`
ID uuid.UUID `json:"id"`
Name string `json:"name"`
}
func (q *Queries) ListActiveWorkers(ctx context.Context, db DBTX, tenantid uuid.UUID) ([]*ListActiveWorkersRow, error) {
@@ -134,7 +134,7 @@ func (q *Queries) ListActiveWorkers(ctx context.Context, db DBTX, tenantid uuid.
var items []*ListActiveWorkersRow
for rows.Next() {
var i ListActiveWorkersRow
if err := rows.Scan(&i.ID, &i.MaxRuns, &i.Name); err != nil {
if err := rows.Scan(&i.ID, &i.Name); err != nil {
return nil, err
}
items = append(items, &i)
+31
View File
@@ -2686,6 +2686,7 @@ type Step struct {
RetryBackoffFactor pgtype.Float8 `json:"retryBackoffFactor"`
RetryMaxBackoff pgtype.Int4 `json:"retryMaxBackoff"`
ScheduleTimeout string `json:"scheduleTimeout"`
IsDurable bool `json:"isDurable"`
}
type StepDesiredWorkerLabel struct {
@@ -3433,6 +3434,15 @@ type V1StepMatchCondition struct {
ParentReadableID pgtype.Text `json:"parent_readable_id"`
}
type V1StepSlotRequest struct {
TenantID uuid.UUID `json:"tenant_id"`
StepID uuid.UUID `json:"step_id"`
SlotType string `json:"slot_type"`
Units int32 `json:"units"`
CreatedAt pgtype.Timestamptz `json:"created_at"`
UpdatedAt pgtype.Timestamptz `json:"updated_at"`
}
type V1Task struct {
ID int64 `json:"id"`
InsertedAt pgtype.Timestamptz `json:"inserted_at"`
@@ -3537,6 +3547,18 @@ type V1TaskRuntime struct {
TimeoutAt pgtype.Timestamp `json:"timeout_at"`
}
type V1TaskRuntimeSlot struct {
TenantID uuid.UUID `json:"tenant_id"`
TaskID int64 `json:"task_id"`
TaskInsertedAt pgtype.Timestamptz `json:"task_inserted_at"`
RetryCount int32 `json:"retry_count"`
WorkerID uuid.UUID `json:"worker_id"`
SlotType string `json:"slot_type"`
Units int32 `json:"units"`
CreatedAt pgtype.Timestamptz `json:"created_at"`
UpdatedAt pgtype.Timestamptz `json:"updated_at"`
}
type V1TaskStatusUpdatesTmp struct {
TenantID uuid.UUID `json:"tenant_id"`
RequeueAfter pgtype.Timestamptz `json:"requeue_after"`
@@ -3573,6 +3595,15 @@ type V1TasksOlap struct {
ParentTaskExternalID *uuid.UUID `json:"parent_task_external_id"`
}
type V1WorkerSlotConfig struct {
TenantID uuid.UUID `json:"tenant_id"`
WorkerID uuid.UUID `json:"worker_id"`
SlotType string `json:"slot_type"`
MaxUnits int32 `json:"max_units"`
CreatedAt pgtype.Timestamptz `json:"created_at"`
UpdatedAt pgtype.Timestamptz `json:"updated_at"`
}
type V1WorkflowConcurrency struct {
ID int64 `json:"id"`
WorkflowID uuid.UUID `json:"workflow_id"`
+48 -31
View File
@@ -50,37 +50,6 @@ WHERE
AND w."isActive" = true
AND w."isPaused" = false;
-- name: ListAvailableSlotsForWorkers :many
WITH worker_max_runs AS (
SELECT
"id",
"maxRuns"
FROM
"Worker"
WHERE
"tenantId" = @tenantId::uuid
AND "id" = ANY(@workerIds::uuid[])
), worker_filled_slots AS (
SELECT
worker_id,
COUNT(task_id) AS "filledSlots"
FROM
v1_task_runtime
WHERE
tenant_id = @tenantId::uuid
AND worker_id = ANY(@workerIds::uuid[])
GROUP BY
worker_id
)
-- subtract the filled slots from the max runs to get the available slots
SELECT
wmr."id",
wmr."maxRuns" - COALESCE(wfs."filledSlots", 0) AS "availableSlots"
FROM
worker_max_runs wmr
LEFT JOIN
worker_filled_slots wfs ON wmr."id" = wfs.worker_id;
-- name: ListQueues :many
SELECT
*
@@ -230,6 +199,7 @@ WITH input AS (
t.retry_count,
i.worker_id,
t.tenant_id,
t.step_id,
CURRENT_TIMESTAMP + convert_duration_to_interval(t.step_timeout) AS timeout_at
FROM
v1_task t
@@ -259,6 +229,42 @@ WITH input AS (
ON CONFLICT (task_id, task_inserted_at, retry_count) DO NOTHING
-- only return the task ids that were successfully assigned
RETURNING task_id, worker_id
), slot_requests AS (
SELECT
t.id,
t.inserted_at,
t.retry_count,
t.worker_id,
t.tenant_id,
COALESCE(req.slot_type, 'default'::text) AS slot_type,
COALESCE(req.units, 1) AS units
FROM
updated_tasks t
LEFT JOIN
v1_step_slot_request req
ON req.step_id = t.step_id AND req.tenant_id = t.tenant_id
), assigned_slots AS (
INSERT INTO v1_task_runtime_slot (
tenant_id,
task_id,
task_inserted_at,
retry_count,
worker_id,
slot_type,
units
)
SELECT
tenant_id,
id,
inserted_at,
retry_count,
worker_id,
slot_type,
units
FROM
slot_requests
ON CONFLICT (task_id, task_inserted_at, retry_count, slot_type) DO NOTHING
RETURNING task_id
)
SELECT
asr.task_id,
@@ -280,6 +286,17 @@ FROM
WHERE
"stepId" = ANY(@stepIds::uuid[]);
-- name: GetStepSlotRequests :many
SELECT
step_id,
slot_type,
units
FROM
v1_step_slot_request
WHERE
step_id = ANY(@stepIds::uuid[])
AND tenant_id = @tenantId::uuid;
-- name: GetQueuedCounts :many
SELECT
queue,
+80 -62
View File
@@ -333,6 +333,49 @@ func (q *Queries) GetQueuedCounts(ctx context.Context, db DBTX, tenantid uuid.UU
return items, nil
}
const getStepSlotRequests = `-- name: GetStepSlotRequests :many
SELECT
step_id,
slot_type,
units
FROM
v1_step_slot_request
WHERE
step_id = ANY($1::uuid[])
AND tenant_id = $2::uuid
`
type GetStepSlotRequestsParams struct {
Stepids []uuid.UUID `json:"stepids"`
Tenantid uuid.UUID `json:"tenantid"`
}
type GetStepSlotRequestsRow struct {
StepID uuid.UUID `json:"step_id"`
SlotType string `json:"slot_type"`
Units int32 `json:"units"`
}
func (q *Queries) GetStepSlotRequests(ctx context.Context, db DBTX, arg GetStepSlotRequestsParams) ([]*GetStepSlotRequestsRow, error) {
rows, err := db.Query(ctx, getStepSlotRequests, arg.Stepids, arg.Tenantid)
if err != nil {
return nil, err
}
defer rows.Close()
var items []*GetStepSlotRequestsRow
for rows.Next() {
var i GetStepSlotRequestsRow
if err := rows.Scan(&i.StepID, &i.SlotType, &i.Units); err != nil {
return nil, err
}
items = append(items, &i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const listActionsForWorkers = `-- name: ListActionsForWorkers :many
SELECT
w."id" as "workerId",
@@ -382,68 +425,6 @@ func (q *Queries) ListActionsForWorkers(ctx context.Context, db DBTX, arg ListAc
return items, nil
}
const listAvailableSlotsForWorkers = `-- name: ListAvailableSlotsForWorkers :many
WITH worker_max_runs AS (
SELECT
"id",
"maxRuns"
FROM
"Worker"
WHERE
"tenantId" = $1::uuid
AND "id" = ANY($2::uuid[])
), worker_filled_slots AS (
SELECT
worker_id,
COUNT(task_id) AS "filledSlots"
FROM
v1_task_runtime
WHERE
tenant_id = $1::uuid
AND worker_id = ANY($2::uuid[])
GROUP BY
worker_id
)
SELECT
wmr."id",
wmr."maxRuns" - COALESCE(wfs."filledSlots", 0) AS "availableSlots"
FROM
worker_max_runs wmr
LEFT JOIN
worker_filled_slots wfs ON wmr."id" = wfs.worker_id
`
type ListAvailableSlotsForWorkersParams struct {
Tenantid uuid.UUID `json:"tenantid"`
Workerids []uuid.UUID `json:"workerids"`
}
type ListAvailableSlotsForWorkersRow struct {
ID uuid.UUID `json:"id"`
AvailableSlots int32 `json:"availableSlots"`
}
// subtract the filled slots from the max runs to get the available slots
func (q *Queries) ListAvailableSlotsForWorkers(ctx context.Context, db DBTX, arg ListAvailableSlotsForWorkersParams) ([]*ListAvailableSlotsForWorkersRow, error) {
rows, err := db.Query(ctx, listAvailableSlotsForWorkers, arg.Tenantid, arg.Workerids)
if err != nil {
return nil, err
}
defer rows.Close()
var items []*ListAvailableSlotsForWorkersRow
for rows.Next() {
var i ListAvailableSlotsForWorkersRow
if err := rows.Scan(&i.ID, &i.AvailableSlots); err != nil {
return nil, err
}
items = append(items, &i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const listQueueItemsForQueue = `-- name: ListQueueItemsForQueue :many
SELECT
id, tenant_id, queue, task_id, task_inserted_at, external_id, action_id, step_id, workflow_id, workflow_run_id, schedule_timeout_at, step_timeout, priority, sticky, desired_worker_id, retry_count
@@ -886,6 +867,7 @@ WITH input AS (
t.retry_count,
i.worker_id,
t.tenant_id,
t.step_id,
CURRENT_TIMESTAMP + convert_duration_to_interval(t.step_timeout) AS timeout_at
FROM
v1_task t
@@ -915,6 +897,42 @@ WITH input AS (
ON CONFLICT (task_id, task_inserted_at, retry_count) DO NOTHING
-- only return the task ids that were successfully assigned
RETURNING task_id, worker_id
), slot_requests AS (
SELECT
t.id,
t.inserted_at,
t.retry_count,
t.worker_id,
t.tenant_id,
COALESCE(req.slot_type, 'default'::text) AS slot_type,
COALESCE(req.units, 1) AS units
FROM
updated_tasks t
LEFT JOIN
v1_step_slot_request req
ON req.step_id = t.step_id AND req.tenant_id = t.tenant_id
), assigned_slots AS (
INSERT INTO v1_task_runtime_slot (
tenant_id,
task_id,
task_inserted_at,
retry_count,
worker_id,
slot_type,
units
)
SELECT
tenant_id,
id,
inserted_at,
retry_count,
worker_id,
slot_type,
units
FROM
slot_requests
ON CONFLICT (task_id, task_inserted_at, retry_count, slot_type) DO NOTHING
RETURNING task_id
)
SELECT
asr.task_id,
+5 -2
View File
@@ -729,13 +729,16 @@ WITH input AS (
ORDER BY
task_id, task_inserted_at, retry_count
FOR UPDATE
), deleted_slots AS (
DELETE FROM
v1_task_runtime_slot
WHERE
(task_id, task_inserted_at, retry_count) IN (SELECT task_id, task_inserted_at, retry_count FROM input)
), deleted_runtimes AS (
DELETE FROM
v1_task_runtime
WHERE
(task_id, task_inserted_at, retry_count) IN (SELECT task_id, task_inserted_at, retry_count FROM runtimes_to_delete)
-- return a constant for ordering
RETURNING 1 AS cte_order
)
SELECT
t.queue,
-146
View File
@@ -1,146 +0,0 @@
-- NOTE: this file doesn't typically get generated, since we need to overwrite the
-- behavior of `@dagIds` and `@dagInsertedAts` to be nullable. It can be generated
-- when we'd like to change the query.
-- name: CreateTasks :many
WITH input AS (
SELECT
*
FROM
(
SELECT
unnest(@tenantIds::uuid[]) AS tenant_id,
unnest(@queues::text[]) AS queue,
unnest(@actionIds::text[]) AS action_id,
unnest(@stepIds::uuid[]) AS step_id,
unnest(@stepReadableIds::text[]) AS step_readable_id,
unnest(@workflowIds::uuid[]) AS workflow_id,
unnest(@scheduleTimeouts::text[]) AS schedule_timeout,
unnest(@stepTimeouts::text[]) AS step_timeout,
unnest(@priorities::integer[]) AS priority,
unnest(cast(@stickies::text[] as v1_sticky_strategy[])) AS sticky,
unnest(@desiredWorkerIds::uuid[]) AS desired_worker_id,
unnest(@externalIds::uuid[]) AS external_id,
unnest(@displayNames::text[]) AS display_name,
unnest(@inputs::jsonb[]) AS input,
unnest(@retryCounts::integer[]) AS retry_count,
unnest(@additionalMetadatas::jsonb[]) AS additional_metadata,
unnest(cast(@initialStates::text[] as v1_task_initial_state[])) AS initial_state,
-- NOTE: these are nullable, so sqlc doesn't support casting to a type
unnest(@dagIds::bigint[]) AS dag_id,
unnest(@dagInsertedAts::timestamptz[]) AS dag_inserted_at
) AS subquery
)
INSERT INTO v1_task (
tenant_id,
queue,
action_id,
step_id,
step_readable_id,
workflow_id,
schedule_timeout,
step_timeout,
priority,
sticky,
desired_worker_id,
external_id,
display_name,
input,
retry_count,
additional_metadata,
initial_state,
dag_id,
dag_inserted_at
)
SELECT
i.tenant_id,
i.queue,
i.action_id,
i.step_id,
i.step_readable_id,
i.workflow_id,
i.schedule_timeout,
i.step_timeout,
i.priority,
i.sticky,
i.desired_worker_id,
i.external_id,
i.display_name,
i.input,
i.retry_count,
i.additional_metadata,
i.initial_state,
i.dag_id,
i.dag_inserted_at
FROM
input i
RETURNING
*;
-- name: ReplayTasks :many
-- NOTE: at this point, we assume we have a lock on tasks and therefor we can update the tasks
WITH input AS (
SELECT
*
FROM
(
SELECT
unnest(@taskIds::bigint[]) AS task_id,
unnest(@inputs::jsonb[]) AS input,
unnest(cast(@initialStates::text[] as v1_task_initial_state[])) AS initial_state,
unnest_nd_1d(@concurrencyStrategyIds::bigint[][]) AS concurrency_strategy_ids,
unnest_nd_1d(@concurrencyKeys::text[][]) AS concurrency_keys,
unnest(@initialStateReason::text[]) AS initial_state_reason
) AS subquery
)
UPDATE
v1_task
SET
retry_count = retry_count + 1,
app_retry_count = 0,
internal_retry_count = 0,
input = CASE WHEN i.input IS NOT NULL THEN i.input ELSE v1_task.input END,
initial_state = i.initial_state,
concurrency_strategy_ids = i.concurrency_strategy_ids,
concurrency_keys = i.concurrency_keys,
initial_state_reason = i.initial_state_reason
FROM
input i
WHERE
v1_task.id = i.task_id
RETURNING
v1_task.*;
-- name: CreateTaskExpressionEvals :exec
WITH input AS (
SELECT
*
FROM
(
SELECT
unnest(@taskIds::bigint[]) AS task_id,
unnest(@taskInsertedAts::timestamptz[]) AS task_inserted_at,
unnest(@keys::text[]) AS key,
unnest(@valuesStr::text[]) AS value_str,
unnest(cast(@kinds::text[] as "StepExpressionKind"[])) AS kind
) AS subquery
)
INSERT INTO v1_task_expression_eval (
key,
task_id,
task_inserted_at,
value_str,
kind
)
SELECT
i.key,
i.task_id,
i.task_inserted_at,
i.value_str,
i.kind
FROM
input i
ON CONFLICT (key, task_id, task_inserted_at, kind) DO UPDATE
SET
value_str = EXCLUDED.value_str,
value_int = EXCLUDED.value_int;
+18 -1
View File
@@ -949,6 +949,11 @@ WITH task AS (
ORDER BY
task_id, task_inserted_at, retry_count
FOR UPDATE
), deleted_slots AS (
DELETE FROM v1_task_runtime_slot
WHERE
(task_id, task_inserted_at, retry_count) IN (SELECT task_id, task_inserted_at, retry_count FROM locked_runtime)
RETURNING task_id
)
UPDATE
v1_task_runtime
@@ -1007,6 +1012,12 @@ WITH locked_trs AS (
LIMIT @batchSize::int
FOR UPDATE SKIP LOCKED
)
DELETE FROM v1_task_runtime_slot
WHERE (task_id, task_inserted_at, retry_count) IN (
SELECT task_id, task_inserted_at, retry_count
FROM locked_trs
);
DELETE FROM v1_task_runtime
WHERE (task_id, task_inserted_at, retry_count) IN (
SELECT task_id, task_inserted_at, retry_count
@@ -1197,7 +1208,13 @@ SELECT
FROM running_tasks;
-- name: FindOldestRunningTask :one
SELECT *
SELECT
task_id,
task_inserted_at,
retry_count,
worker_id,
tenant_id,
timeout_at
FROM v1_task_runtime
ORDER BY task_id, task_inserted_at
LIMIT 1;
+13 -2
View File
@@ -125,7 +125,7 @@ WITH locked_trs AS (
LIMIT $1::int
FOR UPDATE SKIP LOCKED
)
DELETE FROM v1_task_runtime
DELETE FROM v1_task_runtime_slot
WHERE (task_id, task_inserted_at, retry_count) IN (
SELECT task_id, task_inserted_at, retry_count
FROM locked_trs
@@ -612,7 +612,13 @@ func (q *Queries) FilterValidTasks(ctx context.Context, db DBTX, arg FilterValid
}
const findOldestRunningTask = `-- name: FindOldestRunningTask :one
SELECT task_id, task_inserted_at, retry_count, worker_id, tenant_id, timeout_at
SELECT
task_id,
task_inserted_at,
retry_count,
worker_id,
tenant_id,
timeout_at
FROM v1_task_runtime
ORDER BY task_id, task_inserted_at
LIMIT 1
@@ -2227,6 +2233,11 @@ WITH task AS (
ORDER BY
task_id, task_inserted_at, retry_count
FOR UPDATE
), deleted_slots AS (
DELETE FROM v1_task_runtime_slot
WHERE
(task_id, task_inserted_at, retry_count) IN (SELECT task_id, task_inserted_at, retry_count FROM locked_runtime)
RETURNING task_id
)
UPDATE
v1_task_runtime
-7
View File
@@ -93,10 +93,3 @@ FROM "Worker"
WHERE "tenantId" = @tenantId::uuid
AND "lastHeartbeatAt" >= NOW() - '30 seconds'::INTERVAL
AND "isActive" = true;
-- name: CountTenantWorkerSlots :one
SELECT COALESCE(SUM(w."maxRuns"), 0)::int AS "count"
FROM "Worker" w
WHERE "tenantId" = @tenantId::uuid
AND "lastHeartbeatAt" >= NOW() - '30 seconds'::INTERVAL
AND "isActive" = true;
@@ -12,21 +12,6 @@ import (
"github.com/jackc/pgx/v5/pgtype"
)
const countTenantWorkerSlots = `-- name: CountTenantWorkerSlots :one
SELECT COALESCE(SUM(w."maxRuns"), 0)::int AS "count"
FROM "Worker" w
WHERE "tenantId" = $1::uuid
AND "lastHeartbeatAt" >= NOW() - '30 seconds'::INTERVAL
AND "isActive" = true
`
func (q *Queries) CountTenantWorkerSlots(ctx context.Context, db DBTX, tenantid uuid.UUID) (int32, error) {
row := db.QueryRow(ctx, countTenantWorkerSlots, tenantid)
var count int32
err := row.Scan(&count)
return count, err
}
const countTenantWorkers = `-- name: CountTenantWorkers :one
SELECT COUNT(distinct id) AS "count"
FROM "Worker"
+7 -15
View File
@@ -652,8 +652,7 @@ WHERE "id" = @id::uuid;
-- name: GetTenantUsageData :one
WITH active_workers AS (
SELECT
workers."id",
workers."maxRuns"
workers."id"
FROM
"Worker" workers
WHERE
@@ -662,20 +661,13 @@ WITH active_workers AS (
AND workers."lastHeartbeatAt" > NOW() - INTERVAL '5 seconds'
AND workers."isActive" = true
AND workers."isPaused" = false
), worker_slots AS (
SELECT
aw."id" AS worker_id,
aw."maxRuns" - (
SELECT COUNT(*)
FROM v1_task_runtime runtime
WHERE
runtime.tenant_id = @tenantId::uuid AND
runtime.worker_id = aw."id"
) AS "remainingSlots"
FROM
active_workers aw
)
SELECT
(SELECT COUNT(*) FROM active_workers) AS "workerCount",
COALESCE((SELECT SUM("maxRuns") - SUM("remainingSlots") FROM active_workers aw JOIN worker_slots ws ON aw."id" = ws.worker_id), 0)::bigint AS "usedWorkerSlotsCount",
COALESCE((
SELECT SUM(s.units)
FROM v1_task_runtime_slot s
WHERE s.tenant_id = @tenantId::uuid
AND s.worker_id IN (SELECT "id" FROM active_workers)
), 0)::bigint AS "usedWorkerSlotsCount",
(SELECT COUNT(*) FROM "TenantMember" WHERE "tenantId" = @tenantId::uuid) AS "tenantMembersCount";
+7 -15
View File
@@ -761,8 +761,7 @@ func (q *Queries) GetTenantTotalQueueMetrics(ctx context.Context, db DBTX, arg G
const getTenantUsageData = `-- name: GetTenantUsageData :one
WITH active_workers AS (
SELECT
workers."id",
workers."maxRuns"
workers."id"
FROM
"Worker" workers
WHERE
@@ -771,22 +770,15 @@ WITH active_workers AS (
AND workers."lastHeartbeatAt" > NOW() - INTERVAL '5 seconds'
AND workers."isActive" = true
AND workers."isPaused" = false
), worker_slots AS (
SELECT
aw."id" AS worker_id,
aw."maxRuns" - (
SELECT COUNT(*)
FROM v1_task_runtime runtime
WHERE
runtime.tenant_id = $1::uuid AND
runtime.worker_id = aw."id"
) AS "remainingSlots"
FROM
active_workers aw
)
SELECT
(SELECT COUNT(*) FROM active_workers) AS "workerCount",
COALESCE((SELECT SUM("maxRuns") - SUM("remainingSlots") FROM active_workers aw JOIN worker_slots ws ON aw."id" = ws.worker_id), 0)::bigint AS "usedWorkerSlotsCount",
COALESCE((
SELECT SUM(s.units)
FROM v1_task_runtime_slot s
WHERE s.tenant_id = $1::uuid
AND s.worker_id IN (SELECT "id" FROM active_workers)
), 0)::bigint AS "usedWorkerSlotsCount",
(SELECT COUNT(*) FROM "TenantMember" WHERE "tenantId" = $1::uuid) AS "tenantMembersCount"
`
+137 -42
View File
@@ -10,22 +10,111 @@ SELECT
FROM "WorkerLabel" wl
WHERE wl."workerId" = ANY(@workerIds::uuid[]);
-- name: ListWorkersWithSlotCount :many
-- name: ListWorkerSlotConfigs :many
SELECT
sqlc.embed(workers),
ww."url" AS "webhookUrl",
ww."id" AS "webhookId",
workers."maxRuns" - (
SELECT COUNT(*)
FROM v1_task_runtime runtime
WHERE
runtime.tenant_id = workers."tenantId" AND
runtime.worker_id = workers."id"
) AS "remainingSlots"
worker_id,
slot_type,
max_units
FROM
v1_worker_slot_config
WHERE
tenant_id = @tenantId::uuid
AND worker_id = ANY(@workerIds::uuid[]);
-- name: CreateWorkerSlotConfigs :exec
INSERT INTO v1_worker_slot_config (
tenant_id,
worker_id,
slot_type,
max_units,
created_at,
updated_at
)
SELECT
@tenantId::uuid,
@workerId::uuid,
unnest(@slotTypes::text[]),
unnest(@maxUnits::integer[]),
CURRENT_TIMESTAMP,
CURRENT_TIMESTAMP
-- NOTE: ON CONFLICT can be removed after the 0_76_d migration is run to remove insert triggers added in 0_76
ON CONFLICT (tenant_id, worker_id, slot_type) DO UPDATE SET
max_units = EXCLUDED.max_units,
updated_at = CURRENT_TIMESTAMP;
-- name: ListAvailableSlotsForWorkers :many
WITH worker_capacities AS (
SELECT
worker_id,
max_units
FROM
v1_worker_slot_config
WHERE
tenant_id = @tenantId::uuid
AND worker_id = ANY(@workerIds::uuid[])
AND slot_type = @slotType::text
), worker_used_slots AS (
SELECT
worker_id,
SUM(units) AS used_units
FROM
v1_task_runtime_slot
WHERE
tenant_id = @tenantId::uuid
AND worker_id = ANY(@workerIds::uuid[])
AND slot_type = @slotType::text
GROUP BY
worker_id
)
SELECT
wc.worker_id AS "id",
wc.max_units - COALESCE(wus.used_units, 0) AS "availableSlots"
FROM
worker_capacities wc
LEFT JOIN
worker_used_slots wus ON wc.worker_id = wus.worker_id;
-- name: ListAvailableSlotsForWorkersAndTypes :many
WITH worker_capacities AS (
SELECT
worker_id,
slot_type,
max_units
FROM
v1_worker_slot_config
WHERE
tenant_id = @tenantId::uuid
AND worker_id = ANY(@workerIds::uuid[])
AND slot_type = ANY(@slotTypes::text[])
), worker_used_slots AS (
SELECT
worker_id,
slot_type,
SUM(units) AS used_units
FROM
v1_task_runtime_slot
WHERE
tenant_id = @tenantId::uuid
AND worker_id = ANY(@workerIds::uuid[])
AND slot_type = ANY(@slotTypes::text[])
GROUP BY
worker_id,
slot_type
)
SELECT
wc.worker_id AS "id",
wc.slot_type AS "slotType",
wc.max_units - COALESCE(wus.used_units, 0) AS "availableSlots"
FROM
worker_capacities wc
LEFT JOIN
worker_used_slots wus ON wc.worker_id = wus.worker_id AND wc.slot_type = wus.slot_type;
-- name: ListWorkers :many
SELECT
sqlc.embed(workers)
FROM
"Worker" workers
LEFT JOIN
"WebhookWorker" ww ON workers."webhookId" = ww."id"
WHERE
workers."tenantId" = @tenantId
AND (
@@ -43,31 +132,24 @@ WHERE
)
AND (
sqlc.narg('assignable')::boolean IS NULL OR
workers."maxRuns" IS NULL OR
(sqlc.narg('assignable')::boolean AND workers."maxRuns" > (
SELECT COUNT(*)
FROM "StepRun" srs
WHERE srs."workerId" = workers."id" AND srs."status" = 'RUNNING'
(sqlc.narg('assignable')::boolean AND (
SELECT COALESCE(SUM(cap.max_units), 0)
FROM v1_worker_slot_config cap
WHERE cap.tenant_id = workers."tenantId" AND cap.worker_id = workers."id"
) > (
SELECT COALESCE(SUM(runtime.units), 0)
FROM v1_task_runtime_slot runtime
WHERE runtime.tenant_id = workers."tenantId" AND runtime.worker_id = workers."id"
))
)
GROUP BY
workers."id", ww."url", ww."id";
workers."id";
-- name: GetWorkerById :one
SELECT
sqlc.embed(w),
ww."url" AS "webhookUrl",
w."maxRuns" - (
SELECT COUNT(*)
FROM v1_task_runtime runtime
WHERE
runtime.tenant_id = w."tenantId" AND
runtime.worker_id = w."id"
) AS "remainingSlots"
sqlc.embed(w)
FROM
"Worker" w
LEFT JOIN
"WebhookWorker" ww ON w."webhookId" = ww."id"
WHERE
w."id" = @id::uuid;
@@ -108,14 +190,32 @@ LIMIT
COALESCE(sqlc.narg('limit')::int, 100);
-- name: ListTotalActiveSlotsPerTenant :many
SELECT "tenantId", SUM("maxRuns") AS "totalActiveSlots"
FROM "Worker"
SELECT
wc.tenant_id AS "tenantId",
SUM(wc.max_units) AS "totalActiveSlots"
FROM v1_worker_slot_config wc
JOIN "Worker" w ON w."id" = wc.worker_id AND w."tenantId" = wc.tenant_id
WHERE
"dispatcherId" IS NOT NULL
AND "lastHeartbeatAt" > NOW() - INTERVAL '5 seconds'
AND "isActive" = true
AND "isPaused" = false
GROUP BY "tenantId"
w."dispatcherId" IS NOT NULL
AND w."lastHeartbeatAt" > NOW() - INTERVAL '5 seconds'
AND w."isActive" = true
AND w."isPaused" = false
GROUP BY wc.tenant_id
;
-- name: ListActiveSlotsPerTenantAndSlotType :many
SELECT
wc.tenant_id AS "tenantId",
wc.slot_type AS "slotType",
SUM(wc.max_units) AS "activeSlots"
FROM v1_worker_slot_config wc
JOIN "Worker" w ON w."id" = wc.worker_id AND w."tenantId" = wc.tenant_id
WHERE
w."dispatcherId" IS NOT NULL
AND w."lastHeartbeatAt" > NOW() - INTERVAL '5 seconds'
AND w."isActive" = true
AND w."isPaused" = false
GROUP BY wc.tenant_id, wc.slot_type
;
-- name: ListActiveSDKsPerTenant :many
@@ -210,7 +310,6 @@ UPDATE
SET
"updatedAt" = CURRENT_TIMESTAMP,
"dispatcherId" = coalesce(sqlc.narg('dispatcherId')::uuid, "dispatcherId"),
"maxRuns" = coalesce(sqlc.narg('maxRuns')::int, "maxRuns"),
"lastHeartbeatAt" = coalesce(sqlc.narg('lastHeartbeatAt')::timestamp, "lastHeartbeatAt"),
"isActive" = coalesce(sqlc.narg('isActive')::boolean, "isActive"),
"isPaused" = coalesce(sqlc.narg('isPaused')::boolean, "isPaused")
@@ -350,8 +449,6 @@ INSERT INTO "Worker" (
"tenantId",
"name",
"dispatcherId",
"maxRuns",
"webhookId",
"type",
"sdkVersion",
"language",
@@ -365,8 +462,6 @@ INSERT INTO "Worker" (
@tenantId::uuid,
@name::text,
@dispatcherId::uuid,
sqlc.narg('maxRuns')::int,
sqlc.narg('webhookId')::uuid,
sqlc.narg('type')::"WorkerType",
sqlc.narg('sdkVersion')::text,
sqlc.narg('language')::"WorkerSDKS",
+295 -75
View File
@@ -20,8 +20,6 @@ INSERT INTO "Worker" (
"tenantId",
"name",
"dispatcherId",
"maxRuns",
"webhookId",
"type",
"sdkVersion",
"language",
@@ -35,14 +33,12 @@ INSERT INTO "Worker" (
$1::uuid,
$2::text,
$3::uuid,
$4::int,
$5::uuid,
$6::"WorkerType",
$4::"WorkerType",
$5::text,
$6::"WorkerSDKS",
$7::text,
$8::"WorkerSDKS",
$9::text,
$10::text,
$11::text
$8::text,
$9::text
) RETURNING id, "createdAt", "updatedAt", "deletedAt", "tenantId", "lastHeartbeatAt", name, "dispatcherId", "maxRuns", "isActive", "lastListenerEstablished", "isPaused", type, "webhookId", language, "languageVersion", os, "runtimeExtra", "sdkVersion", "durableTaskDispatcherId"
`
@@ -50,8 +46,6 @@ type CreateWorkerParams struct {
Tenantid uuid.UUID `json:"tenantid"`
Name string `json:"name"`
Dispatcherid uuid.UUID `json:"dispatcherid"`
MaxRuns pgtype.Int4 `json:"maxRuns"`
WebhookId *uuid.UUID `json:"webhookId"`
Type NullWorkerType `json:"type"`
SdkVersion pgtype.Text `json:"sdkVersion"`
Language NullWorkerSDKS `json:"language"`
@@ -65,8 +59,6 @@ func (q *Queries) CreateWorker(ctx context.Context, db DBTX, arg CreateWorkerPar
arg.Tenantid,
arg.Name,
arg.Dispatcherid,
arg.MaxRuns,
arg.WebhookId,
arg.Type,
arg.SdkVersion,
arg.Language,
@@ -100,6 +92,45 @@ func (q *Queries) CreateWorker(ctx context.Context, db DBTX, arg CreateWorkerPar
return &i, err
}
const createWorkerSlotConfigs = `-- name: CreateWorkerSlotConfigs :exec
INSERT INTO v1_worker_slot_config (
tenant_id,
worker_id,
slot_type,
max_units,
created_at,
updated_at
)
SELECT
$1::uuid,
$2::uuid,
unnest($3::text[]),
unnest($4::integer[]),
CURRENT_TIMESTAMP,
CURRENT_TIMESTAMP
ON CONFLICT (tenant_id, worker_id, slot_type) DO UPDATE SET
max_units = EXCLUDED.max_units,
updated_at = CURRENT_TIMESTAMP
`
type CreateWorkerSlotConfigsParams struct {
Tenantid uuid.UUID `json:"tenantid"`
Workerid uuid.UUID `json:"workerid"`
Slottypes []string `json:"slottypes"`
Maxunits []int32 `json:"maxunits"`
}
// NOTE: ON CONFLICT can be removed after the 0_76_d migration is run to remove insert triggers added in 0_76
func (q *Queries) CreateWorkerSlotConfigs(ctx context.Context, db DBTX, arg CreateWorkerSlotConfigsParams) error {
_, err := db.Exec(ctx, createWorkerSlotConfigs,
arg.Tenantid,
arg.Workerid,
arg.Slottypes,
arg.Maxunits,
)
return err
}
const deleteOldWorkers = `-- name: DeleteOldWorkers :one
WITH for_delete AS (
SELECT
@@ -294,27 +325,15 @@ func (q *Queries) GetWorkerActionsByWorkerId(ctx context.Context, db DBTX, arg G
const getWorkerById = `-- name: GetWorkerById :one
SELECT
w.id, w."createdAt", w."updatedAt", w."deletedAt", w."tenantId", w."lastHeartbeatAt", w.name, w."dispatcherId", w."maxRuns", w."isActive", w."lastListenerEstablished", w."isPaused", w.type, w."webhookId", w.language, w."languageVersion", w.os, w."runtimeExtra", w."sdkVersion", w."durableTaskDispatcherId",
ww."url" AS "webhookUrl",
w."maxRuns" - (
SELECT COUNT(*)
FROM v1_task_runtime runtime
WHERE
runtime.tenant_id = w."tenantId" AND
runtime.worker_id = w."id"
) AS "remainingSlots"
w.id, w."createdAt", w."updatedAt", w."deletedAt", w."tenantId", w."lastHeartbeatAt", w.name, w."dispatcherId", w."maxRuns", w."isActive", w."lastListenerEstablished", w."isPaused", w.type, w."webhookId", w.language, w."languageVersion", w.os, w."runtimeExtra", w."sdkVersion", w."durableTaskDispatcherId"
FROM
"Worker" w
LEFT JOIN
"WebhookWorker" ww ON w."webhookId" = ww."id"
WHERE
w."id" = $1::uuid
`
type GetWorkerByIdRow struct {
Worker Worker `json:"worker"`
WebhookUrl pgtype.Text `json:"webhookUrl"`
RemainingSlots int32 `json:"remainingSlots"`
Worker Worker `json:"worker"`
}
func (q *Queries) GetWorkerById(ctx context.Context, db DBTX, id uuid.UUID) (*GetWorkerByIdRow, error) {
@@ -341,8 +360,6 @@ func (q *Queries) GetWorkerById(ctx context.Context, db DBTX, id uuid.UUID) (*Ge
&i.Worker.RuntimeExtra,
&i.Worker.SdkVersion,
&i.Worker.DurableTaskDispatcherId,
&i.WebhookUrl,
&i.RemainingSlots,
)
return &i, err
}
@@ -538,6 +555,47 @@ func (q *Queries) ListActiveSDKsPerTenant(ctx context.Context, db DBTX) ([]*List
return items, nil
}
const listActiveSlotsPerTenantAndSlotType = `-- name: ListActiveSlotsPerTenantAndSlotType :many
SELECT
wc.tenant_id AS "tenantId",
wc.slot_type AS "slotType",
SUM(wc.max_units) AS "activeSlots"
FROM v1_worker_slot_config wc
JOIN "Worker" w ON w."id" = wc.worker_id AND w."tenantId" = wc.tenant_id
WHERE
w."dispatcherId" IS NOT NULL
AND w."lastHeartbeatAt" > NOW() - INTERVAL '5 seconds'
AND w."isActive" = true
AND w."isPaused" = false
GROUP BY wc.tenant_id, wc.slot_type
`
type ListActiveSlotsPerTenantAndSlotTypeRow struct {
TenantId uuid.UUID `json:"tenantId"`
SlotType string `json:"slotType"`
ActiveSlots int64 `json:"activeSlots"`
}
func (q *Queries) ListActiveSlotsPerTenantAndSlotType(ctx context.Context, db DBTX) ([]*ListActiveSlotsPerTenantAndSlotTypeRow, error) {
rows, err := db.Query(ctx, listActiveSlotsPerTenantAndSlotType)
if err != nil {
return nil, err
}
defer rows.Close()
var items []*ListActiveSlotsPerTenantAndSlotTypeRow
for rows.Next() {
var i ListActiveSlotsPerTenantAndSlotTypeRow
if err := rows.Scan(&i.TenantId, &i.SlotType, &i.ActiveSlots); err != nil {
return nil, err
}
items = append(items, &i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const listActiveWorkersPerTenant = `-- name: ListActiveWorkersPerTenant :many
SELECT "tenantId", COUNT(*)
FROM "Worker"
@@ -574,6 +632,139 @@ func (q *Queries) ListActiveWorkersPerTenant(ctx context.Context, db DBTX) ([]*L
return items, nil
}
const listAvailableSlotsForWorkers = `-- name: ListAvailableSlotsForWorkers :many
WITH worker_capacities AS (
SELECT
worker_id,
max_units
FROM
v1_worker_slot_config
WHERE
tenant_id = $1::uuid
AND worker_id = ANY($2::uuid[])
AND slot_type = $3::text
), worker_used_slots AS (
SELECT
worker_id,
SUM(units) AS used_units
FROM
v1_task_runtime_slot
WHERE
tenant_id = $1::uuid
AND worker_id = ANY($2::uuid[])
AND slot_type = $3::text
GROUP BY
worker_id
)
SELECT
wc.worker_id AS "id",
wc.max_units - COALESCE(wus.used_units, 0) AS "availableSlots"
FROM
worker_capacities wc
LEFT JOIN
worker_used_slots wus ON wc.worker_id = wus.worker_id
`
type ListAvailableSlotsForWorkersParams struct {
Tenantid uuid.UUID `json:"tenantid"`
Workerids []uuid.UUID `json:"workerids"`
Slottype string `json:"slottype"`
}
type ListAvailableSlotsForWorkersRow struct {
ID uuid.UUID `json:"id"`
AvailableSlots int32 `json:"availableSlots"`
}
func (q *Queries) ListAvailableSlotsForWorkers(ctx context.Context, db DBTX, arg ListAvailableSlotsForWorkersParams) ([]*ListAvailableSlotsForWorkersRow, error) {
rows, err := db.Query(ctx, listAvailableSlotsForWorkers, arg.Tenantid, arg.Workerids, arg.Slottype)
if err != nil {
return nil, err
}
defer rows.Close()
var items []*ListAvailableSlotsForWorkersRow
for rows.Next() {
var i ListAvailableSlotsForWorkersRow
if err := rows.Scan(&i.ID, &i.AvailableSlots); err != nil {
return nil, err
}
items = append(items, &i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const listAvailableSlotsForWorkersAndTypes = `-- name: ListAvailableSlotsForWorkersAndTypes :many
WITH worker_capacities AS (
SELECT
worker_id,
slot_type,
max_units
FROM
v1_worker_slot_config
WHERE
tenant_id = $1::uuid
AND worker_id = ANY($2::uuid[])
AND slot_type = ANY($3::text[])
), worker_used_slots AS (
SELECT
worker_id,
slot_type,
SUM(units) AS used_units
FROM
v1_task_runtime_slot
WHERE
tenant_id = $1::uuid
AND worker_id = ANY($2::uuid[])
AND slot_type = ANY($3::text[])
GROUP BY
worker_id,
slot_type
)
SELECT
wc.worker_id AS "id",
wc.slot_type AS "slotType",
wc.max_units - COALESCE(wus.used_units, 0) AS "availableSlots"
FROM
worker_capacities wc
LEFT JOIN
worker_used_slots wus ON wc.worker_id = wus.worker_id AND wc.slot_type = wus.slot_type
`
// ListAvailableSlotsForWorkersAndTypesParams binds the inputs of the
// ListAvailableSlotsForWorkersAndTypes query: a tenant, the workers to
// inspect, and the slot types to measure.
type ListAvailableSlotsForWorkersAndTypesParams struct {
	Tenantid  uuid.UUID   `json:"tenantid"`  // $1: tenant that owns the workers
	Workerids []uuid.UUID `json:"workerids"` // $2: worker IDs to report on
	Slottypes []string    `json:"slottypes"` // $3: slot types whose capacity is queried
}
// ListAvailableSlotsForWorkersAndTypesRow is one result row of the
// ListAvailableSlotsForWorkersAndTypes query: a (worker, slot type) pair and
// its remaining capacity (max_units minus used units, per the query SQL).
type ListAvailableSlotsForWorkersAndTypesRow struct {
	ID             uuid.UUID `json:"id"`             // worker_id, aliased "id" in the query
	SlotType       string    `json:"slotType"`       // slot_type, aliased "slotType"
	AvailableSlots int32     `json:"availableSlots"` // max_units - COALESCE(used_units, 0)
}
// ListAvailableSlotsForWorkersAndTypes returns, for every requested
// (worker, slot type) combination with a configured capacity, the number of
// slot units still available (configured max minus units in use, per the
// listAvailableSlotsForWorkersAndTypes SQL). Any query or scan error is
// returned unchanged.
func (q *Queries) ListAvailableSlotsForWorkersAndTypes(ctx context.Context, db DBTX, arg ListAvailableSlotsForWorkersAndTypesParams) ([]*ListAvailableSlotsForWorkersAndTypesRow, error) {
	result, err := db.Query(ctx, listAvailableSlotsForWorkersAndTypes, arg.Tenantid, arg.Workerids, arg.Slottypes)
	if err != nil {
		return nil, err
	}
	defer result.Close()

	var out []*ListAvailableSlotsForWorkersAndTypesRow

	for result.Next() {
		row := new(ListAvailableSlotsForWorkersAndTypesRow)

		if err := result.Scan(&row.ID, &row.SlotType, &row.AvailableSlots); err != nil {
			return nil, err
		}

		out = append(out, row)
	}

	// Surface any error encountered during iteration (e.g. connection loss).
	if err := result.Err(); err != nil {
		return nil, err
	}

	return out, nil
}
const listDispatcherIdsForWorkers = `-- name: ListDispatcherIdsForWorkers :many
SELECT
"id" as "workerId",
@@ -861,14 +1052,17 @@ func (q *Queries) ListSemaphoreSlotsWithStateForWorker(ctx context.Context, db D
}
const listTotalActiveSlotsPerTenant = `-- name: ListTotalActiveSlotsPerTenant :many
SELECT "tenantId", SUM("maxRuns") AS "totalActiveSlots"
FROM "Worker"
SELECT
wc.tenant_id AS "tenantId",
SUM(wc.max_units) AS "totalActiveSlots"
FROM v1_worker_slot_config wc
JOIN "Worker" w ON w."id" = wc.worker_id AND w."tenantId" = wc.tenant_id
WHERE
"dispatcherId" IS NOT NULL
AND "lastHeartbeatAt" > NOW() - INTERVAL '5 seconds'
AND "isActive" = true
AND "isPaused" = false
GROUP BY "tenantId"
w."dispatcherId" IS NOT NULL
AND w."lastHeartbeatAt" > NOW() - INTERVAL '5 seconds'
AND w."isActive" = true
AND w."isPaused" = false
GROUP BY wc.tenant_id
`
type ListTotalActiveSlotsPerTenantRow struct {
@@ -944,22 +1138,54 @@ func (q *Queries) ListWorkerLabels(ctx context.Context, db DBTX, workerid uuid.U
return items, nil
}
const listWorkersWithSlotCount = `-- name: ListWorkersWithSlotCount :many
const listWorkerSlotConfigs = `-- name: ListWorkerSlotConfigs :many
SELECT
workers.id, workers."createdAt", workers."updatedAt", workers."deletedAt", workers."tenantId", workers."lastHeartbeatAt", workers.name, workers."dispatcherId", workers."maxRuns", workers."isActive", workers."lastListenerEstablished", workers."isPaused", workers.type, workers."webhookId", workers.language, workers."languageVersion", workers.os, workers."runtimeExtra", workers."sdkVersion", workers."durableTaskDispatcherId",
ww."url" AS "webhookUrl",
ww."id" AS "webhookId",
workers."maxRuns" - (
SELECT COUNT(*)
FROM v1_task_runtime runtime
WHERE
runtime.tenant_id = workers."tenantId" AND
runtime.worker_id = workers."id"
) AS "remainingSlots"
worker_id,
slot_type,
max_units
FROM
v1_worker_slot_config
WHERE
tenant_id = $1::uuid
AND worker_id = ANY($2::uuid[])
`
// ListWorkerSlotConfigsParams binds the inputs of the ListWorkerSlotConfigs
// query: a tenant and the workers whose slot configuration to fetch.
type ListWorkerSlotConfigsParams struct {
	Tenantid  uuid.UUID   `json:"tenantid"`  // $1: tenant that owns the workers
	Workerids []uuid.UUID `json:"workerids"` // $2: worker IDs to fetch configs for
}
// ListWorkerSlotConfigsRow is one row of v1_worker_slot_config as returned by
// the ListWorkerSlotConfigs query: the configured capacity for one
// (worker, slot type) pair.
type ListWorkerSlotConfigsRow struct {
	WorkerID uuid.UUID `json:"worker_id"` // worker this config belongs to
	SlotType string    `json:"slot_type"` // slot type being configured
	MaxUnits int32     `json:"max_units"` // configured capacity in units
}
func (q *Queries) ListWorkerSlotConfigs(ctx context.Context, db DBTX, arg ListWorkerSlotConfigsParams) ([]*ListWorkerSlotConfigsRow, error) {
rows, err := db.Query(ctx, listWorkerSlotConfigs, arg.Tenantid, arg.Workerids)
if err != nil {
return nil, err
}
defer rows.Close()
var items []*ListWorkerSlotConfigsRow
for rows.Next() {
var i ListWorkerSlotConfigsRow
if err := rows.Scan(&i.WorkerID, &i.SlotType, &i.MaxUnits); err != nil {
return nil, err
}
items = append(items, &i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const listWorkers = `-- name: ListWorkers :many
SELECT
workers.id, workers."createdAt", workers."updatedAt", workers."deletedAt", workers."tenantId", workers."lastHeartbeatAt", workers.name, workers."dispatcherId", workers."maxRuns", workers."isActive", workers."lastListenerEstablished", workers."isPaused", workers.type, workers."webhookId", workers.language, workers."languageVersion", workers.os, workers."runtimeExtra", workers."sdkVersion", workers."durableTaskDispatcherId"
FROM
"Worker" workers
LEFT JOIN
"WebhookWorker" ww ON workers."webhookId" = ww."id"
WHERE
workers."tenantId" = $1
AND (
@@ -977,33 +1203,33 @@ WHERE
)
AND (
$4::boolean IS NULL OR
workers."maxRuns" IS NULL OR
($4::boolean AND workers."maxRuns" > (
SELECT COUNT(*)
FROM "StepRun" srs
WHERE srs."workerId" = workers."id" AND srs."status" = 'RUNNING'
($4::boolean AND (
SELECT COALESCE(SUM(cap.max_units), 0)
FROM v1_worker_slot_config cap
WHERE cap.tenant_id = workers."tenantId" AND cap.worker_id = workers."id"
) > (
SELECT COALESCE(SUM(runtime.units), 0)
FROM v1_task_runtime_slot runtime
WHERE runtime.tenant_id = workers."tenantId" AND runtime.worker_id = workers."id"
))
)
GROUP BY
workers."id", ww."url", ww."id"
workers."id"
`
type ListWorkersWithSlotCountParams struct {
type ListWorkersParams struct {
Tenantid uuid.UUID `json:"tenantid"`
ActionId pgtype.Text `json:"actionId"`
LastHeartbeatAfter pgtype.Timestamp `json:"lastHeartbeatAfter"`
Assignable pgtype.Bool `json:"assignable"`
}
type ListWorkersWithSlotCountRow struct {
Worker Worker `json:"worker"`
WebhookUrl pgtype.Text `json:"webhookUrl"`
WebhookId *uuid.UUID `json:"webhookId"`
RemainingSlots int32 `json:"remainingSlots"`
type ListWorkersRow struct {
Worker Worker `json:"worker"`
}
func (q *Queries) ListWorkersWithSlotCount(ctx context.Context, db DBTX, arg ListWorkersWithSlotCountParams) ([]*ListWorkersWithSlotCountRow, error) {
rows, err := db.Query(ctx, listWorkersWithSlotCount,
func (q *Queries) ListWorkers(ctx context.Context, db DBTX, arg ListWorkersParams) ([]*ListWorkersRow, error) {
rows, err := db.Query(ctx, listWorkers,
arg.Tenantid,
arg.ActionId,
arg.LastHeartbeatAfter,
@@ -1013,9 +1239,9 @@ func (q *Queries) ListWorkersWithSlotCount(ctx context.Context, db DBTX, arg Lis
return nil, err
}
defer rows.Close()
var items []*ListWorkersWithSlotCountRow
var items []*ListWorkersRow
for rows.Next() {
var i ListWorkersWithSlotCountRow
var i ListWorkersRow
if err := rows.Scan(
&i.Worker.ID,
&i.Worker.CreatedAt,
@@ -1037,9 +1263,6 @@ func (q *Queries) ListWorkersWithSlotCount(ctx context.Context, db DBTX, arg Lis
&i.Worker.RuntimeExtra,
&i.Worker.SdkVersion,
&i.Worker.DurableTaskDispatcherId,
&i.WebhookUrl,
&i.WebhookId,
&i.RemainingSlots,
); err != nil {
return nil, err
}
@@ -1057,18 +1280,16 @@ UPDATE
SET
"updatedAt" = CURRENT_TIMESTAMP,
"dispatcherId" = coalesce($1::uuid, "dispatcherId"),
"maxRuns" = coalesce($2::int, "maxRuns"),
"lastHeartbeatAt" = coalesce($3::timestamp, "lastHeartbeatAt"),
"isActive" = coalesce($4::boolean, "isActive"),
"isPaused" = coalesce($5::boolean, "isPaused")
"lastHeartbeatAt" = coalesce($2::timestamp, "lastHeartbeatAt"),
"isActive" = coalesce($3::boolean, "isActive"),
"isPaused" = coalesce($4::boolean, "isPaused")
WHERE
"id" = $6::uuid
"id" = $5::uuid
RETURNING id, "createdAt", "updatedAt", "deletedAt", "tenantId", "lastHeartbeatAt", name, "dispatcherId", "maxRuns", "isActive", "lastListenerEstablished", "isPaused", type, "webhookId", language, "languageVersion", os, "runtimeExtra", "sdkVersion", "durableTaskDispatcherId"
`
type UpdateWorkerParams struct {
DispatcherId *uuid.UUID `json:"dispatcherId"`
MaxRuns pgtype.Int4 `json:"maxRuns"`
LastHeartbeatAt pgtype.Timestamp `json:"lastHeartbeatAt"`
IsActive pgtype.Bool `json:"isActive"`
IsPaused pgtype.Bool `json:"isPaused"`
@@ -1078,7 +1299,6 @@ type UpdateWorkerParams struct {
func (q *Queries) UpdateWorker(ctx context.Context, db DBTX, arg UpdateWorkerParams) (*Worker, error) {
row := db.QueryRow(ctx, updateWorker,
arg.DispatcherId,
arg.MaxRuns,
arg.LastHeartbeatAt,
arg.IsActive,
arg.IsPaused,

Some files were not shown because too many files have changed in this diff Show More