diff --git a/.github/workflows/app.yml b/.github/workflows/app.yml index 515c8e67e..c088bf304 100644 --- a/.github/workflows/app.yml +++ b/.github/workflows/app.yml @@ -1,5 +1,9 @@ name: "frontend / app" -on: pull_request +on: + pull_request: + paths-ignore: + - 'sdks/**' + jobs: lint: runs-on: ubuntu-latest diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3b54822d5..a01392813 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,5 +1,9 @@ name: build -on: pull_request +on: + pull_request: + paths-ignore: + - 'sdks/**' + jobs: frontend: runs-on: ubuntu-latest @@ -63,7 +67,7 @@ jobs: - name: Clone repository uses: actions/checkout@v4 - name: Build migrate - run: docker build -f ./build/package/migrate.dockerfile . + run: docker build -f ./build/package/servers.dockerfile . --build-arg SERVER_TARGET=migrate migrate-arm: runs-on: hatchet-arm64-2 @@ -71,7 +75,7 @@ jobs: - name: Clone repository uses: actions/checkout@v4 - name: Build migrate - run: docker build -f ./build/package/migrate.dockerfile . + run: docker build -f ./build/package/servers.dockerfile . --build-arg SERVER_TARGET=migrate lite-arm: runs-on: hatchet-arm64-2 @@ -92,11 +96,18 @@ jobs: -t hatchet-admin-tmp:arm64 \ . & + DOCKER_BUILDKIT=1 docker build -f ./build/package/servers.dockerfile \ + --build-arg SERVER_TARGET=migrate \ + --platform linux/arm64 \ + -t hatchet-migrate-tmp:arm64 \ + . & + wait DOCKER_BUILDKIT=1 docker build -f ./build/package/lite.dockerfile \ --build-arg HATCHET_LITE_IMAGE=hatchet-lite-tmp:arm64 \ --build-arg HATCHET_ADMIN_IMAGE=hatchet-admin-tmp:arm64 \ + --build-arg HATCHET_MIGRATE_IMAGE=hatchet-migrate-tmp:arm64 \ --platform linux/arm64 \ . @@ -119,11 +130,18 @@ jobs: -t hatchet-admin-tmp:amd64 \ . & + DOCKER_BUILDKIT=1 docker build -f ./build/package/servers.dockerfile \ + --build-arg SERVER_TARGET=migrate \ + --platform linux/amd64 \ + -t hatchet-migrate-tmp:amd64 \ + . 
& + wait DOCKER_BUILDKIT=1 docker build -f ./build/package/lite.dockerfile \ --build-arg HATCHET_LITE_IMAGE=hatchet-lite-tmp:amd64 \ --build-arg HATCHET_ADMIN_IMAGE=hatchet-admin-tmp:amd64 \ + --build-arg HATCHET_MIGRATE_IMAGE=hatchet-migrate-tmp:amd64 \ --platform linux/amd64 \ . diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index cd7520f14..8cafb690c 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -1,5 +1,8 @@ name: "frontend / docs" -on: pull_request +on: + pull_request: + paths-ignore: + - 'sdks/**' jobs: lint: runs-on: ubuntu-latest diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 0d9601667..b59fa415c 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -1,5 +1,8 @@ name: lint all -on: pull_request +on: + pull_request: + paths-ignore: + - 'sdks/**' jobs: lint: runs-on: ubuntu-latest diff --git a/.github/workflows/pre-release.yaml b/.github/workflows/pre-release.yaml index f563cafef..865538fcd 100644 --- a/.github/workflows/pre-release.yaml +++ b/.github/workflows/pre-release.yaml @@ -251,7 +251,8 @@ jobs: run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin - name: Build run: | - DOCKER_BUILDKIT=1 docker build -f ./build/package/migrate.dockerfile \ + DOCKER_BUILDKIT=1 docker build -f ./build/package/servers.dockerfile \ + --build-arg SERVER_TARGET=migrate \ -t ghcr.io/hatchet-dev/hatchet/hatchet-migrate:${{steps.tag_name.outputs.tag}}-amd64 \ --platform linux/amd64 \ . @@ -274,7 +275,8 @@ jobs: run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin - name: Build run: | - DOCKER_BUILDKIT=1 docker build -f ./build/package/migrate.dockerfile \ + DOCKER_BUILDKIT=1 docker build -f ./build/package/servers.dockerfile \ + --build-arg SERVER_TARGET=migrate \ -t ghcr.io/hatchet-dev/hatchet/hatchet-migrate:${{steps.tag_name.outputs.tag}}-arm64 \ --platform linux/arm64 \ . 
@@ -413,6 +415,12 @@ jobs: -t hatchet-admin-tmp:amd64 \ . & + DOCKER_BUILDKIT=1 docker build -f ./build/package/servers.dockerfile \ + --build-arg SERVER_TARGET=migrate \ + --platform linux/amd64 \ + -t hatchet-migrate-tmp:amd64 \ + . & + wait DOCKER_BUILDKIT=1 docker build -f ./build/package/lite.dockerfile \ @@ -420,6 +428,7 @@ jobs: --platform linux/amd64 \ --build-arg HATCHET_LITE_IMAGE=hatchet-lite-tmp:amd64 \ --build-arg HATCHET_ADMIN_IMAGE=hatchet-admin-tmp:amd64 \ + --build-arg HATCHET_MIGRATE_IMAGE=hatchet-migrate-tmp:amd64 \ . - name: Push to GHCR run: | @@ -456,6 +465,12 @@ jobs: -t hatchet-admin-tmp:arm64 \ . & + DOCKER_BUILDKIT=1 docker build -f ./build/package/servers.dockerfile \ + --build-arg SERVER_TARGET=migrate \ + --platform linux/arm64 \ + -t hatchet-migrate-tmp:arm64 \ + . & + wait DOCKER_BUILDKIT=1 docker build -f ./build/package/lite.dockerfile \ @@ -463,6 +478,7 @@ jobs: --platform linux/arm64 \ --build-arg HATCHET_LITE_IMAGE=hatchet-lite-tmp:arm64 \ --build-arg HATCHET_ADMIN_IMAGE=hatchet-admin-tmp:arm64 \ + --build-arg HATCHET_MIGRATE_IMAGE=hatchet-migrate-tmp:arm64 \ . 
- name: Push to GHCR run: | diff --git a/.github/workflows/sdk-python.yml b/.github/workflows/sdk-python.yml new file mode 100644 index 000000000..aa89949ac --- /dev/null +++ b/.github/workflows/sdk-python.yml @@ -0,0 +1,119 @@ +name: python +on: + pull_request: + paths: + - 'sdks/python/**' + - '.github/**' + push: + branches: + - main + paths: + - 'sdks/python/**' + - '.github/**' + +defaults: + run: + working-directory: ./sdks/python + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install Poetry + uses: snok/install-poetry@v1 + with: + version: 1.5.1 + virtualenvs-create: true + virtualenvs-in-project: true + + - name: Install linting tools + run: poetry install --all-extras + + - name: Run Black + run: poetry run black . --check --verbose --diff --color + + - name: Run Isort + run: poetry run isort . --check-only --diff + + - name: Run MyPy + run: poetry run mypy --config-file=pyproject.toml + + - name: Run Ruff + run: poetry run ruff check . 
+ + test: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.10", "3.11", "3.12", "3.13"] + + steps: + - uses: actions/checkout@v4 + + - name: Run Hatchet Engine + run: docker compose up -d + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + - name: Display Python version + run: python -c "import sys; print(sys.version)" + - name: Install Poetry + uses: snok/install-poetry@v1 + with: + version: 1.5.1 + virtualenvs-create: true + virtualenvs-in-project: true + - name: Install dependencies + run: poetry install --no-interaction --all-extras + + - name: Generate Env File + run: | + cat > .env <<EOF + HATCHET_CLIENT_TOKEN="$(docker compose run --no-deps setup-config /hatchet/hatchet-admin token create --config /hatchet/config --tenant-id 707d0855-80ab-4e1f-a156-f1c4546cbf52 | xargs)" + HATCHET_CLIENT_TLS_STRATEGY=none + EOF + + - name: Set HATCHET_CLIENT_NAMESPACE + run: | + PYTHON_VERSION=$(python -c "import sys; print(f'py{sys.version_info.major}{sys.version_info.minor}')") + SHORT_SHA=$(git rev-parse --short HEAD) + echo "HATCHET_CLIENT_NAMESPACE=${PYTHON_VERSION}-${SHORT_SHA}" >> $GITHUB_ENV + - name: Run pytest + run: | + echo "Using HATCHET_CLIENT_NAMESPACE: $HATCHET_CLIENT_NAMESPACE" + poetry run pytest -s -vvv --maxfail=5 --timeout=180 --capture=no + + publish: + runs-on: ubuntu-latest + needs: [lint, test] + if: github.ref == 'refs/heads/main' + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.x" + + - name: Install Poetry + run: | + pipx install poetry==1.7.1 + + - name: Run publish.sh script + run: | + sh publish.sh + env: + POETRY_PYPI_TOKEN_PYPI: ${{ secrets.POETRY_PYPI_TOKEN_PYPI }} diff --git a/.github/workflows/spelling.yml b/.github/workflows/spelling.yml index 401361762..f6fb313d6 100644 --- 
a/.github/workflows/spelling.yml +++ b/.github/workflows/spelling.yml @@ -6,3 +6,4 @@ jobs: steps: - uses: actions/checkout@v4 - uses: crate-ci/typos@master + continue-on-error: true diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 458cc36fb..121be6604 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -1,5 +1,8 @@ name: test -on: pull_request +on: + pull_request: + paths-ignore: + - 'sdks/**' jobs: generate: runs-on: ubuntu-latest @@ -37,7 +40,7 @@ jobs: - name: Generate run: | - sh ./hack/db/atlas-apply.sh + go run ./cmd/hatchet-migrate task pre-commit-install task generate-all @@ -99,7 +102,7 @@ jobs: - name: Generate run: | - sh ./hack/db/atlas-apply.sh + go run ./cmd/hatchet-migrate task generate-go task generate-certs task generate-local-encryption-keys @@ -173,7 +176,7 @@ jobs: - name: Generate run: | - sh ./hack/db/atlas-apply.sh + go run ./cmd/hatchet-migrate task generate-go task generate-certs task generate-local-encryption-keys @@ -262,7 +265,7 @@ jobs: - name: Generate run: | - sh ./hack/db/atlas-apply.sh + go run ./cmd/hatchet-migrate task generate-go task generate-certs task generate-local-encryption-keys @@ -339,7 +342,7 @@ jobs: - name: Generate run: | - sh ./hack/db/atlas-apply.sh + go run ./cmd/hatchet-migrate task generate-go task generate-certs task generate-local-encryption-keys @@ -415,7 +418,7 @@ jobs: - name: Generate run: | - sh ./hack/db/atlas-apply.sh + go run ./cmd/hatchet-migrate task generate-go task generate-certs task generate-local-encryption-keys diff --git a/.golangci.yml b/.golangci.yml index 53dbb9513..af3803f63 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -36,8 +36,7 @@ linters-settings: - "-ST1005" issues: - exclude-files: - - "pkg/repository/prisma/db/db_gen.go" + exclude-files: [] exclude: - "by other packages, and that stutters; consider calling this" - "var-naming:" diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c12eefe1e..a2d63b001 100644 --- 
a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -6,9 +6,7 @@ repos: - id: mixed-line-ending args: ["--fix=lf"] - id: end-of-file-fixer - exclude: prisma/migrations/.*\.sql|sql/migrations/.*\.sql - id: trailing-whitespace - exclude: prisma/migrations/.*\.sql|sql/migrations/.*\.sql - id: check-yaml - repo: https://github.com/golangci/golangci-lint rev: v1.62.0 diff --git a/Taskfile.yaml b/Taskfile.yaml index 7ed57bd30..71e2fab68 100644 --- a/Taskfile.yaml +++ b/Taskfile.yaml @@ -70,14 +70,13 @@ tasks: migrate: cmds: - task: generate-sqlc - - task: atlas-compare-schema-to-migrations-dir - - task: atlas-apply-migrations - atlas-compare-schema-to-migrations-dir: + - task: goose-migrate + atlas-migrate: cmds: - sh ./hack/dev/atlas-migrate.sh {{.CLI_ARGS}} - atlas-apply-migrations: + goose-migrate: cmds: - - DATABASE_URL='postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet' sh ./hack/db/atlas-apply.sh + - sh ./hack/dev/migrate.sh seed-dev: cmds: - SEED_DEVELOPMENT=true sh ./hack/dev/run-go-with-env.sh run ./cmd/hatchet-admin seed @@ -150,7 +149,8 @@ tasks: - sh ./generate.sh generate-sqlc: cmds: - - go run github.com/sqlc-dev/sqlc/cmd/sqlc@v1.24.0 generate --file pkg/repository/prisma/dbsqlc/sqlc.yaml + - go run github.com/sqlc-dev/sqlc/cmd/sqlc@v1.24.0 generate --file pkg/repository/postgres/dbsqlc/sqlc.yaml + - go run github.com/sqlc-dev/sqlc/cmd/sqlc@v1.24.0 generate --file pkg/repository/v1/sqlcv1/sqlc.yaml lint: cmds: - task: lint-go @@ -161,9 +161,6 @@ tasks: lint-frontend: cmds: - cd frontend/app/ && pnpm run lint:check - kill-query-engines: - cmds: - - ps -A | grep 'prisma-query-engine-darwin-arm64' | grep -v grep | awk '{print $1}' | xargs kill -9 $1 kill-apis: cmds: - ps -A | grep 'cmd/hatchet-api' | grep -v grep | awk '{print $1}' | xargs kill -9 $1 diff --git a/api-contracts/openapi/components/schemas/_index.yaml b/api-contracts/openapi/components/schemas/_index.yaml index b6b669910..7d1fe014d 100644 --- 
a/api-contracts/openapi/components/schemas/_index.yaml +++ b/api-contracts/openapi/components/schemas/_index.yaml @@ -292,3 +292,41 @@ WebhookWorkerCreateResponse: $ref: "./webhook_worker.yaml#/WebhookWorkerCreateResponse" WebhookWorkerListResponse: $ref: "./webhook_worker.yaml#/WebhookWorkerListResponse" +V1TaskSummaryList: + $ref: "./v1/task.yaml#/V1TaskSummaryList" +V1WorkflowRunDisplayNameList: + $ref: "./v1/task.yaml#/V1WorkflowRunDisplayNameList" +V1TaskSummary: + $ref: "./v1/task.yaml#/V1TaskSummary" +V1DagChildren: + $ref: "./v1/task.yaml#/V1DagChildren" +V1TaskEventList: + $ref: "./v1/task.yaml#/V1TaskEventList" +V1TaskStatus: + $ref: "./v1/task.yaml#/V1TaskStatus" +V1TaskRunMetrics: + $ref: "./v1/task.yaml#/V1TaskRunMetrics" +V1TaskPointMetric: + $ref: "./v1/task.yaml#/V1TaskPointMetric" +V1TaskPointMetrics: + $ref: "./v1/task.yaml#/V1TaskPointMetrics" +V1TaskFilter: + $ref: "./v1/task.yaml#/V1TaskFilter" +V1CancelTaskRequest: + $ref: "./v1/task.yaml#/V1CancelTaskRequest" +V1ReplayTaskRequest: + $ref: "./v1/task.yaml#/V1ReplayTaskRequest" +V1WorkflowRun: + $ref: "./v1/workflow_run.yaml#/V1WorkflowRun" +V1WorkflowRunDetails: + $ref: "./v1/workflow_run.yaml#/V1WorkflowRunDetails" +V1TaskRunStatus: + $ref: "./workflow_run.yaml#/V1TaskRunStatus" +V1TriggerWorkflowRunRequest: + $ref: "./v1/workflow_run.yaml#/V1TriggerWorkflowRunRequest" +V1LogLine: + $ref: "./v1/logs.yaml#/V1LogLine" +V1LogLineLevel: + $ref: "./v1/logs.yaml#/V1LogLineLevel" +V1LogLineList: + $ref: "./v1/logs.yaml#/V1LogLineList" diff --git a/api-contracts/openapi/components/schemas/tenant.yaml b/api-contracts/openapi/components/schemas/tenant.yaml index 46acbaa11..7de0f43e1 100644 --- a/api-contracts/openapi/components/schemas/tenant.yaml +++ b/api-contracts/openapi/components/schemas/tenant.yaml @@ -14,12 +14,23 @@ Tenant: alertMemberEmails: type: boolean description: Whether to alert tenant members. + version: + $ref: "#/TenantVersion" + description: The version of the tenant. 
required: - metadata - name - slug + - version type: object +TenantVersion: + enum: + - "V0" + - "V1" + type: string + + TenantAlertingSettings: properties: metadata: @@ -90,6 +101,9 @@ UpdateTenantRequest: description: The max frequency at which to alert. x-oapi-codegen-extra-tags: validate: "omitnil,duration" + version: + $ref: "#/TenantVersion" + description: The version of the tenant. type: object TenantResource: diff --git a/api-contracts/openapi/components/schemas/v1/logs.yaml b/api-contracts/openapi/components/schemas/v1/logs.yaml new file mode 100644 index 000000000..9733281f1 --- /dev/null +++ b/api-contracts/openapi/components/schemas/v1/logs.yaml @@ -0,0 +1,33 @@ +V1LogLine: + properties: + createdAt: + type: string + format: date-time + description: The creation date of the log line. + message: + type: string + description: The log message. + metadata: + type: object + description: The log metadata. + required: + - createdAt + - message + - metadata + +V1LogLineLevel: + type: string + enum: + - DEBUG + - INFO + - WARN + - ERROR + +V1LogLineList: + properties: + pagination: + $ref: "../metadata.yaml#/PaginationResponse" + rows: + items: + $ref: "#/V1LogLine" + type: array diff --git a/api-contracts/openapi/components/schemas/v1/task.yaml b/api-contracts/openapi/components/schemas/v1/task.yaml new file mode 100644 index 000000000..3e02cdee5 --- /dev/null +++ b/api-contracts/openapi/components/schemas/v1/task.yaml @@ -0,0 +1,321 @@ +V1WorkflowType: + type: string + enum: + - DAG + - TASK + +V1TaskSummary: + properties: + metadata: + $ref: ".././metadata.yaml#/APIResourceMeta" + additionalMetadata: + type: object + description: Additional metadata for the task run. + children: + type: array + items: + $ref: "#/V1TaskSummary" + description: The list of children tasks + createdAt: + type: string + format: date-time + description: The timestamp the task was created. + displayName: + type: string + description: The display name of the task run. 
+ duration: + type: integer + description: The duration of the task run, in milliseconds. + errorMessage: + type: string + description: The error message of the task run (for the latest run) + finishedAt: + type: string + format: date-time + description: The timestamp the task run finished. + input: + type: object + description: The input of the task run. + numSpawnedChildren: + type: integer + description: The number of spawned children tasks + output: + type: object + description: The output of the task run (for the latest run) + status: + $ref: "#/V1TaskStatus" + startedAt: + type: string + format: date-time + description: The timestamp the task run started. + stepId: + type: string + description: The step ID of the task. + format: uuid + minLength: 36 + maxLength: 36 + taskExternalId: + type: string + description: The external ID of the task. + format: uuid + minLength: 36 + maxLength: 36 + taskId: + type: integer + description: The ID of the task. + taskInsertedAt: + type: string + format: date-time + description: The timestamp the task was inserted. + tenantId: + type: string + description: The ID of the tenant. 
+ example: bb214807-246e-43a5-a25d-41761d1cff9e + minLength: 36 + maxLength: 36 + format: uuid + type: + $ref: "#/V1WorkflowType" + description: The type of the workflow (whether it's a DAG or a task) + workflowId: + type: string + format: uuid + workflowName: + type: string + workflowRunExternalId: + type: string + format: uuid + description: The external ID of the workflow run + workflowVersionId: + type: string + format: uuid + description: The version ID of the workflow + required: + - metadata + - createdAt + - displayName + - id + - input + - numSpawnedChildren + - output + - status + - taskExternalId + - taskId + - taskInsertedAt + - tenantId + - type + - workflowId + +V1WorkflowRunDisplayName: + properties: + metadata: + $ref: ".././metadata.yaml#/APIResourceMeta" + displayName: + type: string + required: + - metadata + - displayName + +V1DagChildren: + type: object + properties: + dagId: + type: string + format: uuid + children: + type: array + items: + $ref: "#/V1TaskSummary" + +V1TaskSummaryList: + type: object + properties: + pagination: + $ref: ".././metadata.yaml#/PaginationResponse" + rows: + type: array + items: + $ref: "#/V1TaskSummary" + description: The list of tasks + required: + - pagination + - rows + +V1WorkflowRunDisplayNameList: + type: object + properties: + pagination: + $ref: ".././metadata.yaml#/PaginationResponse" + rows: + type: array + items: + $ref: "#/V1WorkflowRunDisplayName" + description: The list of display names + required: + - pagination + - rows + +V1TaskEventList: + properties: + pagination: + $ref: ".././metadata.yaml#/PaginationResponse" + rows: + items: + $ref: "#/V1TaskEvent" + type: array + +V1TaskEvent: + type: object + properties: + id: + type: integer + taskId: + type: string + format: uuid + timestamp: + type: string + format: date-time + eventType: + $ref: "#/V1TaskEventType" + message: + type: string + errorMessage: + type: string + output: + type: string + workerId: + type: string + format: uuid + 
taskDisplayName: + type: string + required: + - id + - taskId + - timestamp + - eventType + - message + +V1TaskStatus: + type: string + enum: + - QUEUED + - RUNNING + - COMPLETED + - CANCELLED + - FAILED + +V1TaskEventType: + type: string + enum: + - REQUEUED_NO_WORKER + - REQUEUED_RATE_LIMIT + - SCHEDULING_TIMED_OUT + - ASSIGNED + - STARTED + - FINISHED + - FAILED + - RETRYING + - CANCELLED + - TIMED_OUT + - REASSIGNED + - SLOT_RELEASED + - TIMEOUT_REFRESHED + - RETRIED_BY_USER + - SENT_TO_WORKER + - RATE_LIMIT_ERROR + - ACKNOWLEDGED + - CREATED + - QUEUED + - SKIPPED + +V1TaskRunMetrics: + type: array + items: + $ref: "#/V1TaskRunMetric" + +V1TaskRunMetric: + type: object + properties: + status: + $ref: "#/V1TaskStatus" + count: + type: integer + required: + - status + - count + +V1TaskPointMetric: + type: object + properties: + time: + type: string + format: date-time + SUCCEEDED: + type: integer + FAILED: + type: integer + required: + - time + - SUCCEEDED + - FAILED + +V1TaskPointMetrics: + type: object + properties: + results: + type: array + items: + $ref: "#/V1TaskPointMetric" + +V1TaskFilter: + type: object + properties: + since: + type: string + format: date-time + until: + type: string + format: date-time + statuses: + type: array + items: + $ref: "#/V1TaskStatus" + workflowIds: + type: array + items: + type: string + format: uuid + additionalMetadata: + type: array + items: + type: string + required: + - since + +V1CancelTaskRequest: + type: object + properties: + externalIds: + type: array + description: A list of external IDs, which can refer to either task or workflow run external IDs + items: + type: string + format: uuid + minLength: 36 + maxLength: 36 + filter: + $ref: "#/V1TaskFilter" + +V1ReplayTaskRequest: + type: object + properties: + externalIds: + type: array + description: A list of external IDs, which can refer to either task or workflow run external IDs + items: + type: string + format: uuid + minLength: 36 + maxLength: 36 + filter: + 
$ref: "#/V1TaskFilter" diff --git a/api-contracts/openapi/components/schemas/v1/workflow_run.yaml b/api-contracts/openapi/components/schemas/v1/workflow_run.yaml new file mode 100644 index 000000000..99281ab70 --- /dev/null +++ b/api-contracts/openapi/components/schemas/v1/workflow_run.yaml @@ -0,0 +1,126 @@ +V1WorkflowRun: + properties: + metadata: + $ref: ".././metadata.yaml#/APIResourceMeta" + status: + $ref: "./task.yaml#/V1TaskStatus" + startedAt: + type: string + format: date-time + description: The timestamp the task run started. + finishedAt: + type: string + format: date-time + description: The timestamp the task run finished. + duration: + type: integer + description: The duration of the task run, in milliseconds. + tenantId: + type: string + description: The ID of the tenant. + example: bb214807-246e-43a5-a25d-41761d1cff9e + minLength: 36 + maxLength: 36 + format: uuid + additionalMetadata: + type: object + description: Additional metadata for the task run. + displayName: + type: string + description: The display name of the task run. + workflowId: + type: string + format: uuid + output: + type: object + description: The output of the task run (for the latest run) + errorMessage: + type: string + description: The error message of the task run (for the latest run) + workflowVersionId: + type: string + format: uuid + description: The ID of the workflow version. + input: + type: object + description: The input of the task run. + createdAt: + type: string + format: date-time + description: The timestamp the task run was created. 
+ required: + - metadata + - id + - status + - tenantId + - displayName + - workflowId + - output + - input + +WorkflowRunShapeItemForWorkflowRunDetails: + type: object + properties: + taskExternalId: + type: string + format: uuid + minLength: 36 + maxLength: 36 + stepId: + type: string + format: uuid + minLength: 36 + maxLength: 36 + childrenStepIds: + type: array + items: + type: string + format: uuid + minLength: 36 + maxLength: 36 + taskName: + type: string + required: + - taskExternalId + - stepId + - childrenStepIds + - taskName + +WorkflowRunShapeForWorkflowRunDetails: + type: array + items: + $ref: "#/WorkflowRunShapeItemForWorkflowRunDetails" + +V1WorkflowRunDetails: + properties: + run: + $ref: "#/V1WorkflowRun" + taskEvents: + type: array + items: + $ref: "./task.yaml#/V1TaskEvent" + description: The list of task events for the workflow run + shape: + $ref: "#/WorkflowRunShapeForWorkflowRunDetails" + tasks: + type: array + items: + $ref: "./task.yaml#/V1TaskSummary" + required: + - run + - taskEvents + - shape + - tasks + +V1TriggerWorkflowRunRequest: + properties: + workflowName: + type: string + description: The name of the workflow. 
+ input: + type: object + additionalMetadata: + type: object + required: + - workflowName + - input diff --git a/api-contracts/openapi/components/schemas/workflow_run.yaml b/api-contracts/openapi/components/schemas/workflow_run.yaml index 895fffaa6..115726471 100644 --- a/api-contracts/openapi/components/schemas/workflow_run.yaml +++ b/api-contracts/openapi/components/schemas/workflow_run.yaml @@ -219,7 +219,6 @@ ScheduledWorkflows: - triggerAt - method - ScheduledWorkflowsMethod: type: string enum: @@ -305,6 +304,20 @@ WorkflowRunsMetricsCounts: CANCELLED: type: integer +TaskRunMetricsCounts: + type: object + properties: + PENDING: + type: integer + RUNNING: + type: integer + SUCCEEDED: + type: integer + FAILED: + type: integer + QUEUED: + type: integer + WorkflowRunsMetrics: type: object properties: @@ -335,6 +348,15 @@ JobRunStatus: - CANCELLED - BACKOFF +V1TaskRunStatus: + type: string + enum: + - PENDING + - RUNNING + - COMPLETED + - FAILED + - CANCELLED + WorkflowRunStatus: type: string enum: diff --git a/api-contracts/openapi/openapi.yaml b/api-contracts/openapi/openapi.yaml index 0942d5f6b..546e9499f 100644 --- a/api-contracts/openapi/openapi.yaml +++ b/api-contracts/openapi/openapi.yaml @@ -20,6 +20,32 @@ components: schemas: $ref: "./components/schemas/_index.yaml" paths: + /api/v1/stable/tasks/{task}: + $ref: "./paths/v1/tasks/tasks.yaml#/getTask" + /api/v1/stable/tasks/{task}/task-events: + $ref: "./paths/v1/tasks/tasks.yaml#/listTaskEvents" + /api/v1/stable/tasks/{task}/logs: + $ref: "./paths/v1/tasks/tasks.yaml#/listLogs" + /api/v1/stable/tenants/{tenant}/tasks/cancel: + $ref: "./paths/v1/tasks/tasks.yaml#/cancelTasks" + /api/v1/stable/tenants/{tenant}/tasks/replay: + $ref: "./paths/v1/tasks/tasks.yaml#/replayTasks" + /api/v1/stable/dags/tasks: + $ref: "./paths/v1/tasks/tasks.yaml#/listTasksByDAGIds" + /api/v1/stable/tenants/{tenant}/workflow-runs: + $ref: "./paths/v1/workflow-runs/workflow_run.yaml#/listWorkflowRuns" + 
/api/v1/stable/tenants/{tenant}/workflow-runs/display-names: + $ref: "./paths/v1/workflow-runs/workflow_run.yaml#/listWorkflowRunDisplayNames" + /api/v1/stable/tenants/{tenant}/workflow-runs/trigger: + $ref: "./paths/v1/workflow-runs/workflow_run.yaml#/trigger" + /api/v1/stable/workflow-runs/{v1-workflow-run}: + $ref: "./paths/v1/workflow-runs/workflow_run.yaml#/getWorkflowRunDetails" + /api/v1/stable/workflow-runs/{v1-workflow-run}/task-events: + $ref: "./paths/v1/workflow-runs/workflow_run.yaml#/listTaskEventsForWorkflowRun" + /api/v1/stable/tenants/{tenant}/task-metrics: + $ref: "./paths/v1/tasks/tasks.yaml#/getTaskStatusMetrics" + /api/v1/stable/tenants/{tenant}/task-point-metrics: + $ref: "./paths/v1/tasks/tasks.yaml#/getTaskPointMetrics" /api/ready: $ref: "./paths/metadata/metadata.yaml#/readiness" /api/live: diff --git a/api-contracts/openapi/paths/v1/tasks/tasks.yaml b/api-contracts/openapi/paths/v1/tasks/tasks.yaml new file mode 100644 index 000000000..7ab429d9a --- /dev/null +++ b/api-contracts/openapi/paths/v1/tasks/tasks.yaml @@ -0,0 +1,443 @@ +listTasksByDAGIds: + get: + description: Lists all tasks that belong to a specific list of dags + operationId: v1-dag:list:tasks + parameters: + - description: The external id of the DAG + in: query + name: dag_ids + required: true + schema: + type: array + items: + type: string + format: uuid + minLength: 36 + maxLength: 36 + - description: The tenant id + in: query + name: tenant + required: true + schema: + type: string + format: uuid + minLength: 36 + maxLength: 36 + responses: + "200": + content: + application/json: + schema: + type: array + items: + $ref: "../../../components/schemas/_index.yaml#/V1DagChildren" + description: The list of tasks + description: Successfully listed the tasks + "400": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: A malformed or bad request + "403": + content: + application/json: + schema: + $ref: 
"../../../components/schemas/_index.yaml#/APIErrors" + description: Forbidden + "501": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Not implemented + summary: List tasks + tags: + - Task + +getTask: + get: + x-resources: ["tenant", "task"] + description: Get a task by id + operationId: v1-task:get + parameters: + - description: The task id + in: path + name: task + required: true + schema: + type: string + format: uuid + minLength: 36 + maxLength: 36 + responses: + "200": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/V1TaskSummary" + description: Successfully retrieved the task + "400": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: A malformed or bad request + "403": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: The task was not found + "501": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Not implemented + summary: Get a task + tags: + - Task + +listTaskEvents: + get: + x-resources: ["tenant", "task"] + description: List events for a task + operationId: v1-task-event:list + parameters: + - description: The task id + in: path + name: task + required: true + schema: + type: string + format: uuid + minLength: 36 + maxLength: 36 + - description: The number to skip + in: query + name: offset + required: false + schema: + type: integer + format: int64 + - description: The number to limit by + in: query + name: limit + required: false + schema: + type: integer + format: int64 + responses: + "200": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/V1TaskEventList" + 
description: Successfully retrieved the events + "400": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: A malformed or bad request + "403": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: The task was not found + "501": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Not implemented + summary: List events for a task + tags: + - Task + +getTaskStatusMetrics: + get: + x-resources: ["tenant"] + description: Get a summary of task run metrics for a tenant + operationId: v1-task:list:status-metrics + parameters: + - description: The tenant id + in: path + name: tenant + required: true + schema: + type: string + format: uuid + minLength: 36 + maxLength: 36 + - description: The start time to get metrics for + in: query + name: since + required: true + schema: + type: string + format: date-time + - description: The workflow id to find runs for + in: query + name: workflow_ids + required: false + schema: + type: array + items: + type: string + format: uuid + minLength: 36 + maxLength: 36 + - description: The parent task's external id + in: query + name: parent_task_external_id + required: false + schema: + type: string + format: uuid + minLength: 36 + maxLength: 36 + responses: + "200": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/V1TaskRunMetrics" + description: Successfully retrieved the task run metrics + "400": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: A malformed or bad request + "403": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Forbidden + 
"501": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Not implemented + summary: Get task metrics + tags: + - Task + +getTaskPointMetrics: + get: + x-resources: ["tenant"] + description: Get a minute by minute breakdown of task metrics for a tenant + operationId: v1-task:get:point-metrics + parameters: + - description: The tenant id + in: path + name: tenant + required: true + schema: + type: string + format: uuid + minLength: 36 + maxLength: 36 + - description: The time after the task was created + in: query + name: createdAfter + example: "2021-01-01T00:00:00Z" + required: false + schema: + type: string + format: date-time + - description: The time before the task was completed + in: query + name: finishedBefore + example: "2021-01-01T00:00:00Z" + required: false + schema: + type: string + format: date-time + responses: + "200": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/V1TaskPointMetrics" + description: Successfully retrieved the task point metrics + "400": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: A malformed or bad request + "403": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Forbidden + "501": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Not implemented + summary: Get task point metrics + tags: + - Task +cancelTasks: + post: + x-resources: ["tenant"] + description: Cancel tasks + operationId: v1-task:cancel + parameters: + - description: The tenant id + in: path + name: tenant + required: true + schema: + type: string + format: uuid + minLength: 36 + maxLength: 36 + requestBody: + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/V1CancelTaskRequest" + description: The tasks to cancel + 
required: true + responses: + "200": + description: Successfully cancelled the tasks + "400": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: A malformed or bad request + "403": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: The task was not found + "501": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Not implemented + summary: Cancel tasks + tags: + - Task +replayTasks: + post: + x-resources: ["tenant"] + description: Replay tasks + operationId: v1-task:replay + parameters: + - description: The tenant id + in: path + name: tenant + required: true + schema: + type: string + format: uuid + minLength: 36 + maxLength: 36 + requestBody: + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/V1ReplayTaskRequest" + description: The tasks to replay + required: true + responses: + "200": + description: Successfully replayed the tasks + "400": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: A malformed or bad request + "403": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: The task was not found + "501": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Not implemented + summary: Replay tasks + tags: + - Task + +listLogs: + get: + x-resources: ["tenant", "task"] + description: Lists log lines for a task + operationId: v1-log-line:list + parameters: + - description: 
The task id + in: path + name: task + required: true + schema: + type: string + format: uuid + minLength: 36 + maxLength: 36 + responses: + "200": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/V1LogLineList" + description: Successfully listed the events + "400": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: A malformed or bad request + "403": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Forbidden + summary: List log lines + tags: + - Log diff --git a/api-contracts/openapi/paths/v1/workflow-runs/workflow_run.yaml b/api-contracts/openapi/paths/v1/workflow-runs/workflow_run.yaml new file mode 100644 index 000000000..e1851ef8f --- /dev/null +++ b/api-contracts/openapi/paths/v1/workflow-runs/workflow_run.yaml @@ -0,0 +1,324 @@ +listWorkflowRuns: + get: + x-resources: ["tenant"] + description: Lists workflow runs for a tenant. 
+ operationId: v1-workflow-run:list + parameters: + - description: The tenant id + in: path + name: tenant + required: true + schema: + type: string + format: uuid + minLength: 36 + maxLength: 36 + - description: The number to skip + in: query + name: offset + required: false + schema: + type: integer + format: int64 + - description: The number to limit by + in: query + name: limit + required: false + schema: + type: integer + format: int64 + - description: A list of statuses to filter by + in: query + name: statuses + required: false + schema: + type: array + items: + $ref: "../../../components/schemas/_index.yaml#/V1TaskStatus" + - description: The earliest date to filter by + in: query + name: since + required: true + schema: + type: string + format: date-time + - description: The latest date to filter by + in: query + name: until + required: false + schema: + type: string + format: date-time + - description: Additional metadata k-v pairs to filter by + in: query + name: additional_metadata + required: false + schema: + type: array + items: + type: string + - description: The workflow ids to find runs for + in: query + name: workflow_ids + required: false + schema: + type: array + items: + type: string + format: uuid + minLength: 36 + maxLength: 36 + - description: The worker id to filter by + in: query + name: worker_id + required: false + schema: + type: string + format: uuid + minLength: 36 + maxLength: 36 + - description: Whether to include DAGs or only to include tasks + in: query + name: only_tasks + required: true + schema: + type: boolean + - description: The parent task external id to filter by + in: query + name: parent_task_external_id + required: false + schema: + type: string + format: uuid + minLength: 36 + maxLength: 36 + responses: + "200": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/V1TaskSummaryList" + description: Successfully listed the tasks + "400": + content: + application/json: + schema: + 
$ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: A malformed or bad request + "403": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Forbidden + "501": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Not implemented + summary: List workflow runs + tags: + - Workflow Runs + +listWorkflowRunDisplayNames: + get: + x-resources: ["tenant"] + description: Lists displayable names of workflow runs for a tenant + operationId: v1-workflow-run:display-names:list + parameters: + - description: The tenant id + in: path + name: tenant + required: true + schema: + type: string + format: uuid + minLength: 36 + maxLength: 36 + - description: The external ids of the workflow runs to get display names for + in: query + name: external_ids + required: true + schema: + type: array + items: + type: string + format: uuid + minLength: 36 + maxLength: 36 + responses: + "200": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/V1WorkflowRunDisplayNameList" + description: Successfully listed the tasks + "400": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: A malformed or bad request + "403": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Forbidden + "501": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Not implemented + summary: List workflow runs + tags: + - Workflow Runs + +listTaskEventsForWorkflowRun: + get: + x-resources: ["tenant", "v1-workflow-run"] + description: List all tasks for a workflow run + operationId: v1-workflow-run:task-events:list + parameters: + - description: The number to skip + in: query + name: offset + required: false + schema: + type: integer + format: int64 + 
- description: The number to limit by + in: query + name: limit + required: false + schema: + type: integer + format: int64 + - description: The workflow run id to find runs for + in: path + name: v1-workflow-run + required: true + schema: + type: string + format: uuid + minLength: 36 + maxLength: 36 + responses: + "200": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/V1TaskEventList" + description: Successfully listed the tasks + "400": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: A malformed or bad request + "403": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Forbidden + "501": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Not implemented + summary: List tasks + tags: + - Workflow Runs + +getWorkflowRunDetails: + get: + x-resources: ["tenant", "v1-workflow-run"] + description: Get a workflow run and its metadata to display on the "detail" page + operationId: v1-workflow-run:get + parameters: + - description: The workflow run id to get + in: path + name: v1-workflow-run + required: true + schema: + type: string + format: uuid + minLength: 36 + maxLength: 36 + responses: + "200": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/V1WorkflowRunDetails" + description: Successfully listed the tasks + "400": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: A malformed or bad request + "403": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Forbidden + "501": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Not implemented + summary: List tasks + tags: + - Workflow Runs + 
+trigger: + post: + x-resources: ["tenant"] + description: Trigger a new workflow run + operationId: v1-workflow-run:create + parameters: + - description: The tenant id + in: path + name: tenant + required: true + schema: + type: string + format: uuid + minLength: 36 + maxLength: 36 + requestBody: + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/V1TriggerWorkflowRunRequest" + description: The workflow run to create + required: true + responses: + "200": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/V1WorkflowRunDetails" + description: Successfully created the workflow run + "400": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: A malformed or bad request + "403": + content: + application/json: + schema: + $ref: "../../../components/schemas/_index.yaml#/APIErrors" + description: Forbidden + summary: Create workflow run + tags: + - Workflow Runs diff --git a/api-contracts/workflows/v1-admin.proto b/api-contracts/workflows/v1-admin.proto new file mode 100644 index 000000000..35841baa7 --- /dev/null +++ b/api-contracts/workflows/v1-admin.proto @@ -0,0 +1,48 @@ +syntax = "proto3"; + +option go_package = "github.com/hatchet-dev/hatchet/internal/services/admin/v1/contracts"; + +import "google/protobuf/timestamp.proto"; + +// AdminService represents a set of RPCs for admin management of tasks, workflows, etc. 
+service AdminService { + rpc CancelTasks(CancelTasksRequest) returns (CancelTasksResponse); + rpc ReplayTasks(ReplayTasksRequest) returns (ReplayTasksResponse); + rpc TriggerWorkflowRun(TriggerWorkflowRunRequest) returns (TriggerWorkflowRunResponse); +} + +message CancelTasksRequest { + repeated string externalIds = 1; // a list of external UUIDs + optional TasksFilter filter = 2; +} + +message ReplayTasksRequest { + repeated string externalIds = 1; // a list of external UUIDs + optional TasksFilter filter = 2; +} + +message TasksFilter { + repeated string statuses = 1; + google.protobuf.Timestamp since = 2; + optional google.protobuf.Timestamp until = 3; + repeated string workflow_ids = 4; + repeated string additional_metadata = 5; +} + +message CancelTasksResponse { + repeated string cancelled_tasks = 1; +} + +message ReplayTasksResponse { + repeated string replayed_tasks = 1; +} + +message TriggerWorkflowRunRequest { + string workflow_name = 1; + bytes input = 2; + bytes additional_metadata = 3; +} + +message TriggerWorkflowRunResponse { + string external_id = 1; +} diff --git a/api/v1/server/authn/middleware.go b/api/v1/server/authn/middleware.go index 8d55575cf..b242b8feb 100644 --- a/api/v1/server/authn/middleware.go +++ b/api/v1/server/authn/middleware.go @@ -6,13 +6,15 @@ import ( "net/http" "strings" + "github.com/jackc/pgx/v5" "github.com/labstack/echo/v4" "github.com/rs/zerolog" "github.com/hatchet-dev/hatchet/api/v1/server/middleware" "github.com/hatchet-dev/hatchet/api/v1/server/middleware/redirect" "github.com/hatchet-dev/hatchet/pkg/config/server" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) type AuthN struct { @@ -144,11 +146,11 @@ func (a *AuthN) handleCookieAuth(c echo.Context) error { return forbidden } - user, err := a.config.APIRepository.User().GetUserByID(userID) + user, err := 
a.config.APIRepository.User().GetUserByID(c.Request().Context(), userID) if err != nil { a.l.Debug().Err(err).Msg("error getting user by id") - if errors.Is(err, db.ErrNotFound) { + if errors.Is(err, pgx.ErrNoRows) { return forbidden } @@ -167,7 +169,7 @@ func (a *AuthN) handleBearerAuth(c echo.Context) error { // a tenant id must exist in the context in order for the bearer auth to succeed, since // these tokens are tenant-scoped - queriedTenant, ok := c.Get("tenant").(*db.TenantModel) + queriedTenant, ok := c.Get("tenant").(*dbsqlc.Tenant) if !ok { a.l.Debug().Msgf("tenant not found in context") @@ -194,7 +196,7 @@ func (a *AuthN) handleBearerAuth(c echo.Context) error { // Verify that the tenant id which exists in the context is the same as the tenant id // in the token. - if queriedTenant.ID != tenantId { + if sqlchelpers.UUIDToStr(queriedTenant.ID) != tenantId { a.l.Debug().Msgf("tenant id in token does not match tenant id in context") return forbidden diff --git a/api/v1/server/authn/session_helpers.go b/api/v1/server/authn/session_helpers.go index b904f1c65..1b334f502 100644 --- a/api/v1/server/authn/session_helpers.go +++ b/api/v1/server/authn/session_helpers.go @@ -8,7 +8,8 @@ import ( "github.com/hatchet-dev/hatchet/pkg/config/server" "github.com/hatchet-dev/hatchet/pkg/random" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) type SessionHelpers struct { @@ -21,7 +22,7 @@ func NewSessionHelpers(config *server.ServerConfig) *SessionHelpers { } } -func (s *SessionHelpers) SaveAuthenticated(c echo.Context, user *db.UserModel) error { +func (s *SessionHelpers) SaveAuthenticated(c echo.Context, user *dbsqlc.User) error { session, err := s.config.SessionStore.Get(c.Request(), s.config.SessionStore.GetName()) if err != nil { @@ -29,7 +30,7 @@ func (s *SessionHelpers) SaveAuthenticated(c echo.Context, user 
*db.UserModel) e } session.Values["authenticated"] = true - session.Values["user_id"] = user.ID + session.Values["user_id"] = sqlchelpers.UUIDToStr(user.ID) return session.Save(c.Request(), c.Response()) } diff --git a/api/v1/server/authz/middleware.go b/api/v1/server/authz/middleware.go index 5c06ca678..47736db45 100644 --- a/api/v1/server/authz/middleware.go +++ b/api/v1/server/authz/middleware.go @@ -9,7 +9,8 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/middleware" "github.com/hatchet-dev/hatchet/pkg/config/server" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) type AuthZ struct { @@ -64,8 +65,8 @@ func (a *AuthZ) handleCookieAuth(c echo.Context, r *middleware.RouteInfo) error } // if tenant is set in the context, verify that the user is a member of the tenant - if tenant, ok := c.Get("tenant").(*db.TenantModel); ok { - user, ok := c.Get("user").(*db.UserModel) + if tenant, ok := c.Get("tenant").(*dbsqlc.Tenant); ok { + user, ok := c.Get("user").(*dbsqlc.User) if !ok { a.l.Debug().Msgf("user not found in context") @@ -74,7 +75,7 @@ func (a *AuthZ) handleCookieAuth(c echo.Context, r *middleware.RouteInfo) error } // check if the user is a member of the tenant - tenantMember, err := a.config.APIRepository.Tenant().GetTenantMemberByUserID(tenant.ID, user.ID) + tenantMember, err := a.config.APIRepository.Tenant().GetTenantMemberByUserID(c.Request().Context(), sqlchelpers.UUIDToStr(tenant.ID), sqlchelpers.UUIDToStr(user.ID)) if err != nil { a.l.Debug().Err(err).Msgf("error getting tenant member") @@ -125,7 +126,7 @@ var permittedWithUnverifiedEmail = []string{ } func (a *AuthZ) ensureVerifiedEmail(c echo.Context, r *middleware.RouteInfo) error { - user, ok := c.Get("user").(*db.UserModel) + user, ok := c.Get("user").(*dbsqlc.User) if !ok { return nil @@ -154,15 +155,15 @@ var adminAndOwnerOnly = []string{ 
"ApiTokenUpdateRevoke", } -func (a *AuthZ) authorizeTenantOperations(tenant *db.TenantModel, tenantMember *db.TenantMemberModel, r *middleware.RouteInfo) error { +func (a *AuthZ) authorizeTenantOperations(tenant *dbsqlc.Tenant, tenantMember *dbsqlc.PopulateTenantMembersRow, r *middleware.RouteInfo) error { // if the user is an owner, they can do anything - if tenantMember.Role == db.TenantMemberRoleOwner { + if tenantMember.Role == dbsqlc.TenantMemberRoleOWNER { return nil } // if the user is an admin, they can do anything at the moment. Some downstream handlers will case on // admin roles, for example admins cannot mark users as owners. - if tenantMember.Role == db.TenantMemberRoleAdmin { + if tenantMember.Role == dbsqlc.TenantMemberRoleADMIN { return nil } diff --git a/api/v1/server/handlers/api-tokens/create.go b/api/v1/server/handlers/api-tokens/create.go index 23dd2e1e1..95ce4f4af 100644 --- a/api/v1/server/handlers/api-tokens/create.go +++ b/api/v1/server/handlers/api-tokens/create.go @@ -7,11 +7,13 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (a *APITokenService) ApiTokenCreate(ctx echo.Context, request gen.ApiTokenCreateRequestObject) (gen.ApiTokenCreateResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) // validate the request if apiErrors, err := a.config.Validator.ValidateAPI(request.Body); err != nil { @@ -34,7 +36,7 @@ func (a *APITokenService) ApiTokenCreate(ctx echo.Context, request gen.ApiTokenC expiresAt = &e } - token, err := a.config.Auth.JWTManager.GenerateTenantToken(ctx.Request().Context(), tenant.ID, request.Body.Name, false, expiresAt) + token, 
err := a.config.Auth.JWTManager.GenerateTenantToken(ctx.Request().Context(), tenantId, request.Body.Name, false, expiresAt) if err != nil { return nil, err diff --git a/api/v1/server/handlers/api-tokens/list.go b/api/v1/server/handlers/api-tokens/list.go index b46088068..7557a147c 100644 --- a/api/v1/server/handlers/api-tokens/list.go +++ b/api/v1/server/handlers/api-tokens/list.go @@ -5,13 +5,15 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (a *APITokenService) ApiTokenList(ctx echo.Context, request gen.ApiTokenListRequestObject) (gen.ApiTokenListResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) - tokens, err := a.config.APIRepository.APIToken().ListAPITokensByTenant(tenant.ID) + tokens, err := a.config.APIRepository.APIToken().ListAPITokensByTenant(ctx.Request().Context(), tenantId) if err != nil { return nil, err @@ -20,7 +22,7 @@ func (a *APITokenService) ApiTokenList(ctx echo.Context, request gen.ApiTokenLis rows := make([]gen.APIToken, len(tokens)) for i := range tokens { - rows[i] = *transformers.ToAPIToken(&tokens[i]) + rows[i] = *transformers.ToAPIToken(tokens[i]) } return gen.ApiTokenList200JSONResponse( diff --git a/api/v1/server/handlers/api-tokens/revoke.go b/api/v1/server/handlers/api-tokens/revoke.go index ec640f18d..3b8288c54 100644 --- a/api/v1/server/handlers/api-tokens/revoke.go +++ b/api/v1/server/handlers/api-tokens/revoke.go @@ -5,11 +5,12 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + 
"github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (a *APITokenService) ApiTokenUpdateRevoke(ctx echo.Context, request gen.ApiTokenUpdateRevokeRequestObject) (gen.ApiTokenUpdateRevokeResponseObject, error) { - apiToken := ctx.Get("api-token").(*db.APITokenModel) + apiToken := ctx.Get("api-token").(*dbsqlc.APIToken) if apiToken.Internal { return gen.ApiTokenUpdateRevoke403JSONResponse( @@ -17,7 +18,7 @@ func (a *APITokenService) ApiTokenUpdateRevoke(ctx echo.Context, request gen.Api ), nil } - err := a.config.APIRepository.APIToken().RevokeAPIToken(apiToken.ID) + err := a.config.APIRepository.APIToken().RevokeAPIToken(ctx.Request().Context(), sqlchelpers.UUIDToStr(apiToken.ID)) if err != nil { return nil, err diff --git a/api/v1/server/handlers/events/bulk_create.go b/api/v1/server/handlers/events/bulk_create.go index d6653bf7f..d4fbc5069 100644 --- a/api/v1/server/handlers/events/bulk_create.go +++ b/api/v1/server/handlers/events/bulk_create.go @@ -10,11 +10,13 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository" "github.com/hatchet-dev/hatchet/pkg/repository/metered" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *EventService) EventCreateBulk(ctx echo.Context, request gen.EventCreateBulkRequestObject) (gen.EventCreateBulkResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) eventOpts := make([]*repository.CreateEventOpts, len(request.Body.Events)) @@ -36,13 +38,13 @@ func (t *EventService) EventCreateBulk(ctx echo.Context, request gen.EventCreate } eventOpts[i] = &repository.CreateEventOpts{ - TenantId: tenant.ID, + TenantId: tenantId, 
Key: event.Key, Data: dataBytes, AdditionalMetadata: additionalMetadata, } } - events, err := t.config.Ingestor.BulkIngestEvent(ctx.Request().Context(), tenant.ID, eventOpts) + events, err := t.config.Ingestor.BulkIngestEvent(ctx.Request().Context(), tenant, eventOpts) if err != nil { diff --git a/api/v1/server/handlers/events/cancel.go b/api/v1/server/handlers/events/cancel.go index 43689f3ef..d8d15d93d 100644 --- a/api/v1/server/handlers/events/cancel.go +++ b/api/v1/server/handlers/events/cancel.go @@ -12,12 +12,13 @@ import ( "github.com/hatchet-dev/hatchet/internal/msgqueue" "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *EventService) EventUpdateCancel(ctx echo.Context, request gen.EventUpdateCancelRequestObject) (gen.EventUpdateCancelResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) eventIds := make([]string, len(request.Body.EventIds)) @@ -30,7 +31,7 @@ func (t *EventService) EventUpdateCancel(ctx echo.Context, request gen.EventUpda for i := range eventIds { eventId := eventIds[i] - runs, err := t.config.EngineRepository.WorkflowRun().ListWorkflowRuns(ctx.Request().Context(), tenant.ID, &repository.ListWorkflowRunsOpts{ + runs, err := t.config.EngineRepository.WorkflowRun().ListWorkflowRuns(ctx.Request().Context(), tenantId, &repository.ListWorkflowRunsOpts{ EventId: &eventId, }) @@ -56,7 +57,7 @@ func (t *EventService) EventUpdateCancel(ctx echo.Context, request gen.EventUpda defer wg.Done() // Lookup step runs for the workflow run - jobRun, err := 
t.config.EngineRepository.JobRun().ListJobRunsForWorkflowRun(ctx.Request().Context(), tenant.ID, runId) + jobRun, err := t.config.EngineRepository.JobRun().ListJobRunsForWorkflowRun(ctx.Request().Context(), tenantId, runId) if err != nil { returnErr = multierror.Append(err, fmt.Errorf("failed to list job runs for workflow run %s", runId)) return @@ -70,7 +71,7 @@ func (t *EventService) EventUpdateCancel(ctx echo.Context, request gen.EventUpda err = t.config.MessageQueue.AddMessage( ctx.Request().Context(), msgqueue.JOB_PROCESSING_QUEUE, - tasktypes.JobRunCancelledToTask(tenant.ID, jobRunId, &reason), + tasktypes.JobRunCancelledToTask(tenantId, jobRunId, &reason), ) if err != nil { returnErr = multierror.Append(err, fmt.Errorf("failed to send cancel task for job run %s", jobRunId)) diff --git a/api/v1/server/handlers/events/create.go b/api/v1/server/handlers/events/create.go index f03e3d4c7..3b0d5518f 100644 --- a/api/v1/server/handlers/events/create.go +++ b/api/v1/server/handlers/events/create.go @@ -9,12 +9,12 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository/metered" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *EventService) EventCreate(ctx echo.Context, request gen.EventCreateRequestObject) (gen.EventCreateResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) // marshal the data object to bytes dataBytes, err := json.Marshal(request.Body.Data) @@ -33,7 +33,7 @@ func (t *EventService) EventCreate(ctx echo.Context, request gen.EventCreateRequ } } - newEvent, err := t.config.Ingestor.IngestEvent(ctx.Request().Context(), tenant.ID, 
request.Body.Key, dataBytes, additionalMetadata) + newEvent, err := t.config.Ingestor.IngestEvent(ctx.Request().Context(), tenant, request.Body.Key, dataBytes, additionalMetadata) if err != nil { if err == metered.ErrResourceExhausted { @@ -45,13 +45,13 @@ func (t *EventService) EventCreate(ctx echo.Context, request gen.EventCreateRequ return nil, err } - dbNewEvent, err := t.config.APIRepository.Event().GetEventById(sqlchelpers.UUIDToStr(newEvent.ID)) + dbNewEvent, err := t.config.APIRepository.Event().GetEventById(ctx.Request().Context(), sqlchelpers.UUIDToStr(newEvent.ID)) if err != nil { return nil, err } return gen.EventCreate200JSONResponse( - *transformers.ToEvent(dbNewEvent), + transformers.ToEvent(dbNewEvent), ), nil } diff --git a/api/v1/server/handlers/events/get.go b/api/v1/server/handlers/events/get.go index 2b1cbbf00..759f1b7c7 100644 --- a/api/v1/server/handlers/events/get.go +++ b/api/v1/server/handlers/events/get.go @@ -5,13 +5,13 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" ) func (t *EventService) EventGet(ctx echo.Context, request gen.EventGetRequestObject) (gen.EventGetResponseObject, error) { - event := ctx.Get("event").(*db.EventModel) + event := ctx.Get("event").(*dbsqlc.Event) return gen.EventGet200JSONResponse( - *transformers.ToEvent(event), + transformers.ToEvent(event), ), nil } diff --git a/api/v1/server/handlers/events/get_data.go b/api/v1/server/handlers/events/get_data.go index 3d5a65d2b..cce25e9f4 100644 --- a/api/v1/server/handlers/events/get_data.go +++ b/api/v1/server/handlers/events/get_data.go @@ -1,21 +1,19 @@ package events import ( - "encoding/json" - "github.com/labstack/echo/v4" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + 
"github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" ) func (t *EventService) EventDataGet(ctx echo.Context, request gen.EventDataGetRequestObject) (gen.EventDataGetResponseObject, error) { - event := ctx.Get("event").(*db.EventModel) + event := ctx.Get("event").(*dbsqlc.Event) var dataStr string - if dataType, ok := event.Data(); ok { - dataStr = string(json.RawMessage(dataType)) + if len(event.Data) > 0 { + dataStr = string(event.Data) } return gen.EventDataGet200JSONResponse( diff --git a/api/v1/server/handlers/events/list.go b/api/v1/server/handlers/events/list.go index 0139de3b1..cd1f02bf1 100644 --- a/api/v1/server/handlers/events/list.go +++ b/api/v1/server/handlers/events/list.go @@ -14,11 +14,13 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *EventService) EventList(ctx echo.Context, request gen.EventListRequestObject) (gen.EventListResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) limit := 50 offset := 0 @@ -59,10 +61,10 @@ func (t *EventService) EventList(ctx echo.Context, request gen.EventListRequestO } if request.Params.Statuses != nil { - statuses := make([]db.WorkflowRunStatus, len(*request.Params.Statuses)) + statuses := make([]dbsqlc.WorkflowRunStatus, len(*request.Params.Statuses)) for i, status := range *request.Params.Statuses { - statuses[i] = db.WorkflowRunStatus(status) + statuses[i] = dbsqlc.WorkflowRunStatus(status) } listOpts.WorkflowRunStatus = statuses @@ -105,7 +107,7 @@ func (t *EventService) EventList(ctx echo.Context, request gen.EventListRequestO dbCtx, cancel := 
context.WithTimeout(ctx.Request().Context(), 30*time.Second) defer cancel() - listRes, err := t.config.APIRepository.Event().ListEvents(dbCtx, tenant.ID, listOpts) + listRes, err := t.config.APIRepository.Event().ListEvents(dbCtx, tenantId, listOpts) if err != nil { return nil, err diff --git a/api/v1/server/handlers/events/list_keys.go b/api/v1/server/handlers/events/list_keys.go index 23799d883..c17da7a0c 100644 --- a/api/v1/server/handlers/events/list_keys.go +++ b/api/v1/server/handlers/events/list_keys.go @@ -4,13 +4,15 @@ import ( "github.com/labstack/echo/v4" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *EventService) EventKeyList(ctx echo.Context, request gen.EventKeyListRequestObject) (gen.EventKeyListResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) - eventKeys, err := t.config.APIRepository.Event().ListEventKeys(tenant.ID) + eventKeys, err := t.config.APIRepository.Event().ListEventKeys(tenantId) if err != nil { return nil, err diff --git a/api/v1/server/handlers/events/replay.go b/api/v1/server/handlers/events/replay.go index 209317ee7..7fe7f4398 100644 --- a/api/v1/server/handlers/events/replay.go +++ b/api/v1/server/handlers/events/replay.go @@ -8,12 +8,13 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository/metered" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t 
*EventService) EventUpdateReplay(ctx echo.Context, request gen.EventUpdateReplayRequestObject) (gen.EventUpdateReplayResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) eventIds := make([]string, len(request.Body.EventIds)) @@ -21,7 +22,7 @@ func (t *EventService) EventUpdateReplay(ctx echo.Context, request gen.EventUpda eventIds[i] = request.Body.EventIds[i].String() } - events, err := t.config.EngineRepository.Event().ListEventsByIds(ctx.Request().Context(), tenant.ID, eventIds) + events, err := t.config.EngineRepository.Event().ListEventsByIds(ctx.Request().Context(), tenantId, eventIds) if err != nil { return nil, err @@ -34,7 +35,7 @@ func (t *EventService) EventUpdateReplay(ctx echo.Context, request gen.EventUpda for i := range events { event := events[i] - newEvent, err := t.config.Ingestor.IngestReplayedEvent(ctx.Request().Context(), tenant.ID, event) + newEvent, err := t.config.Ingestor.IngestReplayedEvent(ctx.Request().Context(), tenant, event) if err == metered.ErrResourceExhausted { return gen.EventUpdateReplay429JSONResponse( @@ -53,7 +54,7 @@ func (t *EventService) EventUpdateReplay(ctx echo.Context, request gen.EventUpda return nil, allErrs } - newEvents, err := t.config.APIRepository.Event().ListEventsById(tenant.ID, newEventIds) + newEvents, err := t.config.APIRepository.Event().ListEventsById(ctx.Request().Context(), tenantId, newEventIds) if err != nil { return nil, err @@ -62,7 +63,7 @@ func (t *EventService) EventUpdateReplay(ctx echo.Context, request gen.EventUpda rows := make([]gen.Event, len(newEvents)) for i := range newEvents { - rows[i] = *transformers.ToEvent(&newEvents[i]) + rows[i] = transformers.ToEvent(newEvents[i]) } return gen.EventUpdateReplay200JSONResponse( diff --git a/api/v1/server/handlers/ingestors/sns.go b/api/v1/server/handlers/ingestors/sns.go index 2b6626fbc..882b5eb4a 100644 --- 
a/api/v1/server/handlers/ingestors/sns.go +++ b/api/v1/server/handlers/ingestors/sns.go @@ -34,7 +34,7 @@ func (i *IngestorsService) SnsUpdate(ctx echo.Context, req gen.SnsUpdateRequestO tenantId := req.Tenant.String() // verify that the tenant and the topic ARN are set in the database - snsInt, err := i.config.APIRepository.SNS().GetSNSIntegration(tenantId, payload.TopicArn) + snsInt, err := i.config.APIRepository.SNS().GetSNSIntegration(ctx.Request().Context(), tenantId, payload.TopicArn) if err != nil { return nil, err @@ -44,6 +44,12 @@ func (i *IngestorsService) SnsUpdate(ctx echo.Context, req gen.SnsUpdateRequestO return nil, fmt.Errorf("SNS integration not found for tenant %s and topic ARN %s", tenantId, payload.TopicArn) } + tenant, err := i.config.APIRepository.Tenant().GetTenantByID(ctx.Request().Context(), tenantId) + + if err != nil { + return nil, err + } + switch payload.Type { case "SubscriptionConfirmation": _, err := payload.Subscribe() @@ -58,7 +64,7 @@ func (i *IngestorsService) SnsUpdate(ctx echo.Context, req gen.SnsUpdateRequestO return nil, err } default: - _, err := i.config.Ingestor.IngestEvent(ctx.Request().Context(), req.Tenant.String(), req.Event, body, nil) + _, err := i.config.Ingestor.IngestEvent(ctx.Request().Context(), tenant, req.Event, body, nil) if err != nil { return nil, err diff --git a/api/v1/server/handlers/ingestors/sns_create.go b/api/v1/server/handlers/ingestors/sns_create.go index 17fa54885..1c4db66f9 100644 --- a/api/v1/server/handlers/ingestors/sns_create.go +++ b/api/v1/server/handlers/ingestors/sns_create.go @@ -7,11 +7,13 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (i *IngestorsService) SnsCreate(ctx echo.Context, req 
gen.SnsCreateRequestObject) (gen.SnsCreateResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) // validate the request if apiErrors, err := i.config.Validator.ValidateAPI(req.Body); err != nil { @@ -25,7 +27,7 @@ func (i *IngestorsService) SnsCreate(ctx echo.Context, req gen.SnsCreateRequestO } // create the SNS integration - snsIntegration, err := i.config.APIRepository.SNS().CreateSNSIntegration(tenant.ID, opts) + snsIntegration, err := i.config.APIRepository.SNS().CreateSNSIntegration(ctx.Request().Context(), tenantId, opts) if err != nil { return nil, err diff --git a/api/v1/server/handlers/ingestors/sns_delete.go b/api/v1/server/handlers/ingestors/sns_delete.go index 23dcabf06..e2f4e2452 100644 --- a/api/v1/server/handlers/ingestors/sns_delete.go +++ b/api/v1/server/handlers/ingestors/sns_delete.go @@ -5,15 +5,17 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (i *IngestorsService) SnsDelete(ctx echo.Context, req gen.SnsDeleteRequestObject) (gen.SnsDeleteResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) - sns := ctx.Get("sns").(*db.SNSIntegrationModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) + sns := ctx.Get("sns").(*dbsqlc.SNSIntegration) // create the SNS integration - err := i.config.APIRepository.SNS().DeleteSNSIntegration(tenant.ID, sns.ID) + err := i.config.APIRepository.SNS().DeleteSNSIntegration(ctx.Request().Context(), tenantId, sqlchelpers.UUIDToStr(sns.ID)) if err != nil { return nil, err diff --git a/api/v1/server/handlers/ingestors/sns_list.go b/api/v1/server/handlers/ingestors/sns_list.go index 42034a251..a83ea0ae6 100644 --- 
a/api/v1/server/handlers/ingestors/sns_list.go +++ b/api/v1/server/handlers/ingestors/sns_list.go @@ -5,15 +5,16 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" - - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (i *IngestorsService) SnsList(ctx echo.Context, req gen.SnsListRequestObject) (gen.SnsListResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) // create the SNS integration - snsIntegrations, err := i.config.APIRepository.SNS().ListSNSIntegrations(tenant.ID) + snsIntegrations, err := i.config.APIRepository.SNS().ListSNSIntegrations(ctx.Request().Context(), tenantId) if err != nil { return nil, err @@ -24,7 +25,7 @@ func (i *IngestorsService) SnsList(ctx echo.Context, req gen.SnsListRequestObjec serverUrl := i.config.Runtime.ServerURL for i := range snsIntegrations { - rows[i] = *transformers.ToSNSIntegration(&snsIntegrations[i], serverUrl) + rows[i] = *transformers.ToSNSIntegration(snsIntegrations[i], serverUrl) } return gen.SnsList200JSONResponse( diff --git a/api/v1/server/handlers/logs/list.go b/api/v1/server/handlers/logs/list.go index 6728ba66b..216ce369d 100644 --- a/api/v1/server/handlers/logs/list.go +++ b/api/v1/server/handlers/logs/list.go @@ -9,12 +9,13 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t 
*LogService) LogLineList(ctx echo.Context, request gen.LogLineListRequestObject) (gen.LogLineListResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) stepRun := ctx.Get("step-run").(*repository.GetStepRunFull) limit := 1000 @@ -60,7 +61,7 @@ func (t *LogService) LogLineList(ctx echo.Context, request gen.LogLineListReques listOpts.Offset = &offset } - listRes, err := t.config.APIRepository.Log().ListLogLines(tenant.ID, listOpts) + listRes, err := t.config.APIRepository.Log().ListLogLines(tenantId, listOpts) if err != nil { return nil, err diff --git a/api/v1/server/handlers/monitoring/probe.go b/api/v1/server/handlers/monitoring/probe.go index ec33adaf5..6af109ae8 100644 --- a/api/v1/server/handlers/monitoring/probe.go +++ b/api/v1/server/handlers/monitoring/probe.go @@ -16,9 +16,8 @@ import ( "github.com/hatchet-dev/hatchet/pkg/client" clientconfig "github.com/hatchet-dev/hatchet/pkg/config/client" "github.com/hatchet-dev/hatchet/pkg/config/shared" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" "github.com/hatchet-dev/hatchet/pkg/worker" ) @@ -28,9 +27,10 @@ func (m *MonitoringService) MonitoringPostRunProbe(ctx echo.Context, request gen return gen.MonitoringPostRunProbe403JSONResponse{}, nil } - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) - if !slices.Contains[[]string](m.permittedTenants, tenant.ID) { + if !slices.Contains[[]string](m.permittedTenants, tenantId) { err := fmt.Errorf("tenant is not a monitoring tenant for this instance") @@ -59,7 +59,7 @@ func (m 
*MonitoringService) MonitoringPostRunProbe(ctx echo.Context, request gen cf := clientconfig.ClientConfigFile{ Token: token, - TenantId: tenant.ID, + TenantId: tenantId, Namespace: randomNamespace(), TLS: clientconfig.ClientTLSConfigFile{ Base: shared.TLSConfigFile{ diff --git a/api/v1/server/handlers/rate-limits/list.go b/api/v1/server/handlers/rate-limits/list.go index 4d075f650..aeb3cca22 100644 --- a/api/v1/server/handlers/rate-limits/list.go +++ b/api/v1/server/handlers/rate-limits/list.go @@ -11,11 +11,13 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *RateLimitService) RateLimitList(ctx echo.Context, request gen.RateLimitListRequestObject) (gen.RateLimitListResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) limit := 50 offset := 0 @@ -50,7 +52,7 @@ func (t *RateLimitService) RateLimitList(ctx echo.Context, request gen.RateLimit dbCtx, cancel := context.WithTimeout(ctx.Request().Context(), 30*time.Second) defer cancel() - listRes, err := t.config.EngineRepository.RateLimit().ListRateLimits(dbCtx, tenant.ID, listOpts) + listRes, err := t.config.EngineRepository.RateLimit().ListRateLimits(dbCtx, tenantId, listOpts) if err != nil { return nil, err diff --git a/api/v1/server/handlers/slack-app/oauth_callback.go b/api/v1/server/handlers/slack-app/oauth_callback.go index 5fe0a6e08..6fb3b7a2e 100644 --- a/api/v1/server/handlers/slack-app/oauth_callback.go +++ b/api/v1/server/handlers/slack-app/oauth_callback.go @@ -62,6 +62,7 @@ func (g *SlackAppService) UserUpdateSlackOauthCallback(ctx echo.Context, _ gen.U } _, err = 
g.config.APIRepository.Slack().UpsertSlackWebhook( + ctx.Request().Context(), tenantId, &repository.UpsertSlackWebhookOpts{ TeamId: resp.Team.ID, diff --git a/api/v1/server/handlers/slack-app/oauth_start.go b/api/v1/server/handlers/slack-app/oauth_start.go index 83c02e25d..2dd231fc1 100644 --- a/api/v1/server/handlers/slack-app/oauth_start.go +++ b/api/v1/server/handlers/slack-app/oauth_start.go @@ -7,12 +7,14 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/authn" "github.com/hatchet-dev/hatchet/api/v1/server/middleware/redirect" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) // Note: we want all errors to redirect, otherwise the user will be greeted with raw JSON in the middle of the login flow. func (g *SlackAppService) UserUpdateSlackOauthStart(ctx echo.Context, _ gen.UserUpdateSlackOauthStartRequestObject) (gen.UserUpdateSlackOauthStartResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) oauth, ok := g.config.AdditionalOAuthConfigs["slack"] @@ -22,7 +24,7 @@ func (g *SlackAppService) UserUpdateSlackOauthStart(ctx echo.Context, _ gen.User sh := authn.NewSessionHelpers(g.config) - if err := sh.SaveKV(ctx, "tenant", tenant.ID); err != nil { + if err := sh.SaveKV(ctx, "tenant", tenantId); err != nil { return nil, redirect.GetRedirectWithError(ctx, g.config.Logger, err, "Could not get cookie. 
Please make sure cookies are enabled.") } diff --git a/api/v1/server/handlers/slack-app/slack_delete.go b/api/v1/server/handlers/slack-app/slack_delete.go index ebfe524ee..bfafe7b4a 100644 --- a/api/v1/server/handlers/slack-app/slack_delete.go +++ b/api/v1/server/handlers/slack-app/slack_delete.go @@ -5,14 +5,16 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (i *SlackAppService) SlackWebhookDelete(ctx echo.Context, req gen.SlackWebhookDeleteRequestObject) (gen.SlackWebhookDeleteResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) - slack := ctx.Get("slack").(*db.SlackAppWebhookModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) + slack := ctx.Get("slack").(*dbsqlc.SlackAppWebhook) - err := i.config.APIRepository.Slack().DeleteSlackWebhook(tenant.ID, slack.ID) + err := i.config.APIRepository.Slack().DeleteSlackWebhook(ctx.Request().Context(), tenantId, sqlchelpers.UUIDToStr(slack.ID)) if err != nil { return nil, err diff --git a/api/v1/server/handlers/slack-app/slack_list.go b/api/v1/server/handlers/slack-app/slack_list.go index ebafff24d..6bb9c28ad 100644 --- a/api/v1/server/handlers/slack-app/slack_list.go +++ b/api/v1/server/handlers/slack-app/slack_list.go @@ -5,14 +5,15 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" - - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (s *SlackAppService) SlackWebhookList(ctx echo.Context, req gen.SlackWebhookListRequestObject) (gen.SlackWebhookListResponseObject, error) { - tenant := 
ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) - webhooks, err := s.config.APIRepository.Slack().ListSlackWebhooks(tenant.ID) + webhooks, err := s.config.APIRepository.Slack().ListSlackWebhooks(ctx.Request().Context(), tenantId) if err != nil { return nil, err @@ -21,7 +22,7 @@ func (s *SlackAppService) SlackWebhookList(ctx echo.Context, req gen.SlackWebhoo rows := make([]gen.SlackWebhook, len(webhooks)) for i := range webhooks { - rows[i] = *transformers.ToSlackWebhook(&webhooks[i]) + rows[i] = *transformers.ToSlackWebhook(webhooks[i]) } return gen.SlackWebhookList200JSONResponse( diff --git a/api/v1/server/handlers/step-runs/cancel.go b/api/v1/server/handlers/step-runs/cancel.go index a309b41f1..83dde67bc 100644 --- a/api/v1/server/handlers/step-runs/cancel.go +++ b/api/v1/server/handlers/step-runs/cancel.go @@ -12,13 +12,13 @@ import ( "github.com/hatchet-dev/hatchet/internal/msgqueue" "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *StepRunService) StepRunUpdateCancel(ctx echo.Context, request gen.StepRunUpdateCancelRequestObject) (gen.StepRunUpdateCancelResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) stepRun := ctx.Get("step-run").(*repository.GetStepRunFull) // check to see if the step run is in a running or pending state @@ -36,7 +36,7 @@ func (t *StepRunService) StepRunUpdateCancel(ctx echo.Context, request gen.StepR engineStepRun, err := 
t.config.EngineRepository.StepRun().GetStepRunForEngine( ctx.Request().Context(), - tenant.ID, + tenantId, sqlchelpers.UUIDToStr(stepRun.ID), ) diff --git a/api/v1/server/handlers/step-runs/list_archives.go b/api/v1/server/handlers/step-runs/list_archives.go index 68d279303..72ebb9af2 100644 --- a/api/v1/server/handlers/step-runs/list_archives.go +++ b/api/v1/server/handlers/step-runs/list_archives.go @@ -8,7 +8,7 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *StepRunService) StepRunListArchives(ctx echo.Context, request gen.StepRunListArchivesRequestObject) (gen.StepRunListArchivesResponseObject, error) { diff --git a/api/v1/server/handlers/step-runs/list_events.go b/api/v1/server/handlers/step-runs/list_events.go index 9520a995e..d1a04bceb 100644 --- a/api/v1/server/handlers/step-runs/list_events.go +++ b/api/v1/server/handlers/step-runs/list_events.go @@ -8,7 +8,7 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *StepRunService) StepRunListEvents(ctx echo.Context, request gen.StepRunListEventsRequestObject) (gen.StepRunListEventsResponseObject, error) { diff --git a/api/v1/server/handlers/step-runs/list_events_by_workflow_run.go b/api/v1/server/handlers/step-runs/list_events_by_workflow_run.go index 6b3d9172c..7c2625235 100644 --- a/api/v1/server/handlers/step-runs/list_events_by_workflow_run.go +++ b/api/v1/server/handlers/step-runs/list_events_by_workflow_run.go @@ -8,11 +8,13 @@ import ( 
"github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *StepRunService) WorkflowRunListStepRunEvents(ctx echo.Context, request gen.WorkflowRunListStepRunEventsRequestObject) (gen.WorkflowRunListStepRunEventsResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) reqCtx, cancel := context.WithTimeout(ctx.Request().Context(), 5*time.Second) defer cancel() @@ -21,7 +23,7 @@ func (t *StepRunService) WorkflowRunListStepRunEvents(ctx echo.Context, request listRes, err := t.config.APIRepository.StepRun().ListStepRunEventsByWorkflowRunId( reqCtx, - tenant.ID, + tenantId, request.WorkflowRun.String(), lastId, ) diff --git a/api/v1/server/handlers/step-runs/rerun.go b/api/v1/server/handlers/step-runs/rerun.go index abafe5e6f..fcdeb73c8 100644 --- a/api/v1/server/handlers/step-runs/rerun.go +++ b/api/v1/server/handlers/step-runs/rerun.go @@ -15,18 +15,19 @@ import ( "github.com/hatchet-dev/hatchet/internal/msgqueue" "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *StepRunService) StepRunUpdateRerun(ctx echo.Context, request gen.StepRunUpdateRerunRequestObject) (gen.StepRunUpdateRerunResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) stepRun := 
ctx.Get("step-run").(*repository.GetStepRunFull) // preflight check to verify step run status and worker availability err := t.config.EngineRepository.StepRun().PreflightCheckReplayStepRun( ctx.Request().Context(), - tenant.ID, + tenantId, sqlchelpers.UUIDToStr(stepRun.ID), ) @@ -80,7 +81,7 @@ func (t *StepRunService) StepRunUpdateRerun(ctx echo.Context, request gen.StepRu engineStepRun, err := t.config.EngineRepository.StepRun().GetStepRunForEngine( ctx.Request().Context(), - tenant.ID, + tenantId, sqlchelpers.UUIDToStr(stepRun.ID), ) diff --git a/api/v1/server/handlers/tenants/create.go b/api/v1/server/handlers/tenants/create.go index c4a8cb676..ed4840fc2 100644 --- a/api/v1/server/handlers/tenants/create.go +++ b/api/v1/server/handlers/tenants/create.go @@ -4,18 +4,19 @@ import ( "context" "errors" + "github.com/jackc/pgx/v5" "github.com/labstack/echo/v4" "github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *TenantService) TenantCreate(ctx echo.Context, request gen.TenantCreateRequestObject) (gen.TenantCreateResponseObject, error) { - user := ctx.Get("user").(*db.UserModel) + user := ctx.Get("user").(*dbsqlc.User) if !t.config.Runtime.AllowCreateTenant { return gen.TenantCreate400JSONResponse( @@ -31,16 +32,16 @@ func (t *TenantService) TenantCreate(ctx echo.Context, request gen.TenantCreateR } // determine if a tenant with the slug already exists - existingTenant, err := t.config.APIRepository.Tenant().GetTenantBySlug(request.Body.Slug) + _, err := 
t.config.APIRepository.Tenant().GetTenantBySlug(ctx.Request().Context(), request.Body.Slug) - if err != nil && !errors.Is(err, db.ErrNotFound) { + if err != nil && !errors.Is(err, pgx.ErrNoRows) { return nil, err } - if existingTenant != nil { + if err == nil { // just return bad request return gen.TenantCreate400JSONResponse( - apierrors.NewAPIErrors("Tenant with the slug already exists."), + apierrors.NewAPIErrors("Tenant with that slug already exists."), ), nil } @@ -54,7 +55,7 @@ func (t *TenantService) TenantCreate(ctx echo.Context, request gen.TenantCreateR } // write the user to the db - tenant, err := t.config.APIRepository.Tenant().CreateTenant(createOpts) + tenant, err := t.config.APIRepository.Tenant().CreateTenant(ctx.Request().Context(), createOpts) if err != nil { return nil, err @@ -69,8 +70,8 @@ func (t *TenantService) TenantCreate(ctx echo.Context, request gen.TenantCreateR } // add the user as an owner of the tenant - _, err = t.config.APIRepository.Tenant().CreateTenantMember(tenantId, &repository.CreateTenantMemberOpts{ - UserId: user.ID, + _, err = t.config.APIRepository.Tenant().CreateTenantMember(ctx.Request().Context(), tenantId, &repository.CreateTenantMemberOpts{ + UserId: sqlchelpers.UUIDToStr(user.ID), Role: "OWNER", }) @@ -85,12 +86,12 @@ func (t *TenantService) TenantCreate(ctx echo.Context, request gen.TenantCreateR t.config.Analytics.Enqueue( "tenant:create", - user.ID, + sqlchelpers.UUIDToStr(user.ID), &tenantId, nil, ) return gen.TenantCreate200JSONResponse( - *transformers.ToTenantSqlc(tenant), + *transformers.ToTenant(tenant), ), nil } diff --git a/api/v1/server/handlers/tenants/create_email_group.go b/api/v1/server/handlers/tenants/create_email_group.go index f9ec6008c..9f96def4c 100644 --- a/api/v1/server/handlers/tenants/create_email_group.go +++ b/api/v1/server/handlers/tenants/create_email_group.go @@ -6,11 +6,13 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" 
"github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *TenantService) AlertEmailGroupCreate(ctx echo.Context, request gen.AlertEmailGroupCreateRequestObject) (gen.AlertEmailGroupCreateResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) // validate the request if apiErrors, err := t.config.Validator.ValidateAPI(request.Body); err != nil { @@ -24,7 +26,7 @@ func (t *TenantService) AlertEmailGroupCreate(ctx echo.Context, request gen.Aler Emails: request.Body.Emails, } - emailGroup, err := t.config.APIRepository.TenantAlertingSettings().CreateTenantAlertGroup(tenant.ID, createOpts) + emailGroup, err := t.config.APIRepository.TenantAlertingSettings().CreateTenantAlertGroup(ctx.Request().Context(), tenantId, createOpts) if err != nil { return nil, err diff --git a/api/v1/server/handlers/tenants/create_invite.go b/api/v1/server/handlers/tenants/create_invite.go index adb7127e4..60f034eec 100644 --- a/api/v1/server/handlers/tenants/create_invite.go +++ b/api/v1/server/handlers/tenants/create_invite.go @@ -11,13 +11,15 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/internal/integrations/email" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *TenantService) TenantInviteCreate(ctx echo.Context, request gen.TenantInviteCreateRequestObject) (gen.TenantInviteCreateResponseObject, error) { - user := ctx.Get("user").(*db.UserModel) - tenant := 
ctx.Get("tenant").(*db.TenantModel) - tenantMember := ctx.Get("tenant-member").(*db.TenantMemberModel) + user := ctx.Get("user").(*dbsqlc.User) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) + tenantMember := ctx.Get("tenant-member").(*dbsqlc.PopulateTenantMembersRow) if !t.config.Runtime.AllowInvites { t.config.Logger.Warn().Msg("tenant invites are disabled") return gen.TenantInviteCreate400JSONResponse( @@ -34,7 +36,7 @@ func (t *TenantService) TenantInviteCreate(ctx echo.Context, request gen.TenantI } // ensure that this user isn't already a member of the tenant - if _, err := t.config.APIRepository.Tenant().GetTenantMemberByEmail(tenant.ID, request.Body.Email); err == nil { + if _, err := t.config.APIRepository.Tenant().GetTenantMemberByEmail(ctx.Request().Context(), tenantId, request.Body.Email); err == nil { t.config.Logger.Warn().Msg("this user is already a member of this tenant") return gen.TenantInviteCreate400JSONResponse( apierrors.NewAPIErrors("this user is already a member of this tenant"), @@ -42,7 +44,7 @@ func (t *TenantService) TenantInviteCreate(ctx echo.Context, request gen.TenantI } // if user is not an owner, they cannot change a role to owner - if tenantMember.Role != db.TenantMemberRoleOwner && request.Body.Role == gen.OWNER { + if tenantMember.Role != dbsqlc.TenantMemberRoleOWNER && request.Body.Role == gen.OWNER { t.config.Logger.Warn().Msg("only an owner can change a role to owner") return gen.TenantInviteCreate400JSONResponse( apierrors.NewAPIErrors("only an owner can change a role to owner"), @@ -59,7 +61,7 @@ func (t *TenantService) TenantInviteCreate(ctx echo.Context, request gen.TenantI } // create the invite - invite, err := t.config.APIRepository.TenantInvite().CreateTenantInvite(tenant.ID, createOpts) + invite, err := t.config.APIRepository.TenantInvite().CreateTenantInvite(ctx.Request().Context(), tenantId, createOpts) if err != nil { t.config.Logger.Err(err).Msg("could not create 
tenant invite") @@ -76,8 +78,8 @@ func (t *TenantService) TenantInviteCreate(ctx echo.Context, request gen.TenantI name := user.Email - if userName, ok := user.Name(); ok && userName != "" { - name = userName + if user.Name.Valid { + name = user.Name.String } if err := t.config.Email.SendTenantInviteEmail(emailCtx, invite.InviteeEmail, email.TenantInviteEmailData{ @@ -90,8 +92,8 @@ func (t *TenantService) TenantInviteCreate(ctx echo.Context, request gen.TenantI }() t.config.Analytics.Enqueue("user-invite:create", - user.ID, - &invite.TenantID, + sqlchelpers.UUIDToStr(user.ID), + &tenantId, nil, ) diff --git a/api/v1/server/handlers/tenants/delete_email_group.go b/api/v1/server/handlers/tenants/delete_email_group.go index 34145b385..baa96ef38 100644 --- a/api/v1/server/handlers/tenants/delete_email_group.go +++ b/api/v1/server/handlers/tenants/delete_email_group.go @@ -4,15 +4,17 @@ import ( "github.com/labstack/echo/v4" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *TenantService) AlertEmailGroupDelete(ctx echo.Context, request gen.AlertEmailGroupDeleteRequestObject) (gen.AlertEmailGroupDeleteResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) - emailGroup := ctx.Get("alert-email-group").(*db.TenantAlertEmailGroupModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) + emailGroup := ctx.Get("alert-email-group").(*dbsqlc.TenantAlertEmailGroup) // delete the invite - err := t.config.APIRepository.TenantAlertingSettings().DeleteTenantAlertGroup(tenant.ID, emailGroup.ID) + err := t.config.APIRepository.TenantAlertingSettings().DeleteTenantAlertGroup(ctx.Request().Context(), tenantId, sqlchelpers.UUIDToStr(emailGroup.ID)) if err != nil { return nil, err diff --git 
a/api/v1/server/handlers/tenants/delete_invite.go b/api/v1/server/handlers/tenants/delete_invite.go index 0132de426..84bd17821 100644 --- a/api/v1/server/handlers/tenants/delete_invite.go +++ b/api/v1/server/handlers/tenants/delete_invite.go @@ -5,14 +5,15 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *TenantService) TenantInviteDelete(ctx echo.Context, request gen.TenantInviteDeleteRequestObject) (gen.TenantInviteDeleteResponseObject, error) { - invite := ctx.Get("tenant-invite").(*db.TenantInviteLinkModel) + invite := ctx.Get("tenant-invite").(*dbsqlc.TenantInviteLink) // delete the invite - err := t.config.APIRepository.TenantInvite().DeleteTenantInvite(invite.ID) + err := t.config.APIRepository.TenantInvite().DeleteTenantInvite(ctx.Request().Context(), sqlchelpers.UUIDToStr(invite.ID)) if err != nil { return nil, err diff --git a/api/v1/server/handlers/tenants/delete_member.go b/api/v1/server/handlers/tenants/delete_member.go index 17b7b1f07..5353b18c9 100644 --- a/api/v1/server/handlers/tenants/delete_member.go +++ b/api/v1/server/handlers/tenants/delete_member.go @@ -5,38 +5,40 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *TenantService) TenantMemberDelete(ctx echo.Context, request gen.TenantMemberDeleteRequestObject) (gen.TenantMemberDeleteResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) - tenantMember := ctx.Get("tenant-member").(*db.TenantMemberModel) + tenant 
:= ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) + tenantMember := ctx.Get("tenant-member").(*dbsqlc.PopulateTenantMembersRow) - if tenantMember.Role != db.TenantMemberRoleOwner { + if tenantMember.Role != dbsqlc.TenantMemberRoleOWNER { return gen.TenantMemberDelete403JSONResponse( apierrors.NewAPIErrors("Only owners can delete members"), ), nil } - memberToDelete, err := t.config.APIRepository.Tenant().GetTenantMemberByID(request.Member.String()) + memberToDelete, err := t.config.APIRepository.Tenant().GetTenantMemberByID(ctx.Request().Context(), request.Member.String()) if err != nil { return nil, err } - if tenantMember.UserID == memberToDelete.UserID { + if sqlchelpers.UUIDToStr(tenantMember.UserId) == sqlchelpers.UUIDToStr(memberToDelete.UserId) { return gen.TenantMemberDelete403JSONResponse( apierrors.NewAPIErrors("You cannot delete yourself"), ), nil } - if memberToDelete.TenantID != tenant.ID { + if sqlchelpers.UUIDToStr(memberToDelete.TenantId) != tenantId { return gen.TenantMemberDelete404JSONResponse( apierrors.NewAPIErrors("Member not found"), ), nil } - _, err = t.config.APIRepository.Tenant().DeleteTenantMember(memberToDelete.ID) + err = t.config.APIRepository.Tenant().DeleteTenantMember(ctx.Request().Context(), sqlchelpers.UUIDToStr(memberToDelete.ID)) if err != nil { return nil, err diff --git a/api/v1/server/handlers/tenants/get_alerting_settings.go b/api/v1/server/handlers/tenants/get_alerting_settings.go index 3af6961ff..84523b7d2 100644 --- a/api/v1/server/handlers/tenants/get_alerting_settings.go +++ b/api/v1/server/handlers/tenants/get_alerting_settings.go @@ -5,19 +5,21 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t 
*TenantService) TenantAlertingSettingsGet(ctx echo.Context, request gen.TenantAlertingSettingsGetRequestObject) (gen.TenantAlertingSettingsGetResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) - tenantAlerting, err := t.config.APIRepository.TenantAlertingSettings().GetTenantAlertingSettings(tenant.ID) + tenantAlerting, err := t.config.APIRepository.TenantAlertingSettings().GetTenantAlertingSettings(ctx.Request().Context(), tenantId) if err != nil { return nil, err } return gen.TenantAlertingSettingsGet200JSONResponse( - *transformers.ToTenantAlertingSettings(tenantAlerting), + *transformers.ToTenantAlertingSettings(tenantAlerting.Settings), ), nil } diff --git a/api/v1/server/handlers/tenants/get_queue_metrics.go b/api/v1/server/handlers/tenants/get_queue_metrics.go index 0d2092f25..4756df18d 100644 --- a/api/v1/server/handlers/tenants/get_queue_metrics.go +++ b/api/v1/server/handlers/tenants/get_queue_metrics.go @@ -9,11 +9,13 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *TenantService) TenantGetQueueMetrics(ctx echo.Context, request gen.TenantGetQueueMetricsRequestObject) (gen.TenantGetQueueMetricsResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) opts := repository.GetQueueMetricsOpts{} @@ -38,13 +40,13 @@ func (t *TenantService) TenantGetQueueMetrics(ctx echo.Context, request gen.Tena opts.WorkflowIds = *request.Params.Workflows } - metrics, err := 
t.config.APIRepository.Tenant().GetQueueMetrics(ctx.Request().Context(), tenant.ID, &opts) + metrics, err := t.config.APIRepository.Tenant().GetQueueMetrics(ctx.Request().Context(), tenantId, &opts) if err != nil { return nil, err } - stepRunQueueCounts, err := t.config.EngineRepository.StepRun().GetQueueCounts(ctx.Request().Context(), tenant.ID) + stepRunQueueCounts, err := t.config.EngineRepository.StepRun().GetQueueCounts(ctx.Request().Context(), tenantId) if err != nil { return nil, err diff --git a/api/v1/server/handlers/tenants/get_resource_policy.go b/api/v1/server/handlers/tenants/get_resource_policy.go index f7cb9831b..bc83ea51f 100644 --- a/api/v1/server/handlers/tenants/get_resource_policy.go +++ b/api/v1/server/handlers/tenants/get_resource_policy.go @@ -7,13 +7,15 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *TenantService) TenantResourcePolicyGet(ctx echo.Context, request gen.TenantResourcePolicyGetRequestObject) (gen.TenantResourcePolicyGetResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) - limits, err := t.config.EntitlementRepository.TenantLimit().GetLimits(context.Background(), tenant.ID) + limits, err := t.config.EntitlementRepository.TenantLimit().GetLimits(context.Background(), tenantId) if err != nil { return nil, err diff --git a/api/v1/server/handlers/tenants/get_step_run_queue_metrics.go b/api/v1/server/handlers/tenants/get_step_run_queue_metrics.go index 1f9516cc0..d35c7c0b9 100644 --- a/api/v1/server/handlers/tenants/get_step_run_queue_metrics.go +++ b/api/v1/server/handlers/tenants/get_step_run_queue_metrics.go @@ -1,16 +1,48 @@ 
package tenants import ( + "fmt" + "github.com/labstack/echo/v4" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *TenantService) TenantGetStepRunQueueMetrics(ctx echo.Context, request gen.TenantGetStepRunQueueMetricsRequestObject) (gen.TenantGetStepRunQueueMetricsResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) - stepRunQueueCounts, err := t.config.EngineRepository.StepRun().GetQueueCounts(ctx.Request().Context(), tenant.ID) + switch tenant.Version { + case dbsqlc.TenantMajorEngineVersionV0: + return t.tenantGetStepRunQueueMetricsV0(ctx, tenant, request) + case dbsqlc.TenantMajorEngineVersionV1: + return t.tenantGetStepRunQueueMetricsV1(ctx, tenant, request) + default: + return nil, fmt.Errorf("unsupported tenant version: %s", string(tenant.Version)) + } +} + +func (t *TenantService) tenantGetStepRunQueueMetricsV0(ctx echo.Context, tenant *dbsqlc.Tenant, request gen.TenantGetStepRunQueueMetricsRequestObject) (gen.TenantGetStepRunQueueMetricsResponseObject, error) { + tenantId := sqlchelpers.UUIDToStr(tenant.ID) + + stepRunQueueCounts, err := t.config.EngineRepository.StepRun().GetQueueCounts(ctx.Request().Context(), tenantId) + + if err != nil { + return nil, err + } + + resp := gen.TenantStepRunQueueMetrics{ + Queues: &stepRunQueueCounts, + } + + return gen.TenantGetStepRunQueueMetrics200JSONResponse(resp), nil +} + +func (t *TenantService) tenantGetStepRunQueueMetricsV1(ctx echo.Context, tenant *dbsqlc.Tenant, request gen.TenantGetStepRunQueueMetricsRequestObject) (gen.TenantGetStepRunQueueMetricsResponseObject, error) { + tenantId := sqlchelpers.UUIDToStr(tenant.ID) + + stepRunQueueCounts, err := t.config.V1.Tasks().GetQueueCounts(ctx.Request().Context(), tenantId) if err != nil { 
return nil, err diff --git a/api/v1/server/handlers/tenants/list_email_groups.go b/api/v1/server/handlers/tenants/list_email_groups.go index caf8400f7..6f1334352 100644 --- a/api/v1/server/handlers/tenants/list_email_groups.go +++ b/api/v1/server/handlers/tenants/list_email_groups.go @@ -5,13 +5,15 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *TenantService) AlertEmailGroupList(ctx echo.Context, request gen.AlertEmailGroupListRequestObject) (gen.AlertEmailGroupListResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) - emailGroups, err := t.config.APIRepository.TenantAlertingSettings().ListTenantAlertGroups(tenant.ID) + emailGroups, err := t.config.APIRepository.TenantAlertingSettings().ListTenantAlertGroups(ctx.Request().Context(), tenantId) if err != nil { return nil, err @@ -20,7 +22,7 @@ func (t *TenantService) AlertEmailGroupList(ctx echo.Context, request gen.AlertE rows := make([]gen.TenantAlertEmailGroup, len(emailGroups)) for i := range emailGroups { - rows[i] = *transformers.ToTenantAlertEmailGroup(&emailGroups[i]) + rows[i] = *transformers.ToTenantAlertEmailGroup(emailGroups[i]) } return gen.AlertEmailGroupList200JSONResponse{ diff --git a/api/v1/server/handlers/tenants/list_invites.go b/api/v1/server/handlers/tenants/list_invites.go index cb665b8a7..05e37b50d 100644 --- a/api/v1/server/handlers/tenants/list_invites.go +++ b/api/v1/server/handlers/tenants/list_invites.go @@ -6,13 +6,15 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" 
"github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *TenantService) TenantInviteList(ctx echo.Context, request gen.TenantInviteListRequestObject) (gen.TenantInviteListResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) - tenantInvites, err := t.config.APIRepository.TenantInvite().ListTenantInvitesByTenantId(tenant.ID, &repository.ListTenantInvitesOpts{ + tenantInvites, err := t.config.APIRepository.TenantInvite().ListTenantInvitesByTenantId(ctx.Request().Context(), tenantId, &repository.ListTenantInvitesOpts{ Expired: repository.BoolPtr(false), Status: repository.StringPtr("PENDING"), }) @@ -24,7 +26,7 @@ func (t *TenantService) TenantInviteList(ctx echo.Context, request gen.TenantInv rows := make([]gen.TenantInvite, len(tenantInvites)) for i := range tenantInvites { - rows[i] = *transformers.ToTenantInviteLink(&tenantInvites[i]) + rows[i] = *transformers.ToTenantInviteLink(tenantInvites[i]) } return gen.TenantInviteList200JSONResponse{ diff --git a/api/v1/server/handlers/tenants/list_members.go b/api/v1/server/handlers/tenants/list_members.go index 865e65151..d683a674d 100644 --- a/api/v1/server/handlers/tenants/list_members.go +++ b/api/v1/server/handlers/tenants/list_members.go @@ -5,13 +5,15 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *TenantService) TenantMemberList(ctx echo.Context, request gen.TenantMemberListRequestObject) 
(gen.TenantMemberListResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) - members, err := t.config.APIRepository.Tenant().ListTenantMembers(tenant.ID) + members, err := t.config.APIRepository.Tenant().ListTenantMembers(ctx.Request().Context(), tenantId) if err != nil { return nil, err @@ -20,7 +22,7 @@ func (t *TenantService) TenantMemberList(ctx echo.Context, request gen.TenantMem rows := make([]gen.TenantMember, len(members)) for i := range members { - rows[i] = *transformers.ToTenantMember(&members[i]) + rows[i] = *transformers.ToTenantMember(members[i]) } return gen.TenantMemberList200JSONResponse{ diff --git a/api/v1/server/handlers/tenants/update.go b/api/v1/server/handlers/tenants/update.go index 0c77540bb..4ee04fae9 100644 --- a/api/v1/server/handlers/tenants/update.go +++ b/api/v1/server/handlers/tenants/update.go @@ -6,11 +6,13 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *TenantService) TenantUpdate(ctx echo.Context, request gen.TenantUpdateRequestObject) (gen.TenantUpdateResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) // validate the request if apiErrors, err := t.config.Validator.ValidateAPI(request.Body); err != nil { @@ -34,8 +36,15 @@ func (t *TenantService) TenantUpdate(ctx echo.Context, request gen.TenantUpdateR updateOpts.Name = request.Body.Name } + if request.Body.Version != nil { + updateOpts.Version = &dbsqlc.NullTenantMajorEngineVersion{ + Valid: true, + 
TenantMajorEngineVersion: dbsqlc.TenantMajorEngineVersion(*request.Body.Version), + } + } + // update the tenant - tenant, err := t.config.APIRepository.Tenant().UpdateTenant(tenant.ID, updateOpts) + tenant, err := t.config.APIRepository.Tenant().UpdateTenant(ctx.Request().Context(), tenantId, updateOpts) if err != nil { return nil, err @@ -47,7 +56,8 @@ func (t *TenantService) TenantUpdate(ctx echo.Context, request gen.TenantUpdateR request.Body.EnableWorkflowRunFailureAlerts != nil { _, err = t.config.APIRepository.TenantAlertingSettings().UpsertTenantAlertingSettings( - tenant.ID, + ctx.Request().Context(), + tenantId, &repository.UpsertTenantAlertingSettingsOpts{ MaxFrequency: request.Body.MaxAlertingFrequency, EnableExpiringTokenAlerts: request.Body.EnableExpiringTokenAlerts, diff --git a/api/v1/server/handlers/tenants/update_email_group.go b/api/v1/server/handlers/tenants/update_email_group.go index 9e4bf31b6..0138ed0a6 100644 --- a/api/v1/server/handlers/tenants/update_email_group.go +++ b/api/v1/server/handlers/tenants/update_email_group.go @@ -6,11 +6,12 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *TenantService) AlertEmailGroupUpdate(ctx echo.Context, request gen.AlertEmailGroupUpdateRequestObject) (gen.AlertEmailGroupUpdateResponseObject, error) { - emailGroup := ctx.Get("alert-email-group").(*db.TenantAlertEmailGroupModel) + emailGroup := ctx.Get("alert-email-group").(*dbsqlc.TenantAlertEmailGroup) // validate the request if apiErrors, err := t.config.Validator.ValidateAPI(request.Body); err != nil { @@ -24,7 +25,7 @@ func (t *TenantService) AlertEmailGroupUpdate(ctx echo.Context, request gen.Aler Emails: 
request.Body.Emails, } - emailGroup, err := t.config.APIRepository.TenantAlertingSettings().UpdateTenantAlertGroup(emailGroup.ID, updateOpts) + emailGroup, err := t.config.APIRepository.TenantAlertingSettings().UpdateTenantAlertGroup(ctx.Request().Context(), sqlchelpers.UUIDToStr(emailGroup.ID), updateOpts) if err != nil { return nil, err diff --git a/api/v1/server/handlers/tenants/update_invite.go b/api/v1/server/handlers/tenants/update_invite.go index 2ec635a0f..91db1d76c 100644 --- a/api/v1/server/handlers/tenants/update_invite.go +++ b/api/v1/server/handlers/tenants/update_invite.go @@ -7,12 +7,13 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *TenantService) TenantInviteUpdate(ctx echo.Context, request gen.TenantInviteUpdateRequestObject) (gen.TenantInviteUpdateResponseObject, error) { - tenantMember := ctx.Get("tenant-member").(*db.TenantMemberModel) - invite := ctx.Get("tenant-invite").(*db.TenantInviteLinkModel) + tenantMember := ctx.Get("tenant-member").(*dbsqlc.PopulateTenantMembersRow) + invite := ctx.Get("tenant-invite").(*dbsqlc.TenantInviteLink) // validate the request if apiErrors, err := t.config.Validator.ValidateAPI(request.Body); err != nil { @@ -22,7 +23,7 @@ func (t *TenantService) TenantInviteUpdate(ctx echo.Context, request gen.TenantI } // if user is not an owner, they cannot change a role to owner - if tenantMember.Role != db.TenantMemberRoleOwner && request.Body.Role == gen.OWNER { + if tenantMember.Role != dbsqlc.TenantMemberRoleOWNER && request.Body.Role == gen.OWNER { return gen.TenantInviteUpdate400JSONResponse( apierrors.NewAPIErrors("only an owner can change a role to owner"), ), nil @@ -34,7 +35,7 @@ 
func (t *TenantService) TenantInviteUpdate(ctx echo.Context, request gen.TenantI } // update the invite - invite, err := t.config.APIRepository.TenantInvite().UpdateTenantInvite(invite.ID, updateOpts) + invite, err := t.config.APIRepository.TenantInvite().UpdateTenantInvite(ctx.Request().Context(), sqlchelpers.UUIDToStr(invite.ID), updateOpts) if err != nil { return nil, err diff --git a/api/v1/server/handlers/users/accept_invite.go b/api/v1/server/handlers/users/accept_invite.go index 1139db3af..111ecc834 100644 --- a/api/v1/server/handlers/users/accept_invite.go +++ b/api/v1/server/handlers/users/accept_invite.go @@ -4,16 +4,19 @@ import ( "errors" "time" + "github.com/jackc/pgx/v5" "github.com/labstack/echo/v4" "github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (u *UserService) TenantInviteAccept(ctx echo.Context, request gen.TenantInviteAcceptRequestObject) (gen.TenantInviteAcceptResponseObject, error) { - user := ctx.Get("user").(*db.UserModel) + user := ctx.Get("user").(*dbsqlc.User) + userId := sqlchelpers.UUIDToStr(user.ID) // validate the request if apiErrors, err := u.config.Validator.ValidateAPI(request.Body); err != nil { @@ -29,7 +32,7 @@ func (u *UserService) TenantInviteAccept(ctx echo.Context, request gen.TenantInv } // get the invite - invite, err := u.config.APIRepository.TenantInvite().GetTenantInvite(inviteId) + invite, err := u.config.APIRepository.TenantInvite().GetTenantInvite(ctx.Request().Context(), inviteId) if err != nil { return nil, err @@ -41,19 +44,19 @@ func (u *UserService) TenantInviteAccept(ctx echo.Context, request gen.TenantInv } // ensure the invite is not expired - if invite.Expires.Before(time.Now()) { + if 
invite.Expires.Time.Before(time.Now()) { return gen.TenantInviteAccept400JSONResponse(apierrors.NewAPIErrors("invite is expired")), nil } // ensure invite is in a pending state - if invite.Status != db.InviteLinkStatusPending { + if invite.Status != dbsqlc.InviteLinkStatusPENDING { return gen.TenantInviteAccept400JSONResponse(apierrors.NewAPIErrors("invite has already been used")), nil } // ensure the user is not already a member of the tenant - _, err = u.config.APIRepository.Tenant().GetTenantMemberByEmail(invite.TenantID, user.Email) + _, err = u.config.APIRepository.Tenant().GetTenantMemberByEmail(ctx.Request().Context(), sqlchelpers.UUIDToStr(invite.TenantId), user.Email) - if err != nil && !errors.Is(err, db.ErrNotFound) { + if err != nil && !errors.Is(err, pgx.ErrNoRows) { return nil, err } else if err == nil { return gen.TenantInviteAccept400JSONResponse(apierrors.NewAPIErrors("user is already a member of the tenant")), nil @@ -61,19 +64,19 @@ func (u *UserService) TenantInviteAccept(ctx echo.Context, request gen.TenantInv // construct the database query updateOpts := &repository.UpdateTenantInviteOpts{ - Status: repository.StringPtr(string(db.InviteLinkStatusAccepted)), + Status: repository.StringPtr(string(dbsqlc.InviteLinkStatusACCEPTED)), } // update the invite - invite, err = u.config.APIRepository.TenantInvite().UpdateTenantInvite(invite.ID, updateOpts) + invite, err = u.config.APIRepository.TenantInvite().UpdateTenantInvite(ctx.Request().Context(), sqlchelpers.UUIDToStr(invite.ID), updateOpts) if err != nil { return nil, err } // add the user to the tenant - _, err = u.config.APIRepository.Tenant().CreateTenantMember(invite.TenantID, &repository.CreateTenantMemberOpts{ - UserId: user.ID, + _, err = u.config.APIRepository.Tenant().CreateTenantMember(ctx.Request().Context(), sqlchelpers.UUIDToStr(invite.TenantId), &repository.CreateTenantMemberOpts{ + UserId: userId, Role: string(invite.Role), }) @@ -81,10 +84,12 @@ func (u *UserService) 
TenantInviteAccept(ctx echo.Context, request gen.TenantInv return nil, err } + tenantId := sqlchelpers.UUIDToStr(invite.TenantId) + u.config.Analytics.Enqueue( - "user-invite:reject", - user.ID, - &invite.TenantID, + "user-invite:accept", + userId, + &tenantId, nil, ) diff --git a/api/v1/server/handlers/users/create.go b/api/v1/server/handlers/users/create.go index a1906288e..e292f4f1f 100644 --- a/api/v1/server/handlers/users/create.go +++ b/api/v1/server/handlers/users/create.go @@ -3,13 +3,14 @@ package users import ( "errors" + "github.com/jackc/pgx/v5" "github.com/labstack/echo/v4" "github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" "github.com/hatchet-dev/hatchet/api/v1/server/authn" ) @@ -35,13 +36,13 @@ func (u *UserService) UserCreate(ctx echo.Context, request gen.UserCreateRequest } // determine if the user exists before attempting to write the user - existingUser, err := u.config.APIRepository.User().GetUserByEmail(string(request.Body.Email)) + _, err := u.config.APIRepository.User().GetUserByEmail(ctx.Request().Context(), string(request.Body.Email)) - if err != nil && !errors.Is(err, db.ErrNotFound) { + if err != nil && !errors.Is(err, pgx.ErrNoRows) { return nil, err } - if existingUser != nil { + if err == nil { // just return bad request return gen.UserCreate400JSONResponse( apierrors.NewAPIErrors("Email is already registered."), @@ -66,7 +67,7 @@ func (u *UserService) UserCreate(ctx echo.Context, request gen.UserCreateRequest } // write the user to the db - user, err := u.config.APIRepository.User().CreateUser(createOpts) + user, err := u.config.APIRepository.User().CreateUser(ctx.Request().Context(), createOpts) if err != nil { return nil, err } 
@@ -79,7 +80,7 @@ func (u *UserService) UserCreate(ctx echo.Context, request gen.UserCreateRequest u.config.Analytics.Enqueue( "user:create", - user.ID, + sqlchelpers.UUIDToStr(user.ID), nil, map[string]interface{}{ "email": request.Body.Email, diff --git a/api/v1/server/handlers/users/get_current.go b/api/v1/server/handlers/users/get_current.go index e192eee52..829f2b263 100644 --- a/api/v1/server/handlers/users/get_current.go +++ b/api/v1/server/handlers/users/get_current.go @@ -6,25 +6,28 @@ import ( "encoding/hex" "errors" + "github.com/jackc/pgx/v5" "github.com/labstack/echo/v4" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (u *UserService) UserGetCurrent(ctx echo.Context, request gen.UserGetCurrentRequestObject) (gen.UserGetCurrentResponseObject, error) { - user := ctx.Get("user").(*db.UserModel) + user := ctx.Get("user").(*dbsqlc.User) + userId := sqlchelpers.UUIDToStr(user.ID) var hasPass bool - pass, err := u.config.APIRepository.User().GetUserPassword(user.ID) + _, err := u.config.APIRepository.User().GetUserPassword(ctx.Request().Context(), userId) - if err != nil && !errors.Is(err, db.ErrNotFound) { + if err != nil && !errors.Is(err, pgx.ErrNoRows) { return nil, err } - if pass != nil { + if err == nil { hasPass = true } @@ -42,7 +45,7 @@ func (u *UserService) UserGetCurrent(ctx echo.Context, request gen.UserGetCurren u.config.Analytics.Enqueue( "user:current", - user.ID, + userId, nil, map[string]interface{}{ "email": user.Email, diff --git a/api/v1/server/handlers/users/github_oauth_callback.go b/api/v1/server/handlers/users/github_oauth_callback.go index 10d20e5b1..c27f4ba27 100644 --- a/api/v1/server/handlers/users/github_oauth_callback.go +++ 
b/api/v1/server/handlers/users/github_oauth_callback.go @@ -6,6 +6,7 @@ import ( "fmt" githubsdk "github.com/google/go-github/v57/github" + "github.com/jackc/pgx/v5" "github.com/labstack/echo/v4" "golang.org/x/oauth2" @@ -14,7 +15,8 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/pkg/config/server" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) // Note: we want all errors to redirect, otherwise the user will be greeted with raw JSON in the middle of the login flow. @@ -35,7 +37,7 @@ func (u *UserService) UserUpdateGithubOauthCallback(ctx echo.Context, _ gen.User return nil, redirect.GetRedirectWithError(ctx, u.config.Logger, fmt.Errorf("invalid token"), "Forbidden") } - user, err := u.upsertGithubUserFromToken(u.config, token) + user, err := u.upsertGithubUserFromToken(ctx.Request().Context(), u.config, token) if err != nil { if errors.Is(err, ErrNotInRestrictedDomain) { @@ -66,7 +68,7 @@ func (u *UserService) UserUpdateGithubOauthCallback(ctx echo.Context, _ gen.User }, nil } -func (u *UserService) upsertGithubUserFromToken(config *server.ServerConfig, tok *oauth2.Token) (*db.UserModel, error) { +func (u *UserService) upsertGithubUserFromToken(ctx context.Context, config *server.ServerConfig, tok *oauth2.Token) (*dbsqlc.User, error) { gInfo, err := u.getGithubEmailFromToken(tok) if err != nil { @@ -96,15 +98,15 @@ func (u *UserService) upsertGithubUserFromToken(config *server.ServerConfig, tok Provider: "github", ProviderUserId: gInfo.ID, AccessToken: accessTokenEncrypted, - RefreshToken: &refreshTokenEncrypted, + RefreshToken: refreshTokenEncrypted, ExpiresAt: &expiresAt, } - user, err := u.config.APIRepository.User().GetUserByEmail(gInfo.Email) + user, err := u.config.APIRepository.User().GetUserByEmail(ctx, 
gInfo.Email) switch err { case nil: - user, err = u.config.APIRepository.User().UpdateUser(user.ID, &repository.UpdateUserOpts{ + user, err = u.config.APIRepository.User().UpdateUser(ctx, sqlchelpers.UUIDToStr(user.ID), &repository.UpdateUserOpts{ EmailVerified: repository.BoolPtr(gInfo.EmailVerified), Name: repository.StringPtr(gInfo.Name), OAuth: oauthOpts, @@ -113,8 +115,8 @@ func (u *UserService) upsertGithubUserFromToken(config *server.ServerConfig, tok if err != nil { return nil, fmt.Errorf("failed to update user: %s", err.Error()) } - case db.ErrNotFound: - user, err = u.config.APIRepository.User().CreateUser(&repository.CreateUserOpts{ + case pgx.ErrNoRows: + user, err = u.config.APIRepository.User().CreateUser(ctx, &repository.CreateUserOpts{ Email: gInfo.Email, EmailVerified: repository.BoolPtr(gInfo.EmailVerified), Name: repository.StringPtr(gInfo.Name), diff --git a/api/v1/server/handlers/users/google_oauth_callback.go b/api/v1/server/handlers/users/google_oauth_callback.go index e257ff56a..78e049fce 100644 --- a/api/v1/server/handlers/users/google_oauth_callback.go +++ b/api/v1/server/handlers/users/google_oauth_callback.go @@ -8,6 +8,7 @@ import ( "io" "net/http" + "github.com/jackc/pgx/v5" "github.com/labstack/echo/v4" "golang.org/x/oauth2" @@ -16,7 +17,8 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/pkg/config/server" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) // Note: we want all errors to redirect, otherwise the user will be greeted with raw JSON in the middle of the login flow. 
@@ -37,7 +39,7 @@ func (u *UserService) UserUpdateGoogleOauthCallback(ctx echo.Context, _ gen.User return nil, redirect.GetRedirectWithError(ctx, u.config.Logger, fmt.Errorf("invalid token"), "Forbidden") } - user, err := u.upsertGoogleUserFromToken(u.config, token) + user, err := u.upsertGoogleUserFromToken(ctx.Request().Context(), u.config, token) if err != nil { if errors.Is(err, ErrNotInRestrictedDomain) { @@ -60,7 +62,7 @@ func (u *UserService) UserUpdateGoogleOauthCallback(ctx echo.Context, _ gen.User }, nil } -func (u *UserService) upsertGoogleUserFromToken(config *server.ServerConfig, tok *oauth2.Token) (*db.UserModel, error) { +func (u *UserService) upsertGoogleUserFromToken(ctx context.Context, config *server.ServerConfig, tok *oauth2.Token) (*dbsqlc.User, error) { gInfo, err := getGoogleUserInfoFromToken(tok) if err != nil { return nil, err @@ -89,15 +91,15 @@ func (u *UserService) upsertGoogleUserFromToken(config *server.ServerConfig, tok Provider: "google", ProviderUserId: gInfo.Sub, AccessToken: accessTokenEncrypted, - RefreshToken: &refreshTokenEncrypted, + RefreshToken: refreshTokenEncrypted, ExpiresAt: &expiresAt, } - user, err := u.config.APIRepository.User().GetUserByEmail(gInfo.Email) + user, err := u.config.APIRepository.User().GetUserByEmail(ctx, gInfo.Email) switch err { case nil: - user, err = u.config.APIRepository.User().UpdateUser(user.ID, &repository.UpdateUserOpts{ + user, err = u.config.APIRepository.User().UpdateUser(ctx, sqlchelpers.UUIDToStr(user.ID), &repository.UpdateUserOpts{ EmailVerified: repository.BoolPtr(gInfo.EmailVerified), Name: repository.StringPtr(gInfo.Name), OAuth: oauthOpts, @@ -106,8 +108,8 @@ func (u *UserService) upsertGoogleUserFromToken(config *server.ServerConfig, tok if err != nil { return nil, fmt.Errorf("failed to update user: %s", err.Error()) } - case db.ErrNotFound: - user, err = u.config.APIRepository.User().CreateUser(&repository.CreateUserOpts{ + case pgx.ErrNoRows: + user, err = 
u.config.APIRepository.User().CreateUser(ctx, &repository.CreateUserOpts{ Email: gInfo.Email, EmailVerified: repository.BoolPtr(gInfo.EmailVerified), Name: repository.StringPtr(gInfo.Name), diff --git a/api/v1/server/handlers/users/list_memberships.go b/api/v1/server/handlers/users/list_memberships.go index 557a7f1b0..cbeb5674e 100644 --- a/api/v1/server/handlers/users/list_memberships.go +++ b/api/v1/server/handlers/users/list_memberships.go @@ -5,13 +5,15 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *UserService) TenantMembershipsList(ctx echo.Context, request gen.TenantMembershipsListRequestObject) (gen.TenantMembershipsListResponseObject, error) { - user := ctx.Get("user").(*db.UserModel) + user := ctx.Get("user").(*dbsqlc.User) + userId := sqlchelpers.UUIDToStr(user.ID) - memberships, err := t.config.APIRepository.User().ListTenantMemberships(user.ID) + memberships, err := t.config.APIRepository.User().ListTenantMemberships(ctx.Request().Context(), userId) if err != nil { return nil, err @@ -20,8 +22,7 @@ func (t *UserService) TenantMembershipsList(ctx echo.Context, request gen.Tenant rows := make([]gen.TenantMember, len(memberships)) for i, membership := range memberships { - membershipCp := membership - rows[i] = *transformers.ToTenantMember(&membershipCp) + rows[i] = *transformers.ToTenantMember(membership) } return gen.TenantMembershipsList200JSONResponse( diff --git a/api/v1/server/handlers/users/list_tenant_invites.go b/api/v1/server/handlers/users/list_tenant_invites.go index d098d504f..12fd7b520 100644 --- a/api/v1/server/handlers/users/list_tenant_invites.go +++ b/api/v1/server/handlers/users/list_tenant_invites.go @@ -5,13 +5,13 @@ import ( 
"github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" ) func (t *UserService) UserListTenantInvites(ctx echo.Context, request gen.UserListTenantInvitesRequestObject) (gen.UserListTenantInvitesResponseObject, error) { - user := ctx.Get("user").(*db.UserModel) + user := ctx.Get("user").(*dbsqlc.User) - invites, err := t.config.APIRepository.TenantInvite().ListTenantInvitesByEmail(user.Email) + invites, err := t.config.APIRepository.TenantInvite().ListTenantInvitesByEmail(ctx.Request().Context(), user.Email) if err != nil { return nil, err @@ -20,7 +20,7 @@ func (t *UserService) UserListTenantInvites(ctx echo.Context, request gen.UserLi rows := make([]gen.TenantInvite, len(invites)) for i := range invites { - rows[i] = *transformers.ToTenantInviteLink(&invites[i]) + rows[i] = *transformers.ToTenantInviteLink(invites[i]) } return gen.UserListTenantInvites200JSONResponse(gen.TenantInviteList200JSONResponse{ diff --git a/api/v1/server/handlers/users/reject_invite.go b/api/v1/server/handlers/users/reject_invite.go index f0fa679fc..9e6404fc9 100644 --- a/api/v1/server/handlers/users/reject_invite.go +++ b/api/v1/server/handlers/users/reject_invite.go @@ -9,11 +9,13 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (u *UserService) TenantInviteReject(ctx echo.Context, request gen.TenantInviteRejectRequestObject) (gen.TenantInviteRejectResponseObject, error) { - user := ctx.Get("user").(*db.UserModel) + user := ctx.Get("user").(*dbsqlc.User) + userId := 
sqlchelpers.UUIDToStr(user.ID) // validate the request if apiErrors, err := u.config.Validator.ValidateAPI(request.Body); err != nil { @@ -29,7 +31,7 @@ func (u *UserService) TenantInviteReject(ctx echo.Context, request gen.TenantInv } // get the invite - invite, err := u.config.APIRepository.TenantInvite().GetTenantInvite(inviteId) + invite, err := u.config.APIRepository.TenantInvite().GetTenantInvite(ctx.Request().Context(), inviteId) if err != nil { return nil, err @@ -41,31 +43,33 @@ func (u *UserService) TenantInviteReject(ctx echo.Context, request gen.TenantInv } // ensure the invite is not expired - if invite.Expires.Before(time.Now()) { + if invite.Expires.Time.Before(time.Now()) { return gen.TenantInviteReject400JSONResponse(apierrors.NewAPIErrors("invite is expired")), nil } // ensure invite is in a pending state - if invite.Status != db.InviteLinkStatusPending { + if invite.Status != dbsqlc.InviteLinkStatusPENDING { return gen.TenantInviteReject400JSONResponse(apierrors.NewAPIErrors("invite has already been used")), nil } // construct the database query updateOpts := &repository.UpdateTenantInviteOpts{ - Status: repository.StringPtr(string(db.InviteLinkStatusRejected)), + Status: repository.StringPtr(string(dbsqlc.InviteLinkStatusREJECTED)), } // update the invite - invite, err = u.config.APIRepository.TenantInvite().UpdateTenantInvite(invite.ID, updateOpts) + invite, err = u.config.APIRepository.TenantInvite().UpdateTenantInvite(ctx.Request().Context(), sqlchelpers.UUIDToStr(invite.ID), updateOpts) if err != nil { return nil, err } + tenantId := sqlchelpers.UUIDToStr(invite.TenantId) + u.config.Analytics.Enqueue( - "user-invite:accept", - user.ID, - &invite.TenantID, + "user-invite:reject", + userId, + &tenantId, nil, ) diff --git a/api/v1/server/handlers/users/update_login.go b/api/v1/server/handlers/users/update_login.go index e5037a103..08aec6129 100644 --- a/api/v1/server/handlers/users/update_login.go +++ 
b/api/v1/server/handlers/users/update_login.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" + "github.com/jackc/pgx/v5" "github.com/labstack/echo/v4" "github.com/hatchet-dev/hatchet/api/v1/server/authn" @@ -11,7 +12,7 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (u *UserService) UserUpdateLogin(ctx echo.Context, request gen.UserUpdateLoginRequestObject) (gen.UserUpdateLoginResponseObject, error) { @@ -36,16 +37,16 @@ func (u *UserService) UserUpdateLogin(ctx echo.Context, request gen.UserUpdateLo } // determine if the user exists before attempting to write the user - existingUser, err := u.config.APIRepository.User().GetUserByEmail(string(request.Body.Email)) + existingUser, err := u.config.APIRepository.User().GetUserByEmail(ctx.Request().Context(), string(request.Body.Email)) if err != nil { - if errors.Is(err, db.ErrNotFound) { + if errors.Is(err, pgx.ErrNoRows) { return gen.UserUpdateLogin400JSONResponse(apierrors.NewAPIErrors("user not found")), nil } return nil, err } - userPass, err := u.config.APIRepository.User().GetUserPassword(existingUser.ID) + userPass, err := u.config.APIRepository.User().GetUserPassword(ctx.Request().Context(), sqlchelpers.UUIDToStr(existingUser.ID)) if err != nil { return nil, fmt.Errorf("could not get user password: %w", err) diff --git a/api/v1/server/handlers/users/update_logout.go b/api/v1/server/handlers/users/update_logout.go index 841711fb9..d1065c996 100644 --- a/api/v1/server/handlers/users/update_logout.go +++ b/api/v1/server/handlers/users/update_logout.go @@ -6,11 +6,11 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/authn" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" - 
"github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" ) func (u *UserService) UserUpdateLogout(ctx echo.Context, request gen.UserUpdateLogoutRequestObject) (gen.UserUpdateLogoutResponseObject, error) { - user := ctx.Get("user").(*db.UserModel) + user := ctx.Get("user").(*dbsqlc.User) if err := authn.NewSessionHelpers(u.config).SaveUnauthenticated(ctx); err != nil { return nil, err diff --git a/api/v1/server/handlers/users/update_password.go b/api/v1/server/handlers/users/update_password.go index e99a030de..4dda68170 100644 --- a/api/v1/server/handlers/users/update_password.go +++ b/api/v1/server/handlers/users/update_password.go @@ -9,12 +9,13 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (u *UserService) UserUpdatePassword(ctx echo.Context, request gen.UserUpdatePasswordRequestObject) (gen.UserUpdatePasswordResponseObject, error) { // determine if the user exists before attempting to write the user - existingUser := ctx.Get("user").(*db.UserModel) + existingUser := ctx.Get("user").(*dbsqlc.User) if !u.config.Runtime.AllowChangePassword { return gen.UserUpdatePassword405JSONResponse( @@ -36,7 +37,9 @@ func (u *UserService) UserUpdatePassword(ctx echo.Context, request gen.UserUpdat return gen.UserUpdatePassword400JSONResponse(*apiErrors), nil } - userPass, err := u.config.APIRepository.User().GetUserPassword(existingUser.ID) + userId := sqlchelpers.UUIDToStr(existingUser.ID) + + userPass, err := u.config.APIRepository.User().GetUserPassword(ctx.Request().Context(), userId) if err != nil { return nil, fmt.Errorf("could not get user password: %w", err) @@ -54,7 +57,7 
@@ func (u *UserService) UserUpdatePassword(ctx echo.Context, request gen.UserUpdat return nil, fmt.Errorf("could not hash user password: %w", err) } - user, err := u.config.APIRepository.User().UpdateUser(existingUser.ID, &repository.UpdateUserOpts{ + user, err := u.config.APIRepository.User().UpdateUser(ctx.Request().Context(), userId, &repository.UpdateUserOpts{ Password: newPass, }) diff --git a/api/v1/server/handlers/v1/proxy/proxy.go b/api/v1/server/handlers/v1/proxy/proxy.go new file mode 100644 index 000000000..0c660fe46 --- /dev/null +++ b/api/v1/server/handlers/v1/proxy/proxy.go @@ -0,0 +1,63 @@ +package proxy + +import ( + "context" + "time" + + client "github.com/hatchet-dev/hatchet/pkg/client/v1" + "github.com/hatchet-dev/hatchet/pkg/config/server" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" +) + +type Proxy[in, out any] struct { + config *server.ServerConfig + method func(ctx context.Context, cli *client.GRPCClient, input *in) (*out, error) +} + +func NewProxy[in, out any](config *server.ServerConfig, method func(ctx context.Context, cli *client.GRPCClient, input *in) (*out, error)) *Proxy[in, out] { + return &Proxy[in, out]{ + config: config, + method: method, + } +} + +func (p *Proxy[in, out]) Do(ctx context.Context, tenant *dbsqlc.Tenant, input *in) (*out, error) { + tenantId := sqlchelpers.UUIDToStr(tenant.ID) + + expiresAt := time.Now().Add(5 * time.Minute).UTC() + + // generate the API token for the proxy request + tok, err := p.config.Auth.JWTManager.GenerateTenantToken(ctx, tenantId, "proxy", true, &expiresAt) + + if err != nil { + return nil, err + } + + defer func() { + deleteCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // delete the API token + err = p.config.APIRepository.APIToken().DeleteAPIToken(deleteCtx, tenantId, tok.TokenId) + + if err != nil { + p.config.Logger.Error().Err(err).Msg("failed to 
delete API token") + } + }() + + c, err := p.config.InternalClientFactory.NewGRPCClient(tok.Token) + + if err != nil { + return nil, err + } + + // call the client method + res, err := p.method(client.AuthContext(ctx, tok.Token), c, input) + + if err != nil { + return nil, err + } + + return res, nil +} diff --git a/api/v1/server/handlers/v1/tasks/cancel.go b/api/v1/server/handlers/v1/tasks/cancel.go new file mode 100644 index 000000000..b2ca0d35c --- /dev/null +++ b/api/v1/server/handlers/v1/tasks/cancel.go @@ -0,0 +1,74 @@ +package tasks + +import ( + "github.com/labstack/echo/v4" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" + "github.com/hatchet-dev/hatchet/internal/services/admin/contracts/v1" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" +) + +func (t *TasksService) V1TaskCancel(ctx echo.Context, request gen.V1TaskCancelRequestObject) (gen.V1TaskCancelResponseObject, error) { + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + + var err error + + grpcReq := &contracts.CancelTasksRequest{} + + if request.Body.ExternalIds != nil { + externalIds := make([]string, 0) + + for _, id := range *request.Body.ExternalIds { + externalIds = append(externalIds, id.String()) + } + + grpcReq.ExternalIds = externalIds + } + + if request.Body.Filter != nil { + filter := &contracts.TasksFilter{ + Since: timestamppb.New(request.Body.Filter.Since), + } + + if request.Body.Filter.Until != nil { + filter.Until = timestamppb.New(*request.Body.Filter.Until) + } + + if request.Body.Filter.Statuses != nil { + filter.Statuses = make([]string, len(*request.Body.Filter.Statuses)) + + for i, status := range *request.Body.Filter.Statuses { + filter.Statuses[i] = string(status) + } + } + + if request.Body.Filter.WorkflowIds != nil { + filter.WorkflowIds = make([]string, len(*request.Body.Filter.WorkflowIds)) + + for i, id := range *request.Body.Filter.WorkflowIds { + filter.WorkflowIds[i] = id.String() + } 
+ } + + if request.Body.Filter.AdditionalMetadata != nil { + filter.AdditionalMetadata = make([]string, len(*request.Body.Filter.AdditionalMetadata)) + + copy(filter.AdditionalMetadata, *request.Body.Filter.AdditionalMetadata) + } + + grpcReq.Filter = filter + } + + _, err = t.proxyCancel.Do( + ctx.Request().Context(), + tenant, + grpcReq, + ) + + if err != nil { + return nil, err + } + + return gen.V1TaskCancel200Response{}, nil +} diff --git a/api/v1/server/handlers/v1/tasks/get.go b/api/v1/server/handlers/v1/tasks/get.go new file mode 100644 index 000000000..e8631ed2d --- /dev/null +++ b/api/v1/server/handlers/v1/tasks/get.go @@ -0,0 +1,26 @@ +package tasks + +import ( + "github.com/labstack/echo/v4" + + "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" + "github.com/hatchet-dev/hatchet/pkg/repository/v1/sqlcv1" + + transformers "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers/v1" +) + +func (t *TasksService) V1TaskGet(ctx echo.Context, request gen.V1TaskGetRequestObject) (gen.V1TaskGetResponseObject, error) { + task := ctx.Get("task").(*sqlcv1.V1TasksOlap) + + taskWithData, workflowRunExternalId, err := t.config.V1.OLAP().ReadTaskRunData(ctx.Request().Context(), task.TenantID, task.ID, task.InsertedAt) + + if err != nil { + return nil, err + } + + result := transformers.ToTask(taskWithData, workflowRunExternalId) + + return gen.V1TaskGet200JSONResponse( + result, + ), nil +} diff --git a/api/v1/server/handlers/v1/tasks/get_metrics.go b/api/v1/server/handlers/v1/tasks/get_metrics.go new file mode 100644 index 000000000..d7b4941a1 --- /dev/null +++ b/api/v1/server/handlers/v1/tasks/get_metrics.go @@ -0,0 +1,51 @@ +package tasks + +import ( + "github.com/google/uuid" + "github.com/jackc/pgx/v5/pgtype" + + "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" + v1 
"github.com/hatchet-dev/hatchet/pkg/repository/v1" + + "github.com/labstack/echo/v4" + + transformers "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers/v1" +) + +func (t *TasksService) V1TaskListStatusMetrics(ctx echo.Context, request gen.V1TaskListStatusMetricsRequestObject) (gen.V1TaskListStatusMetricsResponseObject, error) { + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) + + var workflowIds []uuid.UUID + + if request.Params.WorkflowIds != nil { + workflowIds = *request.Params.WorkflowIds + } + + var parentTaskExternalId *pgtype.UUID + + if request.Params.ParentTaskExternalId != nil { + uuidPtr := *request.Params.ParentTaskExternalId + uuidVal := sqlchelpers.UUIDFromStr(uuidPtr.String()) + parentTaskExternalId = &uuidVal + } + + metrics, err := t.config.V1.OLAP().ReadTaskRunMetrics(ctx.Request().Context(), tenantId, v1.ReadTaskRunMetricsOpts{ + CreatedAfter: request.Params.Since, + WorkflowIds: workflowIds, + ParentTaskExternalID: parentTaskExternalId, + }) + + if err != nil { + return nil, err + } + + result := transformers.ToTaskRunMetrics(&metrics) + + // Search for api errors to see how we handle errors in other cases + return gen.V1TaskListStatusMetrics200JSONResponse( + result, + ), nil +} diff --git a/api/v1/server/handlers/v1/tasks/get_point_metrics.go b/api/v1/server/handlers/v1/tasks/get_point_metrics.go new file mode 100644 index 000000000..7ec4aba42 --- /dev/null +++ b/api/v1/server/handlers/v1/tasks/get_point_metrics.go @@ -0,0 +1,124 @@ +package tasks + +import ( + "errors" + "time" + + "github.com/jackc/pgx/v5" + "github.com/labstack/echo/v4" + + "github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors" + "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/v1/sqlcv1" +) + +func (t *TasksService) 
V1TaskGetPointMetrics(ctx echo.Context, request gen.V1TaskGetPointMetricsRequestObject) (gen.V1TaskGetPointMetricsResponseObject, error) { + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) + + // default lower bound: 24 hours ago, truncated to a 30-minute boundary + lowerBound := time.Now().UTC().Add(-24 * time.Hour).Truncate(30 * time.Minute) + upperBound := time.Now().UTC() + + if request.Params.CreatedAfter != nil { + lowerBound = request.Params.CreatedAfter.UTC() + } + if request.Params.FinishedBefore != nil { + upperBound = request.Params.FinishedBefore.UTC() + } + + // determine a bucket interval based on the time range: under 1 hour uses 1-minute buckets, under 12 hours uses 5-minute, + // under 48 hours uses 30-minute, under 8 days uses 8-hour, and anything longer uses 24-hour buckets. + var bucketInterval time.Duration + + switch { + case upperBound.Sub(lowerBound) < 61*time.Minute: + bucketInterval = time.Minute + lowerBound = lowerBound.Truncate(time.Minute) + case upperBound.Sub(lowerBound) < 12*time.Hour: + bucketInterval = 5 * time.Minute + lowerBound = lowerBound.Truncate(5 * time.Minute) + case upperBound.Sub(lowerBound) < 48*time.Hour: + bucketInterval = 30 * time.Minute + lowerBound = lowerBound.Truncate(30 * time.Minute) + case upperBound.Sub(lowerBound) < 8*24*time.Hour: + bucketInterval = 8 * time.Hour + lowerBound = lowerBound.Truncate(8 * time.Hour) + default: + bucketInterval = 24 * time.Hour + lowerBound = lowerBound.Truncate(24 * time.Hour) + } + + metrics, err := t.config.V1.OLAP().GetTaskPointMetrics(ctx.Request().Context(), tenantId, &lowerBound, &upperBound, bucketInterval) + + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return gen.V1TaskGetPointMetrics400JSONResponse( + apierrors.NewAPIErrors("workflow not found"), + ), nil + } + + return nil, err + } + + // Fill missing buckets with 0 values + convertedMetrics := fillMissingMinutesWithZero(lowerBound, upperBound, convertToGenMetrics(metrics), bucketInterval)
+ + return gen.V1TaskGetPointMetrics200JSONResponse{ + Results: &convertedMetrics, + }, nil +} + +type WorkflowRunEventsMetrics struct { + Results *[]gen.V1TaskPointMetric `json:"results,omitempty"` +} + +func convertToGenMetrics(metrics []*sqlcv1.GetTaskPointMetricsRow) []gen.V1TaskPointMetric { + converted := make([]gen.V1TaskPointMetric, len(metrics)) + + for i, metric := range metrics { + if metric == nil || !metric.Bucket2.Valid { + continue + } + + timeMinute := metric.Bucket2.Time.UTC() + + converted[i] = gen.V1TaskPointMetric{ + FAILED: int(metric.FailedCount), + SUCCEEDED: int(metric.CompletedCount), + Time: timeMinute, + } + } + + return converted +} + +// fillMissingMinutesWithZero fills in missing minutes between lowerBound and upperBound with 0 values. +func fillMissingMinutesWithZero(lowerBound, upperBound time.Time, metrics []gen.V1TaskPointMetric, bucketInterval time.Duration) []gen.V1TaskPointMetric { + result := []gen.V1TaskPointMetric{} + + metricMap := make(map[time.Time]gen.V1TaskPointMetric) + + for _, metric := range metrics { + if !metric.Time.IsZero() { + metricMap[(metric.Time).UTC()] = metric + } + } + + for t := lowerBound; t.Before(upperBound) || t.Equal(upperBound); t = t.Add(bucketInterval) { + if metric, exists := metricMap[t]; exists { + result = append(result, metric) + } else { + timeCopy := t + result = append(result, gen.V1TaskPointMetric{ + FAILED: int(0), + SUCCEEDED: int(0), + Time: timeCopy, + }) + } + } + + return result +} diff --git a/api/v1/server/handlers/v1/tasks/list_by_dag_id.go b/api/v1/server/handlers/v1/tasks/list_by_dag_id.go new file mode 100644 index 000000000..42209d766 --- /dev/null +++ b/api/v1/server/handlers/v1/tasks/list_by_dag_id.go @@ -0,0 +1,38 @@ +package tasks + +import ( + "github.com/jackc/pgx/v5/pgtype" + "github.com/labstack/echo/v4" + + "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" + + transformers 
"github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers/v1" +) + +func (t *TasksService) V1DagListTasks(ctx echo.Context, request gen.V1DagListTasksRequestObject) (gen.V1DagListTasksResponseObject, error) { + tenantId := request.Params.Tenant.String() + dagIds := request.Params.DagIds + + pguuids := make([]pgtype.UUID, 0) + for _, dagId := range dagIds { + pguuids = append(pguuids, sqlchelpers.UUIDFromStr(dagId.String())) + } + + tasks, taskIdToDagExternalId, err := t.config.V1.OLAP().ListTasksByDAGId( + ctx.Request().Context(), + tenantId, + pguuids, + ) + + if err != nil { + return nil, err + } + + result := transformers.ToDagChildren(tasks, taskIdToDagExternalId) + + // Search for api errors to see how we handle errors in other cases + return gen.V1DagListTasks200JSONResponse( + result, + ), nil +} diff --git a/api/v1/server/handlers/v1/tasks/list_events.go b/api/v1/server/handlers/v1/tasks/list_events.go new file mode 100644 index 000000000..bfe3a0f85 --- /dev/null +++ b/api/v1/server/handlers/v1/tasks/list_events.go @@ -0,0 +1,30 @@ +package tasks + +import ( + "github.com/labstack/echo/v4" + + "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/v1/sqlcv1" + + transformers "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers/v1" +) + +func (t *TasksService) V1TaskEventList(ctx echo.Context, request gen.V1TaskEventListRequestObject) (gen.V1TaskEventListResponseObject, error) { + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) + task := ctx.Get("task").(*sqlcv1.V1TasksOlap) + + taskRunEvents, err := t.config.V1.OLAP().ListTaskRunEvents(ctx.Request().Context(), tenantId, task.ID, task.InsertedAt, *request.Params.Limit, *request.Params.Offset) + + if err != nil { + return nil, err + } + + result := 
transformers.ToTaskRunEventMany(taskRunEvents, sqlchelpers.UUIDToStr(task.ExternalID)) + + return gen.V1TaskEventList200JSONResponse( + result, + ), nil +} diff --git a/api/v1/server/handlers/v1/tasks/list_logs.go b/api/v1/server/handlers/v1/tasks/list_logs.go new file mode 100644 index 000000000..5c02d26fe --- /dev/null +++ b/api/v1/server/handlers/v1/tasks/list_logs.go @@ -0,0 +1,47 @@ +package tasks + +import ( + "github.com/labstack/echo/v4" + + "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" + v1 "github.com/hatchet-dev/hatchet/pkg/repository/v1" + "github.com/hatchet-dev/hatchet/pkg/repository/v1/sqlcv1" + + transformers "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers/v1" +) + +func (t *TasksService) V1LogLineList(ctx echo.Context, request gen.V1LogLineListRequestObject) (gen.V1LogLineListResponseObject, error) { + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) + task := ctx.Get("task").(*sqlcv1.V1TasksOlap) + + logLines, err := t.config.V1.Logs().ListLogLines(ctx.Request().Context(), tenantId, task.ID, task.InsertedAt, &v1.ListLogsOpts{}) + + if err != nil { + return nil, err + } + + rows := make([]gen.V1LogLine, len(logLines)) + + for i, log := range logLines { + rows[i] = *transformers.ToV1LogLine(log) + } + + // use the total rows and limit to calculate the total pages + totalPages := int64(1) + currPage := int64(1) + nextPage := int64(1) + + return gen.V1LogLineList200JSONResponse( + gen.V1LogLineList{ + Rows: &rows, + Pagination: &gen.PaginationResponse{ + NumPages: &totalPages, + CurrentPage: &currPage, + NextPage: &nextPage, + }, + }, + ), nil +} diff --git a/api/v1/server/handlers/v1/tasks/replay.go b/api/v1/server/handlers/v1/tasks/replay.go new file mode 100644 index 000000000..8504bf8c9 --- /dev/null +++ b/api/v1/server/handlers/v1/tasks/replay.go 
@@ -0,0 +1,74 @@ +package tasks + +import ( + "github.com/labstack/echo/v4" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" + "github.com/hatchet-dev/hatchet/internal/services/admin/contracts/v1" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" +) + +func (t *TasksService) V1TaskReplay(ctx echo.Context, request gen.V1TaskReplayRequestObject) (gen.V1TaskReplayResponseObject, error) { + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + + var err error + + grpcReq := &contracts.ReplayTasksRequest{} + + if request.Body.ExternalIds != nil { + externalIds := make([]string, 0) + + for _, id := range *request.Body.ExternalIds { + externalIds = append(externalIds, id.String()) + } + + grpcReq.ExternalIds = externalIds + } + + if request.Body.Filter != nil { + filter := &contracts.TasksFilter{ + Since: timestamppb.New(request.Body.Filter.Since), + } + + if request.Body.Filter.Until != nil { + filter.Until = timestamppb.New(*request.Body.Filter.Until) + } + + if request.Body.Filter.Statuses != nil { + filter.Statuses = make([]string, len(*request.Body.Filter.Statuses)) + + for i, status := range *request.Body.Filter.Statuses { + filter.Statuses[i] = string(status) + } + } + + if request.Body.Filter.WorkflowIds != nil { + filter.WorkflowIds = make([]string, len(*request.Body.Filter.WorkflowIds)) + + for i, id := range *request.Body.Filter.WorkflowIds { + filter.WorkflowIds[i] = id.String() + } + } + + if request.Body.Filter.AdditionalMetadata != nil { + filter.AdditionalMetadata = make([]string, len(*request.Body.Filter.AdditionalMetadata)) + + copy(filter.AdditionalMetadata, *request.Body.Filter.AdditionalMetadata) + } + + grpcReq.Filter = filter + } + + _, err = t.proxyReplay.Do( + ctx.Request().Context(), + tenant, + grpcReq, + ) + + if err != nil { + return nil, err + } + + return gen.V1TaskReplay200Response{}, nil +} diff --git a/api/v1/server/handlers/v1/tasks/service.go 
b/api/v1/server/handlers/v1/tasks/service.go new file mode 100644 index 000000000..e87da7ada --- /dev/null +++ b/api/v1/server/handlers/v1/tasks/service.go @@ -0,0 +1,33 @@ +package tasks + +import ( + "context" + + "github.com/hatchet-dev/hatchet/api/v1/server/handlers/v1/proxy" + admincontracts "github.com/hatchet-dev/hatchet/internal/services/admin/contracts/v1" + "github.com/hatchet-dev/hatchet/pkg/config/server" + + client "github.com/hatchet-dev/hatchet/pkg/client/v1" +) + +type TasksService struct { + config *server.ServerConfig + proxyCancel *proxy.Proxy[admincontracts.CancelTasksRequest, admincontracts.CancelTasksResponse] + proxyReplay *proxy.Proxy[admincontracts.ReplayTasksRequest, admincontracts.ReplayTasksResponse] +} + +func NewTasksService(config *server.ServerConfig) *TasksService { + proxyCancel := proxy.NewProxy(config, func(ctx context.Context, cli *client.GRPCClient, in *admincontracts.CancelTasksRequest) (*admincontracts.CancelTasksResponse, error) { + return cli.Admin().CancelTasks(ctx, in) + }) + + proxyReplay := proxy.NewProxy(config, func(ctx context.Context, cli *client.GRPCClient, in *admincontracts.ReplayTasksRequest) (*admincontracts.ReplayTasksResponse, error) { + return cli.Admin().ReplayTasks(ctx, in) + }) + + return &TasksService{ + config: config, + proxyCancel: proxyCancel, + proxyReplay: proxyReplay, + } +} diff --git a/api/v1/server/handlers/v1/workflow-runs/get.go b/api/v1/server/handlers/v1/workflow-runs/get.go new file mode 100644 index 000000000..3977101ee --- /dev/null +++ b/api/v1/server/handlers/v1/workflow-runs/get.go @@ -0,0 +1,92 @@ +package workflowruns + +import ( + "context" + + "github.com/google/uuid" + "github.com/jackc/pgx/v5/pgtype" + "github.com/labstack/echo/v4" + + "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" + v1 "github.com/hatchet-dev/hatchet/pkg/repository/v1" 
+ + transformers "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers/v1" +) + +func (t *V1WorkflowRunsService) V1WorkflowRunGet(ctx echo.Context, request gen.V1WorkflowRunGetRequestObject) (gen.V1WorkflowRunGetResponseObject, error) { + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) + rawWorkflowRun := ctx.Get("v1-workflow-run").(*v1.V1WorkflowRunPopulator) + + requestContext := ctx.Request().Context() + + details, err := t.getWorkflowRunDetails( + requestContext, + tenantId, + rawWorkflowRun, + ) + + if err != nil { + return nil, err + } + + // Search for api errors to see how we handle errors in other cases + return gen.V1WorkflowRunGet200JSONResponse( + *details, + ), nil +} + +func (t *V1WorkflowRunsService) getWorkflowRunDetails( + ctx context.Context, + tenantId string, + rawWorkflowRun *v1.V1WorkflowRunPopulator, +) (*gen.V1WorkflowRunDetails, error) { + workflowRun := rawWorkflowRun.WorkflowRun + taskMetadata := rawWorkflowRun.TaskMetadata + workflowRunId := workflowRun.ExternalID + + taskRunEvents, err := t.config.V1.OLAP().ListTaskRunEventsByWorkflowRunId( + ctx, + tenantId, + workflowRunId, + ) + + if err != nil { + return nil, err + } + + tasks, err := t.config.V1.OLAP().ListTasksByIdAndInsertedAt( + ctx, + tenantId, + taskMetadata, + ) + + if err != nil { + return nil, err + } + + stepIdToTaskExternalId := make(map[pgtype.UUID]pgtype.UUID) + for _, task := range tasks { + stepIdToTaskExternalId[task.StepID] = task.ExternalID + } + + workflowVersionId := uuid.MustParse(sqlchelpers.UUIDToStr(workflowRun.WorkflowVersionId)) + + shape, err := t.config.APIRepository.WorkflowRun().GetWorkflowRunShape( + ctx, workflowVersionId, + ) + + if err != nil { + return nil, err + } + + result, err := transformers.ToWorkflowRunDetails(taskRunEvents, workflowRun, shape, tasks, stepIdToTaskExternalId) + + if err != nil { + return nil, err + } + + return &result, nil +} diff --git 
a/api/v1/server/handlers/v1/workflow-runs/list-task-events.go b/api/v1/server/handlers/v1/workflow-runs/list-task-events.go new file mode 100644 index 000000000..93aedb92f --- /dev/null +++ b/api/v1/server/handlers/v1/workflow-runs/list-task-events.go @@ -0,0 +1,37 @@ +package workflowruns + +import ( + "github.com/labstack/echo/v4" + + "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" + v1 "github.com/hatchet-dev/hatchet/pkg/repository/v1" + + transformers "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers/v1" +) + +func (t *V1WorkflowRunsService) V1WorkflowRunTaskEventsList(ctx echo.Context, request gen.V1WorkflowRunTaskEventsListRequestObject) (gen.V1WorkflowRunTaskEventsListResponseObject, error) { + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) + rawWorkflowRun := ctx.Get("v1-workflow-run").(*v1.V1WorkflowRunPopulator) + + workflowRun := rawWorkflowRun.WorkflowRun + + taskRunEvents, err := t.config.V1.OLAP().ListTaskRunEventsByWorkflowRunId( + ctx.Request().Context(), + tenantId, + workflowRun.ExternalID, + ) + + if err != nil { + return nil, err + } + + result := transformers.ToWorkflowRunTaskRunEventsMany(taskRunEvents) + + // Search for api errors to see how we handle errors in other cases + return gen.V1WorkflowRunTaskEventsList200JSONResponse( + result, + ), nil +} diff --git a/api/v1/server/handlers/v1/workflow-runs/list.go b/api/v1/server/handlers/v1/workflow-runs/list.go new file mode 100644 index 000000000..19a41e55d --- /dev/null +++ b/api/v1/server/handlers/v1/workflow-runs/list.go @@ -0,0 +1,281 @@ +package workflowruns + +import ( + "strings" + + "github.com/google/uuid" + "github.com/jackc/pgx/v5/pgtype" + "github.com/labstack/echo/v4" + + "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" + 
"github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" + v1 "github.com/hatchet-dev/hatchet/pkg/repository/v1" + "github.com/hatchet-dev/hatchet/pkg/repository/v1/sqlcv1" + + transformers "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers/v1" +) + +func (t *V1WorkflowRunsService) WithDags(ctx echo.Context, request gen.V1WorkflowRunListRequestObject) (gen.V1WorkflowRunListResponseObject, error) { + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) + + var ( + statuses = []sqlcv1.V1ReadableStatusOlap{ + sqlcv1.V1ReadableStatusOlapQUEUED, + sqlcv1.V1ReadableStatusOlapRUNNING, + sqlcv1.V1ReadableStatusOlapFAILED, + sqlcv1.V1ReadableStatusOlapCOMPLETED, + sqlcv1.V1ReadableStatusOlapCANCELLED, + } + since = request.Params.Since + workflowIds = []uuid.UUID{} + limit int64 = 50 + offset int64 + ) + + if request.Params.Statuses != nil { + if len(*request.Params.Statuses) > 0 { + statuses = []sqlcv1.V1ReadableStatusOlap{} + for _, status := range *request.Params.Statuses { + statuses = append(statuses, sqlcv1.V1ReadableStatusOlap(status)) + } + } + } + + if request.Params.Limit != nil { + limit = *request.Params.Limit + } + + if request.Params.Offset != nil { + offset = *request.Params.Offset + } + + if request.Params.WorkflowIds != nil { + workflowIds = *request.Params.WorkflowIds + } + + opts := v1.ListWorkflowRunOpts{ + CreatedAfter: since, + Statuses: statuses, + WorkflowIds: workflowIds, + Limit: limit, + Offset: offset, + } + + additionalMetadataFilters := make(map[string]interface{}) + + if request.Params.AdditionalMetadata != nil { + for _, v := range *request.Params.AdditionalMetadata { + kv_pairs := strings.Split(v, ":") + if len(kv_pairs) == 2 { + additionalMetadataFilters[kv_pairs[0]] = kv_pairs[1] + } + } + + opts.AdditionalMetadata = additionalMetadataFilters + } + + if request.Params.Until != nil { + opts.FinishedBefore = 
request.Params.Until + } + + if request.Params.ParentTaskExternalId != nil { + parentTaskExternalId := request.Params.ParentTaskExternalId.String() + id := sqlchelpers.UUIDFromStr(parentTaskExternalId) + opts.ParentTaskExternalId = &id + } + + dags, total, err := t.config.V1.OLAP().ListWorkflowRuns( + ctx.Request().Context(), + tenantId, + opts, + ) + + if err != nil { + return nil, err + } + + dagExternalIds := make([]pgtype.UUID, 0) + + for _, dag := range dags { + if dag.Kind == sqlcv1.V1RunKindDAG { + dagExternalIds = append(dagExternalIds, dag.ExternalID) + } + } + + tasks, taskIdToDagExternalId, err := t.config.V1.OLAP().ListTasksByDAGId( + ctx.Request().Context(), + tenantId, + dagExternalIds, + ) + + if err != nil { + return nil, err + } + + pgWorkflowIds := make([]pgtype.UUID, 0) + + for _, wf := range dags { + pgWorkflowIds = append(pgWorkflowIds, wf.WorkflowID) + } + + workflowNames, err := t.config.V1.Workflows().ListWorkflowNamesByIds( + ctx.Request().Context(), + tenantId, + pgWorkflowIds, + ) + + if err != nil { + return nil, err + } + + taskIdToWorkflowName := make(map[int64]string) + + for _, task := range tasks { + if name, ok := workflowNames[task.WorkflowID]; ok { + taskIdToWorkflowName[task.ID] = name + } + } + + parsedTasks := transformers.TaskRunDataRowToWorkflowRunsMany(tasks, taskIdToWorkflowName, total, limit, offset) + + dagChildren := make(map[uuid.UUID][]gen.V1TaskSummary) + + for _, task := range parsedTasks.Rows { + dagExternalId := taskIdToDagExternalId[int64(task.TaskId)] + existing, ok := dagChildren[dagExternalId] + + if ok { + dagChildren[dagExternalId] = append(existing, task) + } else { + dagChildren[dagExternalId] = []gen.V1TaskSummary{task} + } + } + + result := transformers.ToWorkflowRunMany(dags, dagChildren, workflowNames, total, limit, offset) + + // Search for api errors to see how we handle errors in other cases + return gen.V1WorkflowRunList200JSONResponse( + result, + ), nil +} + +func (t *V1WorkflowRunsService) 
OnlyTasks(ctx echo.Context, request gen.V1WorkflowRunListRequestObject) (gen.V1WorkflowRunListResponseObject, error) { + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) + + var ( + statuses = []sqlcv1.V1ReadableStatusOlap{ + sqlcv1.V1ReadableStatusOlapQUEUED, + sqlcv1.V1ReadableStatusOlapRUNNING, + sqlcv1.V1ReadableStatusOlapFAILED, + sqlcv1.V1ReadableStatusOlapCOMPLETED, + sqlcv1.V1ReadableStatusOlapCANCELLED, + } + since = request.Params.Since + workflowIds = []uuid.UUID{} + limit int64 = 50 + offset int64 + ) + + if request.Params.Statuses != nil { + if len(*request.Params.Statuses) > 0 { + statuses = []sqlcv1.V1ReadableStatusOlap{} + for _, status := range *request.Params.Statuses { + statuses = append(statuses, sqlcv1.V1ReadableStatusOlap(status)) + } + } + } + + if request.Params.Limit != nil { + limit = *request.Params.Limit + } + + if request.Params.Offset != nil { + offset = *request.Params.Offset + } + + if request.Params.WorkflowIds != nil { + workflowIds = *request.Params.WorkflowIds + } + + opts := v1.ListTaskRunOpts{ + CreatedAfter: since, + Statuses: statuses, + WorkflowIds: workflowIds, + Limit: limit, + Offset: offset, + WorkerId: request.Params.WorkerId, + } + + additionalMetadataFilters := make(map[string]interface{}) + + if request.Params.AdditionalMetadata != nil { + for _, v := range *request.Params.AdditionalMetadata { + kv_pairs := strings.Split(v, ":") + if len(kv_pairs) == 2 { + additionalMetadataFilters[kv_pairs[0]] = kv_pairs[1] + } + } + + opts.AdditionalMetadata = additionalMetadataFilters + } + + if request.Params.Until != nil { + opts.FinishedBefore = request.Params.Until + } + + tasks, total, err := t.config.V1.OLAP().ListTasks( + ctx.Request().Context(), + tenantId, + opts, + ) + + if err != nil { + return nil, err + } + + taskIdToWorkflowName := make(map[int64]string) + + result := transformers.TaskRunDataRowToWorkflowRunsMany(tasks, taskIdToWorkflowName, total, limit, offset) + + // 
Search for api errors to see how we handle errors in other cases + return gen.V1WorkflowRunList200JSONResponse( + result, + ), nil +} + +func (t *V1WorkflowRunsService) V1WorkflowRunList(ctx echo.Context, request gen.V1WorkflowRunListRequestObject) (gen.V1WorkflowRunListResponseObject, error) { + if request.Params.OnlyTasks { + return t.OnlyTasks(ctx, request) + } else { + return t.WithDags(ctx, request) + } +} + +func (t *V1WorkflowRunsService) V1WorkflowRunDisplayNamesList(ctx echo.Context, request gen.V1WorkflowRunDisplayNamesListRequestObject) (gen.V1WorkflowRunDisplayNamesListResponseObject, error) { + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + + externalIds := make([]pgtype.UUID, len(request.Params.ExternalIds)) + + for i, id := range request.Params.ExternalIds { + externalIds[i] = sqlchelpers.UUIDFromStr(id.String()) + } + + displayNames, err := t.config.V1.OLAP().ListWorkflowRunDisplayNames( + ctx.Request().Context(), + tenant.ID, + externalIds, + ) + + if err != nil { + return nil, err + } + + result := transformers.ToWorkflowRunDisplayNamesList(displayNames) + + return gen.V1WorkflowRunDisplayNamesList200JSONResponse( + result, + ), nil +} diff --git a/api/v1/server/handlers/v1/workflow-runs/service.go b/api/v1/server/handlers/v1/workflow-runs/service.go new file mode 100644 index 000000000..2c5b06f9b --- /dev/null +++ b/api/v1/server/handlers/v1/workflow-runs/service.go @@ -0,0 +1,27 @@ +package workflowruns + +import ( + "context" + + "github.com/hatchet-dev/hatchet/api/v1/server/handlers/v1/proxy" + admincontracts "github.com/hatchet-dev/hatchet/internal/services/admin/contracts/v1" + "github.com/hatchet-dev/hatchet/pkg/config/server" + + client "github.com/hatchet-dev/hatchet/pkg/client/v1" +) + +type V1WorkflowRunsService struct { + config *server.ServerConfig + proxyTrigger *proxy.Proxy[admincontracts.TriggerWorkflowRunRequest, admincontracts.TriggerWorkflowRunResponse] +} + +func NewV1WorkflowRunsService(config *server.ServerConfig) 
*V1WorkflowRunsService { + proxyTrigger := proxy.NewProxy(config, func(ctx context.Context, cli *client.GRPCClient, in *admincontracts.TriggerWorkflowRunRequest) (*admincontracts.TriggerWorkflowRunResponse, error) { + return cli.Admin().TriggerWorkflowRun(ctx, in) + }) + + return &V1WorkflowRunsService{ + config: config, + proxyTrigger: proxyTrigger, + } +} diff --git a/api/v1/server/handlers/v1/workflow-runs/trigger.go b/api/v1/server/handlers/v1/workflow-runs/trigger.go new file mode 100644 index 000000000..30e84b35e --- /dev/null +++ b/api/v1/server/handlers/v1/workflow-runs/trigger.go @@ -0,0 +1,118 @@ +package workflowruns + +import ( + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/jackc/pgx/v5" + "github.com/labstack/echo/v4" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors" + "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" + "github.com/hatchet-dev/hatchet/internal/services/admin/contracts/v1" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" + v1 "github.com/hatchet-dev/hatchet/pkg/repository/v1" +) + +func (t *V1WorkflowRunsService) V1WorkflowRunCreate(ctx echo.Context, request gen.V1WorkflowRunCreateRequestObject) (gen.V1WorkflowRunCreateResponseObject, error) { + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) + + // make sure input can be marshalled and unmarshalled to input type + inputBytes, err := json.Marshal(request.Body.Input) + + if err != nil { + return gen.V1WorkflowRunCreate400JSONResponse( + apierrors.NewAPIErrors("Invalid input"), + ), nil + } + + var additionalMetadataBytes []byte + + if request.Body.AdditionalMetadata != nil { + + additionalMetadataBytes, err = json.Marshal(request.Body.AdditionalMetadata) + + if err != nil { + return gen.V1WorkflowRunCreate400JSONResponse( + apierrors.NewAPIErrors("Invalid 
additional metadata"), + ), nil + } + } + + grpcReq := &contracts.TriggerWorkflowRunRequest{ + WorkflowName: request.Body.WorkflowName, + Input: inputBytes, + AdditionalMetadata: additionalMetadataBytes, + } + + resp, err := t.proxyTrigger.Do( + ctx.Request().Context(), + tenant, + grpcReq, + ) + + if err != nil { + if e, ok := status.FromError(err); ok { + switch e.Code() { // nolint: gocritic + case codes.InvalidArgument: + return gen.V1WorkflowRunCreate400JSONResponse( + apierrors.NewAPIErrors(e.Message()), + ), nil + } + } + + return nil, err + } + + // loop for workflow to be created in the OLAP database + var rawWorkflowRun *v1.V1WorkflowRunPopulator + retries := 0 + + for retries < 10 { + rawWorkflowRun, err = t.config.V1.OLAP().ReadWorkflowRun( + ctx.Request().Context(), + sqlchelpers.UUIDFromStr(resp.ExternalId), + ) + + if err != nil && !errors.Is(err, pgx.ErrNoRows) { + return nil, err + } + + if err != nil && errors.Is(err, pgx.ErrNoRows) { + retries++ + time.Sleep(1 * time.Second) + continue + } + + break + } + + if rawWorkflowRun == nil || rawWorkflowRun.WorkflowRun == nil { + return nil, fmt.Errorf("rawWorkflowRun not populated, we are likely seeing high latency in creating tasks") + } + + if sqlchelpers.UUIDToStr(rawWorkflowRun.WorkflowRun.TenantID) != tenantId { + return nil, fmt.Errorf("tenantId mismatch in the triggered workflow run") + } + + details, err := t.getWorkflowRunDetails( + ctx.Request().Context(), + tenantId, + rawWorkflowRun, + ) + + if err != nil { + return nil, err + } + + // Search for api errors to see how we handle errors in other cases + return gen.V1WorkflowRunCreate200JSONResponse( + *details, + ), nil +} diff --git a/api/v1/server/handlers/webhook-worker/create.go b/api/v1/server/handlers/webhook-worker/create.go index d51b51d5e..2f4b07742 100644 --- a/api/v1/server/handlers/webhook-worker/create.go +++ b/api/v1/server/handlers/webhook-worker/create.go @@ -10,11 +10,13 @@ import ( 
"github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/random" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (i *WebhookWorkersService) WebhookCreate(ctx echo.Context, request gen.WebhookCreateRequestObject) (gen.WebhookCreateResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) var secret string if request.Body.Secret == nil { @@ -27,13 +29,13 @@ func (i *WebhookWorkersService) WebhookCreate(ctx echo.Context, request gen.Webh secret = *request.Body.Secret } - encSecret, err := i.config.Encryption.EncryptString(secret, tenant.ID) + encSecret, err := i.config.Encryption.EncryptString(secret, tenantId) if err != nil { return nil, err } ww, err := i.config.EngineRepository.WebhookWorker().CreateWebhookWorker(ctx.Request().Context(), &repository.CreateWebhookWorkerOpts{ - TenantId: tenant.ID, + TenantId: tenantId, Name: request.Body.Name, URL: request.Body.Url, Secret: encSecret, diff --git a/api/v1/server/handlers/webhook-worker/delete.go b/api/v1/server/handlers/webhook-worker/delete.go index 74bc08817..b109115fb 100644 --- a/api/v1/server/handlers/webhook-worker/delete.go +++ b/api/v1/server/handlers/webhook-worker/delete.go @@ -4,14 +4,16 @@ import ( "github.com/labstack/echo/v4" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (i *WebhookWorkersService) WebhookDelete(ctx echo.Context, request gen.WebhookDeleteRequestObject) (gen.WebhookDeleteResponseObject, error) { - tenant := 
ctx.Get("tenant").(*db.TenantModel) - webhook := ctx.Get("webhook").(*db.WebhookWorkerModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) + webhook := ctx.Get("webhook").(*dbsqlc.WebhookWorker) - err := i.config.EngineRepository.WebhookWorker().SoftDeleteWebhookWorker(ctx.Request().Context(), webhook.ID, tenant.ID) + err := i.config.EngineRepository.WebhookWorker().SoftDeleteWebhookWorker(ctx.Request().Context(), sqlchelpers.UUIDToStr(webhook.ID), tenantId) if err != nil { return nil, err } diff --git a/api/v1/server/handlers/webhook-worker/list.go b/api/v1/server/handlers/webhook-worker/list.go index 77e6c9137..dc6b36116 100644 --- a/api/v1/server/handlers/webhook-worker/list.go +++ b/api/v1/server/handlers/webhook-worker/list.go @@ -7,13 +7,15 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (i *WebhookWorkersService) WebhookList(ctx echo.Context, request gen.WebhookListRequestObject) (gen.WebhookListResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) - webhooks, err := i.config.EngineRepository.WebhookWorker().ListActiveWebhookWorkers(context.Background(), tenant.ID) + webhooks, err := i.config.EngineRepository.WebhookWorker().ListActiveWebhookWorkers(context.Background(), tenantId) if err != nil { return nil, err diff --git a/api/v1/server/handlers/workers/get.go b/api/v1/server/handlers/workers/get.go index 107916e88..100e2db98 100644 --- a/api/v1/server/handlers/workers/get.go +++ b/api/v1/server/handlers/workers/get.go @@ -1,15 +1,31 @@ package workers import ( + "fmt" + "github.com/labstack/echo/v4" 
"github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + transformersv1 "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers/v1" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkerService) WorkerGet(ctx echo.Context, request gen.WorkerGetRequestObject) (gen.WorkerGetResponseObject, error) { + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + + switch tenant.Version { + case dbsqlc.TenantMajorEngineVersionV0: + return t.workerGetV0(ctx, tenant, request) + case dbsqlc.TenantMajorEngineVersionV1: + return t.workerGetV1(ctx, tenant, request) + default: + return nil, fmt.Errorf("unsupported tenant version: %s", string(tenant.Version)) + } +} + +func (t *WorkerService) workerGetV0(ctx echo.Context, tenant *dbsqlc.Tenant, request gen.WorkerGetRequestObject) (gen.WorkerGetResponseObject, error) { worker := ctx.Get("worker").(*dbsqlc.GetWorkerByIdRow) slotState, recent, err := t.config.APIRepository.Worker().ListWorkerState( @@ -63,3 +79,64 @@ func (t *WorkerService) WorkerGet(ctx echo.Context, request gen.WorkerGetRequest return gen.WorkerGet200JSONResponse(workerResp), nil } + +func (t *WorkerService) workerGetV1(ctx echo.Context, tenant *dbsqlc.Tenant, request gen.WorkerGetRequestObject) (gen.WorkerGetResponseObject, error) { + workerV0 := ctx.Get("worker").(*dbsqlc.GetWorkerByIdRow) + + worker, err := t.config.V1.Workers().GetWorkerById(sqlchelpers.UUIDToStr(workerV0.Worker.ID)) + + if err != nil { + return nil, err + } + + slotState, recent, err := t.config.V1.Workers().ListWorkerState( + sqlchelpers.UUIDToStr(worker.Worker.TenantId), + sqlchelpers.UUIDToStr(worker.Worker.ID), + int(worker.Worker.MaxRuns), + ) + + if err != nil { + return nil, err + } + + actions, err 
:= t.config.APIRepository.Worker().GetWorkerActionsByWorkerId( + sqlchelpers.UUIDToStr(worker.Worker.TenantId), + sqlchelpers.UUIDToStr(worker.Worker.ID), + ) + + if err != nil { + return nil, err + } + + respStepRuns := make([]gen.RecentStepRuns, len(recent)) + + for i := range recent { + genStepRun, err := transformers.ToRecentStepRun(recent[i]) + + if err != nil { + return nil, err + } + + respStepRuns[i] = *genStepRun + } + + slots := int(worker.RemainingSlots) + + workerResp := *transformersv1.ToWorkerSqlc(&worker.Worker, &slots, &worker.WebhookUrl.String, actions) + + workerResp.RecentStepRuns = &respStepRuns + workerResp.Slots = transformersv1.ToSlotState(slotState, slots) + + affinity, err := t.config.APIRepository.Worker().ListWorkerLabels( + sqlchelpers.UUIDToStr(worker.Worker.TenantId), + sqlchelpers.UUIDToStr(worker.Worker.ID), + ) + + if err != nil { + return nil, err + } + + workerResp.Labels = transformers.ToWorkerLabels(affinity) + + return gen.WorkerGet200JSONResponse(workerResp), nil +} diff --git a/api/v1/server/handlers/workers/list.go b/api/v1/server/handlers/workers/list.go index c0ac7352b..b2ed5092c 100644 --- a/api/v1/server/handlers/workers/list.go +++ b/api/v1/server/handlers/workers/list.go @@ -1,22 +1,38 @@ package workers import ( + "fmt" "time" "github.com/labstack/echo/v4" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" + transformersv1 "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers/v1" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkerService) WorkerList(ctx echo.Context, request gen.WorkerListRequestObject) (gen.WorkerListResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) 
+ + switch tenant.Version { + case dbsqlc.TenantMajorEngineVersionV0: + return t.workerListV0(ctx, tenant, request) + case dbsqlc.TenantMajorEngineVersionV1: + return t.workerListV1(ctx, tenant, request) + default: + return nil, fmt.Errorf("unsupported tenant version: %s", string(tenant.Version)) + } +} + +func (t *WorkerService) workerListV0(ctx echo.Context, tenant *dbsqlc.Tenant, request gen.WorkerListRequestObject) (gen.WorkerListResponseObject, error) { + tenantId := sqlchelpers.UUIDToStr(tenant.ID) sixSecAgo := time.Now().Add(-24 * time.Hour) - workers, err := t.config.APIRepository.Worker().ListWorkers(tenant.ID, &repository.ListWorkersOpts{ + workers, err := t.config.APIRepository.Worker().ListWorkers(tenantId, &repository.ListWorkersOpts{ LastHeartbeatAfter: &sixSecAgo, }) @@ -39,3 +55,32 @@ func (t *WorkerService) WorkerList(ctx echo.Context, request gen.WorkerListReque }, ), nil } + +func (t *WorkerService) workerListV1(ctx echo.Context, tenant *dbsqlc.Tenant, request gen.WorkerListRequestObject) (gen.WorkerListResponseObject, error) { + tenantId := sqlchelpers.UUIDToStr(tenant.ID) + + sixSecAgo := time.Now().Add(-24 * time.Hour) + + workers, err := t.config.V1.Workers().ListWorkers(tenantId, &repository.ListWorkersOpts{ + LastHeartbeatAfter: &sixSecAgo, + }) + + if err != nil { + return nil, err + } + + rows := make([]gen.Worker, len(workers)) + + for i, worker := range workers { + workerCp := worker + slots := int(worker.RemainingSlots) + + rows[i] = *transformersv1.ToWorkerSqlc(&workerCp.Worker, &slots, &workerCp.WebhookUrl.String, nil) + } + + return gen.WorkerList200JSONResponse( + gen.WorkerList{ + Rows: &rows, + }, + ), nil +} diff --git a/api/v1/server/handlers/workers/update.go b/api/v1/server/handlers/workers/update.go index e82c17e61..689f9afff 100644 --- a/api/v1/server/handlers/workers/update.go +++ b/api/v1/server/handlers/workers/update.go @@ -6,8 +6,8 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" 
"github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkerService) WorkerUpdate(ctx echo.Context, request gen.WorkerUpdateRequestObject) (gen.WorkerUpdateResponseObject, error) { diff --git a/api/v1/server/handlers/workflow-runs/replay_batch.go b/api/v1/server/handlers/workflow-runs/replay_batch.go index 1c981b561..abfbeac24 100644 --- a/api/v1/server/handlers/workflow-runs/replay_batch.go +++ b/api/v1/server/handlers/workflow-runs/replay_batch.go @@ -12,12 +12,13 @@ import ( "github.com/hatchet-dev/hatchet/internal/msgqueue" "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkflowRunsService) WorkflowRunUpdateReplay(ctx echo.Context, request gen.WorkflowRunUpdateReplayRequestObject) (gen.WorkflowRunUpdateReplayResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) workflowRunIds := make([]string, len(request.Body.WorkflowRunIds)) @@ -28,7 +29,7 @@ func (t *WorkflowRunsService) WorkflowRunUpdateReplay(ctx echo.Context, request limit := 500 // make sure all workflow runs belong to the tenant - filteredWorkflowRuns, err := t.config.EngineRepository.WorkflowRun().ListWorkflowRuns(ctx.Request().Context(), tenant.ID, &repository.ListWorkflowRunsOpts{ + filteredWorkflowRuns, err := 
t.config.EngineRepository.WorkflowRun().ListWorkflowRuns(ctx.Request().Context(), tenantId, &repository.ListWorkflowRunsOpts{ Ids: workflowRunIds, Limit: &limit, }) @@ -44,7 +45,7 @@ func (t *WorkflowRunsService) WorkflowRunUpdateReplay(ctx echo.Context, request err = t.config.MessageQueue.AddMessage( ctx.Request().Context(), msgqueue.WORKFLOW_PROCESSING_QUEUE, - tasktypes.WorkflowRunReplayToTask(tenant.ID, sqlchelpers.UUIDToStr(filteredWorkflowRuns.Rows[i].WorkflowRun.ID)), + tasktypes.WorkflowRunReplayToTask(tenantId, sqlchelpers.UUIDToStr(filteredWorkflowRuns.Rows[i].WorkflowRun.ID)), ) if err != nil { @@ -59,7 +60,7 @@ func (t *WorkflowRunsService) WorkflowRunUpdateReplay(ctx echo.Context, request dbCtx, cancel := context.WithTimeout(ctx.Request().Context(), 60*time.Second) defer cancel() - newWorkflowRuns, err := t.config.APIRepository.WorkflowRun().ListWorkflowRuns(dbCtx, tenant.ID, &repository.ListWorkflowRunsOpts{ + newWorkflowRuns, err := t.config.APIRepository.WorkflowRun().ListWorkflowRuns(dbCtx, tenantId, &repository.ListWorkflowRunsOpts{ Ids: workflowRunIds, Limit: &limit, }) diff --git a/api/v1/server/handlers/workflows/cancel.go b/api/v1/server/handlers/workflows/cancel.go index 701673506..3616ea39d 100644 --- a/api/v1/server/handlers/workflows/cancel.go +++ b/api/v1/server/handlers/workflows/cancel.go @@ -12,12 +12,13 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/internal/msgqueue" "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkflowService) WorkflowRunCancel(ctx echo.Context, request gen.WorkflowRunCancelRequestObject) (gen.WorkflowRunCancelResponseObject, error) { - tenant := 
ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) runIds := request.Body.WorkflowRunIds var wg sync.WaitGroup @@ -32,7 +33,7 @@ func (t *WorkflowService) WorkflowRunCancel(ctx echo.Context, request gen.Workfl // Lookup step runs for the workflow run runIdStr := runId.String() - jobRun, err := t.config.EngineRepository.JobRun().ListJobRunsForWorkflowRun(ctx.Request().Context(), tenant.ID, runIdStr) + jobRun, err := t.config.EngineRepository.JobRun().ListJobRunsForWorkflowRun(ctx.Request().Context(), tenantId, runIdStr) if err != nil { returnErr = multierror.Append(err, fmt.Errorf("failed to list job runs for workflow run %s", runIdStr)) return @@ -46,7 +47,7 @@ func (t *WorkflowService) WorkflowRunCancel(ctx echo.Context, request gen.Workfl err = t.config.MessageQueue.AddMessage( ctx.Request().Context(), msgqueue.JOB_PROCESSING_QUEUE, - tasktypes.JobRunCancelledToTask(tenant.ID, jobRunId, &reason), + tasktypes.JobRunCancelledToTask(tenantId, jobRunId, &reason), ) if err != nil { returnErr = multierror.Append(err, fmt.Errorf("failed to send cancel task for job run %s", jobRunId)) diff --git a/api/v1/server/handlers/workflows/create_cron.go b/api/v1/server/handlers/workflows/create_cron.go index 49a2bf31e..f45f7d137 100644 --- a/api/v1/server/handlers/workflows/create_cron.go +++ b/api/v1/server/handlers/workflows/create_cron.go @@ -9,25 +9,26 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkflowService) CronWorkflowTriggerCreate(ctx echo.Context, request 
gen.CronWorkflowTriggerCreateRequestObject) (gen.CronWorkflowTriggerCreateResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) if request.Body.CronName == "" { return gen.CronWorkflowTriggerCreate400JSONResponse(apierrors.NewAPIErrors("cron name is required")), nil } - workflow, err := t.config.EngineRepository.Workflow().GetWorkflowByName(ctx.Request().Context(), tenant.ID, request.Workflow) + workflow, err := t.config.EngineRepository.Workflow().GetWorkflowByName(ctx.Request().Context(), tenantId, request.Workflow) if err != nil { return gen.CronWorkflowTriggerCreate400JSONResponse(apierrors.NewAPIErrors("workflow not found")), nil } cronTrigger, err := t.config.APIRepository.Workflow().CreateCronWorkflow( - ctx.Request().Context(), tenant.ID, &repository.CreateCronWorkflowTriggerOpts{ + ctx.Request().Context(), tenantId, &repository.CreateCronWorkflowTriggerOpts{ Name: request.Body.CronName, Cron: request.Body.CronExpression, Input: request.Body.Input, diff --git a/api/v1/server/handlers/workflows/create_scheduled.go b/api/v1/server/handlers/workflows/create_scheduled.go index 5d21205dd..95ea885d8 100644 --- a/api/v1/server/handlers/workflows/create_scheduled.go +++ b/api/v1/server/handlers/workflows/create_scheduled.go @@ -7,20 +7,21 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkflowService) ScheduledWorkflowRunCreate(ctx echo.Context, request gen.ScheduledWorkflowRunCreateRequestObject) (gen.ScheduledWorkflowRunCreateResponseObject, error) { 
- tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) - workflow, err := t.config.EngineRepository.Workflow().GetWorkflowByName(ctx.Request().Context(), tenant.ID, request.Workflow) + workflow, err := t.config.EngineRepository.Workflow().GetWorkflowByName(ctx.Request().Context(), tenantId, request.Workflow) if err != nil { return gen.ScheduledWorkflowRunCreate400JSONResponse(apierrors.NewAPIErrors("workflow not found")), nil } - scheduled, err := t.config.APIRepository.Workflow().CreateScheduledWorkflow(ctx.Request().Context(), tenant.ID, &repository.CreateScheduledWorkflowRunForWorkflowOpts{ + scheduled, err := t.config.APIRepository.Workflow().CreateScheduledWorkflow(ctx.Request().Context(), tenantId, &repository.CreateScheduledWorkflowRunForWorkflowOpts{ ScheduledTrigger: request.Body.TriggerAt, Input: request.Body.Input, AdditionalMetadata: request.Body.AdditionalMetadata, diff --git a/api/v1/server/handlers/workflows/delete.go b/api/v1/server/handlers/workflows/delete.go index 68345ee75..525f21aea 100644 --- a/api/v1/server/handlers/workflows/delete.go +++ b/api/v1/server/handlers/workflows/delete.go @@ -4,16 +4,16 @@ import ( "github.com/labstack/echo/v4" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkflowService) WorkflowDelete(ctx echo.Context, request gen.WorkflowDeleteRequestObject) (gen.WorkflowDeleteResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) workflow := 
ctx.Get("workflow").(*dbsqlc.GetWorkflowByIdRow) - _, err := t.config.APIRepository.Workflow().DeleteWorkflow(ctx.Request().Context(), tenant.ID, sqlchelpers.UUIDToStr(workflow.Workflow.ID)) + _, err := t.config.APIRepository.Workflow().DeleteWorkflow(ctx.Request().Context(), tenantId, sqlchelpers.UUIDToStr(workflow.Workflow.ID)) if err != nil { return nil, err diff --git a/api/v1/server/handlers/workflows/delete_cron.go b/api/v1/server/handlers/workflows/delete_cron.go index d4d07f29d..fa1e5618f 100644 --- a/api/v1/server/handlers/workflows/delete_cron.go +++ b/api/v1/server/handlers/workflows/delete_cron.go @@ -7,13 +7,12 @@ import ( "github.com/labstack/echo/v4" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkflowService) WorkflowCronDelete(ctx echo.Context, request gen.WorkflowCronDeleteRequestObject) (gen.WorkflowCronDeleteResponseObject, error) { - _ = ctx.Get("tenant").(*db.TenantModel) + _ = ctx.Get("tenant").(*dbsqlc.Tenant) cron := ctx.Get("cron-workflow").(*dbsqlc.ListCronWorkflowsRow) dbCtx, cancel := context.WithTimeout(ctx.Request().Context(), 30*time.Second) diff --git a/api/v1/server/handlers/workflows/delete_scheduled.go b/api/v1/server/handlers/workflows/delete_scheduled.go index d30faa33d..c56aaa03b 100644 --- a/api/v1/server/handlers/workflows/delete_scheduled.go +++ b/api/v1/server/handlers/workflows/delete_scheduled.go @@ -7,8 +7,8 @@ import ( "github.com/labstack/echo/v4" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + 
"github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkflowService) WorkflowScheduledDelete(ctx echo.Context, request gen.WorkflowScheduledDeleteRequestObject) (gen.WorkflowScheduledDeleteResponseObject, error) { diff --git a/api/v1/server/handlers/workflows/get.go b/api/v1/server/handlers/workflows/get.go index a3d90da60..c0c817109 100644 --- a/api/v1/server/handlers/workflows/get.go +++ b/api/v1/server/handlers/workflows/get.go @@ -5,20 +5,20 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkflowService) WorkflowGet(ctx echo.Context, request gen.WorkflowGetRequestObject) (gen.WorkflowGetResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) workflow := ctx.Get("workflow").(*dbsqlc.GetWorkflowByIdRow) if workflow == nil || !workflow.WorkflowVersionId.Valid { return gen.WorkflowGet404JSONResponse(gen.APIErrors{}), nil } - version, _, _, _, err := t.config.APIRepository.Workflow().GetWorkflowVersionById(tenant.ID, sqlchelpers.UUIDToStr(workflow.WorkflowVersionId)) + version, _, _, _, err := t.config.APIRepository.Workflow().GetWorkflowVersionById(tenantId, sqlchelpers.UUIDToStr(workflow.WorkflowVersionId)) if err != nil { return nil, err diff --git a/api/v1/server/handlers/workflows/get_cron.go b/api/v1/server/handlers/workflows/get_cron.go index 121870f71..c5b7ebab9 100644 --- a/api/v1/server/handlers/workflows/get_cron.go +++ 
b/api/v1/server/handlers/workflows/get_cron.go @@ -9,16 +9,18 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkflowService) WorkflowCronGet(ctx echo.Context, request gen.WorkflowCronGetRequestObject) (gen.WorkflowCronGetResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) dbCtx, cancel := context.WithTimeout(ctx.Request().Context(), 30*time.Second) defer cancel() - scheduled, err := t.config.APIRepository.Workflow().GetCronWorkflow(dbCtx, tenant.ID, request.CronWorkflow.String()) + scheduled, err := t.config.APIRepository.Workflow().GetCronWorkflow(dbCtx, tenantId, request.CronWorkflow.String()) if err != nil { return nil, err diff --git a/api/v1/server/handlers/workflows/get_metrics.go b/api/v1/server/handlers/workflows/get_metrics.go index 5f1960cc0..29a188294 100644 --- a/api/v1/server/handlers/workflows/get_metrics.go +++ b/api/v1/server/handlers/workflows/get_metrics.go @@ -3,18 +3,19 @@ package workflows import ( "errors" + "github.com/jackc/pgx/v5" "github.com/labstack/echo/v4" "github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkflowService) 
WorkflowGetMetrics(ctx echo.Context, request gen.WorkflowGetMetricsRequestObject) (gen.WorkflowGetMetricsResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) workflow := ctx.Get("workflow").(*dbsqlc.GetWorkflowByIdRow) opts := &repository.GetWorkflowMetricsOpts{} @@ -27,10 +28,10 @@ func (t *WorkflowService) WorkflowGetMetrics(ctx echo.Context, request gen.Workf opts.GroupKey = request.Params.GroupKey } - metrics, err := t.config.APIRepository.Workflow().GetWorkflowMetrics(tenant.ID, sqlchelpers.UUIDToStr(workflow.Workflow.ID), opts) + metrics, err := t.config.APIRepository.Workflow().GetWorkflowMetrics(tenantId, sqlchelpers.UUIDToStr(workflow.Workflow.ID), opts) if err != nil { - if errors.Is(err, db.ErrNotFound) { + if errors.Is(err, pgx.ErrNoRows) { return gen.WorkflowGetMetrics404JSONResponse( apierrors.NewAPIErrors("workflow not found"), ), nil diff --git a/api/v1/server/handlers/workflows/get_run.go b/api/v1/server/handlers/workflows/get_run.go index 0e4706973..6ff06b2d6 100644 --- a/api/v1/server/handlers/workflows/get_run.go +++ b/api/v1/server/handlers/workflows/get_run.go @@ -5,8 +5,8 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkflowService) WorkflowRunGet(ctx echo.Context, request gen.WorkflowRunGetRequestObject) (gen.WorkflowRunGetResponseObject, error) { diff --git a/api/v1/server/handlers/workflows/get_scheduled.go b/api/v1/server/handlers/workflows/get_scheduled.go index 6d7f47895..573eee2d0 100644 --- a/api/v1/server/handlers/workflows/get_scheduled.go +++ 
b/api/v1/server/handlers/workflows/get_scheduled.go @@ -6,7 +6,7 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" ) func (t *WorkflowService) WorkflowScheduledGet(ctx echo.Context, request gen.WorkflowScheduledGetRequestObject) (gen.WorkflowScheduledGetResponseObject, error) { diff --git a/api/v1/server/handlers/workflows/get_shape.go b/api/v1/server/handlers/workflows/get_shape.go index 21e53cb29..d0feb6efe 100644 --- a/api/v1/server/handlers/workflows/get_shape.go +++ b/api/v1/server/handlers/workflows/get_shape.go @@ -8,8 +8,8 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkflowService) WorkflowRunGetShape(ctx echo.Context, request gen.WorkflowRunGetShapeRequestObject) (gen.WorkflowRunGetShapeResponseObject, error) { diff --git a/api/v1/server/handlers/workflows/get_version.go b/api/v1/server/handlers/workflows/get_version.go index 7cee5314f..8d53fd873 100644 --- a/api/v1/server/handlers/workflows/get_version.go +++ b/api/v1/server/handlers/workflows/get_version.go @@ -4,18 +4,19 @@ import ( "errors" "fmt" + "github.com/jackc/pgx/v5" "github.com/labstack/echo/v4" "github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - 
"github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkflowService) WorkflowVersionGet(ctx echo.Context, request gen.WorkflowVersionGetRequestObject) (gen.WorkflowVersionGetResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) workflow := ctx.Get("workflow").(*dbsqlc.GetWorkflowByIdRow) var workflowVersionId string @@ -29,7 +30,7 @@ func (t *WorkflowService) WorkflowVersionGet(ctx echo.Context, request gen.Workf ) if err != nil { - if errors.Is(err, db.ErrNotFound) { + if errors.Is(err, pgx.ErrNoRows) { return gen.WorkflowVersionGet404JSONResponse( apierrors.NewAPIErrors("workflow not found"), ), nil @@ -42,10 +43,10 @@ func (t *WorkflowService) WorkflowVersionGet(ctx echo.Context, request gen.Workf workflowVersionId = sqlchelpers.UUIDToStr(row.WorkflowVersionId) } - row, crons, events, scheduleT, err := t.config.APIRepository.Workflow().GetWorkflowVersionById(tenant.ID, workflowVersionId) + row, crons, events, scheduleT, err := t.config.APIRepository.Workflow().GetWorkflowVersionById(tenantId, workflowVersionId) if err != nil { - if errors.Is(err, db.ErrNotFound) { + if errors.Is(err, pgx.ErrNoRows) { return gen.WorkflowVersionGet404JSONResponse( apierrors.NewAPIErrors("version not found"), ), nil diff --git a/api/v1/server/handlers/workflows/get_worker_count.go b/api/v1/server/handlers/workflows/get_worker_count.go index 1370b053d..71a3818c9 100644 --- a/api/v1/server/handlers/workflows/get_worker_count.go +++ b/api/v1/server/handlers/workflows/get_worker_count.go @@ -3,24 +3,25 @@ package workflows import ( "errors" + "github.com/jackc/pgx/v5" "github.com/labstack/echo/v4" 
"github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkflowService) WorkflowGetWorkersCount(ctx echo.Context, request gen.WorkflowGetWorkersCountRequestObject) (gen.WorkflowGetWorkersCountResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) w := ctx.Get("workflow").(*dbsqlc.GetWorkflowByIdRow) workflow := sqlchelpers.UUIDToStr(w.Workflow.ID) - freeSlotCount, maxSlotCount, err := t.config.APIRepository.Workflow().GetWorkflowWorkerCount(tenant.ID, workflow) + freeSlotCount, maxSlotCount, err := t.config.APIRepository.Workflow().GetWorkflowWorkerCount(tenantId, workflow) if err != nil { - if errors.Is(err, db.ErrNotFound) { + if errors.Is(err, pgx.ErrNoRows) { return gen.WorkflowGetWorkersCount400JSONResponse( apierrors.NewAPIErrors("workflow not found"), ), nil diff --git a/api/v1/server/handlers/workflows/list.go b/api/v1/server/handlers/workflows/list.go index cf59bbccb..df4960923 100644 --- a/api/v1/server/handlers/workflows/list.go +++ b/api/v1/server/handlers/workflows/list.go @@ -8,11 +8,13 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkflowService) WorkflowList(ctx echo.Context, request 
gen.WorkflowListRequestObject) (gen.WorkflowListResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) if request.Params.Limit == nil { request.Params.Limit = new(int) @@ -40,7 +42,7 @@ func (t *WorkflowService) WorkflowList(ctx echo.Context, request gen.WorkflowLis Name: &name, } - listResp, err := t.config.APIRepository.Workflow().ListWorkflows(tenant.ID, listOpts) + listResp, err := t.config.APIRepository.Workflow().ListWorkflows(tenantId, listOpts) if err != nil { return nil, err diff --git a/api/v1/server/handlers/workflows/list_crons.go b/api/v1/server/handlers/workflows/list_crons.go index 89d06e2d4..653046970 100644 --- a/api/v1/server/handlers/workflows/list_crons.go +++ b/api/v1/server/handlers/workflows/list_crons.go @@ -13,11 +13,13 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkflowService) CronWorkflowList(ctx echo.Context, request gen.CronWorkflowListRequestObject) (gen.CronWorkflowListResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) limit := 50 offset := 0 @@ -76,7 +78,7 @@ func (t *WorkflowService) CronWorkflowList(ctx echo.Context, request gen.CronWor dbCtx, cancel := context.WithTimeout(ctx.Request().Context(), 30*time.Second) defer cancel() - crons, count, err := t.config.APIRepository.Workflow().ListCronWorkflows(dbCtx, tenant.ID, listOpts) + crons, count, err := t.config.APIRepository.Workflow().ListCronWorkflows(dbCtx, tenantId, listOpts) if err != nil { return nil, err diff 
--git a/api/v1/server/handlers/workflows/list_runs.go b/api/v1/server/handlers/workflows/list_runs.go index e5a200b17..af37b0df9 100644 --- a/api/v1/server/handlers/workflows/list_runs.go +++ b/api/v1/server/handlers/workflows/list_runs.go @@ -13,11 +13,13 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkflowService) WorkflowRunList(ctx echo.Context, request gen.WorkflowRunListRequestObject) (gen.WorkflowRunListResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) limit := 50 offset := 0 @@ -88,10 +90,10 @@ func (t *WorkflowService) WorkflowRunList(ctx echo.Context, request gen.Workflow } if request.Params.Statuses != nil { - statuses := make([]db.WorkflowRunStatus, len(*request.Params.Statuses)) + statuses := make([]dbsqlc.WorkflowRunStatus, len(*request.Params.Statuses)) for i, status := range *request.Params.Statuses { - statuses[i] = db.WorkflowRunStatus(status) + statuses[i] = dbsqlc.WorkflowRunStatus(status) } listOpts.Statuses = &statuses @@ -117,7 +119,7 @@ func (t *WorkflowService) WorkflowRunList(ctx echo.Context, request gen.Workflow dbCtx, cancel := context.WithTimeout(ctx.Request().Context(), 30*time.Second) defer cancel() - workflowRuns, err := t.config.APIRepository.WorkflowRun().ListWorkflowRuns(dbCtx, tenant.ID, listOpts) + workflowRuns, err := t.config.APIRepository.WorkflowRun().ListWorkflowRuns(dbCtx, tenantId, listOpts) if err != nil { return nil, err diff --git a/api/v1/server/handlers/workflows/list_scheduled.go b/api/v1/server/handlers/workflows/list_scheduled.go index cb766efbe..38bd40e53 
100644 --- a/api/v1/server/handlers/workflows/list_scheduled.go +++ b/api/v1/server/handlers/workflows/list_scheduled.go @@ -13,11 +13,13 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkflowService) WorkflowScheduledList(ctx echo.Context, request gen.WorkflowScheduledListRequestObject) (gen.WorkflowScheduledListResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) limit := 50 offset := 0 @@ -57,10 +59,10 @@ func (t *WorkflowService) WorkflowScheduledList(ctx echo.Context, request gen.Wo } if request.Params.Statuses != nil { - statuses := make([]db.WorkflowRunStatus, len(*request.Params.Statuses)) + statuses := make([]dbsqlc.WorkflowRunStatus, len(*request.Params.Statuses)) for i, status := range *request.Params.Statuses { - statuses[i] = db.WorkflowRunStatus(status) + statuses[i] = dbsqlc.WorkflowRunStatus(status) } listOpts.Statuses = &statuses @@ -86,7 +88,7 @@ func (t *WorkflowService) WorkflowScheduledList(ctx echo.Context, request gen.Wo dbCtx, cancel := context.WithTimeout(ctx.Request().Context(), 30*time.Second) defer cancel() - scheduled, count, err := t.config.APIRepository.WorkflowRun().ListScheduledWorkflows(dbCtx, tenant.ID, listOpts) + scheduled, count, err := t.config.APIRepository.WorkflowRun().ListScheduledWorkflows(dbCtx, tenantId, listOpts) if err != nil { return nil, err diff --git a/api/v1/server/handlers/workflows/metrics_runs.go b/api/v1/server/handlers/workflows/metrics_runs.go index 874a6bb47..1a2e92949 100644 --- a/api/v1/server/handlers/workflows/metrics_runs.go +++ 
b/api/v1/server/handlers/workflows/metrics_runs.go @@ -11,11 +11,13 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkflowService) WorkflowRunGetMetrics(ctx echo.Context, request gen.WorkflowRunGetMetricsRequestObject) (gen.WorkflowRunGetMetricsResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) listOpts := &repository.WorkflowRunsMetricsOpts{} @@ -67,7 +69,7 @@ func (t *WorkflowService) WorkflowRunGetMetrics(ctx echo.Context, request gen.Wo dbCtx, cancel := context.WithTimeout(ctx.Request().Context(), 30*time.Second) defer cancel() - workflowRunsMetricsCount, err := t.config.APIRepository.WorkflowRun().WorkflowRunMetricsCount(dbCtx, tenant.ID, listOpts) + workflowRunsMetricsCount, err := t.config.APIRepository.WorkflowRun().WorkflowRunMetricsCount(dbCtx, tenantId, listOpts) if err != nil { return nil, err diff --git a/api/v1/server/handlers/workflows/trigger.go b/api/v1/server/handlers/workflows/trigger.go index a5a42513b..067af1648 100644 --- a/api/v1/server/handlers/workflows/trigger.go +++ b/api/v1/server/handlers/workflows/trigger.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" + "github.com/jackc/pgx/v5" "github.com/labstack/echo/v4" "github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors" @@ -14,13 +15,13 @@ import ( "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes" "github.com/hatchet-dev/hatchet/pkg/repository" "github.com/hatchet-dev/hatchet/pkg/repository/metered" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" - 
"github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkflowService) WorkflowRunCreate(ctx echo.Context, request gen.WorkflowRunCreateRequestObject) (gen.WorkflowRunCreateResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) workflow := ctx.Get("workflow").(*dbsqlc.GetWorkflowByIdRow) var workflowVersionId string @@ -38,10 +39,10 @@ func (t *WorkflowService) WorkflowRunCreate(ctx echo.Context, request gen.Workfl workflowVersionId = sqlchelpers.UUIDToStr(workflow.WorkflowVersionId) } - workflowVersion, err := t.config.EngineRepository.Workflow().GetWorkflowVersionById(ctx.Request().Context(), tenant.ID, workflowVersionId) + workflowVersion, err := t.config.EngineRepository.Workflow().GetWorkflowVersionById(ctx.Request().Context(), tenantId, workflowVersionId) if err != nil { - if errors.Is(err, db.ErrNotFound) { + if errors.Is(err, pgx.ErrNoRows) { return gen.WorkflowRunCreate400JSONResponse( apierrors.NewAPIErrors("version not found"), ), nil @@ -83,7 +84,7 @@ func (t *WorkflowService) WorkflowRunCreate(ctx echo.Context, request gen.Workfl return nil, err } - createdWorkflowRun, err := t.config.APIRepository.WorkflowRun().CreateNewWorkflowRun(ctx.Request().Context(), tenant.ID, createOpts) + createdWorkflowRun, err := t.config.APIRepository.WorkflowRun().CreateNewWorkflowRun(ctx.Request().Context(), tenantId, createOpts) if err == metered.ErrResourceExhausted { return gen.WorkflowRunCreate429JSONResponse( @@ -109,7 +110,7 @@ func (t *WorkflowService) WorkflowRunCreate(ctx echo.Context, request gen.Workfl return nil, fmt.Errorf("could not add workflow run to queue: %w", err) } - workflowRun, err := t.config.APIRepository.WorkflowRun().GetWorkflowRunById(ctx.Request().Context(), tenant.ID, 
sqlchelpers.UUIDToStr(createdWorkflowRun.ID)) + workflowRun, err := t.config.APIRepository.WorkflowRun().GetWorkflowRunById(ctx.Request().Context(), tenantId, sqlchelpers.UUIDToStr(createdWorkflowRun.ID)) if err != nil { return nil, fmt.Errorf("could not get workflow run: %w", err) diff --git a/api/v1/server/handlers/workflows/update.go b/api/v1/server/handlers/workflows/update.go index af78f43b6..c40989ccd 100644 --- a/api/v1/server/handlers/workflows/update.go +++ b/api/v1/server/handlers/workflows/update.go @@ -6,20 +6,20 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func (t *WorkflowService) WorkflowUpdate(ctx echo.Context, request gen.WorkflowUpdateRequestObject) (gen.WorkflowUpdateResponseObject, error) { - tenant := ctx.Get("tenant").(*db.TenantModel) + tenant := ctx.Get("tenant").(*dbsqlc.Tenant) + tenantId := sqlchelpers.UUIDToStr(tenant.ID) workflow := ctx.Get("workflow").(*dbsqlc.GetWorkflowByIdRow) opts := repository.UpdateWorkflowOpts{ IsPaused: request.Body.IsPaused, } - updated, err := t.config.APIRepository.Workflow().UpdateWorkflow(ctx.Request().Context(), tenant.ID, sqlchelpers.UUIDToStr(workflow.Workflow.ID), &opts) + updated, err := t.config.APIRepository.Workflow().UpdateWorkflow(ctx.Request().Context(), tenantId, sqlchelpers.UUIDToStr(workflow.Workflow.ID), &opts) if err != nil { return nil, err diff --git a/api/v1/server/oas/gen/openapi.gen.go b/api/v1/server/oas/gen/openapi.gen.go index 370353606..1ac77f4b7 100644 --- a/api/v1/server/oas/gen/openapi.gen.go +++ 
b/api/v1/server/oas/gen/openapi.gen.go @@ -179,6 +179,51 @@ const ( WORKFLOWRUN TenantResource = "WORKFLOW_RUN" ) +// Defines values for TenantVersion. +const ( + V0 TenantVersion = "V0" + V1 TenantVersion = "V1" +) + +// Defines values for V1TaskEventType. +const ( + V1TaskEventTypeACKNOWLEDGED V1TaskEventType = "ACKNOWLEDGED" + V1TaskEventTypeASSIGNED V1TaskEventType = "ASSIGNED" + V1TaskEventTypeCANCELLED V1TaskEventType = "CANCELLED" + V1TaskEventTypeCREATED V1TaskEventType = "CREATED" + V1TaskEventTypeFAILED V1TaskEventType = "FAILED" + V1TaskEventTypeFINISHED V1TaskEventType = "FINISHED" + V1TaskEventTypeQUEUED V1TaskEventType = "QUEUED" + V1TaskEventTypeRATELIMITERROR V1TaskEventType = "RATE_LIMIT_ERROR" + V1TaskEventTypeREASSIGNED V1TaskEventType = "REASSIGNED" + V1TaskEventTypeREQUEUEDNOWORKER V1TaskEventType = "REQUEUED_NO_WORKER" + V1TaskEventTypeREQUEUEDRATELIMIT V1TaskEventType = "REQUEUED_RATE_LIMIT" + V1TaskEventTypeRETRIEDBYUSER V1TaskEventType = "RETRIED_BY_USER" + V1TaskEventTypeRETRYING V1TaskEventType = "RETRYING" + V1TaskEventTypeSCHEDULINGTIMEDOUT V1TaskEventType = "SCHEDULING_TIMED_OUT" + V1TaskEventTypeSENTTOWORKER V1TaskEventType = "SENT_TO_WORKER" + V1TaskEventTypeSKIPPED V1TaskEventType = "SKIPPED" + V1TaskEventTypeSLOTRELEASED V1TaskEventType = "SLOT_RELEASED" + V1TaskEventTypeSTARTED V1TaskEventType = "STARTED" + V1TaskEventTypeTIMEDOUT V1TaskEventType = "TIMED_OUT" + V1TaskEventTypeTIMEOUTREFRESHED V1TaskEventType = "TIMEOUT_REFRESHED" +) + +// Defines values for V1TaskStatus. +const ( + V1TaskStatusCANCELLED V1TaskStatus = "CANCELLED" + V1TaskStatusCOMPLETED V1TaskStatus = "COMPLETED" + V1TaskStatusFAILED V1TaskStatus = "FAILED" + V1TaskStatusQUEUED V1TaskStatus = "QUEUED" + V1TaskStatusRUNNING V1TaskStatus = "RUNNING" +) + +// Defines values for V1WorkflowType. +const ( + V1WorkflowTypeDAG V1WorkflowType = "DAG" + V1WorkflowTypeTASK V1WorkflowType = "TASK" +) + // Defines values for WorkerStatus. 
const ( ACTIVE WorkerStatus = "ACTIVE" @@ -202,9 +247,9 @@ const ( // Defines values for WorkflowKind. const ( - DAG WorkflowKind = "DAG" - DURABLE WorkflowKind = "DURABLE" - FUNCTION WorkflowKind = "FUNCTION" + WorkflowKindDAG WorkflowKind = "DAG" + WorkflowKindDURABLE WorkflowKind = "DURABLE" + WorkflowKindFUNCTION WorkflowKind = "FUNCTION" ) // Defines values for WorkflowRunOrderByDirection. @@ -889,7 +934,8 @@ type Tenant struct { Name string `json:"name"` // Slug The slug of the tenant. - Slug string `json:"slug"` + Slug string `json:"slug"` + Version TenantVersion `json:"version"` } // TenantAlertEmailGroup defines model for TenantAlertEmailGroup. @@ -1008,6 +1054,9 @@ type TenantStepRunQueueMetrics struct { Queues *map[string]int `json:"queues,omitempty"` } +// TenantVersion defines model for TenantVersion. +type TenantVersion string + // TriggerWorkflowRunRequest defines model for TriggerWorkflowRunRequest. type TriggerWorkflowRunRequest struct { AdditionalMetadata *map[string]interface{} `json:"additionalMetadata,omitempty"` @@ -1046,7 +1095,8 @@ type UpdateTenantRequest struct { MaxAlertingFrequency *string `json:"maxAlertingFrequency,omitempty" validate:"omitnil,duration"` // Name The name of the tenant. - Name *string `json:"name,omitempty"` + Name *string `json:"name,omitempty"` + Version *TenantVersion `json:"version,omitempty"` } // UpdateWorkerRequest defines model for UpdateWorkerRequest. @@ -1119,6 +1169,244 @@ type UserTenantPublic struct { Name *string `json:"name,omitempty"` } +// V1CancelTaskRequest defines model for V1CancelTaskRequest. +type V1CancelTaskRequest struct { + // ExternalIds A list of external IDs, which can refer to either task or workflow run external IDs + ExternalIds *[]openapi_types.UUID `json:"externalIds,omitempty"` + Filter *V1TaskFilter `json:"filter,omitempty"` +} + +// V1DagChildren defines model for V1DagChildren. 
+type V1DagChildren struct { + Children *[]V1TaskSummary `json:"children,omitempty"` + DagId *openapi_types.UUID `json:"dagId,omitempty"` +} + +// V1LogLine defines model for V1LogLine. +type V1LogLine struct { + // CreatedAt The creation date of the log line. + CreatedAt time.Time `json:"createdAt"` + + // Message The log message. + Message string `json:"message"` + + // Metadata The log metadata. + Metadata map[string]interface{} `json:"metadata"` +} + +// V1LogLineList defines model for V1LogLineList. +type V1LogLineList struct { + Pagination *PaginationResponse `json:"pagination,omitempty"` + Rows *[]V1LogLine `json:"rows,omitempty"` +} + +// V1ReplayTaskRequest defines model for V1ReplayTaskRequest. +type V1ReplayTaskRequest struct { + // ExternalIds A list of external IDs, which can refer to either task or workflow run external IDs + ExternalIds *[]openapi_types.UUID `json:"externalIds,omitempty"` + Filter *V1TaskFilter `json:"filter,omitempty"` +} + +// V1TaskEvent defines model for V1TaskEvent. +type V1TaskEvent struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + EventType V1TaskEventType `json:"eventType"` + Id int `json:"id"` + Message string `json:"message"` + Output *string `json:"output,omitempty"` + TaskDisplayName *string `json:"taskDisplayName,omitempty"` + TaskId openapi_types.UUID `json:"taskId"` + Timestamp time.Time `json:"timestamp"` + WorkerId *openapi_types.UUID `json:"workerId,omitempty"` +} + +// V1TaskEventList defines model for V1TaskEventList. +type V1TaskEventList struct { + Pagination *PaginationResponse `json:"pagination,omitempty"` + Rows *[]V1TaskEvent `json:"rows,omitempty"` +} + +// V1TaskEventType defines model for V1TaskEventType. +type V1TaskEventType string + +// V1TaskFilter defines model for V1TaskFilter. 
+type V1TaskFilter struct { + AdditionalMetadata *[]string `json:"additionalMetadata,omitempty"` + Since time.Time `json:"since"` + Statuses *[]V1TaskStatus `json:"statuses,omitempty"` + Until *time.Time `json:"until,omitempty"` + WorkflowIds *[]openapi_types.UUID `json:"workflowIds,omitempty"` +} + +// V1TaskPointMetric defines model for V1TaskPointMetric. +type V1TaskPointMetric struct { + FAILED int `json:"FAILED"` + SUCCEEDED int `json:"SUCCEEDED"` + Time time.Time `json:"time"` +} + +// V1TaskPointMetrics defines model for V1TaskPointMetrics. +type V1TaskPointMetrics struct { + Results *[]V1TaskPointMetric `json:"results,omitempty"` +} + +// V1TaskRunMetric defines model for V1TaskRunMetric. +type V1TaskRunMetric struct { + Count int `json:"count"` + Status V1TaskStatus `json:"status"` +} + +// V1TaskRunMetrics defines model for V1TaskRunMetrics. +type V1TaskRunMetrics = []V1TaskRunMetric + +// V1TaskStatus defines model for V1TaskStatus. +type V1TaskStatus string + +// V1TaskSummary defines model for V1TaskSummary. +type V1TaskSummary struct { + // AdditionalMetadata Additional metadata for the task run. + AdditionalMetadata *map[string]interface{} `json:"additionalMetadata,omitempty"` + + // Children The list of children tasks + Children *[]V1TaskSummary `json:"children,omitempty"` + + // CreatedAt The timestamp the task was created. + CreatedAt time.Time `json:"createdAt"` + + // DisplayName The display name of the task run. + DisplayName string `json:"displayName"` + + // Duration The duration of the task run, in milliseconds. + Duration *int `json:"duration,omitempty"` + + // ErrorMessage The error message of the task run (for the latest run) + ErrorMessage *string `json:"errorMessage,omitempty"` + + // FinishedAt The timestamp the task run finished. + FinishedAt *time.Time `json:"finishedAt,omitempty"` + + // Input The input of the task run. 
+ Input map[string]interface{} `json:"input"` + Metadata APIResourceMeta `json:"metadata"` + + // NumSpawnedChildren The number of spawned children tasks + NumSpawnedChildren int `json:"numSpawnedChildren"` + + // Output The output of the task run (for the latest run) + Output map[string]interface{} `json:"output"` + + // StartedAt The timestamp the task run started. + StartedAt *time.Time `json:"startedAt,omitempty"` + Status V1TaskStatus `json:"status"` + + // StepId The step ID of the task. + StepId *openapi_types.UUID `json:"stepId,omitempty"` + + // TaskExternalId The external ID of the task. + TaskExternalId openapi_types.UUID `json:"taskExternalId"` + + // TaskId The ID of the task. + TaskId int `json:"taskId"` + + // TaskInsertedAt The timestamp the task was inserted. + TaskInsertedAt time.Time `json:"taskInsertedAt"` + + // TenantId The ID of the tenant. + TenantId openapi_types.UUID `json:"tenantId"` + Type V1WorkflowType `json:"type"` + WorkflowId openapi_types.UUID `json:"workflowId"` + WorkflowName *string `json:"workflowName,omitempty"` + + // WorkflowRunExternalId The external ID of the workflow run + WorkflowRunExternalId *openapi_types.UUID `json:"workflowRunExternalId,omitempty"` + + // WorkflowVersionId The version ID of the workflow + WorkflowVersionId *openapi_types.UUID `json:"workflowVersionId,omitempty"` +} + +// V1TaskSummaryList defines model for V1TaskSummaryList. +type V1TaskSummaryList struct { + Pagination PaginationResponse `json:"pagination"` + + // Rows The list of tasks + Rows []V1TaskSummary `json:"rows"` +} + +// V1TriggerWorkflowRunRequest defines model for V1TriggerWorkflowRunRequest. +type V1TriggerWorkflowRunRequest struct { + AdditionalMetadata *map[string]interface{} `json:"additionalMetadata,omitempty"` + Input map[string]interface{} `json:"input"` + + // WorkflowName The name of the workflow. + WorkflowName string `json:"workflowName"` +} + +// V1WorkflowRun defines model for V1WorkflowRun. 
+type V1WorkflowRun struct { + // AdditionalMetadata Additional metadata for the task run. + AdditionalMetadata *map[string]interface{} `json:"additionalMetadata,omitempty"` + + // CreatedAt The timestamp the task run was created. + CreatedAt *time.Time `json:"createdAt,omitempty"` + + // DisplayName The display name of the task run. + DisplayName string `json:"displayName"` + + // Duration The duration of the task run, in milliseconds. + Duration *int `json:"duration,omitempty"` + + // ErrorMessage The error message of the task run (for the latest run) + ErrorMessage *string `json:"errorMessage,omitempty"` + + // FinishedAt The timestamp the task run finished. + FinishedAt *time.Time `json:"finishedAt,omitempty"` + + // Input The input of the task run. + Input map[string]interface{} `json:"input"` + Metadata APIResourceMeta `json:"metadata"` + + // Output The output of the task run (for the latest run) + Output map[string]interface{} `json:"output"` + + // StartedAt The timestamp the task run started. + StartedAt *time.Time `json:"startedAt,omitempty"` + Status V1TaskStatus `json:"status"` + + // TenantId The ID of the tenant. + TenantId openapi_types.UUID `json:"tenantId"` + WorkflowId openapi_types.UUID `json:"workflowId"` + + // WorkflowVersionId The ID of the workflow version. + WorkflowVersionId *openapi_types.UUID `json:"workflowVersionId,omitempty"` +} + +// V1WorkflowRunDetails defines model for V1WorkflowRunDetails. +type V1WorkflowRunDetails struct { + Run V1WorkflowRun `json:"run"` + Shape WorkflowRunShapeForWorkflowRunDetails `json:"shape"` + + // TaskEvents The list of task events for the workflow run + TaskEvents []V1TaskEvent `json:"taskEvents"` + Tasks []V1TaskSummary `json:"tasks"` +} + +// V1WorkflowRunDisplayName defines model for V1WorkflowRunDisplayName. 
+type V1WorkflowRunDisplayName struct { + DisplayName string `json:"displayName"` + Metadata APIResourceMeta `json:"metadata"` +} + +// V1WorkflowRunDisplayNameList defines model for V1WorkflowRunDisplayNameList. +type V1WorkflowRunDisplayNameList struct { + Pagination PaginationResponse `json:"pagination"` + + // Rows The list of display names + Rows []V1WorkflowRunDisplayName `json:"rows"` +} + +// V1WorkflowType defines model for V1WorkflowType. +type V1WorkflowType string + // WebhookWorker defines model for WebhookWorker. type WebhookWorker struct { Metadata APIResourceMeta `json:"metadata"` @@ -1368,6 +1656,17 @@ type WorkflowRunShape struct { WorkflowVersionId string `json:"workflowVersionId"` } +// WorkflowRunShapeForWorkflowRunDetails defines model for WorkflowRunShapeForWorkflowRunDetails. +type WorkflowRunShapeForWorkflowRunDetails = []WorkflowRunShapeItemForWorkflowRunDetails + +// WorkflowRunShapeItemForWorkflowRunDetails defines model for WorkflowRunShapeItemForWorkflowRunDetails. +type WorkflowRunShapeItemForWorkflowRunDetails struct { + ChildrenStepIds []openapi_types.UUID `json:"childrenStepIds"` + StepId openapi_types.UUID `json:"stepId"` + TaskExternalId openapi_types.UUID `json:"taskExternalId"` + TaskName string `json:"taskName"` +} + // WorkflowRunStatus defines model for WorkflowRunStatus. type WorkflowRunStatus string @@ -1478,6 +1777,93 @@ type WorkflowWorkersCount struct { WorkflowRunId *string `json:"workflowRunId,omitempty"` } +// V1DagListTasksParams defines parameters for V1DagListTasks. +type V1DagListTasksParams struct { + // DagIds The external id of the DAG + DagIds []openapi_types.UUID `form:"dag_ids" json:"dag_ids"` + + // Tenant The tenant id + Tenant openapi_types.UUID `form:"tenant" json:"tenant"` +} + +// V1TaskEventListParams defines parameters for V1TaskEventList. 
+type V1TaskEventListParams struct { + // Offset The number to skip + Offset *int64 `form:"offset,omitempty" json:"offset,omitempty"` + + // Limit The number to limit by + Limit *int64 `form:"limit,omitempty" json:"limit,omitempty"` +} + +// V1TaskListStatusMetricsParams defines parameters for V1TaskListStatusMetrics. +type V1TaskListStatusMetricsParams struct { + // Since The start time to get metrics for + Since time.Time `form:"since" json:"since"` + + // WorkflowIds The workflow id to find runs for + WorkflowIds *[]openapi_types.UUID `form:"workflow_ids,omitempty" json:"workflow_ids,omitempty"` + + // ParentTaskExternalId The parent task's external id + ParentTaskExternalId *openapi_types.UUID `form:"parent_task_external_id,omitempty" json:"parent_task_external_id,omitempty"` +} + +// V1TaskGetPointMetricsParams defines parameters for V1TaskGetPointMetrics. +type V1TaskGetPointMetricsParams struct { + // CreatedAfter The time after the task was created + CreatedAfter *time.Time `form:"createdAfter,omitempty" json:"createdAfter,omitempty"` + + // FinishedBefore The time before the task was completed + FinishedBefore *time.Time `form:"finishedBefore,omitempty" json:"finishedBefore,omitempty"` +} + +// V1WorkflowRunListParams defines parameters for V1WorkflowRunList. 
+type V1WorkflowRunListParams struct { + // Offset The number to skip + Offset *int64 `form:"offset,omitempty" json:"offset,omitempty"` + + // Limit The number to limit by + Limit *int64 `form:"limit,omitempty" json:"limit,omitempty"` + + // Statuses A list of statuses to filter by + Statuses *[]V1TaskStatus `form:"statuses,omitempty" json:"statuses,omitempty"` + + // Since The earliest date to filter by + Since time.Time `form:"since" json:"since"` + + // Until The latest date to filter by + Until *time.Time `form:"until,omitempty" json:"until,omitempty"` + + // AdditionalMetadata Additional metadata k-v pairs to filter by + AdditionalMetadata *[]string `form:"additional_metadata,omitempty" json:"additional_metadata,omitempty"` + + // WorkflowIds The workflow ids to find runs for + WorkflowIds *[]openapi_types.UUID `form:"workflow_ids,omitempty" json:"workflow_ids,omitempty"` + + // WorkerId The worker id to filter by + WorkerId *openapi_types.UUID `form:"worker_id,omitempty" json:"worker_id,omitempty"` + + // OnlyTasks Whether to include DAGs or only to include tasks + OnlyTasks bool `form:"only_tasks" json:"only_tasks"` + + // ParentTaskExternalId The parent task external id to filter by + ParentTaskExternalId *openapi_types.UUID `form:"parent_task_external_id,omitempty" json:"parent_task_external_id,omitempty"` +} + +// V1WorkflowRunDisplayNamesListParams defines parameters for V1WorkflowRunDisplayNamesList. +type V1WorkflowRunDisplayNamesListParams struct { + // ExternalIds The external ids of the workflow runs to get display names for + ExternalIds []openapi_types.UUID `form:"external_ids" json:"external_ids"` +} + +// V1WorkflowRunTaskEventsListParams defines parameters for V1WorkflowRunTaskEventsList. 
+type V1WorkflowRunTaskEventsListParams struct { + // Offset The number to skip + Offset *int64 `form:"offset,omitempty" json:"offset,omitempty"` + + // Limit The number to limit by + Limit *int64 `form:"limit,omitempty" json:"limit,omitempty"` +} + // StepRunListArchivesParams defines parameters for StepRunListArchives. type StepRunListArchivesParams struct { // Offset The number to skip @@ -1742,6 +2128,15 @@ type WorkflowVersionGetParams struct { // AlertEmailGroupUpdateJSONRequestBody defines body for AlertEmailGroupUpdate for application/json ContentType. type AlertEmailGroupUpdateJSONRequestBody = UpdateTenantAlertEmailGroupRequest +// V1TaskCancelJSONRequestBody defines body for V1TaskCancel for application/json ContentType. +type V1TaskCancelJSONRequestBody = V1CancelTaskRequest + +// V1TaskReplayJSONRequestBody defines body for V1TaskReplay for application/json ContentType. +type V1TaskReplayJSONRequestBody = V1ReplayTaskRequest + +// V1WorkflowRunCreateJSONRequestBody defines body for V1WorkflowRunCreate for application/json ContentType. +type V1WorkflowRunCreateJSONRequestBody = V1TriggerWorkflowRunRequest + // TenantCreateJSONRequestBody defines body for TenantCreate for application/json ContentType. 
type TenantCreateJSONRequestBody = CreateTenantRequest @@ -1861,6 +2256,45 @@ type ServerInterface interface { // Github app tenant webhook // (POST /api/v1/sns/{tenant}/{event}) SnsUpdate(ctx echo.Context, tenant openapi_types.UUID, event string) error + // List tasks + // (GET /api/v1/stable/dags/tasks) + V1DagListTasks(ctx echo.Context, params V1DagListTasksParams) error + // Get a task + // (GET /api/v1/stable/tasks/{task}) + V1TaskGet(ctx echo.Context, task openapi_types.UUID) error + // List log lines + // (GET /api/v1/stable/tasks/{task}/logs) + V1LogLineList(ctx echo.Context, task openapi_types.UUID) error + // List events for a task + // (GET /api/v1/stable/tasks/{task}/task-events) + V1TaskEventList(ctx echo.Context, task openapi_types.UUID, params V1TaskEventListParams) error + // Get task metrics + // (GET /api/v1/stable/tenants/{tenant}/task-metrics) + V1TaskListStatusMetrics(ctx echo.Context, tenant openapi_types.UUID, params V1TaskListStatusMetricsParams) error + // Get task point metrics + // (GET /api/v1/stable/tenants/{tenant}/task-point-metrics) + V1TaskGetPointMetrics(ctx echo.Context, tenant openapi_types.UUID, params V1TaskGetPointMetricsParams) error + // Cancel tasks + // (POST /api/v1/stable/tenants/{tenant}/tasks/cancel) + V1TaskCancel(ctx echo.Context, tenant openapi_types.UUID) error + // Replay tasks + // (POST /api/v1/stable/tenants/{tenant}/tasks/replay) + V1TaskReplay(ctx echo.Context, tenant openapi_types.UUID) error + // List workflow runs + // (GET /api/v1/stable/tenants/{tenant}/workflow-runs) + V1WorkflowRunList(ctx echo.Context, tenant openapi_types.UUID, params V1WorkflowRunListParams) error + // List workflow runs + // (GET /api/v1/stable/tenants/{tenant}/workflow-runs/display-names) + V1WorkflowRunDisplayNamesList(ctx echo.Context, tenant openapi_types.UUID, params V1WorkflowRunDisplayNamesListParams) error + // Create workflow run + // (POST /api/v1/stable/tenants/{tenant}/workflow-runs/trigger) + V1WorkflowRunCreate(ctx 
echo.Context, tenant openapi_types.UUID) error + // List tasks + // (GET /api/v1/stable/workflow-runs/{v1-workflow-run}) + V1WorkflowRunGet(ctx echo.Context, v1WorkflowRun openapi_types.UUID) error + // List tasks + // (GET /api/v1/stable/workflow-runs/{v1-workflow-run}/task-events) + V1WorkflowRunTaskEventsList(ctx echo.Context, v1WorkflowRun openapi_types.UUID, params V1WorkflowRunTaskEventsListParams) error // List archives for step run // (GET /api/v1/step-runs/{step-run}/archives) StepRunListArchives(ctx echo.Context, stepRun openapi_types.UUID, params StepRunListArchivesParams) error @@ -2341,6 +2775,427 @@ func (w *ServerInterfaceWrapper) SnsUpdate(ctx echo.Context) error { return err } +// V1DagListTasks converts echo context to params. +func (w *ServerInterfaceWrapper) V1DagListTasks(ctx echo.Context) error { + var err error + + ctx.Set(BearerAuthScopes, []string{}) + + ctx.Set(CookieAuthScopes, []string{}) + + // Parameter object where we will unmarshal all parameters from the context + var params V1DagListTasksParams + // ------------- Required query parameter "dag_ids" ------------- + + err = runtime.BindQueryParameter("form", true, true, "dag_ids", ctx.QueryParams(), ¶ms.DagIds) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter dag_ids: %s", err)) + } + + // ------------- Required query parameter "tenant" ------------- + + err = runtime.BindQueryParameter("form", true, true, "tenant", ctx.QueryParams(), ¶ms.Tenant) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter tenant: %s", err)) + } + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.V1DagListTasks(ctx, params) + return err +} + +// V1TaskGet converts echo context to params. 
+func (w *ServerInterfaceWrapper) V1TaskGet(ctx echo.Context) error { + var err error + // ------------- Path parameter "task" ------------- + var task openapi_types.UUID + + err = runtime.BindStyledParameterWithLocation("simple", false, "task", runtime.ParamLocationPath, ctx.Param("task"), &task) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter task: %s", err)) + } + + ctx.Set(BearerAuthScopes, []string{}) + + ctx.Set(CookieAuthScopes, []string{}) + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.V1TaskGet(ctx, task) + return err +} + +// V1LogLineList converts echo context to params. +func (w *ServerInterfaceWrapper) V1LogLineList(ctx echo.Context) error { + var err error + // ------------- Path parameter "task" ------------- + var task openapi_types.UUID + + err = runtime.BindStyledParameterWithLocation("simple", false, "task", runtime.ParamLocationPath, ctx.Param("task"), &task) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter task: %s", err)) + } + + ctx.Set(BearerAuthScopes, []string{}) + + ctx.Set(CookieAuthScopes, []string{}) + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.V1LogLineList(ctx, task) + return err +} + +// V1TaskEventList converts echo context to params. 
+func (w *ServerInterfaceWrapper) V1TaskEventList(ctx echo.Context) error { + var err error + // ------------- Path parameter "task" ------------- + var task openapi_types.UUID + + err = runtime.BindStyledParameterWithLocation("simple", false, "task", runtime.ParamLocationPath, ctx.Param("task"), &task) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter task: %s", err)) + } + + ctx.Set(BearerAuthScopes, []string{}) + + ctx.Set(CookieAuthScopes, []string{}) + + // Parameter object where we will unmarshal all parameters from the context + var params V1TaskEventListParams + // ------------- Optional query parameter "offset" ------------- + + err = runtime.BindQueryParameter("form", true, false, "offset", ctx.QueryParams(), ¶ms.Offset) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter offset: %s", err)) + } + + // ------------- Optional query parameter "limit" ------------- + + err = runtime.BindQueryParameter("form", true, false, "limit", ctx.QueryParams(), ¶ms.Limit) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter limit: %s", err)) + } + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.V1TaskEventList(ctx, task, params) + return err +} + +// V1TaskListStatusMetrics converts echo context to params. 
+func (w *ServerInterfaceWrapper) V1TaskListStatusMetrics(ctx echo.Context) error { + var err error + // ------------- Path parameter "tenant" ------------- + var tenant openapi_types.UUID + + err = runtime.BindStyledParameterWithLocation("simple", false, "tenant", runtime.ParamLocationPath, ctx.Param("tenant"), &tenant) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter tenant: %s", err)) + } + + ctx.Set(BearerAuthScopes, []string{}) + + ctx.Set(CookieAuthScopes, []string{}) + + // Parameter object where we will unmarshal all parameters from the context + var params V1TaskListStatusMetricsParams + // ------------- Required query parameter "since" ------------- + + err = runtime.BindQueryParameter("form", true, true, "since", ctx.QueryParams(), ¶ms.Since) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter since: %s", err)) + } + + // ------------- Optional query parameter "workflow_ids" ------------- + + err = runtime.BindQueryParameter("form", true, false, "workflow_ids", ctx.QueryParams(), ¶ms.WorkflowIds) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter workflow_ids: %s", err)) + } + + // ------------- Optional query parameter "parent_task_external_id" ------------- + + err = runtime.BindQueryParameter("form", true, false, "parent_task_external_id", ctx.QueryParams(), ¶ms.ParentTaskExternalId) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter parent_task_external_id: %s", err)) + } + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.V1TaskListStatusMetrics(ctx, tenant, params) + return err +} + +// V1TaskGetPointMetrics converts echo context to params. 
+func (w *ServerInterfaceWrapper) V1TaskGetPointMetrics(ctx echo.Context) error { + var err error + // ------------- Path parameter "tenant" ------------- + var tenant openapi_types.UUID + + err = runtime.BindStyledParameterWithLocation("simple", false, "tenant", runtime.ParamLocationPath, ctx.Param("tenant"), &tenant) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter tenant: %s", err)) + } + + ctx.Set(BearerAuthScopes, []string{}) + + ctx.Set(CookieAuthScopes, []string{}) + + // Parameter object where we will unmarshal all parameters from the context + var params V1TaskGetPointMetricsParams + // ------------- Optional query parameter "createdAfter" ------------- + + err = runtime.BindQueryParameter("form", true, false, "createdAfter", ctx.QueryParams(), ¶ms.CreatedAfter) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter createdAfter: %s", err)) + } + + // ------------- Optional query parameter "finishedBefore" ------------- + + err = runtime.BindQueryParameter("form", true, false, "finishedBefore", ctx.QueryParams(), ¶ms.FinishedBefore) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter finishedBefore: %s", err)) + } + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.V1TaskGetPointMetrics(ctx, tenant, params) + return err +} + +// V1TaskCancel converts echo context to params. 
+func (w *ServerInterfaceWrapper) V1TaskCancel(ctx echo.Context) error { + var err error + // ------------- Path parameter "tenant" ------------- + var tenant openapi_types.UUID + + err = runtime.BindStyledParameterWithLocation("simple", false, "tenant", runtime.ParamLocationPath, ctx.Param("tenant"), &tenant) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter tenant: %s", err)) + } + + ctx.Set(BearerAuthScopes, []string{}) + + ctx.Set(CookieAuthScopes, []string{}) + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.V1TaskCancel(ctx, tenant) + return err +} + +// V1TaskReplay converts echo context to params. +func (w *ServerInterfaceWrapper) V1TaskReplay(ctx echo.Context) error { + var err error + // ------------- Path parameter "tenant" ------------- + var tenant openapi_types.UUID + + err = runtime.BindStyledParameterWithLocation("simple", false, "tenant", runtime.ParamLocationPath, ctx.Param("tenant"), &tenant) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter tenant: %s", err)) + } + + ctx.Set(BearerAuthScopes, []string{}) + + ctx.Set(CookieAuthScopes, []string{}) + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.V1TaskReplay(ctx, tenant) + return err +} + +// V1WorkflowRunList converts echo context to params. 
+func (w *ServerInterfaceWrapper) V1WorkflowRunList(ctx echo.Context) error { + var err error + // ------------- Path parameter "tenant" ------------- + var tenant openapi_types.UUID + + err = runtime.BindStyledParameterWithLocation("simple", false, "tenant", runtime.ParamLocationPath, ctx.Param("tenant"), &tenant) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter tenant: %s", err)) + } + + ctx.Set(BearerAuthScopes, []string{}) + + ctx.Set(CookieAuthScopes, []string{}) + + // Parameter object where we will unmarshal all parameters from the context + var params V1WorkflowRunListParams + // ------------- Optional query parameter "offset" ------------- + + err = runtime.BindQueryParameter("form", true, false, "offset", ctx.QueryParams(), ¶ms.Offset) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter offset: %s", err)) + } + + // ------------- Optional query parameter "limit" ------------- + + err = runtime.BindQueryParameter("form", true, false, "limit", ctx.QueryParams(), ¶ms.Limit) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter limit: %s", err)) + } + + // ------------- Optional query parameter "statuses" ------------- + + err = runtime.BindQueryParameter("form", true, false, "statuses", ctx.QueryParams(), ¶ms.Statuses) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter statuses: %s", err)) + } + + // ------------- Required query parameter "since" ------------- + + err = runtime.BindQueryParameter("form", true, true, "since", ctx.QueryParams(), ¶ms.Since) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter since: %s", err)) + } + + // ------------- Optional query parameter "until" ------------- + + err = runtime.BindQueryParameter("form", true, false, "until", 
ctx.QueryParams(), ¶ms.Until) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter until: %s", err)) + } + + // ------------- Optional query parameter "additional_metadata" ------------- + + err = runtime.BindQueryParameter("form", true, false, "additional_metadata", ctx.QueryParams(), ¶ms.AdditionalMetadata) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter additional_metadata: %s", err)) + } + + // ------------- Optional query parameter "workflow_ids" ------------- + + err = runtime.BindQueryParameter("form", true, false, "workflow_ids", ctx.QueryParams(), ¶ms.WorkflowIds) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter workflow_ids: %s", err)) + } + + // ------------- Optional query parameter "worker_id" ------------- + + err = runtime.BindQueryParameter("form", true, false, "worker_id", ctx.QueryParams(), ¶ms.WorkerId) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter worker_id: %s", err)) + } + + // ------------- Required query parameter "only_tasks" ------------- + + err = runtime.BindQueryParameter("form", true, true, "only_tasks", ctx.QueryParams(), ¶ms.OnlyTasks) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter only_tasks: %s", err)) + } + + // ------------- Optional query parameter "parent_task_external_id" ------------- + + err = runtime.BindQueryParameter("form", true, false, "parent_task_external_id", ctx.QueryParams(), ¶ms.ParentTaskExternalId) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter parent_task_external_id: %s", err)) + } + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.V1WorkflowRunList(ctx, tenant, params) + return err +} + +// 
V1WorkflowRunDisplayNamesList converts echo context to params. +func (w *ServerInterfaceWrapper) V1WorkflowRunDisplayNamesList(ctx echo.Context) error { + var err error + // ------------- Path parameter "tenant" ------------- + var tenant openapi_types.UUID + + err = runtime.BindStyledParameterWithLocation("simple", false, "tenant", runtime.ParamLocationPath, ctx.Param("tenant"), &tenant) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter tenant: %s", err)) + } + + ctx.Set(BearerAuthScopes, []string{}) + + ctx.Set(CookieAuthScopes, []string{}) + + // Parameter object where we will unmarshal all parameters from the context + var params V1WorkflowRunDisplayNamesListParams + // ------------- Required query parameter "external_ids" ------------- + + err = runtime.BindQueryParameter("form", true, true, "external_ids", ctx.QueryParams(), ¶ms.ExternalIds) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter external_ids: %s", err)) + } + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.V1WorkflowRunDisplayNamesList(ctx, tenant, params) + return err +} + +// V1WorkflowRunCreate converts echo context to params. +func (w *ServerInterfaceWrapper) V1WorkflowRunCreate(ctx echo.Context) error { + var err error + // ------------- Path parameter "tenant" ------------- + var tenant openapi_types.UUID + + err = runtime.BindStyledParameterWithLocation("simple", false, "tenant", runtime.ParamLocationPath, ctx.Param("tenant"), &tenant) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter tenant: %s", err)) + } + + ctx.Set(BearerAuthScopes, []string{}) + + ctx.Set(CookieAuthScopes, []string{}) + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.V1WorkflowRunCreate(ctx, tenant) + return err +} + +// V1WorkflowRunGet converts echo context to params. 
+func (w *ServerInterfaceWrapper) V1WorkflowRunGet(ctx echo.Context) error { + var err error + // ------------- Path parameter "v1-workflow-run" ------------- + var v1WorkflowRun openapi_types.UUID + + err = runtime.BindStyledParameterWithLocation("simple", false, "v1-workflow-run", runtime.ParamLocationPath, ctx.Param("v1-workflow-run"), &v1WorkflowRun) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter v1-workflow-run: %s", err)) + } + + ctx.Set(BearerAuthScopes, []string{}) + + ctx.Set(CookieAuthScopes, []string{}) + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.V1WorkflowRunGet(ctx, v1WorkflowRun) + return err +} + +// V1WorkflowRunTaskEventsList converts echo context to params. +func (w *ServerInterfaceWrapper) V1WorkflowRunTaskEventsList(ctx echo.Context) error { + var err error + // ------------- Path parameter "v1-workflow-run" ------------- + var v1WorkflowRun openapi_types.UUID + + err = runtime.BindStyledParameterWithLocation("simple", false, "v1-workflow-run", runtime.ParamLocationPath, ctx.Param("v1-workflow-run"), &v1WorkflowRun) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter v1-workflow-run: %s", err)) + } + + ctx.Set(BearerAuthScopes, []string{}) + + ctx.Set(CookieAuthScopes, []string{}) + + // Parameter object where we will unmarshal all parameters from the context + var params V1WorkflowRunTaskEventsListParams + // ------------- Optional query parameter "offset" ------------- + + err = runtime.BindQueryParameter("form", true, false, "offset", ctx.QueryParams(), ¶ms.Offset) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter offset: %s", err)) + } + + // ------------- Optional query parameter "limit" ------------- + + err = runtime.BindQueryParameter("form", true, false, "limit", ctx.QueryParams(), ¶ms.Limit) + if err != nil { + return 
echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter limit: %s", err)) + } + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.V1WorkflowRunTaskEventsList(ctx, v1WorkflowRun, params) + return err +} + // StepRunListArchives converts echo context to params. func (w *ServerInterfaceWrapper) StepRunListArchives(ctx echo.Context) error { var err error @@ -4507,6 +5362,19 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL router.DELETE(baseURL+"/api/v1/slack/:slack", wrapper.SlackWebhookDelete) router.DELETE(baseURL+"/api/v1/sns/:sns", wrapper.SnsDelete) router.POST(baseURL+"/api/v1/sns/:tenant/:event", wrapper.SnsUpdate) + router.GET(baseURL+"/api/v1/stable/dags/tasks", wrapper.V1DagListTasks) + router.GET(baseURL+"/api/v1/stable/tasks/:task", wrapper.V1TaskGet) + router.GET(baseURL+"/api/v1/stable/tasks/:task/logs", wrapper.V1LogLineList) + router.GET(baseURL+"/api/v1/stable/tasks/:task/task-events", wrapper.V1TaskEventList) + router.GET(baseURL+"/api/v1/stable/tenants/:tenant/task-metrics", wrapper.V1TaskListStatusMetrics) + router.GET(baseURL+"/api/v1/stable/tenants/:tenant/task-point-metrics", wrapper.V1TaskGetPointMetrics) + router.POST(baseURL+"/api/v1/stable/tenants/:tenant/tasks/cancel", wrapper.V1TaskCancel) + router.POST(baseURL+"/api/v1/stable/tenants/:tenant/tasks/replay", wrapper.V1TaskReplay) + router.GET(baseURL+"/api/v1/stable/tenants/:tenant/workflow-runs", wrapper.V1WorkflowRunList) + router.GET(baseURL+"/api/v1/stable/tenants/:tenant/workflow-runs/display-names", wrapper.V1WorkflowRunDisplayNamesList) + router.POST(baseURL+"/api/v1/stable/tenants/:tenant/workflow-runs/trigger", wrapper.V1WorkflowRunCreate) + router.GET(baseURL+"/api/v1/stable/workflow-runs/:v1-workflow-run", wrapper.V1WorkflowRunGet) + router.GET(baseURL+"/api/v1/stable/workflow-runs/:v1-workflow-run/task-events", wrapper.V1WorkflowRunTaskEventsList) 
router.GET(baseURL+"/api/v1/step-runs/:step-run/archives", wrapper.StepRunListArchives) router.GET(baseURL+"/api/v1/step-runs/:step-run/events", wrapper.StepRunListEvents) router.GET(baseURL+"/api/v1/step-runs/:step-run/logs", wrapper.LogLineList) @@ -5040,6 +5908,603 @@ func (response SnsUpdate405JSONResponse) VisitSnsUpdateResponse(w http.ResponseW return json.NewEncoder(w).Encode(response) } +type V1DagListTasksRequestObject struct { + Params V1DagListTasksParams +} + +type V1DagListTasksResponseObject interface { + VisitV1DagListTasksResponse(w http.ResponseWriter) error +} + +type V1DagListTasks200JSONResponse []V1DagChildren + +func (response V1DagListTasks200JSONResponse) VisitV1DagListTasksResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type V1DagListTasks400JSONResponse APIErrors + +func (response V1DagListTasks400JSONResponse) VisitV1DagListTasksResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type V1DagListTasks403JSONResponse APIErrors + +func (response V1DagListTasks403JSONResponse) VisitV1DagListTasksResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type V1DagListTasks501JSONResponse APIErrors + +func (response V1DagListTasks501JSONResponse) VisitV1DagListTasksResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(501) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskGetRequestObject struct { + Task openapi_types.UUID `json:"task"` +} + +type V1TaskGetResponseObject interface { + VisitV1TaskGetResponse(w http.ResponseWriter) error +} + +type V1TaskGet200JSONResponse V1TaskSummary + +func (response V1TaskGet200JSONResponse) 
VisitV1TaskGetResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskGet400JSONResponse APIErrors + +func (response V1TaskGet400JSONResponse) VisitV1TaskGetResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskGet403JSONResponse APIErrors + +func (response V1TaskGet403JSONResponse) VisitV1TaskGetResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskGet404JSONResponse APIErrors + +func (response V1TaskGet404JSONResponse) VisitV1TaskGetResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskGet501JSONResponse APIErrors + +func (response V1TaskGet501JSONResponse) VisitV1TaskGetResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(501) + + return json.NewEncoder(w).Encode(response) +} + +type V1LogLineListRequestObject struct { + Task openapi_types.UUID `json:"task"` +} + +type V1LogLineListResponseObject interface { + VisitV1LogLineListResponse(w http.ResponseWriter) error +} + +type V1LogLineList200JSONResponse V1LogLineList + +func (response V1LogLineList200JSONResponse) VisitV1LogLineListResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type V1LogLineList400JSONResponse APIErrors + +func (response V1LogLineList400JSONResponse) VisitV1LogLineListResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type 
V1LogLineList403JSONResponse APIErrors + +func (response V1LogLineList403JSONResponse) VisitV1LogLineListResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskEventListRequestObject struct { + Task openapi_types.UUID `json:"task"` + Params V1TaskEventListParams +} + +type V1TaskEventListResponseObject interface { + VisitV1TaskEventListResponse(w http.ResponseWriter) error +} + +type V1TaskEventList200JSONResponse V1TaskEventList + +func (response V1TaskEventList200JSONResponse) VisitV1TaskEventListResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskEventList400JSONResponse APIErrors + +func (response V1TaskEventList400JSONResponse) VisitV1TaskEventListResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskEventList403JSONResponse APIErrors + +func (response V1TaskEventList403JSONResponse) VisitV1TaskEventListResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskEventList404JSONResponse APIErrors + +func (response V1TaskEventList404JSONResponse) VisitV1TaskEventListResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskEventList501JSONResponse APIErrors + +func (response V1TaskEventList501JSONResponse) VisitV1TaskEventListResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(501) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskListStatusMetricsRequestObject struct { + Tenant 
openapi_types.UUID `json:"tenant"` + Params V1TaskListStatusMetricsParams +} + +type V1TaskListStatusMetricsResponseObject interface { + VisitV1TaskListStatusMetricsResponse(w http.ResponseWriter) error +} + +type V1TaskListStatusMetrics200JSONResponse V1TaskRunMetrics + +func (response V1TaskListStatusMetrics200JSONResponse) VisitV1TaskListStatusMetricsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskListStatusMetrics400JSONResponse APIErrors + +func (response V1TaskListStatusMetrics400JSONResponse) VisitV1TaskListStatusMetricsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskListStatusMetrics403JSONResponse APIErrors + +func (response V1TaskListStatusMetrics403JSONResponse) VisitV1TaskListStatusMetricsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskListStatusMetrics501JSONResponse APIErrors + +func (response V1TaskListStatusMetrics501JSONResponse) VisitV1TaskListStatusMetricsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(501) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskGetPointMetricsRequestObject struct { + Tenant openapi_types.UUID `json:"tenant"` + Params V1TaskGetPointMetricsParams +} + +type V1TaskGetPointMetricsResponseObject interface { + VisitV1TaskGetPointMetricsResponse(w http.ResponseWriter) error +} + +type V1TaskGetPointMetrics200JSONResponse V1TaskPointMetrics + +func (response V1TaskGetPointMetrics200JSONResponse) VisitV1TaskGetPointMetricsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return 
json.NewEncoder(w).Encode(response) +} + +type V1TaskGetPointMetrics400JSONResponse APIErrors + +func (response V1TaskGetPointMetrics400JSONResponse) VisitV1TaskGetPointMetricsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskGetPointMetrics403JSONResponse APIErrors + +func (response V1TaskGetPointMetrics403JSONResponse) VisitV1TaskGetPointMetricsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskGetPointMetrics501JSONResponse APIErrors + +func (response V1TaskGetPointMetrics501JSONResponse) VisitV1TaskGetPointMetricsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(501) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskCancelRequestObject struct { + Tenant openapi_types.UUID `json:"tenant"` + Body *V1TaskCancelJSONRequestBody +} + +type V1TaskCancelResponseObject interface { + VisitV1TaskCancelResponse(w http.ResponseWriter) error +} + +type V1TaskCancel200Response struct { +} + +func (response V1TaskCancel200Response) VisitV1TaskCancelResponse(w http.ResponseWriter) error { + w.WriteHeader(200) + return nil +} + +type V1TaskCancel400JSONResponse APIErrors + +func (response V1TaskCancel400JSONResponse) VisitV1TaskCancelResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskCancel403JSONResponse APIErrors + +func (response V1TaskCancel403JSONResponse) VisitV1TaskCancelResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskCancel404JSONResponse APIErrors + +func (response V1TaskCancel404JSONResponse) 
VisitV1TaskCancelResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskCancel501JSONResponse APIErrors + +func (response V1TaskCancel501JSONResponse) VisitV1TaskCancelResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(501) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskReplayRequestObject struct { + Tenant openapi_types.UUID `json:"tenant"` + Body *V1TaskReplayJSONRequestBody +} + +type V1TaskReplayResponseObject interface { + VisitV1TaskReplayResponse(w http.ResponseWriter) error +} + +type V1TaskReplay200Response struct { +} + +func (response V1TaskReplay200Response) VisitV1TaskReplayResponse(w http.ResponseWriter) error { + w.WriteHeader(200) + return nil +} + +type V1TaskReplay400JSONResponse APIErrors + +func (response V1TaskReplay400JSONResponse) VisitV1TaskReplayResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskReplay403JSONResponse APIErrors + +func (response V1TaskReplay403JSONResponse) VisitV1TaskReplayResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskReplay404JSONResponse APIErrors + +func (response V1TaskReplay404JSONResponse) VisitV1TaskReplayResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type V1TaskReplay501JSONResponse APIErrors + +func (response V1TaskReplay501JSONResponse) VisitV1TaskReplayResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(501) + + return json.NewEncoder(w).Encode(response) +} + +type 
V1WorkflowRunListRequestObject struct { + Tenant openapi_types.UUID `json:"tenant"` + Params V1WorkflowRunListParams +} + +type V1WorkflowRunListResponseObject interface { + VisitV1WorkflowRunListResponse(w http.ResponseWriter) error +} + +type V1WorkflowRunList200JSONResponse V1TaskSummaryList + +func (response V1WorkflowRunList200JSONResponse) VisitV1WorkflowRunListResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type V1WorkflowRunList400JSONResponse APIErrors + +func (response V1WorkflowRunList400JSONResponse) VisitV1WorkflowRunListResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type V1WorkflowRunList403JSONResponse APIErrors + +func (response V1WorkflowRunList403JSONResponse) VisitV1WorkflowRunListResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type V1WorkflowRunList501JSONResponse APIErrors + +func (response V1WorkflowRunList501JSONResponse) VisitV1WorkflowRunListResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(501) + + return json.NewEncoder(w).Encode(response) +} + +type V1WorkflowRunDisplayNamesListRequestObject struct { + Tenant openapi_types.UUID `json:"tenant"` + Params V1WorkflowRunDisplayNamesListParams +} + +type V1WorkflowRunDisplayNamesListResponseObject interface { + VisitV1WorkflowRunDisplayNamesListResponse(w http.ResponseWriter) error +} + +type V1WorkflowRunDisplayNamesList200JSONResponse V1WorkflowRunDisplayNameList + +func (response V1WorkflowRunDisplayNamesList200JSONResponse) VisitV1WorkflowRunDisplayNamesListResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return 
json.NewEncoder(w).Encode(response) +} + +type V1WorkflowRunDisplayNamesList400JSONResponse APIErrors + +func (response V1WorkflowRunDisplayNamesList400JSONResponse) VisitV1WorkflowRunDisplayNamesListResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type V1WorkflowRunDisplayNamesList403JSONResponse APIErrors + +func (response V1WorkflowRunDisplayNamesList403JSONResponse) VisitV1WorkflowRunDisplayNamesListResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type V1WorkflowRunDisplayNamesList501JSONResponse APIErrors + +func (response V1WorkflowRunDisplayNamesList501JSONResponse) VisitV1WorkflowRunDisplayNamesListResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(501) + + return json.NewEncoder(w).Encode(response) +} + +type V1WorkflowRunCreateRequestObject struct { + Tenant openapi_types.UUID `json:"tenant"` + Body *V1WorkflowRunCreateJSONRequestBody +} + +type V1WorkflowRunCreateResponseObject interface { + VisitV1WorkflowRunCreateResponse(w http.ResponseWriter) error +} + +type V1WorkflowRunCreate200JSONResponse V1WorkflowRunDetails + +func (response V1WorkflowRunCreate200JSONResponse) VisitV1WorkflowRunCreateResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type V1WorkflowRunCreate400JSONResponse APIErrors + +func (response V1WorkflowRunCreate400JSONResponse) VisitV1WorkflowRunCreateResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type V1WorkflowRunCreate403JSONResponse APIErrors + +func (response V1WorkflowRunCreate403JSONResponse) 
VisitV1WorkflowRunCreateResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type V1WorkflowRunGetRequestObject struct { + V1WorkflowRun openapi_types.UUID `json:"v1-workflow-run"` +} + +type V1WorkflowRunGetResponseObject interface { + VisitV1WorkflowRunGetResponse(w http.ResponseWriter) error +} + +type V1WorkflowRunGet200JSONResponse V1WorkflowRunDetails + +func (response V1WorkflowRunGet200JSONResponse) VisitV1WorkflowRunGetResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type V1WorkflowRunGet400JSONResponse APIErrors + +func (response V1WorkflowRunGet400JSONResponse) VisitV1WorkflowRunGetResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type V1WorkflowRunGet403JSONResponse APIErrors + +func (response V1WorkflowRunGet403JSONResponse) VisitV1WorkflowRunGetResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type V1WorkflowRunGet501JSONResponse APIErrors + +func (response V1WorkflowRunGet501JSONResponse) VisitV1WorkflowRunGetResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(501) + + return json.NewEncoder(w).Encode(response) +} + +type V1WorkflowRunTaskEventsListRequestObject struct { + V1WorkflowRun openapi_types.UUID `json:"v1-workflow-run"` + Params V1WorkflowRunTaskEventsListParams +} + +type V1WorkflowRunTaskEventsListResponseObject interface { + VisitV1WorkflowRunTaskEventsListResponse(w http.ResponseWriter) error +} + +type V1WorkflowRunTaskEventsList200JSONResponse V1TaskEventList + +func (response V1WorkflowRunTaskEventsList200JSONResponse) 
VisitV1WorkflowRunTaskEventsListResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type V1WorkflowRunTaskEventsList400JSONResponse APIErrors + +func (response V1WorkflowRunTaskEventsList400JSONResponse) VisitV1WorkflowRunTaskEventsListResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type V1WorkflowRunTaskEventsList403JSONResponse APIErrors + +func (response V1WorkflowRunTaskEventsList403JSONResponse) VisitV1WorkflowRunTaskEventsListResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + +type V1WorkflowRunTaskEventsList501JSONResponse APIErrors + +func (response V1WorkflowRunTaskEventsList501JSONResponse) VisitV1WorkflowRunTaskEventsListResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(501) + + return json.NewEncoder(w).Encode(response) +} + type StepRunListArchivesRequestObject struct { StepRun openapi_types.UUID `json:"step-run"` Params StepRunListArchivesParams @@ -8139,6 +9604,32 @@ type StrictServerInterface interface { SnsUpdate(ctx echo.Context, request SnsUpdateRequestObject) (SnsUpdateResponseObject, error) + V1DagListTasks(ctx echo.Context, request V1DagListTasksRequestObject) (V1DagListTasksResponseObject, error) + + V1TaskGet(ctx echo.Context, request V1TaskGetRequestObject) (V1TaskGetResponseObject, error) + + V1LogLineList(ctx echo.Context, request V1LogLineListRequestObject) (V1LogLineListResponseObject, error) + + V1TaskEventList(ctx echo.Context, request V1TaskEventListRequestObject) (V1TaskEventListResponseObject, error) + + V1TaskListStatusMetrics(ctx echo.Context, request V1TaskListStatusMetricsRequestObject) (V1TaskListStatusMetricsResponseObject, 
error) + + V1TaskGetPointMetrics(ctx echo.Context, request V1TaskGetPointMetricsRequestObject) (V1TaskGetPointMetricsResponseObject, error) + + V1TaskCancel(ctx echo.Context, request V1TaskCancelRequestObject) (V1TaskCancelResponseObject, error) + + V1TaskReplay(ctx echo.Context, request V1TaskReplayRequestObject) (V1TaskReplayResponseObject, error) + + V1WorkflowRunList(ctx echo.Context, request V1WorkflowRunListRequestObject) (V1WorkflowRunListResponseObject, error) + + V1WorkflowRunDisplayNamesList(ctx echo.Context, request V1WorkflowRunDisplayNamesListRequestObject) (V1WorkflowRunDisplayNamesListResponseObject, error) + + V1WorkflowRunCreate(ctx echo.Context, request V1WorkflowRunCreateRequestObject) (V1WorkflowRunCreateResponseObject, error) + + V1WorkflowRunGet(ctx echo.Context, request V1WorkflowRunGetRequestObject) (V1WorkflowRunGetResponseObject, error) + + V1WorkflowRunTaskEventsList(ctx echo.Context, request V1WorkflowRunTaskEventsListRequestObject) (V1WorkflowRunTaskEventsListResponseObject, error) + StepRunListArchives(ctx echo.Context, request StepRunListArchivesRequestObject) (StepRunListArchivesResponseObject, error) StepRunListEvents(ctx echo.Context, request StepRunListEventsRequestObject) (StepRunListEventsResponseObject, error) @@ -8658,6 +10149,355 @@ func (sh *strictHandler) SnsUpdate(ctx echo.Context, tenant openapi_types.UUID, return nil } +// V1DagListTasks operation middleware +func (sh *strictHandler) V1DagListTasks(ctx echo.Context, params V1DagListTasksParams) error { + var request V1DagListTasksRequestObject + + request.Params = params + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.V1DagListTasks(ctx, request.(V1DagListTasksRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "V1DagListTasks") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := 
response.(V1DagListTasksResponseObject); ok { + return validResponse.VisitV1DagListTasksResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("Unexpected response type: %T", response) + } + return nil +} + +// V1TaskGet operation middleware +func (sh *strictHandler) V1TaskGet(ctx echo.Context, task openapi_types.UUID) error { + var request V1TaskGetRequestObject + + request.Task = task + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.V1TaskGet(ctx, request.(V1TaskGetRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "V1TaskGet") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(V1TaskGetResponseObject); ok { + return validResponse.VisitV1TaskGetResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("Unexpected response type: %T", response) + } + return nil +} + +// V1LogLineList operation middleware +func (sh *strictHandler) V1LogLineList(ctx echo.Context, task openapi_types.UUID) error { + var request V1LogLineListRequestObject + + request.Task = task + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.V1LogLineList(ctx, request.(V1LogLineListRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "V1LogLineList") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(V1LogLineListResponseObject); ok { + return validResponse.VisitV1LogLineListResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("Unexpected response type: %T", response) + } + return nil +} + +// V1TaskEventList operation middleware +func (sh *strictHandler) V1TaskEventList(ctx echo.Context, task openapi_types.UUID, params V1TaskEventListParams) error { + var request V1TaskEventListRequestObject + + request.Task = task 
+ request.Params = params + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.V1TaskEventList(ctx, request.(V1TaskEventListRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "V1TaskEventList") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(V1TaskEventListResponseObject); ok { + return validResponse.VisitV1TaskEventListResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("Unexpected response type: %T", response) + } + return nil +} + +// V1TaskListStatusMetrics operation middleware +func (sh *strictHandler) V1TaskListStatusMetrics(ctx echo.Context, tenant openapi_types.UUID, params V1TaskListStatusMetricsParams) error { + var request V1TaskListStatusMetricsRequestObject + + request.Tenant = tenant + request.Params = params + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.V1TaskListStatusMetrics(ctx, request.(V1TaskListStatusMetricsRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "V1TaskListStatusMetrics") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(V1TaskListStatusMetricsResponseObject); ok { + return validResponse.VisitV1TaskListStatusMetricsResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("Unexpected response type: %T", response) + } + return nil +} + +// V1TaskGetPointMetrics operation middleware +func (sh *strictHandler) V1TaskGetPointMetrics(ctx echo.Context, tenant openapi_types.UUID, params V1TaskGetPointMetricsParams) error { + var request V1TaskGetPointMetricsRequestObject + + request.Tenant = tenant + request.Params = params + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.V1TaskGetPointMetrics(ctx, 
request.(V1TaskGetPointMetricsRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "V1TaskGetPointMetrics") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(V1TaskGetPointMetricsResponseObject); ok { + return validResponse.VisitV1TaskGetPointMetricsResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("Unexpected response type: %T", response) + } + return nil +} + +// V1TaskCancel operation middleware +func (sh *strictHandler) V1TaskCancel(ctx echo.Context, tenant openapi_types.UUID) error { + var request V1TaskCancelRequestObject + + request.Tenant = tenant + + var body V1TaskCancelJSONRequestBody + if err := ctx.Bind(&body); err != nil { + return err + } + request.Body = &body + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.V1TaskCancel(ctx, request.(V1TaskCancelRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "V1TaskCancel") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(V1TaskCancelResponseObject); ok { + return validResponse.VisitV1TaskCancelResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("Unexpected response type: %T", response) + } + return nil +} + +// V1TaskReplay operation middleware +func (sh *strictHandler) V1TaskReplay(ctx echo.Context, tenant openapi_types.UUID) error { + var request V1TaskReplayRequestObject + + request.Tenant = tenant + + var body V1TaskReplayJSONRequestBody + if err := ctx.Bind(&body); err != nil { + return err + } + request.Body = &body + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.V1TaskReplay(ctx, request.(V1TaskReplayRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "V1TaskReplay") + } + + 
response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(V1TaskReplayResponseObject); ok { + return validResponse.VisitV1TaskReplayResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("Unexpected response type: %T", response) + } + return nil +} + +// V1WorkflowRunList operation middleware +func (sh *strictHandler) V1WorkflowRunList(ctx echo.Context, tenant openapi_types.UUID, params V1WorkflowRunListParams) error { + var request V1WorkflowRunListRequestObject + + request.Tenant = tenant + request.Params = params + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.V1WorkflowRunList(ctx, request.(V1WorkflowRunListRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "V1WorkflowRunList") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(V1WorkflowRunListResponseObject); ok { + return validResponse.VisitV1WorkflowRunListResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("Unexpected response type: %T", response) + } + return nil +} + +// V1WorkflowRunDisplayNamesList operation middleware +func (sh *strictHandler) V1WorkflowRunDisplayNamesList(ctx echo.Context, tenant openapi_types.UUID, params V1WorkflowRunDisplayNamesListParams) error { + var request V1WorkflowRunDisplayNamesListRequestObject + + request.Tenant = tenant + request.Params = params + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.V1WorkflowRunDisplayNamesList(ctx, request.(V1WorkflowRunDisplayNamesListRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "V1WorkflowRunDisplayNamesList") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := 
response.(V1WorkflowRunDisplayNamesListResponseObject); ok { + return validResponse.VisitV1WorkflowRunDisplayNamesListResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("Unexpected response type: %T", response) + } + return nil +} + +// V1WorkflowRunCreate operation middleware +func (sh *strictHandler) V1WorkflowRunCreate(ctx echo.Context, tenant openapi_types.UUID) error { + var request V1WorkflowRunCreateRequestObject + + request.Tenant = tenant + + var body V1WorkflowRunCreateJSONRequestBody + if err := ctx.Bind(&body); err != nil { + return err + } + request.Body = &body + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.V1WorkflowRunCreate(ctx, request.(V1WorkflowRunCreateRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "V1WorkflowRunCreate") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(V1WorkflowRunCreateResponseObject); ok { + return validResponse.VisitV1WorkflowRunCreateResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("Unexpected response type: %T", response) + } + return nil +} + +// V1WorkflowRunGet operation middleware +func (sh *strictHandler) V1WorkflowRunGet(ctx echo.Context, v1WorkflowRun openapi_types.UUID) error { + var request V1WorkflowRunGetRequestObject + + request.V1WorkflowRun = v1WorkflowRun + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.V1WorkflowRunGet(ctx, request.(V1WorkflowRunGetRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "V1WorkflowRunGet") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(V1WorkflowRunGetResponseObject); ok { + return validResponse.VisitV1WorkflowRunGetResponse(ctx.Response()) + } else if response != nil { + return 
fmt.Errorf("Unexpected response type: %T", response) + } + return nil +} + +// V1WorkflowRunTaskEventsList operation middleware +func (sh *strictHandler) V1WorkflowRunTaskEventsList(ctx echo.Context, v1WorkflowRun openapi_types.UUID, params V1WorkflowRunTaskEventsListParams) error { + var request V1WorkflowRunTaskEventsListRequestObject + + request.V1WorkflowRun = v1WorkflowRun + request.Params = params + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.V1WorkflowRunTaskEventsList(ctx, request.(V1WorkflowRunTaskEventsListRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "V1WorkflowRunTaskEventsList") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(V1WorkflowRunTaskEventsListResponseObject); ok { + return validResponse.VisitV1WorkflowRunTaskEventsListResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("Unexpected response type: %T", response) + } + return nil +} + // StepRunListArchives operation middleware func (sh *strictHandler) StepRunListArchives(ctx echo.Context, stepRun openapi_types.UUID, params StepRunListArchivesParams) error { var request StepRunListArchivesRequestObject @@ -10812,205 +12652,241 @@ func (sh *strictHandler) WorkflowVersionGet(ctx echo.Context, workflow openapi_t // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+y9eW/jOPIw/FUEvS/w7ALO2d2zswF+f7gTd7e300nWTiaYZxAEtETbnMiSRqRy/Br5", - "7g94SZRFSpSv2BMBi520xaNYrCoWi3X8dL1oFkchDAl2T3662JvCGWB/dq/6vSSJEvp3nEQxTAiC7IsX", - "+ZD+14fYS1BMUBS6Jy5wvBSTaOZ8A8SbQuJA2tthjTsufAazOIDuydHHw8OOO46SGSDuiZuikPzy0e24", - "5CWG7omLQgInMHFfO8Xhy7Mp/3bGUeKQKcJ8TnU6t5s3fIQCphnEGExgPismCQonbNLIw/cBCh90U9Lf", - "HRI5ZAodP/LSGQwJ0ADQcdDYQcSBzwgTXABngsg0He170exgyvG058NH+bcOojGCgV+GhsLAPjlkCogy", - 
"uYOwAzCOPAQI9J0nRKYMHhDHAfLAKChshxuCmQYRrx03gX+lKIG+e/JHYeq7rHE0+hN6hMIoaQWXiQVm", - "vyMCZ+yP/z+BY/fE/f8Octo7EIR3kFHdazYNSBLwUgJJjGuA5gckoAwLCILo6XQKwgm8Ahg/RYkGsU9T", - "SKYwcaLECSPipBgm2PFA6HisI918lDix7K/gkiQpzMAZRVEAQUjh4dMmEBB4DUMQkiaTsm5OCJ8cwvpi", - "6xn74SMifOGWkyHWw4nYV/4zo3aEHRRiAkIPWs8+RJMwjRtMjtEkdNI4Z6VGU6ZkakFalCy6tOlrx40j", - "TKbRxLLXlWhNO74EUdiN476BK6/od8puTv+MrSbFkPWhXE+piDg4jeMoIQVGPDr+8PHTL//6dY/+Mfd/", - "9Pd/Hx4daxnVRP9dgZMiD7B16aiCgi7ggr5DB8VONHYoZmFIkMcEnQrxH+4IYOS5HXcSRZMAUl7MeLwk", - "xkrMbAK7T0+ABEixPydNQirAKrhWUE42BJWGopMThUxyK3RVJiQmDrW4oV8oQvgQOYxl6V4rToXMlYup", - "kGFXOZHOibIYfYswMVBghMm3aOJ0r/rOlLZSYZwSEuOTgwNB//viCyVO3fEDYvQdvtTP8wBfCtPE04f7", - "nHTByPPh2Jp8BxBHaeJBvRjnMtHvGlZP0Awqh2IixnKeABbitCC13ePD4+O9o+O9ow/XR59ODn85+fjr", - "/q+//vrh0697h59ODg9dRV3xAYF7dAIdqpBBICCf040CTMdBoXNzwwUEHVoFaDQ6Pvr46+G/9o4//gL3", - "Pn4An/bA8Sd/7+PRv3458o+88fjfdP4ZeD6H4YQy+YdfNOCksb8omgKAiSP6rwNXc/yA6CT5rqqgG3jj", - "OnqAOvHwHKMEYt2Sb6eQsz8lVkK7O6L1vvUGzyABPuAkWXNmFCjYKFeu5+RKBtt+cX+PP32qw2EGWycT", - "LxkytEj0PBgTriMM4F8p5MKkiE+uEHDMLkedMxSaibXjPu9FIEZ79LIwgeEefCYJ2CNgwqB4BAGi++Ke", - "ZCvupCny3dcSIXF4dev9nAYPXAfrPcKQGJcMH+VdyEpf1QxZq7nyGe5eO+4pPYcCC4D6fhGkxtuRX7hS", - "xm1NtsdqQRRCtqQo9NIkgaH3co5miAxJAgicvPDTO53RDqfdi9Pe+X3/4v5qcPl10BsO3Y57Nri8ur/o", - "3faG127H/e9N76aX//Pr4PLm6n5weXNxdj+4/Ny/UPY4h5JvhhQPZoxyxuiHeob00yS/1D1NkTdlvMll", - "BsIOI8d9d3EijmaIhCjoyIkYQvUCosvFA9eJl5IPbHwdY8wjDcdRiGEZa0SK3DLGCmBVg8FHMcNxmkTh", - "bZQ8jIPo6TpBkwlMjPsIfB9RKEDwQxHMpYG9JAp7z3ECMRY6ZYlwaJMLsQHlYz2MU6IZuSR7aLOODipl", - "ghI4d9nSq8WAfrFz1JK1ceRxkJEOY1Jlf3L86MdinGA3wINOP6T9H+CLsbuBPrgayUDKMTO8GCq3AiOK", - "SBQjr5uYiHQG/jcKHXkwO3Q7nH90Bxf/lKfv8GLosDGWYe7shJqh8H+OOjPw/D/Hn34pH1UZsGZe4MaC", - "bgAT0psBFHxNojQ2SzXaBOtESIAwoWvkLeSVNMGu9X1tgeX76BF22IzltQtQ61Zeo5zwwbV7zT7JbaVr", - "dUgkrBsr2Vu5ro6bRAGs0xH4an7A2QgmA9peiw9XDFaHFSM+7FRMbkVaBRbYMnCQTvST0i+rn7QjLKVM", - "mL4aLtYMKD0e89MF28rY/NcrpXXBElU8bLT8pFguylaH7IhpNNcS15EZJNPIr1duFXT94F0UVaUsMzjb", - 
"+tqPT2Kgms/GY1g2+A0m9ODUDmO+E2Wg6Qaam70Aq9jSfAMz5NUS2DnSsWkMJijMzFtV6L/KWmZaGZM4", - "T02uJyrBW5nhdJuu6O5nvS/dm3Oqk3ev+gYtXBngMvFh8vnli3zEkMOEUhmCpYt+PhLTiDapCi2lySzF", - "kCR7GKg/SeZZrQxu/6woeecfhMRzkXEhkv4HaThMZzOQvNRBxrbqttytgiW5qpct5E5u+BnQGf2aaKnO", - "P/4zvLxwRi8E4n/W65yZtsmm/74cDcgxtoD5s+WU+V4Cui1QVoAoJMgZSqAnQZJSBGDP5Q/FZvlhkkAW", - "omcIQeJNtaeRid7Lxmpm4tG+WTDVLKVaIeXWrKGTpGHRKGl+Ix8DZDE0b9Vk3BiGPl1pzcCiWZOR/0ph", - "Wg8xb9Vk3CQNQwuIRbMmI+PU8yD064HOGtqPnlE5rrJEai447Nu+eoNbgMeWOLHMYl0xb/4nGmkEeZVb", - "B5PnimOHOMX+jEb7azLIl8bEBMb20mtIYKxDbKUqTNAMRinRL198rFv647Jq8KOi/sp7E1u6Tq/9TzQa", - "pGGFdONPLnbPKFmnzL/I3GQAATbcqMYoRHjabOo/OUVW7SglWt7SsHtLEF0CcRoQrZUSE5CQZovBBJAU", - "W6yHnk+8raDvQRo2I3G6+c2p3HuASTULNFmuopTWgawczHM9l7828kEkgWS7YOaaYbZNUvW46l2c9S++", - "uh13cHNxwf8a3pye9npnvTO3437p9s/ZH/yhhP/9uXv6/fLLF622QtU4vfuErdPVfFfNZotJ2DMBNr8T", - "bFR5zJ6CtfojhbhoO8ZvDG8RmtqXNQU2MZGOzNgyA+A93MLRNIoe3nyRCiyrWmI0OUchbOQLQg9T9pkq", - "ElSyyCM1iCZOgELY5OGfO4xq56DDiQa1SoqpN2+hsUnMYUt1ksi9WLMZ7nJUncNHGBQNN59vqKDpX3y5", - "dDvubXdw4Xbc3mBwOdDLFGWc7PJktf8FCHSCRHx/+7unJCu99OAfl7h/FkdoeAMVnSvuoBoEqK4BP13+", - "EE/uY0a7xx03hM/yXx86bpjO2D+we3J0SC99Rc4qdNZ5EIkWTsypMJv42OpapcCidbeDz+WRP9iNnK9L", - "6/gUERCol1jalFl2AoQJf5bI3dUPbW5xGon1X3qD/QFJgjyNPA7T2ZXdFZvRsbxo75vW+1+rWzUfC3E/", - "KHbFNg44sLtO8xHFpXpfj5rCA00GamGWjooQnfwfAAKZO0kZlVY224SK/4AOoBXRAcBkAMcoMLwnMn84", - "4TCnDsac5RLWETKXkDV4FbKJfgNBajh+ZuAZzdKZauPgL4TYYY7YwuQrdv0JhX70pN/2VdiUaxD9aF6H", - "lCaadcyAD20Xwb/pp+Df2DLoXqJQce/J0cxdhsdR4kHf1mFBuSco+yXXm0FVoLQ7la634DDMeUx7HGaf", - "lzgQ58coHYkcmxJrCiq1o0EPhmSo3Gfn3okYeCZ65l8dnSuXaoBockNdxCKxhDVhbSYDgdLcZlC6QM+7", - "E1bzSLYRHfVuLWCZH10r/iH96/04qw5gHICXv5VfKF+SYpjBxpUV6OFt16c0/3R4WLPeObhNqzYZTpTu", - "9kJ7ztJlC5+ELqFczpi9gq0auD/SUedsHJoBJxCTm8Sga90Mzh0SORiGPvPIE9dc7JBoPY/upgMiDdFf", - "VBvwYUjQGMEk0yaFAiSCJ7jjoBpzNIJBFE4kxDWysrNOv0U702alL+LQm0I/DaBCact65JpIquMS7vJr", - "f6Q1ccLNB79T1uWvzkTLnNbpH8PTb72zG/qjTm/JZl6vK9qWOpWVV597llW/JzSljdX5nA3S8FQ1NDZ+", - 
"sOAAbPr0UgCwWeLQSjm8LXV4S+e8nCgq/fLKRLcFFy6NHLDy0DNyUCM3vfIopkuZiuNqm+UQzkA8jRI4", - "DCKy4htZ4bajfzbnJggcRNwwI3rYm/kXvB2JF1XTsuhnJ0nlwurVAfVptH6hKAikz4D9SkuiSWMsEU3s", - "QZ9j8BwtHfUGOP+OKt9PKfmoD0flp54pCEMYmOAVnx3k6y1TmA7uPPHR9Xd+PsKF0Y9eTsH86RecZCl1", - "FcxMq6ffllg67W5eNxt8mUVvhaJtpwpLRGToLtJFRyFD7UFDYGySe3pPlykK/AQWH+tr7tlr8k6JQVKK", - "fa2FJIHAB6MAmjZXfs+i4LlArCWTpZymDDOYKUBZRYEcpJOH2ED+alWx9WtwkuqSXhwVXgAVa/eKXKkY", - "Ed6a7A+1NFDojk+jNCR6cKERykVMp3mfCgzN3zULvmAWrkTC8y1rv3q2i1JiAnFBjmRPe90xgYk9Mlfu", - "msa7VOzMEtqWrVcmbWsSJxaypsmKsy4VK6aqj8EjzupwyigwW1ml+5lAXTfxpugR7qRcan7p3ioRE9Eb", - "lb5TBdcnkCQvFVJ0bfyoXGM2wxIVNwYFCRKP+tunid634YJfZEDts6poYwh188xUYLau+voOihObhuQk", - "D1qsR7xLsR6UbuAjTBB5adJ7KPtY0d0XlGAyhFxJtqe9c9C0V0NHYX7LKAA4N3OGWQVNquce398KYt6W", - "KK0CmdYSci7SpQ1p0OPG8fuLy/vby8H33sDt5D8Oute9+/P+j/51bjzvX3y9v+7/6J3dX94wO9Zw2P96", - "wc3r193BNfure/r94vL2vHf2lVvl+xf94beigX7Qux78zg34qq2eDn15c30/6H0Z9ESfQU+ZRJ17eH5J", - "W573usNszH7v7P7z7/c3Q7YUuqYv55e394Obi3uereZ77/d79cnA0EQAqjWn6ThGQariyikWOOhf90+7", - "51WjVb11iL/uORp+9C7mEN/gLUT8zVtX+a7nKTHnk3XCRCRN6BlSW9zKpH+Rw1pLe8GM9cL72gx/IATB", - "C0EevozJZUoqRs0NEFOAnSgm0HfEJTMbRD/H2hOFmRIqLJ2RoT6tmDG5gjZdyWbzlKwpos2crkS75i0Q", - "1/q90KV1mUR7nOTcAXuAeC2uCoWTIST0P3hzLMpTLfSeY0R3mQV4MGCqx+e9+DTYeWL5/lisigMS6IA4", - "TiLgTVE44Yn/GIKr5pfpVjiRMLe1BaHgS5YZFsvwMD+3SlwotpkvAAVpAi1AYS4UKiCqSR+zqGD9nAHA", - "fKnm55bcIxaEYmfZk4sIi7f0fQPPksi+MKtF6L0YnVydsWziACIdNwVVrdbSbpYEWoDNcqGfeaStJ3PR", - "a5bksfKpSKb4FOmdN5n2crH0SHUPBoKhTM8d8rMZa7xF1YMHG6GQe2+BE7OQ1ynfKzXpRQ3tbM1RIki5", - "2QnC97QM/5sRlH1+Fcp6da1vMEx4j6t0FCCvihTYeBUZvlSYt2bTxf4tsukDsU/yjnF5e8HuSd2zH/0L", - "t+P+6P343BtUXAiq42eYhRubnZt09o+ys1lEQFCHiQIciomgau4m4817VWYIkJSvYjG7Ofd+43cz9U7J", - "7n+XF4r7WQV6C2qNTrMDyawi6IR9d5ifvl4G8/AYEjlPIGFpHEr6Du+tD+JoFo+jD8VZTXQNH9u8RD38", - "y6UIyLa9nkMzIrGLranbsOYhNTNIYCIDa+RRycdy/oH24b5z5PjgpeMcOU8QPtD/zqKQTP+54Pt8hh5t", - "oI1ZskpEXUUB8jRpergKXnUrzRKk86YavaCBZC2yX53jtgDOvDph2lm7zGTSiXuDbcAd2OhhfsPSw7/H", - 
"9KjqymvCYVaSmdSor6iAmPd/h014rQ3ibW0Qa7QNrCVTu7WF9tXITbfMPcAciIOvQIp1Ud4quXMfAwdh", - "J2atHRD6jgfCMCIOYDUfWDEpmaFsHvFa6LDuEldrxAC+n0CMVWNGQS+Tt+OyTYN++AbwVCetpwBP1SH/", - "D56bTshvrtrwWkxDXtbIOZ0CYpzwN5igMapDLzPJUFnyKJqLemAFGPQUPQXYXHVMOwfIyow5GJINPjX4", - "CMcBeCkQtNy/xtaPInbvDARWLMtmTnINn8xIZDwIn3KsSR1ND/sCx3ZW9u2VeWdVAZIBUYm/5WAo5dLJ", - "itKpeDKh/DyaoHDx9OqL8fdS2da3DuNyjXEdrgdwgjCpkO7biG67k84gGLZwt2RhJNtNU9VjPEUx3lXL", - "XMlSucHTfB2nDJ9Mt20ieoSrUiu1PNsxg4iCEGqYli1SU+Sz7JsmwSIP83TcWpTwMMYla0hYLBJDL4GG", - "t0P+LcvPI3iY3oSc/phVBY2T6BH50O84wElA6Ecz2YmFO42gM4EhTICI7lLjII/XhvHmaPa3kwAX25tN", - "k3IGZy2yqVTeknyURfFjFc1Z6GJkTOH5eg+IMQk+ZFe9PEsVH0ote9noydcilFsHeh7MzT3TT7XFwinI", - "366vrxzeiFUJlxScCORbpBNTsJLBXJj4zhLh1SQkE1GZngi4/VDSvGxtbRLWUsDCtFOOBf7au3Y77tUl", - "q6R3dXPNbKimE5JHOuGqCF3MXwyEpcEDoRPDhNLVfiNPLfAIUABGAZQBRzV528vTwmfopQQ6nqw5SIIX", - "/RMGVTVY4aCkX1OklT2PoEkIfSfvtIpyrUvG8gdgBANc/bzD2jCWyo+D7BiwTicDk3M6jm7LAoDJNwgS", - "MoLAIkBZbBV7rcMUQOBMZe91ZcsDnJmpetDDBIwCFr+xhZDOwLOZ8DVJ/ZZjgPXrHWZ9IynladOFidI2", - "Wax8/rzWkIDncsLpwuPSkG5JPxxHdtwwUDow/9rIdBJgmf6Ah+ZzRlxwIXOpFDQLyePndDkH2LFa2ht5", - "JHRPr/u/9Vg24OzPq+7N0OCIzn+wQdY1bfnaccVhaEwuIM5KUSG/CGRthgTR+6ZO+7wZnGuGb6qMsvZa", - "RUIRls3SkspE1LTrqrMEVLgB8Of/msmrCzZV4OHtbSNGtTsDclBk/jkfABBOUhEhZS0WhmffMT94eGeR", - "uEYfDqhXjIRE6j2TBOhLkvgP5mFLi2MQqerf5XmXR3f8fv2N+Qdd/37VG54O+lfXWm5XOFkZZtg7//Lt", - "csjjbn50L7o85Oa29/nb5eV340DSV2r5IjDy5VDLMPaPY+zxMXse0z+q/BmNDIKVftEBZEWforTIykIW", - "mpzNRsxJU6pGPQKTxdeaFXEGWuVfVJ9pnuhQMEKWE6XKb2ZelpuEFx1XqVteJtcJJMr3LLBl7m0ylBmM", - "+AP0BBLMcOflXZ0J7ZsdSsqT+r7RO02toF5Z69FUeb25spnrk8X3+vmaVh+O6+/ocur51XS0WK3aov6Z", - "7kE4A7B/psWh7P0dhYVb8Zebi9PrPpOHZzeD7udzqgOddb9WSjI6iDzoGpEtm13DB/K7/vRcKpnLhg9e", - "JujtrBaitdFTjTHJd5jHwGtk01wS/zKPPcAXrL8LyeEpWVZMMXf3ojwLHBxDD42Rl0/i/CMGGEPfeUTA", - "GaOAwOSfljUCbosVjVaeAVI8sBhz/2VuLWpuwqNDJdHt2nK3LJackifAsKfLPHnLCs9cnpTlbTI68rmH", - "asT8pkFYW9ZxbWJJm4yg0P/80mDwa6VXOXVlQz1k7ckvsyzp6mLvqoXJllzFqvJSV4FfVWCgOzylx3Rv", - 
"eFp5TuejVFTdUWm5IMUUyVgzyXAKYtjK7lZ2t7L7LWV3TX7nv5FoX3Wm8qrMHKVpF7r5FEnCcP2Z21rN", - "83cUXim8q8kUFoUyo7G2gShGsZ6cmbcLFhit2Wx8ynKkLVIoY511PebrXNQswnjNY8mPmtCRHOqUd6zT", - "I+aal+bPGUIb+yQYR/tNMp32o2Au7TfJo9qPOdvqc6UZF3sNJjr0BvzQX966urSZUe9KxSGsoh8hFE4T", - "qoqO9XKhInPmPTJwY92EIovV2FB251687qx6WqxfYXO1ew5vGsmbV5dfZOAMP6tVz/iBqUdffobeC+Nx", - "czTzOJsVRNjUPyJUgaHoI/MsWzBC22yIarem1wI4BmlArhIUyWxhOvZnjZxYtNIxcK2ZN38leaO3jyy5", - "pgWoWKgG13kSaY2mi7yHF9N7Ov3mYGG8tntYUXi6AWth5XnE8IzLP1oBoaYrsLXgVmrVZm1Xwpyn61QG", - "uqtnB7avqzSBNyGQd4Vw/sCb276LGB8nkDmdVGSgnYHnmhYNM2ma8mByb+WUCimq3c84hCMIEph0U8Ii", - "BBlGmexlP+ebMiWE5TzzougBQdkc0V3lP8l3wRN3yvz8lOBAEKPvULgOIOEtoHFh5d2c7lWfZf4k7C5f", - "/DWjLPdo/3D/kBFmDEMQI/fE/bB/tH/IQlHIlC3tAMToIBDpmic6L+2v8lmRtgohxk52j6S7CGSFFfdc", - "fP/K1iW9atksx4eH5YG/QRCQKZPKn3TfLyKSzVnYGffkj7uOi9PZDCQvHMK8oXxg/kOM702h9+De0f5s", - "rQkE/kv9YmkzVLXagWywyuUy4FgkMY+cJQkYj0Win6rVZ9DWLv/x6ACIMOc9FtWyxx6W8MFP9rP62yuH", - "MYBEo4ufsd+xA7LqvCyansfusO4ljM1lTuAjMFpMAEvrQcGuyI5VmsFhN03GX5Sec+4qLcVVuZ/bC7lc", - "XPrq+npX2vuPZWwNU8+DGI/TIHhxOEr9QmnjEvJeO+5HTiVeFBKRqxnEcYA8htGDP0XC23wdNacVy4wu", - "4rPm37RnIKBYgL4TJc4I+NKnnIPxYeVg6KD4EiUj5PuQ67I5fXM6qSIzSfEim9Zdx33eyxIPsIJS/ENH", - "Qxh37BJFPE3sN1felyFxPsLfg8QZPXyOuOxcCTFYZFXRkEkltkjkpBLnRWy86kX0ShZiSH5ahr0gBjig", - "rRiwFAOcWtYnBtQDMkZ7PIvKwc/sb3YaxhHWKA0D+Bg9sMSk3as+z78ivDeyGefERIxYghdpHqDdbaRE", - "NrxBJkhYt+q4S9jyBJ0z6P7eRI2bULUgHbqx12LnJBnnv1VRcrblBQr2gij1D9SrrFnbla0yJ0F5nWCD", - "OCjEBIQsAVqRiE/pZ/ncbFaC149bBoiThll819YQWI3WzhGsvt+Jrf+hvNc878kh9qKYP36LE03Zb25c", - "PfjJ/vtatd9USrFW+6UNZTZWvpG1kogNYVRO2NeNCqHVbbaoOFFzeCeQJAg+CrHGscF2rJVtBRJXMJOT", - "N0dxhVTj9HNnpvCDOrHGtiWTajU0f5YJsPdO92eMhFva3y7an8GFz3Dj6b25g1vkuG9CU9mRuCMH+SqO", - "cDrGgVJrFht3/BxhegEKnEJr0wbT1v1iw7XtNp1L7LgyZcPNl+kNCqvbJkLItp5txNwmlPe/sMlRiEhE", - "pfnBT87xrwdxEo2g+XIpX+kckD8Ek8hhdl1Re1YNvTUzfDb1VYTJIA2v2Lz2tinToZdJrg2fehUEJcLU", - "OT0x/O5v9FS4iIgDUjKNEvS/FIpIJqzgAfWiuO+8mZMAFEDf4XZ7h22P80XI836+rfqDo0BmrED2wU/2", - 
"HwsrvjNUC2qXKEetkm5vtC+MaSQeBuJWWueLONkm1eZoM2DchDkJ84k/bWZinlCG5eUCQRA9QV//IjBP", - "tVL0st+rVCxOdEWOCfHBTxxiK24pFoUv80uIG7DJXIV5I6OIk3vr2GQOGS2jbCGjlAg2Y5WLYSWjhFjD", - "JlJxUaxNetWFziuvxCUWafw29mb6R8dsCHiAL4taAhQYjj99KgBxtAodKE4i+g/ot2fYFrGm6RKJyDQd", - "OSCOJbWXjzXeZo4fCYz3kpQdXuLP1wPAyz/XXSBFKxlnLBIhlVmVxw+xq50c2IJp5XjmA03Au2nGFVHW", - "JHLwA4olbH+lMHnJgYvGY8wMIxpQUEh++agNuK6ejlf2Gb0YpmSfG864Tnugprj5AoZB/M6NgnTWj5uZ", - "tcB1TwAz4TOO0tDXmS0K7K8wf6YZ0J8GaeXjY8bC9TIp9/43SyTepoE86vFBW2n0bqRRXp2+lUV/H1mk", - "MP76JVEQTarlEHaCaOIEKCzpRuXnw/Noco5Cfjq2Ymg7xFDHXHEtgI8wwHRenjenYmLWsjBz5cOHoAPa", - "iyeAMKwcQ3rwOmw2BY5xlBgA4R2aAjLkvTRA3LLqzJHDIjjM64/UZBYNJy8kwjDggU/vZxk3KqE4U5ot", - "Aknef72HlCoN6s4nSpLt4WR4PWenQiaFlbPgPJo0Pwb4Z2y2U/HiCdgBrJiTwWeTe5Xypu56HKL54MUa", - "h9Ue0CQSqfbfwN+5lsRF4hvFwbl1Z85InO91Tmx1zss6is5MsTzbUkUQA/OAekaYoHBSTeC7Y5bdQFSC", - "HRPm0YxvGn/Q8uPKwgsaBBNU8qU+1K7alQtk2qop1AHXhR3ZXke21LFjfTE5C1gOzJvQ8k5BXauiVntm", - "6jRQ0ZrH42Xa23s93FQNc3Uhd9Yq6NEbh9yVT8A25M5WR10q5M7ulDzAkND/4vrwfNnFkV2qA+4UckHh", - "ZCj6WPr8v5NjUkHMEmekuictKxW8xI1oWhkfZXGr1Q9tWRgptgtTbfXJzLWd4QPnWYob8Yn0325tffPK", - "YxbripsFwNYpjAvEZLc6IkOApHVFLVynCWN+0pa/VsVfghEWjDCvPnAsvDowi1QquHbw3oZYzF05a97z", - "M+oDfLF6RKXtCrNaJW5kZMByoZXTApthUqryWMGWy4rGACrlgRYDMUlDEbUFrWCVba2fP/WJtN/oSZrt", - "59s8SLOpt+A5WoVDfYyuIJYsovcBvog6ijFASYlesoz+f1B2OzphTY948cRj/q9jKt5169FUjdAyQ222", - "bvMyZLy8FZ2LlOkGllxthvG1h9K3XgAruRlA6eNpGUBva0KuygfRXgEYAkTO7UqzMOfvt3FDsMvUotp8", - "eVTOu/cCPf73ZmaV+ZGFegqfPQj9UpCauKDIiClrPq+/mByM0uDB7PbzOQ0eBHngXCbgSqFA+7xjwUCX", - "31A44LeUDri5eGi9xLdMPjA2VYUEXrGU8FjRmwr3QPadGzKUypwFFdckNbhbCR/hPSsUDAH2CoW4MCQw", - "DsDLysXGmxU1mk82XyOaGNKgnxNdK6S2VUgNGKWuRz4xM5qljZXb5izsrN/hS/uslxsbF7qtM2S3N3bd", - "jd0Rtt9V8oE4DSrSMNPvuNnRPJBHzHs9mjkCtuVoXo1ZjQPXavXv7cBE4SMisKmDteyldxrrs6/tWSl9", - "xRR8LOQlJrHd+obp3KdzWlyTzzSfoJLWW/O34iXNUWLnHM1x+6Ye0RzcRRyhBWG0bKn3fs74ZjWumoLP", - "5Q97/N/NKm5ZsHLjGlvb5U9T5Ktq2PYydOz62VrLvZoCYlvGvboshNn+mKK3i/vYpDCXBSfseLrBLeSE", - 
"9YbeLnbuvlnwrSXnamp+bTPniqDYxpxbdfLN4GwkihQ3uKPJXnoW/8G+tnc0SY0KPha6o0lst8qg7o6W", - "0+JqdEEx3sFP/odNCmoggHDGSTSrC3vj1PD3UAXFsk2w8c+bT5S9ct5dRAd8H1y7RVnuLgxJ7TImLWzM", - "yuTFXylM4d6MCm4P1xbBYq0d0Tp7Ra4UGF8h+S/t9UNMsYsyY6ciA3bJ2Xv92kuB9haLAHNEEXxJ961M", - "fGuZSMVRtjuzTLBIiSg5Z1GZmAAC99iDk42rBG3Nn6fqfCUGgMBz2rCNS9vWuLRVxTDVYnKdkUoZnW1B", - "tNI8LJtKn1nktQbOOAo7t944c3dWFTe5uKWods75r4tKXNFjL44C5L3Up2yRHRzewSZhi3QluGI92nQt", - "Bzq0LGbimduN1tSz8axHvApZZaKWQoUzXFmYrzV+8hwtKk6a3B7mUN3WStqiMmYKLxiqrdaU/LNgxANM", - "QEKM7DikX/k5dtlNydRhl5V5hrzBMOFvJgygS4pQ1nMXOfPD4XFNiTGGMnGsFLAyhcAXbzxBxAmmSCvz", - "c7/OFceiZBc9IEgHZcmPC9WyGEqLM0pCoDuwMB3U5c2aq6OHdWXtWjks5PDFsFB1uoEknsdyK4u3ThaX", - "GcGqomRtui6L0qqtdyJDQJG/KrN0rY5mi5Naexm2NWK3mKGNnGfJ0ZUnqqjHsbeJJytRImzXXq7Wby7Q", - "IaaZzSCrW1XYmfZRZRseVbK9KT+qLGmf0FRPq2TdvFCaM3rhDKUt3bgjdrzOtlZw20CdxQXlQysRtq7A", - "oioiVlJU0UpO1ObU6BICZ7FIDsPaWtR83bVkGq0EqXJgQ5i59wsRwokg2L4Lwhs/4tUxyqYYOoG0Y0Xs", - "PUtSYsvDrHnLwtuYDSBJQ7FVNcEXKIxT5g/BH3d1y33dCk2lzQVQIV/Yhr+FQMnXVGkL4M0si8J/hWTI", - "h21Fy9tpB82yXBksDWK49kKxzRcKuUtrkRriLX7vKUoeqgLGcrdOo6NE6yORu6hzVNwypFKEVNXaoMjI", - "3Oh5R0duR2vE37ZXOYX8F08VIgYxsdC7f30r8A/HxoZK5Ghm9hsl+pBb23Lu9j2/qYy3iLGeS+Vq8zw9", - "Ibnwrva9zc+Gd39Y5phoK1EtfdWUIUDF2GmO40UfqSSi+fWyeYZItSaPJlGkUkinTReppItU8IJrzESF", - "qkdvlzxSB7d1kTnFglQgmPZ6upVJJYt7VA4yrL6gNhE4P9V/1r2OFzih9gQWZLrLj+VzrK8HTcXgDqsJ", - "YrsWjVduH8/N0cJFu3R9pHCnSFOL8/MBe+KoNVHzhxDO0CrQ+zV83Wejt8z99syd50a4UkpDcBiXsWYX", - "ccS2uzVob8igfaviPrTJSpBvUlOVYXUSB09BDNekRwzZ2K282Rllgm9Yq1H8jTSKzCPeonR2oWp2EGSv", - "blija1SxPgvH4g/kPZluv5UBKwfwHGDi9M9Y0sopdAIgd9CU/ARg0veN2U8+HOuyn2zAc69JmQ1V8rS+", - "NVv6Yr+ALLF/zreThdjqZYK1tNNo3mU6Jh+OQRoQ9+SwUxAVm0jMlM39aZHJefl3Z/TisAn0k4pP5ijx", - "Tahd7WPP6vWtVSZ6y8a0LNvpAGcEiDctPfZUaUzvvl6n+k7CkWHrDCx81MtPJe+6iGfQvh7VJF3iZLOJ", - "lxt84CVRWK+R0FbOn9EoB4okaDKpdZ84TaLwXaspO5M1MttY5NNpJ5BkKvF+TXJg08Vt1cmLdykzcEWu", - "ytGLMxb5MFeWMlPlM2yfNnP0sr7MmcqxueHcmQVkLKHDtgeTRo8tnQRrUmjpsXTwk/5nT/5qVwyifFRZ", - 
"Pw1Qwtnx0hDZ6k1gFTC6+eIQllUctJvY5uWcr6qgR1Mza36RIO5eO1XPbUsy1y478GwxZ63p6GyPzV0w", - "fTc6rFcgH+zOb0YDtnZu1fhe/3rf3iO3+R4pC+PbXiJZ+/XeILf6ekuBi0FCkWZ40Z0Dize+VW18G4JP", - "E4+thU28nW7KLFBAGyaApBhaFTeSbRe50g5ZX3G5tAHuAYW+FVSsYWOQvqPQr4dm5y0oBM2gA8YU0JJP", - "4RPAMsRPXYJ7fHh8tHdI/3d9eHjC/vd/DbgX3bt0Aj3x+oDAPQqFa1urj0I8guMogesE+TObYZUwV2B5", - "jEKEp4vDLPtvFM+rAnqlmF6fRbBsfnu39sB53bG91qzFi3A9hkDmOGiTLBc4AjR60BXZX82ea+kfvMvl", - "Hls1vFXDN6+Gt7plq1u+SWQAXrI8KhNAbRrv+vN9DaVK83OeguqnAT0ea6yGWctF7IdD2bm1Im6zFXF9", - "96KMAHbKXaJVplplameUqXwZuaheiW3Wqu58xuCZlXbDhdvLEqa1OqxWKzFoAOvVSw5+Zn/ulTKd1Hol", - "6UFuqLPsuG+SBgfGzL5aVG+tu5J+d1t/pXl/JQOemjkkGGijxnNpJQy409V6dor71nkct0fxrvs1rVeO", - "2CkGWTKD1zyGprKeJ3BC+GSOpLEPpLnmHXYn/XD17VWNgtVnL6gEbaOVRjXb0KQyiHHzN5r+sZmTp5o1", - "2Qx/KxY3X/5w61JOCkFXReXrCWJUZHHBjqyXx1IjEBLZXh8sqRKDNGyl8CalsNwBZQOayF+j3rDBUk3N", - "1VFVAr/Lm2Yrfq3Er1BI6nTilYtcnsd8z4vSkNS46LA2MiuUTMAPHgEKwCiATPoq4kZ/G/8KCc+Tjk/Z", - "jDsveuuSd+148r7CZi149eakwsmntYYb3ugLSFospV+R/VMME3zgpUkCqzkb89sBb+jQbiXuvcEw+QrJ", - "qRhsjXRHZ2pIZwzithTM25eCgV6aIPLCxLgXRQ8IdlMqu/64o6JqLritSG6S3Nn2a8h4gsg0HR14IAhG", - "wHswkvNpNIsDSCCn6Us6v6M9j+hEvBDGVzb0JcXlqRx+jsA/HB7XvCd4Yl6/PO8UAl9UfQsivhnaKoOZ", - "WH+dQ2YBd3KBxTks0YcJSMyiYEi/LoY41rU51hg868cZg64hwqJoEsD10Bsb+m9Obxx9K6a3HHF/O3pD", - "4SMi0KY0pNSGeQemdFsd33SEa9a3L+Za4ymuTmTlPxEgLDemuMBWX7Q+Vll21Dns5ZR3rbkhFmjvAHge", - "jInZ8tZl33FmYROTlKhN3Xzex12PPYkPzieqL11YQX185Tr6a70A8vr9DEmlvbenrwSyPIMVNc3o92b0", - "xfu466oQRgdfAX3xlbf0VVO/nSJpAfoKogkKzWR1Hk2wg0IHsLNxv0LBOGcDrYeW2BFMx99QjVWre3QQ", - "TSbQd1DYXp+36vpcPNYp1djek4NoEqWkhhmilNhxQ5S+va1H0Gi0ZRWHWiKtUUYZ9diS7QzORjDBUxQ3", - "uAIpneyuQfwI+ZF3E2FEayVw/aTN70Mqito70SJ3IhWD9SQZA4yfoqTCE4GLSSFJHdm+SqReyTHXp2Oc", - "TkE4ySbaJmXDY5D5GaJacb5D4pyTVZHSLZgogRMqyJKqSx9vgSs1ksxPZ11sI8HYJoaRyGufuXZCT5ck", - "ZKvz4AB4D2t5YRjSkbf4gaFG1DR8cXiECRYgVBa3Fe2k/wqGyaNGR+yH4+grJL+JQVda2kOBNM/ocLR/", - "uH+oyxmhuI38kXW9s6jacV2x2DlXuQpyvoVOAkmahAXkzenZVEqlYYjCST7F854cci+KeYhqPpvctCc4", - 
"mkbRw57wIjr4KX6wiMejJ4VoXfYy4r/bh9qJgcxePNlEG3bisYxdk/C158Lbnwvz8XIqmRpdd0SLOyvm", - "OBB4trkky6ayLF41xwi9B9sm1thavlmN8xuHnvu+CdRQzAzEhCapm+UNFdjJtqtlzy1iT2YTKG1RUx7N", - "eJP98WpR6VqjbXAKswxMFR6CVQ6nmjN+d9xNGzv+iRW31rCSR2kpWocqzdUOpEytplRIvGmFrauSkHmr", - "naHlNZgSGAIK54bprBAYSCXKNhfEYslrHLKW0/ScJhhiGWabO03mIzOsMpNk7uNWqRAa3Iu2MryhSVaP", - "DMA2umrz0VW665BCMQsGN3TqNCx7Tmigcr2HKJ8FI3ta3npr3lJDiJZhLBu1z567mumBW8Fg66s8zZFh", - "G+jMta4il21aObSSCPPqYSsPjAricsxZoyZapdenm1TMo58x3mP20mE8KRuk098GftaktOQJKVdQb2jx", - "akN6wCZJlMYsT2gOgtwoIyis03f44tbmcFizkFgyd7d8VGrTd2+hNrFQvvBGgkvmlTH6hsiUCE0zvSyU", - "4GUrJde1hl32nf6YWbdxSqkD+h3GVQEgEJOMpxB2xpB4U+ibsknngn/LFSlBBgtmjXmzXDEKvI2SxLSp", - "YdrUMGtIDdNINAvZgC1etQonuZVYFr41O2SC+TvI5TVLOekwtZwq2Mq7rVIBc1JcVAWcd/wbQZDAJHP8", - "62hdAZknGZcHaRK4J677evf6/wIAAP//ckHvOXADAgA=", + "H4sIAAAAAAAC/+y9+2/bOrI4/q8I/n6Buws4z7bnni1wf0gTt/U2TbJ20mLvuUFAS4zNE1nyEamk3iL/", + "+wd8ipJIifIrdiNgsSe1+BgOZ4Yzw+HMz44fT2dxBCOCO+9/drA/gVPA/jy56veSJE7o37MknsGEIMi+", + "+HEA6X8DiP0EzQiKo877DvD8FJN46n0GxJ9A4kHa22ONux34A0xnIey8P3p7eNjt3MfJFJDO+06KIvLb", + "2063Q+Yz2HnfQRGBY5h0nrv54cuzaf/27uPEIxOE+Zz6dJ2TrOEjFDBNIcZgDLNZMUlQNGaTxj6+C1H0", + "YJqS/u6R2CMT6AWxn05hRIABgK6H7j1EPPgDYYJz4IwRmaSjfT+eHkw4nvYC+Cj/NkF0j2AYlKGhMLBP", + "HpkAok3uIewBjGMfAQID7wmRCYMHzGYh8sEozG1HJwJTAyKeu50E/pWiBAad93/kpr5VjePRn9AnFEZJ", + "K7hMLFD9jgicsj/+/wTed953/r+DjPYOBOEdKKp7VtOAJAHzEkhiXAs0XyEBZVhAGMZPpxMQjeEVwPgp", + "TgyIfZpAMoGJFydeFBMvxTDBng8iz2cd6eajxJvJ/houSZJCBc4ojkMIIgoPnzaBgMBrGIGINJmUdfMi", + "+OQR1hc7z9iPHhHhC3ecDLEeXsy+8p8ZtSPsoQgTEPnQefYhGkfprMHkGI0jL51lrNRoypRMHEiLksUJ", + "bfrc7cxiTCbx2LHXlWhNO87DODqZzfoWrryi3ym7ef0ztpoUQ9aHcj2lIuLhdDaLE5JjxKPjN2/f/fbf", + "v+/RPwr/R3//x+HRsZFRbfR/InCS5wG2LhNVUNAFXDDw6KDYi+89ilkYEeQzQadD/EdnBDDyO93OOI7H", + "IaS8qHi8JMZKzGwDu09PgARIsV+QJhEVYBVcKyhHDUGloejkxRGT3BpdlQmJiUMjbugXihA+RAZjWbrX", + "ilMhc+ViKmTYVUakBVE2Q59jTCwUGGPyOR57J1d9b0Jb6TBOCJnh9wcHgv73xRdKnKbjB8zQFzivn+cB", + 
"znPTzCYPdxnpgpEfwHtn8h1AHKeJD81inMvE4MSyeoKmUDsUEzGW9wSwEKc5qd05Pjw+3js63jt6c330", + "7v3hb+/f/r7/+++/v3n3+97hu/eHhx1NXQkAgXt0AhOqkEUgoIDTjQZM10ORd3PDBQQdWgdoNDo+evv7", + "4X/vHb/9De69fQPe7YHjd8He26P//u0oOPLv7/9B55+CH+cwGlMmf/ObAZx0FiyKphBg4on+68BVgR8Q", + "nSTbVR10C29cxw/QJB5+zFACsWnJ3yeQsz8lVkK7e6L1vvMGTyEBAeAkWXNm5CjYKleuC3JFwbaf39/j", + "d+/qcKhg6yrxopBhRKLvwxnhOsIA/pVCLkzy+OQKAcfsctQ5RZGdWLudH3sxmKE9aiyMYbQHf5AE7BEw", + "ZlA8ghDRfem8VyvupikKOs8lQuLwmtb7IQ0fuA7We4QRsS4ZPkpbyElfNQxZq7nyGW6fu51Teg6FDgD1", + "gzxIjbcjM7hSxm1NtsdpQRRCtqQ48tMkgZE/P0dTRIYkAQSO5/z0Tqe0w+nJxWnv/K5/cXc1uPw06A2H", + "nW7nbHB5dXfR+94bXne6nX/d9G562T8/DS5vru4GlzcXZ3eDyw/9C22PMyj5ZkjxYMcoZ4x+ZGbIIE0y", + "o+5pgvwJ400uMxD2GDnudxYn4niKSITCrpyIIdQsIE64eOA68VLygY1vYowi0vAsjjAsY41IkVvGWA6s", + "ajD4KHY4TpM4+h4nD/dh/HSdoPEYJtZ9BEGAKBQg/KoJ5tLAfhJHvR+zBGIsdMoS4dAmF2IDysd6NEuJ", + "YeSS7KHNuiaotAlK4NyqpVeLAfNiC9Si2njyOFCkw5hU258MP+axGCe4DfBg0g9p/wc4t3a30AdXIxlI", + "GWaGF0PNKrCiiMQz5J8kNiKdgv/EkScPZo9uh/e3k8HF3+XpO7wYemyMZZhbnVBTFP3PUXcKfvzP8bvf", + "ykeVAtbOC9xZcBLChPSmAIWfkjid2aUabYJNIiREmNA18hbSJE1wx9leW2D5AXqEXTZjee0C1LqV1ygn", + "fHDjXrNPclvpWj0SC+/GSvZWrqvbSeIQ1ukIfDVf4XQEkwFtb8RHRwxWhxUrPtxUTO5FWgUW2DJwmI7N", + "k9Ivq5+0KzylTJg+WwxrBpQZj9npgl1lbPbrldY654nKHzZGftI8F2WvgzpiGs21hDkyhWQSB/XKrYau", + "r7yLpqqUZQZn28D48UkMVPPZegzLBt9gQg9O4zB2m0iBZhqoMHsOVrGl2QYq5NUS2DkysekMjFGk3FtV", + "6L9SLZVWxiTOUxPzRCd4JzecadM13f2s9/Hk5pzq5CdXfYsWrg1wmQQw+TD/KC8x5DCRVIZgydDPRmIa", + "0SZVoaU0maUYkqiLgfqTpMhqZXD7Z3nJW7wQEtdF1oVI+h+k0TCdTkEyr4OMbdX3crcKluSqnlrIrdzw", + "M2By+jXRUr2//XN4eeGN5gTiv9frnErbZNN/WY4G5BhbwPxqOWW+l4BuC5QVIAoJcoYS6EuQpBQB2O/w", + "i2K7/LBJIAfRM4Qg8SfG08hG72VnNXPxGO8smGqWUq2Qcqtq6CVplHdK2u/I7wFyGJq3ajLuDEYBXWnN", + "wKJZk5H/SmFaDzFv1WTcJI0iB4hFsyYj49T3IQzqgVYN3UdXVI6rPJEGA4d929ctuAV4bIkTyy7WNffm", + "P+ORQZBXhXUwea4FdohT7M94tL8mh3xpTEzgzF16DQmcmRBbqQoTNIVxSszLFx/rlv64rBr8qKm/0m5i", + "Szfptf+MR4M0qpBu/MrF7RpFdVLxRfYmAwiwxaK6RxHCk2ZT/8kpsmpHKdHylpbdW4LoEojTkBi9lJiA", + 
"hDRbDCaApNhhPfR84m0FfQ/SqBmJ081vTuX+A0yqWaDJcjWltA5k7WAu9FzebOSDSAJRu2DnmqHaJql6", + "XPUuzvoXnzrdzuDm4oL/Nbw5Pe31znpnnW7n40n/nP3BL0r43x9OTr9cfvxo1FaoGmcOn3ANuip2NWy2", + "mIRdE2D7PcFGlUd1FWzUHynEed8xfmF489DU3qxpsImJTGTGlhkC/+E7HE3i+OHFF6nBsqolxuNzFMFG", + "sSD0MGWfqSJBJYs8UsN47IUogk0u/nnAqHEOOpxoUKuk2HrzFgafRAFbepBEFsWqZrjNUHUOH2GYd9x8", + "uKGCpn/x8bLT7Xw/GVx0up3eYHA5MMsUbRxlPDntfw4CkyAR31/e9pRkZZYe/OMS9md+hIYWqOhcYYMa", + "EKCHBvzs8It4cjdjtHvc7UTwh/zXm24nSqfsH7jz/uiQGn15zsp1NkUQiRbejFOhmvjYyazSYDGG28Ef", + "5ZHfuI2crcsY+BQTEOpGLG3KPDshwoRfS2Th6ocuVpxBYv2LWrBfIUmQb5DHUTq9cjOxGR1LQ3vftt5/", + "OVnVfCzE46CYiW0dcOBmTvMRhVG9b0ZN7oJGgZqbpasjxCT/B4BAFk5SRqWTzzah4j+kAxhFdAgwGcB7", + "FFruE1k8nAiY0wdjwXIJ6whZSMgaogrZRN9AmFqOnyn4gabpVPdx8BtC7LFAbOHyFbv+hKIgfjJv+yp8", + "yjWIfrSvQ0oTwzqmIICui+DfzFPwb2wZdC9RpIX3ZGjmIcP3ceLDwDVgQbMTtP2S61VQ5SjtVqfrLTgM", + "Mx4zHofq8xIHYnGM0pHIsSmxpqHSOBr0YUSGmj1buCdi4NnomX/1TKFcugOiiYW6iEdiCW/C2lwGAqWZ", + "z6BkQBfDCat5RG1EV7etBSzF0Y3iH9K/Xk+w6gDOQjD/peJC+ZI0xwy2rixHDy+7Pq35u8PDmvUW4Lat", + "2uY40bq7C+2Cp8sVPgldQrmcMXsFWzUIf6SjFnwchgHHEJObxKJr3QzOPRJ7GEYBi8gTZi72SLyeS3fb", + "AZFG6C+qDQQwIugewURpk0IBEo8neOCg/uZoBMM4GkuIa2Rld51xi26uzcpYxKE/gUEaQo3Slo3ItZFU", + "t0N4yK/7kdYkCDcb/FZbV7A6Fy0LWqd/DE8/985u6I8mvUXNvN5QtC0NKiuvPossq75PaEobq4s5G6TR", + "qe5obHxhwQHY9OmlAeCyxKGTcvi91OElg/MyoqiMyysT3RYYXAY54BShZ+WgRmF65VFsRpmO42qf5RBO", + "wWwSJ3AYxmTFFlnO2jFfm3MXBA5j7pgRPdzd/AtaR+JG1bYs+tlLUrmwenVAvxqtXygKQxkz4L7Skmgy", + "OEtEE3fQCwyeoaWrW4DFe1R5f0rJR784Kl/1TEAUwdAGr/jsocDsmcJ0cO+Jj262+fkIF9Y4ejkFi6df", + "cJKl1FUwta2eflti6bS7fd1s8GUWvRWKtpsqLBGh0J2ni65GhsaDhsCZTe6ZI10mKAwSmL+sr7Gz1xSd", + "MgNJ6e1rLSQJBAEYhdC2ufK7egXPBWItmSwVNGWZwU4B2ipy5CCDPMQG8luriq1fQ5DUCenN4twNoObt", + "XlEoFSPC7zb/Qy0N5Lrj0ziNiBlcaIVyEddp1qcCQ0VbMxcL5hBKJCLfVPvVs12cEhuIC3Iku9o7uScw", + "cUfmykPTeJeKnVlC23KNyqRtbeLEQdY0WbHqUrFiqvpYIuKcDidFgWplleFnAnUniT9Bj3An5VJzo3ur", + "RExMLSpzpwquTyBJ5hVSdG38qJkxm2GJCotBQ4LEo9n6tNH7Nhj4eQY0XquKNpanbr6dCuze1cDcQQti", + 
"M5Cc5EGH9Yh7KdaD0g18hAki8ya9h7KPE919RAkmQ8iVZHfaOwdNezUMFOZWRg7AwswKsxqa9Mg9vr8V", + "xLwtr7RyZFpLyJlIlz6kQY87x+8uLu++Xw6+9Aadbvbj4OS6d3fe/9q/zpzn/YtPd9f9r72zu8sb5sca", + "DvufLrh7/fpkcM3+Ojn9cnH5/bx39ol75fsX/eHnvIN+0Lse/Js78HVfPR368ub6btD7OOiJPoOeNok+", + "9/D8krY8750M1Zj93tndh3/f3QzZUuiaPp5ffr8b3Fzc8Ww1X3r/vtOvDCxNBKBGd5qJYzSkaqGcYoGD", + "/nX/9OS8arSquw7x1x1Hw9feRQHxDe5CxN+8dVXsepYSs5isEyYiaULPktriu0z6F3ustfQXTFkvvG/M", + "8AciEM4J8vHljFympGLUzAExAdiLZwQGnjAy1SDmOdaeKMyWUGHpjAzZyyK3t8jCT++QjozBlY1uknnG", + "hCebzXSypjdx9oQnxjVvgcA374UpMcw43uNE2xmwK4zn/KpQNB5CQv+DN8fkPFlD78cM0V1mT0QYMNXj", + "8158Guw9sYyB7LWLBxLogdksiYE/QdGYpw5kCK6aXyZs4UTCAt8WhIIvWeZoLMPDIuUqcaF5dz4CFKYJ", + "dACFBWHogOiXApi9KzbPGQLMl2q/sMliakEkdpZd2oiH9Y7Rc+CHJLKPzO8R+XNrmKx3L5t4gMjQT0FV", + "q/XV2yWBEWC7XOirmLb15D56VmkiKy+bZJJQkSB6k4kzF0uwVHflIBjKdmEiP9uxxltUXZmwEXLZ+6xn", + "bs3BITNDZXulp82ooZ2tOUoEKTc7QfieluF/MYJyz9BCWa+u9Q2GCe9xlY5C5FeRAhuvIkeYDvPWbLrY", + "v0U2fSD2SVopl98vmKV1cva1f9Hpdr72vn7oDSpMiuoXOMxHju3hUSYPSjlcLSYgrMNEDg7NyVA1d5Px", + "inGZCgGS8nUsKtu7941bd7pVyizIywstgK0CvTm1xqTZgWRa8WyFffdYpL9ZBvMHNiT2nkDCEkGU9B3e", + "2/wMpNmLHvNjntW8z+Fj25dohn+5JANq2+s5VBGJ2+ucug1r/ihnCglM5NMceVTysby/oX247x15AZh3", + "vSPvCcIH+t9pHJHJ3xe84VfoMT7VsUtWiairOES+IdEPV8GrrFKVYp03NegFDSRrnv3qQr8FcPbVCefQ", + "2mVmJp2+ZW4GKZy+HXa6nW9HZqHDg9A2EIVsDWy/YVnpX2NWVn3lNa9wVpIQ1ark6IDY93+HPYet4+Jl", + "HRdrdCisJUF8A8fwwn5dCxd+Z9EM9ndD+Aqk2PQoXWcTHhLhIezNWGsPRIHngyiKiQdYiQpW+0omVCtu", + "mBE6bLIYaz0mIAgSiLHuOckpgdIULztQ6IfPAE9MUn4C8EQf8r9wYToh97kexUtHDXkVJu90Aoh1wm8w", + "QfeoDr3M/0Nl0KNoLsqX5WAwc8IEYHuRNOMcQFVF8zAkG7wZCRCehWCeYwS5f41dLXns3loILF9Fzp6T", + "Gz7Zkch4Fz5lWJMKoRn2BY57VaXumQWTVQGigKjE33IwlFL/qBp6Op5sKD+PxyhaPBv8Yvy9VHL4rcO4", + "XOOsDtcDOEaYVEj3bUS32wlpEQxbuFuyjpPrpulqNZ6gGd5VN2DJLbrB03wdpwyfzLRt3454CahrgB8q", + "6hURmEQgFI/hrYakaOb1z3BXaKM+iLwE3gv9HPEzG+AHL07yD3z0zroFutL8AN3OPQpJvVv82xHFx0fe", + "1qjjfTs6A+NT7elE8amQ4VFF/YwqaXkZ8ACMXTNfGIBt09c5p69TyNoC4ZVtnDHi7dsRT+/QMm9T5qUt", + 
"LBGvLH77a0WoKssqcs1+dQGkp5ovGBpbEahNN+OMnwrW99W0jZPk4JGjmIDprFnAqnxR0Oz5G2/CgdOn", + "1hGcIea2ehu3glczorJwa54c1h+c2jAaVY6Vi0ItRp6aw1aL0ajD3sX13bW+GLWGO57xsxQ6ezronVwX", + "slh86V9dWUJTc1zu6IF3D8HDKOL3V03ebcOmxJK9KSrOn0aEq5dN00zkQajn+Ko7G44EO+ddxYhq1iQx", + "KciC4IzSLovdNd/YoClcMP2KaGQIDnZahuHKiT9La7qzOmqcUirwboM0suHTr3yO4/COLU9ypYcvIhWa", + "/T1AAcKmGMmWZiD3HGyaXFSSIIv7Pr38enXeuy6Fe1dEsed165VXc2IaUZJGxoJOuiFguprnypdsxcbC", + "rvewtTZDjXKvztxsGVqJcnftPshrIG6GqwFr2ohplruroj5uYShW03yKwhBh6MdRgM2xAEUFz2DB0xbS", + "JinO4v1NxQgAAjGhv/29PhmjE/rp8LKbO/7VPXJ5DvapAuUrSbsUpdPhDDxFMDitpHatUA1vXqb7qmeK", + "5QH5t4YbZHlW7Lw/a0rqUtQIspfDloQuWjpcgB/2V5BKkWqnyli0cEZm6K1hdofMv2Iyg9JA+0cYJs0F", + "HhLd3Le0WaLifF7mTebFrCM5VehamKf5dGW12mOT9GULUJbuYnDJW2TMAVaeSVxDGyZaJoWL7lnSD8Qu", + "N3RlXkCDrFQyTstIW+BF3VDO03k+dwy3mbVdvC2qP+s1lO0azioVm4UKeHw72lwAV5kzqm+EZOt6R34h", + "E10WGpbxsjEDzHp13MZqJj1HW1WzVTWXSFXzurTB7Vc4FlQdas5qgzIgju+l8q2hIHfYZido/uTOJfpU", + "h7RF6J5BIkM9Cx6k+hxPeelNqWQC6vU3PREqbf8xTgzwSM2+opinfkiLyp5K8heUsOVd4RwcvKqb0WLg", + "LoNSW7DEpZy2vG/5I6VQK7TmcmUNDzT1KauAfSlNTj9hG2h0FoyvSrnLWTJ6GtqTT51u5/pk+MXoGhS5", + "Pnkk6Upf+bnFAomclSIK1aigpLY89bJvmoSN4gxFVA8d14TLHEp40ml7dOGqFomhn0DLycu/qWpKIoSJ", + "HgFe/96LYuLNkvgRBTDoesBLQBTEU9mJJacdQW8MI5hIVVM/yo7XhvHmaA62kwAX25tNk7KCsxbZVHBu", + "SfXQvPhxuijKdbEypjCs7oBl31g4D4gCraYYH2oxs8wt8b4J9Cz1PlfHTuPAQrWfr6+vPN7I8+NAUXAi", + "kO9Q/E3DioI5N/GtI8KrSUiWDas+RyXNy9bOz++MFLAw7ZQzt3/qXXe6navLIfvPzTXTQmwnJM9Li6vy", + "qWP+OlM8tPBB5M1gQulqv1FWHPAIUAhGIZTpYWuq7JenhT+gnxLo+XEkXpOGc7PdTnUcQPyJjGwp13DM", + "kgkDjNE4goGXdWLegZub/pkn2Gfz5lgIRjDE1U9pWRvGUjlPFD8GnIv/wOScjmPashBg8hmChIwgcEgn", + "L7aKvYzGFEDgTWTvddU2BJyZqXrQwwSMQuYQ2UJIp+CHnfANJRiXY4D16x12fSMpVdUzJfWmbVRlg7y5", + "2oCACxX8TMmM04huST+6j924YaB1YNnQYttJgGWxCl5IgTPiggspFL4whTQpx5PpQpEdq6W9kUfCyel1", + "/1uP1W5Wf16d3AwtARcul08cWeriiZ9M1lIQ4qzkErUAZL2vife+qdM+bwbnhuGbKqOsvVGR0IRlsyKy", + "Mu6adl11TYeKlAs81ULN5HZ80CVV4OHlIzatarcCcpBn/kK+BRCNU+HYdxYLw7MvmB88vLOWhKAc+GtW", + 
"jIRE6v0gCTA2wMGDfdjS4hhEuvp3eX7Cc3H++/ozy8Vy/e+r3vB00L+6NvtQMk7Whhn2zj9+vhzyQK2v", + "JxcnPMrze+/D58vLL9aBZF6aghtOp02jPZP94nCp1m3wNpj5PtXrYPOb0j/jkUWw0i8mgJzo85/xaKXp", + "IZuczVbMyZdkBvUIjBdfq/LfAaPyL5z+zctSCkZQFWyqfI1FWW4TXnTcU6lCmTKxjCHRvqskooXr1kjW", + "m+KPLcaQYIY7P+vqjWlfdShpPtR9ayagIUkAgePabNgahOe5fs2VzUyfzKc5yJ3OKCJvjuttdDl1cTVd", + "I1artqh/ZnrvogDsnxlxKHt/QVHOKv54c3F63Wfy8OxmcPLhnOpAZyefKiUZHUQedI3Ils1u4AP53Xx6", + "LlV6Z8MHLxP0bl4L0doaos2Y5AvMKhYYZFNMQGiiWMVjD3BuucSXw1OyrJiiYHtRngUenkEf3SM/m8T7", + "2wxgDAPvEQGPP2j6u5krrIhoEOHRqPJm3WWXHiqhLNyjQ60s8doq7SxWSpSXK3Gny6zUzgrPXF5C52Xq", + "b/K5h3p9g02DsLYa8cYyoC71W2HwYd5g8GutVzlwoaEesvZSpVlQgwb2bbUw2RJTrKqKeBX4omroGUqg", + "qnKnHBfDU3pM94anled0Nkqp9qge15nRck6KaZKxZpKhDOtoZXcru1vZ/VKyu6Ya9y8k2iviwhYQzWy0", + "PoFTe6SZxV6p72xNoTFkD2Gq37UumVcge2uz8ic0KxjQItOLL1+LLwZUgb4iIrVR66inolJPg0o86hVn", + "VRWe0rQL2c15gWInxuu8OCkGT8TRlSb5DVUB40hWL7eniFhXfdzvzWqEZclNqjcb80RA1pCSXGmyNbKj", + "5cWDmLZuEVYnAXvY3ISO5FCnvGOdFlpoXpo/YwjjG+6q5/KS6YwfBXMZv0kebf4Iv2qx12BsQm/IVcbl", + "ffPRit/MiHs5DmEV/QihcJpQQ+beLBcqquTeIQs31k0oKtYZZmRy5E7cDa56WmxeYXPNoIA3g+SFj6Uq", + "wg0GVvhZrXLP1S0z+jIN7E5cPTRHM09Su4L0tPVXUFVgaNpskWVzVxguG6LfelCjEt6DNCRXCYplZUAT", + "+7NG3ky0MjFw7SVBdsf2QjdnqpCuA6hYqAbXWcF4g52E/Ie5LRqDfvOwuPpwu5bTeLoBa+FCXmb7a1kX", + "IPTCIq7+/0qbzG4rSZiz0rz6q9d6dmD7usoLlCYE8qoQzsMDspuTPMbvE8hCliqqTU/Bj5oWDavm2nLc", + "8Fj3lAopqt1POYQjCBKYnKSEpddmGGWyl/2cbcqEEFad0I/jBwRlc0R3lf8kb5XfdyYsSlTLrA1m6AsU", + "gSdIxJoYAqB5N+/kqs8SphHmCcr/qiirc7R/uH/ICHMGIzBDnfedN/tH+4csjyuZsKUdgBk6CEVp9rEp", + "xv+TvJSmrSKIsae8EHQXmUuRorxzLr5/YuuSMdlsluPDw/LAnyEIyYRJ5Xem7xcxUXPmdqbz/o/bbgfL", + "ZD4UwqyhDE/4Q4zvT6D/0Lml/dlaEwiCef1iaTNUtdqBbLDK5TLgWPp+nnaeJOD+XpTkqlq9grZ2+Y9H", + "B0DUFthjKWH32LUkPvjJftZ/e+YwhpAYdPEz9jv2gCyvwEtY8MS3rHsJY4VyJXwERosJYAV4KNgVdexK", + "M3girULnPaPnjLtKS+no3M+9zVwuLm26Pt+W9v5tGVvD1PchxvdpGM49jtJAr9BRRt5zt/OWU4kfR0Rk", + "qQSzWYh8htGDP0Vx62wdNadVL0niRCQ3LkZETEFIsQADL068EQjkiwQOxpuVg2GC4mOcjFAQQK7LZvTN", + 
"6aSKzCTFi7p3t93Ojz1V7YM5xviHroEwbpkRRXxD4QSuvC9D4nyEX4PEGT18iLnsXAkxOJQyMpBJJbZI", + "7KUS53lsPJtF9EoWYilTXIY9JwY4oK0YcBQDnFrWJwb0A3KG9njpooOf6m92Gs5ibFAaBvAxfmAlhE+u", + "+rzokYj9UTMWxMQMsapK0j1Au7tICTW8RSZIWLfquEvY8gSdM+h+baLGTahakA7d2Guxc5KMs9+qKFlt", + "eY6C/TBOgwPdlLVru6VEP9KcYIN4KMIERKxUYZ6IT+lnGaxgV4LXj1sGiJdG6nXg1hBYjdbOEazf/oqt", + "/6rd1/zYk0PsxTMeOiFONG2/uXP14Cf773PVflMpxVrtlzaU+Vj5RtZKIjaEVTlhXzcqhFa32SJlSc3h", + "nUCSIPgoxBrHBtuxVrblSFzDTEbeHMUVUo3Tz62dwg/qxBrbFiXVamj+TAmw1073Z4yEW9rfLtqfwoXP", + "cOvpvbmDWyQ7akJT6kjckYN8FUc4HeOAObT5LmHrjp8jTA2g0Mu1tm0wbd3PN1zbbtO5xI5rUzbcfJkc", + "I7e6bSIEtfVsIwqbUN7/3CbHESIxleYHPznHPx/MkngE7calvKXzQHYRTGKP+XUZvvIPt+0Mr6a+ijEZ", + "pNEVm9fdN2U79JTk2vCpV0FQIskBpyeG3/2NngoXMfFASiZxgv5DoYhluhOejoG/+Su5OQlAIQw87rf3", + "2PZ4H4U872fbaj44cmSGQ+A/HPxk/3Hw4ntD2lC+gS9RDvsq8sa4O+1zY1qJh4G4ld75PE62SbU52gwY", + "N1FGwnzid5uZmKcjYlndQBjGTzAw3wgUqVaKXvZ7lYrFiS7PMRE++Ikj7MQtF0Nd6pf5JcIN2CQ/mJ1R", + "xMm9dWxSQEbLKFvIKCWCVaxyMaxklAgb2EQqLpq3yay60HmlSVxikcZ3Yy+mf3TtjoAHOF/UE6DBcPzu", + "XQ6Io1XoQLMkpv+AQXuGbRFr2oxIRCbpyAOzmaT28rHG2xT4kYBRCA8CMMYHKqWy1WjEzGpk7TwyAcQb", + "wTCOxvobdZXhl05a5FpW8JYOdC0qOdS7y2RFjyxXHs/Gy1jmrxQm84xnAjC+Q0H1Mbeu9wZOcqcA70sZ", + "Ps7Uu7JSHHqdY2OqpAo5RKeUt39s1tftJex23m1K+FErFE1nIZzCiJR0A+a8UDW35NU5wA9GCcMaHvyk", + "/6m5XuIZ5EdzzjdFAUIncHS1s3Gshz4FdDcd7YVk9o18Y2zZr52B3nLjZP2zXusVwuhRfh+nPCfPlvBw", + "xnAlHrYr9cSFxw/CeFynTMga8DLRjYCjyPJ6KfVXzfY6IhqcmuLhVHu5lj+6FPVppH8ej5enfPr/e9lr", + "NfsVjFYlxUr8+erkW07+3YqcViT28AOaWVTh+P4es1PdAAqKyG9vjemtqqdjud+80dwyJfvccMb1H+vZ", + "Xi9wi97qxu3RnpNxJgmz/DHPWmh+PCbwptmj+QrdXsCnCkUlaeSJntURr5w76KJ4bgb5Rn9X3X8sLY7I", + "Bh97Yx4DINFgkVi8lr4ToJXV7o35q9UbYXbdeI+iQOUgtICjXjdzn8vL+Fj4+3VGTP+FdXeRBWjx3p22", + "v5Ot73gpt50x/bT6+c2tP53lWlfK9phhbG+mSqq5SWl38TyLUUQchfQURSmB3miu/kogeAjip0jJ7QYy", + "+xMkV3TyXZfYTFaDewKTfOVvkdyvWLHjaO+Q/u/68PA9+9//WgSSzA1IB+6sRpYzSEfwPk5gAdSYwrcE", + "sDJ13wc2eHNw1y8bc6S2gHRkfNLKxy2Vj/ndWbmUxAc+y19lv5Hm+a2U29sk73iTnYmLW/2zzG9HHAVM", + 
"Val5h8mvEmPPl0hzenRZwdJ8oLC9MGqNYoMgKbDvysVHAmchmFc99GRlb6vEB2/yqsUHR0ET8ZFIpC0r", + "PvhArfRopYdBehS4d4XSQ7pS9pI0qrs1y1VHyRlg+waRUkzRvqvW1690i9AtV5SRcTX8LQbE3AcXUmPT", + "OrVsa/a9OUQQWJLcWuIVQRIiiAkv++sC3hq9lSEgTUBJI4LCFRjXJyr5fPZg7mHv0ZsBlDhtWZa9/k57", + "8mXYvYV8oZoPF++UE1eUF5Wu52oc8sar9dl2rekzYw9FfpgGLO4Q00M5jsK5/rsKhTMJpCic38kGdkYo", + "p+KscXXn4iIdcPYreL1FwFPT6IdWidu2S8mcAqPpUVJV8VjNjhUqVAeisMke5YY69Uq0pcOyXNHsYaRd", + "56pWuc6yiip4p9UvTd6U6jOKOtH8FlOgT6DOfu5oUuhFArbXK6/MJNCKrlZ0NRVdIidz7fNuD3gRfMoB", + "WC2aTtm906v2MwnUaUip8Tfp2GVea4nDzaUKzMsWWTSnRqaIO8aS1G5jQjXXMMNRkYFWwOB5fv75eLSn", + "/1L3GCJHciAKPKRnayGxOnDjiG3v/3UCRhT/1/FmYAyrZYDjQ4ocDNzgGLOeBmlQWN7OxlovwGXtyb1D", + "D5UcGbpbIugFWNw9Ijx7XsltDPfjXMUOO9sZv7RH1SK2jB6xX1OANQsmb2XXK5RdcCYElvzz+QAk/gQ9", + "wjoxJVrJAuUEzowSShQeZVm95MAOkkmOZ89lIuBtL6G28ymL2Hex5+1rlp24eldcV7h+L8ujHPtrzK+S", + "wtCfBmll3mnFwvUyqfFTOhd5xHWlVhq9HmnUvqz7FWWRxvjrl0QLvGmXQJVjcxo+a2/F0MtG5oTwEYZO", + "QR68ZW7mypy3gg5or48IhoH1ySCkB6/HZtPgqHgvyDo0BWTIexmDIgChE7Piffb1s88f5nwtDSe/1Pta", + "8MCnD1ACfZGNrgKKM63ZIpBk/dd7SLW5HV44t4P5GBC3cxUPQti1ARbXb5aYAF5Q6FS/LVr1bRYfnE/k", + "VvzqZe6vRG2lJjdWAqltJavCVZX2Ur+6bpWJotV9MyPtqvp1rPjFD4QJisbVBL47GTk3UJDOjQmzQrYv", + "Wnqu5ceVVZZrUEeuki/NVVarq3iALD+jpcodrqs4uStRarebLse4gOfAvgkt7+QvOCqo1Z2Zug1UtOal", + "WF99vJSuYa6u2qqzCnr0wtVWyydgW23VVUddqtqq2yl5gCGh/8X1ldllF092qQ7o1sgFReOh6OOag/Z1", + "HJMaYpY4I/U9aVkpn/3ChqaV8ZEqWVx90aYqCGO3CsWtPqmqmjF84IGYpSGfyDiO1tdXVB5VmWPcrPZx", + "ncK4QDnuVkdkCJC0rqmF63RhFCdt+WvFMfELFhevPnAcojp4uZF8DlNLooNmKZLbBAcvdo36AOdOl6i0", + "XfPEBowMvsC5y8PzDCYVKNw/w64v0LmsaAygDN3sny0IYpJGyyeJcIFwkEY8QYRwfL3IlTTbz5e5kGZT", + "b8F1tA6HfhldQSxZbgo49x5BmEJzhgqVAvIPym5H71nTo06X/uuY/+uYivfqTBZfV5vIIluGLJXuROes", + "cX8zOSzWXkW9jQJYiWUAZYynY+10VxcyG9eig7QmAEMAw0WNW5jz98uEIXBKaOLz5QUZX30U6PE/NjPr", + "QPCnUE/hDx/CoJzPkRsoslimM5/XGyYHozR8sIf9fEjDB0EeOJMJuFIo0D6vWDDQ5TcUDvglpQNuLh7a", + "KPEtkw+MTXUhgVcsJdzyRXNHhpY7KKfi2qQGDyt59emkOQLcFQphMDRLCesMaRawRf/1lBnL1PZYY9o8", + 
"+UM8+hP6DpoLQxrMHju3QmprhZRIK7sW+cTcaI4+Vu6bc/CzfoHz9lovczYuZK0zZLcWu8li94Tvd5V8", + "4JaYHTc7ml99qnaOgG05mlfjVstlgG8PzFdzYKLoERHYNMBa9jIHjfXZ1/aslLFiGj4WihKT2G5jw0zh", + "0xktrilmmk9QSeut+1uLkuYocQuO5rh90YhoDu4igdCCMFq2NEc/K75ZTaim4HP5wx7/9zNn4hASWGbn", + "M/Y7VoadCyvzPrtbrjHHV9Ww7Sl07PrZWsu9nEK2mXtzjMSJMCNXa7Hq3D6yc63qTWszTtidd627wgnr", + "fXq72Ln7Yo9vHTmXw7cznCsexTbm3KqTbwqnI8Z8jWw02cvM4l/Z19ZGk9So4WMhG01iu1UGTTZaRour", + "0QXFeAc/+R8OSqAHBBDefRJP6569cWr4NVRBsWwbbPzzRnn37Vp4dxEd8HVw7RZlubuwJLVTTJrbmJXJ", + "i79SmMI9WUC7qvQAJQ7WOlfrX8v/YRYYnyD5F+21y9X+d+plwC4Fe69fe8nR3mIvwLxHmGAUR6rMfCsT", + "X1gmUnGkdqdc/F9yzqIyMQEE7rELJ5dQCdqaX0/VxUoMAIHntGH7Lm2by0Ss4g2TQz3R9b1UUnS2Ba+V", + "irBsKn1mntcaBONo7NxG4xRsVh03mbilqPbO+a+LSlzRY28Wh8if16dskR083sElYYsMJbhiPdp0LQcm", + "tCzm4insRuvq2XjWIxwC/6E6UcuQNvGe4GgSxw9l5yf7/J1/bZ2fPEeLjpMm1kMB1dvEDhsqYnQTgZRM", + "4gT9BwZ84nebmfgrJJM4YCUCQBjGT+YCSnyDmB7IWUA/z9jHpRjxABOQECs7DulXfo5dnqRk4jFjpciQ", + "Nxgm/M6EAXRJEcp67iJnvjk8NuBB5x6GMnGs5LAygSAQdzxhzAkmTyvFuRlVYOinCSJzhh8/jh8QpIOy", + "5Me3Oj0wlOZnlIRAd2BhOqjLmzW8GBYJsCCQI9zKYSGHL4Z9HVUNJHERy60s3jpZXGYEJYkvhkuk6yoM", + "bGKwNjqRISDPX5VZulZHs/lJnaMMi7vaMvQWMbSV8xw5uvJEFfU49jZxZSVKhO3azdX63QUmxDTzGai6", + "VbmdaS9VtuFSRe1N+VJlSf+EoXpaTaF7BctozhnKWLpxR/x43W2t4LaBOosLyodWImxdgUVdRKykqKKT", + "nKjNqXFCCJzORHIY1tah5uuuJdNoJUhVABvCLLxfiBBOBOH2GQgvfIlXxyibYugE0o4Vb+9ZkhJXHmbN", + "WxbexmwASRqJrap5fIGiWcriIfjlrmm5z1uhqbS5ACrkC9vwlxAo2ZoqfQG8mWNR+E+QDPmwrWh5Oe2g", + "WZYri6dBDNcaFNtsUMhdWovUEHfxe09x8lD1YCwL67QGSrQxElmIOkfFd4ZUipCqWhsUGSqMnnf05Ha0", + "Tvxtu5XTyH/xVCFiEBsLvfrbtxz/cGxsqESOYeagUaIPubUt527f9ZvOeIs467lUrnbP0xOSC+/q2Nvs", + "bHj1h2WGibYS1dKmpnwClH87zXG86CWVRDQ3L5tniNRr8hgSRWqFdNp0kVq6SA0vuMZNlKt69HLJI01w", + "OxeZ0zxIOYJpzdOtTCqZ36PyI8NqA7WJwPmp/7PudjzHCbUnsCDTXb4sL7C+GTQdgzusJojtWvS9cnt5", + "bn8tnPdL178U7uZpanF+PmBXHLUuan4RwhlaB3q/hq/7bPSWuV+eubPcCFdaaQgO4zLe7DyO2Ha3Du0N", + "ObS/67iPXLISZJvUVGVYncTBEzCDa9IjhmzsVt7sjDLBN6zVKH4hjUJFxDuUzs5VzQ5DdeuGDbpGFeuz", + 
"51j8grwn0+23MmDlAJ4DTLz+GUtaOYFeCOQO2pKfAEz6gTX7yZtjU/aTDUTuNSmzoUueNrZmS2/sF5Al", + "7tf5brIQO91MsJZuGs2rTMcUwHuQhqTz/rCbExWbSMyk5n63yOS8/Ls3mntsAvOk4pP9lfgm1K72smf1", + "+tYqE72pMR3LdnrAGwHiT0qXPVUa06uv16nfk3BkuAYDixj18lXJqy7iGba3RzVJlzjZbOLmBh/4SRzV", + "ayS0lfdnPMqAIgkaj2vDJ06TOHrVasrOZI1UG4sCOu0YEqUS79ckB7YZbqtOXrxLmYErclWO5t69yIe5", + "spSZOp9h97SZo/n6Mmdqx+aGc2fmkLGEDtseTAY9tnQSrEmhpcfSwU/6nz35q1sxiPJR5Xw1QAlnx0tD", + "qNXbwMphdPPFIRyrOBg3sc3LWayqYEZTM29+niBun7tV121LMtcuB/BsMWet6ehsj81dcH03OqxXIB/c", + "zm9GA65+bt35Xn9739qR22xHysL4rkYka79eC3KrzVsK3AwkFGmWG90CWLzxd93HtyH4DO+xjbCJu9NN", + "uQVyaMMEkBRDp+JGsu0iJu2Q9RXGpQtwDygKnKBiDRuD9AVFQT00O+9BIWgKPXBPAS3FFD4BLJ/46Uvo", + "HB8eH+0d0v9dHx6+Z//7XwvuRfcTOoGZeANA4B6FouNaq49CPIL3cQLXCfIHNsMqYa7A8j2KEJ4sDrPs", + "v1E8rwrolWJ6fR7Bsvvt1foDi7pja9asJYpwPY5AFjjokiwXeAI0etDl2V/PnusYH7zL5R5bNbxVwzev", + "hre6ZatbvsjLALxkeVQmgNo03vXn+xpKlWbnPAU1SEN6PNZ4DVXLRfyHQ9m59SJusxdxfXaRIoCdCpdo", + "lalWmdoZZSpbRiaqV+Kbdao7rxhceWk3XLi9LGFar8NqtRKLBrBeveTgp/pzr5TppDYqyQxyQ51lx2OT", + "DDiwZvY1onprw5XMu9vGKxXjlSx4ahaQYKGNmsillTDgTlfr2SnuW+dx3B7Fux7XtF454qYYqGQGz9kb", + "msp6nsCL4JP9JY37Q5pr3mF30g9XW6/6K1hz9oJK0DZaadSwDU0qg1g3f6PpH5sFeepZk+3wt2Jx8+UP", + "ty7lpBB0VVS+nkeMmizO+ZHN8lhqBEIiu+uDJVVikEatFN6kFJY7oG1AE/lr1Rs2WKqpuTqqS+BXaWm2", + "4tdJ/AqFpE4nXrnI5XnM9/w4jUhNiA5rI7NCyQT84BGgEIxCyKSvJm7M1vgnSHiedHzKZtx50VuXvGvH", + "k/flNmtB05uTCief1htuuaPPIWmxlH559k8xTPCBnyYJrOZszK0D3tCj3Urce4Nh8gmSUzHYGumOztSQ", + "zhjEbSmYly8FA/00QWTOxLgfxw8InqRUdv1xS0VV4XFbntwkubPtN5DxGJFJOjrwQRiOgP9gJefTeDoL", + "IYGcpi/p/J7xPKIT8UIYn9jQlxSXp3L4AoG/OTyuuU/wxbxBed4JBIGo+hbGfDOMVQaVWH8uIDOHO7nA", + "/ByO6MMEJHZRMKRfF0Mc69ocawye9eOMQdcQYXE8DuF66I0N/YvTG0ffiuktQ9wvR28oekQEupSGlNow", + "78CUbqfjm45wzfr2xVxrPMX1iZziJ0KE5cbkF9jqi87HKsuOWsBeRnnXBgsxR3sHwPfhjNg9byfsO1Ye", + "NjFJidr0zed9OuvxJ/HB+UT1pQsrqI+v3ER/bRRAVr+fIam09+70lUCWZ7Ciphn93oy+eJ/OuiqE0cFX", + "QF985S191dRvp0hagL7CeIwiO1mdx2PsocgD7Gzcr1AwztlA66EldgTT8TdUY9XJjg7j8RgGHopa83mr", + 
"zOf8sU6pxtVODuNxnJIaZohT4sYNcfryvh5Bo/GWVRxqibRGGWXU40q2UzgdwQRP0KyBCaR1cjOD+BHy", + "NesmnhGtlcDNkza3h3QUtTbRIjaRjsF6kpwBjJ/ipCISgYtJIUk92b5KpF7JMdenY5xOQDRWE22TsuEz", + "yAKFqFac75A452SVp3QHJkrgmAqypMro4y1wpUai4nTWxTYSjG1iGIm89pprJ/R0SUKuOg8Ogf+wlhuG", + "IR15iy8YakRNwxuHR5hgAUJlcVvRTsavYJg8GnTEfnQff4Lkmxh0paU9NEizjA5H+4f7h6acEVrYyB+q", + "661D1Y7risUWQuUqyPk79BJI0iTKIa+gZ1MplUYRisbZFD/25JB78Yw/Uc1mk5v2BEeTOH7YE1FEBz/F", + "Dw7v8ehJIVqXo4z47+5P7cRA9igeNdGGg3gc365J+Npz4eXPheJ7OZ1MraE7osWtE3McCDy7GMmyqSyL", + "V80xQu/Brok1tpZvVhP8xqHnsW8CNRQzAzGhTeqqvKECO2q7WvbcIvZkPoHSFjXlUcWb7I9nh0rXBm2D", + "U5jjw1QRIVgVcGo443cn3LRx4J9YcesNK0WUll7rUKW5OoCUqdWUCok/qfB1VRIyb7UztLwGVwJDQO7c", + "sJ0VAgOpRNnmHrE48hqHrOU0M6cJhliG2QqnSfFlhlNmEhU+7pQKoYFdtJXPG5pk9VAAtq+rNv+6ymQO", + "aRSz4OOGbp2G5c4JDVSu1/DKZ8GXPS1vvTRv6U+IlmEsF7XPnbua6YFbwWDrqzzNkeH60JlrXXku27Ry", + "6CQRiuphKw+sCuJyzFmjJjql16eblM+jrxjvUd10WE/KBun0t4GfDSkteULKFdQbWrzakBmwcRKnM5Yn", + "NANBbpQVFNbpC5x3anM4rFlILJm7W14qtem7t1CbWChfeCPBJfPKWGNDZEqEppleFkrwspWS69rALvte", + "/555t3FKqQMGXcZVISAQE8VTCHv3kPgTGNiySWeCf8sVKUEGC2aNebFcMRq8jZLEtKlh2tQwa0gN00g0", + "C9mAHW61cie5k1gWsTU75IL5FeTymqWcDJhaThVs5d1WqYAZKS6qAhYD/0YQJDBRgX9dYyggiyTj8iBN", + "ws77Tuf59vn/BQAA///+bySVV2UCAA==", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/api/v1/server/oas/transformers/api_token.go b/api/v1/server/oas/transformers/api_token.go index 5042e665e..ac69fd929 100644 --- a/api/v1/server/oas/transformers/api_token.go +++ b/api/v1/server/oas/transformers/api_token.go @@ -2,20 +2,21 @@ package transformers import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) -func ToAPIToken(token *db.APITokenModel) *gen.APIToken { +func ToAPIToken(token *dbsqlc.APIToken) *gen.APIToken { res := &gen.APIToken{ - 
Metadata: *toAPIMetadata(token.ID, token.CreatedAt, token.UpdatedAt), + Metadata: *toAPIMetadata(sqlchelpers.UUIDToStr(token.ID), token.CreatedAt.Time, token.UpdatedAt.Time), } - if expiresAt, ok := token.ExpiresAt(); ok { - res.ExpiresAt = expiresAt + if token.ExpiresAt.Valid { + res.ExpiresAt = token.ExpiresAt.Time } - if name, ok := token.Name(); ok { - res.Name = name + if token.Name.Valid { + res.Name = token.Name.String } return res diff --git a/api/v1/server/oas/transformers/event.go b/api/v1/server/oas/transformers/event.go index 04b899f31..075e5aee7 100644 --- a/api/v1/server/oas/transformers/event.go +++ b/api/v1/server/oas/transformers/event.go @@ -5,34 +5,23 @@ import ( "fmt" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" "github.com/jackc/pgx/v5/pgtype" ) -func ToEvent(event *db.EventModel) *gen.Event { - res := &gen.Event{ - Metadata: *toAPIMetadata(event.ID, event.CreatedAt, event.UpdatedAt), - Key: event.Key, - TenantId: event.TenantID, - } - - return res -} - func ToEventList(events []*dbsqlc.Event) []gen.Event { res := make([]gen.Event, len(events)) for i, event := range events { - res[i] = dbslqEventToEvent(event) + res[i] = ToEvent(event) } return res } -func dbslqEventToEvent(event *dbsqlc.Event) gen.Event { +func ToEvent(event *dbsqlc.Event) gen.Event { return gen.Event{ Metadata: *toAPIMetadata(sqlchelpers.UUIDToStr(event.ID), event.CreatedAt.Time, event.UpdatedAt.Time), Key: event.Key, diff --git a/api/v1/server/oas/transformers/log.go b/api/v1/server/oas/transformers/log.go index cef340512..cf78dc4da 100644 --- a/api/v1/server/oas/transformers/log.go +++ b/api/v1/server/oas/transformers/log.go @@ -4,7 +4,7 @@ 
import ( "encoding/json" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" ) func ToLogFromSQLC(log *dbsqlc.LogLine) *gen.LogLine { diff --git a/api/v1/server/oas/transformers/rate_limits.go b/api/v1/server/oas/transformers/rate_limits.go index d89446d99..80c46f917 100644 --- a/api/v1/server/oas/transformers/rate_limits.go +++ b/api/v1/server/oas/transformers/rate_limits.go @@ -2,7 +2,7 @@ package transformers import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" ) func ToRateLimitFromSQLC(rl *dbsqlc.ListRateLimitsForTenantNoMutateRow) (*gen.RateLimit, error) { diff --git a/api/v1/server/oas/transformers/slack.go b/api/v1/server/oas/transformers/slack.go index 09381684a..e7e85afe0 100644 --- a/api/v1/server/oas/transformers/slack.go +++ b/api/v1/server/oas/transformers/slack.go @@ -4,16 +4,17 @@ import ( "github.com/google/uuid" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) -func ToSlackWebhook(slack *db.SlackAppWebhookModel) *gen.SlackWebhook { +func ToSlackWebhook(slack *dbsqlc.SlackAppWebhook) *gen.SlackWebhook { return &gen.SlackWebhook{ - Metadata: *toAPIMetadata(slack.ID, slack.CreatedAt, slack.UpdatedAt), - TenantId: uuid.MustParse(slack.TenantID), - ChannelId: slack.ChannelID, + Metadata: *toAPIMetadata(sqlchelpers.UUIDToStr(slack.ID), slack.CreatedAt.Time, slack.UpdatedAt.Time), + TenantId: uuid.MustParse(sqlchelpers.UUIDToStr(slack.TenantId)), + ChannelId: slack.ChannelId, ChannelName: slack.ChannelName, - TeamId: slack.TeamID, + TeamId: slack.TeamId, TeamName: slack.TeamName, } } 
diff --git a/api/v1/server/oas/transformers/sns.go b/api/v1/server/oas/transformers/sns.go index dd1df73c0..e4b26fede 100644 --- a/api/v1/server/oas/transformers/sns.go +++ b/api/v1/server/oas/transformers/sns.go @@ -6,16 +6,17 @@ import ( "github.com/google/uuid" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) -func ToSNSIntegration(sns *db.SNSIntegrationModel, serverUrl string) *gen.SNSIntegration { - ingestUrl := fmt.Sprintf("%s/api/v1/sns/%s/sns-event", serverUrl, sns.TenantID) +func ToSNSIntegration(sns *dbsqlc.SNSIntegration, serverUrl string) *gen.SNSIntegration { + ingestUrl := fmt.Sprintf("%s/api/v1/sns/%s/sns-event", serverUrl, sqlchelpers.UUIDToStr(sns.TenantId)) return &gen.SNSIntegration{ - Metadata: *toAPIMetadata(sns.ID, sns.CreatedAt, sns.UpdatedAt), + Metadata: *toAPIMetadata(sqlchelpers.UUIDToStr(sns.ID), sns.CreatedAt.Time, sns.UpdatedAt.Time), TopicArn: sns.TopicArn, - TenantId: uuid.MustParse(sns.TenantID), + TenantId: uuid.MustParse(sqlchelpers.UUIDToStr(sns.TenantId)), IngestUrl: &ingestUrl, } } diff --git a/api/v1/server/oas/transformers/tenant.go b/api/v1/server/oas/transformers/tenant.go index 5c01eca60..7a4c31daa 100644 --- a/api/v1/server/oas/transformers/tenant.go +++ b/api/v1/server/oas/transformers/tenant.go @@ -4,22 +4,11 @@ import ( "strings" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) -func ToTenant(tenant *db.TenantModel) *gen.Tenant { - return &gen.Tenant{ - Metadata: 
*toAPIMetadata(tenant.ID, tenant.CreatedAt, tenant.UpdatedAt), - Name: tenant.Name, - Slug: tenant.Slug, - AnalyticsOptOut: &tenant.AnalyticsOptOut, - AlertMemberEmails: &tenant.AlertMemberEmails, - } -} - -func ToTenantSqlc(tenant *dbsqlc.Tenant) *gen.Tenant { +func ToTenant(tenant *dbsqlc.Tenant) *gen.Tenant { return &gen.Tenant{ Metadata: *toAPIMetadata(sqlchelpers.UUIDToStr(tenant.ID), tenant.CreatedAt.Time, tenant.UpdatedAt.Time), Name: tenant.Name, @@ -29,27 +18,27 @@ func ToTenantSqlc(tenant *dbsqlc.Tenant) *gen.Tenant { } } -func ToTenantAlertingSettings(alerting *db.TenantAlertingSettingsModel) *gen.TenantAlertingSettings { +func ToTenantAlertingSettings(alerting *dbsqlc.TenantAlertingSettings) *gen.TenantAlertingSettings { res := &gen.TenantAlertingSettings{ - Metadata: *toAPIMetadata(alerting.ID, alerting.CreatedAt, alerting.UpdatedAt), + Metadata: *toAPIMetadata(sqlchelpers.UUIDToStr(alerting.ID), alerting.CreatedAt.Time, alerting.UpdatedAt.Time), MaxAlertingFrequency: alerting.MaxFrequency, EnableExpiringTokenAlerts: &alerting.EnableExpiringTokenAlerts, EnableWorkflowRunFailureAlerts: &alerting.EnableWorkflowRunFailureAlerts, EnableTenantResourceLimitAlerts: &alerting.EnableTenantResourceLimitAlerts, } - if lastAlertedAt, ok := alerting.LastAlertedAt(); ok { - res.LastAlertedAt = &lastAlertedAt + if alerting.LastAlertedAt.Valid { + res.LastAlertedAt = &alerting.LastAlertedAt.Time } return res } -func ToTenantAlertEmailGroup(group *db.TenantAlertEmailGroupModel) *gen.TenantAlertEmailGroup { +func ToTenantAlertEmailGroup(group *dbsqlc.TenantAlertEmailGroup) *gen.TenantAlertEmailGroup { emails := strings.Split(group.Emails, ",") return &gen.TenantAlertEmailGroup{ - Metadata: *toAPIMetadata(group.ID, group.CreatedAt, group.UpdatedAt), + Metadata: *toAPIMetadata(sqlchelpers.UUIDToStr(group.ID), group.CreatedAt.Time, group.UpdatedAt.Time), Emails: emails, } } diff --git a/api/v1/server/oas/transformers/tenant_invite.go 
b/api/v1/server/oas/transformers/tenant_invite.go index a24301347..b0099712f 100644 --- a/api/v1/server/oas/transformers/tenant_invite.go +++ b/api/v1/server/oas/transformers/tenant_invite.go @@ -2,20 +2,17 @@ package transformers import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) -func ToTenantInviteLink(invite *db.TenantInviteLinkModel) *gen.TenantInvite { +func ToTenantInviteLink(invite *dbsqlc.TenantInviteLink) *gen.TenantInvite { res := &gen.TenantInvite{ - Metadata: *toAPIMetadata(invite.ID, invite.CreatedAt, invite.UpdatedAt), + Metadata: *toAPIMetadata(sqlchelpers.UUIDToStr(invite.ID), invite.CreatedAt.Time, invite.UpdatedAt.Time), Email: invite.InviteeEmail, - Expires: invite.Expires, + Expires: invite.Expires.Time, Role: gen.TenantMemberRole(invite.Role), - TenantId: invite.TenantID, - } - - if invite.RelationsTenantInviteLink.Tenant != nil { - res.TenantName = &invite.Tenant().Name + TenantId: sqlchelpers.UUIDToStr(invite.TenantId), } return res diff --git a/api/v1/server/oas/transformers/user.go b/api/v1/server/oas/transformers/user.go index 092859929..8e7e7bca6 100644 --- a/api/v1/server/oas/transformers/user.go +++ b/api/v1/server/oas/transformers/user.go @@ -4,18 +4,20 @@ import ( "github.com/oapi-codegen/runtime/types" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/repository" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) -func ToUser(user *db.UserModel, hasPassword bool, hashedEmail *string) *gen.User { +func ToUser(user *dbsqlc.User, hasPassword bool, hashedEmail *string) *gen.User { var name *string - if dbName, ok := user.Name(); ok { - name 
= &dbName + if user.Name.Valid { + name = &user.Name.String } return &gen.User{ - Metadata: *toAPIMetadata(user.ID, user.CreatedAt, user.UpdatedAt), + Metadata: *toAPIMetadata(sqlchelpers.UUIDToStr(user.ID), user.CreatedAt.Time, user.UpdatedAt.Time), Email: types.Email(user.Email), EmailHash: hashedEmail, EmailVerified: user.EmailVerified, @@ -24,28 +26,22 @@ func ToUser(user *db.UserModel, hasPassword bool, hashedEmail *string) *gen.User } } -func ToUserTenantPublic(user *db.UserModel) *gen.UserTenantPublic { - var name *string - - if dbName, ok := user.Name(); ok { - name = &dbName - } - - return &gen.UserTenantPublic{ - Email: types.Email(user.Email), - Name: name, - } -} - -func ToTenantMember(tenantMember *db.TenantMemberModel) *gen.TenantMember { +func ToTenantMember(tenantMember *dbsqlc.PopulateTenantMembersRow) *gen.TenantMember { res := &gen.TenantMember{ - Metadata: *toAPIMetadata(tenantMember.ID, tenantMember.CreatedAt, tenantMember.UpdatedAt), - User: *ToUserTenantPublic(tenantMember.User()), - Role: gen.TenantMemberRole(tenantMember.Role), - } - - if tenantMember.Tenant() != nil { - res.Tenant = ToTenant(tenantMember.Tenant()) + Metadata: *toAPIMetadata(sqlchelpers.UUIDToStr(tenantMember.ID), tenantMember.CreatedAt.Time, tenantMember.UpdatedAt.Time), + User: gen.UserTenantPublic{ + Email: types.Email(tenantMember.Email), + Name: repository.StringPtr(tenantMember.Name.String), + }, + Tenant: &gen.Tenant{ + Metadata: *toAPIMetadata(sqlchelpers.UUIDToStr(tenantMember.TenantId), tenantMember.TenantCreatedAt.Time, tenantMember.TenantUpdatedAt.Time), + Name: tenantMember.TenantName, + Slug: tenantMember.TenantSlug, + AnalyticsOptOut: &tenantMember.AnalyticsOptOut, + AlertMemberEmails: &tenantMember.AlertMemberEmails, + Version: gen.TenantVersion(tenantMember.TenantVersion), + }, + Role: gen.TenantMemberRole(tenantMember.Role), } return res diff --git a/api/v1/server/oas/transformers/v1/log.go b/api/v1/server/oas/transformers/v1/log.go new file mode 100644 
index 000000000..e315d2078 --- /dev/null +++ b/api/v1/server/oas/transformers/v1/log.go @@ -0,0 +1,27 @@ +package transformers + +import ( + "encoding/json" + + "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" + "github.com/hatchet-dev/hatchet/pkg/repository/v1/sqlcv1" +) + +func ToV1LogLine(log *sqlcv1.V1LogLine) *gen.V1LogLine { + res := &gen.V1LogLine{ + CreatedAt: log.CreatedAt.Time, + Message: log.Message, + } + + if log.Metadata != nil { + meta := map[string]interface{}{} + + err := json.Unmarshal(log.Metadata, &meta) + + if err == nil { + res.Metadata = meta + } + } + + return res +} diff --git a/api/v1/server/oas/transformers/v1/tasks.go b/api/v1/server/oas/transformers/v1/tasks.go new file mode 100644 index 000000000..9fd9457d2 --- /dev/null +++ b/api/v1/server/oas/transformers/v1/tasks.go @@ -0,0 +1,370 @@ +package transformers + +import ( + "encoding/json" + "math" + "time" + + "github.com/google/uuid" + "github.com/jackc/pgx/v5/pgtype" + "github.com/oapi-codegen/runtime/types" + + "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" + v1 "github.com/hatchet-dev/hatchet/pkg/repository/v1" + "github.com/hatchet-dev/hatchet/pkg/repository/v1/sqlcv1" +) + +func jsonToMap(jsonBytes []byte) map[string]interface{} { + result := make(map[string]interface{}) + json.Unmarshal(jsonBytes, &result) // nolint: errcheck + return result +} + +func ToTaskSummary(task *sqlcv1.PopulateTaskRunDataRow) gen.V1TaskSummary { + additionalMetadata := jsonToMap(task.AdditionalMetadata) + + var finishedAt *time.Time + + if task.FinishedAt.Valid { + finishedAt = &task.FinishedAt.Time + } + + var startedAt *time.Time + + if task.StartedAt.Valid { + startedAt = &task.StartedAt.Time + } + + var durationPtr *int + + if task.FinishedAt.Valid && task.StartedAt.Valid { + duration := 
int(task.FinishedAt.Time.Sub(task.StartedAt.Time).Milliseconds()) + durationPtr = &duration + } + + taskExternalId := uuid.MustParse(sqlchelpers.UUIDToStr(task.ExternalID)) + stepId := uuid.MustParse(sqlchelpers.UUIDToStr(task.StepID)) + + return gen.V1TaskSummary{ + Metadata: gen.APIResourceMeta{ + Id: sqlchelpers.UUIDToStr(task.ExternalID), + CreatedAt: task.InsertedAt.Time, + UpdatedAt: task.InsertedAt.Time, + }, + DisplayName: task.DisplayName, + Duration: durationPtr, + StartedAt: startedAt, + FinishedAt: finishedAt, + AdditionalMetadata: &additionalMetadata, + ErrorMessage: &task.ErrorMessage.String, + Status: gen.V1TaskStatus(task.Status), + TenantId: uuid.MustParse(sqlchelpers.UUIDToStr(task.TenantID)), + WorkflowId: uuid.MustParse(sqlchelpers.UUIDToStr(task.WorkflowID)), + TaskId: int(task.ID), + TaskInsertedAt: task.InsertedAt.Time, + TaskExternalId: taskExternalId, + StepId: &stepId, + } +} + +func ToTaskSummaryRows( + tasks []*sqlcv1.PopulateTaskRunDataRow, +) []gen.V1TaskSummary { + toReturn := make([]gen.V1TaskSummary, len(tasks)) + + for i, task := range tasks { + toReturn[i] = ToTaskSummary(task) + } + + return toReturn +} + +func ToDagChildren( + tasks []*sqlcv1.PopulateTaskRunDataRow, + taskIdToDagExternalId map[int64]uuid.UUID, +) []gen.V1DagChildren { + dagIdToTasks := make(map[uuid.UUID][]gen.V1TaskSummary) + + for _, task := range tasks { + dagId := taskIdToDagExternalId[task.ID] + dagIdToTasks[dagId] = append(dagIdToTasks[dagId], ToTaskSummary(task)) + } + + toReturn := make([]gen.V1DagChildren, 0, len(dagIdToTasks)) + + for dagId, tasks := range dagIdToTasks { + dagIdCp := dagId + tasksCp := tasks + + toReturn = append(toReturn, gen.V1DagChildren{ + DagId: &dagIdCp, + Children: &tasksCp, + }) + } + + return toReturn +} + +func ToTaskSummaryMany( + tasks []*sqlcv1.PopulateTaskRunDataRow, + total int, limit, offset int64, +) gen.V1TaskSummaryList { + toReturn := ToTaskSummaryRows(tasks) + + currentPage := (offset / limit) + 1 + nextPage := 
currentPage + 1 + numPages := int64(math.Ceil(float64(total) / float64(limit))) + + return gen.V1TaskSummaryList{ + Rows: toReturn, + Pagination: gen.PaginationResponse{ + CurrentPage: ¤tPage, + NextPage: &nextPage, + NumPages: &numPages, + }, + } +} + +func ToTaskRunEventMany( + events []*sqlcv1.ListTaskEventsRow, + taskExternalId string, +) gen.V1TaskEventList { + toReturn := make([]gen.V1TaskEvent, len(events)) + + for i, event := range events { + // data := jsonToMap(event.Data) + // taskInput := jsonToMap(event.TaskInput) + // additionalMetadata := jsonToMap(event.AdditionalMetadata) + + var workerId *types.UUID + + if event.WorkerID.Valid { + workerUUid := uuid.MustParse(sqlchelpers.UUIDToStr(event.WorkerID)) + workerId = &workerUUid + } + + toReturn[i] = gen.V1TaskEvent{ + Id: int(event.ID), + ErrorMessage: &event.ErrorMessage.String, + EventType: gen.V1TaskEventType(event.EventType), + Message: event.AdditionalEventMessage.String, + Timestamp: event.EventTimestamp.Time, + WorkerId: workerId, + TaskId: uuid.MustParse(taskExternalId), + // TaskInput: &taskInput, + } + } + + return gen.V1TaskEventList{ + Rows: &toReturn, + Pagination: &gen.PaginationResponse{}, + } +} + +func ToWorkflowRunTaskRunEventsMany( + events []*sqlcv1.ListTaskEventsForWorkflowRunRow, +) gen.V1TaskEventList { + toReturn := make([]gen.V1TaskEvent, len(events)) + + for i, event := range events { + workerId := uuid.MustParse(sqlchelpers.UUIDToStr(event.WorkerID)) + output := string(event.Output) + taskExternalId := uuid.MustParse(sqlchelpers.UUIDToStr(event.TaskExternalID)) + + toReturn[i] = gen.V1TaskEvent{ + ErrorMessage: &event.ErrorMessage.String, + EventType: gen.V1TaskEventType(event.EventType), + Id: int(event.ID), + Message: event.AdditionalEventMessage.String, + Output: &output, + TaskDisplayName: &event.DisplayName, + TaskId: taskExternalId, + Timestamp: event.EventTimestamp.Time, + WorkerId: &workerId, + } + } + + return gen.V1TaskEventList{ + Rows: &toReturn, + Pagination: 
&gen.PaginationResponse{}, + } +} + +func ToTaskRunMetrics(metrics *[]v1.TaskRunMetric) gen.V1TaskRunMetrics { + statuses := []gen.V1TaskStatus{ + gen.V1TaskStatusCANCELLED, + gen.V1TaskStatusCOMPLETED, + gen.V1TaskStatusFAILED, + gen.V1TaskStatusQUEUED, + gen.V1TaskStatusRUNNING, + } + + toReturn := make([]gen.V1TaskRunMetric, len(statuses)) + + for i, status := range statuses { + metric := v1.TaskRunMetric{Count: 0} + + for _, m := range *metrics { + if m.Status == string(status) { + metric = m + break + } + } + + toReturn[i] = gen.V1TaskRunMetric{ + Count: int(metric.Count), // nolint: gosec + Status: status, + } + } + + return toReturn +} + +func ToTask(taskWithData *sqlcv1.PopulateSingleTaskRunDataRow, workflowRunExternalId *pgtype.UUID) gen.V1TaskSummary { + additionalMetadata := jsonToMap(taskWithData.AdditionalMetadata) + + var finishedAt *time.Time + + if taskWithData.FinishedAt.Valid { + finishedAt = &taskWithData.FinishedAt.Time + } + + var startedAt *time.Time + + if taskWithData.StartedAt.Valid { + startedAt = &taskWithData.StartedAt.Time + } + + var durationPtr *int + + if taskWithData.FinishedAt.Valid && taskWithData.StartedAt.Valid { + duration := int(taskWithData.FinishedAt.Time.Sub(taskWithData.StartedAt.Time).Milliseconds()) + durationPtr = &duration + } + + output := make(map[string]interface{}) + + if taskWithData.Output != nil { + output = jsonToMap(taskWithData.Output) + } + + input := jsonToMap(taskWithData.Input) + + var parsedWorkflowRunUUID *uuid.UUID + + if workflowRunExternalId != nil && workflowRunExternalId.Valid { + id := uuid.MustParse(sqlchelpers.UUIDToStr(*workflowRunExternalId)) + parsedWorkflowRunUUID = &id + } + + stepId := uuid.MustParse(sqlchelpers.UUIDToStr(taskWithData.StepID)) + + return gen.V1TaskSummary{ + Metadata: gen.APIResourceMeta{ + Id: sqlchelpers.UUIDToStr(taskWithData.ExternalID), + CreatedAt: taskWithData.InsertedAt.Time, + UpdatedAt: taskWithData.InsertedAt.Time, + }, + TaskId: int(taskWithData.ID), + 
TaskInsertedAt: taskWithData.InsertedAt.Time, + DisplayName: taskWithData.DisplayName, + AdditionalMetadata: &additionalMetadata, + Duration: durationPtr, + StartedAt: startedAt, + FinishedAt: finishedAt, + Output: output, + Status: gen.V1TaskStatus(taskWithData.Status), + Input: input, + TenantId: uuid.MustParse(sqlchelpers.UUIDToStr(taskWithData.TenantID)), + WorkflowId: uuid.MustParse(sqlchelpers.UUIDToStr(taskWithData.WorkflowID)), + ErrorMessage: &taskWithData.ErrorMessage.String, + WorkflowRunExternalId: parsedWorkflowRunUUID, + TaskExternalId: uuid.MustParse(sqlchelpers.UUIDToStr(taskWithData.ExternalID)), + Type: gen.V1WorkflowTypeTASK, + NumSpawnedChildren: int(taskWithData.SpawnedChildren.Int64), + StepId: &stepId, + } +} + +func ToWorkflowRunDetails( + taskRunEvents []*sqlcv1.ListTaskEventsForWorkflowRunRow, + workflowRun *v1.WorkflowRunData, + shape []*dbsqlc.GetWorkflowRunShapeRow, + tasks []*sqlcv1.PopulateTaskRunDataRow, + stepIdToTaskExternalId map[pgtype.UUID]pgtype.UUID, +) (gen.V1WorkflowRunDetails, error) { + workflowVersionId := uuid.MustParse(sqlchelpers.UUIDToStr(workflowRun.WorkflowVersionId)) + duration := int(workflowRun.FinishedAt.Time.Sub(workflowRun.StartedAt.Time).Milliseconds()) + input := jsonToMap(workflowRun.Input) + additionalMetadata := jsonToMap(workflowRun.AdditionalMetadata) + + parsedWorkflowRun := gen.V1WorkflowRun{ + AdditionalMetadata: &additionalMetadata, + CreatedAt: &workflowRun.CreatedAt.Time, + DisplayName: workflowRun.DisplayName, + Duration: &duration, + ErrorMessage: &workflowRun.ErrorMessage, + FinishedAt: &workflowRun.FinishedAt.Time, + Metadata: gen.APIResourceMeta{ + Id: sqlchelpers.UUIDToStr(workflowRun.ExternalID), + CreatedAt: workflowRun.InsertedAt.Time, + UpdatedAt: workflowRun.InsertedAt.Time, + }, + StartedAt: &workflowRun.StartedAt.Time, + Status: gen.V1TaskStatus(workflowRun.ReadableStatus), + TenantId: uuid.MustParse(sqlchelpers.UUIDToStr(workflowRun.TenantID)), + WorkflowId: 
uuid.MustParse(sqlchelpers.UUIDToStr(workflowRun.WorkflowID)), + WorkflowVersionId: &workflowVersionId, + Input: input, + } + + shapeRows := make([]gen.WorkflowRunShapeItemForWorkflowRunDetails, len(shape)) + + for i, shapeRow := range shape { + parentExternalId := uuid.MustParse(sqlchelpers.UUIDToStr(stepIdToTaskExternalId[shapeRow.Parentstepid])) + ChildrenStepIds := make([]uuid.UUID, len(shapeRow.Childrenstepids)) + taskName := shapeRow.Stepname.String + stepId := shapeRow.Parentstepid + + for c, child := range shapeRow.Childrenstepids { + ChildrenStepIds[c] = uuid.MustParse(sqlchelpers.UUIDToStr(child)) + } + + shapeRows[i] = gen.WorkflowRunShapeItemForWorkflowRunDetails{ + ChildrenStepIds: ChildrenStepIds, + TaskExternalId: parentExternalId, + TaskName: taskName, + StepId: uuid.MustParse(sqlchelpers.UUIDToStr(stepId)), + } + } + + parsedTaskEvents := make([]gen.V1TaskEvent, len(taskRunEvents)) + + for i, event := range taskRunEvents { + workerId := uuid.MustParse(sqlchelpers.UUIDToStr(event.WorkerID)) + output := string(event.Output) + + parsedTaskEvents[i] = gen.V1TaskEvent{ + ErrorMessage: &event.ErrorMessage.String, + EventType: gen.V1TaskEventType(event.EventType), + Id: int(event.ID), + Message: event.AdditionalEventMessage.String, + Output: &output, + TaskDisplayName: &event.DisplayName, + Timestamp: event.EventTimestamp.Time, + WorkerId: &workerId, + TaskId: uuid.MustParse(sqlchelpers.UUIDToStr(event.TaskExternalID)), + } + } + + parsedTasks := ToTaskSummaryRows(tasks) + + return gen.V1WorkflowRunDetails{ + Run: parsedWorkflowRun, + Shape: shapeRows, + TaskEvents: parsedTaskEvents, + Tasks: parsedTasks, + }, nil +} diff --git a/api/v1/server/oas/transformers/v1/worker.go b/api/v1/server/oas/transformers/v1/worker.go new file mode 100644 index 000000000..8781166d0 --- /dev/null +++ b/api/v1/server/oas/transformers/v1/worker.go @@ -0,0 +1,120 @@ +package transformers + +import ( + "time" + + "github.com/google/uuid" + "github.com/jackc/pgx/v5/pgtype" + + 
"github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/v1/sqlcv1" +) + +func ToSlotState(slots []*sqlcv1.ListSemaphoreSlotsWithStateForWorkerRow, remainingSlots int) *[]gen.SemaphoreSlots { + resp := make([]gen.SemaphoreSlots, len(slots)) + + for i := range slots { + slot := slots[i] + + var stepRunId uuid.UUID + var workflowRunId uuid.UUID + + if slot.ExternalID.Valid { + stepRunId = uuid.MustParse(sqlchelpers.UUIDToStr(slot.ExternalID)) + workflowRunId = uuid.MustParse(sqlchelpers.UUIDToStr(slot.ExternalID)) + } + + resp[i] = gen.SemaphoreSlots{ + StepRunId: stepRunId, + Status: gen.StepRunStatusRUNNING, + ActionId: slot.ActionID, + WorkflowRunId: workflowRunId, + TimeoutAt: &slot.TimeoutAt.Time, + StartedAt: &slot.InsertedAt.Time, + } + } + + for i := len(slots); i < remainingSlots; i++ { + resp = append(resp, gen.SemaphoreSlots{}) + } + + return &resp +} + +func ToWorkerRuntimeInfo(worker *sqlcv1.Worker) *gen.WorkerRuntimeInfo { + + runtime := &gen.WorkerRuntimeInfo{ + SdkVersion: &worker.SdkVersion.String, + LanguageVersion: &worker.LanguageVersion.String, + Os: &worker.Os.String, + RuntimeExtra: &worker.RuntimeExtra.String, + } + + if worker.Language.Valid { + lang := gen.WorkerRuntimeSDKs(worker.Language.WorkerSDKS) + runtime.Language = &lang + } + + return runtime +} + +func ToWorkerSqlc(worker *sqlcv1.Worker, remainingSlots *int, webhookUrl *string, actions []pgtype.Text) *gen.Worker { + + dispatcherId := uuid.MustParse(sqlchelpers.UUIDToStr(worker.DispatcherId)) + + maxRuns := int(worker.MaxRuns) + + status := gen.ACTIVE + + if worker.IsPaused { + status = gen.PAUSED + } + + if worker.LastHeartbeatAt.Time.Add(5 * time.Second).Before(time.Now()) { + status = gen.INACTIVE + } + + var availableRuns int + + if remainingSlots != nil { + availableRuns = *remainingSlots + } + + res := &gen.Worker{ + Metadata: gen.APIResourceMeta{ + Id: 
sqlchelpers.UUIDToStr(worker.ID), + CreatedAt: worker.CreatedAt.Time, + UpdatedAt: worker.UpdatedAt.Time, + }, + Name: worker.Name, + Type: gen.WorkerType(worker.Type), + Status: &status, + DispatcherId: &dispatcherId, + MaxRuns: &maxRuns, + AvailableRuns: &availableRuns, + WebhookUrl: webhookUrl, + RuntimeInfo: ToWorkerRuntimeInfo(worker), + } + + if worker.WebhookId.Valid { + wid := uuid.MustParse(sqlchelpers.UUIDToStr(worker.WebhookId)) + res.WebhookId = &wid + } + + if !worker.LastHeartbeatAt.Time.IsZero() { + res.LastHeartbeatAt = &worker.LastHeartbeatAt.Time + } + + if actions != nil { + apiActions := make([]string, len(actions)) + + for i := range actions { + apiActions[i] = actions[i].String + } + + res.Actions = &apiActions + } + + return res +} diff --git a/api/v1/server/oas/transformers/v1/workflow_runs.go b/api/v1/server/oas/transformers/v1/workflow_runs.go new file mode 100644 index 000000000..f21c1ab50 --- /dev/null +++ b/api/v1/server/oas/transformers/v1/workflow_runs.go @@ -0,0 +1,234 @@ +package transformers + +import ( + "math" + "time" + + "github.com/google/uuid" + "github.com/jackc/pgx/v5/pgtype" + + "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" + v1 "github.com/hatchet-dev/hatchet/pkg/repository/v1" + "github.com/hatchet-dev/hatchet/pkg/repository/v1/sqlcv1" +) + +func WorkflowRunDataToV1TaskSummary(task *v1.WorkflowRunData, workflowIdsToNames map[pgtype.UUID]string) gen.V1TaskSummary { + additionalMetadata := jsonToMap(task.AdditionalMetadata) + + var finishedAt *time.Time + + if task.FinishedAt.Valid { + finishedAt = &task.FinishedAt.Time + } + + var startedAt *time.Time + + if task.StartedAt.Valid { + startedAt = &task.StartedAt.Time + } + + var durationPtr *int + + if task.FinishedAt.Valid && task.StartedAt.Valid { + duration := int(task.FinishedAt.Time.Sub(task.StartedAt.Time).Milliseconds()) + durationPtr = &duration + } + + input := jsonToMap(task.Input) + + 
var output map[string]interface{} + + if task.Output != nil { + output = jsonToMap(*task.Output) + } + + workflowVersionId := uuid.MustParse(sqlchelpers.UUIDToStr(task.WorkflowVersionId)) + + var taskId int + if task.TaskId != nil { + taskId = int(*task.TaskId) + } + + var stepId uuid.UUID + if task.StepId != nil { + stepId = uuid.MustParse(sqlchelpers.UUIDToStr(*task.StepId)) + } else { + stepId = uuid.Nil + } + + var workflowName *string + + if name, ok := workflowIdsToNames[task.WorkflowID]; ok { + workflowName = &name + } + + return gen.V1TaskSummary{ + Metadata: gen.APIResourceMeta{ + Id: sqlchelpers.UUIDToStr(task.ExternalID), + CreatedAt: task.InsertedAt.Time, + UpdatedAt: task.InsertedAt.Time, + }, + CreatedAt: task.CreatedAt.Time, + DisplayName: task.DisplayName, + Duration: durationPtr, + StartedAt: startedAt, + FinishedAt: finishedAt, + Input: input, + Output: output, + AdditionalMetadata: &additionalMetadata, + ErrorMessage: &task.ErrorMessage, + Status: gen.V1TaskStatus(task.ReadableStatus), + TenantId: uuid.MustParse(sqlchelpers.UUIDToStr(task.TenantID)), + WorkflowId: uuid.MustParse(sqlchelpers.UUIDToStr(task.WorkflowID)), + WorkflowVersionId: &workflowVersionId, + TaskExternalId: uuid.MustParse(sqlchelpers.UUIDToStr(task.ExternalID)), + TaskId: taskId, + TaskInsertedAt: task.InsertedAt.Time, + Type: gen.V1WorkflowTypeDAG, + WorkflowName: workflowName, + StepId: &stepId, + } +} + +func ToWorkflowRunMany( + tasks []*v1.WorkflowRunData, + dagExternalIdToChildren map[uuid.UUID][]gen.V1TaskSummary, + workflowIdsToNames map[pgtype.UUID]string, + total int, limit, offset int64, +) gen.V1TaskSummaryList { + toReturn := make([]gen.V1TaskSummary, len(tasks)) + + for i, task := range tasks { + dagExternalId := uuid.MustParse(sqlchelpers.UUIDToStr(task.ExternalID)) + toReturn[i] = WorkflowRunDataToV1TaskSummary(task, workflowIdsToNames) + + children, ok := dagExternalIdToChildren[dagExternalId] + + if ok { + toReturn[i].Children = &children + } + } + + 
currentPage := (offset / limit) + 1 + nextPage := currentPage + 1 + numPages := int64(math.Ceil(float64(total) / float64(limit))) + + return gen.V1TaskSummaryList{ + Rows: toReturn, + Pagination: gen.PaginationResponse{ + CurrentPage: &currentPage, + NextPage: &nextPage, + NumPages: &numPages, + }, + } +} + +func PopulateTaskRunDataRowToV1TaskSummary(task *sqlcv1.PopulateTaskRunDataRow, workflowName *string) gen.V1TaskSummary { + additionalMetadata := jsonToMap(task.AdditionalMetadata) + + var finishedAt *time.Time + + if task.FinishedAt.Valid { + finishedAt = &task.FinishedAt.Time + } + + var startedAt *time.Time + + if task.StartedAt.Valid { + startedAt = &task.StartedAt.Time + } + + var durationPtr *int + + if task.FinishedAt.Valid && task.StartedAt.Valid { + duration := int(task.FinishedAt.Time.Sub(task.StartedAt.Time).Milliseconds()) + durationPtr = &duration + } + + input := jsonToMap(task.Input) + output := jsonToMap(task.Output) + stepId := uuid.MustParse(sqlchelpers.UUIDToStr(task.StepID)) + + return gen.V1TaskSummary{ + Metadata: gen.APIResourceMeta{ + Id: sqlchelpers.UUIDToStr(task.ExternalID), + CreatedAt: task.InsertedAt.Time, + UpdatedAt: task.InsertedAt.Time, + }, + CreatedAt: task.InsertedAt.Time, + DisplayName: task.DisplayName, + Duration: durationPtr, + StartedAt: startedAt, + FinishedAt: finishedAt, + Input: input, + Output: output, + AdditionalMetadata: &additionalMetadata, + ErrorMessage: &task.ErrorMessage.String, + Status: gen.V1TaskStatus(task.Status), + TenantId: uuid.MustParse(sqlchelpers.UUIDToStr(task.TenantID)), + WorkflowId: uuid.MustParse(sqlchelpers.UUIDToStr(task.WorkflowID)), + WorkflowVersionId: nil, + Children: nil, + TaskExternalId: uuid.MustParse(sqlchelpers.UUIDToStr(task.ExternalID)), + TaskId: int(task.ID), + TaskInsertedAt: task.InsertedAt.Time, + Type: gen.V1WorkflowTypeTASK, + WorkflowName: workflowName, + StepId: &stepId, + } +} + +func TaskRunDataRowToWorkflowRunsMany( + tasks []*sqlcv1.PopulateTaskRunDataRow, + 
taskIdToWorkflowName map[int64]string, + total int, limit, offset int64, +) gen.V1TaskSummaryList { + toReturn := make([]gen.V1TaskSummary, len(tasks)) + + for i, task := range tasks { + workflowName := taskIdToWorkflowName[task.ID] + toReturn[i] = PopulateTaskRunDataRowToV1TaskSummary(task, &workflowName) + } + + currentPage := (offset / limit) + 1 + nextPage := currentPage + 1 + numPages := int64(math.Ceil(float64(total) / float64(limit))) + + return gen.V1TaskSummaryList{ + Rows: toReturn, + Pagination: gen.PaginationResponse{ + CurrentPage: &currentPage, + NextPage: &nextPage, + NumPages: &numPages, + }, + } +} + +func ToWorkflowRunDisplayNamesList( + displayNames []*sqlcv1.ListWorkflowRunDisplayNamesRow, +) gen.V1WorkflowRunDisplayNameList { + result := make([]gen.V1WorkflowRunDisplayName, len(displayNames)) + + for i, record := range displayNames { + result[i] = gen.V1WorkflowRunDisplayName{ + DisplayName: record.DisplayName, + Metadata: gen.APIResourceMeta{ + Id: sqlchelpers.UUIDToStr(record.ExternalID), + CreatedAt: record.InsertedAt.Time, + UpdatedAt: record.InsertedAt.Time, + }, + } + } + + page := int64(1) + + return gen.V1WorkflowRunDisplayNameList{ + Rows: result, + Pagination: gen.PaginationResponse{ + CurrentPage: &page, + NextPage: nil, + NumPages: &page, + }, + } +} diff --git a/api/v1/server/oas/transformers/webhook_worker.go b/api/v1/server/oas/transformers/webhook_worker.go index e8644e1df..4c36dba73 100644 --- a/api/v1/server/oas/transformers/webhook_worker.go +++ b/api/v1/server/oas/transformers/webhook_worker.go @@ -2,8 +2,8 @@ package transformers import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func ToWebhookWorkerRequest(webhookWorker *dbsqlc.WebhookWorkerRequest) 
*gen.WebhookWorkerRequest { diff --git a/api/v1/server/oas/transformers/worker.go b/api/v1/server/oas/transformers/worker.go index f575bfdbc..6c770973e 100644 --- a/api/v1/server/oas/transformers/worker.go +++ b/api/v1/server/oas/transformers/worker.go @@ -8,8 +8,8 @@ import ( "github.com/jackc/pgx/v5/pgtype" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func ToSlotState(slots []*dbsqlc.ListSemaphoreSlotsWithStateForWorkerRow, remainingSlots int) *[]gen.SemaphoreSlots { diff --git a/api/v1/server/oas/transformers/workflow.go b/api/v1/server/oas/transformers/workflow.go index 2caedc393..199fb77bd 100644 --- a/api/v1/server/oas/transformers/workflow.go +++ b/api/v1/server/oas/transformers/workflow.go @@ -1,15 +1,11 @@ package transformers import ( - "context" - "github.com/jackc/pgx/v5/pgtype" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" - "github.com/hatchet-dev/hatchet/pkg/client/types" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func ToWorkflow( @@ -155,81 +151,6 @@ func ToWorkflowVersionConcurrency(concurrency *WorkflowConcurrency) *gen.Workflo return res } -func ToWorkflowYAMLBytes(workflow *db.WorkflowModel, version *db.WorkflowVersionModel) ([]byte, error) { - res := &types.Workflow{ - Name: workflow.Name, - } - - if setVersion, ok := version.Version(); ok { - res.Version = setVersion - } - - if description, ok := workflow.Description(); ok { - res.Description = description - } - - if 
triggers, ok := version.Triggers(); ok && triggers != nil { - triggersResp := types.WorkflowTriggers{} - - if crons := triggers.Crons(); len(crons) > 0 { - triggersResp.Cron = make([]string, len(crons)) - - for i, cron := range crons { - triggersResp.Cron[i] = cron.Cron - } - } - - if events := triggers.Events(); len(events) > 0 { - triggersResp.Events = make([]string, len(events)) - - for i, event := range events { - triggersResp.Events[i] = event.EventKey - } - } - - res.Triggers = triggersResp - } - - if jobs := version.Jobs(); jobs != nil { - res.Jobs = make(map[string]types.WorkflowJob, len(jobs)) - - for _, job := range jobs { - jobCp := job - - jobRes := types.WorkflowJob{} - - if description, ok := jobCp.Description(); ok { - jobRes.Description = description - } - - if steps := jobCp.Steps(); steps != nil { - jobRes.Steps = make([]types.WorkflowStep, 0) - - for _, step := range steps { - stepRes := types.WorkflowStep{ - ID: step.ID, - ActionID: step.ActionID, - } - - if readableId, ok := step.ReadableID(); ok { - stepRes.ID = readableId - } - - if timeout, ok := step.Timeout(); ok { - stepRes.Timeout = timeout - } - - jobRes.Steps = append(jobRes.Steps, stepRes) - } - - res.Jobs[jobCp.Name] = jobRes - } - } - } - - return types.ToYAML(context.Background(), res) -} - func ToJob(job *dbsqlc.Job, steps []*dbsqlc.GetStepsForJobsRow) *gen.Job { res := &gen.Job{ Metadata: *toAPIMetadata( diff --git a/api/v1/server/oas/transformers/workflow_run.go b/api/v1/server/oas/transformers/workflow_run.go index b9a7fc27a..d6684bcea 100644 --- a/api/v1/server/oas/transformers/workflow_run.go +++ b/api/v1/server/oas/transformers/workflow_run.go @@ -9,8 +9,8 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" + 
"github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) func ToWorkflowRunShape( diff --git a/api/v1/server/run/run.go b/api/v1/server/run/run.go index 148c688fa..af3ad2508 100644 --- a/api/v1/server/run/run.go +++ b/api/v1/server/run/run.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "net/http" + "time" "github.com/getkin/kin-openapi/openapi3" "github.com/labstack/echo/v4" @@ -25,6 +26,8 @@ import ( stepruns "github.com/hatchet-dev/hatchet/api/v1/server/handlers/step-runs" "github.com/hatchet-dev/hatchet/api/v1/server/handlers/tenants" "github.com/hatchet-dev/hatchet/api/v1/server/handlers/users" + "github.com/hatchet-dev/hatchet/api/v1/server/handlers/v1/tasks" + workflowrunsv1 "github.com/hatchet-dev/hatchet/api/v1/server/handlers/v1/workflow-runs" webhookworker "github.com/hatchet-dev/hatchet/api/v1/server/handlers/webhook-worker" "github.com/hatchet-dev/hatchet/api/v1/server/handlers/workers" workflowruns "github.com/hatchet-dev/hatchet/api/v1/server/handlers/workflow-runs" @@ -34,7 +37,7 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/middleware/populator" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/pkg/config/server" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) type apiService struct { @@ -54,6 +57,8 @@ type apiService struct { *workflowruns.WorkflowRunsService *monitoring.MonitoringService *info.InfoService + *tasks.TasksService + *workflowrunsv1.V1WorkflowRunsService } func newAPIService(config *server.ServerConfig) *apiService { @@ -74,6 +79,8 @@ func newAPIService(config *server.ServerConfig) *apiService { WebhookWorkersService: webhookworker.NewWebhookWorkersService(config), MonitoringService: monitoring.NewMonitoringService(config), InfoService: info.NewInfoService(config), + TasksService: tasks.NewTasksService(config), + V1WorkflowRunsService: workflowrunsv1.NewV1WorkflowRunsService(config), } 
} @@ -150,6 +157,8 @@ func (t *APIServer) getCoreEchoService() (*echo.Echo, error) { } e := echo.New() + e.HideBanner = true + e.HidePort = true g := e.Group("") @@ -171,7 +180,10 @@ func (t *APIServer) registerSpec(g *echo.Group, spec *openapi3.T) (*populator.Po populatorMW := populator.NewPopulator(t.config) populatorMW.RegisterGetter("tenant", func(config *server.ServerConfig, parentId, id string) (result interface{}, uniqueParentId string, err error) { - tenant, err := config.APIRepository.Tenant().GetTenantByID(id) + ctxTimeout, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + tenant, err := config.APIRepository.Tenant().GetTenantByID(ctxTimeout, id) if err != nil { return nil, "", err @@ -181,7 +193,10 @@ func (t *APIServer) registerSpec(g *echo.Group, spec *openapi3.T) (*populator.Po }) populatorMW.RegisterGetter("api-token", func(config *server.ServerConfig, parentId, id string) (result interface{}, uniqueParentId string, err error) { - apiToken, err := config.APIRepository.APIToken().GetAPITokenById(id) + ctxTimeout, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + apiToken, err := config.APIRepository.APIToken().GetAPITokenById(ctxTimeout, id) if err != nil { return nil, "", err @@ -190,53 +205,63 @@ func (t *APIServer) registerSpec(g *echo.Group, spec *openapi3.T) (*populator.Po // at the moment, API tokens should have a tenant id, because there are no other types of // API tokens. If we add other types of API tokens, we'll need to pass in a parent id to query // for. 
- tenantId, ok := apiToken.TenantID() - - if !ok { + if !apiToken.TenantId.Valid { return nil, "", fmt.Errorf("api token has no tenant id") } - return apiToken, tenantId, nil + return apiToken, sqlchelpers.UUIDToStr(apiToken.TenantId), nil }) populatorMW.RegisterGetter("tenant-invite", func(config *server.ServerConfig, parentId, id string) (result interface{}, uniqueParentId string, err error) { - tenantInvite, err := config.APIRepository.TenantInvite().GetTenantInvite(id) + timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + tenantInvite, err := config.APIRepository.TenantInvite().GetTenantInvite(timeoutCtx, id) if err != nil { return nil, "", err } - return tenantInvite, tenantInvite.TenantID, nil + return tenantInvite, sqlchelpers.UUIDToStr(tenantInvite.TenantId), nil }) populatorMW.RegisterGetter("slack", func(config *server.ServerConfig, parentId, id string) (result interface{}, uniqueParentId string, err error) { - slackWebhook, err := config.APIRepository.Slack().GetSlackWebhookById(id) + timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + slackWebhook, err := config.APIRepository.Slack().GetSlackWebhookById(timeoutCtx, id) if err != nil { return nil, "", err } - return slackWebhook, slackWebhook.TenantID, nil + return slackWebhook, sqlchelpers.UUIDToStr(slackWebhook.TenantId), nil }) populatorMW.RegisterGetter("alert-email-group", func(config *server.ServerConfig, parentId, id string) (result interface{}, uniqueParentId string, err error) { - emailGroup, err := config.APIRepository.TenantAlertingSettings().GetTenantAlertGroupById(id) + timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + emailGroup, err := config.APIRepository.TenantAlertingSettings().GetTenantAlertGroupById(timeoutCtx, id) if err != nil { return nil, "", err } - return emailGroup, emailGroup.TenantID, nil + return emailGroup, 
sqlchelpers.UUIDToStr(emailGroup.TenantId), nil }) populatorMW.RegisterGetter("sns", func(config *server.ServerConfig, parentId, id string) (result interface{}, uniqueParentId string, err error) { - snsIntegration, err := config.APIRepository.SNS().GetSNSIntegrationById(id) + timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + snsIntegration, err := config.APIRepository.SNS().GetSNSIntegrationById(timeoutCtx, id) if err != nil { return nil, "", err } - return snsIntegration, snsIntegration.TenantID, nil + return snsIntegration, sqlchelpers.UUIDToStr(snsIntegration.TenantId), nil }) populatorMW.RegisterGetter("workflow", func(config *server.ServerConfig, parentId, id string) (result interface{}, uniqueParentId string, err error) { @@ -294,13 +319,16 @@ func (t *APIServer) registerSpec(g *echo.Group, spec *openapi3.T) (*populator.Po }) populatorMW.RegisterGetter("event", func(config *server.ServerConfig, parentId, id string) (result interface{}, uniqueParentId string, err error) { - event, err := config.APIRepository.Event().GetEventById(id) + timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + event, err := config.APIRepository.Event().GetEventById(timeoutCtx, id) if err != nil { return nil, "", err } - return event, event.TenantID, nil + return event, sqlchelpers.UUIDToStr(event.TenantId), nil }) populatorMW.RegisterGetter("worker", func(config *server.ServerConfig, parentId, id string) (result interface{}, uniqueParentId string, err error) { @@ -314,21 +342,38 @@ func (t *APIServer) registerSpec(g *echo.Group, spec *openapi3.T) (*populator.Po }) populatorMW.RegisterGetter("webhook", func(config *server.ServerConfig, parentId, id string) (result interface{}, uniqueParentId string, err error) { - webhookWorker, err := config.APIRepository.WebhookWorker().GetWebhookWorkerByID(id) + timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer 
cancel() + + webhookWorker, err := config.APIRepository.WebhookWorker().GetWebhookWorkerByID(timeoutCtx, id) if err != nil { return nil, "", err } - return webhookWorker, webhookWorker.TenantID, nil + return webhookWorker, sqlchelpers.UUIDToStr(webhookWorker.TenantId), nil }) - populatorMW.RegisterGetter("webhook", func(config *server.ServerConfig, parentId, id string) (result interface{}, uniqueParentId string, err error) { - webhookWorker, err := config.APIRepository.WebhookWorker().GetWebhookWorkerByID(id) + populatorMW.RegisterGetter("task", func(config *server.ServerConfig, parentId, id string) (result interface{}, uniqueParentId string, err error) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + task, err := config.V1.OLAP().ReadTaskRun(ctx, id) + if err != nil { return nil, "", err } - return webhookWorker, webhookWorker.TenantID, nil + return task, sqlchelpers.UUIDToStr(task.TenantID), nil + }) + + populatorMW.RegisterGetter("v1-workflow-run", func(config *server.ServerConfig, parentId, id string) (result interface{}, uniqueParentId string, err error) { + workflowRun, err := t.config.V1.OLAP().ReadWorkflowRun(context.Background(), sqlchelpers.UUIDFromStr(id)) + + if err != nil { + return nil, "", err + } + + return workflowRun, sqlchelpers.UUIDToStr(workflowRun.WorkflowRun.TenantID), nil }) authnMW := authn.NewAuthN(t.config) diff --git a/build/package/lite.dockerfile b/build/package/lite.dockerfile index 7953ba88c..467fd29fd 100644 --- a/build/package/lite.dockerfile +++ b/build/package/lite.dockerfile @@ -2,11 +2,12 @@ # ------------------- ARG HATCHET_LITE_IMAGE ARG HATCHET_ADMIN_IMAGE +ARG HATCHET_MIGRATE_IMAGE # Stage 1: copy from the existing Go built image FROM $HATCHET_LITE_IMAGE as lite-binary-base - FROM $HATCHET_ADMIN_IMAGE as admin-binary-base +FROM $HATCHET_MIGRATE_IMAGE as migrate-binary-base # Stage 2: build the frontend FROM node:18-alpine as frontend-build @@ -30,12 +31,11 @@ RUN curl -sSf 
https://atlasgo.sh | sh COPY --from=lite-binary-base /hatchet/hatchet-lite ./hatchet-lite COPY --from=admin-binary-base /hatchet/hatchet-admin ./hatchet-admin +COPY --from=migrate-binary-base /hatchet/hatchet-migrate ./hatchet-migrate COPY --from=frontend-build /app/dist ./static-assets # Copy entrypoint script -COPY ./hack/db/atlas-apply.sh ./atlas-apply.sh COPY ./hack/lite/start.sh ./entrypoint.sh -COPY ./sql/migrations ./sql/migrations ENV LITE_STATIC_ASSET_DIR=/static-assets ENV LITE_FRONTEND_PORT=8081 @@ -43,7 +43,6 @@ ENV LITE_RUNTIME_PORT=8888 # Make entrypoint script executable RUN chmod +x ./entrypoint.sh -RUN chmod +x ./atlas-apply.sh EXPOSE 8888 7070 diff --git a/build/package/migrate.dockerfile b/build/package/migrate.dockerfile deleted file mode 100644 index 1c6a84df7..000000000 --- a/build/package/migrate.dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM alpine as deployment - -# install bash via apk -RUN apk update && apk add --no-cache bash gcc musl-dev openssl bash ca-certificates curl postgresql-client - -RUN curl -sSf https://atlasgo.sh | sh - -COPY ./hack/db/atlas-apply.sh ./atlas-apply.sh -COPY ./sql/migrations ./sql/migrations - -RUN chmod +x ./atlas-apply.sh - -# Run the entrypoint script -CMD ["./atlas-apply.sh"] diff --git a/build/package/servers.dockerfile b/build/package/servers.dockerfile index 04f2e87b0..c1e292eb5 100644 --- a/build/package/servers.dockerfile +++ b/build/package/servers.dockerfile @@ -13,15 +13,11 @@ COPY go.mod go.sum ./ RUN go mod download -# prefetch the binaries, so that they will be cached and not downloaded on each change -RUN go run github.com/steebchen/prisma-client-go prefetch - COPY /api ./api COPY /api-contracts ./api-contracts COPY /internal ./internal COPY /pkg ./pkg COPY /hack ./hack -COPY /prisma ./prisma COPY /cmd ./cmd RUN go generate ./... @@ -31,7 +27,7 @@ RUN go generate ./... 
FROM node:18-alpine as build-openapi WORKDIR /openapi -RUN npm install -g npm@8.1 @redocly/cli@latest prisma +RUN npm install -g npm@8.1 @redocly/cli@latest COPY /api-contracts/openapi ./openapi @@ -47,8 +43,8 @@ ARG VERSION=v0.1.0-alpha.0 ARG SERVER_TARGET # check if the target is empty or not set to api, engine, lite, or admin -RUN if [ -z "$SERVER_TARGET" ] || [ "$SERVER_TARGET" != "api" ] && [ "$SERVER_TARGET" != "engine" ] && [ "$SERVER_TARGET" != "admin" ] && [ "$SERVER_TARGET" != "lite" ]; then \ - echo "SERVER_TARGET must be set to 'api', 'engine', or 'admin'"; \ +RUN if [ -z "$SERVER_TARGET" ] || [ "$SERVER_TARGET" != "api" ] && [ "$SERVER_TARGET" != "engine" ] && [ "$SERVER_TARGET" != "admin" ] && [ "$SERVER_TARGET" != "lite" ] && [ "$SERVER_TARGET" != "migrate" ]; then \ + echo "SERVER_TARGET must be set to 'api', 'engine', 'admin', 'lite', or 'migrate'"; \ exit 1; \ fi @@ -75,7 +71,6 @@ WORKDIR /hatchet # openssl and bash needed for admin build RUN apk update && apk add --no-cache gcc musl-dev openssl bash ca-certificates -COPY --from=base /hatchet/prisma ./prisma COPY --from=build-go /hatchet/bin/hatchet-${SERVER_TARGET} /hatchet/ EXPOSE 8080 diff --git a/cmd/hatchet-admin/cli/seed.go b/cmd/hatchet-admin/cli/seed.go index fd7f0f974..080b271f1 100644 --- a/cmd/hatchet-admin/cli/seed.go +++ b/cmd/hatchet-admin/cli/seed.go @@ -12,8 +12,7 @@ import ( "github.com/hatchet-dev/hatchet/pkg/config/loader" "github.com/hatchet-dev/hatchet/pkg/repository" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" ) // seedCmd seeds the database with initial data @@ -58,11 +57,11 @@ func runSeed(cf *loader.ConfigLoader) error { return err } - user, err := dc.APIRepository.User().GetUserByEmail(dc.Seed.AdminEmail) + user, err := dc.APIRepository.User().GetUserByEmail(context.Background(), dc.Seed.AdminEmail) if err != nil { - 
if errors.Is(err, db.ErrNotFound) { - user, err = dc.APIRepository.User().CreateUser(&repository.CreateUserOpts{ + if errors.Is(err, pgx.ErrNoRows) { + user, err = dc.APIRepository.User().CreateUser(context.Background(), &repository.CreateUserOpts{ Email: dc.Seed.AdminEmail, Name: repository.StringPtr(dc.Seed.AdminName), EmailVerified: repository.BoolPtr(true), @@ -77,16 +76,16 @@ func runSeed(cf *loader.ConfigLoader) error { } } - userId = user.ID + userId = sqlchelpers.UUIDToStr(user.ID) } - tenant, err := dc.APIRepository.Tenant().GetTenantBySlug("default") + tenant, err := dc.APIRepository.Tenant().GetTenantBySlug(context.Background(), "default") if err != nil { - if errors.Is(err, db.ErrNotFound) { + if errors.Is(err, pgx.ErrNoRows) { // seed an example tenant // initialize a tenant - sqlcTenant, err := dc.APIRepository.Tenant().CreateTenant(&repository.CreateTenantOpts{ + sqlcTenant, err := dc.APIRepository.Tenant().CreateTenant(context.Background(), &repository.CreateTenantOpts{ ID: &dc.Seed.DefaultTenantID, Name: dc.Seed.DefaultTenantName, Slug: dc.Seed.DefaultTenantSlug, @@ -96,16 +95,16 @@ func runSeed(cf *loader.ConfigLoader) error { return err } - tenant, err = dc.APIRepository.Tenant().GetTenantByID(sqlchelpers.UUIDToStr(sqlcTenant.ID)) + tenant, err = dc.APIRepository.Tenant().GetTenantByID(context.Background(), sqlchelpers.UUIDToStr(sqlcTenant.ID)) if err != nil { return err } - fmt.Println("created tenant", tenant.ID) + fmt.Println("created tenant", sqlchelpers.UUIDToStr(tenant.ID)) // add the user to the tenant - _, err = dc.APIRepository.Tenant().CreateTenantMember(tenant.ID, &repository.CreateTenantMemberOpts{ + _, err = dc.APIRepository.Tenant().CreateTenantMember(context.Background(), sqlchelpers.UUIDToStr(tenant.ID), &repository.CreateTenantMemberOpts{ Role: "OWNER", UserId: userId, }) @@ -119,7 +118,7 @@ func runSeed(cf *loader.ConfigLoader) error { } if dc.Seed.IsDevelopment { - err = seedDev(dc.EngineRepository, tenant.ID) + err = 
seedDev(dc.EngineRepository, sqlchelpers.UUIDToStr(tenant.ID)) if err != nil { return err diff --git a/cmd/hatchet-engine/engine/run.go b/cmd/hatchet-engine/engine/run.go index 2aa1d07af..20fa3cdb8 100644 --- a/cmd/hatchet-engine/engine/run.go +++ b/cmd/hatchet-engine/engine/run.go @@ -8,9 +8,12 @@ import ( "time" "github.com/hatchet-dev/hatchet/internal/services/admin" + adminv1 "github.com/hatchet-dev/hatchet/internal/services/admin/v1" "github.com/hatchet-dev/hatchet/internal/services/controllers/events" "github.com/hatchet-dev/hatchet/internal/services/controllers/jobs" "github.com/hatchet-dev/hatchet/internal/services/controllers/retention" + "github.com/hatchet-dev/hatchet/internal/services/controllers/v1/olap" + "github.com/hatchet-dev/hatchet/internal/services/controllers/v1/task" "github.com/hatchet-dev/hatchet/internal/services/controllers/workflows" "github.com/hatchet-dev/hatchet/internal/services/dispatcher" "github.com/hatchet-dev/hatchet/internal/services/grpc" @@ -18,6 +21,7 @@ import ( "github.com/hatchet-dev/hatchet/internal/services/ingestor" "github.com/hatchet-dev/hatchet/internal/services/partition" "github.com/hatchet-dev/hatchet/internal/services/scheduler" + schedulerv1 "github.com/hatchet-dev/hatchet/internal/services/scheduler/v1" "github.com/hatchet-dev/hatchet/internal/services/ticker" "github.com/hatchet-dev/hatchet/internal/services/webhooks" "github.com/hatchet-dev/hatchet/internal/telemetry" @@ -228,16 +232,43 @@ func runV0Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro Name: "scheduler", Fn: cleanup, }) + + sv1, err := schedulerv1.New( + schedulerv1.WithAlerter(sc.Alerter), + schedulerv1.WithMessageQueue(sc.MessageQueueV1), + schedulerv1.WithRepository(sc.EngineRepository), + schedulerv1.WithV2Repository(sc.V1), + schedulerv1.WithLogger(sc.Logger), + schedulerv1.WithPartition(p), + schedulerv1.WithQueueLoggerConfig(&sc.AdditionalLoggers.Queue), + schedulerv1.WithSchedulerPool(sc.SchedulingPoolV1), + ) + + if 
err != nil { + return nil, fmt.Errorf("could not create scheduler (v1): %w", err) + } + + cleanup, err = sv1.Start() + + if err != nil { + return nil, fmt.Errorf("could not start scheduler (v1): %w", err) + } + + teardown = append(teardown, Teardown{ + Name: "schedulerv1", + Fn: cleanup, + }) } if sc.HasService("ticker") { t, err := ticker.New( ticker.WithMessageQueue(sc.MessageQueue), + ticker.WithMessageQueueV1(sc.MessageQueueV1), ticker.WithRepository(sc.EngineRepository), + ticker.WithRepositoryV1(sc.V1), ticker.WithLogger(sc.Logger), ticker.WithTenantAlerter(sc.TenantAlerter), ticker.WithEntitlementsRepository(sc.EntitlementRepository), - ticker.WithPartition(p), ) if err != nil { @@ -298,6 +329,55 @@ func runV0Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro Name: "workflows controller", Fn: cleanupWorkflows, }) + + tasks, err := task.New( + task.WithAlerter(sc.Alerter), + task.WithMessageQueue(sc.MessageQueueV1), + task.WithRepository(sc.EngineRepository), + task.WithV1Repository(sc.V1), + task.WithLogger(sc.Logger), + task.WithPartition(p), + task.WithQueueLoggerConfig(&sc.AdditionalLoggers.Queue), + task.WithPgxStatsLoggerConfig(&sc.AdditionalLoggers.PgxStats), + ) + + if err != nil { + return nil, fmt.Errorf("could not create tasks controller: %w", err) + } + + cleanupTasks, err := tasks.Start() + + if err != nil { + return nil, fmt.Errorf("could not start tasks controller: %w", err) + } + + teardown = append(teardown, Teardown{ + Name: "tasks controller", + Fn: cleanupTasks, + }) + + olap, err := olap.New( + olap.WithAlerter(sc.Alerter), + olap.WithMessageQueue(sc.MessageQueueV1), + olap.WithRepository(sc.V1), + olap.WithLogger(sc.Logger), + olap.WithPartition(p), + ) + + if err != nil { + return nil, fmt.Errorf("could not create olap controller: %w", err) + } + + cleanupOlap, err := olap.Start() + + if err != nil { + return nil, fmt.Errorf("could not start olap controller: %w", err) + } + + teardown = append(teardown, Teardown{ + 
Name: "olap controller", + Fn: cleanupOlap, + }) } if sc.HasService("retention") { @@ -332,7 +412,9 @@ func runV0Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro d, err := dispatcher.New( dispatcher.WithAlerter(sc.Alerter), dispatcher.WithMessageQueue(sc.MessageQueue), + dispatcher.WithMessageQueueV1(sc.MessageQueueV1), dispatcher.WithRepository(sc.EngineRepository), + dispatcher.WithRepositoryV1(sc.V1), dispatcher.WithLogger(sc.Logger), dispatcher.WithEntitlementsRepository(sc.EntitlementRepository), dispatcher.WithCache(cacheInstance), @@ -359,8 +441,10 @@ func runV0Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro sc.EngineRepository.Log(), ), ingestor.WithMessageQueue(sc.MessageQueue), + ingestor.WithMessageQueueV1(sc.MessageQueueV1), ingestor.WithEntitlementsRepository(sc.EntitlementRepository), ingestor.WithStepRunRepository(sc.EngineRepository.StepRun()), + ingestor.WithRepositoryV1(sc.V1), ) if err != nil { @@ -369,18 +453,30 @@ func runV0Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro adminSvc, err := admin.NewAdminService( admin.WithRepository(sc.EngineRepository), + admin.WithRepositoryV1(sc.V1), admin.WithMessageQueue(sc.MessageQueue), + admin.WithMessageQueueV1(sc.MessageQueueV1), admin.WithEntitlementsRepository(sc.EntitlementRepository), ) if err != nil { return nil, fmt.Errorf("could not create admin service: %w", err) } + adminv1Svc, err := adminv1.NewAdminService( + adminv1.WithRepository(sc.V1), + adminv1.WithMessageQueue(sc.MessageQueueV1), + ) + + if err != nil { + return nil, fmt.Errorf("could not create admin service (v1): %w", err) + } + grpcOpts := []grpc.ServerOpt{ grpc.WithConfig(sc), grpc.WithIngestor(ei), grpc.WithDispatcher(d), grpc.WithAdmin(adminSvc), + grpc.WithAdminV1(adminv1Svc), grpc.WithLogger(sc.Logger), grpc.WithAlerter(sc.Alerter), grpc.WithTLSConfig(sc.TLSConfig), @@ -451,7 +547,7 @@ func runV0Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, 
erro Fn: cleanup1, }) - wh := webhooks.New(sc, p) + wh := webhooks.New(sc, p, l) cleanup2, err := wh.Start() if err != nil { @@ -564,11 +660,12 @@ func runV1Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro t, err := ticker.New( ticker.WithMessageQueue(sc.MessageQueue), + ticker.WithMessageQueueV1(sc.MessageQueueV1), ticker.WithRepository(sc.EngineRepository), + ticker.WithRepositoryV1(sc.V1), ticker.WithLogger(sc.Logger), ticker.WithTenantAlerter(sc.TenantAlerter), ticker.WithEntitlementsRepository(sc.EntitlementRepository), - ticker.WithPartition(p), ) if err != nil { @@ -660,6 +757,55 @@ func runV1Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro Fn: cleanupRetention, }) + tasks, err := task.New( + task.WithAlerter(sc.Alerter), + task.WithMessageQueue(sc.MessageQueueV1), + task.WithRepository(sc.EngineRepository), + task.WithV1Repository(sc.V1), + task.WithLogger(sc.Logger), + task.WithPartition(p), + task.WithQueueLoggerConfig(&sc.AdditionalLoggers.Queue), + task.WithPgxStatsLoggerConfig(&sc.AdditionalLoggers.PgxStats), + ) + + if err != nil { + return nil, fmt.Errorf("could not create tasks controller: %w", err) + } + + cleanupTasks, err := tasks.Start() + + if err != nil { + return nil, fmt.Errorf("could not start tasks controller: %w", err) + } + + teardown = append(teardown, Teardown{ + Name: "tasks controller", + Fn: cleanupTasks, + }) + + olap, err := olap.New( + olap.WithAlerter(sc.Alerter), + olap.WithMessageQueue(sc.MessageQueueV1), + olap.WithRepository(sc.V1), + olap.WithLogger(sc.Logger), + olap.WithPartition(p), + ) + + if err != nil { + return nil, fmt.Errorf("could not create olap controller: %w", err) + } + + cleanupOlap, err := olap.Start() + + if err != nil { + return nil, fmt.Errorf("could not start olap controller: %w", err) + } + + teardown = append(teardown, Teardown{ + Name: "olap controller", + Fn: cleanupOlap, + }) + cleanup1, err := p.StartTenantWorkerPartition(ctx) if err != nil { @@ -671,7 
+817,7 @@ func runV1Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro Fn: cleanup1, }) - wh := webhooks.New(sc, p) + wh := webhooks.New(sc, p, l) cleanup2, err := wh.Start() @@ -692,7 +838,9 @@ func runV1Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro d, err := dispatcher.New( dispatcher.WithAlerter(sc.Alerter), dispatcher.WithMessageQueue(sc.MessageQueue), + dispatcher.WithMessageQueueV1(sc.MessageQueueV1), dispatcher.WithRepository(sc.EngineRepository), + dispatcher.WithRepositoryV1(sc.V1), dispatcher.WithLogger(sc.Logger), dispatcher.WithEntitlementsRepository(sc.EntitlementRepository), dispatcher.WithCache(cacheInstance), @@ -720,8 +868,10 @@ func runV1Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro sc.EngineRepository.Log(), ), ingestor.WithMessageQueue(sc.MessageQueue), + ingestor.WithMessageQueueV1(sc.MessageQueueV1), ingestor.WithEntitlementsRepository(sc.EntitlementRepository), ingestor.WithStepRunRepository(sc.EngineRepository.StepRun()), + ingestor.WithRepositoryV1(sc.V1), ) if err != nil { @@ -730,7 +880,9 @@ func runV1Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro adminSvc, err := admin.NewAdminService( admin.WithRepository(sc.EngineRepository), + admin.WithRepositoryV1(sc.V1), admin.WithMessageQueue(sc.MessageQueue), + admin.WithMessageQueueV1(sc.MessageQueueV1), admin.WithEntitlementsRepository(sc.EntitlementRepository), ) @@ -738,11 +890,21 @@ func runV1Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro return nil, fmt.Errorf("could not create admin service: %w", err) } + adminv1Svc, err := adminv1.NewAdminService( + adminv1.WithRepository(sc.V1), + adminv1.WithMessageQueue(sc.MessageQueueV1), + ) + + if err != nil { + return nil, fmt.Errorf("could not create admin service (v1): %w", err) + } + grpcOpts := []grpc.ServerOpt{ grpc.WithConfig(sc), grpc.WithIngestor(ei), grpc.WithDispatcher(d), grpc.WithAdmin(adminSvc), + 
grpc.WithAdminV1(adminv1Svc), grpc.WithLogger(sc.Logger), grpc.WithAlerter(sc.Alerter), grpc.WithTLSConfig(sc.TLSConfig), @@ -838,6 +1000,32 @@ func runV1Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro Name: "scheduler", Fn: cleanup, }) + + sv1, err := schedulerv1.New( + schedulerv1.WithAlerter(sc.Alerter), + schedulerv1.WithMessageQueue(sc.MessageQueueV1), + schedulerv1.WithRepository(sc.EngineRepository), + schedulerv1.WithV2Repository(sc.V1), + schedulerv1.WithLogger(sc.Logger), + schedulerv1.WithPartition(p), + schedulerv1.WithQueueLoggerConfig(&sc.AdditionalLoggers.Queue), + schedulerv1.WithSchedulerPool(sc.SchedulingPoolV1), + ) + + if err != nil { + return nil, fmt.Errorf("could not create scheduler (v1): %w", err) + } + + cleanup, err = sv1.Start() + + if err != nil { + return nil, fmt.Errorf("could not start scheduler (v1): %w", err) + } + + teardown = append(teardown, Teardown{ + Name: "schedulerv1", + Fn: cleanup, + }) } teardown = append(teardown, Teardown{ diff --git a/cmd/hatchet-migrate/main.go b/cmd/hatchet-migrate/main.go new file mode 100644 index 000000000..1e80d0578 --- /dev/null +++ b/cmd/hatchet-migrate/main.go @@ -0,0 +1,48 @@ +package main + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + + "github.com/hatchet-dev/hatchet/cmd/hatchet-migrate/migrate" + "github.com/hatchet-dev/hatchet/pkg/cmdutils" +) + +var printVersion bool + +// rootCmd represents the base command when called without any subcommands +var rootCmd = &cobra.Command{ + Use: "hatchet-migrate", + Short: "hatchet-migrate runs database migrations for Hatchet.", + Run: func(cmd *cobra.Command, args []string) { + if printVersion { + fmt.Println(Version) + + os.Exit(0) + } + + ctx, cancel := cmdutils.NewInterruptContext() + defer cancel() + + migrate.RunMigrations(ctx) + }, +} + +// Version will be linked by an ldflag during build +var Version = "v0.1.0-alpha.0" + +func main() { + rootCmd.PersistentFlags().BoolVar( + &printVersion, + "version", + false, 
+ "print version and exit.", + ) + + if err := rootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} diff --git a/sql/migrations/20240115180414_init.sql b/cmd/hatchet-migrate/migrate/migrations/20240115180414_init.sql similarity index 99% rename from sql/migrations/20240115180414_init.sql rename to cmd/hatchet-migrate/migrate/migrations/20240115180414_init.sql index 5dc7bbd23..9eac538ef 100644 --- a/sql/migrations/20240115180414_init.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240115180414_init.sql @@ -1,3 +1,4 @@ +-- +goose Up -- CreateEnum CREATE TYPE "TenantMemberRole" AS ENUM ('OWNER', 'ADMIN', 'MEMBER'); diff --git a/sql/migrations/20240122014727_v0_6_0.sql b/cmd/hatchet-migrate/migrate/migrations/20240122014727_v0_6_0.sql similarity index 99% rename from sql/migrations/20240122014727_v0_6_0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240122014727_v0_6_0.sql index 9ae9238df..4ab706304 100644 --- a/sql/migrations/20240122014727_v0_6_0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240122014727_v0_6_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- CreateEnum CREATE TYPE "InviteLinkStatus" AS ENUM ('PENDING', 'ACCEPTED', 'REJECTED'); diff --git a/sql/migrations/20240126235456_v0_7_0.sql b/cmd/hatchet-migrate/migrate/migrations/20240126235456_v0_7_0.sql similarity index 99% rename from sql/migrations/20240126235456_v0_7_0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240126235456_v0_7_0.sql index c908638b2..682f0f279 100644 --- a/sql/migrations/20240126235456_v0_7_0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240126235456_v0_7_0.sql @@ -1,3 +1,4 @@ +-- +goose Up /* Warnings: diff --git a/sql/migrations/20240129040510_v0_8_0.sql b/cmd/hatchet-migrate/migrate/migrations/20240129040510_v0_8_0.sql similarity index 99% rename from sql/migrations/20240129040510_v0_8_0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240129040510_v0_8_0.sql index da82afdbd..589bef9d8 100644 --- a/sql/migrations/20240129040510_v0_8_0.sql 
+++ b/cmd/hatchet-migrate/migrate/migrations/20240129040510_v0_8_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- CreateEnum CREATE TYPE "ConcurrencyLimitStrategy" AS ENUM ('CANCEL_IN_PROGRESS', 'DROP_NEWEST', 'QUEUE_NEWEST'); diff --git a/sql/migrations/20240202042355_v0_9_0.sql b/cmd/hatchet-migrate/migrate/migrations/20240202042355_v0_9_0.sql similarity index 99% rename from sql/migrations/20240202042355_v0_9_0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240202042355_v0_9_0.sql index 7cf6bf655..b11e59186 100644 --- a/sql/migrations/20240202042355_v0_9_0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240202042355_v0_9_0.sql @@ -1,3 +1,4 @@ +-- +goose Up /* Warnings: diff --git a/sql/migrations/20240209132837_v0_10_0.sql b/cmd/hatchet-migrate/migrate/migrations/20240209132837_v0_10_0.sql similarity index 91% rename from sql/migrations/20240209132837_v0_10_0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240209132837_v0_10_0.sql index 2d9507ab7..e9b097de5 100644 --- a/sql/migrations/20240209132837_v0_10_0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240209132837_v0_10_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- AlterTable ALTER TABLE "Step" ADD COLUMN "customUserData" JSONB; diff --git a/sql/migrations/20240215162148_v0_10_2.sql b/cmd/hatchet-migrate/migrate/migrations/20240215162148_v0_10_2.sql similarity index 98% rename from sql/migrations/20240215162148_v0_10_2.sql rename to cmd/hatchet-migrate/migrate/migrations/20240215162148_v0_10_2.sql index 770d68023..2205379c3 100644 --- a/sql/migrations/20240215162148_v0_10_2.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240215162148_v0_10_2.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Create sequence and alter the table for StepRun CREATE SEQUENCE step_run_order_seq; ALTER TABLE "StepRun" ALTER COLUMN "order" TYPE BIGINT; @@ -9,4 +10,3 @@ CREATE SEQUENCE workflow_version_order_seq; ALTER TABLE "WorkflowVersion" ALTER COLUMN "order" TYPE BIGINT; ALTER SEQUENCE workflow_version_order_seq OWNED BY 
"WorkflowVersion"."order"; ALTER TABLE "WorkflowVersion" ALTER COLUMN "order" SET DEFAULT nextval('workflow_version_order_seq'::regclass); - diff --git a/sql/migrations/20240216133745_v0_11_0.sql b/cmd/hatchet-migrate/migrate/migrations/20240216133745_v0_11_0.sql similarity index 98% rename from sql/migrations/20240216133745_v0_11_0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240216133745_v0_11_0.sql index 66e80d4f8..cc4f3ef06 100644 --- a/sql/migrations/20240216133745_v0_11_0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240216133745_v0_11_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- CreateEnum CREATE TYPE "VcsProvider" AS ENUM ('GITHUB'); @@ -289,6 +290,7 @@ VALUES 'internal' ) ON CONFLICT DO NOTHING; +-- +goose StatementBegin CREATE OR REPLACE FUNCTION prevent_internal_name_or_slug() RETURNS trigger AS $$ BEGIN @@ -298,7 +300,8 @@ BEGIN RETURN NEW; END; $$ LANGUAGE plpgsql; +-- +goose StatementEnd CREATE TRIGGER check_name_or_slug_before_insert_or_update BEFORE INSERT OR UPDATE ON "Tenant" -FOR EACH ROW EXECUTE FUNCTION prevent_internal_name_or_slug(); \ No newline at end of file +FOR EACH ROW EXECUTE FUNCTION prevent_internal_name_or_slug(); diff --git a/sql/migrations/20240226051822_v0_12_0.sql b/cmd/hatchet-migrate/migrate/migrations/20240226051822_v0_12_0.sql similarity index 92% rename from sql/migrations/20240226051822_v0_12_0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240226051822_v0_12_0.sql index c9a23a476..0c0a64d16 100644 --- a/sql/migrations/20240226051822_v0_12_0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240226051822_v0_12_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- AlterEnum ALTER TYPE "ConcurrencyLimitStrategy" ADD VALUE 'GROUP_ROUND_ROBIN'; diff --git a/sql/migrations/20240227181732_v0_13_0.sql b/cmd/hatchet-migrate/migrate/migrations/20240227181732_v0_13_0.sql similarity index 96% rename from sql/migrations/20240227181732_v0_13_0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240227181732_v0_13_0.sql index 
9fbb9e533..3c311ea19 100644 --- a/sql/migrations/20240227181732_v0_13_0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240227181732_v0_13_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- DropForeignKey ALTER TABLE "Worker" DROP CONSTRAINT "Worker_dispatcherId_fkey"; diff --git a/sql/migrations/20240228050417_v0_13_2.sql b/cmd/hatchet-migrate/migrate/migrations/20240228050417_v0_13_2.sql similarity index 95% rename from sql/migrations/20240228050417_v0_13_2.sql rename to cmd/hatchet-migrate/migrate/migrations/20240228050417_v0_13_2.sql index 98726c99e..1f238165f 100644 --- a/sql/migrations/20240228050417_v0_13_2.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240228050417_v0_13_2.sql @@ -1,3 +1,4 @@ +-- +goose Up -- AlterTable ALTER TABLE "GetGroupKeyRun" ADD COLUMN "scheduleTimeoutAt" TIMESTAMP(3); diff --git a/sql/migrations/20240229232811_v0_14_0.sql b/cmd/hatchet-migrate/migrate/migrations/20240229232811_v0_14_0.sql similarity index 95% rename from sql/migrations/20240229232811_v0_14_0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240229232811_v0_14_0.sql index d1e9d658b..ad48b809c 100644 --- a/sql/migrations/20240229232811_v0_14_0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240229232811_v0_14_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- CreateEnum CREATE TYPE "LogLineLevel" AS ENUM ('DEBUG', 'INFO', 'WARN', 'ERROR'); @@ -18,4 +19,4 @@ CREATE TABLE ALTER TABLE "LogLine" ADD CONSTRAINT "LogLine_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant" ("id") ON DELETE CASCADE ON UPDATE CASCADE; -- AddForeignKey -ALTER TABLE "LogLine" ADD CONSTRAINT "LogLine_stepRunId_fkey" FOREIGN KEY ("stepRunId") REFERENCES "StepRun" ("id") ON DELETE SET NULL ON UPDATE CASCADE; \ No newline at end of file +ALTER TABLE "LogLine" ADD CONSTRAINT "LogLine_stepRunId_fkey" FOREIGN KEY ("stepRunId") REFERENCES "StepRun" ("id") ON DELETE SET NULL ON UPDATE CASCADE; diff --git a/sql/migrations/20240304060408_v0_15_0.sql 
b/cmd/hatchet-migrate/migrate/migrations/20240304060408_v0_15_0.sql similarity index 98% rename from sql/migrations/20240304060408_v0_15_0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240304060408_v0_15_0.sql index c67d69611..65f672927 100644 --- a/sql/migrations/20240304060408_v0_15_0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240304060408_v0_15_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- CreateTable CREATE TABLE "SNSIntegration" ( "id" UUID NOT NULL, diff --git a/sql/migrations/20240320215205_v0_17_0.sql b/cmd/hatchet-migrate/migrate/migrations/20240320215205_v0_17_0.sql similarity index 90% rename from sql/migrations/20240320215205_v0_17_0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240320215205_v0_17_0.sql index 101d0e2a5..9c60e0e84 100644 --- a/sql/migrations/20240320215205_v0_17_0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240320215205_v0_17_0.sql @@ -1,3 +1,5 @@ +-- +goose Up +-- +goose StatementBegin CREATE OR REPLACE FUNCTION convert_duration_to_interval(duration text) RETURNS interval AS $$ DECLARE num_value INT; @@ -15,4 +17,5 @@ BEGIN ELSE '0 seconds'::interval END; END; -$$ LANGUAGE plpgsql; \ No newline at end of file +$$ LANGUAGE plpgsql; +-- +goose StatementEnd diff --git a/sql/migrations/20240321215205_v0_17_1.sql b/cmd/hatchet-migrate/migrate/migrations/20240321215205_v0_17_1.sql similarity index 90% rename from sql/migrations/20240321215205_v0_17_1.sql rename to cmd/hatchet-migrate/migrate/migrations/20240321215205_v0_17_1.sql index 6bcc6f12c..7fd4c71a3 100644 --- a/sql/migrations/20240321215205_v0_17_1.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240321215205_v0_17_1.sql @@ -1,3 +1,5 @@ +-- +goose Up +-- +goose StatementBegin CREATE OR REPLACE FUNCTION convert_duration_to_interval(duration text) RETURNS interval AS $$ DECLARE num_value INT; @@ -15,4 +17,5 @@ BEGIN ELSE '5 minutes'::interval END; END; -$$ LANGUAGE plpgsql; \ No newline at end of file +$$ LANGUAGE plpgsql; +-- +goose StatementEnd diff --git 
a/sql/migrations/20240326151030_v0_18_0.sql b/cmd/hatchet-migrate/migrate/migrations/20240326151030_v0_18_0.sql similarity index 99% rename from sql/migrations/20240326151030_v0_18_0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240326151030_v0_18_0.sql index 23794c4ca..1bd12a5fb 100644 --- a/sql/migrations/20240326151030_v0_18_0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240326151030_v0_18_0.sql @@ -1,3 +1,4 @@ +-- +goose Up /* Warnings: diff --git a/sql/migrations/20240331162333_v0_18_1.sql b/cmd/hatchet-migrate/migrate/migrations/20240331162333_v0_18_1.sql similarity index 96% rename from sql/migrations/20240331162333_v0_18_1.sql rename to cmd/hatchet-migrate/migrate/migrations/20240331162333_v0_18_1.sql index b4a5879c1..19c5cfa7c 100644 --- a/sql/migrations/20240331162333_v0_18_1.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240331162333_v0_18_1.sql @@ -1,3 +1,4 @@ +-- +goose Up -- CreateTable CREATE TABLE "WorkerSemaphore" ( "workerId" UUID NOT NULL, diff --git a/sql/migrations/20240402034010_v0_19_0.sql b/cmd/hatchet-migrate/migrate/migrations/20240402034010_v0_19_0.sql similarity index 96% rename from sql/migrations/20240402034010_v0_19_0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240402034010_v0_19_0.sql index b87b525f9..79e752570 100644 --- a/sql/migrations/20240402034010_v0_19_0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240402034010_v0_19_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- CreateTable CREATE TABLE "StepRateLimit" ( "units" INTEGER NOT NULL, @@ -52,6 +53,7 @@ ALTER TABLE "StreamEvent" ADD CONSTRAINT "StreamEvent_tenantId_fkey" FOREIGN KEY -- AddForeignKey ALTER TABLE "StreamEvent" ADD CONSTRAINT "StreamEvent_stepRunId_fkey" FOREIGN KEY ("stepRunId") REFERENCES "StepRun"("id") ON DELETE SET NULL ON UPDATE CASCADE; +-- +goose StatementBegin CREATE OR REPLACE FUNCTION get_refill_value(rate_limit "RateLimit") RETURNS INTEGER AS $$ DECLARE @@ -64,4 +66,5 @@ BEGIN END IF; RETURN refill_amount; END; -$$ LANGUAGE 
plpgsql; \ No newline at end of file +$$ LANGUAGE plpgsql; +-- +goose StatementEnd diff --git a/sql/migrations/20240424091046_v0_21_9.sql b/cmd/hatchet-migrate/migrate/migrations/20240424091046_v0_21_9.sql similarity index 89% rename from sql/migrations/20240424091046_v0_21_9.sql rename to cmd/hatchet-migrate/migrate/migrations/20240424091046_v0_21_9.sql index 7346db4ea..851e53fda 100644 --- a/sql/migrations/20240424091046_v0_21_9.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240424091046_v0_21_9.sql @@ -1,2 +1,3 @@ +-- +goose Up -- AlterTable ALTER TABLE "WorkflowTriggerCronRef" ADD COLUMN "enabled" BOOLEAN NOT NULL DEFAULT true; diff --git a/sql/migrations/20240430161943_v0_22_1.sql b/cmd/hatchet-migrate/migrate/migrations/20240430161943_v0_22_1.sql similarity index 94% rename from sql/migrations/20240430161943_v0_22_1.sql rename to cmd/hatchet-migrate/migrate/migrations/20240430161943_v0_22_1.sql index d23aac8b0..078d136ac 100644 --- a/sql/migrations/20240430161943_v0_22_1.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240430161943_v0_22_1.sql @@ -1,3 +1,4 @@ +-- +goose Up /* Warnings: diff --git a/sql/migrations/20240503190030_v0_23_0.sql b/cmd/hatchet-migrate/migrate/migrations/20240503190030_v0_23_0.sql similarity index 88% rename from sql/migrations/20240503190030_v0_23_0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240503190030_v0_23_0.sql index fb677a4d5..fca4aee2b 100644 --- a/sql/migrations/20240503190030_v0_23_0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240503190030_v0_23_0.sql @@ -1,2 +1,3 @@ +-- +goose Up -- AlterTable ALTER TABLE "Tenant" ADD COLUMN "analyticsOptOut" BOOLEAN NOT NULL DEFAULT false; diff --git a/sql/migrations/20240506194242_v0_24_0.sql b/cmd/hatchet-migrate/migrate/migrations/20240506194242_v0_24_0.sql similarity index 98% rename from sql/migrations/20240506194242_v0_24_0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240506194242_v0_24_0.sql index 908490242..08d68d274 100644 --- 
a/sql/migrations/20240506194242_v0_24_0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240506194242_v0_24_0.sql @@ -1,3 +1,4 @@ +-- +goose Up /* Warnings: diff --git a/sql/migrations/20240507200816_v0_25_0.sql b/cmd/hatchet-migrate/migrate/migrations/20240507200816_v0_25_0.sql similarity index 99% rename from sql/migrations/20240507200816_v0_25_0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240507200816_v0_25_0.sql index 8422e38b2..5bd07dcb3 100644 --- a/sql/migrations/20240507200816_v0_25_0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240507200816_v0_25_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- CreateTable CREATE TABLE "TenantAlertingSettings" ( @@ -74,4 +75,4 @@ SELECT gen_random_uuid (), "id" FROM - "Tenant"; \ No newline at end of file + "Tenant"; diff --git a/sql/migrations/20240509213608_v0_26_0.sql b/cmd/hatchet-migrate/migrate/migrations/20240509213608_v0_26_0.sql similarity index 97% rename from sql/migrations/20240509213608_v0_26_0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240509213608_v0_26_0.sql index 8921c6f60..c629c0799 100644 --- a/sql/migrations/20240509213608_v0_26_0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240509213608_v0_26_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- CreateIndex CREATE INDEX "JobRun_workflowRunId_tenantId_idx" ON "JobRun" ("workflowRunId", "tenantId"); @@ -53,4 +54,4 @@ CREATE UNIQUE INDEX "StepRunEvent_id_key" ON "StepRunEvent" ("id"); CREATE INDEX "StepRunEvent_stepRunId_idx" ON "StepRunEvent" ("stepRunId"); -- AddForeignKey -ALTER TABLE "StepRunEvent" ADD CONSTRAINT "StepRunEvent_stepRunId_fkey" FOREIGN KEY ("stepRunId") REFERENCES "StepRun" ("id") ON DELETE CASCADE ON UPDATE CASCADE; \ No newline at end of file +ALTER TABLE "StepRunEvent" ADD CONSTRAINT "StepRunEvent_stepRunId_fkey" FOREIGN KEY ("stepRunId") REFERENCES "StepRun" ("id") ON DELETE CASCADE ON UPDATE CASCADE; diff --git a/sql/migrations/20240514192527_v0_27_0.sql 
b/cmd/hatchet-migrate/migrate/migrations/20240514192527_v0_27_0.sql similarity index 97% rename from sql/migrations/20240514192527_v0_27_0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240514192527_v0_27_0.sql index 666ae42c1..0671f7305 100644 --- a/sql/migrations/20240514192527_v0_27_0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240514192527_v0_27_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- AlterEnum -- This migration adds more than one value to an enum. -- With PostgreSQL versions 11 and earlier, this is not possible diff --git a/sql/migrations/20240514203126_v0_28_0.sql b/cmd/hatchet-migrate/migrate/migrations/20240514203126_v0_28_0.sql similarity index 85% rename from sql/migrations/20240514203126_v0_28_0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240514203126_v0_28_0.sql index e90289267..bd9a0d3c0 100644 --- a/sql/migrations/20240514203126_v0_28_0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240514203126_v0_28_0.sql @@ -1,2 +1,3 @@ +-- +goose Up -- AlterEnum ALTER TYPE "StepRunEventReason" ADD VALUE 'TIMEOUT_REFRESHED'; diff --git a/sql/migrations/20240517204453_v0_28_1.sql b/cmd/hatchet-migrate/migrate/migrations/20240517204453_v0_28_1.sql similarity index 85% rename from sql/migrations/20240517204453_v0_28_1.sql rename to cmd/hatchet-migrate/migrate/migrations/20240517204453_v0_28_1.sql index 9b806a59c..43e2749d1 100644 --- a/sql/migrations/20240517204453_v0_28_1.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240517204453_v0_28_1.sql @@ -1,2 +1,3 @@ +-- +goose Up -- AlterEnum ALTER TYPE "StepRunEventReason" ADD VALUE 'RETRIED_BY_USER'; diff --git a/sql/migrations/20240520152239_v0_28_2.sql b/cmd/hatchet-migrate/migrate/migrations/20240520152239_v0_28_2.sql similarity index 87% rename from sql/migrations/20240520152239_v0_28_2.sql rename to cmd/hatchet-migrate/migrate/migrations/20240520152239_v0_28_2.sql index 06c6fbf79..ceafe373c 100644 --- a/sql/migrations/20240520152239_v0_28_2.sql +++ 
b/cmd/hatchet-migrate/migrate/migrations/20240520152239_v0_28_2.sql @@ -1,2 +1,3 @@ +-- +goose Up -- AlterTable ALTER TABLE "Worker" ADD COLUMN "isActive" BOOLEAN NOT NULL DEFAULT false; diff --git a/sql/migrations/20240521205311_v0_28_3.sql b/cmd/hatchet-migrate/migrate/migrations/20240521205311_v0_28_3.sql similarity index 87% rename from sql/migrations/20240521205311_v0_28_3.sql rename to cmd/hatchet-migrate/migrate/migrations/20240521205311_v0_28_3.sql index d906f42a7..d27b4f87f 100644 --- a/sql/migrations/20240521205311_v0_28_3.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240521205311_v0_28_3.sql @@ -1,2 +1,3 @@ +-- +goose Up -- AlterTable ALTER TABLE "Worker" ADD COLUMN "lastListenerEstablished" TIMESTAMP(3); diff --git a/sql/migrations/20240531142907_v0_29_0.sql b/cmd/hatchet-migrate/migrate/migrations/20240531142907_v0_29_0.sql similarity index 86% rename from sql/migrations/20240531142907_v0_29_0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240531142907_v0_29_0.sql index ce38b85c6..8248cf6f2 100644 --- a/sql/migrations/20240531142907_v0_29_0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240531142907_v0_29_0.sql @@ -1,2 +1,3 @@ +-- +goose Up -- AlterTable ALTER TABLE "APIToken" ADD COLUMN "nextAlertAt" TIMESTAMP(3); diff --git a/sql/migrations/20240531200417_v_0_30_0.sql b/cmd/hatchet-migrate/migrate/migrations/20240531200417_v_0_30_0.sql similarity index 95% rename from sql/migrations/20240531200417_v_0_30_0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240531200417_v_0_30_0.sql index 92e9c6a85..01147b551 100644 --- a/sql/migrations/20240531200417_v_0_30_0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240531200417_v_0_30_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- AlterTable ALTER TABLE "Tenant" ADD COLUMN "alertMemberEmails" BOOLEAN NOT NULL DEFAULT true; diff --git a/sql/migrations/20240531200418_v0_30_1.sql b/cmd/hatchet-migrate/migrate/migrations/20240531200418_v0_30_1.sql similarity index 95% rename from 
sql/migrations/20240531200418_v0_30_1.sql rename to cmd/hatchet-migrate/migrate/migrations/20240531200418_v0_30_1.sql index b5984b090..bba21263f 100644 --- a/sql/migrations/20240531200418_v0_30_1.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240531200418_v0_30_1.sql @@ -1,3 +1,4 @@ +-- +goose Up /* Warnings: - You are about to drop the `WorkerSemaphore` table. If the table is not empty, all the data it contains will be lost. - Made the column `maxRuns` on table `Worker` required. This step will fail if there are existing NULL values in that column. @@ -49,14 +50,15 @@ WHERE w."lastHeartbeatAt" >= NOW() - INTERVAL '10 hours' ON CONFLICT DO NOTHING; -- -- Update a null slot for each step that is currently running or assigned +-- +goose StatementBegin DO $$ DECLARE sr RECORD; wss RECORD; BEGIN -- Loop over each running or assigned step run - FOR sr IN - SELECT "id", "workerId" + FOR sr IN + SELECT "id", "workerId" FROM "StepRun" WHERE "status" IN ('RUNNING', 'ASSIGNED') LOOP @@ -66,7 +68,7 @@ BEGIN FROM "WorkerSemaphoreSlot" WHERE "workerId" = sr."workerId" AND "stepRunId" IS NULL LIMIT 1; - + -- If an available slot is found, update it with the stepRunId IF wss.id IS NOT NULL THEN UPDATE "WorkerSemaphoreSlot" @@ -75,4 +77,4 @@ BEGIN END IF; END LOOP; END $$; - +-- +goose StatementEnd diff --git a/sql/migrations/20240606145243_v0_31_0.sql b/cmd/hatchet-migrate/migrate/migrations/20240606145243_v0_31_0.sql similarity index 99% rename from sql/migrations/20240606145243_v0_31_0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240606145243_v0_31_0.sql index 96a4ec446..b80ec2d9e 100644 --- a/sql/migrations/20240606145243_v0_31_0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240606145243_v0_31_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Create enum type "LimitResource" CREATE TYPE "LimitResource" AS ENUM ('WORKFLOW_RUN', 'EVENT', 'WORKER', 'CRON', 'SCHEDULE'); -- Create enum type "TenantResourceLimitAlertType" diff --git 
a/sql/migrations/20240625180548_v0.34.0.sql b/cmd/hatchet-migrate/migrate/migrations/20240625180548_v0_34_0.sql similarity index 99% rename from sql/migrations/20240625180548_v0.34.0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240625180548_v0_34_0.sql index c6e2bd177..2dce44c00 100644 --- a/sql/migrations/20240625180548_v0.34.0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240625180548_v0_34_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Modify "WorkflowRun" table ALTER TABLE "WorkflowRun" DROP COLUMN "gitRepoBranch"; -- Create "WebhookWorker" table diff --git a/sql/migrations/20240626204339_v0.34.2.sql b/cmd/hatchet-migrate/migrate/migrations/20240626204339_v0_34_2.sql similarity index 99% rename from sql/migrations/20240626204339_v0.34.2.sql rename to cmd/hatchet-migrate/migrate/migrations/20240626204339_v0_34_2.sql index 83f3bc949..fe0de0c34 100644 --- a/sql/migrations/20240626204339_v0.34.2.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240626204339_v0_34_2.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Create "ControllerPartition" table CREATE TABLE "ControllerPartition" ("id" text NOT NULL, "createdAt" timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, "updatedAt" timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, "lastHeartbeat" timestamp(3) NULL, PRIMARY KEY ("id")); -- Create index "ControllerPartition_id_key" to table: "ControllerPartition" @@ -17,4 +18,4 @@ CREATE INDEX "Tenant_controllerPartitionId_idx" ON "Tenant" ("controllerPartitio -- Create index "Tenant_workerPartitionId_idx" to table: "Tenant" CREATE INDEX "Tenant_workerPartitionId_idx" ON "Tenant" ("workerPartitionId"); -INSERT INTO "SecurityCheckIdent" ("id") VALUES (gen_random_uuid()); \ No newline at end of file +INSERT INTO "SecurityCheckIdent" ("id") VALUES (gen_random_uuid()); diff --git a/sql/migrations/20240701144852_v0_35_0.sql b/cmd/hatchet-migrate/migrate/migrations/20240701144852_v0_35_0.sql similarity index 88% rename from sql/migrations/20240701144852_v0_35_0.sql rename to 
cmd/hatchet-migrate/migrate/migrations/20240701144852_v0_35_0.sql index 20854824c..ca05102a4 100644 --- a/sql/migrations/20240701144852_v0_35_0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240701144852_v0_35_0.sql @@ -1,2 +1,3 @@ +-- +goose Up -- Modify "Worker" table ALTER TABLE "Worker" ADD COLUMN "isPaused" boolean NOT NULL DEFAULT false; diff --git a/sql/migrations/20240703194656_v0.35.1.sql b/cmd/hatchet-migrate/migrate/migrations/20240703194656_v0_35_1.sql similarity index 98% rename from sql/migrations/20240703194656_v0.35.1.sql rename to cmd/hatchet-migrate/migrate/migrations/20240703194656_v0_35_1.sql index 9e4cb5dbf..21d5a6f92 100644 --- a/sql/migrations/20240703194656_v0.35.1.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240703194656_v0_35_1.sql @@ -1,4 +1,5 @@ --- atlas:txmode none +-- +goose Up +-- +goose NO TRANSACTION -- Create index "Event_createdAt_idx" to table: "Event" CREATE INDEX CONCURRENTLY IF NOT EXISTS "Event_createdAt_idx" ON "Event" ("createdAt"); diff --git a/sql/migrations/20240704211315_v0.35.2.sql b/cmd/hatchet-migrate/migrate/migrations/20240704211315_v0_35_2.sql similarity index 89% rename from sql/migrations/20240704211315_v0.35.2.sql rename to cmd/hatchet-migrate/migrate/migrations/20240704211315_v0_35_2.sql index 7ca8caeb7..50ed7fd7c 100644 --- a/sql/migrations/20240704211315_v0.35.2.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240704211315_v0_35_2.sql @@ -1,2 +1,3 @@ +-- +goose Up -- Modify "Tenant" table ALTER TABLE "Tenant" ADD COLUMN "dataRetentionPeriod" text NOT NULL DEFAULT '720h'; diff --git a/sql/migrations/20240712142946_v0.36.0.sql b/cmd/hatchet-migrate/migrate/migrations/20240712142946_v0_36_0.sql similarity index 96% rename from sql/migrations/20240712142946_v0.36.0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240712142946_v0_36_0.sql index a8c13c46b..fc603ac55 100644 --- a/sql/migrations/20240712142946_v0.36.0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240712142946_v0_36_0.sql @@ 
-1,4 +1,5 @@ --- atlas:txmode none +-- +goose Up +-- +goose NO TRANSACTION -- Drop index "StepRun_tenantId_status_requeueAfter_createdAt_idx" from table: "StepRun" DROP INDEX CONCURRENTLY IF EXISTS "StepRun_tenantId_status_requeueAfter_createdAt_idx"; diff --git a/sql/migrations/20240715154334_v0.37.0.sql b/cmd/hatchet-migrate/migrate/migrations/20240715154334_v0_37_0.sql similarity index 87% rename from sql/migrations/20240715154334_v0.37.0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240715154334_v0_37_0.sql index 172bf58e4..d6386bedd 100644 --- a/sql/migrations/20240715154334_v0.37.0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240715154334_v0_37_0.sql @@ -1,2 +1,3 @@ +-- +goose Up -- Modify "WorkflowRun" table ALTER TABLE "WorkflowRun" ADD COLUMN "duration" integer NULL; diff --git a/sql/migrations/20240716125857_v0.38.0.sql b/cmd/hatchet-migrate/migrate/migrations/20240716125857_v0_38_0.sql similarity index 88% rename from sql/migrations/20240716125857_v0.38.0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240716125857_v0_38_0.sql index 0e1708cb8..5a8ec88b1 100644 --- a/sql/migrations/20240716125857_v0.38.0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240716125857_v0_38_0.sql @@ -1,2 +1,3 @@ +-- +goose Up -- Modify "APIToken" table ALTER TABLE "APIToken" ADD COLUMN "internal" boolean NOT NULL DEFAULT false; diff --git a/sql/migrations/20240716143349_v0.39.0.sql b/cmd/hatchet-migrate/migrate/migrations/20240716143349_v0_39_0.sql similarity index 96% rename from sql/migrations/20240716143349_v0.39.0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240716143349_v0_39_0.sql index b3118c578..bc843252c 100644 --- a/sql/migrations/20240716143349_v0.39.0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240716143349_v0_39_0.sql @@ -1,4 +1,5 @@ --- atlas:txmode none +-- +goose Up +-- +goose NO TRANSACTION -- CREATE INDEX CONCURRENTLY IF NOT EXISTS "GetGroupKeyRun_deletedAt_idx" to table: "GetGroupKeyRun" CREATE INDEX CONCURRENTLY IF 
NOT EXISTS "GetGroupKeyRun_deletedAt_idx" ON "GetGroupKeyRun" ("deletedAt"); diff --git a/sql/migrations/20240726160629_v0.40.0.sql b/cmd/hatchet-migrate/migrate/migrations/20240726160629_v0_40_0.sql similarity index 99% rename from sql/migrations/20240726160629_v0.40.0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240726160629_v0_40_0.sql index 209950b4c..8879ef13d 100644 --- a/sql/migrations/20240726160629_v0.40.0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240726160629_v0_40_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Create enum type "StickyStrategy" CREATE TYPE "StickyStrategy" AS ENUM ('SOFT', 'HARD'); -- Create enum type "WorkerLabelComparator" diff --git a/sql/migrations/20240728042317_v0.41.0.sql b/cmd/hatchet-migrate/migrate/migrations/20240728042317_v0_41_0.sql similarity index 94% rename from sql/migrations/20240728042317_v0.41.0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240728042317_v0_41_0.sql index a5ae450ea..f77ff7da3 100644 --- a/sql/migrations/20240728042317_v0.41.0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240728042317_v0_41_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Create enum type "WorkflowKind" CREATE TYPE "WorkflowKind" AS ENUM ('FUNCTION', 'DURABLE', 'DAG'); -- Modify "WorkflowVersion" table diff --git a/sql/migrations/20240809131000_v0.42.0.sql b/cmd/hatchet-migrate/migrate/migrations/20240809131000_v0_42_0.sql similarity index 98% rename from sql/migrations/20240809131000_v0.42.0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240809131000_v0_42_0.sql index a031389f3..1038f07c6 100644 --- a/sql/migrations/20240809131000_v0.42.0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240809131000_v0_42_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Add value to enum type: "StepRunStatus" ALTER TYPE "StepRunStatus" ADD VALUE 'CANCELLING'; -- Add value to enum type: "StepRunEventReason" @@ -68,7 +69,7 @@ FROM WHERE sr."id" = sr2."id"; --- For all step runs in a pending assignment state, insert them into the queue based 
on their created at time. +-- For all step runs in a pending assignment state, insert them into the queue based on their created at time. -- This query is idempotent and can run multiple times. WITH pending_assignments AS ( SELECT @@ -116,4 +117,4 @@ SELECT FROM pending_assignments pa WHERE - pa."id" NOT IN (SELECT "id" FROM pending_assignments_with_qi); \ No newline at end of file + pa."id" NOT IN (SELECT "id" FROM pending_assignments_with_qi); diff --git a/sql/migrations/20240812153737_v0.42.1.sql b/cmd/hatchet-migrate/migrate/migrations/20240812153737_v0_42_1.sql similarity index 90% rename from sql/migrations/20240812153737_v0.42.1.sql rename to cmd/hatchet-migrate/migrate/migrations/20240812153737_v0_42_1.sql index 8f8cbbcaf..71f78d667 100644 --- a/sql/migrations/20240812153737_v0.42.1.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240812153737_v0_42_1.sql @@ -1,4 +1,5 @@ --- atlas:txmode none +-- +goose Up +-- +goose NO TRANSACTION ALTER TABLE "QueueItem" ADD CONSTRAINT "QueueItem_priority_check" CHECK ("priority" >= 1 AND "priority" <= 4); diff --git a/sql/migrations/20240815151244_v0.42.2.sql b/cmd/hatchet-migrate/migrate/migrations/20240815151244_v0_42_2.sql similarity index 92% rename from sql/migrations/20240815151244_v0.42.2.sql rename to cmd/hatchet-migrate/migrate/migrations/20240815151244_v0_42_2.sql index 25e5b4f5f..19b660733 100644 --- a/sql/migrations/20240815151244_v0.42.2.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240815151244_v0_42_2.sql @@ -1,4 +1,5 @@ --- atlas:txmode none +-- +goose Up +-- +goose NO TRANSACTION -- Create index "GetGroupKeyRun_createdAt_idx" to table: "GetGroupKeyRun" CREATE INDEX CONCURRENTLY IF NOT EXISTS "GetGroupKeyRun_createdAt_idx" ON "GetGroupKeyRun" ("createdAt"); @@ -9,4 +10,4 @@ CREATE INDEX CONCURRENTLY IF NOT EXISTS "GetGroupKeyRun_tenantId_deletedAt_statu -- Create index "GetGroupKeyRun_tenantId_idx" to table: "GetGroupKeyRun" CREATE INDEX CONCURRENTLY IF NOT EXISTS "GetGroupKeyRun_tenantId_idx" ON 
"GetGroupKeyRun" ("tenantId"); -- Create index "GetGroupKeyRun_workerId_idx" to table: "GetGroupKeyRun" -CREATE INDEX CONCURRENTLY IF NOT EXISTS "GetGroupKeyRun_workerId_idx" ON "GetGroupKeyRun" ("workerId"); \ No newline at end of file +CREATE INDEX CONCURRENTLY IF NOT EXISTS "GetGroupKeyRun_workerId_idx" ON "GetGroupKeyRun" ("workerId"); diff --git a/sql/migrations/20240821170947_0.42.3.sql b/cmd/hatchet-migrate/migrate/migrations/20240821170947_0_42_3.sql similarity index 97% rename from sql/migrations/20240821170947_0.42.3.sql rename to cmd/hatchet-migrate/migrate/migrations/20240821170947_0_42_3.sql index 1f69c0717..fa338d4bf 100644 --- a/sql/migrations/20240821170947_0.42.3.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240821170947_0_42_3.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Create enum type "WorkerType" CREATE TYPE "WorkerType" AS ENUM ('WEBHOOK', 'MANAGED', 'SELFHOSTED'); -- Modify "Worker" table diff --git a/sql/migrations/20240823120430_0.42.4.sql b/cmd/hatchet-migrate/migrate/migrations/20240823120430_0_42_4.sql similarity index 98% rename from sql/migrations/20240823120430_0.42.4.sql rename to cmd/hatchet-migrate/migrate/migrations/20240823120430_0_42_4.sql index 066573ba9..fda06a6fb 100644 --- a/sql/migrations/20240823120430_0.42.4.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240823120430_0_42_4.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Create enum type "WebhookWorkerRequestMethod" CREATE TYPE "WebhookWorkerRequestMethod" AS ENUM ('GET', 'POST', 'PUT'); -- Create "WebhookWorkerRequest" table diff --git a/sql/migrations/20240823204123_0.42.5.sql b/cmd/hatchet-migrate/migrate/migrations/20240823204123_0_42_5.sql similarity index 95% rename from sql/migrations/20240823204123_0.42.5.sql rename to cmd/hatchet-migrate/migrate/migrations/20240823204123_0_42_5.sql index aeee6b8d5..cfda5a4bd 100644 --- a/sql/migrations/20240823204123_0.42.5.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240823204123_0_42_5.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Modify 
"StepRun" table ALTER TABLE "StepRun" ADD COLUMN "priority" integer NULL; -- Modify "WorkflowRun" table diff --git a/sql/migrations/20240829142550_v0.43.2.sql b/cmd/hatchet-migrate/migrate/migrations/20240829142550_v0_43_2.sql similarity index 94% rename from sql/migrations/20240829142550_v0.43.2.sql rename to cmd/hatchet-migrate/migrate/migrations/20240829142550_v0_43_2.sql index 1e7dbf61e..463167929 100644 --- a/sql/migrations/20240829142550_v0.43.2.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240829142550_v0_43_2.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Modify "ControllerPartition" table ALTER TABLE "ControllerPartition" ADD COLUMN "name" text NULL; -- Modify "TenantWorkerPartition" table diff --git a/sql/migrations/20240904120327_v0.44.0.sql b/cmd/hatchet-migrate/migrate/migrations/20240904120327_v0_44_0.sql similarity index 99% rename from sql/migrations/20240904120327_v0.44.0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240904120327_v0_44_0.sql index a9d14f402..e2637def4 100644 --- a/sql/migrations/20240904120327_v0.44.0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240904120327_v0_44_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Create enum type "InternalQueue" CREATE TYPE "InternalQueue" AS ENUM ('WORKER_SEMAPHORE_COUNT', 'STEP_RUN_UPDATE'); -- Create "InternalQueueItem" table diff --git a/sql/migrations/20240908154802_v0.44.1.sql b/cmd/hatchet-migrate/migrate/migrations/20240908154802_v0_44_1.sql similarity index 82% rename from sql/migrations/20240908154802_v0.44.1.sql rename to cmd/hatchet-migrate/migrate/migrations/20240908154802_v0_44_1.sql index c58ed90cd..cbf6c89df 100644 --- a/sql/migrations/20240908154802_v0.44.1.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240908154802_v0_44_1.sql @@ -1,15 +1,16 @@ +-- +goose Up INSERT INTO "WorkerSemaphoreCount" ("workerId", "count") -SELECT +SELECT "workerId", COUNT(*) as "count" -FROM +FROM "WorkerSemaphoreSlot" -JOIN +JOIN "Worker" w ON "WorkerSemaphoreSlot"."workerId" = w."id" -WHERE +WHERE 
"stepRunId" IS NULL AND w."lastHeartbeatAt" > NOW() - INTERVAL '15 seconds' -GROUP BY +GROUP BY "workerId" -ON CONFLICT ("workerId") +ON CONFLICT ("workerId") DO UPDATE SET "count" = EXCLUDED."count"; diff --git a/sql/migrations/20240910205843_v0.44.5.sql b/cmd/hatchet-migrate/migrate/migrations/20240910205843_v0_44_5.sql similarity index 94% rename from sql/migrations/20240910205843_v0.44.5.sql rename to cmd/hatchet-migrate/migrate/migrations/20240910205843_v0_44_5.sql index 0144c0303..ecdfd0b73 100644 --- a/sql/migrations/20240910205843_v0.44.5.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240910205843_v0_44_5.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Create "TimeoutQueueItem" table CREATE TABLE "TimeoutQueueItem" ("id" bigserial NOT NULL, "stepRunId" uuid NOT NULL, "retryCount" integer NOT NULL, "timeoutAt" timestamp(3) NOT NULL, "tenantId" uuid NOT NULL, "isQueued" boolean NOT NULL, PRIMARY KEY ("id")); -- Create index "TimeoutQueueItem_stepRunId_retryCount_key" to table: "TimeoutQueueItem" @@ -7,15 +8,15 @@ CREATE INDEX "TimeoutQueueItem_tenantId_isQueued_timeoutAt_idx" ON "TimeoutQueue -- Migrate all running and assigned step runs to TimeoutQueueItem INSERT INTO "TimeoutQueueItem" ("stepRunId", "retryCount", "timeoutAt", "tenantId", "isQueued") -SELECT +SELECT "id" AS "stepRunId", "retryCount", "timeoutAt", "tenantId", true -FROM +FROM "StepRun" -WHERE +WHERE "status" IN ('RUNNING', 'ASSIGNED') AND "timeoutAt" IS NOT NULL -ON CONFLICT DO NOTHING; \ No newline at end of file +ON CONFLICT DO NOTHING; diff --git a/sql/migrations/20240911124017_v0.44.6.sql b/cmd/hatchet-migrate/migrate/migrations/20240911124017_v0_44_6.sql similarity index 94% rename from sql/migrations/20240911124017_v0.44.6.sql rename to cmd/hatchet-migrate/migrate/migrations/20240911124017_v0_44_6.sql index 27f92fa41..edcc42532 100644 --- a/sql/migrations/20240911124017_v0.44.6.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240911124017_v0_44_6.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Create 
"SemaphoreQueueItem" table CREATE TABLE "SemaphoreQueueItem" ("id" bigserial NOT NULL, "stepRunId" uuid NOT NULL, "workerId" uuid NOT NULL, "tenantId" uuid NOT NULL, PRIMARY KEY ("id")); -- Create index "SemaphoreQueueItem_stepRunId_workerId_key" to table: "SemaphoreQueueItem" @@ -7,15 +8,15 @@ CREATE INDEX "SemaphoreQueueItem_tenantId_workerId_idx" ON "SemaphoreQueueItem" -- Migrate data from "WorkerSemaphoreSlot" to "SemaphoreQueueItem" INSERT INTO "SemaphoreQueueItem" ("stepRunId", "workerId", "tenantId") -SELECT +SELECT "stepRunId", "workerId", "tenantId" -FROM +FROM "WorkerSemaphoreSlot" -JOIN +JOIN "Worker" w ON "WorkerSemaphoreSlot"."workerId" = w."id" -WHERE +WHERE "stepRunId" IS NOT NULL AND w."lastHeartbeatAt" > NOW() - INTERVAL '60 seconds' -ON CONFLICT DO NOTHING; \ No newline at end of file +ON CONFLICT DO NOTHING; diff --git a/sql/migrations/20240911201831_v0.44.7.sql b/cmd/hatchet-migrate/migrate/migrations/20240911201831_v0_44_7.sql similarity index 97% rename from sql/migrations/20240911201831_v0.44.7.sql rename to cmd/hatchet-migrate/migrate/migrations/20240911201831_v0_44_7.sql index 48ec3bd69..262c8eb00 100644 --- a/sql/migrations/20240911201831_v0.44.7.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240911201831_v0_44_7.sql @@ -1,3 +1,4 @@ +-- +goose Up -- DropForeignKey ALTER TABLE "WorkerSemaphore" DROP CONSTRAINT "WorkerSemaphore_workerId_fkey"; diff --git a/sql/migrations/20240916115647_v0.44.8.sql b/cmd/hatchet-migrate/migrate/migrations/20240916115647_v0_44_8.sql similarity index 89% rename from sql/migrations/20240916115647_v0.44.8.sql rename to cmd/hatchet-migrate/migrate/migrations/20240916115647_v0_44_8.sql index 8540ef206..76c2785bd 100644 --- a/sql/migrations/20240916115647_v0.44.8.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240916115647_v0_44_8.sql @@ -1,4 +1,5 @@ --- atlas:txmode none +-- +goose Up +-- +goose NO TRANSACTION -- Drop index "SemaphoreQueueItem_stepRunId_workerId_key" from table: "SemaphoreQueueItem" DROP 
INDEX CONCURRENTLY IF EXISTS "SemaphoreQueueItem_stepRunId_workerId_key"; @@ -15,6 +16,6 @@ ALTER TABLE "StepRunResultArchive" ADD COLUMN "retryCount" integer NOT NULL DEFA DROP INDEX CONCURRENTLY IF EXISTS "StepRun_jobRunId_status_tenantId_idx"; -- Create new partial index "StepRun_jobRunId_status_tenantId_idx" on table: "StepRun" -CREATE INDEX CONCURRENTLY IF NOT EXISTS "StepRun_jobRunId_status_tenantId_idx" -ON "StepRun" ("jobRunId", "status", "tenantId") -WHERE "status" = 'PENDING'; \ No newline at end of file +CREATE INDEX CONCURRENTLY IF NOT EXISTS "StepRun_jobRunId_status_tenantId_idx" +ON "StepRun" ("jobRunId", "status", "tenantId") +WHERE "status" = 'PENDING'; diff --git a/sql/migrations/20240918162532_v0.45.0.sql b/cmd/hatchet-migrate/migrate/migrations/20240918162532_v0_45_0.sql similarity index 96% rename from sql/migrations/20240918162532_v0.45.0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240918162532_v0_45_0.sql index e13be254c..fd0c0baae 100644 --- a/sql/migrations/20240918162532_v0.45.0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240918162532_v0_45_0.sql @@ -1,4 +1,5 @@ --- atlas:txmode none +-- +goose Up +-- +goose NO TRANSACTION CREATE INDEX CONCURRENTLY IF NOT EXISTS "WorkflowRun_parentStepRunId" ON "WorkflowRun"("parentStepRunId" ASC); diff --git a/sql/migrations/20240923124809_v0.45.4.sql b/cmd/hatchet-migrate/migrate/migrations/20240923124809_v0_45_4.sql similarity index 87% rename from sql/migrations/20240923124809_v0.45.4.sql rename to cmd/hatchet-migrate/migrate/migrations/20240923124809_v0_45_4.sql index dc4c94e46..f93718346 100644 --- a/sql/migrations/20240923124809_v0.45.4.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240923124809_v0_45_4.sql @@ -1,2 +1,3 @@ +-- +goose Up -- Modify "WorkflowRun" table ALTER TABLE "WorkflowRun" ALTER COLUMN "duration" TYPE bigint; diff --git a/sql/migrations/20240926210650_v0.47.0.sql b/cmd/hatchet-migrate/migrate/migrations/20240926210650_v0_47_0.sql similarity index 95% rename 
from sql/migrations/20240926210650_v0.47.0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240926210650_v0_47_0.sql index 7454e8170..46c19eb49 100644 --- a/sql/migrations/20240926210650_v0.47.0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240926210650_v0_47_0.sql @@ -1,4 +1,5 @@ --- atlas:txmode none +-- +goose Up +-- +goose NO TRANSACTION -- Add value to enum type: "StepRunEventReason" ALTER TYPE "StepRunEventReason" ADD VALUE 'RATE_LIMIT_ERROR'; @@ -15,4 +16,4 @@ CREATE INDEX CONCURRENTLY IF NOT EXISTS "idx_workflowrun_main" ON "WorkflowRun" -- Create "StepExpression" table CREATE TABLE "StepExpression" ("key" text NOT NULL, "stepId" uuid NOT NULL, "expression" text NOT NULL, "kind" "StepExpressionKind" NOT NULL, PRIMARY KEY ("key", "stepId", "kind")); -- Create "StepRunExpressionEval" table -CREATE TABLE "StepRunExpressionEval" ("key" text NOT NULL, "stepRunId" uuid NOT NULL, "valueStr" text NULL, "valueInt" integer NULL, "kind" "StepExpressionKind" NOT NULL, PRIMARY KEY ("key", "stepRunId", "kind")); \ No newline at end of file +CREATE TABLE "StepRunExpressionEval" ("key" text NOT NULL, "stepRunId" uuid NOT NULL, "valueStr" text NULL, "valueInt" integer NULL, "kind" "StepExpressionKind" NOT NULL, PRIMARY KEY ("key", "stepRunId", "kind")); diff --git a/sql/migrations/20240927172935_v0.47.1.sql b/cmd/hatchet-migrate/migrate/migrations/20240927172935_v0_47_1.sql similarity index 99% rename from sql/migrations/20240927172935_v0.47.1.sql rename to cmd/hatchet-migrate/migrate/migrations/20240927172935_v0_47_1.sql index 535145105..20eae18da 100644 --- a/sql/migrations/20240927172935_v0.47.1.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240927172935_v0_47_1.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Modify "Event" table ALTER TABLE "Event" DROP CONSTRAINT "Event_tenantId_fkey"; -- Modify "GetGroupKeyRun" table diff --git a/sql/migrations/20240928144316_v0.48.0.sql b/cmd/hatchet-migrate/migrate/migrations/20240928144316_v0_48_0.sql similarity index 93% 
rename from sql/migrations/20240928144316_v0.48.0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240928144316_v0_48_0.sql index 21d90380c..522d0242e 100644 --- a/sql/migrations/20240928144316_v0.48.0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240928144316_v0_48_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Add value to enum type: "InternalQueue" ALTER TYPE "InternalQueue" ADD VALUE 'WORKFLOW_RUN_PAUSED'; -- Modify "Workflow" table diff --git a/sql/migrations/20240930202706_v0.48.1.sql b/cmd/hatchet-migrate/migrate/migrations/20240930202706_v0_48_1.sql similarity index 93% rename from sql/migrations/20240930202706_v0.48.1.sql rename to cmd/hatchet-migrate/migrate/migrations/20240930202706_v0_48_1.sql index aaf25938f..0bea9f722 100644 --- a/sql/migrations/20240930202706_v0.48.1.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240930202706_v0_48_1.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Modify "JobRun" table ALTER TABLE "JobRun" DROP CONSTRAINT "JobRun_jobId_fkey"; -- Modify "JobRunLookupData" table diff --git a/sql/migrations/20240930233257_v0.49.0.sql b/cmd/hatchet-migrate/migrate/migrations/20240930233257_v0_49_0.sql similarity index 86% rename from sql/migrations/20240930233257_v0.49.0.sql rename to cmd/hatchet-migrate/migrate/migrations/20240930233257_v0_49_0.sql index 4513f3170..a3d7eed49 100644 --- a/sql/migrations/20240930233257_v0.49.0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20240930233257_v0_49_0.sql @@ -1,2 +1,3 @@ +-- +goose Up -- Modify "Event" table ALTER TABLE "Event" ADD COLUMN "insertOrder" integer NULL; diff --git a/sql/migrations/20241004122206_v0.49.1.sql b/cmd/hatchet-migrate/migrate/migrations/20241004122206_v0_49_1.sql similarity index 88% rename from sql/migrations/20241004122206_v0.49.1.sql rename to cmd/hatchet-migrate/migrate/migrations/20241004122206_v0_49_1.sql index 9a39e07c7..e2d7826b6 100644 --- a/sql/migrations/20241004122206_v0.49.1.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20241004122206_v0_49_1.sql @@ -1,2 
+1,3 @@ +-- +goose Up -- Add value to enum type: "InternalQueue" ALTER TYPE "InternalQueue" ADD VALUE 'STEP_RUN_UPDATE_V2'; diff --git a/sql/migrations/20241008124038_v0.49.2.sql b/cmd/hatchet-migrate/migrate/migrations/20241008124038_v0_49_2.sql similarity index 95% rename from sql/migrations/20241008124038_v0.49.2.sql rename to cmd/hatchet-migrate/migrate/migrations/20241008124038_v0_49_2.sql index 719049e4b..21a3c5e79 100644 --- a/sql/migrations/20241008124038_v0.49.2.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20241008124038_v0_49_2.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Create "EventKey" table CREATE TABLE "EventKey" ("key" text NOT NULL, "tenantId" uuid NOT NULL, PRIMARY KEY ("key")); -- Create index "EventKey_key_tenantId_key" to table: "EventKey" diff --git a/sql/migrations/20241011205314_0.49.3.sql b/cmd/hatchet-migrate/migrate/migrations/20241011205314_0_49_3.sql similarity index 91% rename from sql/migrations/20241011205314_0.49.3.sql rename to cmd/hatchet-migrate/migrate/migrations/20241011205314_0_49_3.sql index 3bd4c8aec..9c0470449 100644 --- a/sql/migrations/20241011205314_0.49.3.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20241011205314_0_49_3.sql @@ -1,2 +1,3 @@ +-- +goose Up -- Modify "EventKey" table ALTER TABLE "EventKey" DROP CONSTRAINT "EventKey_pkey", ADD COLUMN "id" bigserial NOT NULL, ADD PRIMARY KEY ("id"); diff --git a/sql/migrations/20241014194326_v0.50.0.sql b/cmd/hatchet-migrate/migrate/migrations/20241014194326_v0_50_0.sql similarity index 98% rename from sql/migrations/20241014194326_v0.50.0.sql rename to cmd/hatchet-migrate/migrate/migrations/20241014194326_v0_50_0.sql index c41fd4b4d..6e795af48 100644 --- a/sql/migrations/20241014194326_v0.50.0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20241014194326_v0_50_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Create enum type "LeaseKind" CREATE TYPE "LeaseKind" AS ENUM ('WORKER', 'QUEUE'); -- Modify "WorkflowRun" table diff --git a/sql/migrations/20241018142125_v0.50.1.sql 
b/cmd/hatchet-migrate/migrate/migrations/20241018142125_v0_50_1.sql similarity index 94% rename from sql/migrations/20241018142125_v0.50.1.sql rename to cmd/hatchet-migrate/migrate/migrations/20241018142125_v0_50_1.sql index 91647558d..0b27e4696 100644 --- a/sql/migrations/20241018142125_v0.50.1.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20241018142125_v0_50_1.sql @@ -1,4 +1,5 @@ --- atlas:txmode none +-- +goose Up +-- +goose NO TRANSACTION -- Additional indexes on workflow CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_workflow_version_workflow_id_order diff --git a/sql/migrations/20241022124210_v0.50.2.sql b/cmd/hatchet-migrate/migrate/migrations/20241022124210_v0_50_2.sql similarity index 98% rename from sql/migrations/20241022124210_v0.50.2.sql rename to cmd/hatchet-migrate/migrate/migrations/20241022124210_v0_50_2.sql index d11e20b76..21c024e2a 100644 --- a/sql/migrations/20241022124210_v0.50.2.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20241022124210_v0_50_2.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Create "SchedulerPartition" table CREATE TABLE "SchedulerPartition" ("id" text NOT NULL, "createdAt" timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, "updatedAt" timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, "lastHeartbeat" timestamp(3) NULL, "name" text NULL, PRIMARY KEY ("id")); -- Create index "SchedulerPartition_id_key" to table: "SchedulerPartition" diff --git a/sql/migrations/20241023112235_v0.50.3.sql b/cmd/hatchet-migrate/migrate/migrations/20241023112235_v0_50_3.sql similarity index 94% rename from sql/migrations/20241023112235_v0.50.3.sql rename to cmd/hatchet-migrate/migrate/migrations/20241023112235_v0_50_3.sql index cff973798..dad163501 100644 --- a/sql/migrations/20241023112235_v0.50.3.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20241023112235_v0_50_3.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Modify "Queue" table ALTER TABLE "Queue" ADD COLUMN "lastActive" timestamp(3) NULL; -- Create index "Queue_tenantId_lastActive_idx" to table: "Queue" @@ 
-27,4 +28,4 @@ FROM unique_queues ON CONFLICT ("tenantId", "name") DO UPDATE SET - "lastActive" = NOW(); \ No newline at end of file + "lastActive" = NOW(); diff --git a/sql/migrations/20241023223039_v0.50.4.sql b/cmd/hatchet-migrate/migrate/migrations/20241023223039_v0_50_4.sql similarity index 58% rename from sql/migrations/20241023223039_v0.50.4.sql rename to cmd/hatchet-migrate/migrate/migrations/20241023223039_v0_50_4.sql index c8e6f2d4b..b2b67631b 100644 --- a/sql/migrations/20241023223039_v0.50.4.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20241023223039_v0_50_4.sql @@ -1,6 +1,7 @@ --- atlas:txmode none +-- +goose Up +-- +goose NO TRANSACTION -- Create index "WorkflowRun_parentId_parentStepRunId_childIndex_key" to table: "WorkflowRun" -CREATE INDEX CONCURRENTLY IF NOT EXISTS "WorkflowRun_parentId_parentStepRunId_childIndex_key" -ON "WorkflowRun" ("parentId", "parentStepRunId", "childIndex") +CREATE INDEX CONCURRENTLY IF NOT EXISTS "WorkflowRun_parentId_parentStepRunId_childIndex_key" +ON "WorkflowRun" ("parentId", "parentStepRunId", "childIndex") WHERE ("deletedAt" IS NULL); diff --git a/sql/migrations/20241025162439_v0.50.5.sql b/cmd/hatchet-migrate/migrate/migrations/20241025162439_v0_50_5.sql similarity index 96% rename from sql/migrations/20241025162439_v0.50.5.sql rename to cmd/hatchet-migrate/migrate/migrations/20241025162439_v0_50_5.sql index b42d20b8a..a06dee803 100644 --- a/sql/migrations/20241025162439_v0.50.5.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20241025162439_v0_50_5.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Create enum type "WorkerSDKS" CREATE TYPE "WorkerSDKS" AS ENUM ('UNKNOWN', 'GO', 'PYTHON', 'TYPESCRIPT'); -- Modify "Worker" table diff --git a/sql/migrations/20241029122625_v0.51.0.sql b/cmd/hatchet-migrate/migrate/migrations/20241029122625_v0_51_0.sql similarity index 97% rename from sql/migrations/20241029122625_v0.51.0.sql rename to cmd/hatchet-migrate/migrate/migrations/20241029122625_v0_51_0.sql index 1d1c669b4..510839600 
100644 --- a/sql/migrations/20241029122625_v0.51.0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20241029122625_v0_51_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Modify "WorkflowTriggerCronRef" table ALTER TABLE "WorkflowTriggerCronRef" ADD COLUMN "additionalMetadata" jsonb NULL, ADD COLUMN "createdAt" timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, ADD COLUMN "deletedAt" timestamp(3) NULL, ADD COLUMN "updatedAt" timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP; -- Modify "WorkflowTriggerScheduledRef" table diff --git a/sql/migrations/20241107162939_v0.51.2.sql b/cmd/hatchet-migrate/migrate/migrations/20241107162939_v0_51_2.sql similarity index 89% rename from sql/migrations/20241107162939_v0.51.2.sql rename to cmd/hatchet-migrate/migrate/migrations/20241107162939_v0_51_2.sql index 3090a2195..bfe3e8715 100644 --- a/sql/migrations/20241107162939_v0.51.2.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20241107162939_v0_51_2.sql @@ -1,2 +1,3 @@ +-- +goose Up -- Modify "StepRun" table ALTER TABLE "StepRun" ADD COLUMN "internalRetryCount" integer NOT NULL DEFAULT 0; diff --git a/sql/migrations/20241114175346_v0.51.3.sql b/cmd/hatchet-migrate/migrate/migrations/20241114175346_v0_51_3.sql similarity index 91% rename from sql/migrations/20241114175346_v0.51.3.sql rename to cmd/hatchet-migrate/migrate/migrations/20241114175346_v0_51_3.sql index a7989b2d7..eb8714f01 100644 --- a/sql/migrations/20241114175346_v0.51.3.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20241114175346_v0_51_3.sql @@ -1,2 +1,3 @@ +-- +goose Up -- Modify "Event" table ALTER TABLE "Event" ALTER COLUMN "createdAt" TYPE timestamp, ALTER COLUMN "createdAt" SET DEFAULT clock_timestamp(); diff --git a/sql/migrations/20241121142159_v0.52.0.sql b/cmd/hatchet-migrate/migrate/migrations/20241121142159_v0_52_0.sql similarity index 80% rename from sql/migrations/20241121142159_v0.52.0.sql rename to cmd/hatchet-migrate/migrate/migrations/20241121142159_v0_52_0.sql index e870ee170..86c1e83b9 100644 --- 
a/sql/migrations/20241121142159_v0.52.0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20241121142159_v0_52_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Modify "Step" table ALTER TABLE "Step" ADD COLUMN "retryBackoffFactor" double precision NULL, ADD COLUMN "retryMaxBackoff" integer NULL; -- Create "RetryQueueItem" table @@ -10,19 +11,19 @@ CREATE TYPE "WorkflowTriggerCronRefMethods" AS ENUM ('DEFAULT', 'API'); CREATE TYPE "WorkflowTriggerScheduledRefMethods" AS ENUM ('DEFAULT', 'API'); -- Step 1: Add the new columns with "id" as nullable -ALTER TABLE "WorkflowTriggerCronRef" -ADD COLUMN "name" text NULL, -ADD COLUMN "id" uuid NULL, -ADD COLUMN "method" "WorkflowTriggerCronRefMethods" NOT NULL DEFAULT 'DEFAULT', +ALTER TABLE "WorkflowTriggerCronRef" +ADD COLUMN "name" text NULL, +ADD COLUMN "id" uuid NULL, +ADD COLUMN "method" "WorkflowTriggerCronRefMethods" NOT NULL DEFAULT 'DEFAULT', ADD CONSTRAINT "WorkflowTriggerCronRef_parentId_cron_name_key" UNIQUE ("parentId", "cron", "name"); -- Step 2: Populate "id" column with UUIDs for existing rows -UPDATE "WorkflowTriggerCronRef" -SET "id" = gen_random_uuid() +UPDATE "WorkflowTriggerCronRef" +SET "id" = gen_random_uuid() WHERE "id" IS NULL; -- Step 3: Alter "id" column to be NOT NULL -ALTER TABLE "WorkflowTriggerCronRef" +ALTER TABLE "WorkflowTriggerCronRef" ALTER COLUMN "id" SET NOT NULL; UPDATE "WorkflowTriggerCronRef" SET "name" = '' WHERE "name" IS NULL; @@ -31,17 +32,17 @@ UPDATE "WorkflowTriggerCronRef" SET "name" = '' WHERE "name" IS NULL; ALTER TABLE "WorkflowTriggerScheduledRef" ADD COLUMN "method" "WorkflowTriggerScheduledRefMethods" NOT NULL DEFAULT 'DEFAULT'; -- Modify "WorkflowRunTriggeredBy" table -ALTER TABLE "WorkflowRunTriggeredBy" +ALTER TABLE "WorkflowRunTriggeredBy" DROP CONSTRAINT "WorkflowRunTriggeredBy_cronParentId_cronSchedule_fkey", ADD COLUMN "cronName" text NULL; -ALTER TABLE "WorkflowRunTriggeredBy" -ADD CONSTRAINT "WorkflowRunTriggeredBy_cronParentId_cronSchedule_cronName_fkey" -FOREIGN KEY 
("cronParentId", "cronSchedule", "cronName") -REFERENCES "WorkflowTriggerCronRef" ("parentId", "cron", "name") -ON UPDATE CASCADE +ALTER TABLE "WorkflowRunTriggeredBy" +ADD CONSTRAINT "WorkflowRunTriggeredBy_cronParentId_cronSchedule_cronName_fkey" +FOREIGN KEY ("cronParentId", "cronSchedule", "cronName") +REFERENCES "WorkflowTriggerCronRef" ("parentId", "cron", "name") +ON UPDATE CASCADE ON DELETE SET NULL NOT VALID; -- Drop index "WorkflowTriggerCronRef_parentId_cron_key" from table: "WorkflowTriggerCronRef" -DROP INDEX "WorkflowTriggerCronRef_parentId_cron_key"; \ No newline at end of file +DROP INDEX "WorkflowTriggerCronRef_parentId_cron_key"; diff --git a/sql/migrations/20241204191714_v0.52.5.sql b/cmd/hatchet-migrate/migrate/migrations/20241204191714_v0_52_5.sql similarity index 77% rename from sql/migrations/20241204191714_v0.52.5.sql rename to cmd/hatchet-migrate/migrate/migrations/20241204191714_v0_52_5.sql index 8c3ea3254..ca8693139 100644 --- a/sql/migrations/20241204191714_v0.52.5.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20241204191714_v0_52_5.sql @@ -1 +1,2 @@ -UPDATE "WorkflowTriggerCronRef" SET "name" = '' WHERE "name" IS NULL; \ No newline at end of file +-- +goose Up +UPDATE "WorkflowTriggerCronRef" SET "name" = '' WHERE "name" IS NULL; diff --git a/sql/migrations/20241206231312_v0.52.12.sql b/cmd/hatchet-migrate/migrate/migrations/20241206231312_v0_52_12.sql similarity index 91% rename from sql/migrations/20241206231312_v0.52.12.sql rename to cmd/hatchet-migrate/migrate/migrations/20241206231312_v0_52_12.sql index 1e56f2e70..204b8bfc2 100644 --- a/sql/migrations/20241206231312_v0.52.12.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20241206231312_v0_52_12.sql @@ -1,4 +1,5 @@ --- atlas:txmode none +-- +goose Up +-- +goose NO TRANSACTION CREATE INDEX CONCURRENTLY "Worker_tenantId_lastHeartbeatAt_idx" ON "Worker" ("tenantId", "lastHeartbeatAt"); diff --git a/sql/migrations/20241216175807_v0.52.13.sql 
b/cmd/hatchet-migrate/migrate/migrations/20241216175807_v0_52_13.sql similarity index 75% rename from sql/migrations/20241216175807_v0.52.13.sql rename to cmd/hatchet-migrate/migrate/migrations/20241216175807_v0_52_13.sql index 5db6b1cee..121669dc1 100644 --- a/sql/migrations/20241216175807_v0.52.13.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20241216175807_v0_52_13.sql @@ -1,3 +1,4 @@ --- atlas:txmode none +-- +goose Up +-- +goose NO TRANSACTION CREATE INDEX CONCURRENTLY IF NOT EXISTS "LogLine_tenantId_stepRunId_idx" ON "LogLine" ("tenantId", "stepRunId" ASC); diff --git a/sql/migrations/20241217152316_v0.53.0.sql b/cmd/hatchet-migrate/migrate/migrations/20241217152316_v0_53_0.sql similarity index 94% rename from sql/migrations/20241217152316_v0.53.0.sql rename to cmd/hatchet-migrate/migrate/migrations/20241217152316_v0_53_0.sql index 13e4a5ecc..b5edcd554 100644 --- a/sql/migrations/20241217152316_v0.53.0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20241217152316_v0_53_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Add value to enum type: "ConcurrencyLimitStrategy" ALTER TYPE "ConcurrencyLimitStrategy" ADD VALUE 'CANCEL_NEWEST'; -- Add value to enum type: "WorkflowRunStatus" @@ -42,6 +43,7 @@ CREATE INDEX "MessageQueueItem_queueId_expiresAt_readAfter_status_id_idx" ON "Me ); -- Function to publish NOTIFY message on insert into MessageQueueItem +-- +goose StatementBegin CREATE OR REPLACE FUNCTION notify_message_queue_item () RETURNS TRIGGER AS $$ BEGIN @@ -52,6 +54,7 @@ BEGIN RETURN NEW; END; $$ LANGUAGE plpgsql; +-- +goose StatementEnd -- Trigger to invoke the notify function after insert CREATE TRIGGER trigger_notify_message_queue_item @@ -59,9 +62,11 @@ AFTER INSERT ON "MessageQueueItem" FOR EACH ROW EXECUTE FUNCTION notify_message_queue_item (); -- Update the existing function to prevent internal name or slug to be a no-op +-- +goose StatementBegin CREATE OR REPLACE FUNCTION prevent_internal_name_or_slug () RETURNS trigger AS $$ BEGIN RETURN NEW; END; -$$ 
LANGUAGE plpgsql; \ No newline at end of file +$$ LANGUAGE plpgsql; +-- +goose StatementEnd diff --git a/sql/migrations/20250127160736_v0.54.0.sql b/cmd/hatchet-migrate/migrate/migrations/20250127160736_v0_54_0.sql similarity index 95% rename from sql/migrations/20250127160736_v0.54.0.sql rename to cmd/hatchet-migrate/migrate/migrations/20250127160736_v0_54_0.sql index 10c179e01..8effcdee5 100644 --- a/sql/migrations/20250127160736_v0.54.0.sql +++ b/cmd/hatchet-migrate/migrate/migrations/20250127160736_v0_54_0.sql @@ -1,3 +1,4 @@ +-- +goose Up -- Add value to enum type: "WorkflowRunStatus" ALTER TYPE "WorkflowRunStatus" ADD VALUE 'BACKOFF'; -- Add value to enum type: "StepRunStatus" diff --git a/cmd/hatchet-migrate/migrate/migrations/20250224162902_v0_55_0.sql b/cmd/hatchet-migrate/migrate/migrations/20250224162902_v0_55_0.sql new file mode 100644 index 000000000..13566f73e --- /dev/null +++ b/cmd/hatchet-migrate/migrate/migrations/20250224162902_v0_55_0.sql @@ -0,0 +1,9 @@ +-- +goose Up +-- +goose StatementBegin +-- Create enum type "TenantMajorEngineVersion" +CREATE TYPE "TenantMajorEngineVersion" AS ENUM ('V0', 'V1'); +-- Modify "Tenant" table +ALTER TABLE "Tenant" ADD COLUMN "version" "TenantMajorEngineVersion" NOT NULL DEFAULT 'V0'; + +ALTER TYPE "LeaseKind" ADD VALUE 'CONCURRENCY_STRATEGY'; +-- +goose StatementEnd diff --git a/cmd/hatchet-migrate/migrate/migrations/20250224171706_v1_0_0.sql b/cmd/hatchet-migrate/migrate/migrations/20250224171706_v1_0_0.sql new file mode 100644 index 000000000..84c10f8aa --- /dev/null +++ b/cmd/hatchet-migrate/migrate/migrations/20250224171706_v1_0_0.sql @@ -0,0 +1,2116 @@ +-- +goose Up +-- +goose StatementBegin +CREATE OR REPLACE FUNCTION get_v1_partitions_before_date( + targetTableName text, + targetDate date +) RETURNS TABLE(partition_name text) + LANGUAGE plpgsql AS +$$ +BEGIN + RETURN QUERY + SELECT + inhrelid::regclass::text AS partition_name + FROM + pg_inherits + WHERE + inhparent = targetTableName::regclass + AND 
substring(inhrelid::regclass::text, format('%s_(\d{8})', targetTableName)) ~ '^\d{8}' + AND (substring(inhrelid::regclass::text, format('%s_(\d{8})', targetTableName))::date) < targetDate; +END; +$$; + +CREATE OR REPLACE FUNCTION create_v1_range_partition( + targetTableName text, + targetDate date +) RETURNS integer + LANGUAGE plpgsql AS +$$ +DECLARE + targetDateStr varchar; + targetDatePlusOneDayStr varchar; + newTableName varchar; +BEGIN + SELECT to_char(targetDate, 'YYYYMMDD') INTO targetDateStr; + SELECT to_char(targetDate + INTERVAL '1 day', 'YYYYMMDD') INTO targetDatePlusOneDayStr; + SELECT lower(format('%s_%s', targetTableName, targetDateStr)) INTO newTableName; + -- exit if the table exists + IF EXISTS (SELECT 1 FROM pg_tables WHERE tablename = newTableName) THEN + RETURN 0; + END IF; + + EXECUTE + format('CREATE TABLE %s (LIKE %s INCLUDING INDEXES)', newTableName, targetTableName); + EXECUTE + format('ALTER TABLE %s SET ( + autovacuum_vacuum_scale_factor = ''0.1'', + autovacuum_analyze_scale_factor=''0.05'', + autovacuum_vacuum_threshold=''25'', + autovacuum_analyze_threshold=''25'', + autovacuum_vacuum_cost_delay=''10'', + autovacuum_vacuum_cost_limit=''1000'' + )', newTableName); + EXECUTE + format('ALTER TABLE %s ATTACH PARTITION %s FOR VALUES FROM (''%s'') TO (''%s'')', targetTableName, newTableName, targetDateStr, targetDatePlusOneDayStr); + RETURN 1; +END; +$$; + +-- https://stackoverflow.com/questions/8137112/unnest-array-by-one-level +CREATE OR REPLACE FUNCTION unnest_nd_1d(a anyarray, OUT a_1d anyarray) + RETURNS SETOF anyarray + LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE STRICT AS +$func$ +BEGIN -- null is covered by STRICT + IF a = '{}' THEN -- empty + a_1d = '{}'; + RETURN NEXT; + ELSE -- all other cases + FOREACH a_1d SLICE 1 IN ARRAY a LOOP + RETURN NEXT; + END LOOP; + END IF; +END +$func$; + +-- CreateTable +CREATE TABLE v1_queue ( + tenant_id UUID NOT NULL, + name TEXT NOT NULL, + last_active TIMESTAMP(3), + + CONSTRAINT v1_queue_pkey PRIMARY 
KEY (tenant_id, name) +); + +CREATE TYPE v1_sticky_strategy AS ENUM ('NONE', 'SOFT', 'HARD'); + +CREATE TYPE v1_task_initial_state AS ENUM ('QUEUED', 'CANCELLED', 'SKIPPED', 'FAILED'); + +-- We need a NONE strategy to allow for tasks which were previously using a concurrency strategy to +-- enqueue if the strategy is removed. +CREATE TYPE v1_concurrency_strategy AS ENUM ('NONE', 'GROUP_ROUND_ROBIN', 'CANCEL_IN_PROGRESS', 'CANCEL_NEWEST'); + +CREATE TABLE v1_workflow_concurrency ( + -- We need an id used for stable ordering to prevent deadlocks. We must process all concurrency + -- strategies on a workflow in the same order. + id bigint GENERATED ALWAYS AS IDENTITY, + workflow_id UUID NOT NULL, + workflow_version_id UUID NOT NULL, + -- If the strategy is NONE and we've removed all concurrency slots, we can set is_active to false + is_active BOOLEAN NOT NULL DEFAULT TRUE, + strategy v1_concurrency_strategy NOT NULL, + child_strategy_ids BIGINT[], + expression TEXT NOT NULL, + tenant_id UUID NOT NULL, + max_concurrency INTEGER NOT NULL, + CONSTRAINT v1_workflow_concurrency_pkey PRIMARY KEY (workflow_id, workflow_version_id, id) +); + +CREATE TABLE v1_step_concurrency ( + -- We need an id used for stable ordering to prevent deadlocks. We must process all concurrency + -- strategies on a step in the same order. 
+ id bigint GENERATED ALWAYS AS IDENTITY, + -- The parent_strategy_id exists if concurrency is defined at the workflow level + parent_strategy_id BIGINT, + workflow_id UUID NOT NULL, + workflow_version_id UUID NOT NULL, + step_id UUID NOT NULL, + -- If the strategy is NONE and we've removed all concurrency slots, we can set is_active to false + is_active BOOLEAN NOT NULL DEFAULT TRUE, + strategy v1_concurrency_strategy NOT NULL, + expression TEXT NOT NULL, + tenant_id UUID NOT NULL, + max_concurrency INTEGER NOT NULL, + CONSTRAINT v1_step_concurrency_pkey PRIMARY KEY (workflow_id, workflow_version_id, step_id, id) +); + +CREATE OR REPLACE FUNCTION create_v1_step_concurrency() +RETURNS trigger AS $$ +DECLARE + wf_concurrency_row v1_workflow_concurrency%ROWTYPE; + child_ids bigint[]; +BEGIN + IF NEW."concurrencyGroupExpression" IS NOT NULL THEN + -- Insert into v1_workflow_concurrency and capture the inserted row. + INSERT INTO v1_workflow_concurrency ( + workflow_id, + workflow_version_id, + strategy, + expression, + tenant_id, + max_concurrency + ) + SELECT + wf."id", + wv."id", + NEW."limitStrategy"::VARCHAR::v1_concurrency_strategy, + NEW."concurrencyGroupExpression", + wf."tenantId", + NEW."maxRuns" + FROM "WorkflowVersion" wv + JOIN "Workflow" wf ON wv."workflowId" = wf."id" + WHERE wv."id" = NEW."workflowVersionId" + RETURNING * INTO wf_concurrency_row; + + -- Insert into v1_step_concurrency and capture the inserted rows into a variable. 
+ WITH inserted_steps AS ( + INSERT INTO v1_step_concurrency ( + parent_strategy_id, + workflow_id, + workflow_version_id, + step_id, + strategy, + expression, + tenant_id, + max_concurrency + ) + SELECT + wf_concurrency_row.id, + s."workflowId", + s."workflowVersionId", + s."id", + NEW."limitStrategy"::VARCHAR::v1_concurrency_strategy, + NEW."concurrencyGroupExpression", + s."tenantId", + NEW."maxRuns" + FROM ( + SELECT + s."id", + wf."id" AS "workflowId", + wv."id" AS "workflowVersionId", + wf."tenantId" + FROM "Step" s + JOIN "Job" j ON s."jobId" = j."id" + JOIN "WorkflowVersion" wv ON j."workflowVersionId" = wv."id" + JOIN "Workflow" wf ON wv."workflowId" = wf."id" + WHERE + wv."id" = NEW."workflowVersionId" + AND j."kind" = 'DEFAULT' + ) s + RETURNING * + ) + SELECT array_remove(array_agg(t.id), NULL)::bigint[] INTO child_ids + FROM inserted_steps t; + + -- Update the workflow concurrency row using its primary key. + UPDATE v1_workflow_concurrency + SET child_strategy_ids = child_ids + WHERE workflow_id = wf_concurrency_row.workflow_id + AND workflow_version_id = wf_concurrency_row.workflow_version_id + AND id = wf_concurrency_row.id; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER trg_create_v1_step_concurrency +AFTER INSERT ON "WorkflowConcurrency" +FOR EACH ROW +EXECUTE FUNCTION create_v1_step_concurrency(); + +-- CreateTable +CREATE TABLE v1_task ( + id bigint GENERATED ALWAYS AS IDENTITY, + inserted_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP, + tenant_id UUID NOT NULL, + queue TEXT NOT NULL, + action_id TEXT NOT NULL, + step_id UUID NOT NULL, + step_readable_id TEXT NOT NULL, + workflow_id UUID NOT NULL, + workflow_version_id UUID NOT NULL, + workflow_run_id UUID NOT NULL, + schedule_timeout TEXT NOT NULL, + step_timeout TEXT, + priority INTEGER DEFAULT 1, + sticky v1_sticky_strategy NOT NULL, + desired_worker_id UUID, + external_id UUID NOT NULL, + display_name TEXT NOT NULL, + input JSONB NOT NULL, + retry_count INTEGER NOT 
NULL DEFAULT 0, + internal_retry_count INTEGER NOT NULL DEFAULT 0, + app_retry_count INTEGER NOT NULL DEFAULT 0, + -- step_index is relevant for tracking down the correct SIGNAL_COMPLETED event on a + -- replay of a child workflow + step_index BIGINT NOT NULL, + additional_metadata JSONB, + dag_id BIGINT, + dag_inserted_at TIMESTAMPTZ, + parent_task_external_id UUID, + parent_task_id BIGINT, + parent_task_inserted_at TIMESTAMPTZ, + child_index BIGINT, + child_key TEXT, + initial_state v1_task_initial_state NOT NULL DEFAULT 'QUEUED', + initial_state_reason TEXT, + concurrency_parent_strategy_ids BIGINT[], + concurrency_strategy_ids BIGINT[], + concurrency_keys TEXT[], + retry_backoff_factor DOUBLE PRECISION, + retry_max_backoff INTEGER, + CONSTRAINT v1_task_pkey PRIMARY KEY (id, inserted_at) +) PARTITION BY RANGE(inserted_at); + +CREATE TABLE v1_lookup_table ( + tenant_id UUID NOT NULL, + external_id UUID NOT NULL, + task_id BIGINT, + dag_id BIGINT, + inserted_at TIMESTAMPTZ NOT NULL, + + PRIMARY KEY (external_id) +); + +CREATE TYPE v1_task_event_type AS ENUM ( + 'COMPLETED', + 'FAILED', + 'CANCELLED', + 'SIGNAL_CREATED', + 'SIGNAL_COMPLETED' +); + +-- CreateTable +CREATE TABLE v1_task_event ( + id bigint GENERATED ALWAYS AS IDENTITY, + tenant_id UUID NOT NULL, + task_id bigint NOT NULL, + task_inserted_at TIMESTAMPTZ NOT NULL, + retry_count INTEGER NOT NULL, + event_type v1_task_event_type NOT NULL, + -- The event key is an optional field that can be used to uniquely identify an event. This is + -- used for signal events to ensure that we don't create duplicate signals. 
+ event_key TEXT, + created_at TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + data JSONB, + CONSTRAINT v1_task_event_pkey PRIMARY KEY (task_id, task_inserted_at, id) +) PARTITION BY RANGE(task_inserted_at); + +-- Create unique index on (tenant_id, task_id, event_key) when event_key is not null +CREATE UNIQUE INDEX v1_task_event_event_key_unique_idx ON v1_task_event ( + tenant_id ASC, + task_id ASC, + task_inserted_at ASC, + event_type ASC, + event_key ASC +) WHERE event_key IS NOT NULL; + +-- CreateTable +CREATE TABLE v1_task_expression_eval ( + key TEXT NOT NULL, + task_id BIGINT NOT NULL, + task_inserted_at TIMESTAMPTZ NOT NULL, + value_str TEXT, + value_int INTEGER, + kind "StepExpressionKind" NOT NULL, + + CONSTRAINT v1_task_expression_eval_pkey PRIMARY KEY (task_id, task_inserted_at, kind, key) +); + +-- CreateTable +CREATE TABLE v1_queue_item ( + id bigint GENERATED ALWAYS AS IDENTITY, + tenant_id UUID NOT NULL, + queue TEXT NOT NULL, + task_id bigint NOT NULL, + task_inserted_at TIMESTAMPTZ NOT NULL, + external_id UUID NOT NULL, + action_id TEXT NOT NULL, + step_id UUID NOT NULL, + workflow_id UUID NOT NULL, + workflow_run_id UUID NOT NULL, + schedule_timeout_at TIMESTAMP(3), + step_timeout TEXT, + priority INTEGER NOT NULL DEFAULT 1, + sticky v1_sticky_strategy NOT NULL, + desired_worker_id UUID, + retry_count INTEGER NOT NULL DEFAULT 0, + CONSTRAINT v1_queue_item_pkey PRIMARY KEY (id) +); + +alter table v1_queue_item set ( + autovacuum_vacuum_scale_factor = '0.1', + autovacuum_analyze_scale_factor='0.05', + autovacuum_vacuum_threshold='25', + autovacuum_analyze_threshold='25', + autovacuum_vacuum_cost_delay='10', + autovacuum_vacuum_cost_limit='1000' +); + +CREATE INDEX v1_queue_item_list_idx ON v1_queue_item ( + tenant_id ASC, + queue ASC, + priority DESC, + id ASC +); + +CREATE INDEX v1_queue_item_task_idx ON v1_queue_item ( + task_id ASC, + task_inserted_at ASC, + retry_count ASC +); + +-- CreateTable +CREATE TABLE v1_task_runtime ( + task_id bigint 
NOT NULL, + task_inserted_at TIMESTAMPTZ NOT NULL, + retry_count INTEGER NOT NULL, + worker_id UUID, + tenant_id UUID NOT NULL, + timeout_at TIMESTAMP(3) NOT NULL, + + CONSTRAINT v1_task_runtime_pkey PRIMARY KEY (task_id, task_inserted_at, retry_count) +); + +CREATE INDEX v1_task_runtime_tenantId_workerId_idx ON v1_task_runtime (tenant_id ASC, worker_id ASC) WHERE worker_id IS NOT NULL; + +CREATE INDEX v1_task_runtime_tenantId_timeoutAt_idx ON v1_task_runtime (tenant_id ASC, timeout_at ASC); + +alter table v1_task_runtime set ( + autovacuum_vacuum_scale_factor = '0.1', + autovacuum_analyze_scale_factor='0.05', + autovacuum_vacuum_threshold='25', + autovacuum_analyze_threshold='25', + autovacuum_vacuum_cost_delay='10', + autovacuum_vacuum_cost_limit='1000' +); + +CREATE TYPE v1_match_kind AS ENUM ('TRIGGER', 'SIGNAL'); + +CREATE TABLE v1_match ( + id bigint GENERATED ALWAYS AS IDENTITY, + tenant_id UUID NOT NULL, + kind v1_match_kind NOT NULL, + is_satisfied BOOLEAN NOT NULL DEFAULT FALSE, + signal_task_id bigint, + signal_task_inserted_at timestamptz, + signal_external_id UUID, + signal_key TEXT, + -- references the parent DAG for the task, which we can use to get input + additional metadata + trigger_dag_id bigint, + trigger_dag_inserted_at timestamptz, + -- references the step id to instantiate the task + trigger_step_id UUID, + trigger_step_index BIGINT, + -- references the external id for the new task + trigger_external_id UUID, + trigger_workflow_run_id UUID, + trigger_parent_task_external_id UUID, + trigger_parent_task_id BIGINT, + trigger_parent_task_inserted_at timestamptz, + trigger_child_index BIGINT, + trigger_child_key TEXT, + -- references the existing task id, which may be set when we're replaying a task + trigger_existing_task_id bigint, + trigger_existing_task_inserted_at timestamptz, + CONSTRAINT v1_match_pkey PRIMARY KEY (id) +); + +CREATE TYPE v1_event_type AS ENUM ('USER', 'INTERNAL'); + +-- Provides information to the caller about the action to 
take. This is used to differentiate +-- negative conditions from positive conditions. For example, if a task is waiting for a set of +-- tasks to fail, the success of all tasks would be a CANCEL condition, and the failure of any +-- task would be a QUEUE condition. Different actions are implicitly different groups of conditions. +CREATE TYPE v1_match_condition_action AS ENUM ('CREATE', 'QUEUE', 'CANCEL', 'SKIP'); + +CREATE TABLE v1_match_condition ( + v1_match_id bigint NOT NULL, + id bigint GENERATED ALWAYS AS IDENTITY, + tenant_id UUID NOT NULL, + registered_at timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP, + event_type v1_event_type NOT NULL, + -- for INTERNAL events, this will correspond to a v1_task_event_type value + event_key TEXT NOT NULL, + event_resource_hint TEXT, + -- readable_data_key is used as the key when constructing the aggregated data for the v1_match + readable_data_key TEXT NOT NULL, + is_satisfied BOOLEAN NOT NULL DEFAULT FALSE, + action v1_match_condition_action NOT NULL DEFAULT 'QUEUE', + or_group_id UUID NOT NULL, + expression TEXT, + data JSONB, + CONSTRAINT v1_match_condition_pkey PRIMARY KEY (v1_match_id, id) +); + +CREATE INDEX v1_match_condition_filter_idx ON v1_match_condition ( + tenant_id ASC, + event_type ASC, + event_key ASC, + is_satisfied ASC, + event_resource_hint ASC +); + +CREATE TABLE v1_dag ( + id bigint GENERATED ALWAYS AS IDENTITY, + inserted_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP, + tenant_id UUID NOT NULL, + external_id UUID NOT NULL, + display_name TEXT NOT NULL, + workflow_id UUID NOT NULL, + workflow_version_id UUID NOT NULL, + parent_task_external_id UUID, + CONSTRAINT v1_dag_pkey PRIMARY KEY (id, inserted_at) +) PARTITION BY RANGE(inserted_at); + +CREATE TABLE v1_dag_to_task ( + dag_id BIGINT NOT NULL, + dag_inserted_at TIMESTAMPTZ NOT NULL, + task_id BIGINT NOT NULL, + task_inserted_at TIMESTAMPTZ NOT NULL, + CONSTRAINT v1_dag_to_task_pkey PRIMARY KEY (dag_id, dag_inserted_at, task_id, 
task_inserted_at) +); + +CREATE TABLE v1_dag_data ( + dag_id BIGINT NOT NULL, + dag_inserted_at TIMESTAMPTZ NOT NULL, + input JSONB NOT NULL, + additional_metadata JSONB, + CONSTRAINT v1_dag_input_pkey PRIMARY KEY (dag_id, dag_inserted_at) +); + +-- CreateTable +CREATE TABLE v1_workflow_concurrency_slot ( + sort_id BIGINT NOT NULL, + tenant_id UUID NOT NULL, + workflow_id UUID NOT NULL, + workflow_version_id UUID NOT NULL, + workflow_run_id UUID NOT NULL, + strategy_id BIGINT NOT NULL, + completed_child_strategy_ids BIGINT[], + child_strategy_ids BIGINT[], + priority INTEGER NOT NULL DEFAULT 1, + key TEXT NOT NULL, + is_filled BOOLEAN NOT NULL DEFAULT FALSE, + CONSTRAINT v1_workflow_concurrency_slot_pkey PRIMARY KEY (strategy_id, workflow_version_id, workflow_run_id) +); + +CREATE INDEX v1_workflow_concurrency_slot_query_idx ON v1_workflow_concurrency_slot (tenant_id, strategy_id ASC, key ASC, priority DESC, sort_id ASC); + +-- CreateTable +CREATE TABLE v1_concurrency_slot ( + sort_id BIGINT GENERATED ALWAYS AS IDENTITY, + task_id BIGINT NOT NULL, + task_inserted_at TIMESTAMPTZ NOT NULL, + task_retry_count INTEGER NOT NULL, + external_id UUID NOT NULL, + tenant_id UUID NOT NULL, + workflow_id UUID NOT NULL, + workflow_version_id UUID NOT NULL, + workflow_run_id UUID NOT NULL, + strategy_id BIGINT NOT NULL, + parent_strategy_id BIGINT, + priority INTEGER NOT NULL DEFAULT 1, + key TEXT NOT NULL, + is_filled BOOLEAN NOT NULL DEFAULT FALSE, + next_parent_strategy_ids BIGINT[], + next_strategy_ids BIGINT[], + next_keys TEXT[], + queue_to_notify TEXT NOT NULL, + schedule_timeout_at TIMESTAMP(3) NOT NULL, + CONSTRAINT v1_concurrency_slot_pkey PRIMARY KEY (task_id, task_inserted_at, task_retry_count, strategy_id) +); + +CREATE INDEX v1_concurrency_slot_query_idx ON v1_concurrency_slot (tenant_id, strategy_id ASC, key ASC, sort_id ASC); + +-- When concurrency slot is CREATED, we should check whether the parent concurrency slot exists; if not, we should create +-- the parent 
concurrency slot as well. +CREATE OR REPLACE FUNCTION after_v1_concurrency_slot_insert_function() +RETURNS trigger AS $$ +BEGIN + WITH parent_slot AS ( + SELECT + cs.workflow_id, cs.workflow_version_id, cs.workflow_run_id, cs.strategy_id, cs.parent_strategy_id + FROM + new_table cs + WHERE + cs.parent_strategy_id IS NOT NULL + ), parent_to_child_strategy_ids AS ( + SELECT + wc.child_strategy_ids, wc.id + FROM + parent_slot ps + JOIN v1_workflow_concurrency wc ON wc.workflow_id = ps.workflow_id AND wc.workflow_version_id = ps.workflow_version_id AND wc.id = ps.parent_strategy_id + ) + INSERT INTO v1_workflow_concurrency_slot ( + sort_id, + tenant_id, + workflow_id, + workflow_version_id, + workflow_run_id, + strategy_id, + child_strategy_ids, + priority, + key + ) + SELECT + cs.sort_id, + cs.tenant_id, + cs.workflow_id, + cs.workflow_version_id, + cs.workflow_run_id, + cs.parent_strategy_id, + pcs.child_strategy_ids, + cs.priority, + cs.key + FROM + new_table cs + JOIN + parent_to_child_strategy_ids pcs ON pcs.id = cs.parent_strategy_id + WHERE + cs.parent_strategy_id IS NOT NULL + ON CONFLICT (strategy_id, workflow_version_id, workflow_run_id) DO NOTHING; + + -- If the v1_step_concurrency strategy is not active, we set it to active. 
+ WITH inactive_strategies AS ( + SELECT + strategy.* + FROM + new_table cs + JOIN + v1_step_concurrency strategy ON strategy.workflow_id = cs.workflow_id AND strategy.workflow_version_id = cs.workflow_version_id AND strategy.id = cs.strategy_id + WHERE + strategy.is_active = FALSE + ORDER BY + strategy.id + FOR UPDATE + ) + UPDATE v1_step_concurrency strategy + SET is_active = TRUE + FROM inactive_strategies + WHERE + strategy.workflow_id = inactive_strategies.workflow_id AND + strategy.workflow_version_id = inactive_strategies.workflow_version_id AND + strategy.step_id = inactive_strategies.step_id AND + strategy.id = inactive_strategies.id; + + RETURN NULL; +END; + +$$ LANGUAGE plpgsql; + +CREATE TRIGGER after_v1_concurrency_slot_insert +AFTER INSERT ON v1_concurrency_slot +REFERENCING NEW TABLE AS new_table +FOR EACH STATEMENT +EXECUTE FUNCTION after_v1_concurrency_slot_insert_function(); + +CREATE OR REPLACE FUNCTION after_v1_concurrency_slot_delete_function() +RETURNS trigger AS $$ +BEGIN + -- When v1_concurrency_slot is DELETED, we add it to the completed_child_strategy_ids on the parent. 
+ WITH parent_slot AS ( + SELECT + cs.workflow_id, + cs.workflow_version_id, + cs.workflow_run_id, + cs.strategy_id, + cs.parent_strategy_id + FROM + deleted_rows cs + WHERE + cs.parent_strategy_id IS NOT NULL + ), locked_parent_slots AS ( + SELECT + wcs.strategy_id, + wcs.workflow_version_id, + wcs.workflow_run_id, + cs.strategy_id AS child_strategy_id + FROM + v1_workflow_concurrency_slot wcs + JOIN + parent_slot cs ON (wcs.strategy_id, wcs.workflow_version_id, wcs.workflow_run_id) = (cs.parent_strategy_id, cs.workflow_version_id, cs.workflow_run_id) + ORDER BY + wcs.strategy_id, + wcs.workflow_version_id, + wcs.workflow_run_id + FOR UPDATE + ) + UPDATE v1_workflow_concurrency_slot wcs + SET completed_child_strategy_ids = ARRAY( + SELECT DISTINCT UNNEST(ARRAY_APPEND(wcs.completed_child_strategy_ids, cs.child_strategy_id)) + ) + FROM locked_parent_slots cs + WHERE + wcs.strategy_id = cs.strategy_id + AND wcs.workflow_version_id = cs.workflow_version_id + AND wcs.workflow_run_id = cs.workflow_run_id; + + RETURN NULL; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER after_v1_concurrency_slot_delete +AFTER DELETE ON v1_concurrency_slot +REFERENCING OLD TABLE AS deleted_rows +FOR EACH STATEMENT +EXECUTE FUNCTION after_v1_concurrency_slot_delete_function(); + +-- After we update the v1_workflow_concurrency_slot, we'd like to check whether all child_strategy_ids are +-- in the completed_child_strategy_ids. If so, we should delete the v1_workflow_concurrency_slot. 
+CREATE OR REPLACE FUNCTION after_v1_workflow_concurrency_slot_update_function() +RETURNS trigger AS $$ +BEGIN + -- place a lock on new_table + WITH slots_to_delete AS ( + SELECT + wcs.strategy_id, wcs.workflow_version_id, wcs.workflow_run_id + FROM + new_table wcs + WHERE + CARDINALITY(wcs.child_strategy_ids) = CARDINALITY(wcs.completed_child_strategy_ids) + ORDER BY + wcs.strategy_id, wcs.workflow_version_id, wcs.workflow_run_id + FOR UPDATE + ) + DELETE FROM + v1_workflow_concurrency_slot wcs + WHERE + (strategy_id, workflow_version_id, workflow_run_id) IN ( + SELECT + strategy_id, workflow_version_id, workflow_run_id + FROM + slots_to_delete + ); + + RETURN NULL; +END; + +$$ LANGUAGE plpgsql; + +CREATE TRIGGER after_v1_workflow_concurrency_slot_update +AFTER UPDATE ON v1_workflow_concurrency_slot +REFERENCING NEW TABLE AS new_table +FOR EACH STATEMENT +EXECUTE FUNCTION after_v1_workflow_concurrency_slot_update_function(); + +CREATE OR REPLACE FUNCTION after_v1_task_runtime_delete_function() +RETURNS trigger AS $$ +BEGIN + WITH slots_to_delete AS ( + SELECT + cs.task_inserted_at, cs.task_id, cs.task_retry_count, cs.key + FROM + deleted_rows d + JOIN v1_concurrency_slot cs ON cs.task_id = d.task_id AND cs.task_inserted_at = d.task_inserted_at AND cs.task_retry_count = d.retry_count + ORDER BY + cs.task_id, cs.task_inserted_at, cs.task_retry_count, cs.key + FOR UPDATE + ) + DELETE FROM + v1_concurrency_slot cs + WHERE + (task_inserted_at, task_id, task_retry_count, key) IN ( + SELECT + task_inserted_at, task_id, task_retry_count, key + FROM + slots_to_delete + ); + + RETURN NULL; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER after_v1_task_runtime_delete +AFTER DELETE ON v1_task_runtime +REFERENCING OLD TABLE AS deleted_rows +FOR EACH STATEMENT +EXECUTE FUNCTION after_v1_task_runtime_delete_function(); + +CREATE TABLE v1_retry_queue_item ( + task_id BIGINT NOT NULL, + task_inserted_at TIMESTAMPTZ NOT NULL, + task_retry_count INTEGER NOT NULL, + retry_after 
TIMESTAMPTZ NOT NULL, + tenant_id UUID NOT NULL, + + CONSTRAINT v1_retry_queue_item_pkey PRIMARY KEY (task_id, task_inserted_at, task_retry_count) +); + +CREATE INDEX v1_retry_queue_item_tenant_id_retry_after_idx ON v1_retry_queue_item (tenant_id ASC, retry_after ASC); + +CREATE OR REPLACE FUNCTION v1_task_insert_function() +RETURNS TRIGGER AS $$ +DECLARE + rec RECORD; +BEGIN + FOR rec IN SELECT * FROM new_table WHERE initial_state = 'QUEUED' AND concurrency_strategy_ids[1] IS NOT NULL AND concurrency_keys[1] IS NULL LOOP + RAISE WARNING 'New table row: %', row_to_json(rec); + END LOOP; + + -- When a task is inserted in a non-queued state, we should add all relevant completed_child_strategy_ids to the parent + -- concurrency slots. + WITH parent_slots AS ( + SELECT + nt.workflow_id, + nt.workflow_version_id, + nt.workflow_run_id, + UNNEST(nt.concurrency_strategy_ids) AS strategy_id, + UNNEST(nt.concurrency_parent_strategy_ids) AS parent_strategy_id + FROM + new_table nt + WHERE + cardinality(nt.concurrency_parent_strategy_ids) > 0 + AND nt.initial_state != 'QUEUED' + ), locked_parent_slots AS ( + SELECT + wcs.workflow_id, + wcs.workflow_version_id, + wcs.workflow_run_id, + wcs.strategy_id, + cs.strategy_id AS child_strategy_id + FROM + v1_workflow_concurrency_slot wcs + JOIN + parent_slots cs ON (wcs.strategy_id, wcs.workflow_version_id, wcs.workflow_run_id) = (cs.parent_strategy_id, cs.workflow_version_id, cs.workflow_run_id) + ORDER BY + wcs.strategy_id, wcs.workflow_version_id, wcs.workflow_run_id + FOR UPDATE + ) + UPDATE + v1_workflow_concurrency_slot wcs + SET + -- get unique completed_child_strategy_ids after append with cs.strategy_id + completed_child_strategy_ids = ARRAY( + SELECT + DISTINCT UNNEST(ARRAY_APPEND(wcs.completed_child_strategy_ids, cs.child_strategy_id)) + ) + FROM + locked_parent_slots cs + WHERE + wcs.strategy_id = cs.strategy_id + AND wcs.workflow_version_id = cs.workflow_version_id + AND wcs.workflow_run_id = cs.workflow_run_id; + + WITH 
new_slot_rows AS ( + SELECT + id, + inserted_at, + retry_count, + tenant_id, + priority, + concurrency_parent_strategy_ids[1] AS parent_strategy_id, + CASE + WHEN array_length(concurrency_parent_strategy_ids, 1) > 1 THEN concurrency_parent_strategy_ids[2:array_length(concurrency_parent_strategy_ids, 1)] + ELSE '{}'::bigint[] + END AS next_parent_strategy_ids, + concurrency_strategy_ids[1] AS strategy_id, + external_id, + workflow_run_id, + CASE + WHEN array_length(concurrency_strategy_ids, 1) > 1 THEN concurrency_strategy_ids[2:array_length(concurrency_strategy_ids, 1)] + ELSE '{}'::bigint[] + END AS next_strategy_ids, + concurrency_keys[1] AS key, + CASE + WHEN array_length(concurrency_keys, 1) > 1 THEN concurrency_keys[2:array_length(concurrency_keys, 1)] + ELSE '{}'::text[] + END AS next_keys, + workflow_id, + workflow_version_id, + queue, + CURRENT_TIMESTAMP + convert_duration_to_interval(schedule_timeout) AS schedule_timeout_at + FROM new_table + WHERE initial_state = 'QUEUED' AND concurrency_strategy_ids[1] IS NOT NULL + ) + INSERT INTO v1_concurrency_slot ( + task_id, + task_inserted_at, + task_retry_count, + external_id, + tenant_id, + workflow_id, + workflow_version_id, + workflow_run_id, + parent_strategy_id, + next_parent_strategy_ids, + strategy_id, + next_strategy_ids, + priority, + key, + next_keys, + queue_to_notify, + schedule_timeout_at + ) + SELECT + id, + inserted_at, + retry_count, + external_id, + tenant_id, + workflow_id, + workflow_version_id, + workflow_run_id, + parent_strategy_id, + next_parent_strategy_ids, + strategy_id, + next_strategy_ids, + COALESCE(priority, 1), + key, + next_keys, + queue, + schedule_timeout_at + FROM new_slot_rows; + + INSERT INTO v1_queue_item ( + tenant_id, + queue, + task_id, + task_inserted_at, + external_id, + action_id, + step_id, + workflow_id, + workflow_run_id, + schedule_timeout_at, + step_timeout, + priority, + sticky, + desired_worker_id, + retry_count + ) + SELECT + tenant_id, + queue, + id, + 
inserted_at, + external_id, + action_id, + step_id, + workflow_id, + workflow_run_id, + CURRENT_TIMESTAMP + convert_duration_to_interval(schedule_timeout), + step_timeout, + COALESCE(priority, 1), + sticky, + desired_worker_id, + retry_count + FROM new_table + WHERE initial_state = 'QUEUED' AND concurrency_strategy_ids[1] IS NULL; + + INSERT INTO v1_dag_to_task ( + dag_id, + dag_inserted_at, + task_id, + task_inserted_at + ) + SELECT + dag_id, + dag_inserted_at, + id, + inserted_at + FROM new_table + WHERE dag_id IS NOT NULL AND dag_inserted_at IS NOT NULL; + + INSERT INTO v1_lookup_table ( + external_id, + tenant_id, + task_id, + inserted_at + ) + SELECT + external_id, + tenant_id, + id, + inserted_at + FROM new_table + ON CONFLICT (external_id) DO NOTHING; + + RETURN NULL; +END; +$$ +LANGUAGE plpgsql; + +CREATE TRIGGER v1_task_insert_trigger +AFTER INSERT ON v1_task +REFERENCING NEW TABLE AS new_table +FOR EACH STATEMENT +EXECUTE PROCEDURE v1_task_insert_function(); + +CREATE OR REPLACE FUNCTION v1_task_update_function() +RETURNS TRIGGER AS +$$ +BEGIN + WITH new_retry_rows AS ( + SELECT + nt.id, + nt.inserted_at, + nt.retry_count, + nt.tenant_id, + -- Convert the retry_after based on min(retry_backoff_factor ^ retry_count, retry_max_backoff) + NOW() + (LEAST(nt.retry_max_backoff, POWER(nt.retry_backoff_factor, nt.app_retry_count)) * interval '1 second') AS retry_after + FROM new_table nt + JOIN old_table ot ON ot.id = nt.id + WHERE nt.initial_state = 'QUEUED' + AND nt.retry_backoff_factor IS NOT NULL + AND ot.app_retry_count IS DISTINCT FROM nt.app_retry_count + AND nt.app_retry_count != 0 + ) + INSERT INTO v1_retry_queue_item ( + task_id, + task_inserted_at, + task_retry_count, + retry_after, + tenant_id + ) + SELECT + id, + inserted_at, + retry_count, + retry_after, + tenant_id + FROM new_retry_rows; + + WITH new_slot_rows AS ( + SELECT + nt.id, + nt.inserted_at, + nt.retry_count, + nt.tenant_id, + nt.workflow_run_id, + nt.external_id, + 
nt.concurrency_parent_strategy_ids[1] AS parent_strategy_id, + CASE + WHEN array_length(nt.concurrency_parent_strategy_ids, 1) > 1 THEN nt.concurrency_parent_strategy_ids[2:array_length(nt.concurrency_parent_strategy_ids, 1)] + ELSE '{}'::bigint[] + END AS next_parent_strategy_ids, + nt.concurrency_strategy_ids[1] AS strategy_id, + CASE + WHEN array_length(nt.concurrency_strategy_ids, 1) > 1 THEN nt.concurrency_strategy_ids[2:array_length(nt.concurrency_strategy_ids, 1)] + ELSE '{}'::bigint[] + END AS next_strategy_ids, + nt.concurrency_keys[1] AS key, + CASE + WHEN array_length(nt.concurrency_keys, 1) > 1 THEN nt.concurrency_keys[2:array_length(nt.concurrency_keys, 1)] + ELSE '{}'::text[] + END AS next_keys, + nt.workflow_id, + nt.workflow_version_id, + nt.queue, + CURRENT_TIMESTAMP + convert_duration_to_interval(nt.schedule_timeout) AS schedule_timeout_at + FROM new_table nt + JOIN old_table ot ON ot.id = nt.id + WHERE nt.initial_state = 'QUEUED' + AND nt.concurrency_strategy_ids[1] IS NOT NULL + AND (nt.retry_backoff_factor IS NULL OR ot.app_retry_count IS NOT DISTINCT FROM nt.app_retry_count OR nt.app_retry_count = 0) + AND ot.retry_count IS DISTINCT FROM nt.retry_count + ) + INSERT INTO v1_concurrency_slot ( + task_id, + task_inserted_at, + task_retry_count, + external_id, + tenant_id, + workflow_id, + workflow_version_id, + workflow_run_id, + parent_strategy_id, + next_parent_strategy_ids, + strategy_id, + next_strategy_ids, + priority, + key, + next_keys, + queue_to_notify, + schedule_timeout_at + ) + SELECT + id, + inserted_at, + retry_count, + external_id, + tenant_id, + workflow_id, + workflow_version_id, + workflow_run_id, + parent_strategy_id, + next_parent_strategy_ids, + strategy_id, + next_strategy_ids, + 4, + key, + next_keys, + queue, + schedule_timeout_at + FROM new_slot_rows; + + INSERT INTO v1_queue_item ( + tenant_id, + queue, + task_id, + task_inserted_at, + external_id, + action_id, + step_id, + workflow_id, + workflow_run_id, + 
schedule_timeout_at, + step_timeout, + priority, + sticky, + desired_worker_id, + retry_count + ) + SELECT + nt.tenant_id, + nt.queue, + nt.id, + nt.inserted_at, + nt.external_id, + nt.action_id, + nt.step_id, + nt.workflow_id, + nt.workflow_run_id, + CURRENT_TIMESTAMP + convert_duration_to_interval(nt.schedule_timeout), + nt.step_timeout, + 4, + nt.sticky, + nt.desired_worker_id, + nt.retry_count + FROM new_table nt + JOIN old_table ot ON ot.id = nt.id + WHERE nt.initial_state = 'QUEUED' + AND nt.concurrency_strategy_ids[1] IS NULL + AND (nt.retry_backoff_factor IS NULL OR ot.app_retry_count IS NOT DISTINCT FROM nt.app_retry_count OR nt.app_retry_count = 0) + AND ot.retry_count IS DISTINCT FROM nt.retry_count; + + RETURN NULL; +END; +$$ +LANGUAGE plpgsql; + +CREATE TRIGGER v1_task_update_trigger +AFTER UPDATE ON v1_task +REFERENCING NEW TABLE AS new_table OLD TABLE AS old_table +FOR EACH STATEMENT +EXECUTE PROCEDURE v1_task_update_function(); + +CREATE OR REPLACE FUNCTION v1_retry_queue_item_delete_function() +RETURNS TRIGGER AS +$$ +BEGIN + WITH new_slot_rows AS ( + SELECT + t.id, + t.inserted_at, + t.retry_count, + t.tenant_id, + t.workflow_run_id, + t.external_id, + t.concurrency_parent_strategy_ids[1] AS parent_strategy_id, + CASE + WHEN array_length(t.concurrency_parent_strategy_ids, 1) > 1 THEN t.concurrency_parent_strategy_ids[2:array_length(t.concurrency_parent_strategy_ids, 1)] + ELSE '{}'::bigint[] + END AS next_parent_strategy_ids, + t.concurrency_strategy_ids[1] AS strategy_id, + CASE + WHEN array_length(t.concurrency_strategy_ids, 1) > 1 THEN t.concurrency_strategy_ids[2:array_length(t.concurrency_strategy_ids, 1)] + ELSE '{}'::bigint[] + END AS next_strategy_ids, + t.concurrency_keys[1] AS key, + CASE + WHEN array_length(t.concurrency_keys, 1) > 1 THEN t.concurrency_keys[2:array_length(t.concurrency_keys, 1)] + ELSE '{}'::text[] + END AS next_keys, + t.workflow_id, + t.workflow_version_id, + t.queue, + CURRENT_TIMESTAMP + 
convert_duration_to_interval(t.schedule_timeout) AS schedule_timeout_at + FROM deleted_rows dr + JOIN + v1_task t ON t.id = dr.task_id AND t.inserted_at = dr.task_inserted_at + WHERE + dr.retry_after <= NOW() + AND t.initial_state = 'QUEUED' + AND t.concurrency_strategy_ids[1] IS NOT NULL + ) + INSERT INTO v1_concurrency_slot ( + task_id, + task_inserted_at, + task_retry_count, + external_id, + tenant_id, + workflow_id, + workflow_version_id, + workflow_run_id, + parent_strategy_id, + next_parent_strategy_ids, + strategy_id, + next_strategy_ids, + priority, + key, + next_keys, + queue_to_notify, + schedule_timeout_at + ) + SELECT + id, + inserted_at, + retry_count, + external_id, + tenant_id, + workflow_id, + workflow_version_id, + workflow_run_id, + parent_strategy_id, + next_parent_strategy_ids, + strategy_id, + next_strategy_ids, + 4, + key, + next_keys, + queue, + schedule_timeout_at + FROM new_slot_rows; + + WITH tasks AS ( + SELECT + t.* + FROM + deleted_rows dr + JOIN v1_task t ON t.id = dr.task_id AND t.inserted_at = dr.task_inserted_at + WHERE + dr.retry_after <= NOW() + AND t.initial_state = 'QUEUED' + AND t.concurrency_strategy_ids[1] IS NULL + ) + INSERT INTO v1_queue_item ( + tenant_id, + queue, + task_id, + task_inserted_at, + external_id, + action_id, + step_id, + workflow_id, + workflow_run_id, + schedule_timeout_at, + step_timeout, + priority, + sticky, + desired_worker_id, + retry_count + ) + SELECT + tenant_id, + queue, + id, + inserted_at, + external_id, + action_id, + step_id, + workflow_id, + workflow_run_id, + CURRENT_TIMESTAMP + convert_duration_to_interval(schedule_timeout), + step_timeout, + 4, + sticky, + desired_worker_id, + retry_count + FROM tasks; + + RETURN NULL; +END; +$$ +LANGUAGE plpgsql; + +CREATE TRIGGER v1_retry_queue_item_delete_trigger +AFTER DELETE ON v1_retry_queue_item +REFERENCING OLD TABLE AS deleted_rows +FOR EACH STATEMENT +EXECUTE PROCEDURE v1_retry_queue_item_delete_function(); + +CREATE OR REPLACE FUNCTION 
v1_concurrency_slot_update_function() +RETURNS TRIGGER AS +$$ +BEGIN + -- If the concurrency slot has next_keys, insert a new slot for the next key + WITH new_slot_rows AS ( + SELECT + t.id, + t.inserted_at, + t.retry_count, + t.tenant_id, + t.priority, + t.queue, + t.workflow_run_id, + t.external_id, + nt.next_parent_strategy_ids[1] AS parent_strategy_id, + CASE + WHEN array_length(nt.next_parent_strategy_ids, 1) > 1 THEN nt.next_parent_strategy_ids[2:array_length(nt.next_parent_strategy_ids, 1)] + ELSE '{}'::bigint[] + END AS next_parent_strategy_ids, + nt.next_strategy_ids[1] AS strategy_id, + CASE + WHEN array_length(nt.next_strategy_ids, 1) > 1 THEN nt.next_strategy_ids[2:array_length(nt.next_strategy_ids, 1)] + ELSE '{}'::bigint[] + END AS next_strategy_ids, + nt.next_keys[1] AS key, + CASE + WHEN array_length(nt.next_keys, 1) > 1 THEN nt.next_keys[2:array_length(nt.next_keys, 1)] + ELSE '{}'::text[] + END AS next_keys, + t.workflow_id, + t.workflow_version_id, + CURRENT_TIMESTAMP + convert_duration_to_interval(t.schedule_timeout) AS schedule_timeout_at + FROM new_table nt + JOIN old_table ot USING (task_id, task_inserted_at, task_retry_count, key) + JOIN v1_task t ON t.id = nt.task_id AND t.inserted_at = nt.task_inserted_at + WHERE + COALESCE(array_length(nt.next_keys, 1), 0) != 0 + AND nt.is_filled = TRUE + AND nt.is_filled IS DISTINCT FROM ot.is_filled + ) + INSERT INTO v1_concurrency_slot ( + task_id, + task_inserted_at, + task_retry_count, + external_id, + tenant_id, + workflow_id, + workflow_version_id, + workflow_run_id, + parent_strategy_id, + next_parent_strategy_ids, + strategy_id, + next_strategy_ids, + priority, + key, + next_keys, + schedule_timeout_at, + queue_to_notify + ) + SELECT + id, + inserted_at, + retry_count, + external_id, + tenant_id, + workflow_id, + workflow_version_id, + workflow_run_id, + parent_strategy_id, + next_parent_strategy_ids, + strategy_id, + next_strategy_ids, + COALESCE(priority, 1), + key, + next_keys, + 
schedule_timeout_at, + queue + FROM new_slot_rows; + + -- If the concurrency slot does not have next_keys, insert an item into v1_queue_item + WITH tasks AS ( + SELECT + t.* + FROM + new_table nt + JOIN old_table ot USING (task_id, task_inserted_at, task_retry_count, key) + JOIN v1_task t ON t.id = nt.task_id AND t.inserted_at = nt.task_inserted_at + WHERE + COALESCE(array_length(nt.next_keys, 1), 0) = 0 + AND nt.is_filled = TRUE + AND nt.is_filled IS DISTINCT FROM ot.is_filled + ) + INSERT INTO v1_queue_item ( + tenant_id, + queue, + task_id, + task_inserted_at, + external_id, + action_id, + step_id, + workflow_id, + workflow_run_id, + schedule_timeout_at, + step_timeout, + priority, + sticky, + desired_worker_id, + retry_count + ) + SELECT + tenant_id, + queue, + id, + inserted_at, + external_id, + action_id, + step_id, + workflow_id, + workflow_run_id, + CURRENT_TIMESTAMP + convert_duration_to_interval(schedule_timeout), + step_timeout, + COALESCE(priority, 1), + sticky, + desired_worker_id, + retry_count + FROM tasks; + + RETURN NULL; +END; +$$ +LANGUAGE plpgsql; + +CREATE TRIGGER v1_concurrency_slot_update_trigger +AFTER UPDATE ON v1_concurrency_slot +REFERENCING NEW TABLE AS new_table OLD TABLE AS old_table +FOR EACH STATEMENT +EXECUTE PROCEDURE v1_concurrency_slot_update_function(); + +CREATE OR REPLACE FUNCTION v1_dag_insert_function() +RETURNS TRIGGER AS +$$ +BEGIN + INSERT INTO v1_lookup_table ( + external_id, + tenant_id, + dag_id, + inserted_at + ) + SELECT + external_id, + tenant_id, + id, + inserted_at + FROM new_table + ON CONFLICT (external_id) DO NOTHING; + + RETURN NULL; +END; +$$ +LANGUAGE plpgsql; + +CREATE TRIGGER v1_dag_insert_trigger +AFTER INSERT ON v1_dag +REFERENCING NEW TABLE AS new_table +FOR EACH STATEMENT +EXECUTE PROCEDURE v1_dag_insert_function(); + +CREATE TYPE v1_log_line_level AS ENUM ('DEBUG', 'INFO', 'WARN', 'ERROR'); + +CREATE TABLE v1_log_line ( + id BIGINT GENERATED ALWAYS AS IDENTITY, + created_at TIMESTAMPTZ NOT NULL 
DEFAULT CURRENT_TIMESTAMP, + tenant_id UUID NOT NULL, + task_id BIGINT NOT NULL, + task_inserted_at TIMESTAMPTZ NOT NULL, + message TEXT NOT NULL, + level v1_log_line_level NOT NULL DEFAULT 'INFO', + metadata JSONB, + + PRIMARY KEY (task_id, task_inserted_at, id) +) PARTITION BY RANGE(task_inserted_at); + +--- OLAP TABLES --- + +CREATE TYPE v1_sticky_strategy_olap AS ENUM ('NONE', 'SOFT', 'HARD'); + +CREATE TYPE v1_readable_status_olap AS ENUM ( + 'QUEUED', + 'RUNNING', + 'CANCELLED', + 'FAILED', + 'COMPLETED' +); + +-- HELPER FUNCTIONS FOR PARTITIONED TABLES -- +CREATE OR REPLACE FUNCTION create_v1_partition_with_status( + newTableName text, + status v1_readable_status_olap +) RETURNS integer + LANGUAGE plpgsql AS +$$ +DECLARE + targetNameWithStatus varchar; +BEGIN + SELECT lower(format('%s_%s', newTableName, status::text)) INTO targetNameWithStatus; + + -- exit if the table exists + IF EXISTS (SELECT 1 FROM pg_tables WHERE tablename = targetNameWithStatus) THEN + RETURN 0; + END IF; + + EXECUTE + format('CREATE TABLE %s (LIKE %s INCLUDING INDEXES)', targetNameWithStatus, newTableName); + EXECUTE + format('ALTER TABLE %s SET ( + autovacuum_vacuum_scale_factor = ''0.1'', + autovacuum_analyze_scale_factor=''0.05'', + autovacuum_vacuum_threshold=''25'', + autovacuum_analyze_threshold=''25'', + autovacuum_vacuum_cost_delay=''10'', + autovacuum_vacuum_cost_limit=''1000'' + )', targetNameWithStatus); + EXECUTE + format('ALTER TABLE %s ATTACH PARTITION %s FOR VALUES IN (''%s'')', newTableName, targetNameWithStatus, status); + RETURN 1; +END; +$$; + +CREATE OR REPLACE FUNCTION create_v1_olap_partition_with_date_and_status( + targetTableName text, + targetDate date +) RETURNS integer + LANGUAGE plpgsql AS +$$ +DECLARE + targetDateStr varchar; + targetDatePlusOneDayStr varchar; + newTableName varchar; +BEGIN + SELECT to_char(targetDate, 'YYYYMMDD') INTO targetDateStr; + SELECT to_char(targetDate + INTERVAL '1 day', 'YYYYMMDD') INTO targetDatePlusOneDayStr; + SELECT 
format('%s_%s', targetTableName, targetDateStr) INTO newTableName; + IF NOT EXISTS (SELECT 1 FROM pg_tables WHERE tablename = newTableName) THEN + EXECUTE format('CREATE TABLE %s (LIKE %s INCLUDING INDEXES) PARTITION BY LIST (readable_status)', newTableName, targetTableName); + END IF; + + PERFORM create_v1_partition_with_status(newTableName, 'QUEUED'); + PERFORM create_v1_partition_with_status(newTableName, 'RUNNING'); + PERFORM create_v1_partition_with_status(newTableName, 'COMPLETED'); + PERFORM create_v1_partition_with_status(newTableName, 'CANCELLED'); + PERFORM create_v1_partition_with_status(newTableName, 'FAILED'); + + -- If it's not already attached, attach the partition + IF NOT EXISTS (SELECT 1 FROM pg_inherits WHERE inhrelid = newTableName::regclass) THEN + EXECUTE format('ALTER TABLE %s ATTACH PARTITION %s FOR VALUES FROM (''%s'') TO (''%s'')', targetTableName, newTableName, targetDateStr, targetDatePlusOneDayStr); + END IF; + + RETURN 1; +END; +$$; + +CREATE OR REPLACE FUNCTION create_v1_hash_partitions( + targetTableName text, + num_partitions INT +) RETURNS integer +LANGUAGE plpgsql AS +$$ +DECLARE + existing_count INT; + partition_name text; + created_count INT := 0; + i INT; +BEGIN + SELECT count(*) INTO existing_count + FROM pg_inherits + WHERE inhparent = targetTableName::regclass; + + IF existing_count > num_partitions THEN + RAISE EXCEPTION 'Cannot decrease the number of partitions: we already have % partitions which is more than the target %', existing_count, num_partitions; + END IF; + + FOR i IN 0..(num_partitions - 1) LOOP + partition_name := format('%s_%s', targetTableName, i); + IF to_regclass(partition_name) IS NULL THEN + EXECUTE format('CREATE TABLE %I (LIKE %s INCLUDING INDEXES)', partition_name, targetTableName); + EXECUTE format('ALTER TABLE %I SET ( + autovacuum_vacuum_scale_factor = ''0.1'', + autovacuum_analyze_scale_factor = ''0.05'', + autovacuum_vacuum_threshold = ''25'', + autovacuum_analyze_threshold = ''25'', + 
autovacuum_vacuum_cost_delay = ''10'', + autovacuum_vacuum_cost_limit = ''1000'' + )', partition_name); + EXECUTE format('ALTER TABLE %s ATTACH PARTITION %I FOR VALUES WITH (modulus %s, remainder %s)', targetTableName, partition_name, num_partitions, i); + created_count := created_count + 1; + END IF; + END LOOP; + RETURN created_count; +END; +$$; + +-- TASKS DEFINITIONS -- +CREATE TABLE v1_tasks_olap ( + tenant_id UUID NOT NULL, + id BIGINT NOT NULL, + inserted_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP, + external_id UUID NOT NULL DEFAULT gen_random_uuid(), + queue TEXT NOT NULL, + action_id TEXT NOT NULL, + step_id UUID NOT NULL, + workflow_id UUID NOT NULL, + workflow_version_id UUID NOT NULL, + workflow_run_id UUID NOT NULL, + schedule_timeout TEXT NOT NULL, + step_timeout TEXT, + priority INTEGER DEFAULT 1, + sticky v1_sticky_strategy_olap NOT NULL, + desired_worker_id UUID, + display_name TEXT NOT NULL, + input JSONB NOT NULL, + additional_metadata JSONB, + readable_status v1_readable_status_olap NOT NULL DEFAULT 'QUEUED', + latest_retry_count INT NOT NULL DEFAULT 0, + latest_worker_id UUID, + dag_id BIGINT, + dag_inserted_at TIMESTAMPTZ, + parent_task_external_id UUID, + + PRIMARY KEY (inserted_at, id, readable_status) +) PARTITION BY RANGE(inserted_at); + +CREATE INDEX v1_tasks_olap_workflow_id_idx ON v1_tasks_olap (tenant_id, workflow_id); + +CREATE INDEX v1_tasks_olap_worker_id_idx ON v1_tasks_olap (tenant_id, latest_worker_id) WHERE latest_worker_id IS NOT NULL; + +SELECT create_v1_olap_partition_with_date_and_status('v1_tasks_olap', CURRENT_DATE); + +-- DAG DEFINITIONS -- +CREATE TABLE v1_dags_olap ( + id BIGINT NOT NULL, + inserted_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP, + tenant_id UUID NOT NULL, + external_id UUID NOT NULL, + display_name TEXT NOT NULL, + workflow_id UUID NOT NULL, + workflow_version_id UUID NOT NULL, + readable_status v1_readable_status_olap NOT NULL DEFAULT 'QUEUED', + input JSONB NOT NULL, + additional_metadata 
JSONB, + parent_task_external_id UUID, + PRIMARY KEY (inserted_at, id, readable_status) +) PARTITION BY RANGE(inserted_at); + +CREATE INDEX v1_dags_olap_workflow_id_idx ON v1_dags_olap (tenant_id, workflow_id); + +SELECT create_v1_olap_partition_with_date_and_status('v1_dags_olap', CURRENT_DATE); + +-- RUN DEFINITIONS -- +CREATE TYPE v1_run_kind AS ENUM ('TASK', 'DAG'); + +-- v1_runs_olap represents an invocation of a workflow. it can either refer to a DAG or a task. +-- we partition this table on status to allow for efficient querying of tasks in different states. +CREATE TABLE v1_runs_olap ( + tenant_id UUID NOT NULL, + id BIGINT NOT NULL, + inserted_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP, + external_id UUID NOT NULL DEFAULT gen_random_uuid(), + readable_status v1_readable_status_olap NOT NULL DEFAULT 'QUEUED', + kind v1_run_kind NOT NULL, + workflow_id UUID NOT NULL, + workflow_version_id UUID NOT NULL, + additional_metadata JSONB, + parent_task_external_id UUID, + + PRIMARY KEY (inserted_at, id, readable_status, kind) +) PARTITION BY RANGE(inserted_at); + +SELECT create_v1_olap_partition_with_date_and_status('v1_runs_olap', CURRENT_DATE); + +CREATE INDEX ix_v1_runs_olap_parent_task_external_id ON v1_runs_olap (parent_task_external_id) WHERE parent_task_external_id IS NOT NULL; + +-- LOOKUP TABLES -- +CREATE TABLE v1_lookup_table_olap ( + tenant_id UUID NOT NULL, + external_id UUID NOT NULL, + task_id BIGINT, + dag_id BIGINT, + inserted_at TIMESTAMPTZ NOT NULL, + + PRIMARY KEY (external_id) +); + +CREATE TABLE v1_dag_to_task_olap ( + dag_id BIGINT NOT NULL, + dag_inserted_at TIMESTAMPTZ NOT NULL, + task_id BIGINT NOT NULL, + task_inserted_at TIMESTAMPTZ NOT NULL, + PRIMARY KEY (dag_id, dag_inserted_at, task_id, task_inserted_at) +); + +-- STATUS DEFINITION -- +CREATE TYPE v1_status_kind AS ENUM ('TASK', 'DAG'); + +CREATE TABLE v1_statuses_olap ( + external_id UUID NOT NULL, + inserted_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP, + tenant_id UUID 
NOT NULL, + workflow_id UUID NOT NULL, + kind v1_run_kind NOT NULL, + readable_status v1_readable_status_olap NOT NULL DEFAULT 'QUEUED', + + PRIMARY KEY (external_id, inserted_at) +); + + +-- EVENT DEFINITIONS -- +CREATE TYPE v1_event_type_olap AS ENUM ( + 'RETRYING', + 'REASSIGNED', + 'RETRIED_BY_USER', + 'CREATED', + 'QUEUED', + 'REQUEUED_NO_WORKER', + 'REQUEUED_RATE_LIMIT', + 'ASSIGNED', + 'ACKNOWLEDGED', + 'SENT_TO_WORKER', + 'SLOT_RELEASED', + 'STARTED', + 'TIMEOUT_REFRESHED', + 'SCHEDULING_TIMED_OUT', + 'FINISHED', + 'FAILED', + 'CANCELLED', + 'TIMED_OUT', + 'RATE_LIMIT_ERROR', + 'SKIPPED' +); + +-- this is a hash-partitioned table on the task_id, so that we can process batches of events in parallel +-- without needing to place conflicting locks on tasks. +CREATE TABLE v1_task_events_olap_tmp ( + tenant_id UUID NOT NULL, + requeue_after TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP, + requeue_retries INT NOT NULL DEFAULT 0, + id bigint GENERATED ALWAYS AS IDENTITY, + task_id BIGINT NOT NULL, + task_inserted_at TIMESTAMPTZ NOT NULL, + event_type v1_event_type_olap NOT NULL, + readable_status v1_readable_status_olap NOT NULL, + retry_count INT NOT NULL DEFAULT 0, + worker_id UUID, + + PRIMARY KEY (tenant_id, requeue_after, task_id, id) +) PARTITION BY HASH(task_id); + +CREATE OR REPLACE FUNCTION list_task_events_tmp( + partition_number INT, + tenant_id UUID, + event_limit INT +) RETURNS SETOF v1_task_events_olap_tmp +LANGUAGE plpgsql AS +$$ +DECLARE + partition_table text; +BEGIN + partition_table := 'v1_task_events_olap_tmp_' || partition_number::text; + RETURN QUERY EXECUTE format( + 'SELECT e.* + FROM %I e + WHERE e.tenant_id = $1 + AND e.requeue_after <= CURRENT_TIMESTAMP + ORDER BY e.requeue_after, e.task_id, e.id + LIMIT $2 + FOR UPDATE SKIP LOCKED', + partition_table) + USING tenant_id, event_limit; +END; +$$; + +CREATE TABLE v1_task_events_olap ( + tenant_id UUID NOT NULL, + id bigint GENERATED ALWAYS AS IDENTITY, + inserted_at TIMESTAMPTZ NOT NULL 
DEFAULT CURRENT_TIMESTAMP, + task_id BIGINT NOT NULL, + task_inserted_at TIMESTAMPTZ NOT NULL, + event_type v1_event_type_olap NOT NULL, + workflow_id UUID NOT NULL, + event_timestamp TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP, + readable_status v1_readable_status_olap NOT NULL, + retry_count INT NOT NULL DEFAULT 0, + error_message TEXT, + output JSONB, + worker_id UUID, + additional__event_data TEXT, + additional__event_message TEXT, + + PRIMARY KEY (task_id, task_inserted_at, id) +); + +CREATE INDEX v1_task_events_olap_task_id_idx ON v1_task_events_olap (task_id); + +-- this is a hash-partitioned table on the dag_id, so that we can process batches of events in parallel +-- without needing to place conflicting locks on dags. +CREATE TABLE v1_task_status_updates_tmp ( + tenant_id UUID NOT NULL, + requeue_after TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP, + requeue_retries INT NOT NULL DEFAULT 0, + id bigint GENERATED ALWAYS AS IDENTITY, + dag_id BIGINT NOT NULL, + dag_inserted_at TIMESTAMPTZ NOT NULL, + + PRIMARY KEY (tenant_id, requeue_after, dag_id, id) +) PARTITION BY HASH(dag_id); + +CREATE OR REPLACE FUNCTION list_task_status_updates_tmp( + partition_number INT, + tenant_id UUID, + event_limit INT +) RETURNS SETOF v1_task_status_updates_tmp +LANGUAGE plpgsql AS +$$ +DECLARE + partition_table text; +BEGIN + partition_table := 'v1_task_status_updates_tmp_' || partition_number::text; + RETURN QUERY EXECUTE format( + 'SELECT e.* + FROM %I e + WHERE e.tenant_id = $1 + AND e.requeue_after <= CURRENT_TIMESTAMP + ORDER BY e.dag_id + LIMIT $2 + FOR UPDATE SKIP LOCKED', + partition_table) + USING tenant_id, event_limit; +END; +$$; + +-- TRIGGERS TO LINK TASKS, DAGS AND EVENTS -- +CREATE OR REPLACE FUNCTION v1_tasks_olap_insert_function() +RETURNS TRIGGER AS +$$ +BEGIN + INSERT INTO v1_runs_olap ( + tenant_id, + id, + inserted_at, + external_id, + readable_status, + kind, + workflow_id, + workflow_version_id, + additional_metadata, + parent_task_external_id + ) + 
SELECT + tenant_id, + id, + inserted_at, + external_id, + readable_status, + 'TASK', + workflow_id, + workflow_version_id, + additional_metadata, + parent_task_external_id + FROM new_rows + WHERE dag_id IS NULL; + + INSERT INTO v1_lookup_table_olap ( + tenant_id, + external_id, + task_id, + inserted_at + ) + SELECT + tenant_id, + external_id, + id, + inserted_at + FROM new_rows + ON CONFLICT (external_id) DO NOTHING; + + -- If the task has a dag_id and dag_inserted_at, insert into the lookup table + INSERT INTO v1_dag_to_task_olap ( + dag_id, + dag_inserted_at, + task_id, + task_inserted_at + ) + SELECT + dag_id, + dag_inserted_at, + id, + inserted_at + FROM new_rows + WHERE dag_id IS NOT NULL; + + RETURN NULL; +END; +$$ +LANGUAGE plpgsql; + +CREATE TRIGGER v1_tasks_olap_status_insert_trigger +AFTER INSERT ON v1_tasks_olap +REFERENCING NEW TABLE AS new_rows +FOR EACH STATEMENT +EXECUTE FUNCTION v1_tasks_olap_insert_function(); + +CREATE OR REPLACE FUNCTION v1_tasks_olap_status_update_function() +RETURNS TRIGGER AS +$$ +BEGIN + UPDATE + v1_runs_olap r + SET + readable_status = n.readable_status + FROM new_rows n + WHERE + r.id = n.id + AND r.inserted_at = n.inserted_at + AND r.kind = 'TASK'; + + -- insert tmp events into task status updates table if we have a dag_id + INSERT INTO v1_task_status_updates_tmp ( + tenant_id, + dag_id, + dag_inserted_at + ) + SELECT + tenant_id, + dag_id, + dag_inserted_at + FROM new_rows + WHERE dag_id IS NOT NULL; + + RETURN NULL; +END; +$$ +LANGUAGE plpgsql; + +CREATE TRIGGER v1_tasks_olap_status_update_trigger +AFTER UPDATE ON v1_tasks_olap +REFERENCING NEW TABLE AS new_rows +FOR EACH STATEMENT +EXECUTE FUNCTION v1_tasks_olap_status_update_function(); + +CREATE OR REPLACE FUNCTION v1_dags_olap_insert_function() +RETURNS TRIGGER AS +$$ +BEGIN + INSERT INTO v1_runs_olap ( + tenant_id, + id, + inserted_at, + external_id, + readable_status, + kind, + workflow_id, + workflow_version_id, + additional_metadata, + parent_task_external_id + ) 
+ SELECT + tenant_id, + id, + inserted_at, + external_id, + readable_status, + 'DAG', + workflow_id, + workflow_version_id, + additional_metadata, + parent_task_external_id + FROM new_rows; + + INSERT INTO v1_lookup_table_olap ( + tenant_id, + external_id, + dag_id, + inserted_at + ) + SELECT + tenant_id, + external_id, + id, + inserted_at + FROM new_rows + ON CONFLICT (external_id) DO NOTHING; + + RETURN NULL; +END; +$$ +LANGUAGE plpgsql; + +CREATE TRIGGER v1_dags_olap_status_insert_trigger +AFTER INSERT ON v1_dags_olap +REFERENCING NEW TABLE AS new_rows +FOR EACH STATEMENT +EXECUTE FUNCTION v1_dags_olap_insert_function(); + +CREATE OR REPLACE FUNCTION v1_dags_olap_status_update_function() +RETURNS TRIGGER AS +$$ +BEGIN + UPDATE + v1_runs_olap r + SET + readable_status = n.readable_status + FROM new_rows n + WHERE + r.id = n.id + AND r.inserted_at = n.inserted_at + AND r.kind = 'DAG'; + + RETURN NULL; +END; +$$ +LANGUAGE plpgsql; + +CREATE TRIGGER v1_dags_olap_status_update_trigger +AFTER UPDATE ON v1_dags_olap +REFERENCING NEW TABLE AS new_rows +FOR EACH STATEMENT +EXECUTE FUNCTION v1_dags_olap_status_update_function(); + +CREATE OR REPLACE FUNCTION v1_runs_olap_insert_function() +RETURNS TRIGGER AS +$$ +BEGIN + INSERT INTO v1_statuses_olap ( + external_id, + inserted_at, + tenant_id, + workflow_id, + kind, + readable_status + ) + SELECT + external_id, + inserted_at, + tenant_id, + workflow_id, + kind, + readable_status + FROM new_rows; + + RETURN NULL; +END; +$$ +LANGUAGE plpgsql; + +CREATE TRIGGER v1_runs_olap_status_insert_trigger +AFTER INSERT ON v1_runs_olap +REFERENCING NEW TABLE AS new_rows +FOR EACH STATEMENT +EXECUTE FUNCTION v1_runs_olap_insert_function(); + +CREATE OR REPLACE FUNCTION v1_runs_olap_status_update_function() +RETURNS TRIGGER AS +$$ +BEGIN + UPDATE + v1_statuses_olap s + SET + readable_status = n.readable_status + FROM new_rows n + WHERE + s.external_id = n.external_id + AND s.inserted_at = n.inserted_at; + + RETURN NULL; +END; +$$ 
+LANGUAGE plpgsql; + +CREATE TRIGGER v1_runs_olap_status_update_trigger +AFTER UPDATE ON v1_runs_olap +REFERENCING NEW TABLE AS new_rows +FOR EACH STATEMENT +EXECUTE FUNCTION v1_runs_olap_status_update_function(); + +SELECT create_v1_range_partition('v1_task', DATE 'today'); +SELECT create_v1_range_partition('v1_task_event', DATE 'today'); +SELECT create_v1_range_partition('v1_dag', DATE 'today'); +SELECT create_v1_range_partition('v1_log_line', DATE 'today'); + +-- migrate concurrency expressions +CREATE OR REPLACE FUNCTION migrate_workflow_concurrency(workflowVersionId UUID) +RETURNS VOID AS $$ +DECLARE + v0_wf_concurrency_row "WorkflowConcurrency"%ROWTYPE; + wf_concurrency_row v1_workflow_concurrency%ROWTYPE; + child_ids bigint[]; +BEGIN + SELECT * FROM "WorkflowConcurrency" + WHERE "workflowVersionId" = workflowVersionId + INTO v0_wf_concurrency_row; + + RAISE NOTICE 'Migrating workflow concurrency for workflow version %', workflowVersionId; + + IF v0_wf_concurrency_row."concurrencyGroupExpression" IS NOT NULL THEN + RAISE NOTICE 'Migrating %', row_to_json(v0_wf_concurrency_row); + + -- Insert into v1_workflow_concurrency and capture the inserted row. + INSERT INTO v1_workflow_concurrency ( + workflow_id, + workflow_version_id, + strategy, + expression, + tenant_id, + max_concurrency + ) + SELECT + wf."id", + wv."id", + v0_wf_concurrency_row."limitStrategy"::VARCHAR::v1_concurrency_strategy, + v0_wf_concurrency_row."concurrencyGroupExpression", + wf."tenantId", + v0_wf_concurrency_row."maxRuns" + FROM "WorkflowVersion" wv + JOIN "Workflow" wf ON wv."workflowId" = wf."id" + WHERE wv."id" = v0_wf_concurrency_row."workflowVersionId" + RETURNING * INTO wf_concurrency_row; + + -- Insert into v1_step_concurrency and capture the inserted rows into a variable. 
+ WITH inserted_steps AS ( + INSERT INTO v1_step_concurrency ( + parent_strategy_id, + workflow_id, + workflow_version_id, + step_id, + strategy, + expression, + tenant_id, + max_concurrency + ) + SELECT + wf_concurrency_row.id, + s."workflowId", + s."workflowVersionId", + s."id", + v0_wf_concurrency_row."limitStrategy"::VARCHAR::v1_concurrency_strategy, + v0_wf_concurrency_row."concurrencyGroupExpression", + s."tenantId", + v0_wf_concurrency_row."maxRuns" + FROM ( + SELECT + s."id", + wf."id" AS "workflowId", + wv."id" AS "workflowVersionId", + wf."tenantId" + FROM "Step" s + JOIN "Job" j ON s."jobId" = j."id" + JOIN "WorkflowVersion" wv ON j."workflowVersionId" = wv."id" + JOIN "Workflow" wf ON wv."workflowId" = wf."id" + WHERE + wv."id" = v0_wf_concurrency_row."workflowVersionId" + AND j."kind" = 'DEFAULT' + ) s + RETURNING * + ) + SELECT array_remove(array_agg(t.id), NULL)::bigint[] INTO child_ids + FROM inserted_steps t; + + -- Update the workflow concurrency row using its primary key. 
+ UPDATE v1_workflow_concurrency + SET child_strategy_ids = child_ids + WHERE workflow_id = wf_concurrency_row.workflow_id + AND workflow_version_id = wf_concurrency_row.workflow_version_id + AND id = wf_concurrency_row.id; + END IF; +END; +$$ LANGUAGE plpgsql; + +-- For each latest workflow, call migrate +WITH latest_workflow_versions AS ( + SELECT + DISTINCT ON("workflowId") + "workflowId", + "id" + FROM + "WorkflowVersion" + WHERE + "deletedAt" IS NULL + ORDER BY "workflowId", "order" DESC +) +SELECT + migrate_workflow_concurrency("id") +FROM + latest_workflow_versions; + +-- +goose StatementEnd diff --git a/cmd/hatchet-migrate/migrate/run.go b/cmd/hatchet-migrate/migrate/run.go new file mode 100644 index 000000000..4aeab8aca --- /dev/null +++ b/cmd/hatchet-migrate/migrate/run.go @@ -0,0 +1,276 @@ +package migrate + +import ( + "context" + "database/sql" + "embed" + "fmt" + "io/fs" + "log" + "os" + "sort" + "strconv" + "strings" + "time" + + _ "github.com/jackc/pgx/v5/stdlib" + "github.com/pressly/goose/v3" + "github.com/pressly/goose/v3/lock" + "github.com/sethvargo/go-retry" +) + +//go:embed migrations/*.sql +var embedMigrations embed.FS + +func RunMigrations(ctx context.Context) { + var db *sql.DB + var conn *sql.Conn + + retryCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + err := retry.Do(retryCtx, retry.NewConstant(1*time.Second), func(ctx context.Context) error { + var err error + if db == nil { + db, err = goose.OpenDBWithDriver("pgx", os.Getenv("DATABASE_URL")) + + if err != nil { + return retry.RetryableError(fmt.Errorf("failed to open DB: %w", err)) + } + } + + conn, err = db.Conn(ctx) + + if err != nil { + return retry.RetryableError(fmt.Errorf("failed to open DB connection: %w", err)) + } + + return nil + }) + + if err != nil { + log.Fatalf("goose: failed to open DB: %v", err) + } + + defer func() { + if err := conn.Close(); err != nil { + log.Fatalf("goose: failed to close DB connection: %v", err) + } + + if err := 
db.Close(); err != nil { + log.Fatalf("goose: failed to close DB: %v", err) + } + }() + + if err != nil { + log.Fatalf("goose: failed to open DB connection: %v", err) + } + + locker, err := lock.NewPostgresSessionLocker() + + if err != nil { + log.Fatalf("goose: failed to create locker: %v", err) + } + + err = locker.SessionLock(ctx, conn) + + if err != nil { + log.Fatalf("goose: failed to lock session: %v", err) + } + + // Check whether the goose migrations table exists. + var gooseExists bool + { + query := TableExists("goose_db_version") + err = conn.QueryRowContext(ctx, query).Scan(&gooseExists) + if err != nil { + log.Fatalf("goose: failed to check goose migrations table existence: %v", err) + } + } + + // If the goose migrations table doesn't exist, create it and set a baseline. + if !gooseExists { + // Create goose migrations table. + createTableSQL := CreateTable("goose_db_version") + _, err = conn.ExecContext(ctx, createTableSQL) + if err != nil { + log.Fatalf("goose: failed to create goose migrations table: %v", err) + } + + // Insert a 0 version. + insertQuery := InsertVersion("goose_db_version") + _, err = conn.ExecContext(ctx, insertQuery, 0, true) + + if err != nil { + log.Fatalf("goose: failed to insert baseline migration: %v", err) + } + + // Determine baseline version from atlas or prisma migrations. + var baseline string + + // 1. Check that the atlas_schema_revisions.atlas_schema_revisions table exists. + var atlasExists bool + atlasExistQuery := "SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_schema = 'atlas_schema_revisions' AND table_name = 'atlas_schema_revisions')" + err = conn.QueryRowContext(ctx, atlasExistQuery).Scan(&atlasExists) + if err != nil { + log.Fatalf("goose: error checking atlas_schema_revisions existence: %v", err) + } + + fmt.Printf("Does existing atlas schema exist? %v\n", atlasExists) + + // 2. If it does, check for the latest migration in the atlas schema. 
+ if atlasExists { + var version string + atlasLatestQuery := "SELECT version FROM atlas_schema_revisions.atlas_schema_revisions ORDER BY version DESC LIMIT 1" + err = conn.QueryRowContext(ctx, atlasLatestQuery).Scan(&version) + if err == nil { + baseline = version + fmt.Printf("Baseline version from atlas: %s\n", baseline) + } + } + + // 3. If not found, check whether the _prisma_migrations table exists. + if baseline == "" { + var prismaExists bool + prismaExistQuery := "SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = '_prisma_migrations')" + err = conn.QueryRowContext(ctx, prismaExistQuery).Scan(&prismaExists) + if err != nil { + log.Fatalf("goose: error checking _prisma_migrations existence: %v", err) + } + + fmt.Printf("Does existing prisma schema exist? %v\n", prismaExists) + + // 4. If it does, check for the latest migration in the prisma schema. + if prismaExists { + var migrationName string + prismaLatestQuery := "SELECT migration_name FROM _prisma_migrations ORDER BY started_at DESC LIMIT 1" + err = conn.QueryRowContext(ctx, prismaLatestQuery).Scan(&migrationName) + if err == nil { + baseline = migrationName + fmt.Printf("Baseline version from prisma: %s\n", baseline) + } + } + } + + // If a baseline version was found, check for a match in the ./migrations directory. 
+ if baseline != "" { + fsys, err := fs.Sub(embedMigrations, "migrations") + if err != nil { + log.Fatalf("goose: failed to create sub filesystem: %v", err) + } + entries, err := fs.ReadDir(fsys, ".") + if err != nil { + log.Fatalf("goose: failed to read migrations directory: %v", err) + } + + type migration struct { + version string + filename string + } + var migrations []migration + for _, entry := range entries { + if entry.IsDir() { + continue + } + name := entry.Name() + parts := strings.SplitN(name, "_", 2) + if len(parts) < 2 { + continue + } + version := parts[0] + if version <= baseline { + fmt.Printf("Including version %s from %s\n", version, name) + migrations = append(migrations, migration{version: version, filename: name}) + } + } + + sort.Slice(migrations, func(i, j int) bool { + return migrations[i].version < migrations[j].version + }) + + for _, m := range migrations { + insertQuery := InsertVersion("goose_db_version") + v, err := strconv.ParseInt(m.version, 10, 64) + if err != nil { + log.Fatalf("goose: invalid migration version %s: %v", m.version, err) + } + _, err = conn.ExecContext(ctx, insertQuery, v, true) + if err != nil { + log.Fatalf("goose: failed to insert baseline migration %s: %v", m.filename, err) + } + } + } + } + + err = locker.SessionUnlock(ctx, conn) + + if err != nil { + log.Fatalf("goose: failed to unlock session: %v", err) + } + + // decouple from existing structure + fsys, err := fs.Sub(embedMigrations, "migrations") + if err != nil { + log.Fatalf("goose: failed to create sub filesystem: %v", err) + } + goose.SetBaseFS(fsys) + + err = goose.Up(db, ".") + if err != nil { + log.Fatalf("goose: failed to apply migrations: %v", err) + } +} + +// Copied from https://github.com/pressly/goose/blob/6a70e744c8eb2dc4bb90ba641cb03b42d8eef6cd/internal/dialect/dialectquery/postgres.go +func CreateTable(tableName string) string { + q := `CREATE TABLE %s ( + id integer PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY, + version_id bigint NOT 
NULL, + is_applied boolean NOT NULL, + tstamp timestamp NOT NULL DEFAULT now() + )` + return fmt.Sprintf(q, tableName) +} + +func InsertVersion(tableName string) string { + q := `INSERT INTO %s (version_id, is_applied) VALUES ($1, $2)` + return fmt.Sprintf(q, tableName) +} + +func DeleteVersion(tableName string) string { + q := `DELETE FROM %s WHERE version_id=$1` + return fmt.Sprintf(q, tableName) +} + +func GetMigrationByVersion(tableName string) string { + q := `SELECT tstamp, is_applied FROM %s WHERE version_id=$1 ORDER BY tstamp DESC LIMIT 1` + return fmt.Sprintf(q, tableName) +} + +func ListMigrations(tableName string) string { + q := `SELECT version_id, is_applied from %s ORDER BY id DESC` + return fmt.Sprintf(q, tableName) +} + +func GetLatestVersion(tableName string) string { + q := `SELECT max(version_id) FROM %s` + return fmt.Sprintf(q, tableName) +} + +func TableExists(tableName string) string { + schemaName, table := parseTableIdentifier(tableName) + if schemaName != "" { + q := `SELECT EXISTS ( SELECT 1 FROM pg_tables WHERE schemaname = '%s' AND tablename = '%s' )` + return fmt.Sprintf(q, schemaName, table) + } + q := `SELECT EXISTS ( SELECT 1 FROM pg_tables WHERE (current_schema() IS NULL OR schemaname = current_schema()) AND tablename = '%s' )` + return fmt.Sprintf(q, table) +} + +func parseTableIdentifier(name string) (schema, table string) { + schema, table, found := strings.Cut(name, ".") + if !found { + return "", name + } + return schema, table +} diff --git a/examples/assignment-sticky/run.go b/examples/assignment-sticky/run.go index cba861ae3..537452733 100644 --- a/examples/assignment-sticky/run.go +++ b/examples/assignment-sticky/run.go @@ -31,13 +31,13 @@ func run() (func() error, error) { On: worker.Events("user:create:sticky"), Name: "sticky", Description: "sticky", - StickyStrategy: types.StickyStrategyPtr(types.StickyStrategy_SOFT), + StickyStrategy: types.StickyStrategyPtr(types.StickyStrategy_HARD), Steps: []*worker.WorkflowStep{ 
worker.Fn(func(ctx worker.HatchetContext) (result *stepOneOutput, err error) { sticky := true - _, err = ctx.SpawnWorkflow("step-one", nil, &worker.SpawnWorkflowOpts{ + _, err = ctx.SpawnWorkflow("sticky-child", nil, &worker.SpawnWorkflowOpts{ Sticky: &sticky, }) @@ -53,12 +53,12 @@ func run() (func() error, error) { return &stepOneOutput{ Message: ctx.Worker().ID(), }, nil - }).SetName("step-two"), + }).SetName("step-two").AddParents("step-one"), worker.Fn(func(ctx worker.HatchetContext) (result *stepOneOutput, err error) { return &stepOneOutput{ Message: ctx.Worker().ID(), }, nil - }).SetName("step-three").AddParents("step-one", "step-two"), + }).SetName("step-three").AddParents("step-two"), }, }, ) @@ -66,6 +66,25 @@ func run() (func() error, error) { return nil, fmt.Errorf("error registering workflow: %w", err) } + err = w.RegisterWorkflow( + &worker.WorkflowJob{ + On: worker.NoTrigger(), + Name: "sticky-child", + Description: "sticky", + Steps: []*worker.WorkflowStep{ + worker.Fn(func(ctx worker.HatchetContext) (result *stepOneOutput, err error) { + return &stepOneOutput{ + Message: ctx.Worker().ID(), + }, nil + }).SetName("step-one"), + }, + }, + ) + + if err != nil { + return nil, fmt.Errorf("error registering workflow: %w", err) + } + go func() { log.Printf("pushing event") diff --git a/examples/cancellation/run.go b/examples/cancellation/run.go index 37b6f678b..1c0ec2f1d 100644 --- a/examples/cancellation/run.go +++ b/examples/cancellation/run.go @@ -7,9 +7,8 @@ import ( "time" "github.com/google/uuid" - "github.com/hatchet-dev/hatchet/pkg/client" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" + "github.com/hatchet-dev/hatchet/pkg/client/rest" "github.com/hatchet-dev/hatchet/pkg/worker" ) @@ -77,38 +76,48 @@ func run(events chan<- string) (func() error, error) { time.Sleep(10 * time.Second) - //workflows, err := c.API().WorkflowListWithResponse(context.Background(), uuid.MustParse(c.TenantId())) - //if err != nil { - // panic(fmt.Errorf("error 
listing workflows: %w", err)) - //} + workflowName := "cancellation" - client := db.NewClient() - if err := client.Connect(); err != nil { - panic(fmt.Errorf("error connecting to database: %w", err)) - } - defer client.Disconnect() + workflows, err := c.API().WorkflowListWithResponse(context.Background(), uuid.MustParse(c.TenantId()), &rest.WorkflowListParams{ + Name: &workflowName, + }) - stepRuns, err := client.StepRun.FindMany( - db.StepRun.TenantID.Equals(c.TenantId()), - db.StepRun.Status.Equals(db.StepRunStatusRunning), - ).Exec(context.Background()) if err != nil { - panic(fmt.Errorf("error finding step runs: %w", err)) + panic(fmt.Errorf("error listing workflows: %w", err)) } - if len(stepRuns) == 0 { - panic(fmt.Errorf("no step runs to cancel")) + if workflows.JSON200 == nil { + panic(fmt.Errorf("no workflows found")) } - for _, stepRun := range stepRuns { - stepRunID := stepRun.ID - log.Printf("cancelling step run id: %s", stepRunID) - res, err := c.API().StepRunUpdateCancelWithResponse(context.Background(), uuid.MustParse(c.TenantId()), uuid.MustParse(stepRunID)) - if err != nil { - panic(fmt.Errorf("error cancelling step run: %w", err)) - } + rows := *workflows.JSON200.Rows - log.Printf("step run cancelled: %v", res.JSON200) + if len(rows) == 0 { + panic(fmt.Errorf("no workflows found")) + } + + workflowId := uuid.MustParse(rows[0].Metadata.Id) + + workflowRuns, err := c.API().WorkflowRunListWithResponse(context.Background(), uuid.MustParse(c.TenantId()), &rest.WorkflowRunListParams{ + WorkflowId: &workflowId, + }) + + if err != nil { + panic(fmt.Errorf("error listing workflow runs: %w", err)) + } + + if workflowRuns.JSON200 == nil { + panic(fmt.Errorf("no workflow runs found")) + } + + workflowRunsRows := *workflowRuns.JSON200.Rows + + _, err = c.API().WorkflowRunCancelWithResponse(context.Background(), uuid.MustParse(c.TenantId()), rest.WorkflowRunsCancelRequest{ + WorkflowRunIds: []uuid.UUID{uuid.MustParse(workflowRunsRows[0].Metadata.Id)}, + }) + + 
if err != nil { + panic(fmt.Errorf("error cancelling workflow run: %w", err)) } }() diff --git a/examples/concurrency/main_e2e_test.go b/examples/concurrency/main_e2e_test.go index 6c2fa34d7..1aa69fb68 100644 --- a/examples/concurrency/main_e2e_test.go +++ b/examples/concurrency/main_e2e_test.go @@ -13,6 +13,8 @@ import ( ) func TestConcurrency(t *testing.T) { + t.Skip("skipping concurency test for now") + testutils.Prepare(t) ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) diff --git a/examples/limit-concurrency/group-round-robin-advanced/main_e2e_test.go b/examples/limit-concurrency/group-round-robin-advanced/main_e2e_test.go index b4918367c..709d66409 100644 --- a/examples/limit-concurrency/group-round-robin-advanced/main_e2e_test.go +++ b/examples/limit-concurrency/group-round-robin-advanced/main_e2e_test.go @@ -13,6 +13,7 @@ import ( ) func TestAdvancedConcurrency(t *testing.T) { + t.Skip("skipping concurency test for now") testutils.Prepare(t) ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) diff --git a/examples/loadtest/cli/cli_e2e_test.go b/examples/loadtest/cli/cli_e2e_test.go index 50c8302de..a1694c12c 100644 --- a/examples/loadtest/cli/cli_e2e_test.go +++ b/examples/loadtest/cli/cli_e2e_test.go @@ -41,15 +41,6 @@ func TestLoadCLI(t *testing.T) { args args wantErr bool }{{ - name: "test simple with unlimited concurrency", - args: args{ - duration: 10 * time.Second, - eventsPerSecond: 10, - delay: 0 * time.Second, - wait: 60 * time.Second, - concurrency: 0, - }, - }, { name: "test with high step delay", args: args{ duration: 10 * time.Second, @@ -58,6 +49,15 @@ func TestLoadCLI(t *testing.T) { wait: 60 * time.Second, concurrency: 0, }, + }, { + name: "test simple with unlimited concurrency", + args: args{ + duration: 10 * time.Second, + eventsPerSecond: 10, + delay: 0 * time.Second, + wait: 60 * time.Second, + concurrency: 0, + }, }, { name: "test for many queued events and little worker throughput", args: 
args{ diff --git a/examples/timeout/run.go b/examples/timeout/run.go index a9c00844d..9e21e0e58 100644 --- a/examples/timeout/run.go +++ b/examples/timeout/run.go @@ -7,7 +7,6 @@ import ( "time" "github.com/hatchet-dev/hatchet/pkg/client" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" "github.com/hatchet-dev/hatchet/pkg/worker" ) @@ -57,12 +56,6 @@ func run(done chan<- string, job worker.WorkflowJob) (func() error, error) { time.Sleep(20 * time.Second) - client := db.NewClient() - if err := client.Connect(); err != nil { - panic(fmt.Errorf("error connecting to database: %w", err)) - } - defer client.Disconnect() - done <- "done" }() diff --git a/examples/webhook/main_e2e_test.go b/examples/webhook/main_e2e_test.go index cc380493a..0e82353ac 100644 --- a/examples/webhook/main_e2e_test.go +++ b/examples/webhook/main_e2e_test.go @@ -13,23 +13,12 @@ import ( "github.com/hatchet-dev/hatchet/internal/testutils" "github.com/hatchet-dev/hatchet/pkg/client" - "github.com/hatchet-dev/hatchet/pkg/repository/prisma/db" "github.com/hatchet-dev/hatchet/pkg/worker" ) func TestWebhook(t *testing.T) { testutils.Prepare(t) - prisma := db.NewClient() - if err := prisma.Connect(); err != nil { - panic(fmt.Errorf("error connecting to database: %w", err)) - } - defer func() { - if err := prisma.Disconnect(); err != nil { - panic(fmt.Errorf("error disconnecting from database: %w", err)) - } - }() - c, err := client.New() if err != nil { panic(fmt.Errorf("error creating client: %w", err)) diff --git a/frontend/app/package.json b/frontend/app/package.json index badb38909..d24dfe0fe 100644 --- a/frontend/app/package.json +++ b/frontend/app/package.json @@ -31,6 +31,7 @@ "@radix-ui/react-label": "^2.0.2", "@radix-ui/react-menubar": "^1.0.4", "@radix-ui/react-popover": "^1.0.7", + "@radix-ui/react-radio-group": "^1.2.3", "@radix-ui/react-scroll-area": "^1.1.0", "@radix-ui/react-select": "^2.0.0", "@radix-ui/react-separator": "^1.0.3", diff --git a/frontend/app/pnpm-lock.yaml 
b/frontend/app/pnpm-lock.yaml index 3738a8ac1..ecd2228d0 100644 --- a/frontend/app/pnpm-lock.yaml +++ b/frontend/app/pnpm-lock.yaml @@ -53,6 +53,9 @@ importers: '@radix-ui/react-popover': specifier: ^1.0.7 version: 1.0.7(@types/react-dom@18.2.23)(@types/react@18.2.73)(react-dom@18.2.0(react@18.2.0))(react@18.2.0) + '@radix-ui/react-radio-group': + specifier: ^1.2.3 + version: 1.2.3(@types/react-dom@18.2.23)(@types/react@18.2.73)(react-dom@18.2.0(react@18.2.0))(react@18.2.0) '@radix-ui/react-scroll-area': specifier: ^1.1.0 version: 1.1.0(@types/react-dom@18.2.23)(@types/react@18.2.73)(react-dom@18.2.0(react@18.2.0))(react@18.2.0) @@ -272,7 +275,7 @@ importers: version: 8.57.0 eslint-config-airbnb-typescript: specifier: ^17.1.0 - version: 17.1.0(@typescript-eslint/eslint-plugin@6.21.0(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint@8.57.0)(typescript@5.2.2))(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint-plugin-import@2.29.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0))(eslint@8.57.0) + version: 17.1.0(@typescript-eslint/eslint-plugin@6.21.0(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint@8.57.0)(typescript@5.2.2))(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint-plugin-import@2.29.1)(eslint@8.57.0) eslint-config-prettier: specifier: ^9.1.0 version: 9.1.0(eslint@8.57.0) @@ -711,6 +714,9 @@ packages: '@radix-ui/primitive@1.1.0': resolution: {integrity: sha512-4Z8dn6Upk0qk4P74xBhZ6Hd/w0mPEzOOLxy4xiPXOXqjF7jZS0VAKk7/x/H6FyY2zCkYJqePf1G5KmkmNJ4RBA==} + '@radix-ui/primitive@1.1.1': + resolution: {integrity: sha512-SJ31y+Q/zAyShtXJc8x83i9TYdbAfHZ++tUZnvjJJqFjzsdUnKsxPL6IEtBlxKkU7yzer//GQtZSV4GbldL3YA==} + '@radix-ui/react-accordion@1.1.2': resolution: {integrity: sha512-fDG7jcoNKVjSK6yfmuAs0EnPDro0WMXIhMtXdTBWqEioVW206ku+4Lw07e+13lUkFkpoEQ2PdeMIAGpdqEAmDg==} peerDependencies: @@ -789,6 +795,19 
@@ packages: '@types/react-dom': optional: true + '@radix-ui/react-collection@1.1.2': + resolution: {integrity: sha512-9z54IEKRxIa9VityapoEYMuByaG42iSy1ZXlY2KcuLSEtq8x4987/N6m15ppoMffgZX72gER2uHe1D9Y6Unlcw==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + '@radix-ui/react-compose-refs@1.0.0': resolution: {integrity: sha512-0KaSv6sx787/hK3eF53iOkiSLwAGlFMx5lotrqD2pTjB18KbybKoEIgkNZTKC60YECDQTKGTRcDBILwZVqVKvA==} peerDependencies: @@ -812,6 +831,15 @@ packages: '@types/react': optional: true + '@radix-ui/react-compose-refs@1.1.1': + resolution: {integrity: sha512-Y9VzoRDSJtgFMUCoiZBDVo084VQ5hfpXxVE+NgkdNsjiDBByiImMZKKhxMwCbdHvhlENG6a833CbFkOQvTricw==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@radix-ui/react-context@1.0.0': resolution: {integrity: sha512-1pVM9RfOQ+n/N5PJK33kRSKsr1glNxomxONs5c49MliinBY6Yw2Q995qfBUUo0/Mbg05B/sGA0gkgPI7kmSHBg==} peerDependencies: @@ -835,6 +863,15 @@ packages: '@types/react': optional: true + '@radix-ui/react-context@1.1.1': + resolution: {integrity: sha512-UASk9zi+crv9WteK/NU4PLvOoL3OuE6BWVKNF6hPRBtYBDXQ2u5iu3O59zUlJiTVvkyuycnqrztsHVJwcK9K+Q==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@radix-ui/react-dialog@1.0.0': resolution: {integrity: sha512-Yn9YU+QlHYLWwV1XfKiqnGVpWYWk6MeBVM6x/bcoyPvxgjQGoeT35482viLPctTMWoMw0PoHgqfSox7Ig+957Q==} peerDependencies: @@ -969,6 +1006,15 @@ packages: '@types/react': optional: true + '@radix-ui/react-id@1.1.0': + resolution: {integrity: sha512-EJUrI8yYh7WOjNOqpoJaf1jlFIH2LvtgAl+YcFqNCa+4hj64ZXmPkAKOFs/ukjz3byN6bdb/AVUqHkI8/uWWMA==} + 
peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@radix-ui/react-label@2.0.2': resolution: {integrity: sha512-N5ehvlM7qoTLx7nWPodsPYPgMzA5WM8zZChQg8nyFJKnDO5WHdba1vv5/H6IO5LtJMfD2Q3wh1qHFGNtK0w3bQ==} peerDependencies: @@ -1085,6 +1131,19 @@ packages: '@types/react-dom': optional: true + '@radix-ui/react-presence@1.1.2': + resolution: {integrity: sha512-18TFr80t5EVgL9x1SwF/YGtfG+l0BS0PRAlCWBDoBEiDQjeKgnNZRVJp/oVBl24sr3Gbfwc/Qpj4OcWTQMsAEg==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + '@radix-ui/react-primitive@1.0.0': resolution: {integrity: sha512-EyXe6mnRlHZ8b6f4ilTDrXmkLShICIuOTTj0GX4w1rp+wSxf3+TD05u1UOITC8VsJ2a9nwHvdXtOXEOl0Cw/zQ==} peerDependencies: @@ -1117,6 +1176,32 @@ packages: '@types/react-dom': optional: true + '@radix-ui/react-primitive@2.0.2': + resolution: {integrity: sha512-Ec/0d38EIuvDF+GZjcMU/Ze6MxntVJYO/fRlCPhCaVUyPY9WTalHJw54tp9sXeJo3tlShWpy41vQRgLRGOuz+w==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-radio-group@1.2.3': + resolution: {integrity: sha512-xtCsqt8Rp09FK50ItqEqTJ7Sxanz8EM8dnkVIhJrc/wkMMomSmXHvYbhv3E7Zx4oXh98aaLt9W679SUYXg4IDA==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + 
'@radix-ui/react-roving-focus@1.0.4': resolution: {integrity: sha512-2mUg5Mgcu001VkGy+FfzZyzbmuUWzgWkj3rvv4yu+mLw03+mTzbxZHvfcGyFp2b8EkQeMkpRQ5FiA2Vr2O6TeQ==} peerDependencies: @@ -1130,6 +1215,19 @@ packages: '@types/react-dom': optional: true + '@radix-ui/react-roving-focus@1.1.2': + resolution: {integrity: sha512-zgMQWkNO169GtGqRvYrzb0Zf8NhMHS2DuEB/TiEmVnpr5OqPU3i8lfbxaAmC2J/KYuIQxyoQQ6DxepyXp61/xw==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + '@radix-ui/react-scroll-area@1.1.0': resolution: {integrity: sha512-9ArIZ9HWhsrfqS765h+GZuLoxaRHD/j0ZWOWilsCvYTpYJp8XwCqNG7Dt9Nu/TItKOdgLGkOPCodQvDc+UMwYg==} peerDependencies: @@ -1192,6 +1290,15 @@ packages: '@types/react': optional: true + '@radix-ui/react-slot@1.1.2': + resolution: {integrity: sha512-YAKxaiGsSQJ38VzKH86/BPRC4rh+b1Jpa+JneA5LRE7skmLPNAyeG8kPJj/oo4STLvlrs8vkf/iYyc3A5stYCQ==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@radix-ui/react-switch@1.0.3': resolution: {integrity: sha512-mxm87F88HyHztsI7N+ZUmEoARGkC22YVW5CaC+Byc+HRpuvCrOBPTAnXgf+tZ/7i0Sg/eOePGdMhUKhPaQEqow==} peerDependencies: @@ -1281,6 +1388,15 @@ packages: '@types/react': optional: true + '@radix-ui/react-use-controllable-state@1.1.0': + resolution: {integrity: sha512-MtfMVJiSr2NjzS0Aa90NPTnvTSg6C/JLCV7ma0W6+OMV78vd8OyRpID+Ng9LxzsPbLeuBnWBA1Nq30AtBIDChw==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@radix-ui/react-use-escape-keydown@1.0.0': resolution: {integrity: sha512-JwfBCUIfhXRxKExgIqGa4CQsiMemo1Xt0W/B4ei3fpzpvPENKpMKQ8mZSB6Acj3ebrAEgi2xiQvcI1PAAodvyg==} peerDependencies: @@ 
-1327,6 +1443,15 @@ packages: '@types/react': optional: true + '@radix-ui/react-use-previous@1.1.0': + resolution: {integrity: sha512-Z/e78qg2YFnnXcW88A4JmTtm4ADckLno6F7OXotmkQfeuCVaKuYzqAATPhVzl3delXE7CxIV8shofPn3jPc5Og==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@radix-ui/react-use-rect@1.0.1': resolution: {integrity: sha512-Cq5DLuSiuYVKNU8orzJMbl15TXilTnJKUCltMVQg53BQOF1/C5toAaGrowkgksdBQ9H+SRL23g0HDmg9tvmxXw==} peerDependencies: @@ -1345,6 +1470,15 @@ packages: '@types/react': optional: true + '@radix-ui/react-use-size@1.1.0': + resolution: {integrity: sha512-XW3/vWuIXHa+2Uwcc2ABSfcCledmXhhQPlGbfcRXbiUQI5Icjcg19BGCZVKKInYbvUCut/ufbbLLPFC5cbb1hw==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@radix-ui/react-visually-hidden@1.0.3': resolution: {integrity: sha512-D4w41yN5YRKtu464TLnByKzMDG/JlMPHtfZgQAu9v6mNakUqGUI9vUrfQKz8NK41VMm/xbZbh76NUTVtIYqOMA==} peerDependencies: @@ -4505,6 +4639,8 @@ snapshots: '@radix-ui/primitive@1.1.0': {} + '@radix-ui/primitive@1.1.1': {} + '@radix-ui/react-accordion@1.1.2(@types/react-dom@18.2.23)(@types/react@18.2.73)(react-dom@18.2.0(react@18.2.0))(react@18.2.0)': dependencies: '@babel/runtime': 7.24.1 @@ -4593,6 +4729,18 @@ snapshots: '@types/react': 18.2.73 '@types/react-dom': 18.2.23 + '@radix-ui/react-collection@1.1.2(@types/react-dom@18.2.23)(@types/react@18.2.73)(react-dom@18.2.0(react@18.2.0))(react@18.2.0)': + dependencies: + '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.2.73)(react@18.2.0) + '@radix-ui/react-context': 1.1.1(@types/react@18.2.73)(react@18.2.0) + '@radix-ui/react-primitive': 2.0.2(@types/react-dom@18.2.23)(@types/react@18.2.73)(react-dom@18.2.0(react@18.2.0))(react@18.2.0) + '@radix-ui/react-slot': 1.1.2(@types/react@18.2.73)(react@18.2.0) + react: 18.2.0 + react-dom: 
18.2.0(react@18.2.0) + optionalDependencies: + '@types/react': 18.2.73 + '@types/react-dom': 18.2.23 + '@radix-ui/react-compose-refs@1.0.0(react@18.2.0)': dependencies: '@babel/runtime': 7.24.1 @@ -4611,6 +4759,12 @@ snapshots: optionalDependencies: '@types/react': 18.2.73 + '@radix-ui/react-compose-refs@1.1.1(@types/react@18.2.73)(react@18.2.0)': + dependencies: + react: 18.2.0 + optionalDependencies: + '@types/react': 18.2.73 + '@radix-ui/react-context@1.0.0(react@18.2.0)': dependencies: '@babel/runtime': 7.24.1 @@ -4629,6 +4783,12 @@ snapshots: optionalDependencies: '@types/react': 18.2.73 + '@radix-ui/react-context@1.1.1(@types/react@18.2.73)(react@18.2.0)': + dependencies: + react: 18.2.0 + optionalDependencies: + '@types/react': 18.2.73 + '@radix-ui/react-dialog@1.0.0(@types/react@18.2.73)(react-dom@18.2.0(react@18.2.0))(react@18.2.0)': dependencies: '@babel/runtime': 7.24.1 @@ -4797,6 +4957,13 @@ snapshots: optionalDependencies: '@types/react': 18.2.73 + '@radix-ui/react-id@1.1.0(@types/react@18.2.73)(react@18.2.0)': + dependencies: + '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.2.73)(react@18.2.0) + react: 18.2.0 + optionalDependencies: + '@types/react': 18.2.73 + '@radix-ui/react-label@2.0.2(@types/react-dom@18.2.23)(@types/react@18.2.73)(react-dom@18.2.0(react@18.2.0))(react@18.2.0)': dependencies: '@babel/runtime': 7.24.1 @@ -4942,6 +5109,16 @@ snapshots: '@types/react': 18.2.73 '@types/react-dom': 18.2.23 + '@radix-ui/react-presence@1.1.2(@types/react-dom@18.2.23)(@types/react@18.2.73)(react-dom@18.2.0(react@18.2.0))(react@18.2.0)': + dependencies: + '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.2.73)(react@18.2.0) + '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.2.73)(react@18.2.0) + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + optionalDependencies: + '@types/react': 18.2.73 + '@types/react-dom': 18.2.23 + '@radix-ui/react-primitive@1.0.0(react-dom@18.2.0(react@18.2.0))(react@18.2.0)': dependencies: 
'@babel/runtime': 7.24.1 @@ -4968,6 +5145,33 @@ snapshots: '@types/react': 18.2.73 '@types/react-dom': 18.2.23 + '@radix-ui/react-primitive@2.0.2(@types/react-dom@18.2.23)(@types/react@18.2.73)(react-dom@18.2.0(react@18.2.0))(react@18.2.0)': + dependencies: + '@radix-ui/react-slot': 1.1.2(@types/react@18.2.73)(react@18.2.0) + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + optionalDependencies: + '@types/react': 18.2.73 + '@types/react-dom': 18.2.23 + + '@radix-ui/react-radio-group@1.2.3(@types/react-dom@18.2.23)(@types/react@18.2.73)(react-dom@18.2.0(react@18.2.0))(react@18.2.0)': + dependencies: + '@radix-ui/primitive': 1.1.1 + '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.2.73)(react@18.2.0) + '@radix-ui/react-context': 1.1.1(@types/react@18.2.73)(react@18.2.0) + '@radix-ui/react-direction': 1.1.0(@types/react@18.2.73)(react@18.2.0) + '@radix-ui/react-presence': 1.1.2(@types/react-dom@18.2.23)(@types/react@18.2.73)(react-dom@18.2.0(react@18.2.0))(react@18.2.0) + '@radix-ui/react-primitive': 2.0.2(@types/react-dom@18.2.23)(@types/react@18.2.73)(react-dom@18.2.0(react@18.2.0))(react@18.2.0) + '@radix-ui/react-roving-focus': 1.1.2(@types/react-dom@18.2.23)(@types/react@18.2.73)(react-dom@18.2.0(react@18.2.0))(react@18.2.0) + '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.2.73)(react@18.2.0) + '@radix-ui/react-use-previous': 1.1.0(@types/react@18.2.73)(react@18.2.0) + '@radix-ui/react-use-size': 1.1.0(@types/react@18.2.73)(react@18.2.0) + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + optionalDependencies: + '@types/react': 18.2.73 + '@types/react-dom': 18.2.23 + '@radix-ui/react-roving-focus@1.0.4(@types/react-dom@18.2.23)(@types/react@18.2.73)(react-dom@18.2.0(react@18.2.0))(react@18.2.0)': dependencies: '@babel/runtime': 7.24.1 @@ -4986,6 +5190,23 @@ snapshots: '@types/react': 18.2.73 '@types/react-dom': 18.2.23 + 
'@radix-ui/react-roving-focus@1.1.2(@types/react-dom@18.2.23)(@types/react@18.2.73)(react-dom@18.2.0(react@18.2.0))(react@18.2.0)': + dependencies: + '@radix-ui/primitive': 1.1.1 + '@radix-ui/react-collection': 1.1.2(@types/react-dom@18.2.23)(@types/react@18.2.73)(react-dom@18.2.0(react@18.2.0))(react@18.2.0) + '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.2.73)(react@18.2.0) + '@radix-ui/react-context': 1.1.1(@types/react@18.2.73)(react@18.2.0) + '@radix-ui/react-direction': 1.1.0(@types/react@18.2.73)(react@18.2.0) + '@radix-ui/react-id': 1.1.0(@types/react@18.2.73)(react@18.2.0) + '@radix-ui/react-primitive': 2.0.2(@types/react-dom@18.2.23)(@types/react@18.2.73)(react-dom@18.2.0(react@18.2.0))(react@18.2.0) + '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.2.73)(react@18.2.0) + '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.2.73)(react@18.2.0) + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + optionalDependencies: + '@types/react': 18.2.73 + '@types/react-dom': 18.2.23 + '@radix-ui/react-scroll-area@1.1.0(@types/react-dom@18.2.23)(@types/react@18.2.73)(react-dom@18.2.0(react@18.2.0))(react@18.2.0)': dependencies: '@radix-ui/number': 1.1.0 @@ -5064,6 +5285,13 @@ snapshots: optionalDependencies: '@types/react': 18.2.73 + '@radix-ui/react-slot@1.1.2(@types/react@18.2.73)(react@18.2.0)': + dependencies: + '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.2.73)(react@18.2.0) + react: 18.2.0 + optionalDependencies: + '@types/react': 18.2.73 + '@radix-ui/react-switch@1.0.3(@types/react-dom@18.2.23)(@types/react@18.2.73)(react-dom@18.2.0(react@18.2.0))(react@18.2.0)': dependencies: '@babel/runtime': 7.24.1 @@ -5171,6 +5399,13 @@ snapshots: optionalDependencies: '@types/react': 18.2.73 + '@radix-ui/react-use-controllable-state@1.1.0(@types/react@18.2.73)(react@18.2.0)': + dependencies: + '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.2.73)(react@18.2.0) + react: 18.2.0 + optionalDependencies: + '@types/react': 18.2.73 + 
'@radix-ui/react-use-escape-keydown@1.0.0(react@18.2.0)': dependencies: '@babel/runtime': 7.24.1 @@ -5210,6 +5445,12 @@ snapshots: optionalDependencies: '@types/react': 18.2.73 + '@radix-ui/react-use-previous@1.1.0(@types/react@18.2.73)(react@18.2.0)': + dependencies: + react: 18.2.0 + optionalDependencies: + '@types/react': 18.2.73 + '@radix-ui/react-use-rect@1.0.1(@types/react@18.2.73)(react@18.2.0)': dependencies: '@babel/runtime': 7.24.1 @@ -5226,6 +5467,13 @@ snapshots: optionalDependencies: '@types/react': 18.2.73 + '@radix-ui/react-use-size@1.1.0(@types/react@18.2.73)(react@18.2.0)': + dependencies: + '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.2.73)(react@18.2.0) + react: 18.2.0 + optionalDependencies: + '@types/react': 18.2.73 + '@radix-ui/react-visually-hidden@1.0.3(@types/react-dom@18.2.23)(@types/react@18.2.73)(react-dom@18.2.0(react@18.2.0))(react@18.2.0)': dependencies: '@babel/runtime': 7.24.1 @@ -6650,7 +6898,7 @@ snapshots: escape-string-regexp@4.0.0: {} - eslint-config-airbnb-base@15.0.0(eslint-plugin-import@2.29.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0))(eslint@8.57.0): + eslint-config-airbnb-base@15.0.0(eslint-plugin-import@2.29.1)(eslint@8.57.0): dependencies: confusing-browser-globals: 1.0.11 eslint: 8.57.0 @@ -6659,12 +6907,12 @@ snapshots: object.entries: 1.1.8 semver: 6.3.1 - eslint-config-airbnb-typescript@17.1.0(@typescript-eslint/eslint-plugin@6.21.0(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint@8.57.0)(typescript@5.2.2))(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint-plugin-import@2.29.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0))(eslint@8.57.0): + 
eslint-config-airbnb-typescript@17.1.0(@typescript-eslint/eslint-plugin@6.21.0(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint@8.57.0)(typescript@5.2.2))(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint-plugin-import@2.29.1)(eslint@8.57.0): dependencies: '@typescript-eslint/eslint-plugin': 6.21.0(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint@8.57.0)(typescript@5.2.2) '@typescript-eslint/parser': 6.21.0(eslint@8.57.0)(typescript@5.2.2) eslint: 8.57.0 - eslint-config-airbnb-base: 15.0.0(eslint-plugin-import@2.29.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0))(eslint@8.57.0) + eslint-config-airbnb-base: 15.0.0(eslint-plugin-import@2.29.1)(eslint@8.57.0) eslint-plugin-import: 2.29.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0) eslint-config-prettier@9.1.0(eslint@8.57.0): @@ -6684,7 +6932,7 @@ snapshots: debug: 4.3.4 enhanced-resolve: 5.16.0 eslint: 8.57.0 - eslint-module-utils: 2.8.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint-plugin-import@2.29.1)(eslint@8.57.0))(eslint@8.57.0) + eslint-module-utils: 2.8.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0) eslint-plugin-import: 2.29.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0) fast-glob: 3.3.2 get-tsconfig: 4.7.3 @@ -6696,7 +6944,7 @@ snapshots: - eslint-import-resolver-webpack - supports-color - 
eslint-module-utils@2.8.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint-plugin-import@2.29.1)(eslint@8.57.0))(eslint@8.57.0): + eslint-module-utils@2.8.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0): dependencies: debug: 3.2.7 optionalDependencies: @@ -6717,7 +6965,7 @@ snapshots: doctrine: 2.1.0 eslint: 8.57.0 eslint-import-resolver-node: 0.3.9 - eslint-module-utils: 2.8.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint-plugin-import@2.29.1)(eslint@8.57.0))(eslint@8.57.0) + eslint-module-utils: 2.8.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.2.2))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0) hasown: 2.0.2 is-core-module: 2.13.1 is-glob: 4.0.3 diff --git a/frontend/app/src/components/molecules/nav-bar/banner.tsx b/frontend/app/src/components/molecules/nav-bar/banner.tsx new file mode 100644 index 000000000..7e6981118 --- /dev/null +++ b/frontend/app/src/components/molecules/nav-bar/banner.tsx @@ -0,0 +1,51 @@ +import React from 'react'; +import { Button } from '@/components/ui/button'; + +export interface BannerProps { + message: JSX.Element; + type?: 'info' | 'warning' | 'success' | 'error'; + actionText?: string; + onAction?: () => void; +} + +export const Banner: React.FC = ({ + message, + type = 'info', + actionText, + onAction, +}) => { + const getBgColor = () => { + switch (type) { + case 'warning': + return 'bg-amber-50 dark:bg-amber-900/20 text-amber-800 dark:text-amber-200'; + case 'success': + return 'bg-green-50 dark:bg-green-900/20 text-green-800 
dark:text-green-200'; + case 'error': + return 'bg-red-50 dark:bg-red-900/20 text-red-800 dark:text-red-200'; + default: + return 'bg-blue-50 dark:bg-blue-900/20 text-blue-800 dark:text-blue-200'; + } + }; + + return ( +
+
+
+

{message}

+
+
+ {actionText && onAction && ( + + )} +
+
+
+ ); +}; diff --git a/frontend/app/src/components/molecules/nav-bar/nav-bar.tsx b/frontend/app/src/components/molecules/nav-bar/nav-bar.tsx index 1da2d30b3..883f9a9f2 100644 --- a/frontend/app/src/components/molecules/nav-bar/nav-bar.tsx +++ b/frontend/app/src/components/molecules/nav-bar/nav-bar.tsx @@ -9,8 +9,8 @@ import { DropdownMenuTrigger, } from '@/components/ui/dropdown-menu'; -import { useNavigate } from 'react-router-dom'; -import api, { User } from '@/lib/api'; +import { useLocation, useNavigate, useSearchParams } from 'react-router-dom'; +import api, { TenantVersion, User } from '@/lib/api'; import { useApiError } from '@/lib/hooks'; import { useMutation } from '@tanstack/react-query'; import hatchet from '@/assets/hatchet_logo.png'; @@ -26,15 +26,14 @@ import { BiUserCircle, } from 'react-icons/bi'; import { useTheme } from '@/components/theme-provider'; -import { useMemo } from 'react'; +import { useEffect, useMemo } from 'react'; import useApiMeta from '@/pages/auth/hooks/use-api-meta'; import { VersionInfo } from '@/pages/main/info/components/version-info'; +import { useTenant } from '@/lib/atoms'; +import { routes } from '@/router'; +import { Banner, BannerProps } from './banner'; -interface MainNavProps { - user: User; -} - -export default function MainNav({ user }: MainNavProps) { +function HelpDropdown() { const meta = useApiMeta(); const hasPylon = useMemo(() => { @@ -45,11 +44,66 @@ export default function MainNav({ user }: MainNavProps) { return !!meta.data.pylonAppId; }, [meta]); - const navigate = useNavigate(); - const { handleApiError } = useApiError({}); - const { toggleSidebarOpen } = useSidebar(); + return ( + + + + + + {hasPylon && ( + (window as any).Pylon('show')}> + + Chat with Support + + )} + + window.open('https://docs.hatchet.run/home/basics/steps', '_blank') + } + > + + Documentation + + + window.open('https://discord.com/invite/ZMeUafwH89', '_blank') + } + > + + Join Discord + + + 
window.open('https://hatchet.run/office-hours', '_blank') + } + > + + Schedule Office Hours + + window.open('/onboarding/get-started', '_self')} + > + + Restart Tutorial + + + + ); +} - const { toggleTheme, theme } = useTheme(); +function AccountDropdown({ user }: MainNavProps) { + const navigate = useNavigate(); + const { tenant } = useTenant(); + + const { handleApiError } = useApiError({}); + + const { toggleTheme } = useTheme(); const logoutMutation = useMutation({ mutationKey: ['user:update:logout'], @@ -63,106 +117,132 @@ export default function MainNav({ user }: MainNavProps) { }); return ( -
-
- -
- - - - - - {hasPylon && ( - (window as any).Pylon('show')}> - - Chat with Support - - )} - - window.open( - 'https://docs.hatchet.run/home/basics/steps', - '_blank', - ) - } - > - - Documentation - - - window.open('https://discord.com/invite/ZMeUafwH89', '_blank') - } - > - - Join Discord - - - window.open('https://hatchet.run/office-hours', '_blank') - } - > - - Schedule Office Hours - - window.open('/onboarding/get-started', '_self')} - > - - Restart Tutorial - - - - - - - - - -
-

- {user.name || user.email} -

-

- {user.email} -

-
-
- - - - - - toggleTheme()}> - Toggle Theme - - logoutMutation.mutate()}> - Log out - ⇧⌘Q - -
-
+ + + + + +
+

+ {user.name || user.email} +

+

+ {user.email} +

+
+
+ + + + + {tenant?.version == TenantVersion.V1 && + location.pathname.includes('v1') && ( + navigate('/')}> + View Legacy V0 Data + + )} + + toggleTheme()}> + Toggle Theme + + logoutMutation.mutate()}> + Log out + ⇧⌘Q + +
+ + ); +} + +interface MainNavProps { + user: User; + setHasBanner?: (state: boolean) => void; +} + +export default function MainNav({ user, setHasBanner }: MainNavProps) { + const { toggleSidebarOpen } = useSidebar(); + const { theme } = useTheme(); + const { tenant } = useTenant(); + const { pathname } = useLocation(); + const navigate = useNavigate(); + const [params] = useSearchParams(); + + const versionedRoutes = useMemo( + () => + routes + .at(0) + ?.children?.find((r) => r.path === '/v1/') + ?.children?.find((r) => r.path === '/v1/' && r.children?.length) + ?.children?.map((c) => c.path) + ?.map((p) => p?.replace('/v1', '')) || [], + [], + ); + + const tenantVersion = tenant?.version || TenantVersion.V0; + + const banner: BannerProps | undefined = useMemo(() => { + const shouldShowVersionUpgradeButton = + versionedRoutes.includes(pathname) && // It is a versioned route + !pathname.includes('/v1') && // The user is not already on the v1 version + tenantVersion === TenantVersion.V1; // The tenant is on the v1 version + + if (shouldShowVersionUpgradeButton) { + return { + message: ( + <> + You are viewing legacy V0 data for a tenant that was upgraded to V1 + runtime. + + ), + type: 'warning', + actionText: 'View V1', + onAction: () => { + navigate({ + pathname: '/v1' + pathname, + search: params.toString(), + }); + }, + }; + } + + return; + }, [navigate, params, pathname, tenantVersion, versionedRoutes]); + + useEffect(() => { + if (!setHasBanner) { + return; + } + setHasBanner(!!banner); + }, [setHasBanner, banner]); + + return ( +
+ {banner && } + + {/* Main Navigation Bar */} +
+
+ +
+ + +
diff --git a/frontend/app/src/components/ui/radio-group.tsx b/frontend/app/src/components/ui/radio-group.tsx new file mode 100644 index 000000000..d79d8ac49 --- /dev/null +++ b/frontend/app/src/components/ui/radio-group.tsx @@ -0,0 +1,41 @@ +import * as React from 'react'; +import * as RadioGroupPrimitive from '@radix-ui/react-radio-group'; +import { cn } from '@/lib/utils'; +import { DotFilledIcon } from '@radix-ui/react-icons'; + +const RadioGroup = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => { + return ( + + ); +}); +RadioGroup.displayName = RadioGroupPrimitive.Root.displayName; + +const RadioGroupItem = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => { + return ( + + + + + + ); +}); +RadioGroupItem.displayName = RadioGroupPrimitive.Item.displayName; + +export { RadioGroup, RadioGroupItem }; diff --git a/frontend/app/src/components/v1/cloud/billing/index.ts b/frontend/app/src/components/v1/cloud/billing/index.ts new file mode 100644 index 000000000..30a025e9b --- /dev/null +++ b/frontend/app/src/components/v1/cloud/billing/index.ts @@ -0,0 +1,2 @@ +export * from './payment-methods'; +export * from './subscription'; diff --git a/frontend/app/src/components/v1/cloud/billing/payment-methods.tsx b/frontend/app/src/components/v1/cloud/billing/payment-methods.tsx new file mode 100644 index 000000000..7cb10c5c0 --- /dev/null +++ b/frontend/app/src/components/v1/cloud/billing/payment-methods.tsx @@ -0,0 +1,111 @@ +import { Button } from '@/components/v1/ui/button'; +import { + FaCcAmex, + FaCcDiscover, + FaCcMastercard, + FaCcVisa, + FaCreditCard, + FaCcDinersClub, + FaCcJcb, +} from 'react-icons/fa'; + +import { LuBanknote } from 'react-icons/lu'; + +import { IconType } from 'react-icons/lib'; +import { TenantContextType } from '@/lib/outlet'; +import { useOutletContext } from 'react-router-dom'; +import { useApiError } from '@/lib/hooks'; +import 
{ useState } from 'react'; +import { Spinner } from '@/components/v1/ui/loading'; +import { TenantPaymentMethod } from '@/lib/api/generated/cloud/data-contracts'; +import { cloudApi } from '@/lib/api/api'; + +const ccIcons: Record = { + visa: FaCcVisa, + mastercard: FaCcMastercard, + amex: FaCcAmex, + discover: FaCcDiscover, + dinersclub: FaCcDinersClub, + jcb: FaCcJcb, + generic: FaCreditCard, + link: LuBanknote, +}; + +export interface PaymentMethodsProps { + hasMethods?: boolean; + methods?: TenantPaymentMethod[]; +} + +export function PaymentMethods({ + methods = [], + hasMethods, +}: PaymentMethodsProps) { + const { tenant } = useOutletContext(); + const { handleApiError } = useApiError({}); + const [loading, setLoading] = useState(false); + + const manageClicked = async () => { + try { + setLoading(true); + const link = await cloudApi.billingPortalLinkGet(tenant.metadata.id); + if (link.data.url) { + window.location.href = link.data.url; + } + } catch (e) { + handleApiError(e as any); + } finally { + setLoading(false); + } + }; + + return ( + <> +
+
+

+ Payment Methods +

+
+ {hasMethods ? ( + <> + {methods.map((method, i) => { + const Icon = + method.brand in ccIcons + ? ccIcons[method.brand] + : ccIcons.generic; + return ( +
+
+
+ + {method.brand.toUpperCase()} + {method.last4 && ` *** *** ${method.last4} `} + {method.expiration && `(Expires {method.expiration})`} +
+
+
+ ); + })} +
+ +
+ + ) : ( +
+

+ No payment methods added. Payment method is required to upgrade + your subscription. +

+
+ +
+
+ )} +
+ + ); +} diff --git a/frontend/app/src/components/v1/cloud/billing/subscription.tsx b/frontend/app/src/components/v1/cloud/billing/subscription.tsx new file mode 100644 index 000000000..817dd324e --- /dev/null +++ b/frontend/app/src/components/v1/cloud/billing/subscription.tsx @@ -0,0 +1,270 @@ +import { ConfirmDialog } from '@/components/v1/molecules/confirm-dialog'; +import { Alert, AlertDescription, AlertTitle } from '@/components/v1/ui/alert'; +import { Badge } from '@/components/v1/ui/badge'; +import { Button } from '@/components/v1/ui/button'; +import { + Card, + CardDescription, + CardHeader, + CardTitle, +} from '@/components/v1/ui/card'; +import { Label } from '@/components/v1/ui/label'; +import { Spinner } from '@/components/v1/ui/loading'; +import { Switch } from '@/components/v1/ui/switch'; +import { queries } from '@/lib/api'; +import { cloudApi } from '@/lib/api/api'; +import { + TenantSubscription, + SubscriptionPlan, + Coupon, +} from '@/lib/api/generated/cloud/data-contracts'; +import { useApiError } from '@/lib/hooks'; +import { TenantContextType } from '@/lib/outlet'; +import queryClient from '@/query-client'; +import { ExclamationTriangleIcon } from '@radix-ui/react-icons'; +import { useMutation } from '@tanstack/react-query'; +import React, { useCallback, useEffect, useMemo, useState } from 'react'; +import { useOutletContext } from 'react-router-dom'; + +interface SubscriptionProps { + active?: TenantSubscription; + plans?: SubscriptionPlan[]; + hasPaymentMethods?: boolean; + coupons?: Coupon[]; +} + +export const Subscription: React.FC = ({ + active, + plans, + coupons, + hasPaymentMethods, +}) => { + // Implement the logic for the Subscription component here + + const [loading, setLoading] = useState(); + const [showAnnual, setShowAnnual] = useState(false); + const [isChangeConfirmOpen, setChangeConfirmOpen] = useState< + SubscriptionPlan | undefined + >(undefined); + + const { tenant } = useOutletContext(); + const { handleApiError } = 
useApiError({}); + const [portalLoading, setPortalLoading] = useState(false); + + const manageClicked = async () => { + try { + if (portalLoading) { + return; + } + setPortalLoading(true); + const link = await cloudApi.billingPortalLinkGet(tenant.metadata.id); + window.open(link.data.url, '_blank'); + } catch (e) { + handleApiError(e as any); + } finally { + setPortalLoading(false); + } + }; + + const subscriptionMutation = useMutation({ + mutationKey: ['user:update:logout'], + mutationFn: async ({ plan_code }: { plan_code: string }) => { + const [plan, period] = plan_code.split(':'); + setLoading(plan_code); + await cloudApi.subscriptionUpsert(tenant.metadata.id, { plan, period }); + }, + onSuccess: async () => { + await Promise.all([ + queryClient.invalidateQueries({ + queryKey: queries.tenantResourcePolicy.get(tenant.metadata.id) + .queryKey, + }), + queryClient.invalidateQueries({ + queryKey: queries.cloud.billing(tenant.metadata.id).queryKey, + }), + ]); + + setLoading(undefined); + }, + onError: handleApiError, + }); + + const activePlanCode = useMemo( + () => + active?.plan + ? [active.plan, active.period].filter((x) => !!x).join(':') + : 'free', + [active], + ); + + useEffect(() => { + return setShowAnnual(active?.period?.includes('yearly') || false); + }, [active]); + + const sortedPlans = useMemo(() => { + return plans + ?.filter( + (v) => + v.plan_code === 'free' || + (showAnnual + ? 
v.period?.includes('yearly') + : v.period?.includes('monthly')), + ) + .sort((a, b) => a.amount_cents - b.amount_cents); + }, [plans, showAnnual]); + + const isUpgrade = useCallback( + (plan: SubscriptionPlan) => { + if (!active) { + return true; + } + + const activePlan = sortedPlans?.find( + (p) => p.plan_code === activePlanCode, + ); + + const activeAmount = activePlan?.amount_cents || 0; + + return plan.amount_cents > activeAmount; + }, + [active, activePlanCode, sortedPlans], + ); + + return ( + <> + + Are you sure you'd like to change to {isChangeConfirmOpen?.name}{' '} + plan? +
+
+ Upgrades will be prorated and downgrades will take effect at the end + of the billing period. + + } + submitLabel={'Change Plan'} + onSubmit={async () => { + await subscriptionMutation.mutateAsync({ + plan_code: isChangeConfirmOpen!.plan_code, + }); + setLoading(undefined); + setChangeConfirmOpen(undefined); + }} + onCancel={() => setChangeConfirmOpen(undefined)} + isLoading={!!loading} + /> +
+
+

+ Subscription + {coupons?.map((coupon, i) => ( + + {coupon.name} coupon applied + + ))} +

+ +
+ { + setShowAnnual((checkedState) => !checkedState); + }} + /> + +
+
+

+ For plan details, please visit{' '} + + our pricing page + {' '} + or{' '} + + contact us + {' '} + if you have custom requirements. +

+ {!hasPaymentMethods && ( + + + + No Payment Method. + + + A payment method is required to upgrade your subscription, please{' '} + + add one + {' '} + first. + + + )} + +
+ {sortedPlans?.map((plan, i) => ( + + + + {plan.name} + + + $ + {( + plan.amount_cents / + 100 / + (plan.period == 'yearly' ? 12 : 1) + ).toLocaleString()}{' '} + per month billed {plan.period}* + + + + + + + ))} +
+ {active?.note &&

{active?.note}

} +

+ * subscription fee billed upfront {showAnnual ? 'yearly' : 'monthly'}, + overages billed at the end of each month for usage in that month +

+
+ + ); +}; diff --git a/frontend/app/src/components/v1/cloud/logging/logs.tsx b/frontend/app/src/components/v1/cloud/logging/logs.tsx new file mode 100644 index 000000000..04aaa82bd --- /dev/null +++ b/frontend/app/src/components/v1/cloud/logging/logs.tsx @@ -0,0 +1,218 @@ +import React, { useEffect, useRef, useState } from 'react'; +import AnsiToHtml from 'ansi-to-html'; +import DOMPurify from 'dompurify'; + +const convert = new AnsiToHtml({ + newline: true, + bg: 'transparent', +}); + +export interface ExtendedLogLine { + badge?: React.ReactNode; + /** @format date-time */ + timestamp?: string; + instance?: string; + line: string; +} + +type LogProps = { + logs: ExtendedLogLine[]; + onTopReached: () => void; + onBottomReached: () => void; + autoScroll?: boolean; +}; + +const options: Intl.DateTimeFormatOptions = { + year: 'numeric', + month: 'numeric', + day: 'numeric', + hour: 'numeric', + minute: 'numeric', + second: 'numeric', +}; + +const LoggingComponent: React.FC = ({ + logs, + onTopReached, + onBottomReached, + autoScroll = true, +}) => { + const containerRef = useRef(null); + const [refreshing, setRefreshing] = useState(false); + const [lastTopCall, setLastTopCall] = useState(0); + const [lastBottomCall, setLastBottomCall] = useState(0); + const [firstMount, setFirstMount] = useState(true); + const previousScrollHeightRef = useRef(0); + + const handleScroll = () => { + if (!containerRef.current) { + return; + } + const { scrollTop, scrollHeight, clientHeight } = containerRef.current; + previousScrollHeightRef.current = scrollHeight; + const now = Date.now(); + + if (scrollTop === 0 && now - lastTopCall >= 1000) { + if (logs.length > 0) { + onTopReached(); + } + setLastTopCall(now); + } else if ( + scrollTop + clientHeight >= scrollHeight && + now - lastBottomCall >= 1000 + ) { + if (logs.length > 0) { + onBottomReached(); + } + setLastBottomCall(now); + } + }; + + useEffect(() => { + setTimeout(() => { + const container = containerRef.current; + + if 
(container && container.scrollHeight > container.clientHeight) { + if (firstMount && autoScroll) { + container.scrollTo({ + top: container.scrollHeight, + behavior: 'smooth', + }); + + setFirstMount(false); + } + } + }, 250); + }, [containerRef, firstMount, autoScroll]); + + useEffect(() => { + if (refreshing) { + const timer = setTimeout(() => { + setRefreshing(false); + }, 1000); + return () => clearTimeout(timer); + } + }, [refreshing]); + + useEffect(() => { + if (!autoScroll) { + return; + } + + const container = containerRef.current; + if (!container) { + return; + } + + const previousScrollHeight = previousScrollHeightRef.current; + const currentScrollHeight = container.scrollHeight; + const { scrollTop, clientHeight } = container; + + const isAtBottom = scrollTop + clientHeight >= previousScrollHeight; + + if (!isAtBottom) { + const newScrollTop = + scrollTop + (currentScrollHeight - previousScrollHeight); + container.scrollTo({ top: newScrollTop }); + } else { + container.scrollTo({ top: currentScrollHeight, behavior: 'smooth' }); + } + }, [logs, autoScroll]); + + const showLogs = + logs.length > 0 + ? logs + : [ + { + line: 'Waiting for logs...', + timestamp: new Date().toISOString(), + instance: 'Hatchet', + }, + ]; + + const sortedLogs = [...showLogs].sort((a, b) => { + if (!a.timestamp || !b.timestamp) { + return 0; + } + + return new Date(a.timestamp).getTime() - new Date(b.timestamp).getTime(); + }); + + return ( +
+ {refreshing && ( +
+ Refreshing... +
+ )} + {sortedLogs.map((log, i) => { + const sanitizedHtml = DOMPurify.sanitize( + convert.toHtml(log.line || ''), + { + USE_PROFILES: { html: true }, + }, + ); + + const logHash = log.timestamp + generateHash(log.line); + + return ( +

+ {log.badge} + {log.timestamp && ( + + {new Date(log.timestamp) + .toLocaleString('sv', options) + .replace(',', '.') + .replace(' ', 'T')} + + )} + {log.instance && ( + + {log.instance} + + )} + +

+ ); + })} +
+ ); +}; + +const generateHash = (input: string | undefined): string => { + if (!input) { + return Math.random().toString(36).substring(2, 15); + } + const trimmedInput = input.substring(0, 50); + return cyrb53(trimmedInput) + ''; +}; + +// source: https://github.com/bryc/code/blob/master/jshash/experimental/cyrb53.js +const cyrb53 = function (str: string, seed = 0) { + let h1 = 0xdeadbeef ^ seed, + h2 = 0x41c6ce57 ^ seed; + for (let i = 0, ch; i < str.length; i++) { + ch = str.charCodeAt(i); + h1 = Math.imul(h1 ^ ch, 2654435761); + h2 = Math.imul(h2 ^ ch, 1597334677); + } + h1 = Math.imul(h1 ^ (h1 >>> 16), 2246822507); + h1 ^= Math.imul(h2 ^ (h2 >>> 13), 3266489909); + h2 = Math.imul(h2 ^ (h2 >>> 16), 2246822507); + h2 ^= Math.imul(h1 ^ (h1 >>> 13), 3266489909); + return 4294967296 * (2097151 & h2) + (h1 >>> 0); +}; + +export default LoggingComponent; diff --git a/frontend/app/src/components/v1/hooks/use-toast.ts b/frontend/app/src/components/v1/hooks/use-toast.ts new file mode 100644 index 000000000..25a2d8488 --- /dev/null +++ b/frontend/app/src/components/v1/hooks/use-toast.ts @@ -0,0 +1,190 @@ +import * as React from 'react'; + +import type { ToastActionElement, ToastProps } from '@/components/ui/toast'; + +const TOAST_LIMIT = 1; +const TOAST_REMOVE_DELAY = 1000000; + +type ToasterToast = ToastProps & { + id: string; + title?: React.ReactNode; + description?: React.ReactNode; + action?: ToastActionElement; +}; + +const actionTypes = { + ADD_TOAST: 'ADD_TOAST', + UPDATE_TOAST: 'UPDATE_TOAST', + DISMISS_TOAST: 'DISMISS_TOAST', + REMOVE_TOAST: 'REMOVE_TOAST', +} as const; + +let count = 0; + +function genId() { + count = (count + 1) % Number.MAX_SAFE_INTEGER; + return count.toString(); +} + +type ActionType = typeof actionTypes; + +type Action = + | { + type: ActionType['ADD_TOAST']; + toast: ToasterToast; + } + | { + type: ActionType['UPDATE_TOAST']; + toast: Partial; + } + | { + type: ActionType['DISMISS_TOAST']; + toastId?: ToasterToast['id']; + } + | { + 
type: ActionType['REMOVE_TOAST']; + toastId?: ToasterToast['id']; + }; + +interface State { + toasts: ToasterToast[]; +} + +const toastTimeouts = new Map>(); + +const addToRemoveQueue = (toastId: string) => { + if (toastTimeouts.has(toastId)) { + return; + } + + const timeout = setTimeout(() => { + toastTimeouts.delete(toastId); + dispatch({ + type: 'REMOVE_TOAST', + toastId: toastId, + }); + }, TOAST_REMOVE_DELAY); + + toastTimeouts.set(toastId, timeout); +}; + +export const reducer = (state: State, action: Action): State => { + switch (action.type) { + case 'ADD_TOAST': + return { + ...state, + toasts: [action.toast, ...state.toasts].slice(0, TOAST_LIMIT), + }; + + case 'UPDATE_TOAST': + return { + ...state, + toasts: state.toasts.map((t) => + t.id === action.toast.id ? { ...t, ...action.toast } : t, + ), + }; + + case 'DISMISS_TOAST': { + const { toastId } = action; + + // ! Side effects ! - This could be extracted into a dismissToast() action, + // but I'll keep it here for simplicity + if (toastId) { + addToRemoveQueue(toastId); + } else { + state.toasts.forEach((toast) => { + addToRemoveQueue(toast.id); + }); + } + + return { + ...state, + toasts: state.toasts.map((t) => + t.id === toastId || toastId === undefined + ? 
{ + ...t, + open: false, + } + : t, + ), + }; + } + case 'REMOVE_TOAST': + if (action.toastId === undefined) { + return { + ...state, + toasts: [], + }; + } + return { + ...state, + toasts: state.toasts.filter((t) => t.id !== action.toastId), + }; + } +}; + +const listeners: Array<(state: State) => void> = []; + +let memoryState: State = { toasts: [] }; + +function dispatch(action: Action) { + memoryState = reducer(memoryState, action); + listeners.forEach((listener) => { + listener(memoryState); + }); +} + +type Toast = Omit; + +function toast({ ...props }: Toast) { + const id = genId(); + + const update = (props: ToasterToast) => + dispatch({ + type: 'UPDATE_TOAST', + toast: { ...props, id }, + }); + const dismiss = () => dispatch({ type: 'DISMISS_TOAST', toastId: id }); + + dispatch({ + type: 'ADD_TOAST', + toast: { + ...props, + id, + open: true, + onOpenChange: (open) => { + if (!open) { + dismiss(); + } + }, + }, + }); + + return { + id: id, + dismiss, + update, + }; +} + +function useToast() { + const [state, setState] = React.useState(memoryState); + + React.useEffect(() => { + listeners.push(setState); + return () => { + const index = listeners.indexOf(setState); + if (index > -1) { + listeners.splice(index, 1); + } + }; + }, [state]); + + return { + ...state, + toast, + dismiss: (toastId?: string) => dispatch({ type: 'DISMISS_TOAST', toastId }), + }; +} + +export { useToast, toast }; diff --git a/frontend/app/src/components/v1/molecules/analytics-provider.tsx b/frontend/app/src/components/v1/molecules/analytics-provider.tsx new file mode 100644 index 000000000..36dea882c --- /dev/null +++ b/frontend/app/src/components/v1/molecules/analytics-provider.tsx @@ -0,0 +1,72 @@ +import { User } from '@/lib/api'; +import { useTenant } from '@/lib/atoms'; +import useApiMeta from '@/pages/auth/hooks/use-api-meta'; +import React, { PropsWithChildren, useEffect, useMemo } from 'react'; + +interface AnalyticsProviderProps { + user: User; +} + +const AnalyticsProvider: 
React.FC< + PropsWithChildren & AnalyticsProviderProps +> = ({ user, children }) => { + const meta = useApiMeta(); + + const [loaded, setLoaded] = React.useState(false); + + const { tenant } = useTenant(); + + const config = useMemo(() => { + return meta.data?.posthog; + }, [meta]); + + useEffect(() => { + if (loaded) { + return; + } + + if (tenant && tenant.analyticsOptOut) { + console.log( + 'Skipping Analytics initialization due to opt-out, we respect user privacy.', + ); + return; + } + + if (!config || !tenant) { + return; + } + + console.log('Initializing Analytics, opt out in settings.'); + setLoaded(true); + const posthogScript = ` +!function(t,e){var o,n,p,r;e.__SV||(window.posthog=e,e._i=[],e.init=function(i,s,a){function g(t,e){var o=e.split(".");2==o.length&&(t=t[o[0]],e=o[1]),t[e]=function(){t.push([e].concat(Array.prototype.slice.call(arguments,0)))}}(p=t.createElement("script")).type="text/javascript",p.async=!0,p.src=s.api_host.replace(".i.posthog.com","-assets.i.posthog.com")+"/static/array.js",(r=t.getElementsByTagName("script")[0]).parentNode.insertBefore(p,r);var u=e;for(void 0!==a?u=e[a]=[]:a="posthog",u.people=u.people||[],u.toString=function(t){var e="posthog";return"posthog"!==a&&(e+="."+a),t||(e+=" (stub)"),e},u.people.toString=function(){return u.toString(1)+".people (stub)"},o="capture identify alias people.set people.set_once set_config register register_once unregister opt_out_capturing has_opted_out_capturing opt_in_capturing reset isFeatureEnabled onFeatureFlags getFeatureFlag getFeatureFlagPayload reloadFeatureFlags group updateEarlyAccessFeatureEnrollment getEarlyAccessFeatures getActiveMatchingSurveys getSurveys onSessionId".split(" "),n=0;n { + if (!config || !user) { + return; + } + + setTimeout(() => { + (window as any).posthog.identify( + user.metadata.id, // Required. 
Replace 'distinct_id' with your user's unique identifier + { email: user.email, name: user.name }, // $set, optional + {}, // $set_once, optional + ); + }); + }, [user, config, tenant]); + + return children; +}; + +export default AnalyticsProvider; diff --git a/frontend/app/src/components/v1/molecules/brush-chart/area-chart.tsx b/frontend/app/src/components/v1/molecules/brush-chart/area-chart.tsx new file mode 100644 index 000000000..b4c1945ea --- /dev/null +++ b/frontend/app/src/components/v1/molecules/brush-chart/area-chart.tsx @@ -0,0 +1,360 @@ +import React, { useMemo, useCallback } from 'react'; +import { Group } from '@visx/group'; +import { AreaClosed, Line, Bar } from '@visx/shape'; +import { + withTooltip, + TooltipWithBounds, + Tooltip, + defaultStyles, +} from '@visx/tooltip'; +import { GridRows, GridColumns } from '@visx/grid'; +import { WithTooltipProvidedProps } from '@visx/tooltip/lib/enhancers/withTooltip'; +import { scaleTime, scaleLinear } from '@visx/scale'; +import { AxisLeft, AxisBottom } from '@visx/axis'; +import { LinearGradient } from '@visx/gradient'; +import { curveMonotoneX } from '@visx/curve'; +import { localPoint } from '@visx/event'; +import { max, extent, bisector } from '@visx/vendor/d3-array'; +import { timeFormat } from '@visx/vendor/d3-time-format'; +import { Text } from '@visx/text'; + +const getDate = (d: MetricValue) => d.date; +const getValue = (d: MetricValue) => d.value; + +// format to 2 decimal places +export const format2Dec = (d: number) => { + if (!d.toFixed) { + return '0.00'; + } + + return `${d.toFixed(2)}`; +}; + +const bisectDate = bisector((d) => d.date).left; + +export interface MetricValue { + date: Date; + value: number; +} +type TooltipData = MetricValue; + +const formatDate = timeFormat('%y-%m-%d %I:%M:%S'); + +const accentColor = '#ffffff44'; +const background = '#1E293B'; +const background2 = '#8c77e0'; +const accentColorDark = '#8c77e0'; + +const tooltipStyles = { + ...defaultStyles, + border: '1px solid 
white', + color: 'white', + background, +}; + +const axisColor = '#cecece'; + +const axisBottomTickLabelProps = { + textAnchor: 'middle' as const, + fontFamily: 'Arial', + fontSize: 10, + fill: axisColor, +}; + +const axisLeftTickLabelProps = { + dx: '-0.25em', + dy: '0.25em', + fontFamily: 'Arial', + fontSize: 10, + textAnchor: 'end' as const, + fill: axisColor, +}; + +export const formatPercentTooltip = (d: number) => `${format2Dec(d)}%`; + +type AreaChartProps = { + data: MetricValue[]; + kind: 'area' | 'bar'; + gradientColor?: string; + width: number; + height: number; + hideBottomAxis?: boolean; + hideLeftAxis?: boolean; + children?: React.ReactNode; + yLabel?: string; + xLabel?: string; + yDomain?: [number, number]; + xDomain?: [Date, Date]; + centerText?: string; + tooltipFormat?: (d: number) => string; +}; + +export default withTooltip( + ({ + data, + kind, + gradientColor = background2, + width, + height, + hideBottomAxis = false, + hideLeftAxis = false, + children, + yLabel, + xLabel, + yDomain, + xDomain, + centerText, + showTooltip, + hideTooltip, + tooltipFormat, + tooltipData, + tooltipTop = 0, + tooltipLeft = 0, + }: AreaChartProps & WithTooltipProvidedProps) => { + if (width < 10) { + return null; + } + + const innerWidth = width; + const innerHeight = height; + + const dateScale = useMemo( + () => + scaleTime({ + range: [0, width], + domain: xDomain || (extent(data, getDate) as [Date, Date]), + }), + [width, data, xDomain], + ); + + const yScale = useMemo( + () => + scaleLinear({ + range: [height, 0], + domain: yDomain || [0, 1.3 * (max(data, getValue) || 0)], + nice: true, + }), + [height, data, yDomain], + ); + + const handleTooltip = useCallback( + ( + event: + | React.TouchEvent + | React.MouseEvent, + ) => { + const { x } = localPoint(event) || { x: 0 }; + const x0 = dateScale.invert(x); + const index = bisectDate(data, x0, 1); + const d0 = data[index - 1]; + const d1 = data[index]; + let d = d0; + if (d1 && getDate(d1)) { + d = + x0.valueOf() 
- getDate(d0).valueOf() > + getDate(d1).valueOf() - x0.valueOf() + ? d1 + : d0; + } + + showTooltip({ + tooltipData: d, + tooltipLeft: x, + tooltipTop: yScale(getValue(d)), + }); + }, + [showTooltip, yScale, dateScale, data], + ); + + let barWidth = innerWidth / data.length; + + if (barWidth <= 5) { + barWidth = 6; + } + + return ( +
+ + {centerText && ( + + {centerText} + + )} + + + + + + {kind == 'bar' && + data.map((d, i) => { + if (i == 0) { + return ( + + ); + } + + return ( + + ); + })} + {kind == 'area' && ( + + data={data} + x={(d) => dateScale(d.date) || 0} + y={(d) => yScale(d.value) || 0} + yScale={yScale} + strokeWidth={1} + stroke="url(#gradient)" + fill="url(#gradient)" + curve={curveMonotoneX} + height={innerHeight} + /> + )} + {!hideBottomAxis && ( + 520 ? 10 : 5} + stroke={axisColor} + tickStroke={axisColor} + tickLabelProps={axisBottomTickLabelProps} + label={xLabel} + /> + )} + {!hideLeftAxis && ( + + )} + {children} + + hideTooltip()} + /> + {data.length > 0 && tooltipData && ( + + + + + + )} + + {data.length > 0 && tooltipData && ( +
+ + {tooltipFormat + ? tooltipFormat(getValue(tooltipData)) + : getValue(tooltipData)} + + + {formatDate(getDate(tooltipData))} + +
+ )} +
+ ); + }, +); diff --git a/frontend/app/src/components/v1/molecules/charts/zoomable.tsx b/frontend/app/src/components/v1/molecules/charts/zoomable.tsx new file mode 100644 index 000000000..98f8ec067 --- /dev/null +++ b/frontend/app/src/components/v1/molecules/charts/zoomable.tsx @@ -0,0 +1,454 @@ +import { useState, useMemo, useRef } from 'react'; +import { + CartesianGrid, + XAxis, + YAxis, + ReferenceArea, + ResponsiveContainer, + Bar, + BarChart, + LineChart, + Line, + Area, + AreaChart, +} from 'recharts'; +import { + ChartConfig, + ChartContainer, + ChartTooltip, + ChartTooltipContent, +} from '@/components/v1/ui/chart'; +import { capitalize, cn } from '@/lib/utils'; + +export type DataPoint = Record & { + date: string; +}; + +const getNextActiveLabel = (activeLabel: string, data: DataPoint[]) => { + const currentIndex = data.findIndex((d) => d.date === activeLabel); + if (currentIndex === -1) { + return null; + } + + // if we're at the end of the data, determine the time between the last two data points and add that to the last date + if (currentIndex === data.length - 1) { + const lastDate = new Date(data[currentIndex].date); + const secondLastDate = new Date(data[currentIndex - 1].date); + const diff = lastDate.getTime() - secondLastDate.getTime(); + return new Date(lastDate.getTime() + diff).toISOString(); + } + + return data[currentIndex + 1]?.date || activeLabel; +}; + +const getPrevActiveLabel = (activeLabel: string, data: DataPoint[]) => { + const currentIndex = data.findIndex((d) => d.date === activeLabel); + if (currentIndex === -1) { + return activeLabel; + } + + // if we're at the start of the data, determine the time between the first two data points and subtract that from the first date + if (currentIndex === 0) { + const firstDate = new Date(data[currentIndex].date); + const secondDate = new Date(data[currentIndex + 1].date); + const diff = secondDate.getTime() - firstDate.getTime(); + return new Date(firstDate.getTime() - diff).toISOString(); + 
} + + return data[currentIndex - 1]?.date || activeLabel; +}; + +type ZoomableChartProps = { + data: DataPoint[]; + colors?: Record; + zoom?: (startTime: string, endTime: string) => void; + showYAxis?: boolean; + kind: 'bar' | 'line' | 'area'; + className?: string; +}; + +export function ZoomableChart({ + data, + colors, + zoom, + showYAxis = true, + kind = 'bar', + className, +}: ZoomableChartProps) { + const [refAreaLeft, setRefAreaLeft] = useState(null); + const [refAreaRight, setRefAreaRight] = useState(null); + const [actualRefAreaLeft, setActualRefAreaLeft] = useState( + null, + ); + const [actualRefAreaRight, setActualRefAreaRight] = useState( + null, + ); + const [isSelecting, setIsSelecting] = useState(false); + const chartRef = useRef(null); + + const chartConfig = useMemo(() => { + const keys = Object.keys(data[0] || {}).filter((key) => key !== 'date'); + return keys.reduce((acc, key, index) => { + let color = `hsl(${(index * 360) / keys.length}, 70%, 50%)`; + + if (colors && colors[key]) { + color = colors[key]; + } + + if (index < 5) { + color = `hsl(var(--chart-${index + 1}))`; + } + + acc[key] = { + label: capitalize(key), + color: colors?.[key] || color, + }; + return acc; + }, {}); + }, [data, colors]); + + const handleMouseDown = (e: any) => { + if (e.activeLabel) { + setRefAreaLeft(e.activeLabel); + setActualRefAreaLeft(getPrevActiveLabel(e.activeLabel, data)); + setIsSelecting(true); + } + }; + + const handleMouseMove = (e: any) => { + if (isSelecting && e.activeLabel) { + setRefAreaRight(e.activeLabel); + setActualRefAreaRight(getNextActiveLabel(e.activeLabel, data)); + } + }; + + const handleMouseUp = () => { + if (actualRefAreaLeft && actualRefAreaRight) { + const [left, right] = [actualRefAreaLeft, actualRefAreaRight].sort(); + zoom?.(left, right); + } + setRefAreaLeft(null); + setActualRefAreaLeft(null); + setRefAreaRight(null); + setActualRefAreaRight(null); + setIsSelecting(false); + }; + + const minDate = new Date( + 
Math.min(...data.map((d) => new Date(d.date).getTime())), + ); + const maxDate = new Date( + Math.max(...data.map((d) => new Date(d.date).getTime())), + ); + + const formatXAxis = (tickItem: string) => { + const date = new Date(tickItem); + const timeDiff = maxDate.getTime() - minDate.getTime(); + const oneDay = 24 * 60 * 60 * 1000; + const sevenDays = 7 * oneDay; + + if (timeDiff > sevenDays) { + return date.toLocaleDateString([], { month: 'short', day: 'numeric' }); + } else if (timeDiff > oneDay) { + return `${date.toLocaleDateString([], { month: 'short', day: 'numeric' })} ${date.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' })}`; + } else { + return date.toLocaleTimeString([], { + hour: '2-digit', + minute: '2-digit', + }); + } + }; + + // remove date from dataKeys + const dataKeys = Object.keys(data[0] || {}).filter((key) => key !== 'date'); + + return ( + +
+ {getChildChart(kind, { + data, + showYAxis, + formatXAxis, + handleMouseDown, + handleMouseMove, + handleMouseUp, + refAreaLeft, + refAreaRight, + chartConfig, + dataKeys, + })} +
+
+ ); +} + +function getChildChart( + kind: 'bar' | 'line' | 'area', + props: ChildChartProps, +) { + switch (kind) { + case 'bar': + return ; + case 'line': + return ; + case 'area': + return ; + } +} + +type ChildChartProps = { + data: DataPoint[]; + showYAxis?: boolean; + formatXAxis: (tickItem: string) => string; + handleMouseDown: (e: any) => void; + handleMouseMove: (e: any) => void; + handleMouseUp: () => void; + refAreaLeft: string | null; + refAreaRight: string | null; + chartConfig: ChartConfig; + dataKeys: string[]; +}; + +function ChildBarChart({ + data, + showYAxis = true, + formatXAxis, + handleMouseDown, + handleMouseMove, + handleMouseUp, + refAreaLeft, + refAreaRight, + chartConfig, + dataKeys, +}: ChildChartProps) { + return ( + + + + + {showYAxis && ( + + )} + new Date(value).toLocaleString()} + /> + } + /> + {dataKeys.map((key) => ( + + ))} + + {refAreaLeft && refAreaRight && ( + + )} + + + ); +} + +function ChildLineChart({ + data, + showYAxis = true, + formatXAxis, + handleMouseDown, + handleMouseMove, + handleMouseUp, + refAreaLeft, + refAreaRight, + chartConfig, + dataKeys, +}: ChildChartProps) { + return ( + + + + + {showYAxis && ( + + )} + new Date(value).toLocaleString()} + /> + } + /> + {dataKeys.map((key) => { + return ( + + ); + })} + + {refAreaLeft && refAreaRight && ( + + )} + + + ); +} + +function ChildAreaChart({ + data, + showYAxis = true, + formatXAxis, + handleMouseDown, + handleMouseMove, + handleMouseUp, + refAreaLeft, + refAreaRight, + chartConfig, + dataKeys, +}: ChildChartProps) { + return ( + + + + + {showYAxis && ( + + )} + new Date(value).toLocaleString()} + /> + } + /> + {dataKeys.map((key) => { + return ( + + ); + })} + + {refAreaLeft && refAreaRight && ( + + )} + + + ); +} diff --git a/frontend/app/src/components/v1/molecules/combobox/combobox.tsx b/frontend/app/src/components/v1/molecules/combobox/combobox.tsx new file mode 100644 index 000000000..e283154c1 --- /dev/null +++ 
b/frontend/app/src/components/v1/molecules/combobox/combobox.tsx @@ -0,0 +1,292 @@ +import * as React from 'react'; +import { CheckIcon, PlusCircledIcon } from '@radix-ui/react-icons'; +import { Column } from '@tanstack/react-table'; + +import { cn } from '@/lib/utils'; +import { Badge } from '@/components/v1/ui/badge'; +import { Button } from '@/components/v1/ui/button'; +import { + Command, + CommandEmpty, + CommandGroup, + CommandInput, + CommandItem, + CommandList, + CommandSeparator, +} from '@/components/v1/ui/command'; +import { + Popover, + PopoverContent, + PopoverTrigger, +} from '@/components/v1/ui/popover'; +import { Separator } from '@/components/v1/ui/separator'; +import { ToolbarType } from '../data-table/data-table-toolbar'; +import { Input } from '@/components/v1/ui/input'; +import { BiX } from 'react-icons/bi'; +import { useForm } from 'react-hook-form'; +import { z } from 'zod'; +import { zodResolver } from '@hookform/resolvers/zod'; + +interface DataTableFacetedFilterProps { + column?: Column; + title?: string; + type?: ToolbarType; + options?: { + label: string; + value: string; + icon?: React.ComponentType<{ className?: string }>; + }[]; +} + +const keyValuePairSchema = z.object({ + key: z.string().min(1, 'Key is required'), + value: z.string().min(1, 'Value is required'), +}); + +const arrayInputSchema = z.object({ + values: z.string().min(1, 'At least one value is required'), +}); + +type KeyValuePair = z.infer; +type ArrayInput = z.infer; + +export function DataTableFacetedFilter({ + column, + title, + type = ToolbarType.Checkbox, + options, +}: DataTableFacetedFilterProps) { + return ( + column?.setFilterValue(values)} + /> + ); +} + +export function Combobox({ + values = [], + title, + icon, + type = ToolbarType.Checkbox, + options, + setValues, +}: { + values?: string[]; + icon?: JSX.Element; + title?: string; + type?: ToolbarType; + options?: { + label: string; + value: string; + icon?: React.ComponentType<{ className?: string }>; + 
}[]; + setValues: (selectedValues: string[]) => void; +}) { + const { register, handleSubmit, reset } = useForm({ + resolver: zodResolver( + type === ToolbarType.KeyValue ? keyValuePairSchema : arrayInputSchema, + ), + defaultValues: + type === ToolbarType.KeyValue ? { key: '', value: '' } : { values: '' }, + }); + + const submit = (data: KeyValuePair | ArrayInput) => { + if ('key' in data) { + values.push(`${data.key}:${data.value}`); + } else { + data.values.split(',').forEach((value) => values.push(value.trim())); + } + setValues(values); + reset(); + }; + + const remove = (filter: string) => { + const index = values.indexOf(filter); + if (index > -1) { + values.splice(index, 1); + } + setValues(values); + }; + + return ( + + + + + )) + )} +
+ + )} + + + + {[ToolbarType.Array, ToolbarType.KeyValue].includes(type) && ( +
+
+ {values.map((filter, index) => ( + + {filter} + + + ))} +
+
+ {type === ToolbarType.KeyValue ? ( +
+ + +
+ ) : ( +
+ +
+ )} + + {values.length > 0 && ( + + )} +
+
+ )} + + {[ToolbarType.Checkbox, ToolbarType.Radio].includes(type) && ( + + + + No results found. + + {options?.map((option) => { + const isSelected = values.indexOf(option.value) != -1; + return ( + { + if (isSelected) { + values.splice(values.indexOf(option.value), 1); + } else { + if (type == 'radio') { + values = []; + } + values.push(option.value); + } + setValues(values); + }} + > +
+ +
+ {option.icon && ( + + )} + {option.label} +
+ ); + })} +
+ {values.length > 0 && ( + <> + + + setValues([])} + className="justify-center text-center" + > + Reset + + + + )} +
+
+ )} +
+ + ); +} diff --git a/frontend/app/src/components/v1/molecules/confirm-dialog.tsx b/frontend/app/src/components/v1/molecules/confirm-dialog.tsx new file mode 100644 index 000000000..90534d1db --- /dev/null +++ b/frontend/app/src/components/v1/molecules/confirm-dialog.tsx @@ -0,0 +1,60 @@ +import { Button, ButtonProps } from '@/components/v1/ui/button'; +import { Spinner } from '@/components/v1/ui/loading.tsx'; +import { + Dialog, + DialogContent, + DialogHeader, + DialogTitle, +} from '@/components/v1/ui/dialog'; + +interface ConfirmDialogProps { + title: string; + description: string | JSX.Element; + + submitLabel: string; + submitVariant?: ButtonProps['variant']; + cancelLabel?: string; + className?: string; + onSubmit: () => void; + onCancel: () => void; + isLoading: boolean; + isOpen: boolean; +} + +export function ConfirmDialog({ + className, + title, + description, + submitLabel, + submitVariant = 'destructive', + cancelLabel = 'Cancel', + isOpen, + ...props +}: ConfirmDialogProps) { + return ( + + + + {title} + +
+
{description}
+
+ + +
+
+
+
+ ); +} diff --git a/frontend/app/src/components/v1/molecules/data-table/data-table-column-header.tsx b/frontend/app/src/components/v1/molecules/data-table/data-table-column-header.tsx new file mode 100644 index 000000000..3942e3ce0 --- /dev/null +++ b/frontend/app/src/components/v1/molecules/data-table/data-table-column-header.tsx @@ -0,0 +1,71 @@ +import { + ArrowDownIcon, + ArrowUpIcon, + CaretSortIcon, + EyeNoneIcon, +} from '@radix-ui/react-icons'; +import { Column } from '@tanstack/react-table'; + +import { cn } from '@/lib/utils'; +import { Button } from '@/components/v1/ui/button'; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuSeparator, + DropdownMenuTrigger, +} from '@/components/v1/ui/dropdown-menu'; + +interface DataTableColumnHeaderProps + extends React.HTMLAttributes { + column: Column; + title: string; +} + +export function DataTableColumnHeader({ + column, + title, + className, +}: DataTableColumnHeaderProps) { + if (!column.getCanSort()) { + return
{title}
; + } + + return ( +
+ + + + + + column.toggleSorting(false)}> + + Asc + + column.toggleSorting(true)}> + + Desc + + + column.toggleVisibility(false)}> + + Hide + + + +
+ ); +} diff --git a/frontend/app/src/components/v1/molecules/data-table/data-table-faceted-filter.tsx b/frontend/app/src/components/v1/molecules/data-table/data-table-faceted-filter.tsx new file mode 100644 index 000000000..065784d41 --- /dev/null +++ b/frontend/app/src/components/v1/molecules/data-table/data-table-faceted-filter.tsx @@ -0,0 +1,34 @@ +import * as React from 'react'; +import { Column } from '@tanstack/react-table'; + +import { ToolbarType } from './data-table-toolbar'; +import { Combobox } from '../combobox/combobox'; + +interface DataTableFacetedFilterProps { + column?: Column; + title?: string; + type?: ToolbarType; + options?: { + label: string; + value: string; + icon?: React.ComponentType<{ className?: string }>; + }[]; +} + +export function DataTableFacetedFilter({ + column, + title, + type = ToolbarType.Checkbox, + options, +}: DataTableFacetedFilterProps) { + const value = column?.getFilterValue(); + return ( + column?.setFilterValue(values)} + /> + ); +} diff --git a/frontend/app/src/components/v1/molecules/data-table/data-table-pagination.tsx b/frontend/app/src/components/v1/molecules/data-table/data-table-pagination.tsx new file mode 100644 index 000000000..91f8c6464 --- /dev/null +++ b/frontend/app/src/components/v1/molecules/data-table/data-table-pagination.tsx @@ -0,0 +1,121 @@ +import { + ChevronLeftIcon, + ChevronRightIcon, + DoubleArrowLeftIcon, + DoubleArrowRightIcon, +} from '@radix-ui/react-icons'; +import { Table } from '@tanstack/react-table'; + +import { Button } from '@/components/v1/ui/button'; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from '@/components/v1/ui/select'; +import { Label } from '@radix-ui/react-label'; + +interface DataTablePaginationProps { + table: Table; + onSetPageSize?: (pageSize: number) => void; + showSelectedRows?: boolean; +} + +export function DataTablePagination({ + table, + onSetPageSize, + showSelectedRows = true, +}: DataTablePaginationProps) { + const 
pagination = table.getState().pagination; + + return ( +
+
+ {showSelectedRows && ( +
+ {table.getFilteredSelectedRowModel().rows.length} of{' '} + {table.getFilteredRowModel().rows.length} row(s) selected. +
+ )} +
+ +
+
+ + +
+
+ Page {pagination.pageIndex + 1} of {table.getPageCount()} +
+
+ + + + +
+
+
+ ); +} diff --git a/frontend/app/src/components/v1/molecules/data-table/data-table-row-actions.tsx b/frontend/app/src/components/v1/molecules/data-table/data-table-row-actions.tsx new file mode 100644 index 000000000..4ea46a134 --- /dev/null +++ b/frontend/app/src/components/v1/molecules/data-table/data-table-row-actions.tsx @@ -0,0 +1,70 @@ +import { DotsVerticalIcon } from '@radix-ui/react-icons'; +import { Row } from '@tanstack/react-table'; + +import { Button } from '@/components/v1/ui/button'; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger, +} from '@/components/v1/ui/dropdown-menu'; + +import { IDGetter } from './data-table'; +import { + Tooltip, + TooltipProvider, + TooltipTrigger, + TooltipContent, +} from '@/components/v1/ui/tooltip'; + +interface DataTableRowActionsProps> { + row: Row; + actions?: { + label: string; + onClick: (data: TData) => void; + disabled?: boolean | string; + }[]; +} + +export function DataTableRowActions>({ + row, + actions, +}: DataTableRowActionsProps) { + if (!actions?.length) { + return null; + } + + return ( + + + + + + {actions?.map((action) => ( + + + + action.onClick(row.original)} + disabled={!!action.disabled} + className="w-full hover:cursor-pointer" + > + {action.label} + + + {action.disabled && ( + {action.disabled} + )} + + + ))} + + + ); +} diff --git a/frontend/app/src/components/v1/molecules/data-table/data-table-toolbar.tsx b/frontend/app/src/components/v1/molecules/data-table/data-table-toolbar.tsx new file mode 100644 index 000000000..a230ad3a4 --- /dev/null +++ b/frontend/app/src/components/v1/molecules/data-table/data-table-toolbar.tsx @@ -0,0 +1,100 @@ +import { Cross2Icon } from '@radix-ui/react-icons'; +import { Table } from '@tanstack/react-table'; + +import { Button } from '@/components/v1/ui/button'; +import { DataTableViewOptions } from './data-table-view-options'; + +import { DataTableFacetedFilter } from './data-table-faceted-filter'; +import { Input } from 
'@/components/v1/ui/input.tsx'; +import { Spinner } from '@/components/v1/ui/loading'; + +export interface FilterOption { + label: string; + value: string; + icon?: React.ComponentType<{ className?: string }>; +} + +export enum ToolbarType { + Checkbox = 'checkbox', + Radio = 'radio', + KeyValue = 'key-value', + Array = 'array', +} + +export type ToolbarFilters = { + columnId: string; + title: string; + type?: ToolbarType; + options?: FilterOption[]; +}[]; + +interface DataTableToolbarProps { + table: Table; + filters: ToolbarFilters; + actions: JSX.Element[]; + setSearch?: (search: string) => void; + search?: string; + showColumnToggle?: boolean; + isLoading?: boolean; + onReset?: () => void; +} + +export function DataTableToolbar({ + table, + filters, + actions, + setSearch, + search, + showColumnToggle, + isLoading = false, + onReset, +}: DataTableToolbarProps) { + const isFiltered = table.getState().columnFilters?.length > 0; + + return ( +
+
+ {setSearch && ( + setSearch(e.target.value)} + className="h-8 w-[150px] lg:w-[250px]" + /> + )} + {filters.map((filter) => { + return ( + + ); + })} + {isFiltered && ( + + )} +
+
+ {isLoading && } + {actions && actions.length > 0 && actions} + {showColumnToggle && } +
+
+ ); +} diff --git a/frontend/app/src/components/v1/molecules/data-table/data-table-view-options.tsx b/frontend/app/src/components/v1/molecules/data-table/data-table-view-options.tsx new file mode 100644 index 000000000..2266e0a4e --- /dev/null +++ b/frontend/app/src/components/v1/molecules/data-table/data-table-view-options.tsx @@ -0,0 +1,57 @@ +import { DropdownMenuTrigger } from '@radix-ui/react-dropdown-menu'; +import { MixerHorizontalIcon } from '@radix-ui/react-icons'; +import { Table } from '@tanstack/react-table'; + +import { Button } from '@/components/v1/ui/button'; +import { + DropdownMenu, + DropdownMenuCheckboxItem, + DropdownMenuContent, + DropdownMenuLabel, + DropdownMenuSeparator, +} from '@/components/v1/ui/dropdown-menu'; + +interface DataTableViewOptionsProps { + table: Table; +} + +export function DataTableViewOptions({ + table, +}: DataTableViewOptionsProps) { + return ( + + + + + + Toggle columns + + {table + .getAllColumns() + .filter( + (column) => + typeof column.accessorFn !== 'undefined' && column.getCanHide(), + ) + .map((column) => { + return ( + column.toggleVisibility(!!value)} + > + {column.id} + + ); + })} + + + ); +} diff --git a/frontend/app/src/components/v1/molecules/data-table/data-table.tsx b/frontend/app/src/components/v1/molecules/data-table/data-table.tsx new file mode 100644 index 000000000..1d8a4832c --- /dev/null +++ b/frontend/app/src/components/v1/molecules/data-table/data-table.tsx @@ -0,0 +1,291 @@ +import * as React from 'react'; +import { + ColumnDef, + ColumnFiltersState, + ExpandedState, + OnChangeFn, + PaginationState, + Row, + RowSelectionState, + SortingState, + VisibilityState, + flexRender, + getCoreRowModel, + getFacetedRowModel, + getFacetedUniqueValues, + getFilteredRowModel, + getPaginationRowModel, + getSortedRowModel, + useReactTable, +} from '@tanstack/react-table'; + +import { + Table, + TableBody, + TableCell, + TableHead, + TableHeader, + TableRow, +} from '@/components/v1/ui/table'; + +import { 
DataTablePagination } from './data-table-pagination'; +import { DataTableToolbar, ToolbarFilters } from './data-table-toolbar'; +import { Skeleton } from '@/components/v1/ui/skeleton'; +import { cn } from '@/lib/utils'; + +export interface IDGetter { + metadata: { + id: string; + }; + subRows?: T[]; + getRow?: () => JSX.Element; + onClick?: () => void; + isExpandable?: boolean; +} + +interface DataTableProps, TValue> { + columns: ColumnDef[]; + data: TData[]; + error?: Error | null; + filters: ToolbarFilters; + actions?: JSX.Element[]; + sorting?: SortingState; + setSorting?: OnChangeFn; + setSearch?: (search: string) => void; + search?: string; + columnFilters?: ColumnFiltersState; + setColumnFilters?: OnChangeFn; + pagination?: PaginationState; + setPagination?: OnChangeFn; + showSelectedRows?: boolean; + pageCount?: number; + onSetPageSize?: (pageSize: number) => void; + showColumnToggle?: boolean; + columnVisibility?: VisibilityState; + setColumnVisibility?: OnChangeFn; + rowSelection?: RowSelectionState; + setRowSelection?: OnChangeFn; + isLoading?: boolean; + enableRowSelection?: boolean; + getRowId?: + | (( + originalRow: TData, + index: number, + parent?: Row | undefined, + ) => string) + | undefined; + manualSorting?: boolean; + manualFiltering?: boolean; + getSubRows?: (row: TData) => TData[]; +} + +interface ExtraDataTableProps { + emptyState?: JSX.Element; + card?: { + containerStyle?: string; + component: React.FC | ((data: any) => JSX.Element); + }; + onToolbarReset?: () => void; +} + +export function DataTable, TValue>({ + columns, + error, + data, + filters, + actions = [], + sorting, + setSorting, + setSearch, + search, + columnFilters, + setColumnFilters, + pagination, + setPagination, + pageCount, + onSetPageSize, + showSelectedRows = true, + showColumnToggle, + columnVisibility, + setColumnVisibility, + rowSelection, + setRowSelection, + isLoading, + getRowId, + emptyState, + card, + manualSorting = true, + manualFiltering = true, + getSubRows, 
+ onToolbarReset, +}: DataTableProps & ExtraDataTableProps) { + const [expanded, setExpanded] = React.useState({}); + + const loadingNoData = isLoading && !data.length; + + const tableData = React.useMemo( + () => (loadingNoData ? Array(10).fill({ metadata: {} }) : data), + [loadingNoData, data], + ); + + const tableColumns = React.useMemo( + () => + loadingNoData + ? columns.map((column) => ({ + ...column, + cell: () => , + })) + : columns, + [loadingNoData, columns], + ); + + const table = useReactTable({ + data: tableData, + columns: tableColumns, + state: { + sorting, + columnVisibility, + rowSelection: rowSelection || {}, + columnFilters, + pagination, + expanded, + }, + pageCount, + enableRowSelection: !!rowSelection, + onRowSelectionChange: setRowSelection, + onSortingChange: setSorting, + onColumnFiltersChange: setColumnFilters, + onColumnVisibilityChange: setColumnVisibility, + onPaginationChange: setPagination, + getCoreRowModel: getCoreRowModel(), + getFilteredRowModel: getFilteredRowModel(), + getPaginationRowModel: getPaginationRowModel(), + getSortedRowModel: getSortedRowModel(), + getFacetedRowModel: getFacetedRowModel(), + getFacetedUniqueValues: getFacetedUniqueValues(), + getSubRows: getSubRows, + onExpandedChange: setExpanded, + // TODO: Figure this out + getRowCanExpand: (row) => row.subRows.length > 0, + manualSorting, + manualFiltering, + manualPagination: true, + getRowId, + }); + + const getTableRow = (row: Row) => { + if (row.original.getRow) { + return row.original.getRow(); + } + + return ( + + {row.getVisibleCells().map((cell) => ( + + {flexRender(cell.column.columnDef.cell, cell.getContext())} + + ))} + + ); + }; + + const getTable = () => ( + + + {table.getHeaderGroups().map((headerGroup) => ( + + {headerGroup.headers.map((header) => { + return ( + + {header.isPlaceholder + ? null + : flexRender( + header.column.columnDef.header, + header.getContext(), + )} + + ); + })} + + ))} + + + {error ? 
( + + + {error.message || 'An error occurred.'} + + + ) : table.getRowModel().rows?.length ? ( + table.getRowModel().rows.map((row) => ( + + {getTableRow(row)} + {row.getIsExpanded() && row.subRows.map((r) => getTableRow(r))} + + )) + ) : ( + + + {emptyState || 'No results.'} + + + )} + +
+ ); + + const getCards = () => ( +
+ {error + ? error.message || 'An error occurred.' + : table.getRowModel().rows?.length + ? table + .getRowModel() + .rows.map((row) => + card?.component + ? card?.component({ data: row.original }) + : null, + ) + : emptyState || 'No results.'} +
+ ); + + return ( +
+ {(setSearch || actions || (filters && filters.length > 0)) && ( + + )} +
+ {!card ? getTable() : getCards()} +
+ {pagination && ( + + )} +
+ ); +} diff --git a/frontend/app/src/components/v1/molecules/nav-bar/tenant-switcher.tsx b/frontend/app/src/components/v1/molecules/nav-bar/tenant-switcher.tsx new file mode 100644 index 000000000..7259e6e67 --- /dev/null +++ b/frontend/app/src/components/v1/molecules/nav-bar/tenant-switcher.tsx @@ -0,0 +1,107 @@ +import { Button } from '@/components/v1/ui/button'; +import { cn } from '@/lib/utils'; +import { + BuildingOffice2Icon, + // ChartBarSquareIcon, + CheckIcon, +} from '@heroicons/react/24/outline'; +import invariant from 'tiny-invariant'; +import { + Command, + CommandEmpty, + CommandItem, + CommandList, + CommandSeparator, +} from '@/components/v1/ui/command'; +import { Link } from 'react-router-dom'; +import { Tenant, TenantMember } from '@/lib/api'; +import { CaretSortIcon, PlusCircledIcon } from '@radix-ui/react-icons'; +import { + PopoverTrigger, + Popover, + PopoverContent, +} from '@radix-ui/react-popover'; +import React from 'react'; +import { useTenant } from '@/lib/atoms'; +import { Spinner } from '@/components/v1/ui/loading.tsx'; +import useApiMeta from '@/pages/auth/hooks/use-api-meta'; + +interface TenantSwitcherProps { + className?: string; + memberships: TenantMember[]; + currTenant: Tenant; +} +export function TenantSwitcher({ + className, + memberships, + currTenant, +}: TenantSwitcherProps) { + const meta = useApiMeta(); + const { setTenant: setCurrTenant } = useTenant(); + const [open, setOpen] = React.useState(false); + + if (!currTenant) { + return ; + } + + return ( + + + + + + + + No tenants found. 
+ {memberships.map((membership) => ( + { + invariant(membership.tenant); + setCurrTenant(membership.tenant); + setOpen(false); + }} + value={membership.tenant?.slug} + className="text-sm cursor-pointer" + > + + {membership.tenant?.name} + + + ))} + + {meta.data?.allowCreateTenant && ( + <> + + + + + + New Tenant + + + + + )} + + + + ); +} diff --git a/frontend/app/src/components/v1/molecules/relative-date.tsx b/frontend/app/src/components/v1/molecules/relative-date.tsx new file mode 100644 index 000000000..6f25959e5 --- /dev/null +++ b/frontend/app/src/components/v1/molecules/relative-date.tsx @@ -0,0 +1,94 @@ +import React, { useEffect, useMemo, useState } from 'react'; +import TimeAgo from 'timeago-react'; +import { + Tooltip, + TooltipContent, + TooltipProvider, + TooltipTrigger, +} from '@/components/v1/ui/tooltip'; + +interface RelativeDateProps { + date?: Date | string; + future?: boolean; +} + +const RelativeDate: React.FC = ({ + date = '', + future = false, +}) => { + const formattedDate = useMemo( + () => (typeof date === 'string' ? 
new Date(date) : date), + [date], + ); + + const [countdown, setCountdown] = useState(''); + + useEffect(() => { + if (future) { + const updateCountdown = () => { + const currentDate = new Date(); + const timeDiff = formattedDate.getTime() - currentDate.getTime(); + + if (timeDiff <= 0) { + setCountdown(''); + return; + } + + const days = Math.floor(timeDiff / (1000 * 3600 * 24)); + const hours = Math.floor( + (timeDiff % (1000 * 3600 * 24)) / (1000 * 3600), + ); + const minutes = Math.floor((timeDiff % (1000 * 3600)) / (1000 * 60)); + const seconds = Math.floor((timeDiff % (1000 * 60)) / 1000); + + const countdownParts = []; + if (days > 0) { + countdownParts.push(`${days}d`); + } + if (hours > 0 || days > 0) { + countdownParts.push(`${hours}h`); + } + if (minutes > 0 || hours > 0 || days > 0) { + countdownParts.push(`${minutes}m`); + } + countdownParts.push(`${seconds}s`); + + setCountdown(countdownParts.join(' ')); + }; + + updateCountdown(); + const countdownInterval = setInterval(updateCountdown, 1000); + + return () => { + clearInterval(countdownInterval); + }; + } + }, [formattedDate, future]); + + if (date == '0001-01-01T00:00:00Z') { + return null; + } + + return ( + + + { + e.stopPropagation(); + }} + > + {future && countdown ? 
( + <>{countdown} + ) : ( + + )} + + + {formattedDate.toLocaleString()} + + + + ); +}; + +export default RelativeDate; diff --git a/frontend/app/src/components/v1/molecules/support-chat.tsx b/frontend/app/src/components/v1/molecules/support-chat.tsx new file mode 100644 index 000000000..f1c804b92 --- /dev/null +++ b/frontend/app/src/components/v1/molecules/support-chat.tsx @@ -0,0 +1,61 @@ +import { User } from '@/lib/api'; +import { useTenant } from '@/lib/atoms'; +import useApiMeta from '@/pages/auth/hooks/use-api-meta'; +import React, { PropsWithChildren, useEffect, useMemo } from 'react'; + +interface SupportChatProps { + user: User; +} + +const SupportChat: React.FC = ({ + user, + children, +}) => { + const meta = useApiMeta(); + + const { tenant } = useTenant(); + + const APP_ID = useMemo(() => { + if (!meta.data?.pylonAppId) { + return null; + } + + return meta.data.pylonAppId; + }, [meta]); + + useEffect(() => { + if (!APP_ID) { + return; + } + + const pylonScript = `(function(){var e=window;var t=document;var n=function(){n.e(arguments)};n.q=[];n.e=function(e){n.q.push(e)};e.Pylon=n;var r=function(){var e=t.createElement("script");e.setAttribute("type","text/javascript");e.setAttribute("async","true");e.setAttribute("src","https://widget.usepylon.com/widget/${APP_ID}");var n=t.getElementsByTagName("script")[0];n.parentNode.insertBefore(e,n)};if(t.readyState==="complete"){r()}else if(e.addEventListener){e.addEventListener("load",r,false)}})();`; + document.body.appendChild(document.createElement('script')).innerHTML = + pylonScript; + }, [APP_ID]); + + useEffect(() => { + if (!APP_ID || !user) { + return; + } + + (window as any).pylon = { + chat_settings: { + app_id: APP_ID, + email: user.email, + name: user.name, + email_hash: user.emailHash, + }, + }; + + (window as any).Pylon('setNewIssueCustomFields', { + user_id: user.metadata.id, + tenant_name: tenant?.name, + tenant_slug: tenant?.slug, + tenant_id: tenant?.metadata?.id, + }); + }, [user, APP_ID, 
tenant]); + + return children; +}; + +export default SupportChat; diff --git a/frontend/app/src/components/v1/molecules/time-picker/date-time-picker.tsx b/frontend/app/src/components/v1/molecules/time-picker/date-time-picker.tsx new file mode 100644 index 000000000..1756f4104 --- /dev/null +++ b/frontend/app/src/components/v1/molecules/time-picker/date-time-picker.tsx @@ -0,0 +1,69 @@ +import { add, format } from 'date-fns'; +import { cn } from '@/lib/utils'; +import { Button } from '@/components/v1/ui/button'; +import { Calendar } from '@/components/v1/ui/calendar'; +import { + Popover, + PopoverContent, + PopoverTrigger, +} from '@/components/v1/ui/popover'; +import { TimePicker } from './time-picker'; +import { CalendarIcon } from '@radix-ui/react-icons'; + +type DateTimePickerProps = { + date: Date | undefined; + setDate: (date: Date | undefined) => void; + label: string; +}; + +export function DateTimePicker({ date, setDate, label }: DateTimePickerProps) { + /** + * carry over the current time when a user clicks a new day + * instead of resetting to 00:00 + */ + const handleSelect = (newDay: Date | undefined) => { + if (!newDay) { + return; + } + if (!date) { + setDate(newDay); + return; + } + const diff = newDay.getTime() - date.getTime(); + const diffInDays = diff / (1000 * 60 * 60 * 24); + const newDateFull = add(date, { days: Math.ceil(diffInDays) }); + setDate(newDateFull); + }; + + return ( + + + + + + handleSelect(d)} + initialFocus + /> +
+ +
+
+
+ ); +} diff --git a/frontend/app/src/components/v1/molecules/time-picker/time-picker-input.tsx b/frontend/app/src/components/v1/molecules/time-picker/time-picker-input.tsx new file mode 100644 index 000000000..c2458089a --- /dev/null +++ b/frontend/app/src/components/v1/molecules/time-picker/time-picker-input.tsx @@ -0,0 +1,149 @@ +import { Input } from '@/components/v1/ui/input'; + +import { cn } from '@/lib/utils'; +import React from 'react'; +import { + Period, + TimePickerType, + getArrowByType, + getDateByType, + setDateByType, +} from './time-picker-utils'; + +export interface TimePickerInputProps + extends React.InputHTMLAttributes { + picker: TimePickerType; + date: Date | undefined; + setDate: (date: Date | undefined) => void; + period?: Period; + onRightFocus?: () => void; + onLeftFocus?: () => void; +} + +const TimePickerInput = React.forwardRef< + HTMLInputElement, + TimePickerInputProps +>( + ( + { + className, + value, + id, + name, + date = new Date(new Date().setHours(0, 0, 0, 0)), + setDate, + onChange, + onKeyDown, + picker, + period, + onLeftFocus, + onRightFocus, + ...props + }, + ref, + ) => { + const [flag, setFlag] = React.useState(false); + const [prevIntKey, setPrevIntKey] = React.useState('0'); + + /** + * allow the user to enter the second digit within 2 seconds + * otherwise start again with entering first digit + */ + React.useEffect(() => { + if (flag) { + const timer = setTimeout(() => { + setFlag(false); + }, 2000); + + return () => clearTimeout(timer); + } + }, [flag]); + + const calculatedValue = React.useMemo(() => { + return getDateByType(date, picker); + }, [date, picker]); + + const calculateNewValue = (key: string) => { + /* + * If picker is '12hours' and the first digit is 0, then the second digit is automatically set to 1. + * The second entered digit will break the condition and the value will be set to 10-12. 
+ */ + if (picker === '12hours') { + if (flag && calculatedValue.slice(1, 2) === '1' && prevIntKey === '0') { + return '0' + key; + } + } + + return !flag ? '0' + key : calculatedValue.slice(1, 2) + key; + }; + + const handleKeyDown = (e: React.KeyboardEvent) => { + if (e.key === 'Tab') { + return; + } + e.preventDefault(); + if (e.key === 'ArrowRight') { + onRightFocus?.(); + } + if (e.key === 'ArrowLeft') { + onLeftFocus?.(); + } + if (['ArrowUp', 'ArrowDown'].includes(e.key)) { + const step = e.key === 'ArrowUp' ? 1 : -1; + const newValue = getArrowByType(calculatedValue, step, picker); + if (flag) { + setFlag(false); + } + const tempDate = new Date(date); + setDate(setDateByType(tempDate, newValue, picker, period)); + } + if (e.key >= '0' && e.key <= '9') { + if (picker === '12hours') { + setPrevIntKey(e.key); + } + + const newValue = calculateNewValue(e.key); + if (flag) { + onRightFocus?.(); + } + setFlag((prev) => !prev); + const tempDate = new Date(date); + setDate(setDateByType(tempDate, newValue, picker, period)); + } + }; + + const valueToTwoDigits = (value: number) => { + return value < 10 ? 
`0${value}` : value; + }; + + return ( + { + e.preventDefault(); + onChange?.(e); + }} + min={0} + max={59} + type="number" + inputMode="numeric" + onKeyDown={(e) => { + onKeyDown?.(e); + handleKeyDown(e); + }} + {...props} + /> + ); + }, +); + +TimePickerInput.displayName = 'TimePickerInput'; + +export { TimePickerInput }; diff --git a/frontend/app/src/components/v1/molecules/time-picker/time-picker-utils.ts b/frontend/app/src/components/v1/molecules/time-picker/time-picker-utils.ts new file mode 100644 index 000000000..9bd251e6c --- /dev/null +++ b/frontend/app/src/components/v1/molecules/time-picker/time-picker-utils.ts @@ -0,0 +1,226 @@ +/** + * regular expression to check for valid hour format (01-23) + */ +export function isValidHour(value: string) { + return /^(0[0-9]|1[0-9]|2[0-3])$/.test(value); +} + +/** + * regular expression to check for valid 12 hour format (01-12) + */ +export function isValid12Hour(value: string) { + return /^(0[1-9]|1[0-2])$/.test(value); +} + +/** + * regular expression to check for valid minute format (00-59) + */ +export function isValidMinuteOrSecond(value: string) { + return /^[0-5][0-9]$/.test(value); +} + +type GetValidNumberConfig = { max: number; min?: number; loop?: boolean }; + +export function getValidNumber( + value: string, + { max, min = 0, loop = false }: GetValidNumberConfig, +) { + let numericValue = parseInt(value, 10); + + if (!isNaN(numericValue)) { + if (!loop) { + if (numericValue > max) { + numericValue = max; + } + if (numericValue < min) { + numericValue = min; + } + } else { + if (numericValue > max) { + numericValue = min; + } + if (numericValue < min) { + numericValue = max; + } + } + return numericValue.toString().padStart(2, '0'); + } + + return '00'; +} + +export function getValidHour(value: string) { + if (isValidHour(value)) { + return value; + } + return getValidNumber(value, { max: 23 }); +} + +export function getValid12Hour(value: string) { + if (isValid12Hour(value)) { + return value; + } + 
return getValidNumber(value, { min: 1, max: 12 }); +} + +export function getValidMinuteOrSecond(value: string) { + if (isValidMinuteOrSecond(value)) { + return value; + } + return getValidNumber(value, { max: 59 }); +} + +type GetValidArrowNumberConfig = { + min: number; + max: number; + step: number; +}; + +export function getValidArrowNumber( + value: string, + { min, max, step }: GetValidArrowNumberConfig, +) { + let numericValue = parseInt(value, 10); + if (!isNaN(numericValue)) { + numericValue += step; + return getValidNumber(String(numericValue), { min, max, loop: true }); + } + return '00'; +} + +export function getValidArrowHour(value: string, step: number) { + return getValidArrowNumber(value, { min: 0, max: 23, step }); +} + +export function getValidArrow12Hour(value: string, step: number) { + return getValidArrowNumber(value, { min: 1, max: 12, step }); +} + +export function getValidArrowMinuteOrSecond(value: string, step: number) { + return getValidArrowNumber(value, { min: 0, max: 59, step }); +} + +export function setMinutes(date: Date, value: string) { + const minutes = getValidMinuteOrSecond(value); + date.setMinutes(parseInt(minutes, 10)); + return date; +} + +export function setSeconds(date: Date, value: string) { + const seconds = getValidMinuteOrSecond(value); + date.setSeconds(parseInt(seconds, 10)); + return date; +} + +export function setHours(date: Date, value: string) { + const hours = getValidHour(value); + date.setHours(parseInt(hours, 10)); + return date; +} + +export function set12Hours(date: Date, value: string, period: Period) { + const hours = parseInt(getValid12Hour(value), 10); + const convertedHours = convert12HourTo24Hour(hours, period); + date.setHours(convertedHours); + return date; +} + +export type TimePickerType = 'minutes' | 'seconds' | 'hours' | '12hours'; +export type Period = 'AM' | 'PM'; + +export function setDateByType( + date: Date, + value: string, + type: TimePickerType, + period?: Period, +) { + switch (type) { + 
case 'minutes': + return setMinutes(date, value); + case 'seconds': + return setSeconds(date, value); + case 'hours': + return setHours(date, value); + case '12hours': { + if (!period) { + return date; + } + return set12Hours(date, value, period); + } + default: + return date; + } +} + +export function getDateByType(date: Date, type: TimePickerType) { + switch (type) { + case 'minutes': + return getValidMinuteOrSecond(String(date.getMinutes())); + case 'seconds': + return getValidMinuteOrSecond(String(date.getSeconds())); + case 'hours': + return getValidHour(String(date.getHours())); + case '12hours': + return getValid12Hour(String(display12HourValue(date.getHours()))); + default: + return '00'; + } +} + +export function getArrowByType( + value: string, + step: number, + type: TimePickerType, +) { + switch (type) { + case 'minutes': + return getValidArrowMinuteOrSecond(value, step); + case 'seconds': + return getValidArrowMinuteOrSecond(value, step); + case 'hours': + return getValidArrowHour(value, step); + case '12hours': + return getValidArrow12Hour(value, step); + default: + return '00'; + } +} + +/** + * handles value change of 12-hour input + * 12:00 PM is 12:00 + * 12:00 AM is 00:00 + */ +export function convert12HourTo24Hour(hour: number, period: Period) { + if (period === 'PM') { + if (hour <= 11) { + return hour + 12; + } else { + return hour; + } + } else if (period === 'AM') { + if (hour === 12) { + return 0; + } + return hour; + } + return hour; +} + +/** + * time is stored in the 24-hour form, + * but needs to be displayed to the user + * in its 12-hour representation + */ +export function display12HourValue(hours: number) { + if (hours === 0 || hours === 12) { + return '12'; + } + if (hours >= 22) { + return `${hours - 12}`; + } + if (hours % 12 > 9) { + return `${hours}`; + } + return `0${hours % 12}`; +} diff --git a/frontend/app/src/components/v1/molecules/time-picker/time-picker.tsx 
b/frontend/app/src/components/v1/molecules/time-picker/time-picker.tsx new file mode 100644 index 000000000..9d4cff2d1 --- /dev/null +++ b/frontend/app/src/components/v1/molecules/time-picker/time-picker.tsx @@ -0,0 +1,56 @@ +import * as React from 'react'; +import { Label } from '@/components/v1/ui/label'; +import { TimePickerInput } from './time-picker-input'; + +interface TimePickerProps { + date: Date | undefined; + setDate: (date: Date | undefined) => void; +} + +export function TimePicker({ date, setDate }: TimePickerProps) { + const minuteRef = React.useRef(null); + const hourRef = React.useRef(null); + const secondRef = React.useRef(null); + + return ( +
+
+ + minuteRef.current?.focus()} + /> +
+
+ + hourRef.current?.focus()} + onRightFocus={() => secondRef.current?.focus()} + /> +
+
+ + minuteRef.current?.focus()} + /> +
+
+ ); +} diff --git a/frontend/app/src/components/v1/ui/accordion.tsx b/frontend/app/src/components/v1/ui/accordion.tsx new file mode 100644 index 000000000..e4cca1ecf --- /dev/null +++ b/frontend/app/src/components/v1/ui/accordion.tsx @@ -0,0 +1,59 @@ +import * as React from 'react'; +import * as AccordionPrimitive from '@radix-ui/react-accordion'; +import { ChevronDownIcon } from '@radix-ui/react-icons'; + +import { cn } from '@/lib/utils'; + +const Accordion = AccordionPrimitive.Root; + +const AccordionItem = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)); +AccordionItem.displayName = 'AccordionItem'; + +const AccordionTrigger = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef & { + hideChevron?: boolean; + } +>(({ className, children, hideChevron, ...props }, ref) => ( + + svg]:rotate-180', + className, + )} + {...props} + > + {children} + {!hideChevron && ( + + )} + + +)); +AccordionTrigger.displayName = AccordionPrimitive.Trigger.displayName; + +const AccordionContent = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, children, ...props }, ref) => ( + +
{children}
+
+)); +AccordionContent.displayName = AccordionPrimitive.Content.displayName; + +export { Accordion, AccordionItem, AccordionTrigger, AccordionContent }; diff --git a/frontend/app/src/components/v1/ui/alert.tsx b/frontend/app/src/components/v1/ui/alert.tsx new file mode 100644 index 000000000..7f87c9c63 --- /dev/null +++ b/frontend/app/src/components/v1/ui/alert.tsx @@ -0,0 +1,60 @@ +import * as React from 'react'; +import { cva, type VariantProps } from 'class-variance-authority'; + +import { cn } from '@/lib/utils'; + +const alertVariants = cva( + 'relative w-full rounded-lg border px-4 py-3 text-sm [&>svg+div]:translate-y-[-3px] [&>svg]:absolute [&>svg]:left-4 [&>svg]:top-4 [&>svg]:text-foreground [&>svg~*]:pl-7', + { + variants: { + variant: { + default: 'bg-background text-foreground', + destructive: + 'border-destructive/50 text-destructive dark:border-destructive [&>svg]:text-destructive', + warn: 'border-yellow-400 dark:border-yellow-800', + }, + }, + defaultVariants: { + variant: 'default', + }, + }, +); + +const Alert = React.forwardRef< + HTMLDivElement, + React.HTMLAttributes & VariantProps +>(({ className, variant, ...props }, ref) => ( +
+)); +Alert.displayName = 'Alert'; + +const AlertTitle = React.forwardRef< + HTMLParagraphElement, + React.HTMLAttributes +>(({ className, ...props }, ref) => ( +
+)); +AlertTitle.displayName = 'AlertTitle'; + +const AlertDescription = React.forwardRef< + HTMLParagraphElement, + React.HTMLAttributes +>(({ className, ...props }, ref) => ( +
+)); +AlertDescription.displayName = 'AlertDescription'; + +export { Alert, AlertTitle, AlertDescription }; diff --git a/frontend/app/src/components/v1/ui/avatar.tsx b/frontend/app/src/components/v1/ui/avatar.tsx new file mode 100644 index 000000000..9476e1824 --- /dev/null +++ b/frontend/app/src/components/v1/ui/avatar.tsx @@ -0,0 +1,48 @@ +import * as React from 'react'; +import * as AvatarPrimitive from '@radix-ui/react-avatar'; + +import { cn } from '@/lib/utils'; + +const Avatar = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)); +Avatar.displayName = AvatarPrimitive.Root.displayName; + +const AvatarImage = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)); +AvatarImage.displayName = AvatarPrimitive.Image.displayName; + +const AvatarFallback = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)); +AvatarFallback.displayName = AvatarPrimitive.Fallback.displayName; + +export { Avatar, AvatarImage, AvatarFallback }; diff --git a/frontend/app/src/components/v1/ui/badge.tsx b/frontend/app/src/components/v1/ui/badge.tsx new file mode 100644 index 000000000..e54fa27df --- /dev/null +++ b/frontend/app/src/components/v1/ui/badge.tsx @@ -0,0 +1,42 @@ +import * as React from 'react'; +import { cva, type VariantProps } from 'class-variance-authority'; + +import { cn } from '@/lib/utils'; + +const badgeVariants = cva( + 'inline-flex items-center rounded-md border px-2.5 py-0.5 text-xs font-semibold transition-colors focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2', + { + variants: { + variant: { + default: 'border-transparent bg-primary text-primary-foreground shadow', + secondary: + 'border-transparent bg-secondary text-secondary-foreground hover:bg-secondary/80', + destructive: + 'border-transparent bg-destructive text-destructive-foreground shadow 
hover:bg-destructive/80', + outline: 'text-foreground', + successful: + 'border-transparent rounded-sm px-1 font-normal text-green-800 dark:text-green-300 bg-green-500/20 ring-green-500/30', + failed: + 'border-transparent rounded-sm px-1 font-normal text-red-800 dark:text-red-300 bg-red-500/20 ring-red-500/30', + inProgress: + 'border-transparent rounded-sm px-1 font-normal text-yellow-800 dark:text-yellow-300 bg-yellow-500/20 ring-yellow-500/30', + outlineDestructive: + 'border border-destructive rounded-sm px-1 font-normal text-red-800 dark:text-red-300 bg-transparent', + }, + }, + defaultVariants: { + variant: 'default', + }, + }, +); +export interface BadgeProps + extends React.HTMLAttributes, + VariantProps {} + +function Badge({ className, variant, ...props }: BadgeProps) { + return ( + + ); +} + +export { Badge, badgeVariants }; diff --git a/frontend/app/src/components/v1/ui/breadcrumb.tsx b/frontend/app/src/components/v1/ui/breadcrumb.tsx new file mode 100644 index 000000000..991082d54 --- /dev/null +++ b/frontend/app/src/components/v1/ui/breadcrumb.tsx @@ -0,0 +1,115 @@ +import * as React from 'react'; +import { ChevronRightIcon, DotsHorizontalIcon } from '@radix-ui/react-icons'; +import { Slot } from '@radix-ui/react-slot'; + +import { cn } from '@/lib/utils'; + +const Breadcrumb = React.forwardRef< + HTMLElement, + React.ComponentPropsWithoutRef<'nav'> & { + separator?: React.ReactNode; + } +>(({ ...props }, ref) =>