feat: hatchet-lite (#560)

* feat: hatchet-lite mvp

* fix: init shadow db

* fix: install atlas

* fix: correct env

* fix: wait for db ready

* fix: remove name flag

* fix: add hatchet-lite to build
This commit is contained in:
abelanger5
2024-06-06 14:03:53 -04:00
committed by GitHub
parent 5f93a41e8b
commit b0b2e26952
55 changed files with 3714 additions and 49 deletions
+11
View File
@@ -40,3 +40,14 @@ jobs:
uses: actions/checkout@v4
- name: Build migrate
run: docker build -f ./build/package/migrate.dockerfile .
lite:
runs-on: ubuntu-latest
steps:
- name: Clone repository
uses: actions/checkout@v4
- name: Build lite
run: |
docker build -f ./build/package/servers.dockerfile . --build-arg SERVER_TARGET=lite -t hatchet-lite-tmp
docker build -f ./build/package/servers.dockerfile . --build-arg SERVER_TARGET=admin -t hatchet-admin-tmp
docker build -f ./build/package/lite.dockerfile . --build-arg HATCHET_LITE_IMAGE=hatchet-lite-tmp --build-arg HATCHET_ADMIN_IMAGE=hatchet-admin-tmp
+36
View File
@@ -120,3 +120,39 @@ jobs:
- name: Push to GHCR
run: |
docker push ghcr.io/hatchet-dev/hatchet/hatchet-frontend:${{steps.tag_name.outputs.tag}}
build-push-hatchet-lite:
name: hatchet-lite
runs-on: ubuntu-latest
steps:
- name: Get tag name
id: tag_name
run: echo "tag=${GITHUB_TAG/refs\/tags\//}" >> $GITHUB_OUTPUT
env:
GITHUB_TAG: ${{ github.ref }}
- name: Checkout
uses: actions/checkout@v4
- name: Login to GHCR
id: login-ghcr
run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
- name: Build
run: |
DOCKER_BUILDKIT=1 docker build -f ./build/package/servers.dockerfile \
-t hatchet-lite-local:${{steps.tag_name.outputs.tag}} \
--build-arg SERVER_TARGET=lite \
--build-arg VERSION=${{steps.tag_name.outputs.tag}} \
.
DOCKER_BUILDKIT=1 docker build -f ./build/package/servers.dockerfile \
-t hatchet-admin-local:${{steps.tag_name.outputs.tag}} \
--build-arg SERVER_TARGET=admin \
--build-arg VERSION=${{steps.tag_name.outputs.tag}} \
.
DOCKER_BUILDKIT=1 docker build -f ./build/package/lite.dockerfile \
-t ghcr.io/hatchet-dev/hatchet/hatchet-lite:${{steps.tag_name.outputs.tag}} \
--build-arg HATCHET_LITE_IMAGE=hatchet-lite-local:${{steps.tag_name.outputs.tag}} \
--build-arg HATCHET_ADMIN_IMAGE=hatchet-admin-local:${{steps.tag_name.outputs.tag}} \
.
- name: Push to GHCR
run: |
docker push ghcr.io/hatchet-dev/hatchet/hatchet-lite:${{steps.tag_name.outputs.tag}}
+24 -9
View File
@@ -4,7 +4,7 @@ jobs:
generate:
runs-on: ubuntu-latest
env:
DATABASE_URL: postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet
DATABASE_URL: postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet?sslmode=disable
steps:
- uses: actions/checkout@v4
@@ -28,12 +28,16 @@ jobs:
version: 9.1.1
run_install: false
- name: Install Atlas
run: |
curl -sSf https://atlasgo.sh | sh
- name: Compose
run: docker compose up -d
- name: Generate
run: |
go run github.com/steebchen/prisma-client-go migrate deploy
sh ./hack/db/atlas-apply.sh
task generate-all
- name: Check for diff
@@ -70,7 +74,7 @@ jobs:
integration:
runs-on: ubuntu-latest
env:
DATABASE_URL: postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet
DATABASE_URL: postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet?sslmode=disable
steps:
- uses: actions/checkout@v4
@@ -94,6 +98,10 @@ jobs:
version: 9.1.1
run_install: false
- name: Install Atlas
run: |
curl -sSf https://atlasgo.sh | sh
- name: Compose
run: docker compose up -d
@@ -102,8 +110,7 @@ jobs:
- name: Generate
run: |
go run github.com/steebchen/prisma-client-go db push
sh ./hack/db/atlas-apply.sh
task generate-all
task generate-certs
task generate-local-encryption-keys
@@ -118,7 +125,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 30
env:
DATABASE_URL: postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet
DATABASE_URL: postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet?sslmode=disable
steps:
- uses: actions/checkout@v4
@@ -142,6 +149,10 @@ jobs:
version: 9.1.1
run_install: false
- name: Install Atlas
run: |
curl -sSf https://atlasgo.sh | sh
- name: Compose
run: docker compose up -d
@@ -174,7 +185,7 @@ jobs:
- name: Generate
run: |
go run github.com/steebchen/prisma-client-go migrate deploy
sh ./hack/db/atlas-apply.sh
task generate-all
task generate-certs
task generate-local-encryption-keys
@@ -202,7 +213,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 30
env:
DATABASE_URL: postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet
DATABASE_URL: postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet?sslmode=disable
steps:
- uses: actions/checkout@v4
@@ -226,6 +237,10 @@ jobs:
version: 9.1.1
run_install: false
- name: Install Atlas
run: |
curl -sSf https://atlasgo.sh | sh
- name: Compose
run: docker compose up -d
@@ -249,7 +264,7 @@ jobs:
- name: Generate
run: |
go run github.com/steebchen/prisma-client-go migrate deploy
sh ./hack/db/atlas-apply.sh
task generate-all
task generate-certs
task generate-local-encryption-keys
+2 -2
View File
@@ -6,9 +6,9 @@ repos:
- id: mixed-line-ending
args: ["--fix=lf"]
- id: end-of-file-fixer
exclude: prisma/migrations/.*\.sql
exclude: prisma/migrations/.*\.sql|sql/migrations/.*\.sql
- id: trailing-whitespace
exclude: prisma/migrations/.*\.sql
exclude: prisma/migrations/.*\.sql|sql/migrations/.*\.sql
- id: check-yaml
- repo: https://github.com/golangci/golangci-lint
rev: v1.57.1
+10 -2
View File
@@ -57,7 +57,9 @@ tasks:
- sudo sh ./hack/dev/manage-hosts.sh add 127.0.0.1 app.dev.hatchet-tools.com
prisma-migrate:
cmds:
- sh ./hack/dev/run-go-with-env.sh run github.com/steebchen/prisma-client-go migrate dev --skip-generate
- task: generate-sqlc
- sh ./hack/dev/atlas-migrate.sh {{.CLI_ARGS}}
- DATABASE_URL='postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet?sslmode=disable' sh ./hack/db/atlas-apply.sh
prisma-push:
cmds:
- sh ./hack/dev/run-go-with-env.sh run github.com/steebchen/prisma-client-go db push
@@ -84,6 +86,10 @@ tasks:
start-ngrok:
cmds:
- ngrok http 8080
start-lite:
cmds:
- sh ./hack/dev/run-go-with-env.sh run github.com/steebchen/prisma-client-go migrate dev --skip-generate
- sh ./hack/dev/start-lite.sh
generate-all:
cmds:
- task: install-dependencies
@@ -130,7 +136,9 @@ tasks:
- sh ./generate.sh
generate-sqlc:
cmds:
- npx --yes prisma migrate diff --from-empty --to-schema-datasource prisma/schema.prisma --script > internal/repository/prisma/dbsqlc/schema.sql
- DATABASE_URL='postgresql://hatchet:hatchet@127.0.0.1:5431/shadow' npx --yes prisma migrate dev --skip-generate
- DATABASE_URL='postgresql://hatchet:hatchet@127.0.0.1:5431/shadow' npx --yes prisma migrate diff --from-empty --to-schema-datasource prisma/schema.prisma --script > sql/schema/schema.sql
- cp sql/schema/schema.sql internal/repository/prisma/dbsqlc/schema.sql
- go run github.com/sqlc-dev/sqlc/cmd/sqlc@v1.24.0 generate --file internal/repository/prisma/dbsqlc/sqlc.yaml
lint:
cmds:
+31 -11
View File
@@ -73,6 +73,36 @@ func NewAPIServer(config *server.ServerConfig) *APIServer {
}
func (t *APIServer) Run() (func() error, error) {
e, err := t.GetEchoServer()
if err != nil {
return nil, err
}
return t.RunWithServer(e)
}
func (t *APIServer) RunWithServer(e *echo.Echo) (func() error, error) {
routes := e.Routes()
for _, route := range routes {
fmt.Println(route.Method, route.Path)
}
go func() {
if err := e.Start(fmt.Sprintf(":%d", t.config.Runtime.Port)); err != nil && !errors.Is(err, http.ErrServerClosed) {
panic(err)
}
}()
cleanup := func() error {
return e.Shutdown(context.Background())
}
return cleanup, nil
}
func (t *APIServer) GetEchoServer() (*echo.Echo, error) {
oaspec, err := gen.GetSwagger()
if err != nil {
return nil, err
@@ -288,15 +318,5 @@ func (t *APIServer) Run() (func() error, error) {
gen.RegisterHandlers(e, myStrictApiHandler)
go func() {
if err := e.Start(fmt.Sprintf(":%d", t.config.Runtime.Port)); err != nil && !errors.Is(err, http.ErrServerClosed) {
panic(err)
}
}()
cleanup := func() error {
return e.Shutdown(context.Background())
}
return cleanup, nil
return e, nil
}
+51
View File
@@ -0,0 +1,51 @@
# This expects the hatchet-lite image to be built and available on the machine
# -------------------
ARG HATCHET_LITE_IMAGE
ARG HATCHET_ADMIN_IMAGE

# Stage 1: copy from the existing Go built image
FROM $HATCHET_LITE_IMAGE as lite-binary-base
FROM $HATCHET_ADMIN_IMAGE as admin-binary-base

# Stage 2: build the frontend
FROM node:18-alpine as frontend-build

WORKDIR /app

COPY ./frontend/app/package.json ./frontend/app/pnpm-lock.yaml ./

RUN corepack pnpm --version
RUN corepack pnpm install --frozen-lockfile && corepack pnpm store prune

COPY ./frontend/app ./

RUN npm run build

# Stage 3: run in rabbitmq alpine image
FROM rabbitmq:alpine as rabbitmq

# Install runtime dependencies. `apk add --no-cache` fetches a fresh package
# index and leaves no cache in the layer, so a separate `apk update` is
# unnecessary. (Fix: "bash" was previously listed twice in this package list.)
RUN apk add --no-cache bash gcc musl-dev openssl ca-certificates curl postgresql-client

# Install the Atlas CLI, used by atlas-apply.sh for migrations
RUN curl -sSf https://atlasgo.sh | sh

COPY --from=lite-binary-base /hatchet/hatchet-lite ./hatchet-lite
COPY --from=admin-binary-base /hatchet/hatchet-admin ./hatchet-admin
COPY --from=frontend-build /app/dist ./static-assets

# Copy entrypoint script
COPY ./hack/db/atlas-apply.sh ./atlas-apply.sh
COPY ./hack/lite/start.sh ./entrypoint.sh
COPY ./sql/migrations ./sql/migrations

ENV LITE_STATIC_ASSET_DIR=/static-assets
ENV LITE_FRONTEND_PORT=8081
ENV LITE_RUNTIME_PORT=8888

# Make entrypoint script executable
RUN chmod +x ./entrypoint.sh
RUN chmod +x ./atlas-apply.sh

EXPOSE 8888 7070

# Run the entrypoint script
CMD ["./entrypoint.sh"]
+9 -12
View File
@@ -1,17 +1,14 @@
# Base Go environment
# -------------------
FROM golang:1.21-alpine as base
WORKDIR /hatchet
FROM alpine as deployment
# curl is needed for things like signaling cloudsql proxy container to stop after a migration
RUN apk update && apk add --no-cache curl
# install bash via apk
RUN apk update && apk add --no-cache bash gcc musl-dev openssl bash ca-certificates curl postgresql-client
COPY go.mod go.sum ./
RUN curl -sSf https://atlasgo.sh | sh
RUN go mod download
COPY ./hack/db/atlas-apply.sh ./atlas-apply.sh
COPY ./sql/migrations ./sql/migrations
RUN go run github.com/steebchen/prisma-client-go prefetch
RUN chmod +x ./atlas-apply.sh
COPY /prisma ./prisma
CMD go run github.com/steebchen/prisma-client-go migrate deploy
# Run the entrypoint script
CMD ["./atlas-apply.sh"]
+4 -4
View File
@@ -43,11 +43,11 @@ FROM base AS build-go
ARG VERSION=v0.1.0-alpha.0
# can be set to "api" or "engine"
# can be set to "api", "engine", "admin" or "lite"
ARG SERVER_TARGET
# check if the target is empty or not set to api, engine, or admin
RUN if [ -z "$SERVER_TARGET" ] || [ "$SERVER_TARGET" != "api" ] && [ "$SERVER_TARGET" != "engine" ] && [ "$SERVER_TARGET" != "admin" ]; then \
# check if the target is empty or not set to api, engine, lite, or admin
RUN if [ -z "$SERVER_TARGET" ] || [ "$SERVER_TARGET" != "api" ] && [ "$SERVER_TARGET" != "engine" ] && [ "$SERVER_TARGET" != "admin" ] && [ "$SERVER_TARGET" != "lite" ]; then \
echo "SERVER_TARGET must be set to 'api', 'engine', or 'admin'"; \
exit 1; \
fi
@@ -67,7 +67,7 @@ RUN --mount=type=cache,target=/root/.cache/go-build \
# ----------------------
FROM alpine AS deployment
# can be set to "api" or "engine"
# can be set to "api", "engine", "admin" or "lite"
ARG SERVER_TARGET=engine
WORKDIR /hatchet
+7 -1
View File
@@ -38,7 +38,13 @@ func Start(cf *loader.ConfigLoader, interruptCh <-chan interface{}) error {
runner := run.NewAPIServer(sc)
apiCleanup, err := runner.Run()
e, err := runner.GetEchoServer()
if err != nil {
return err
}
apiCleanup, err := runner.RunWithServer(e)
if err != nil {
return fmt.Errorf("error starting API server: %w", err)
}
+161
View File
@@ -0,0 +1,161 @@
package main
import (
"fmt"
"log"
"net/http"
"net/http/httputil"
"net/url"
"os"
"strings"
"time"
"github.com/spf13/cobra"
"github.com/hatchet-dev/hatchet/cmd/hatchet-api/api"
"github.com/hatchet-dev/hatchet/cmd/hatchet-engine/engine"
"github.com/hatchet-dev/hatchet/cmd/hatchet-lite/staticfileserver"
"github.com/hatchet-dev/hatchet/internal/config/loader"
"github.com/hatchet-dev/hatchet/pkg/cmdutils"
)
// printVersion, when set via --version, causes the binary to print its
// build version and exit before loading any config.
var printVersion bool

// configDirectory is the path passed via --config; empty means the config
// loader's default location.
var configDirectory string

// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
	Use:   "hatchet-lite",
	Short: "hatchet-lite runs a Hatchet instance with static files, API and engine all served on the same instance.",
	Run: func(cmd *cobra.Command, args []string) {
		if printVersion {
			fmt.Println(Version)
			os.Exit(0)
		}

		cf := loader.NewConfigLoader(configDirectory)
		interruptChan := cmdutils.InterruptChan()

		if err := start(cf, interruptChan); err != nil {
			log.Println("error starting API:", err)
			os.Exit(1)
		}
	},
}

// Version will be linked by an ldflag during build
var Version = "v0.1.0-alpha.0"

// main wires up the global flags and dispatches to rootCmd.
func main() {
	rootCmd.PersistentFlags().BoolVar(
		&printVersion,
		"version",
		false,
		"print version and exit.",
	)

	rootCmd.PersistentFlags().StringVar(
		&configDirectory,
		"config",
		"",
		"The path the config folder.",
	)

	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
// runs a static file server, api and engine in the same process.
//
// Layout: the API server and the engine run as goroutines on their own
// internal ports; a static file server serves the frontend assets on
// LITE_FRONTEND_PORT; a reverse proxy on LITE_RUNTIME_PORT routes paths
// prefixed with "/api" to the API and everything else to the static file
// server. Blocks until interruptCh yields, then returns nil.
func start(cf *loader.ConfigLoader, interruptCh <-chan interface{}) error {
	// read static asset directory and frontend URL from the environment
	staticAssetDir := os.Getenv("LITE_STATIC_ASSET_DIR")
	frontendPort := os.Getenv("LITE_FRONTEND_PORT")
	runtimePort := os.Getenv("LITE_RUNTIME_PORT")

	if staticAssetDir == "" {
		return fmt.Errorf("LITE_STATIC_ASSET_DIR environment variable is required")
	}

	if frontendPort == "" {
		return fmt.Errorf("LITE_FRONTEND_PORT environment variable is required")
	}

	// LITE_RUNTIME_PORT is optional; default to 8082 when unset (the lite
	// Dockerfile sets it to 8888 explicitly).
	if runtimePort == "" {
		runtimePort = "8082"
	}

	feURL, err := url.Parse(fmt.Sprintf("http://localhost:%s", frontendPort))

	if err != nil {
		return fmt.Errorf("error parsing frontend URL: %w", err)
	}

	_, sc, err := cf.LoadServerConfig()

	if err != nil {
		return fmt.Errorf("error loading server config: %w", err)
	}

	// the API listens on the port from the server config, not an env var
	apiURL, err := url.Parse(fmt.Sprintf("http://localhost:%d", sc.Runtime.Port))

	if err != nil {
		return fmt.Errorf("error parsing API URL: %w", err)
	}

	// api process
	// NOTE(review): any error returned by api.Start is discarded here — a
	// failed API boot is only observable via the proxy returning errors.
	go func() {
		api.Start(cf, interruptCh) // nolint:errcheck
	}()

	// static file server
	go func() {
		c := staticfileserver.NewStaticFileServer(staticAssetDir)

		s := &http.Server{
			Addr:              fmt.Sprintf(":%s", frontendPort),
			Handler:           c,
			ReadHeaderTimeout: 5 * time.Second,
		}

		// a listen failure is fatal for the whole lite process
		if err := s.ListenAndServe(); err != nil {
			log.Printf("static file server failure: %s", err.Error())
			os.Exit(1)
		}
	}()

	ctx, cancel := cmdutils.NewInterruptContext()
	defer cancel()

	// engine process; cancelled via the interrupt context above
	go func() {
		if err := engine.Run(ctx, cf); err != nil {
			log.Printf("engine failure: %s", err.Error())
			os.Exit(1)
		}
	}()

	// reverse proxy: "/api"-prefixed paths go to the API server, everything
	// else to the frontend static file server
	s := &http.Server{
		Addr:              fmt.Sprintf(":%s", runtimePort),
		ReadHeaderTimeout: 5 * time.Second,
		Handler: &httputil.ReverseProxy{
			Rewrite: func(r *httputil.ProxyRequest) {
				if strings.HasPrefix(r.In.URL.Path, "/api") {
					r.SetURL(apiURL)
				} else {
					r.SetURL(feURL)
				}
			},
		},
	}

	go func() {
		if err := s.ListenAndServe(); err != nil {
			log.Printf("reverse proxy failure: %s", err.Error())
			os.Exit(1)
		}
	}()

	// block until an interrupt is received; goroutines exit with the process
	<-interruptCh

	return nil
}
@@ -0,0 +1,39 @@
package staticfileserver
import (
"net/http"
"os"
"path"
"strings"
"github.com/go-chi/chi"
"github.com/go-chi/chi/middleware"
)
// NewStaticFileServer returns a chi router that serves files from
// staticFilePath. Requests for paths that do not exist on disk fall back to
// the file server's directory index (SPA-style fallback) with caching
// disabled; html/js assets are served with "no-cache" so browsers revalidate.
func NewStaticFileServer(staticFilePath string) *chi.Mux {
	r := chi.NewRouter()

	fs := http.FileServer(http.Dir(staticFilePath))

	r.Use(middleware.Logger)

	r.Get("/*", func(w http.ResponseWriter, r *http.Request) {
		// never allow the UI to be embedded in a frame
		w.Header().Set("X-Frame-Options", "DENY")

		// Probe the filesystem with r.URL.Path rather than r.RequestURI:
		// RequestURI includes the raw query string, so a request like
		// "/app.js?v=2" would stat a non-existent path and incorrectly take
		// the SPA-fallback branch even though the file exists.
		if _, err := os.Stat(staticFilePath + r.URL.Path); os.IsNotExist(err) {
			// SPA fallback: strip the entire request path so the file
			// server serves the directory index; must always revalidate.
			w.Header().Set("Cache-Control", "no-cache")
			http.StripPrefix(r.URL.Path, fs).ServeHTTP(w, r)
		} else {
			// Set static files involving html, js, or empty cache to "no-cache", which means they must be validated
			// for changes before the browser uses the cache
			if base := path.Base(r.URL.Path); strings.Contains(base, "html") || strings.Contains(base, "js") || base == "." || base == "/" {
				w.Header().Set("Cache-Control", "no-cache")
			}

			fs.ServeHTTP(w, r)
		}
	})

	return r
}
+1
View File
@@ -99,6 +99,7 @@ require (
github.com/exaring/otelpgx v0.5.4
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/getsentry/sentry-go v0.28.0
github.com/go-chi/chi v1.5.5
github.com/go-playground/validator/v10 v10.21.0
github.com/go-test/deep v1.1.0 // indirect
github.com/goccy/go-json v0.10.3
+2
View File
@@ -51,6 +51,8 @@ github.com/getkin/kin-openapi v0.124.0 h1:VSFNMB9C9rTKBnQ/fpyDU8ytMTr4dWI9QovSKj
github.com/getkin/kin-openapi v0.124.0/go.mod h1:wb1aSZA/iWmorQP9KTAS/phLj/t17B5jT7+fS8ed9NM=
github.com/getsentry/sentry-go v0.28.0 h1:7Rqx9M3ythTKy2J6uZLHmc8Sz9OGgIlseuO1iBX/s0M=
github.com/getsentry/sentry-go v0.28.0/go.mod h1:1fQZ+7l7eeJ3wYi82q5Hg8GqAPgefRq+FP/QhafYVgg=
github.com/go-chi/chi v1.5.5 h1:vOB/HbEMt9QqBqErz07QehcOKHaWFtuj87tTDVz2qXE=
github.com/go-chi/chi v1.5.5/go.mod h1:C9JqLr3tIYjDOZpzn+BCuxY8z8vmca43EeMgyZt7irw=
github.com/go-co-op/gocron/v2 v2.5.0 h1:ff/TJX9GdTJBDL1il9cyd/Sj3WnS+BB7ZzwHKSNL5p8=
github.com/go-co-op/gocron/v2 v2.5.0/go.mod h1:ckPQw96ZuZLRUGu88vVpd9a6d9HakI14KWahFZtGvNw=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
+43
View File
@@ -0,0 +1,43 @@
#!/bin/bash

# Applies Atlas migrations from sql/migrations to the database at
# $DATABASE_URL. If the database already has Prisma migration history, the
# latest Prisma migration name is used as the Atlas baseline so already-applied
# migrations are skipped.

# Check whether DATABASE_URL is set
if [ -z "$DATABASE_URL" ]; then
  echo "DATABASE_URL is not set"
  exit 1
fi

# Wait up to 30 seconds for the database to be ready
# NOTE(review): the single-quoted body means $DATABASE_URL is expanded by the
# child bash, which assumes DATABASE_URL is exported — TODO confirm callers
# always export it (docker env / inline `VAR=... sh script` both do).
echo "Waiting for database to be ready..."
timeout 30s bash -c '
until psql "$DATABASE_URL" -c "\q" 2>/dev/null; do
  sleep 1
done
'

# `timeout` exits with 124 when the deadline was hit
if [ $? -eq 124 ]; then
  echo "Timed out waiting for the database to be ready"
  exit 1
fi

# Check for prisma migrations
# The assignment propagates psql's exit status; xargs trims whitespace from
# the single-column result. stderr is suppressed because the
# _prisma_migrations table may simply not exist on a fresh database.
MIGRATION_NAME=$(psql "$DATABASE_URL" -t -c "SELECT migration_name FROM _prisma_migrations ORDER BY started_at DESC LIMIT 1;" 2>/dev/null | xargs)

if [ $? -eq 0 ] && [ -n "$MIGRATION_NAME" ]; then
  echo "Using existing prisma migration: $MIGRATION_NAME"

  # Baseline on the latest Prisma migration so Atlas skips what Prisma
  # already applied.
  atlas migrate apply \
    --url "$DATABASE_URL" \
    --baseline "$MIGRATION_NAME" \
    --dir "file://sql/migrations"
else
  echo "No prisma migration found. Applying all migrations..."

  atlas migrate apply \
    --url "$DATABASE_URL" \
    --dir "file://sql/migrations"
fi

# if either of the above commands failed, exit with an error
# ($? after `fi` is the exit status of the last command run inside the
# branch, i.e. the atlas invocation)
if [ $? -ne 0 ]; then
  echo "Migration failed. Exiting..."
  exit 1
fi
+14
View File
@@ -0,0 +1,14 @@
#!/bin/bash

# Generates a new Atlas migration named <version> by diffing the desired
# schema (sql/schema/schema.sql) against the existing migration directory.
#
# Usage: ./atlas-migrate.sh <version>

# check if the first argument is empty
if [ -z "$1" ]; then
  echo "Usage: $0 <version>"
  exit 1
fi

# Re-hash the migration directory first so Atlas does not refuse to run
# after manual edits to migration files.
atlas migrate hash --dir "file://sql/migrations"

# Fix: quote "$1" so migration names containing whitespace or glob
# characters are passed to Atlas as a single argument.
atlas migrate diff "$1" \
  --dir "file://sql/migrations" \
  --to "file://sql/schema/schema.sql" \
  --dev-url "docker://postgres/15/dev?search_path=public"
+25
View File
@@ -0,0 +1,25 @@
#!/bin/bash

# Flattens Prisma-style migration folders (<name>/migration.sql) into a
# single directory of "<name>.sql" files that Atlas can consume.

# Directory containing the folders with migration.sql files
SOURCE_DIR="./prisma/migrations"

# Directory to store the renamed .sql files
DEST_DIR="./sql/migrations"

# Ensure the destination exists before copying
mkdir -p "$DEST_DIR"

# Copy each folder's migration.sql to "<folder name>.sql" in the destination,
# skipping non-directories and folders without a migration.sql.
for entry in "$SOURCE_DIR"/*; do
  [ -d "$entry" ] || continue

  source_file="$entry/migration.sql"
  [ -f "$source_file" ] || continue

  cp "$source_file" "$DEST_DIR/$(basename "$entry").sql"
done

echo "Migration files have been moved and renamed successfully."
+11
View File
@@ -0,0 +1,11 @@
#!/bin/bash

# Dev helper: runs the hatchet-lite binary under nodemon for hot reload,
# with Caddy in front and environment loaded from .env.

# fail fast and echo commands for debuggability
set -eux

# start the local reverse proxy in the background
caddy start

# export every variable sourced from .env into the environment
set -a
. .env
set +a

# restart `go run ./cmd/hatchet-lite` on file changes; SIGINT lets the
# process shut down gracefully between restarts
npx --yes nodemon --signal SIGINT --config nodemon.api.json --exec go run ./cmd/hatchet-lite
+23
View File
@@ -0,0 +1,23 @@
#!/bin/bash

# Container entrypoint for the hatchet-lite image: boots RabbitMQ, applies
# database migrations, generates config, then runs the hatchet-lite binary.

# Start RabbitMQ
rabbitmq-server &

# Wait for RabbitMQ to be ready
# NOTE(review): this loop has no upper bound — if RabbitMQ never becomes
# healthy the container spins here forever; consider a timeout.
until rabbitmqctl status; do
  echo "Waiting for RabbitMQ to start..."
  sleep 2
done

# Run migration script
./atlas-apply.sh

if [ $? -ne 0 ]; then
  echo "Migration script failed. Exiting..."
  exit 1
fi

# Generate config files
# --overwrite=false keeps any config persisted from a previous container run
./hatchet-admin quickstart --skip certs --generated-config-dir ./config --overwrite=false

# Run the Go binary
./hatchet-lite --config ./config
+4 -3
View File
@@ -149,11 +149,12 @@ func (t *Message) TenantID() string {
type AckHook func(task *Message) error
type MessageQueue interface {
// AddMessage adds a task to the queue. Implementations should ensure that Start().
// AddMessage adds a task to the queue
AddMessage(ctx context.Context, queue Queue, task *Message) error
// Subscribe subscribes to the task queue.
Subscribe(queueType Queue, preAck AckHook, postAck AckHook) (func() error, error)
// Subscribe subscribes to the task queue. It returns a cleanup function that should be called when the
// subscription is no longer needed.
Subscribe(queue Queue, preAck AckHook, postAck AckHook) (func() error, error)
// RegisterTenant registers a new pub/sub mechanism for a tenant. This should be called when a
// new tenant is created. If this is not called, implementors should ensure that there's a check
+5 -5
View File
@@ -135,7 +135,7 @@ model Tenant {
stepRateLimits StepRateLimit[]
alertEmailGroups TenantAlertEmailGroup[]
// alertMemberEmails controls whether to send alert emails to tenant members in addition to the alert email groups
alertMemberEmails Boolean @default(true)
alertMemberEmails Boolean @default(true)
slackWebhooks SlackAppWebhook[]
alertingSettings TenantAlertingSettings?
}
@@ -1192,7 +1192,7 @@ model Worker {
dispatcher Dispatcher? @relation(fields: [dispatcherId], references: [id], onDelete: SetNull, onUpdate: Cascade)
dispatcherId String? @db.Uuid
maxRuns Int @default(100)
maxRuns Int @default(100)
services Service[]
@@ -1218,11 +1218,11 @@ model WorkerSemaphore {
worker Worker @relation(fields: [workerId], references: [id], onDelete: Cascade, onUpdate: Cascade)
workerId String @unique @db.Uuid
// keeps track of maxRuns - runningRuns on the worker
slots Int
slots Int
}
model WorkerSemaphoreSlot {
id String @id @unique @default(uuid()) @db.Uuid
id String @id @unique @default(uuid()) @db.Uuid
// the parent semaphore
worker Worker @relation(fields: [workerId], references: [id], onDelete: Cascade, onUpdate: Cascade)
@@ -1230,7 +1230,7 @@ model WorkerSemaphoreSlot {
// the parent step run
stepRun StepRun? @relation(fields: [stepRunId], references: [id], onDelete: Cascade, onUpdate: Cascade)
stepRunId String? @db.Uuid @unique
stepRunId String? @unique @db.Uuid
@@index([workerId])
}
+681
View File
@@ -0,0 +1,681 @@
-- CreateEnum
CREATE TYPE "TenantMemberRole" AS ENUM ('OWNER', 'ADMIN', 'MEMBER');
-- CreateEnum
CREATE TYPE "WorkflowRunStatus" AS ENUM ('PENDING', 'RUNNING', 'SUCCEEDED', 'FAILED');
-- CreateEnum
CREATE TYPE "JobRunStatus" AS ENUM ('PENDING', 'RUNNING', 'SUCCEEDED', 'FAILED', 'CANCELLED');
-- CreateEnum
CREATE TYPE "StepRunStatus" AS ENUM ('PENDING', 'PENDING_ASSIGNMENT', 'ASSIGNED', 'RUNNING', 'SUCCEEDED', 'FAILED', 'CANCELLED');
-- CreateEnum
CREATE TYPE "WorkerStatus" AS ENUM ('ACTIVE', 'INACTIVE');
-- CreateTable
CREATE TABLE "User" (
"id" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"deletedAt" TIMESTAMP(3),
"email" TEXT NOT NULL,
"emailVerified" BOOLEAN NOT NULL DEFAULT false,
"name" TEXT,
CONSTRAINT "User_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "UserPassword" (
"hash" TEXT NOT NULL,
"userId" UUID NOT NULL
);
-- CreateTable
CREATE TABLE "UserSession" (
"id" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"userId" UUID,
"data" JSONB,
"expiresAt" TIMESTAMP(3) NOT NULL,
CONSTRAINT "UserSession_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "Tenant" (
"id" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"deletedAt" TIMESTAMP(3),
"name" TEXT NOT NULL,
"slug" TEXT NOT NULL,
CONSTRAINT "Tenant_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "TenantMember" (
"id" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"tenantId" UUID NOT NULL,
"userId" UUID NOT NULL,
"role" "TenantMemberRole" NOT NULL,
CONSTRAINT "TenantMember_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "Event" (
"id" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"deletedAt" TIMESTAMP(3),
"key" TEXT NOT NULL,
"tenantId" UUID NOT NULL,
"replayedFromId" UUID,
"data" JSONB,
CONSTRAINT "Event_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "WorkflowTag" (
"id" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"tenantId" UUID NOT NULL,
"name" TEXT NOT NULL,
"color" TEXT NOT NULL DEFAULT '#93C5FD',
CONSTRAINT "WorkflowTag_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "Workflow" (
"id" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"deletedAt" TIMESTAMP(3),
"tenantId" UUID NOT NULL,
"name" TEXT NOT NULL,
"description" TEXT,
CONSTRAINT "Workflow_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "WorkflowVersion" (
"id" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"deletedAt" TIMESTAMP(3),
"version" TEXT NOT NULL,
"order" SMALLSERIAL NOT NULL,
"workflowId" UUID NOT NULL,
CONSTRAINT "WorkflowVersion_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "WorkflowTriggers" (
"id" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"deletedAt" TIMESTAMP(3),
"workflowVersionId" UUID NOT NULL,
"tenantId" UUID NOT NULL,
CONSTRAINT "WorkflowTriggers_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "WorkflowTriggerEventRef" (
"parentId" UUID NOT NULL,
"eventKey" TEXT NOT NULL
);
-- CreateTable
CREATE TABLE "WorkflowTriggerCronRef" (
"parentId" UUID NOT NULL,
"cron" TEXT NOT NULL,
"tickerId" UUID
);
-- CreateTable
CREATE TABLE "WorkflowTriggerScheduledRef" (
"id" UUID NOT NULL,
"parentId" UUID NOT NULL,
"triggerAt" TIMESTAMP(3) NOT NULL,
"tickerId" UUID,
"input" JSONB,
CONSTRAINT "WorkflowTriggerScheduledRef_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "Job" (
"id" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"deletedAt" TIMESTAMP(3),
"tenantId" UUID NOT NULL,
"workflowVersionId" UUID NOT NULL,
"name" TEXT NOT NULL,
"description" TEXT,
"timeout" TEXT,
CONSTRAINT "Job_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "Action" (
"id" TEXT NOT NULL,
"description" TEXT,
"tenantId" UUID NOT NULL,
CONSTRAINT "Action_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "Step" (
"id" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"deletedAt" TIMESTAMP(3),
"readableId" TEXT,
"tenantId" UUID NOT NULL,
"jobId" UUID NOT NULL,
"actionId" TEXT NOT NULL,
"timeout" TEXT,
CONSTRAINT "Step_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "WorkflowRun" (
"id" TEXT NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"deletedAt" TIMESTAMP(3),
"tenantId" UUID NOT NULL,
"workflowVersionId" UUID NOT NULL,
"status" "WorkflowRunStatus" NOT NULL DEFAULT 'PENDING',
"error" TEXT,
"startedAt" TIMESTAMP(3),
"finishedAt" TIMESTAMP(3),
CONSTRAINT "WorkflowRun_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "WorkflowRunTriggeredBy" (
"id" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"deletedAt" TIMESTAMP(3),
"tenantId" UUID NOT NULL,
"parentId" TEXT NOT NULL,
"eventId" UUID,
"cronParentId" UUID,
"cronSchedule" TEXT,
"scheduledId" UUID,
CONSTRAINT "WorkflowRunTriggeredBy_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "JobRun" (
"id" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"deletedAt" TIMESTAMP(3),
"tenantId" UUID NOT NULL,
"workflowRunId" TEXT NOT NULL,
"jobId" UUID NOT NULL,
"tickerId" UUID,
"status" "JobRunStatus" NOT NULL DEFAULT 'PENDING',
"result" JSONB,
"startedAt" TIMESTAMP(3),
"finishedAt" TIMESTAMP(3),
"timeoutAt" TIMESTAMP(3),
"cancelledAt" TIMESTAMP(3),
"cancelledReason" TEXT,
"cancelledError" TEXT,
CONSTRAINT "JobRun_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "JobRunLookupData" (
"id" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"deletedAt" TIMESTAMP(3),
"jobRunId" UUID NOT NULL,
"tenantId" UUID NOT NULL,
"data" JSONB,
CONSTRAINT "JobRunLookupData_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "StepRun" (
"id" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"deletedAt" TIMESTAMP(3),
"tenantId" UUID NOT NULL,
"jobRunId" UUID NOT NULL,
"stepId" UUID NOT NULL,
"order" SMALLSERIAL NOT NULL,
"workerId" UUID,
"tickerId" UUID,
"status" "StepRunStatus" NOT NULL DEFAULT 'PENDING',
"input" JSONB,
"output" JSONB,
"requeueAfter" TIMESTAMP(3),
"scheduleTimeoutAt" TIMESTAMP(3),
"error" TEXT,
"startedAt" TIMESTAMP(3),
"finishedAt" TIMESTAMP(3),
"timeoutAt" TIMESTAMP(3),
"cancelledAt" TIMESTAMP(3),
"cancelledReason" TEXT,
"cancelledError" TEXT,
CONSTRAINT "StepRun_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "Dispatcher" (
"id" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"deletedAt" TIMESTAMP(3),
"lastHeartbeatAt" TIMESTAMP(3),
"isActive" BOOLEAN NOT NULL DEFAULT true,
CONSTRAINT "Dispatcher_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "Ticker" (
"id" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"lastHeartbeatAt" TIMESTAMP(3),
"isActive" BOOLEAN NOT NULL DEFAULT true,
CONSTRAINT "Ticker_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "Worker" (
"id" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"deletedAt" TIMESTAMP(3),
"tenantId" UUID NOT NULL,
"lastHeartbeatAt" TIMESTAMP(3),
"name" TEXT NOT NULL,
"status" "WorkerStatus" NOT NULL DEFAULT 'ACTIVE',
"dispatcherId" UUID NOT NULL,
CONSTRAINT "Worker_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "Service" (
"id" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"deletedAt" TIMESTAMP(3),
"name" TEXT NOT NULL,
"description" TEXT,
"tenantId" UUID NOT NULL,
CONSTRAINT "Service_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "_WorkflowToWorkflowTag" (
"A" UUID NOT NULL,
"B" UUID NOT NULL
);
-- CreateTable
CREATE TABLE "_ActionToWorker" (
"A" TEXT NOT NULL,
"B" UUID NOT NULL
);
-- CreateTable
CREATE TABLE "_StepOrder" (
"A" UUID NOT NULL,
"B" UUID NOT NULL
);
-- CreateTable
CREATE TABLE "_StepRunOrder" (
"A" UUID NOT NULL,
"B" UUID NOT NULL
);
-- CreateTable
CREATE TABLE "_ServiceToWorker" (
"A" UUID NOT NULL,
"B" UUID NOT NULL
);
-- Index section. The single-column "*_id_key" unique indexes duplicate the
-- uniqueness already guaranteed by each table's primary key (typical of
-- Prisma-generated migrations); the composite "*_key" indexes enforce
-- tenant-scoped natural keys (e.g. one workflow name per tenant). Relation
-- tables get a unique ("A","B") pair index plus a lookup index on "B".
-- CreateIndex
CREATE UNIQUE INDEX "User_id_key" ON "User"("id");
-- CreateIndex
CREATE UNIQUE INDEX "User_email_key" ON "User"("email");
-- CreateIndex
CREATE UNIQUE INDEX "UserPassword_userId_key" ON "UserPassword"("userId");
-- CreateIndex
CREATE UNIQUE INDEX "UserSession_id_key" ON "UserSession"("id");
-- CreateIndex
CREATE UNIQUE INDEX "Tenant_id_key" ON "Tenant"("id");
-- CreateIndex
CREATE UNIQUE INDEX "Tenant_slug_key" ON "Tenant"("slug");
-- CreateIndex
CREATE UNIQUE INDEX "TenantMember_id_key" ON "TenantMember"("id");
-- CreateIndex
CREATE UNIQUE INDEX "TenantMember_tenantId_userId_key" ON "TenantMember"("tenantId", "userId");
-- CreateIndex
CREATE UNIQUE INDEX "Event_id_key" ON "Event"("id");
-- CreateIndex
CREATE UNIQUE INDEX "WorkflowTag_id_key" ON "WorkflowTag"("id");
-- CreateIndex
CREATE UNIQUE INDEX "WorkflowTag_tenantId_name_key" ON "WorkflowTag"("tenantId", "name");
-- CreateIndex
CREATE UNIQUE INDEX "Workflow_id_key" ON "Workflow"("id");
-- CreateIndex
CREATE UNIQUE INDEX "Workflow_tenantId_name_key" ON "Workflow"("tenantId", "name");
-- CreateIndex
CREATE UNIQUE INDEX "WorkflowVersion_id_key" ON "WorkflowVersion"("id");
-- CreateIndex
CREATE UNIQUE INDEX "WorkflowVersion_workflowId_version_key" ON "WorkflowVersion"("workflowId", "version");
-- CreateIndex
CREATE UNIQUE INDEX "WorkflowTriggers_id_key" ON "WorkflowTriggers"("id");
-- CreateIndex
-- one WorkflowTriggers row per workflow version
CREATE UNIQUE INDEX "WorkflowTriggers_workflowVersionId_key" ON "WorkflowTriggers"("workflowVersionId");
-- CreateIndex
CREATE UNIQUE INDEX "WorkflowTriggerEventRef_parentId_eventKey_key" ON "WorkflowTriggerEventRef"("parentId", "eventKey");
-- CreateIndex
-- the same cron expression cannot be registered twice under one parent
CREATE UNIQUE INDEX "WorkflowTriggerCronRef_parentId_cron_key" ON "WorkflowTriggerCronRef"("parentId", "cron");
-- CreateIndex
CREATE UNIQUE INDEX "WorkflowTriggerScheduledRef_id_key" ON "WorkflowTriggerScheduledRef"("id");
-- CreateIndex
CREATE UNIQUE INDEX "Job_id_key" ON "Job"("id");
-- CreateIndex
CREATE UNIQUE INDEX "Job_workflowVersionId_name_key" ON "Job"("workflowVersionId", "name");
-- CreateIndex
CREATE UNIQUE INDEX "Action_tenantId_id_key" ON "Action"("tenantId", "id");
-- CreateIndex
CREATE UNIQUE INDEX "Step_id_key" ON "Step"("id");
-- CreateIndex
CREATE UNIQUE INDEX "Step_jobId_readableId_key" ON "Step"("jobId", "readableId");
-- CreateIndex
-- composite target for the GetGroupKeyRun tenant+run FK added in a later file
CREATE UNIQUE INDEX "WorkflowRun_tenantId_id_key" ON "WorkflowRun"("tenantId", "id");
-- CreateIndex
CREATE UNIQUE INDEX "WorkflowRunTriggeredBy_id_key" ON "WorkflowRunTriggeredBy"("id");
-- CreateIndex
CREATE UNIQUE INDEX "WorkflowRunTriggeredBy_parentId_key" ON "WorkflowRunTriggeredBy"("parentId");
-- CreateIndex
CREATE UNIQUE INDEX "WorkflowRunTriggeredBy_scheduledId_key" ON "WorkflowRunTriggeredBy"("scheduledId");
-- CreateIndex
CREATE UNIQUE INDEX "WorkflowRunTriggeredBy_tenantId_parentId_key" ON "WorkflowRunTriggeredBy"("tenantId", "parentId");
-- CreateIndex
CREATE UNIQUE INDEX "JobRun_id_key" ON "JobRun"("id");
-- CreateIndex
CREATE UNIQUE INDEX "JobRunLookupData_id_key" ON "JobRunLookupData"("id");
-- CreateIndex
CREATE UNIQUE INDEX "JobRunLookupData_jobRunId_key" ON "JobRunLookupData"("jobRunId");
-- CreateIndex
CREATE UNIQUE INDEX "JobRunLookupData_jobRunId_tenantId_key" ON "JobRunLookupData"("jobRunId", "tenantId");
-- CreateIndex
CREATE UNIQUE INDEX "StepRun_id_key" ON "StepRun"("id");
-- CreateIndex
CREATE UNIQUE INDEX "Dispatcher_id_key" ON "Dispatcher"("id");
-- CreateIndex
CREATE UNIQUE INDEX "Ticker_id_key" ON "Ticker"("id");
-- CreateIndex
CREATE UNIQUE INDEX "Worker_id_key" ON "Worker"("id");
-- CreateIndex
CREATE UNIQUE INDEX "Service_id_key" ON "Service"("id");
-- CreateIndex
CREATE UNIQUE INDEX "Service_tenantId_name_key" ON "Service"("tenantId", "name");
-- CreateIndex
CREATE UNIQUE INDEX "_WorkflowToWorkflowTag_AB_unique" ON "_WorkflowToWorkflowTag"("A", "B");
-- CreateIndex
CREATE INDEX "_WorkflowToWorkflowTag_B_index" ON "_WorkflowToWorkflowTag"("B");
-- CreateIndex
CREATE UNIQUE INDEX "_ActionToWorker_AB_unique" ON "_ActionToWorker"("A", "B");
-- CreateIndex
CREATE INDEX "_ActionToWorker_B_index" ON "_ActionToWorker"("B");
-- CreateIndex
CREATE UNIQUE INDEX "_StepOrder_AB_unique" ON "_StepOrder"("A", "B");
-- CreateIndex
CREATE INDEX "_StepOrder_B_index" ON "_StepOrder"("B");
-- CreateIndex
CREATE UNIQUE INDEX "_StepRunOrder_AB_unique" ON "_StepRunOrder"("A", "B");
-- CreateIndex
CREATE INDEX "_StepRunOrder_B_index" ON "_StepRunOrder"("B");
-- CreateIndex
CREATE UNIQUE INDEX "_ServiceToWorker_AB_unique" ON "_ServiceToWorker"("A", "B");
-- CreateIndex
CREATE INDEX "_ServiceToWorker_B_index" ON "_ServiceToWorker"("B");
-- Foreign-key section. Convention visible throughout: tenant-owned rows use
-- ON DELETE CASCADE (deleting a tenant removes its data), while optional
-- references to transient resources (Ticker, Worker, Event, schedule refs)
-- use ON DELETE SET NULL so the referring row survives.
-- AddForeignKey
ALTER TABLE "UserPassword" ADD CONSTRAINT "UserPassword_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "UserSession" ADD CONSTRAINT "UserSession_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "TenantMember" ADD CONSTRAINT "TenantMember_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "TenantMember" ADD CONSTRAINT "TenantMember_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "Event" ADD CONSTRAINT "Event_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
-- self-reference: a replayed event points back at its original
ALTER TABLE "Event" ADD CONSTRAINT "Event_replayedFromId_fkey" FOREIGN KEY ("replayedFromId") REFERENCES "Event"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkflowTag" ADD CONSTRAINT "WorkflowTag_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "Workflow" ADD CONSTRAINT "Workflow_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkflowVersion" ADD CONSTRAINT "WorkflowVersion_workflowId_fkey" FOREIGN KEY ("workflowId") REFERENCES "Workflow"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkflowTriggers" ADD CONSTRAINT "WorkflowTriggers_workflowVersionId_fkey" FOREIGN KEY ("workflowVersionId") REFERENCES "WorkflowVersion"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkflowTriggers" ADD CONSTRAINT "WorkflowTriggers_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkflowTriggerEventRef" ADD CONSTRAINT "WorkflowTriggerEventRef_parentId_fkey" FOREIGN KEY ("parentId") REFERENCES "WorkflowTriggers"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkflowTriggerCronRef" ADD CONSTRAINT "WorkflowTriggerCronRef_parentId_fkey" FOREIGN KEY ("parentId") REFERENCES "WorkflowTriggers"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkflowTriggerCronRef" ADD CONSTRAINT "WorkflowTriggerCronRef_tickerId_fkey" FOREIGN KEY ("tickerId") REFERENCES "Ticker"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
-- NOTE(review): scheduled refs parent to WorkflowVersion, unlike event/cron
-- refs which parent to WorkflowTriggers — confirm this asymmetry is intended.
ALTER TABLE "WorkflowTriggerScheduledRef" ADD CONSTRAINT "WorkflowTriggerScheduledRef_parentId_fkey" FOREIGN KEY ("parentId") REFERENCES "WorkflowVersion"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkflowTriggerScheduledRef" ADD CONSTRAINT "WorkflowTriggerScheduledRef_tickerId_fkey" FOREIGN KEY ("tickerId") REFERENCES "Ticker"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "Job" ADD CONSTRAINT "Job_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "Job" ADD CONSTRAINT "Job_workflowVersionId_fkey" FOREIGN KEY ("workflowVersionId") REFERENCES "WorkflowVersion"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "Action" ADD CONSTRAINT "Action_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "Step" ADD CONSTRAINT "Step_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "Step" ADD CONSTRAINT "Step_jobId_fkey" FOREIGN KEY ("jobId") REFERENCES "Job"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
-- RESTRICT (not CASCADE): an Action referenced by a Step cannot be deleted
ALTER TABLE "Step" ADD CONSTRAINT "Step_actionId_tenantId_fkey" FOREIGN KEY ("actionId", "tenantId") REFERENCES "Action"("id", "tenantId") ON DELETE RESTRICT ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkflowRun" ADD CONSTRAINT "WorkflowRun_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkflowRun" ADD CONSTRAINT "WorkflowRun_workflowVersionId_fkey" FOREIGN KEY ("workflowVersionId") REFERENCES "WorkflowVersion"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkflowRunTriggeredBy" ADD CONSTRAINT "WorkflowRunTriggeredBy_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkflowRunTriggeredBy" ADD CONSTRAINT "WorkflowRunTriggeredBy_parentId_fkey" FOREIGN KEY ("parentId") REFERENCES "WorkflowRun"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkflowRunTriggeredBy" ADD CONSTRAINT "WorkflowRunTriggeredBy_eventId_fkey" FOREIGN KEY ("eventId") REFERENCES "Event"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
-- composite FK onto the (parentId, cron) unique index created above
ALTER TABLE "WorkflowRunTriggeredBy" ADD CONSTRAINT "WorkflowRunTriggeredBy_cronParentId_cronSchedule_fkey" FOREIGN KEY ("cronParentId", "cronSchedule") REFERENCES "WorkflowTriggerCronRef"("parentId", "cron") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkflowRunTriggeredBy" ADD CONSTRAINT "WorkflowRunTriggeredBy_scheduledId_fkey" FOREIGN KEY ("scheduledId") REFERENCES "WorkflowTriggerScheduledRef"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "JobRun" ADD CONSTRAINT "JobRun_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "JobRun" ADD CONSTRAINT "JobRun_workflowRunId_fkey" FOREIGN KEY ("workflowRunId") REFERENCES "WorkflowRun"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "JobRun" ADD CONSTRAINT "JobRun_jobId_fkey" FOREIGN KEY ("jobId") REFERENCES "Job"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "JobRun" ADD CONSTRAINT "JobRun_tickerId_fkey" FOREIGN KEY ("tickerId") REFERENCES "Ticker"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "JobRunLookupData" ADD CONSTRAINT "JobRunLookupData_jobRunId_fkey" FOREIGN KEY ("jobRunId") REFERENCES "JobRun"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "JobRunLookupData" ADD CONSTRAINT "JobRunLookupData_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "StepRun" ADD CONSTRAINT "StepRun_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "StepRun" ADD CONSTRAINT "StepRun_jobRunId_fkey" FOREIGN KEY ("jobRunId") REFERENCES "JobRun"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "StepRun" ADD CONSTRAINT "StepRun_stepId_fkey" FOREIGN KEY ("stepId") REFERENCES "Step"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "StepRun" ADD CONSTRAINT "StepRun_workerId_fkey" FOREIGN KEY ("workerId") REFERENCES "Worker"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "StepRun" ADD CONSTRAINT "StepRun_tickerId_fkey" FOREIGN KEY ("tickerId") REFERENCES "Ticker"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "Worker" ADD CONSTRAINT "Worker_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
-- deleting a Dispatcher cascades to its Workers
ALTER TABLE "Worker" ADD CONSTRAINT "Worker_dispatcherId_fkey" FOREIGN KEY ("dispatcherId") REFERENCES "Dispatcher"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "Service" ADD CONSTRAINT "Service_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_WorkflowToWorkflowTag" ADD CONSTRAINT "_WorkflowToWorkflowTag_A_fkey" FOREIGN KEY ("A") REFERENCES "Workflow"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_WorkflowToWorkflowTag" ADD CONSTRAINT "_WorkflowToWorkflowTag_B_fkey" FOREIGN KEY ("B") REFERENCES "WorkflowTag"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_ActionToWorker" ADD CONSTRAINT "_ActionToWorker_A_fkey" FOREIGN KEY ("A") REFERENCES "Action"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_ActionToWorker" ADD CONSTRAINT "_ActionToWorker_B_fkey" FOREIGN KEY ("B") REFERENCES "Worker"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_StepOrder" ADD CONSTRAINT "_StepOrder_A_fkey" FOREIGN KEY ("A") REFERENCES "Step"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_StepOrder" ADD CONSTRAINT "_StepOrder_B_fkey" FOREIGN KEY ("B") REFERENCES "Step"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_StepRunOrder" ADD CONSTRAINT "_StepRunOrder_A_fkey" FOREIGN KEY ("A") REFERENCES "StepRun"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_StepRunOrder" ADD CONSTRAINT "_StepRunOrder_B_fkey" FOREIGN KEY ("B") REFERENCES "StepRun"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_ServiceToWorker" ADD CONSTRAINT "_ServiceToWorker_A_fkey" FOREIGN KEY ("A") REFERENCES "Service"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_ServiceToWorker" ADD CONSTRAINT "_ServiceToWorker_B_fkey" FOREIGN KEY ("B") REFERENCES "Worker"("id") ON DELETE CASCADE ON UPDATE CASCADE;
+50
View File
@@ -0,0 +1,50 @@
-- Migration: OAuth account linkage and tenant invite links.
-- CreateEnum
CREATE TYPE "InviteLinkStatus" AS ENUM ('PENDING', 'ACCEPTED', 'REJECTED');
-- CreateTable
-- UserOAuth: one linked provider account per user per provider. Tokens are
-- TEXT here; a later migration in this set converts them to BYTEA.
CREATE TABLE "UserOAuth" (
    "id" UUID NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "userId" UUID NOT NULL,
    "provider" TEXT NOT NULL,
    "providerUserId" TEXT NOT NULL,
    "accessToken" TEXT NOT NULL,
    "refreshToken" TEXT,
    "expiresAt" TIMESTAMP(3),
    CONSTRAINT "UserOAuth_pkey" PRIMARY KEY ("id")
);
-- CreateTable
-- TenantInviteLink: a pending invitation identified by inviter/invitee email
-- with an expiry timestamp.
-- NOTE(review): "role" defaults to 'OWNER' — the most privileged value in
-- TenantMemberRole by name; confirm this default is intended for invites.
CREATE TABLE "TenantInviteLink" (
    "id" UUID NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "tenantId" UUID NOT NULL,
    "inviterEmail" TEXT NOT NULL,
    "inviteeEmail" TEXT NOT NULL,
    "expires" TIMESTAMP(3) NOT NULL,
    "status" "InviteLinkStatus" NOT NULL DEFAULT 'PENDING',
    "role" "TenantMemberRole" NOT NULL DEFAULT 'OWNER',
    CONSTRAINT "TenantInviteLink_pkey" PRIMARY KEY ("id")
);
-- CreateIndex
CREATE UNIQUE INDEX "UserOAuth_id_key" ON "UserOAuth"("id");
-- CreateIndex
-- NOTE(review): this single-column unique on userId limits each user to ONE
-- OAuth row, making the (userId, provider) unique below redundant — confirm
-- whether multiple providers per user were intended.
CREATE UNIQUE INDEX "UserOAuth_userId_key" ON "UserOAuth"("userId");
-- CreateIndex
CREATE UNIQUE INDEX "UserOAuth_userId_provider_key" ON "UserOAuth"("userId", "provider");
-- CreateIndex
CREATE UNIQUE INDEX "TenantInviteLink_id_key" ON "TenantInviteLink"("id");
-- AddForeignKey
ALTER TABLE "UserOAuth" ADD CONSTRAINT "UserOAuth_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "TenantInviteLink" ADD CONSTRAINT "TenantInviteLink_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
+89
View File
@@ -0,0 +1,89 @@
/*
  Warnings:
  - The primary key for the `Action` table will be changed. If it partially fails, the table could be left without primary key constraint.
  - The `refreshToken` column on the `UserOAuth` table would be dropped and recreated. This will lead to data loss if there is data in the column.
  - A unique constraint covering the columns `[id]` on the table `Action` will be added. If there are existing duplicate values, this will fail.
  - A unique constraint covering the columns `[tenantId,actionId]` on the table `Action` will be added. If there are existing duplicate values, this will fail.
  - Added the required column `actionId` to the `Action` table without a default value. This is not possible if the table is not empty.
  - Changed the type of `id` on the `Action` table. No cast exists, the column would be dropped and recreated, which cannot be done if there is data, since the column is required.
  - Changed the type of `accessToken` on the `UserOAuth` table. No cast exists, the column would be dropped and recreated, which cannot be done if there is data, since the column is required.
  - Added the required column `checksum` to the `WorkflowVersion` table without a default value. This is not possible if the table is not empty.
  - Changed the type of `A` on the `_ActionToWorker` table. No cast exists, the column would be dropped and recreated, which cannot be done if there is data, since the column is required.
*/
-- Migration: rebuild Action's primary key (TEXT id -> UUID id + new TEXT
-- "actionId" natural key), move OAuth tokens to BYTEA, backfill a checksum
-- on WorkflowVersion, and introduce APIToken.
-- DropForeignKey
-- dependent constraints are dropped first so the Action pk can be rebuilt;
-- they are re-added against the new columns at the end of this file
ALTER TABLE "Step" DROP CONSTRAINT "Step_actionId_tenantId_fkey";
-- DropForeignKey
ALTER TABLE "_ActionToWorker" DROP CONSTRAINT "_ActionToWorker_A_fkey";
-- DropIndex
DROP INDEX "Action_tenantId_id_key";
-- DropIndex
-- NOTE(review): this unique (workflowId, version) index is dropped and not
-- recreated in this file — duplicate versions become possible from here on.
DROP INDEX "WorkflowVersion_workflowId_version_key";
-- AlterTable
-- data-destructive on non-empty tables (see Warnings above): "id" is dropped
-- and recreated as UUID, severing any existing rows' identity
ALTER TABLE "Action" DROP CONSTRAINT "Action_pkey",
ADD COLUMN "actionId" TEXT NOT NULL,
DROP COLUMN "id",
ADD COLUMN "id" UUID NOT NULL,
ADD CONSTRAINT "Action_pkey" PRIMARY KEY ("id");
-- AlterTable
-- tokens become BYTEA — presumably to hold encrypted bytes; TODO confirm
-- against the application's encryption layer
ALTER TABLE "UserOAuth" DROP COLUMN "accessToken",
ADD COLUMN "accessToken" BYTEA NOT NULL,
DROP COLUMN "refreshToken",
ADD COLUMN "refreshToken" BYTEA;
-- AlterTable
ALTER TABLE "WorkflowVersion" ADD COLUMN "checksum" TEXT;
-- Add a default random string value to existing rows
-- (md5 over random()+clock_timestamp() gives each existing row a distinct
-- placeholder so the NOT NULL constraint below can be applied)
UPDATE "WorkflowVersion"
SET "checksum" = md5(random()::text || clock_timestamp()::text);
-- Make the checksum column NOT NULL
ALTER TABLE "WorkflowVersion" ALTER COLUMN "checksum" SET NOT NULL;
-- Update the version column to allow NULL
ALTER TABLE "WorkflowVersion" ALTER COLUMN "version" DROP NOT NULL;
-- AlterTable
-- relation column retyped TEXT -> UUID to match Action's new pk
ALTER TABLE "_ActionToWorker" DROP COLUMN "A",
ADD COLUMN "A" UUID NOT NULL;
-- CreateTable
-- APIToken: revocable, optionally expiring, optionally tenant-scoped
-- ("tenantId" is nullable).
CREATE TABLE "APIToken" (
    "id" UUID NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "expiresAt" TIMESTAMP(3),
    "revoked" BOOLEAN NOT NULL DEFAULT false,
    "name" TEXT,
    "tenantId" UUID,
    CONSTRAINT "APIToken_pkey" PRIMARY KEY ("id")
);
-- CreateIndex
CREATE UNIQUE INDEX "APIToken_id_key" ON "APIToken"("id");
-- CreateIndex
CREATE UNIQUE INDEX "Action_id_key" ON "Action"("id");
-- CreateIndex
-- (tenantId, actionId) replaces the dropped (tenantId, id) natural key
CREATE UNIQUE INDEX "Action_tenantId_actionId_key" ON "Action"("tenantId", "actionId");
-- CreateIndex
CREATE UNIQUE INDEX "_ActionToWorker_AB_unique" ON "_ActionToWorker"("A", "B");
-- AddForeignKey
ALTER TABLE "APIToken" ADD CONSTRAINT "APIToken_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
-- Step now targets Action("actionId","tenantId") instead of the old id
ALTER TABLE "Step" ADD CONSTRAINT "Step_actionId_tenantId_fkey" FOREIGN KEY ("actionId", "tenantId") REFERENCES "Action"("actionId", "tenantId") ON DELETE RESTRICT ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_ActionToWorker" ADD CONSTRAINT "_ActionToWorker_A_fkey" FOREIGN KEY ("A") REFERENCES "Action"("id") ON DELETE CASCADE ON UPDATE CASCADE;
+79
View File
@@ -0,0 +1,79 @@
-- Migration: workflow concurrency limits and group-key resolution runs.
-- CreateEnum
CREATE TYPE "ConcurrencyLimitStrategy" AS ENUM ('CANCEL_IN_PROGRESS', 'DROP_NEWEST', 'QUEUE_NEWEST');
-- AlterEnum
ALTER TYPE "WorkflowRunStatus" ADD VALUE 'QUEUED';
-- AlterTable
ALTER TABLE "WorkflowRun" ADD COLUMN "concurrencyGroupId" TEXT;
-- AlterTable
ALTER TABLE "WorkflowTriggerCronRef" ADD COLUMN "input" JSONB;
-- CreateTable
-- WorkflowConcurrency: one per workflow version (unique index below);
-- defaults to maxRuns=1 with CANCEL_IN_PROGRESS.
-- NOTE(review): despite the name, "getConcurrencyGroupId" is a UUID FK to
-- Action (see the fkey below) — it identifies the action that computes the
-- group key, not a group id. Verify naming with the application code.
CREATE TABLE "WorkflowConcurrency" (
    "id" UUID NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "workflowVersionId" UUID NOT NULL,
    "getConcurrencyGroupId" UUID,
    "maxRuns" INTEGER NOT NULL DEFAULT 1,
    "limitStrategy" "ConcurrencyLimitStrategy" NOT NULL DEFAULT 'CANCEL_IN_PROGRESS',
    CONSTRAINT "WorkflowConcurrency_pkey" PRIMARY KEY ("id")
);
-- CreateTable
-- GetGroupKeyRun: execution record for resolving a run's concurrency group
-- key; lifecycle columns mirror StepRun (shares "StepRunStatus").
-- NOTE(review): "workflowRunId" is TEXT here while WorkflowRun ids are UUID
-- at this point; a later migration retypes it.
CREATE TABLE "GetGroupKeyRun" (
    "id" UUID NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "deletedAt" TIMESTAMP(3),
    "tenantId" UUID NOT NULL,
    "workflowRunId" TEXT NOT NULL,
    "workerId" UUID,
    "tickerId" UUID,
    "status" "StepRunStatus" NOT NULL DEFAULT 'PENDING',
    "input" JSONB,
    "output" TEXT,
    "requeueAfter" TIMESTAMP(3),
    "error" TEXT,
    "startedAt" TIMESTAMP(3),
    "finishedAt" TIMESTAMP(3),
    "timeoutAt" TIMESTAMP(3),
    "cancelledAt" TIMESTAMP(3),
    "cancelledReason" TEXT,
    "cancelledError" TEXT,
    CONSTRAINT "GetGroupKeyRun_pkey" PRIMARY KEY ("id")
);
-- CreateIndex
CREATE UNIQUE INDEX "WorkflowConcurrency_id_key" ON "WorkflowConcurrency"("id");
-- CreateIndex
CREATE UNIQUE INDEX "WorkflowConcurrency_workflowVersionId_key" ON "WorkflowConcurrency"("workflowVersionId");
-- CreateIndex
CREATE UNIQUE INDEX "GetGroupKeyRun_id_key" ON "GetGroupKeyRun"("id");
-- CreateIndex
CREATE UNIQUE INDEX "GetGroupKeyRun_tenantId_workflowRunId_key" ON "GetGroupKeyRun"("tenantId", "workflowRunId");
-- AddForeignKey
ALTER TABLE "WorkflowConcurrency" ADD CONSTRAINT "WorkflowConcurrency_workflowVersionId_fkey" FOREIGN KEY ("workflowVersionId") REFERENCES "WorkflowVersion"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkflowConcurrency" ADD CONSTRAINT "WorkflowConcurrency_getConcurrencyGroupId_fkey" FOREIGN KEY ("getConcurrencyGroupId") REFERENCES "Action"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "GetGroupKeyRun" ADD CONSTRAINT "GetGroupKeyRun_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
-- composite FK onto WorkflowRun's (tenantId, id) unique index
ALTER TABLE "GetGroupKeyRun" ADD CONSTRAINT "GetGroupKeyRun_tenantId_workflowRunId_fkey" FOREIGN KEY ("tenantId", "workflowRunId") REFERENCES "WorkflowRun"("tenantId", "id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "GetGroupKeyRun" ADD CONSTRAINT "GetGroupKeyRun_workerId_fkey" FOREIGN KEY ("workerId") REFERENCES "Worker"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "GetGroupKeyRun" ADD CONSTRAINT "GetGroupKeyRun_tickerId_fkey" FOREIGN KEY ("tickerId") REFERENCES "Ticker"("id") ON DELETE SET NULL ON UPDATE CASCADE;
+67
View File
@@ -0,0 +1,67 @@
/*
  Warnings:
  - The primary key for the `WorkflowRun` table will be changed. If it partially fails, the table could be left without primary key constraint.
  - A unique constraint covering the columns `[workflowRunId]` on the table `GetGroupKeyRun` will be added. If there are existing duplicate values, this will fail.
  - A unique constraint covering the columns `[id]` on the table `WorkflowRun` will be added. If there are existing duplicate values, this will fail.
  - Changed the type of `workflowRunId` on the `GetGroupKeyRun` table. No cast exists, the column would be dropped and recreated, which cannot be done if there is data, since the column is required.
  - Changed the type of `workflowRunId` on the `JobRun` table. No cast exists, the column would be dropped and recreated, which cannot be done if there is data, since the column is required.
  - Changed the type of `id` on the `WorkflowRun` table. No cast exists, the column would be dropped and recreated, which cannot be done if there is data, since the column is required.
  - Changed the type of `parentId` on the `WorkflowRunTriggeredBy` table. No cast exists, the column would be dropped and recreated, which cannot be done if there is data, since the column is required.
*/
-- Migration: rebuild WorkflowRun's primary key as UUID and retype every
-- column that references it (GetGroupKeyRun, JobRun, WorkflowRunTriggeredBy).
-- Data-destructive on non-empty tables — see Warnings above.
-- DropForeignKey
ALTER TABLE "GetGroupKeyRun" DROP CONSTRAINT "GetGroupKeyRun_tenantId_workflowRunId_fkey";
-- DropForeignKey
ALTER TABLE "JobRun" DROP CONSTRAINT "JobRun_workflowRunId_fkey";
-- DropForeignKey
ALTER TABLE "WorkflowRunTriggeredBy" DROP CONSTRAINT "WorkflowRunTriggeredBy_parentId_fkey";
-- DropIndex
DROP INDEX "GetGroupKeyRun_tenantId_workflowRunId_key";
-- DropIndex
DROP INDEX "WorkflowRun_tenantId_id_key";
-- DropIndex
DROP INDEX "WorkflowRunTriggeredBy_tenantId_parentId_key";
-- AlterTable
ALTER TABLE "GetGroupKeyRun" DROP COLUMN "workflowRunId",
ADD COLUMN "workflowRunId" UUID NOT NULL;
-- AlterTable
ALTER TABLE "JobRun" DROP COLUMN "workflowRunId",
ADD COLUMN "workflowRunId" UUID NOT NULL;
-- AlterTable
ALTER TABLE "WorkflowRun" DROP CONSTRAINT "WorkflowRun_pkey",
ADD COLUMN "displayName" TEXT,
DROP COLUMN "id",
ADD COLUMN "id" UUID NOT NULL,
ADD CONSTRAINT "WorkflowRun_pkey" PRIMARY KEY ("id");
-- AlterTable
ALTER TABLE "WorkflowRunTriggeredBy" ADD COLUMN "input" JSONB,
DROP COLUMN "parentId",
ADD COLUMN "parentId" UUID NOT NULL;
-- CreateIndex
-- each WorkflowRun now has at most one GetGroupKeyRun
CREATE UNIQUE INDEX "GetGroupKeyRun_workflowRunId_key" ON "GetGroupKeyRun"("workflowRunId");
-- CreateIndex
CREATE UNIQUE INDEX "WorkflowRun_id_key" ON "WorkflowRun"("id");
-- CreateIndex
CREATE UNIQUE INDEX "WorkflowRunTriggeredBy_parentId_key" ON "WorkflowRunTriggeredBy"("parentId");
-- AddForeignKey
-- replaces the old composite (tenantId, workflowRunId) FK with a simple id FK
ALTER TABLE "GetGroupKeyRun" ADD CONSTRAINT "GetGroupKeyRun_workflowRunId_fkey" FOREIGN KEY ("workflowRunId") REFERENCES "WorkflowRun"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkflowRunTriggeredBy" ADD CONSTRAINT "WorkflowRunTriggeredBy_parentId_fkey" FOREIGN KEY ("parentId") REFERENCES "WorkflowRun"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "JobRun" ADD CONSTRAINT "JobRun_workflowRunId_fkey" FOREIGN KEY ("workflowRunId") REFERENCES "WorkflowRun"("id") ON DELETE CASCADE ON UPDATE CASCADE;
@@ -0,0 +1,5 @@
-- Migration: two additive, nullable JSONB columns (no backfill required).
-- AlterTable
ALTER TABLE "Step" ADD COLUMN "customUserData" JSONB;
-- AlterTable
ALTER TABLE "StepRun" ADD COLUMN "inputSchema" JSONB;
+12
View File
@@ -0,0 +1,12 @@
-- Migration: back the "order" columns of StepRun and WorkflowVersion with
-- owned sequences (BIGINT + nextval default), giving each new row a
-- monotonically increasing order value. OWNED BY ties each sequence's
-- lifetime to its column so dropping the column drops the sequence.
-- NOTE(review): neither sequence is setval()'d to the existing MAX("order"),
-- so on tables with pre-existing order values new rows could collide —
-- confirm the columns were previously unpopulated or non-unique.
-- Create sequence and alter the table for StepRun
CREATE SEQUENCE step_run_order_seq;
ALTER TABLE "StepRun" ALTER COLUMN "order" TYPE BIGINT;
ALTER SEQUENCE step_run_order_seq OWNED BY "StepRun"."order";
ALTER TABLE "StepRun" ALTER COLUMN "order" SET DEFAULT nextval('step_run_order_seq'::regclass);
-- Create sequence and alter the table for WorkflowVersion
CREATE SEQUENCE workflow_version_order_seq;
ALTER TABLE "WorkflowVersion" ALTER COLUMN "order" TYPE BIGINT;
ALTER SEQUENCE workflow_version_order_seq OWNED BY "WorkflowVersion"."order";
ALTER TABLE "WorkflowVersion" ALTER COLUMN "order" SET DEFAULT nextval('workflow_version_order_seq'::regclass);
+304
View File
@@ -0,0 +1,304 @@
-- Migration (partial view): VCS/GitHub integration. This section adds the
-- provider enum and additive retry/branch columns; defaults make them safe
-- on populated tables.
-- CreateEnum
CREATE TYPE "VcsProvider" AS ENUM ('GITHUB');
-- AlterTable
ALTER TABLE "Step" ADD COLUMN "retries" INTEGER NOT NULL DEFAULT 0;
-- AlterTable
ALTER TABLE "StepRun" ADD COLUMN "callerFiles" JSONB,
ADD COLUMN "gitRepoBranch" TEXT,
ADD COLUMN "retryCount" INTEGER NOT NULL DEFAULT 0;
-- AlterTable
ALTER TABLE "WorkflowRun" ADD COLUMN "gitRepoBranch" TEXT;
-- CreateTable
-- WorkflowDeploymentConfig: ties a Workflow to a git repo/branch; one per
-- workflow (unique workflowId index created later in this file).
CREATE TABLE "WorkflowDeploymentConfig" (
    "id" UUID NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "deletedAt" TIMESTAMP(3),
    "workflowId" UUID NOT NULL,
    "gitRepoName" TEXT NOT NULL,
    "gitRepoOwner" TEXT NOT NULL,
    "gitRepoBranch" TEXT NOT NULL,
    "githubAppInstallationId" UUID,
    CONSTRAINT "WorkflowDeploymentConfig_pkey" PRIMARY KEY ("id")
);
-- CreateTable
-- StepRunResultArchive: snapshot of a StepRun's result columns, ordered by
-- a BIGSERIAL "order" — presumably one archive row per retry; confirm with
-- the application code.
CREATE TABLE "StepRunResultArchive" (
    "id" UUID NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "deletedAt" TIMESTAMP(3),
    "stepRunId" UUID NOT NULL,
    "order" BIGSERIAL NOT NULL,
    "input" JSONB,
    "output" JSONB,
    "error" TEXT,
    "startedAt" TIMESTAMP(3),
    "finishedAt" TIMESTAMP(3),
    "timeoutAt" TIMESTAMP(3),
    "cancelledAt" TIMESTAMP(3),
    "cancelledReason" TEXT,
    "cancelledError" TEXT,
    CONSTRAINT "StepRunResultArchive_pkey" PRIMARY KEY ("id")
);
-- CreateTable
-- TenantVcsProvider: one config row per (tenant, provider) — enforced by a
-- unique index later in this file.
CREATE TABLE "TenantVcsProvider" (
    "id" UUID NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "deletedAt" TIMESTAMP(3),
    "tenantId" UUID NOT NULL,
    "vcsProvider" "VcsProvider" NOT NULL,
    "config" JSONB,
    CONSTRAINT "TenantVcsProvider_pkey" PRIMARY KEY ("id")
);
-- CreateTable
-- GithubAppInstallation: a GitHub App install on an account, linked to the
-- authorizing OAuth identity; tenant linkage is optional (nullable FKs).
CREATE TABLE "GithubAppInstallation" (
    "id" UUID NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "deletedAt" TIMESTAMP(3),
    "githubAppOAuthId" UUID NOT NULL,
    "installationId" INTEGER NOT NULL,
    "accountName" TEXT NOT NULL,
    "accountId" INTEGER NOT NULL,
    "accountAvatarURL" TEXT,
    "installationSettingsURL" TEXT,
    "config" JSONB,
    "tenantId" UUID,
    "tenantVcsProviderId" UUID,
    CONSTRAINT "GithubAppInstallation_pkey" PRIMARY KEY ("id")
);
-- CreateTable
-- GithubAppOAuth: OAuth tokens for a GitHub user. BYTEA token columns —
-- presumably encrypted at the application layer; TODO confirm.
CREATE TABLE "GithubAppOAuth" (
    "id" UUID NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "deletedAt" TIMESTAMP(3),
    "githubUserID" INTEGER NOT NULL,
    "accessToken" BYTEA NOT NULL,
    "refreshToken" BYTEA,
    "expiresAt" TIMESTAMP(3),
    CONSTRAINT "GithubAppOAuth_pkey" PRIMARY KEY ("id")
);
-- CreateTable
-- GithubPullRequest: tenant-scoped PR mirror; uniqueness is per
-- (tenant, owner, repo, PR number) via the index later in this file.
CREATE TABLE "GithubPullRequest" (
    "id" UUID NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "deletedAt" TIMESTAMP(3),
    "tenantId" UUID NOT NULL,
    "repositoryOwner" TEXT NOT NULL,
    "repositoryName" TEXT NOT NULL,
    "pullRequestID" INTEGER NOT NULL,
    "pullRequestTitle" TEXT NOT NULL,
    "pullRequestNumber" INTEGER NOT NULL,
    "pullRequestHeadBranch" TEXT NOT NULL,
    "pullRequestBaseBranch" TEXT NOT NULL,
    "pullRequestState" TEXT NOT NULL,
    CONSTRAINT "GithubPullRequest_pkey" PRIMARY KEY ("id")
);
-- CreateTable
-- GithubPullRequestComment: a comment the system manages on a PR, keyed by
-- a "moduleID" string plus GitHub's comment id.
CREATE TABLE "GithubPullRequestComment" (
    "id" UUID NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "deletedAt" TIMESTAMP(3),
    "tenantId" UUID NOT NULL,
    "pullRequestID" UUID NOT NULL,
    "moduleID" TEXT NOT NULL,
    "commentID" INTEGER NOT NULL,
    CONSTRAINT "GithubPullRequestComment_pkey" PRIMARY KEY ("id")
);
-- CreateTable
-- GithubWebhook: per-repo webhook registration; "signingSecret" is BYTEA —
-- presumably encrypted; TODO confirm.
CREATE TABLE "GithubWebhook" (
    "id" UUID NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "deletedAt" TIMESTAMP(3),
    "tenantId" UUID NOT NULL,
    "repositoryOwner" TEXT NOT NULL,
    "repositoryName" TEXT NOT NULL,
    "signingSecret" BYTEA NOT NULL,
    CONSTRAINT "GithubWebhook_pkey" PRIMARY KEY ("id")
);
-- Implicit many-to-many relation tables (Prisma "A"/"B" convention).
-- CreateTable
CREATE TABLE "_GithubAppInstallationToGithubWebhook" (
    "A" UUID NOT NULL,
    "B" UUID NOT NULL
);
-- CreateTable
CREATE TABLE "_GithubAppOAuthToUser" (
    "A" UUID NOT NULL,
    "B" UUID NOT NULL
);
-- CreateTable
CREATE TABLE "_GithubPullRequestToWorkflowRun" (
    "A" UUID NOT NULL,
    "B" UUID NOT NULL
);
-- Index section for the VCS/GitHub tables; same pattern as earlier files —
-- redundant "*_id_key" uniques plus composite natural-key uniques and
-- relation-table ("A","B") / "B" indexes.
-- CreateIndex
CREATE UNIQUE INDEX "WorkflowDeploymentConfig_id_key" ON "WorkflowDeploymentConfig"("id");
-- CreateIndex
CREATE UNIQUE INDEX "WorkflowDeploymentConfig_workflowId_key" ON "WorkflowDeploymentConfig"("workflowId");
-- CreateIndex
CREATE UNIQUE INDEX "StepRunResultArchive_id_key" ON "StepRunResultArchive"("id");
-- CreateIndex
CREATE UNIQUE INDEX "TenantVcsProvider_id_key" ON "TenantVcsProvider"("id");
-- CreateIndex
CREATE UNIQUE INDEX "TenantVcsProvider_tenantId_vcsProvider_key" ON "TenantVcsProvider"("tenantId", "vcsProvider");
-- CreateIndex
CREATE UNIQUE INDEX "GithubAppInstallation_id_key" ON "GithubAppInstallation"("id");
-- CreateIndex
CREATE UNIQUE INDEX "GithubAppInstallation_installationId_accountId_key" ON "GithubAppInstallation"("installationId", "accountId");
-- CreateIndex
CREATE UNIQUE INDEX "GithubAppOAuth_id_key" ON "GithubAppOAuth"("id");
-- CreateIndex
CREATE UNIQUE INDEX "GithubAppOAuth_githubUserID_key" ON "GithubAppOAuth"("githubUserID");
-- CreateIndex
CREATE UNIQUE INDEX "GithubPullRequest_id_key" ON "GithubPullRequest"("id");
-- CreateIndex
-- index name truncated by PostgreSQL's 63-byte identifier limit ("_p_key")
CREATE UNIQUE INDEX "GithubPullRequest_tenantId_repositoryOwner_repositoryName_p_key" ON "GithubPullRequest"("tenantId", "repositoryOwner", "repositoryName", "pullRequestNumber");
-- CreateIndex
CREATE UNIQUE INDEX "GithubPullRequestComment_id_key" ON "GithubPullRequestComment"("id");
-- CreateIndex
CREATE UNIQUE INDEX "GithubWebhook_id_key" ON "GithubWebhook"("id");
-- CreateIndex
CREATE UNIQUE INDEX "GithubWebhook_tenantId_repositoryOwner_repositoryName_key" ON "GithubWebhook"("tenantId", "repositoryOwner", "repositoryName");
-- CreateIndex
CREATE UNIQUE INDEX "_GithubAppInstallationToGithubWebhook_AB_unique" ON "_GithubAppInstallationToGithubWebhook"("A", "B");
-- CreateIndex
CREATE INDEX "_GithubAppInstallationToGithubWebhook_B_index" ON "_GithubAppInstallationToGithubWebhook"("B");
-- CreateIndex
CREATE UNIQUE INDEX "_GithubAppOAuthToUser_AB_unique" ON "_GithubAppOAuthToUser"("A", "B");
-- CreateIndex
CREATE INDEX "_GithubAppOAuthToUser_B_index" ON "_GithubAppOAuthToUser"("B");
-- CreateIndex
CREATE UNIQUE INDEX "_GithubPullRequestToWorkflowRun_AB_unique" ON "_GithubPullRequestToWorkflowRun"("A", "B");
-- CreateIndex
CREATE INDEX "_GithubPullRequestToWorkflowRun_B_index" ON "_GithubPullRequestToWorkflowRun"("B");
-- Foreign keys for the VCS-integration tables. Tenant-owned rows cascade on
-- tenant delete; optional links (e.g. GithubAppInstallation.tenantId) use
-- SET NULL so the installation survives tenant removal.
-- AddForeignKey
ALTER TABLE "WorkflowDeploymentConfig" ADD CONSTRAINT "WorkflowDeploymentConfig_workflowId_fkey" FOREIGN KEY ("workflowId") REFERENCES "Workflow"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkflowDeploymentConfig" ADD CONSTRAINT "WorkflowDeploymentConfig_githubAppInstallationId_fkey" FOREIGN KEY ("githubAppInstallationId") REFERENCES "GithubAppInstallation"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "StepRunResultArchive" ADD CONSTRAINT "StepRunResultArchive_stepRunId_fkey" FOREIGN KEY ("stepRunId") REFERENCES "StepRun"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "TenantVcsProvider" ADD CONSTRAINT "TenantVcsProvider_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "GithubAppInstallation" ADD CONSTRAINT "GithubAppInstallation_githubAppOAuthId_fkey" FOREIGN KEY ("githubAppOAuthId") REFERENCES "GithubAppOAuth"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "GithubAppInstallation" ADD CONSTRAINT "GithubAppInstallation_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "GithubAppInstallation" ADD CONSTRAINT "GithubAppInstallation_tenantVcsProviderId_fkey" FOREIGN KEY ("tenantVcsProviderId") REFERENCES "TenantVcsProvider"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "GithubPullRequest" ADD CONSTRAINT "GithubPullRequest_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "GithubPullRequestComment" ADD CONSTRAINT "GithubPullRequestComment_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "GithubPullRequestComment" ADD CONSTRAINT "GithubPullRequestComment_pullRequestID_fkey" FOREIGN KEY ("pullRequestID") REFERENCES "GithubPullRequest"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "GithubWebhook" ADD CONSTRAINT "GithubWebhook_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_GithubAppInstallationToGithubWebhook" ADD CONSTRAINT "_GithubAppInstallationToGithubWebhook_A_fkey" FOREIGN KEY ("A") REFERENCES "GithubAppInstallation"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_GithubAppInstallationToGithubWebhook" ADD CONSTRAINT "_GithubAppInstallationToGithubWebhook_B_fkey" FOREIGN KEY ("B") REFERENCES "GithubWebhook"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_GithubAppOAuthToUser" ADD CONSTRAINT "_GithubAppOAuthToUser_A_fkey" FOREIGN KEY ("A") REFERENCES "GithubAppOAuth"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_GithubAppOAuthToUser" ADD CONSTRAINT "_GithubAppOAuthToUser_B_fkey" FOREIGN KEY ("B") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_GithubPullRequestToWorkflowRun" ADD CONSTRAINT "_GithubPullRequestToWorkflowRun_A_fkey" FOREIGN KEY ("A") REFERENCES "GithubPullRequest"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "_GithubPullRequestToWorkflowRun" ADD CONSTRAINT "_GithubPullRequestToWorkflowRun_B_fkey" FOREIGN KEY ("B") REFERENCES "WorkflowRun"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- Seed the fixed-UUID "internal" tenant used by the system itself.
-- ON CONFLICT DO NOTHING makes this idempotent on re-runs.
INSERT INTO
"Tenant" (
"id",
"createdAt",
"updatedAt",
"deletedAt",
"name",
"slug"
)
VALUES
(
'8d420720-ef03-41dc-9c73-1c93f276db97',
CURRENT_TIMESTAMP,
CURRENT_TIMESTAMP,
NULL,
'internal',
'internal'
) ON CONFLICT DO NOTHING;
-- Trigger function: rejects any Tenant INSERT/UPDATE that tries to claim the
-- reserved name or slug 'internal' (that tenant is seeded above with a fixed
-- id). Raising inside the trigger aborts the offending statement.
-- NOTE(review): the seed INSERT runs before this trigger is created, so the
-- seed itself is not blocked.
CREATE OR REPLACE FUNCTION prevent_internal_name_or_slug()
RETURNS trigger AS $$
BEGIN
IF NEW."name" = 'internal' OR NEW."slug" = 'internal' THEN
RAISE EXCEPTION 'Values "internal" for "name" or "slug" are not allowed.';
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER check_name_or_slug_before_insert_or_update
BEFORE INSERT OR UPDATE ON "Tenant"
FOR EACH ROW EXECUTE FUNCTION prevent_internal_name_or_slug();
@@ -0,0 +1,5 @@
-- AlterEnum
-- New concurrency strategy value for workflow concurrency limits.
ALTER TYPE "ConcurrencyLimitStrategy" ADD VALUE 'GROUP_ROUND_ROBIN';
-- AlterTable
-- Nullable here; a later migration makes maxRuns NOT NULL with a default.
ALTER TABLE "Worker" ADD COLUMN "maxRuns" INTEGER;
@@ -0,0 +1,8 @@
-- Make Worker.dispatcherId optional: drop the FK, relax the column, then
-- re-add the FK with ON DELETE SET NULL so a worker can outlive its dispatcher.
-- DropForeignKey
ALTER TABLE "Worker" DROP CONSTRAINT "Worker_dispatcherId_fkey";
-- AlterTable
ALTER TABLE "Worker" ALTER COLUMN "dispatcherId" DROP NOT NULL;
-- AddForeignKey
ALTER TABLE "Worker" ADD CONSTRAINT "Worker_dispatcherId_fkey" FOREIGN KEY ("dispatcherId") REFERENCES "Dispatcher"("id") ON DELETE SET NULL ON UPDATE CASCADE;
@@ -0,0 +1,8 @@
-- Scheduling-timeout support: durations stored as Go-style strings ('5m').
-- AlterTable
ALTER TABLE "GetGroupKeyRun" ADD COLUMN "scheduleTimeoutAt" TIMESTAMP(3);
-- AlterTable
ALTER TABLE "Step" ADD COLUMN "scheduleTimeout" TEXT NOT NULL DEFAULT '5m';
-- AlterTable
ALTER TABLE "WorkflowVersion" ADD COLUMN "scheduleTimeout" TEXT NOT NULL DEFAULT '5m';
+21
View File
@@ -0,0 +1,21 @@
-- CreateEnum
CREATE TYPE "LogLineLevel" AS ENUM ('DEBUG', 'INFO', 'WARN', 'ERROR');
-- CreateTable
-- Per-step-run log lines. stepRunId is nullable and SET NULL on step-run
-- delete, so log history can outlive the step run; tenant delete cascades.
CREATE TABLE
"LogLine" (
"id" BIGSERIAL NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"tenantId" UUID NOT NULL,
"stepRunId" UUID,
"message" TEXT NOT NULL,
"level" "LogLineLevel" NOT NULL DEFAULT 'INFO',
"metadata" JSONB,
CONSTRAINT "LogLine_pkey" PRIMARY KEY ("id")
);
-- AddForeignKey
ALTER TABLE "LogLine" ADD CONSTRAINT "LogLine_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant" ("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "LogLine" ADD CONSTRAINT "LogLine_stepRunId_fkey" FOREIGN KEY ("stepRunId") REFERENCES "StepRun" ("id") ON DELETE SET NULL ON UPDATE CASCADE;
+19
View File
@@ -0,0 +1,19 @@
-- CreateTable
-- AWS SNS ingestion integration; one row per (tenant, topic ARN).
CREATE TABLE "SNSIntegration" (
"id" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"tenantId" UUID NOT NULL,
"topicArn" TEXT NOT NULL,
CONSTRAINT "SNSIntegration_pkey" PRIMARY KEY ("id")
);
-- CreateIndex
CREATE UNIQUE INDEX "SNSIntegration_id_key" ON "SNSIntegration"("id");
-- CreateIndex
CREATE UNIQUE INDEX "SNSIntegration_tenantId_topicArn_key" ON "SNSIntegration"("tenantId", "topicArn");
-- AddForeignKey
ALTER TABLE "SNSIntegration" ADD CONSTRAINT "SNSIntegration_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
+18
View File
@@ -0,0 +1,18 @@
-- Converts a Go-style duration string ('500ms', '30s', '5m', '1h', '2d',
-- '1w', '1y') into a Postgres interval. Only the leading digit run is read,
-- so compound values like '1h30m' reduce to their first unit (here, 1 min
-- via the '%m' branch) -- callers are expected to pass single-unit strings.
-- Unrecognized suffixes fall back to a zero interval; a recognized suffix
-- with no leading digits yields NULL (substring returns NULL).
CREATE OR REPLACE FUNCTION convert_duration_to_interval(duration text) RETURNS interval AS $$
DECLARE
num_value INT;
BEGIN
-- Leading integer portion of the duration string (implicit text -> int cast).
num_value := substring(duration from '^\d+');
RETURN CASE
-- '%ms' must be tested before '%s' since it also ends in 's'.
WHEN duration LIKE '%ms' THEN make_interval(secs => num_value::float / 1000)
WHEN duration LIKE '%s' THEN make_interval(secs => num_value)
WHEN duration LIKE '%m' THEN make_interval(mins => num_value)
WHEN duration LIKE '%h' THEN make_interval(hours => num_value)
WHEN duration LIKE '%d' THEN make_interval(days => num_value)
WHEN duration LIKE '%w' THEN make_interval(days => num_value * 7)
WHEN duration LIKE '%y' THEN make_interval(months => num_value * 12)
ELSE '0 seconds'::interval
END;
END;
$$ LANGUAGE plpgsql;
+18
View File
@@ -0,0 +1,18 @@
-- Redefinition of convert_duration_to_interval (CREATE OR REPLACE supersedes
-- the earlier migration's version). Identical parsing; only the fallback for
-- unrecognized suffixes changed from '0 seconds' to '5 minutes', matching the
-- '5m' scheduleTimeout defaults introduced elsewhere in the schema.
CREATE OR REPLACE FUNCTION convert_duration_to_interval(duration text) RETURNS interval AS $$
DECLARE
num_value INT;
BEGIN
-- Leading integer portion of the duration string (implicit text -> int cast).
num_value := substring(duration from '^\d+');
RETURN CASE
-- '%ms' must be tested before '%s' since it also ends in 's'.
WHEN duration LIKE '%ms' THEN make_interval(secs => num_value::float / 1000)
WHEN duration LIKE '%s' THEN make_interval(secs => num_value)
WHEN duration LIKE '%m' THEN make_interval(mins => num_value)
WHEN duration LIKE '%h' THEN make_interval(hours => num_value)
WHEN duration LIKE '%d' THEN make_interval(days => num_value)
WHEN duration LIKE '%w' THEN make_interval(days => num_value * 7)
WHEN duration LIKE '%y' THEN make_interval(months => num_value * 12)
ELSE '5 minutes'::interval
END;
END;
$$ LANGUAGE plpgsql;
+36
View File
@@ -0,0 +1,36 @@
/*
Warnings:
- A unique constraint covering the columns `[parentId,parentStepRunId,childKey]` on the table `WorkflowRun` will be added. If there are existing duplicate values, this will fail.
- A unique constraint covering the columns `[parentId,parentStepRunId,childKey]` on the table `WorkflowTriggerScheduledRef` will be added. If there are existing duplicate values, this will fail.
*/
-- Parent/child workflow-run linkage: a child run is identified under its
-- parent by (parentId, parentStepRunId, childKey); childIndex orders siblings.
-- AlterTable
ALTER TABLE "WorkflowRun" ADD COLUMN "childIndex" INTEGER,
ADD COLUMN "childKey" TEXT,
ADD COLUMN "parentId" UUID,
ADD COLUMN "parentStepRunId" UUID;
-- AlterTable
ALTER TABLE "WorkflowTriggerScheduledRef" ADD COLUMN "childIndex" INTEGER,
ADD COLUMN "childKey" TEXT,
ADD COLUMN "parentStepRunId" UUID,
ADD COLUMN "parentWorkflowRunId" UUID;
-- CreateIndex
CREATE UNIQUE INDEX "WorkflowRun_parentId_parentStepRunId_childKey_key" ON "WorkflowRun"("parentId", "parentStepRunId", "childKey");
-- CreateIndex
-- NOTE(review): this index references "parentId", which is not added above;
-- presumably WorkflowTriggerScheduledRef already has that column -- confirm.
CREATE UNIQUE INDEX "WorkflowTriggerScheduledRef_parentId_parentStepRunId_childK_key" ON "WorkflowTriggerScheduledRef"("parentId", "parentStepRunId", "childKey");
-- AddForeignKey
ALTER TABLE "WorkflowTriggerScheduledRef" ADD CONSTRAINT "WorkflowTriggerScheduledRef_parentWorkflowRunId_fkey" FOREIGN KEY ("parentWorkflowRunId") REFERENCES "WorkflowRun"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkflowTriggerScheduledRef" ADD CONSTRAINT "WorkflowTriggerScheduledRef_parentStepRunId_fkey" FOREIGN KEY ("parentStepRunId") REFERENCES "StepRun"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
-- Self-referencing FK: a run may point at its parent run.
ALTER TABLE "WorkflowRun" ADD CONSTRAINT "WorkflowRun_parentId_fkey" FOREIGN KEY ("parentId") REFERENCES "WorkflowRun"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkflowRun" ADD CONSTRAINT "WorkflowRun_parentStepRunId_fkey" FOREIGN KEY ("parentStepRunId") REFERENCES "StepRun"("id") ON DELETE SET NULL ON UPDATE CASCADE;
+11
View File
@@ -0,0 +1,11 @@
-- CreateTable
-- One-row-per-worker slot counter (no PK; uniqueness enforced by the index
-- below). Superseded later by the per-slot WorkerSemaphoreSlot table.
CREATE TABLE "WorkerSemaphore" (
"workerId" UUID NOT NULL,
"slots" INTEGER NOT NULL
);
-- CreateIndex
CREATE UNIQUE INDEX "WorkerSemaphore_workerId_key" ON "WorkerSemaphore"("workerId");
-- AddForeignKey
ALTER TABLE "WorkerSemaphore" ADD CONSTRAINT "WorkerSemaphore_workerId_fkey" FOREIGN KEY ("workerId") REFERENCES "Worker"("id") ON DELETE CASCADE ON UPDATE CASCADE;
+67
View File
@@ -0,0 +1,67 @@
-- CreateTable
-- Per-step rate-limit consumption: how many units a step takes from a
-- tenant-scoped RateLimit key. No PK; uniqueness via index below.
CREATE TABLE "StepRateLimit" (
"units" INTEGER NOT NULL,
"stepId" UUID NOT NULL,
"rateLimitKey" TEXT NOT NULL,
"tenantId" UUID NOT NULL
);
-- CreateTable
-- Token-bucket state per (tenantId, key). "window" is a duration string cast
-- to INTERVAL by get_refill_value below; "value" is the remaining budget.
CREATE TABLE "RateLimit" (
"tenantId" UUID NOT NULL,
"key" TEXT NOT NULL,
"limitValue" INTEGER NOT NULL,
"value" INTEGER NOT NULL,
"window" TEXT NOT NULL,
"lastRefill" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP
);
-- CreateTable
-- Raw streamed output chunks (BYTEA) from step runs.
CREATE TABLE "StreamEvent" (
"id" BIGSERIAL NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"tenantId" UUID NOT NULL,
"stepRunId" UUID,
"message" BYTEA NOT NULL,
"metadata" JSONB,
CONSTRAINT "StreamEvent_pkey" PRIMARY KEY ("id")
);
-- CreateIndex
CREATE UNIQUE INDEX "StepRateLimit_stepId_rateLimitKey_key" ON "StepRateLimit"("stepId", "rateLimitKey");
-- CreateIndex
CREATE UNIQUE INDEX "RateLimit_tenantId_key_key" ON "RateLimit"("tenantId", "key");
-- AddForeignKey
ALTER TABLE "StepRateLimit" ADD CONSTRAINT "StepRateLimit_stepId_fkey" FOREIGN KEY ("stepId") REFERENCES "Step"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
-- RESTRICT: a RateLimit row cannot be deleted while steps still reference it.
ALTER TABLE "StepRateLimit" ADD CONSTRAINT "StepRateLimit_tenantId_rateLimitKey_fkey" FOREIGN KEY ("tenantId", "rateLimitKey") REFERENCES "RateLimit"("tenantId", "key") ON DELETE RESTRICT ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "StepRateLimit" ADD CONSTRAINT "StepRateLimit_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "RateLimit" ADD CONSTRAINT "RateLimit_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "StreamEvent" ADD CONSTRAINT "StreamEvent_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "StreamEvent" ADD CONSTRAINT "StreamEvent_stepRunId_fkey" FOREIGN KEY ("stepRunId") REFERENCES "StepRun"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- Read-only helper: the token count a limit would hold after refill -- the
-- full limitValue once the window has elapsed since lastRefill, otherwise the
-- current value. Does NOT update lastRefill; callers persist the refill.
CREATE OR REPLACE FUNCTION get_refill_value(rate_limit "RateLimit")
RETURNS INTEGER AS $$
DECLARE
refill_amount INTEGER;
BEGIN
-- "window" is TEXT (e.g. '1 minute'); the ::INTERVAL cast errors on
-- malformed values -- assumed validated upstream. TODO confirm.
IF NOW() - rate_limit."lastRefill" >= rate_limit."window"::INTERVAL THEN
refill_amount := rate_limit."limitValue";
ELSE
refill_amount := rate_limit."value";
END IF;
RETURN refill_amount;
END;
$$ LANGUAGE plpgsql;
@@ -0,0 +1,2 @@
-- AlterTable
-- Allow cron triggers to be paused; existing crons stay enabled.
ALTER TABLE "WorkflowTriggerCronRef" ADD COLUMN "enabled" BOOLEAN NOT NULL DEFAULT true;
+11
View File
@@ -0,0 +1,11 @@
/*
Warnings:
- You are about to drop the column `status` on the `Worker` table. All the data in the column will be lost.
*/
-- Worker liveness is no longer tracked via a status column (the enum is
-- dropped with it); replaced by heartbeat/isActive mechanisms elsewhere.
-- AlterTable
ALTER TABLE "Worker" DROP COLUMN "status";
-- DropEnum
DROP TYPE "WorkerStatus";
@@ -0,0 +1,2 @@
-- AlterTable
-- Per-tenant analytics opt-out; defaults to opted-in (false).
ALTER TABLE "Tenant" ADD COLUMN "analyticsOptOut" BOOLEAN NOT NULL DEFAULT false;
+26
View File
@@ -0,0 +1,26 @@
/*
Warnings:
- A unique constraint covering the columns `[onFailureJobId]` on the table `WorkflowVersion` will be added. If there are existing duplicate values, this will fail.
*/
-- CreateEnum
-- Distinguishes regular jobs from on-failure handler jobs.
CREATE TYPE "JobKind" AS ENUM ('DEFAULT', 'ON_FAILURE');
-- AlterTable
ALTER TABLE "Event" ADD COLUMN "additionalMetadata" JSONB;
-- AlterTable
ALTER TABLE "Job" ADD COLUMN "kind" "JobKind" NOT NULL DEFAULT 'DEFAULT';
-- AlterTable
ALTER TABLE "WorkflowRun" ADD COLUMN "additionalMetadata" JSONB;
-- AlterTable
-- At most one on-failure job per workflow version (unique index below).
ALTER TABLE "WorkflowVersion" ADD COLUMN "onFailureJobId" UUID;
-- CreateIndex
CREATE UNIQUE INDEX "WorkflowVersion_onFailureJobId_key" ON "WorkflowVersion"("onFailureJobId");
-- AddForeignKey
ALTER TABLE "WorkflowVersion" ADD CONSTRAINT "WorkflowVersion_onFailureJobId_fkey" FOREIGN KEY ("onFailureJobId") REFERENCES "Job"("id") ON DELETE SET NULL ON UPDATE CASCADE;
+77
View File
@@ -0,0 +1,77 @@
-- CreateTable
-- Per-tenant alerting configuration; maxFrequency is a duration string
-- throttling repeat alerts, tickerId links to the ticker that fires them.
CREATE TABLE
"TenantAlertingSettings" (
"id" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"deletedAt" TIMESTAMP(3),
"tenantId" UUID NOT NULL,
"maxFrequency" TEXT NOT NULL DEFAULT '1h',
"lastAlertedAt" TIMESTAMP(3),
"tickerId" UUID,
CONSTRAINT "TenantAlertingSettings_pkey" PRIMARY KEY ("id")
);
-- CreateTable
-- "emails" holds multiple addresses in one TEXT field.
CREATE TABLE
"TenantAlertEmailGroup" (
"id" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"deletedAt" TIMESTAMP(3),
"tenantId" UUID NOT NULL,
"emails" TEXT NOT NULL,
CONSTRAINT "TenantAlertEmailGroup_pkey" PRIMARY KEY ("id")
);
-- CreateTable
-- Slack incoming-webhook destination; webhookURL stored as BYTEA
-- (presumably encrypted -- confirm against application code).
CREATE TABLE
"SlackAppWebhook" (
"id" UUID NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"deletedAt" TIMESTAMP(3),
"tenantId" UUID NOT NULL,
"teamId" TEXT NOT NULL,
"teamName" TEXT NOT NULL,
"channelId" TEXT NOT NULL,
"channelName" TEXT NOT NULL,
"webhookURL" BYTEA NOT NULL,
CONSTRAINT "SlackAppWebhook_pkey" PRIMARY KEY ("id")
);
-- CreateIndex
CREATE UNIQUE INDEX "TenantAlertingSettings_id_key" ON "TenantAlertingSettings" ("id");
-- CreateIndex
-- One settings row per tenant.
CREATE UNIQUE INDEX "TenantAlertingSettings_tenantId_key" ON "TenantAlertingSettings" ("tenantId");
-- CreateIndex
CREATE UNIQUE INDEX "TenantAlertEmailGroup_id_key" ON "TenantAlertEmailGroup" ("id");
-- CreateIndex
CREATE UNIQUE INDEX "SlackAppWebhook_id_key" ON "SlackAppWebhook" ("id");
-- CreateIndex
CREATE UNIQUE INDEX "SlackAppWebhook_tenantId_teamId_channelId_key" ON "SlackAppWebhook" ("tenantId", "teamId", "channelId");
-- AddForeignKey
ALTER TABLE "TenantAlertingSettings" ADD CONSTRAINT "TenantAlertingSettings_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant" ("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "TenantAlertingSettings" ADD CONSTRAINT "TenantAlertingSettings_tickerId_fkey" FOREIGN KEY ("tickerId") REFERENCES "Ticker" ("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "TenantAlertEmailGroup" ADD CONSTRAINT "TenantAlertEmailGroup_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant" ("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "SlackAppWebhook" ADD CONSTRAINT "SlackAppWebhook_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant" ("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- Insert "TenantAlertingSettings" for every existing tenant
-- (backfill so pre-existing tenants get default alerting settings).
INSERT INTO
"TenantAlertingSettings" ("id", "tenantId")
SELECT
gen_random_uuid (),
"id"
FROM
"Tenant";
+56
View File
@@ -0,0 +1,56 @@
-- Hot-path indexes for the run queues, then the StepRunEvent audit table.
-- CreateIndex
CREATE INDEX "JobRun_workflowRunId_tenantId_idx" ON "JobRun" ("workflowRunId", "tenantId");
-- CreateIndex
-- Supports the requeue scan (tenant + status + requeueAfter ordering).
CREATE INDEX "StepRun_tenantId_status_requeueAfter_createdAt_idx" ON "StepRun" ("tenantId", "status", "requeueAfter", "createdAt");
-- CreateIndex
CREATE INDEX "StepRun_stepId_idx" ON "StepRun" ("stepId");
-- CreateIndex
CREATE INDEX "StepRun_jobRunId_status_idx" ON "StepRun" ("jobRunId", "status");
-- CreateIndex
CREATE INDEX "StepRun_id_tenantId_idx" ON "StepRun" ("id", "tenantId");
-- CreateIndex
CREATE INDEX "StepRun_jobRunId_tenantId_order_idx" ON "StepRun" ("jobRunId", "tenantId", "order");
-- CreateEnum
CREATE TYPE "StepRunEventReason" AS ENUM (
'REQUEUED_NO_WORKER',
'REQUEUED_RATE_LIMIT',
'SCHEDULING_TIMED_OUT',
'ASSIGNED',
'STARTED',
'FINISHED',
'FAILED',
'RETRYING',
'CANCELLED'
);
-- CreateEnum
CREATE TYPE "StepRunEventSeverity" AS ENUM ('INFO', 'WARNING', 'CRITICAL');
-- CreateTable
-- Deduplicated event log per step run: repeated events bump "count" and
-- "timeLastSeen" rather than inserting new rows (no PK; unique index on id).
CREATE TABLE
"StepRunEvent" (
"id" BIGSERIAL NOT NULL,
"timeFirstSeen" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"timeLastSeen" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"stepRunId" UUID NOT NULL,
"reason" "StepRunEventReason" NOT NULL,
"severity" "StepRunEventSeverity" NOT NULL,
"message" TEXT NOT NULL,
"count" INTEGER NOT NULL,
"data" JSONB
);
-- CreateIndex
CREATE UNIQUE INDEX "StepRunEvent_id_key" ON "StepRunEvent" ("id");
-- CreateIndex
CREATE INDEX "StepRunEvent_stepRunId_idx" ON "StepRunEvent" ("stepRunId");
-- AddForeignKey
ALTER TABLE "StepRunEvent" ADD CONSTRAINT "StepRunEvent_stepRunId_fkey" FOREIGN KEY ("stepRunId") REFERENCES "StepRun" ("id") ON DELETE CASCADE ON UPDATE CASCADE;
+14
View File
@@ -0,0 +1,14 @@
-- AlterEnum
-- This migration adds more than one value to an enum.
-- With PostgreSQL versions 11 and earlier, this is not possible
-- in a single migration. This can be worked around by creating
-- multiple migrations, each migration adding only one value to
-- the enum.
ALTER TYPE "StepRunEventReason" ADD VALUE 'TIMED_OUT';
ALTER TYPE "StepRunEventReason" ADD VALUE 'REASSIGNED';
ALTER TYPE "StepRunEventReason" ADD VALUE 'SLOT_RELEASED';
-- AlterTable
-- Tracks whether a step run has already given back its worker slot.
ALTER TABLE "StepRun" ADD COLUMN "semaphoreReleased" BOOLEAN NOT NULL DEFAULT false;
@@ -0,0 +1,2 @@
-- AlterEnum
-- Single-value enum addition (safe on all supported Postgres versions).
ALTER TYPE "StepRunEventReason" ADD VALUE 'TIMEOUT_REFRESHED';
@@ -0,0 +1,2 @@
-- AlterEnum
-- Single-value enum addition (safe on all supported Postgres versions).
ALTER TYPE "StepRunEventReason" ADD VALUE 'RETRIED_BY_USER';
@@ -0,0 +1,2 @@
-- AlterTable
-- Liveness flag replacing the dropped Worker.status column.
ALTER TABLE "Worker" ADD COLUMN "isActive" BOOLEAN NOT NULL DEFAULT false;
@@ -0,0 +1,2 @@
-- AlterTable
-- Timestamp of the worker's most recent action-listener connection.
ALTER TABLE "Worker" ADD COLUMN "lastListenerEstablished" TIMESTAMP(3);
@@ -0,0 +1,2 @@
-- AlterTable
-- Throttle marker for expiring-token alerts (NULL = not yet scheduled).
ALTER TABLE "APIToken" ADD COLUMN "nextAlertAt" TIMESTAMP(3);
@@ -0,0 +1,6 @@
-- AlterTable
-- Whether alert emails go to all tenant members by default.
ALTER TABLE "Tenant" ADD COLUMN "alertMemberEmails" BOOLEAN NOT NULL DEFAULT true;
-- AlterTable
-- Token-expiry alerts default on; workflow-run-failure alerts default off.
ALTER TABLE "TenantAlertingSettings" ADD COLUMN "enableExpiringTokenAlerts" BOOLEAN NOT NULL DEFAULT true,
ADD COLUMN "enableWorkflowRunFailureAlerts" BOOLEAN NOT NULL DEFAULT false;
+78
View File
@@ -0,0 +1,78 @@
/* Warnings:
- You are about to drop the `WorkerSemaphore` table. If the table is not empty, all the data it contains will be lost.
- Made the column `maxRuns` on table `Worker` required. This step will fail if there are existing NULL values in that column.
*/
-- Replaces the counter-style WorkerSemaphore with explicit per-slot rows
-- (WorkerSemaphoreSlot): one row per concurrent run a worker can hold; a
-- slot is "taken" when stepRunId is non-NULL.
-- Update existing workers with NULL maxRuns to have a default value
UPDATE "Worker" SET "maxRuns" = 100 WHERE "maxRuns" IS NULL;
-- AlterTable
ALTER TABLE "Worker" ALTER COLUMN "maxRuns" SET NOT NULL,
ALTER COLUMN "maxRuns" SET DEFAULT 100;
-- CreateTable
CREATE TABLE IF NOT EXISTS "WorkerSemaphoreSlot" (
"id" UUID NOT NULL,
"workerId" UUID NOT NULL,
"stepRunId" UUID,
CONSTRAINT "WorkerSemaphoreSlot_pkey" PRIMARY KEY ("id")
);
-- CreateIndex
CREATE UNIQUE INDEX IF NOT EXISTS "WorkerSemaphoreSlot_id_key" ON "WorkerSemaphoreSlot"("id");
-- CreateIndex
-- A step run occupies at most one slot.
CREATE UNIQUE INDEX IF NOT EXISTS "WorkerSemaphoreSlot_stepRunId_key" ON "WorkerSemaphoreSlot"("stepRunId");
-- AddForeignKey
ALTER TABLE "WorkerSemaphoreSlot"
ADD CONSTRAINT "WorkerSemaphoreSlot_workerId_fkey"
FOREIGN KEY ("workerId") REFERENCES "Worker"("id")
ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkerSemaphoreSlot"
ADD CONSTRAINT "WorkerSemaphoreSlot_stepRunId_fkey"
FOREIGN KEY ("stepRunId") REFERENCES "StepRun"("id")
ON DELETE CASCADE ON UPDATE CASCADE;
-- CreateIndex
CREATE INDEX "WorkerSemaphoreSlot_workerId_idx" ON "WorkerSemaphoreSlot"("workerId");
-- Create maxRun semaphore slots for each worker with a recent heartbeat
-- (maxRuns slots each; workers silent for >10h are skipped as stale).
INSERT INTO "WorkerSemaphoreSlot" ("id", "workerId")
SELECT gen_random_uuid(), w.id
FROM "Worker" w
CROSS JOIN generate_series(1, COALESCE(w."maxRuns", 100))
WHERE w."lastHeartbeatAt" >= NOW() - INTERVAL '10 hours'
ON CONFLICT DO NOTHING;
-- -- Update a null slot for each step that is currently running or assigned
-- Backfill: claim one free slot per in-flight step run. Row-at-a-time on
-- purpose -- each step run must land in a distinct slot. Step runs whose
-- worker has no free slot (or was skipped as stale above) are left unclaimed.
DO $$
DECLARE
sr RECORD;
wss RECORD;
BEGIN
-- Loop over each running or assigned step run
FOR sr IN
SELECT "id", "workerId"
FROM "StepRun"
WHERE "status" IN ('RUNNING', 'ASSIGNED')
LOOP
-- Find one available WorkerSemaphoreSlot for the current workerId
SELECT "id"
INTO wss
FROM "WorkerSemaphoreSlot"
WHERE "workerId" = sr."workerId" AND "stepRunId" IS NULL
LIMIT 1;
-- If an available slot is found, update it with the stepRunId
IF wss.id IS NOT NULL THEN
UPDATE "WorkerSemaphoreSlot"
SET "stepRunId" = sr.id
WHERE "id" = wss.id;
END IF;
END LOOP;
END $$;
+33
View File
@@ -0,0 +1,33 @@
h1:JJ+ixgxQ7UzKYCWn0uFrl3w3AjPFlq3FXcztV+/sg5k=
20240115180414_init.sql h1:Ef3ZyjAHkmJPdGF/dEWCahbwgcg6uGJKnDxW2JCRi2k=
20240122014727_v0_6_0.sql h1:o/LdlteAeFgoHJ3e/M4Xnghqt9826IE/Y/h0q95Acuo=
20240126235456_v0_7_0.sql h1:KiVzt/hXgQ6esbdC6OMJOOWuYEXmy1yeCpmsVAHTFKs=
20240129040510_v0_8_0.sql h1:clarRyfsk+lMHkpB34aqF/g8p0fLLf+i9C2RGTscJNw=
20240202042355_v0_9_0.sql h1:AHsV34k3zPeweaKqOoL+OoWVw0lKTXboeVbYkRRfDRY=
20240209132837_v0_10_0.sql h1:UaFDyiIeTcqOAhpZ7hGbUDH1+Od6loBYI2rDXxXwgyk=
20240215162148_v0_10_2.sql h1:5Rx5GBMm1cwcWeEpmt1SfOrM7uE/LfB1yr2z0ftkwhg=
20240216133745_v0_11_0.sql h1:mm/VQMqSwBjvBDII5LwAWVVQilUVZ+CMGIwgduY9zGM=
20240226051822_v0_12_0.sql h1:ujRIRr4O22P2l0JbXnv9qFziExrCqP8yBbSDblvOSdI=
20240227181732_v0_13_0.sql h1:5ieDRSVssfRnHFO+p7uOLf/dVSnOr4bmLxi9QS/quvY=
20240228050417_v0_13_2.sql h1:73oi1hgvI3UiXq43NRP/OBENoXOIAJnx1JyYrV2Hu6M=
20240229232811_v0_14_0.sql h1:da+gR8skoPPU/zSCGRe0P2uvGRy7viCA6FnR+Bnecws=
20240304060408_v0_15_0.sql h1:qqn0OMqsXQQY0pRsfbemuOuDbhIsM7SZQmoFMCT3rFA=
20240320215205_v0_17_0.sql h1:93perTfOP1fhJS/uJOcQbXxKMvQ0Hz8ysflF2Fo23iQ=
20240321215205_v0_17_1.sql h1:ZWQ1oQKccfgW21rvEG8Mn0RV/2qQYHZ6RFzC9reURrw=
20240326151030_v0_18_0.sql h1:JzhNFlPsIA/H7LA7h+vzrj/hAPzPUWlH38krXkgs0FY=
20240331162333_v0_18_1.sql h1:T/ex8sfcIUwse4GYZNKC8ZuhiKIzO1G2UgqLp9O/Z3Q=
20240402034010_v0_19_0.sql h1:V6y6gAWacmIpEcFqUs6El5KA2u/C2TYqqULxXvcFtRk=
20240424091046_v0_21_9.sql h1:rmeyK17yjPl12PwP0oY/MQXTxn5Hj+KMhvS5eDnuu9s=
20240430161943_v0_22_1.sql h1:NxjtFaLyqlFRI6wgryOfKeoHpR7eka2IpkmW1fN9uMo=
20240503190030_v0_23_0.sql h1:IEfvIoHuD91G0tObWWDbNZLydb1QtrBweKrDJrALtWc=
20240506194242_v0_24_0.sql h1:aWGQJgU3Jats/0eHgwSXK4JXGZxnWNpltwpdmf2Etxs=
20240507200816_v0_25_0.sql h1:KjxnVEoE5mrNYS98i60Nt/zTCGc2y8Zc/kwds8STXnw=
20240509213608_v0_26_0.sql h1:eYnKs9LKgP0UZAyACyMfByuU9lXJlJJwQeTkHu9EPGo=
20240514192527_v0_27_0.sql h1:F7gMfARFPNqEPMllgF+xR/3N4ImxwyjFei+SnD3tNEI=
20240514203126_v0_28_0.sql h1:iOG3IzBro0Uo+xo3n6G9K5mUp3sWnyY4imzgMcWj3+A=
20240517204453_v0_28_1.sql h1:Xr7x7FdvrVaW2F2xqE6EfUFYArGIzqzogDn1lEeyzE4=
20240520152239_v0_28_2.sql h1:oefjZzI76xmcVaDgGprr/BnP4JqiCt8q4LITUNE0fLA=
20240521205311_v0_28_3.sql h1:kwsHJod6PAhikqBDMpFlGeQ4azHDRxglJmS9sqbsVrw=
20240531142907_v0_29_0.sql h1:lhXwSIKk4deWOq/YKcSYCF/4kbZ0lh0vcbun/vqWOgc=
20240531200417_v_0_30_0.sql h1:Fcse6apNWV+sXg+HzQzi2/0WMFu0GmnNSv4fn8OCa64=
20240531200418_v0_30_1.sql h1:jPAKmGkP0Ecq1mUk9o2qr5S0fEV46oXicdlGh1TmBQg=
File diff suppressed because it is too large Load Diff