// Code generated by sqlc. DO NOT EDIT.
// versions:
//   sqlc v1.29.0
// source: workflows.sql

package dbsqlc

import (
	"context"

	"github.com/jackc/pgx/v5/pgtype"
)

const addStepParents = `-- name: AddStepParents :exec
INSERT INTO "_StepOrder" ("A", "B")
SELECT
    step."id",
    $1::uuid
FROM
    unnest($2::text[]) AS parent_readable_id
JOIN
    "Step" AS step ON step."readableId" = parent_readable_id AND step."jobId" = $3::uuid
`

type AddStepParentsParams struct {
	ID      pgtype.UUID `json:"id"`
	Parents []string    `json:"parents"`
	Jobid   pgtype.UUID `json:"jobid"`
}

func (q *Queries) AddStepParents(ctx context.Context, db DBTX, arg AddStepParentsParams) error {
	_, err := db.Exec(ctx, addStepParents, arg.ID, arg.Parents, arg.Jobid)
	return err
}
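
// Usage sketch (illustrative, not part of the generated code): AddStepParents
// links a step to its parents by their readable IDs within the same job.
// Assuming callers hold a pgx pool or transaction satisfying DBTX, and that
// stepID/jobID are pre-built pgtype.UUID values (placeholders here):
//
//	err := q.AddStepParents(ctx, pool, AddStepParentsParams{
//		ID:      stepID,                       // the child step's UUID
//		Parents: []string{"step-a", "step-b"}, // readable IDs of the parents
//		Jobid:   jobID,                        // job scoping the readable IDs
//	})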

const addWorkflowTag = `-- name: AddWorkflowTag :exec
INSERT INTO "_WorkflowToWorkflowTag" ("A", "B")
SELECT $1::uuid, $2::uuid
ON CONFLICT DO NOTHING
`

type AddWorkflowTagParams struct {
	ID   pgtype.UUID `json:"id"`
	Tags pgtype.UUID `json:"tags"`
}

func (q *Queries) AddWorkflowTag(ctx context.Context, db DBTX, arg AddWorkflowTagParams) error {
	_, err := db.Exec(ctx, addWorkflowTag, arg.ID, arg.Tags)
	return err
}

const countCronWorkflows = `-- name: CountCronWorkflows :one
WITH latest_versions AS (
    SELECT DISTINCT ON("workflowId")
        workflowVersions."id" AS "workflowVersionId",
        workflowVersions."workflowId"
    FROM
        "WorkflowVersion" as workflowVersions
    JOIN
        "Workflow" as workflow ON workflow."id" = workflowVersions."workflowId"
    WHERE
        workflow."tenantId" = $1::uuid
        AND workflowVersions."deletedAt" IS NULL
    ORDER BY "workflowId", "order" DESC
)
SELECT
    count(c.*)
FROM
    latest_versions
JOIN
    "WorkflowTriggers" as t ON t."workflowVersionId" = latest_versions."workflowVersionId"
JOIN
    "WorkflowTriggerCronRef" as c ON c."parentId" = t."id"
JOIN
    "Workflow" w on w."id" = latest_versions."workflowId"
WHERE
    t."deletedAt" IS NULL
    AND w."tenantId" = $1::uuid
    AND ($2::uuid IS NULL OR c."id" = $2::uuid)
    AND ($3::uuid IS NULL OR w."id" = $3::uuid)
    AND ($4::jsonb IS NULL OR
        c."additionalMetadata" @> $4::jsonb)
    AND ($5::TEXT IS NULL OR c."name" = $5::TEXT)
    AND ($6::TEXT IS NULL OR w."name" = $6::TEXT)
`

type CountCronWorkflowsParams struct {
	Tenantid           pgtype.UUID `json:"tenantid"`
	Crontriggerid      pgtype.UUID `json:"crontriggerid"`
	Workflowid         pgtype.UUID `json:"workflowid"`
	AdditionalMetadata []byte      `json:"additionalMetadata"`
	CronName           pgtype.Text `json:"cronName"`
	WorkflowName       pgtype.Text `json:"workflowName"`
}

// Get all of the latest workflow versions for the tenant
func (q *Queries) CountCronWorkflows(ctx context.Context, db DBTX, arg CountCronWorkflowsParams) (int64, error) {
	row := db.QueryRow(ctx, countCronWorkflows,
		arg.Tenantid,
		arg.Crontriggerid,
		arg.Workflowid,
		arg.AdditionalMetadata,
		arg.CronName,
		arg.WorkflowName,
	)
	var count int64
	err := row.Scan(&count)
	return count, err
}
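
// Usage sketch (illustrative, not generated): the ($N IS NULL OR ...) pattern
// above makes every filter optional. Passing a pgtype value with Valid=false
// (or a nil []byte for the jsonb parameter) sends SQL NULL and disables that
// clause. For example, counting every cron trigger for a tenant only:
//
//	count, err := q.CountCronWorkflows(ctx, pool, CountCronWorkflowsParams{
//		Tenantid: tenantID, // assumed pgtype.UUID with Valid=true
//		// Crontriggerid, Workflowid, AdditionalMetadata, CronName and
//		// WorkflowName are left as zero values, so no extra filtering.
//	})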

const countRoundRobinGroupKeys = `-- name: CountRoundRobinGroupKeys :one
SELECT
    COUNT(DISTINCT "concurrencyGroupId") AS total
FROM
    "WorkflowRun" r1
JOIN
    "WorkflowVersion" workflowVersion ON r1."workflowVersionId" = workflowVersion."id"
WHERE
    r1."tenantId" = $1::uuid AND
    workflowVersion."deletedAt" IS NULL AND
    r1."deletedAt" IS NULL AND
    (
        $2::"WorkflowRunStatus" IS NULL OR
        r1."status" = $2::"WorkflowRunStatus"
    ) AND
    workflowVersion."workflowId" = $3::uuid
`

type CountRoundRobinGroupKeysParams struct {
	Tenantid   pgtype.UUID           `json:"tenantid"`
	Status     NullWorkflowRunStatus `json:"status"`
	Workflowid pgtype.UUID           `json:"workflowid"`
}

func (q *Queries) CountRoundRobinGroupKeys(ctx context.Context, db DBTX, arg CountRoundRobinGroupKeysParams) (int64, error) {
	row := db.QueryRow(ctx, countRoundRobinGroupKeys, arg.Tenantid, arg.Status, arg.Workflowid)
	var total int64
	err := row.Scan(&total)
	return total, err
}

const countWorkflowRunsRoundRobin = `-- name: CountWorkflowRunsRoundRobin :one
SELECT COUNT(*) AS total
FROM
    "WorkflowRun" r1
JOIN
    "WorkflowVersion" workflowVersion ON r1."workflowVersionId" = workflowVersion."id"
WHERE
    r1."tenantId" = $1::uuid AND
    workflowVersion."deletedAt" IS NULL AND
    r1."deletedAt" IS NULL AND
    (
        $2::"WorkflowRunStatus" IS NULL OR
        r1."status" = $2::"WorkflowRunStatus"
    ) AND
    workflowVersion."workflowId" = $3::uuid AND
    r1."concurrencyGroupId" IS NOT NULL AND
    (
        $4::text IS NULL OR
        r1."concurrencyGroupId" = $4::text
    )
`

type CountWorkflowRunsRoundRobinParams struct {
	Tenantid   pgtype.UUID           `json:"tenantid"`
	Status     NullWorkflowRunStatus `json:"status"`
	Workflowid pgtype.UUID           `json:"workflowid"`
	GroupKey   pgtype.Text           `json:"groupKey"`
}

func (q *Queries) CountWorkflowRunsRoundRobin(ctx context.Context, db DBTX, arg CountWorkflowRunsRoundRobinParams) (int64, error) {
	row := db.QueryRow(ctx, countWorkflowRunsRoundRobin,
		arg.Tenantid,
		arg.Status,
		arg.Workflowid,
		arg.GroupKey,
	)
	var total int64
	err := row.Scan(&total)
	return total, err
}

const countWorkflows = `-- name: CountWorkflows :one
SELECT
    count(workflows) OVER() AS total
FROM
    "Workflow" as workflows
WHERE
    workflows."tenantId" = $1 AND
    workflows."deletedAt" IS NULL AND
    (
        $2::text IS NULL OR
        workflows."id" IN (
            SELECT
                DISTINCT ON(t1."workflowId") t1."workflowId"
            FROM
                "WorkflowVersion" AS t1
                LEFT JOIN "WorkflowTriggers" AS j2 ON j2."workflowVersionId" = t1."id"
            WHERE
                (
                    j2."id" IN (
                        SELECT
                            t3."parentId"
                        FROM
                            "WorkflowTriggerEventRef" AS t3
                        WHERE
                            t3."eventKey" = $2::text
                            AND t3."parentId" IS NOT NULL
                    )
                    AND j2."id" IS NOT NULL
                    AND t1."workflowId" IS NOT NULL
                )
            ORDER BY
                t1."workflowId" DESC, t1."order" DESC
        )
    )
`

type CountWorkflowsParams struct {
	TenantId pgtype.UUID `json:"tenantId"`
	EventKey pgtype.Text `json:"eventKey"`
}

func (q *Queries) CountWorkflows(ctx context.Context, db DBTX, arg CountWorkflowsParams) (int64, error) {
	row := db.QueryRow(ctx, countWorkflows, arg.TenantId, arg.EventKey)
	var total int64
	err := row.Scan(&total)
	return total, err
}
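
// Usage sketch (illustrative, not generated): CountWorkflows counts a tenant's
// workflows, optionally restricted to workflows triggered by a given event
// key. A hedged example, assuming tenantID is a valid pgtype.UUID:
//
//	total, err := q.CountWorkflows(ctx, pool, CountWorkflowsParams{
//		TenantId: tenantID,
//		EventKey: pgtype.Text{String: "user:created", Valid: true},
//	})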

const createJob = `-- name: CreateJob :one
INSERT INTO "Job" (
    "id",
    "createdAt",
    "updatedAt",
    "deletedAt",
    "tenantId",
    "workflowVersionId",
    "name",
    "description",
    "timeout",
    "kind"
) VALUES (
    $1::uuid,
    coalesce($2::timestamp, CURRENT_TIMESTAMP),
    coalesce($3::timestamp, CURRENT_TIMESTAMP),
    $4::timestamp,
    $5::uuid,
    $6::uuid,
    $7::text,
    $8::text,
    $9::text,
    coalesce($10::"JobKind", 'DEFAULT')
) RETURNING id, "createdAt", "updatedAt", "deletedAt", "tenantId", "workflowVersionId", name, description, timeout, kind
`

type CreateJobParams struct {
	ID                pgtype.UUID      `json:"id"`
	CreatedAt         pgtype.Timestamp `json:"createdAt"`
	UpdatedAt         pgtype.Timestamp `json:"updatedAt"`
	Deletedat         pgtype.Timestamp `json:"deletedat"`
	Tenantid          pgtype.UUID      `json:"tenantid"`
	Workflowversionid pgtype.UUID      `json:"workflowversionid"`
	Name              string           `json:"name"`
	Description       string           `json:"description"`
	Timeout           string           `json:"timeout"`
	Kind              NullJobKind      `json:"kind"`
}

func (q *Queries) CreateJob(ctx context.Context, db DBTX, arg CreateJobParams) (*Job, error) {
	row := db.QueryRow(ctx, createJob,
		arg.ID,
		arg.CreatedAt,
		arg.UpdatedAt,
		arg.Deletedat,
		arg.Tenantid,
		arg.Workflowversionid,
		arg.Name,
		arg.Description,
		arg.Timeout,
		arg.Kind,
	)
	var i Job
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.DeletedAt,
		&i.TenantId,
		&i.WorkflowVersionId,
		&i.Name,
		&i.Description,
		&i.Timeout,
		&i.Kind,
	)
	return &i, err
}
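
// Usage sketch (illustrative, not generated): CreateJob lets the database fill
// in defaults — a NULL createdAt/updatedAt becomes CURRENT_TIMESTAMP and a NULL
// kind becomes 'DEFAULT' via coalesce. jobID, tenantID and versionID are
// assumed pgtype.UUID values and tx an assumed DBTX transaction:
//
//	job, err := q.CreateJob(ctx, tx, CreateJobParams{
//		ID:                jobID,
//		Tenantid:          tenantID,
//		Workflowversionid: versionID,
//		Name:              "process-order",
//		Timeout:           "60s",
//		// CreatedAt/UpdatedAt/Kind left as zero values -> database defaults apply.
//	})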

const createSchedules = `-- name: CreateSchedules :many
INSERT INTO "WorkflowTriggerScheduledRef" (
    "id",
    "parentId",
    "triggerAt",
    "input",
    "additionalMetadata",
    "priority"
) VALUES (
    gen_random_uuid(),
    $1::uuid,
    unnest($2::timestamp[]),
    $3::jsonb,
    $4::json,
    COALESCE($5::integer, 1)
) RETURNING id, "parentId", "triggerAt", "tickerId", input, "childIndex", "childKey", "parentStepRunId", "parentWorkflowRunId", "additionalMetadata", "createdAt", "deletedAt", "updatedAt", method, priority
`

type CreateSchedulesParams struct {
	Workflowrunid      pgtype.UUID        `json:"workflowrunid"`
	Triggertimes       []pgtype.Timestamp `json:"triggertimes"`
	Input              []byte             `json:"input"`
	Additionalmetadata []byte             `json:"additionalmetadata"`
	Priority           pgtype.Int4        `json:"priority"`
}

func (q *Queries) CreateSchedules(ctx context.Context, db DBTX, arg CreateSchedulesParams) ([]*WorkflowTriggerScheduledRef, error) {
	rows, err := db.Query(ctx, createSchedules,
		arg.Workflowrunid,
		arg.Triggertimes,
		arg.Input,
		arg.Additionalmetadata,
		arg.Priority,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*WorkflowTriggerScheduledRef
	for rows.Next() {
		var i WorkflowTriggerScheduledRef
		if err := rows.Scan(
			&i.ID,
			&i.ParentId,
			&i.TriggerAt,
			&i.TickerId,
			&i.Input,
			&i.ChildIndex,
			&i.ChildKey,
			&i.ParentStepRunId,
			&i.ParentWorkflowRunId,
			&i.AdditionalMetadata,
			&i.CreatedAt,
			&i.DeletedAt,
			&i.UpdatedAt,
			&i.Method,
			&i.Priority,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
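
// Usage sketch (illustrative, not generated): unnest($2::timestamp[]) expands
// the Triggertimes slice into one scheduled-trigger row per timestamp, all
// sharing the same parent, input and metadata. parentID and tx are assumed
// placeholders (a pgtype.UUID and a DBTX):
//
//	refs, err := q.CreateSchedules(ctx, tx, CreateSchedulesParams{
//		Workflowrunid: parentID, // bound to "parentId" in the insert
//		Triggertimes: []pgtype.Timestamp{
//			{Time: time.Now().Add(time.Hour), Valid: true},
//			{Time: time.Now().Add(2 * time.Hour), Valid: true},
//		},
//		Input: []byte(`{"key":"value"}`),
//	})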

const createStep = `-- name: CreateStep :one
INSERT INTO "Step" (
    "id",
    "createdAt",
    "updatedAt",
    "deletedAt",
    "readableId",
    "tenantId",
    "jobId",
    "actionId",
    "timeout",
    "customUserData",
    "retries",
    "scheduleTimeout",
    "retryBackoffFactor",
    "retryMaxBackoff"
) VALUES (
    $1::uuid,
    coalesce($2::timestamp, CURRENT_TIMESTAMP),
    coalesce($3::timestamp, CURRENT_TIMESTAMP),
    $4::timestamp,
    $5::text,
    $6::uuid,
    $7::uuid,
    $8::text,
    $9::text,
    coalesce($10::jsonb, '{}'),
    coalesce($11::integer, 0),
    coalesce($12::text, '5m'),
    $13,
    $14
) RETURNING id, "createdAt", "updatedAt", "deletedAt", "readableId", "tenantId", "jobId", "actionId", timeout, "customUserData", retries, "retryBackoffFactor", "retryMaxBackoff", "scheduleTimeout"
`

type CreateStepParams struct {
	ID                 pgtype.UUID      `json:"id"`
	CreatedAt          pgtype.Timestamp `json:"createdAt"`
	UpdatedAt          pgtype.Timestamp `json:"updatedAt"`
	Deletedat          pgtype.Timestamp `json:"deletedat"`
	Readableid         string           `json:"readableid"`
	Tenantid           pgtype.UUID      `json:"tenantid"`
	Jobid              pgtype.UUID      `json:"jobid"`
	Actionid           string           `json:"actionid"`
	Timeout            pgtype.Text      `json:"timeout"`
	CustomUserData     []byte           `json:"customUserData"`
	Retries            pgtype.Int4      `json:"retries"`
	ScheduleTimeout    pgtype.Text      `json:"scheduleTimeout"`
	RetryBackoffFactor pgtype.Float8    `json:"retryBackoffFactor"`
	RetryMaxBackoff    pgtype.Int4      `json:"retryMaxBackoff"`
}

func (q *Queries) CreateStep(ctx context.Context, db DBTX, arg CreateStepParams) (*Step, error) {
	row := db.QueryRow(ctx, createStep,
		arg.ID,
		arg.CreatedAt,
		arg.UpdatedAt,
		arg.Deletedat,
		arg.Readableid,
		arg.Tenantid,
		arg.Jobid,
		arg.Actionid,
		arg.Timeout,
		arg.CustomUserData,
		arg.Retries,
		arg.ScheduleTimeout,
		arg.RetryBackoffFactor,
		arg.RetryMaxBackoff,
	)
	var i Step
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.DeletedAt,
		&i.ReadableId,
		&i.TenantId,
		&i.JobId,
		&i.ActionId,
		&i.Timeout,
		&i.CustomUserData,
		&i.Retries,
		&i.RetryBackoffFactor,
		&i.RetryMaxBackoff,
		&i.ScheduleTimeout,
	)
	return &i, err
}
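
// Usage sketch (illustrative, not generated): retries defaults to 0,
// customUserData to '{}' and scheduleTimeout to '5m' when the corresponding
// parameters are NULL. stepID, tenantID, jobID and tx are assumed
// placeholders:
//
//	step, err := q.CreateStep(ctx, tx, CreateStepParams{
//		ID:         stepID,
//		Readableid: "send-email",
//		Tenantid:   tenantID,
//		Jobid:      jobID,
//		Actionid:   "email:send",
//		Timeout:    pgtype.Text{String: "30s", Valid: true},
//		Retries:    pgtype.Int4{Int32: 3, Valid: true},
//	})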
|
|
|
|
const createStepExpressions = `-- name: CreateStepExpressions :exec
|
|
INSERT INTO "StepExpression" (
|
|
"key",
|
|
"stepId",
|
|
"expression",
|
|
"kind"
|
|
) VALUES (
|
|
unnest($1::text[]),
|
|
$2::uuid,
|
|
unnest($3::text[]),
|
|
unnest(cast($4::text[] as"StepExpressionKind"[]))
|
|
) ON CONFLICT ("key", "stepId", "kind") DO UPDATE
|
|
SET
|
|
"expression" = EXCLUDED."expression"
|
|
`
|
|
|
|
type CreateStepExpressionsParams struct {
|
|
Keys []string `json:"keys"`
|
|
Stepid pgtype.UUID `json:"stepid"`
|
|
Expressions []string `json:"expressions"`
|
|
Kinds []string `json:"kinds"`
|
|
}
|
|
|
|
func (q *Queries) CreateStepExpressions(ctx context.Context, db DBTX, arg CreateStepExpressionsParams) error {
|
|
_, err := db.Exec(ctx, createStepExpressions,
|
|
arg.Keys,
|
|
arg.Stepid,
|
|
arg.Expressions,
|
|
arg.Kinds,
|
|
)
|
|
return err
|
|
}
|
|
|
|
const createStepRateLimit = `-- name: CreateStepRateLimit :one
|
|
INSERT INTO "StepRateLimit" (
|
|
"units",
|
|
"stepId",
|
|
"rateLimitKey",
|
|
"tenantId",
|
|
"kind"
|
|
) VALUES (
|
|
$1::integer,
|
|
$2::uuid,
|
|
$3::text,
|
|
$4::uuid,
|
|
$5
|
|
) RETURNING units, "stepId", "rateLimitKey", "tenantId", kind
|
|
`
|
|
|
|
type CreateStepRateLimitParams struct {
|
|
Units int32 `json:"units"`
|
|
Stepid pgtype.UUID `json:"stepid"`
|
|
Ratelimitkey string `json:"ratelimitkey"`
|
|
Tenantid pgtype.UUID `json:"tenantid"`
|
|
Kind StepRateLimitKind `json:"kind"`
|
|
}
|
|
|
|
func (q *Queries) CreateStepRateLimit(ctx context.Context, db DBTX, arg CreateStepRateLimitParams) (*StepRateLimit, error) {
|
|
row := db.QueryRow(ctx, createStepRateLimit,
|
|
arg.Units,
|
|
arg.Stepid,
|
|
arg.Ratelimitkey,
|
|
arg.Tenantid,
|
|
arg.Kind,
|
|
)
|
|
var i StepRateLimit
|
|
err := row.Scan(
|
|
&i.Units,
|
|
&i.StepId,
|
|
&i.RateLimitKey,
|
|
&i.TenantId,
|
|
&i.Kind,
|
|
)
|
|
return &i, err
|
|
}
|
|
|
|
const createWorkflow = `-- name: CreateWorkflow :one
|
|
INSERT INTO "Workflow" (
|
|
"id",
|
|
"createdAt",
|
|
"updatedAt",
|
|
"deletedAt",
|
|
"tenantId",
|
|
"name",
|
|
"description"
|
|
) VALUES (
|
|
$1::uuid,
|
|
coalesce($2::timestamp, CURRENT_TIMESTAMP),
|
|
coalesce($3::timestamp, CURRENT_TIMESTAMP),
|
|
$4::timestamp,
|
|
$5::uuid,
|
|
$6::text,
|
|
$7::text
|
|
) RETURNING id, "createdAt", "updatedAt", "deletedAt", "tenantId", name, description, "isPaused"
|
|
`
|
|
|
|
type CreateWorkflowParams struct {
|
|
ID pgtype.UUID `json:"id"`
|
|
CreatedAt pgtype.Timestamp `json:"createdAt"`
|
|
UpdatedAt pgtype.Timestamp `json:"updatedAt"`
|
|
Deletedat pgtype.Timestamp `json:"deletedat"`
|
|
Tenantid pgtype.UUID `json:"tenantid"`
|
|
Name string `json:"name"`
|
|
Description string `json:"description"`
|
|
}
|
|
|
|
func (q *Queries) CreateWorkflow(ctx context.Context, db DBTX, arg CreateWorkflowParams) (*Workflow, error) {
|
|
row := db.QueryRow(ctx, createWorkflow,
|
|
arg.ID,
|
|
arg.CreatedAt,
|
|
arg.UpdatedAt,
|
|
arg.Deletedat,
|
|
arg.Tenantid,
|
|
arg.Name,
|
|
arg.Description,
|
|
)
|
|
var i Workflow
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.DeletedAt,
|
|
&i.TenantId,
|
|
&i.Name,
|
|
&i.Description,
|
|
&i.IsPaused,
|
|
)
|
|
return &i, err
|
|
}
|
|
|
|
const createWorkflowConcurrency = `-- name: CreateWorkflowConcurrency :one
|
|
INSERT INTO "WorkflowConcurrency" (
|
|
"id",
|
|
"createdAt",
|
|
"updatedAt",
|
|
"workflowVersionId",
|
|
"getConcurrencyGroupId",
|
|
"maxRuns",
|
|
"limitStrategy",
|
|
"concurrencyGroupExpression"
|
|
) VALUES (
|
|
gen_random_uuid(),
|
|
coalesce($1::timestamp, CURRENT_TIMESTAMP),
|
|
coalesce($2::timestamp, CURRENT_TIMESTAMP),
|
|
$3::uuid,
|
|
$4::uuid,
|
|
coalesce($5::integer, 1),
|
|
coalesce($6::"ConcurrencyLimitStrategy", 'CANCEL_IN_PROGRESS'),
|
|
$7::text
|
|
) RETURNING id, "createdAt", "updatedAt", "workflowVersionId", "getConcurrencyGroupId", "maxRuns", "limitStrategy", "concurrencyGroupExpression"
|
|
`
|
|
|
|
type CreateWorkflowConcurrencyParams struct {
|
|
CreatedAt pgtype.Timestamp `json:"createdAt"`
|
|
UpdatedAt pgtype.Timestamp `json:"updatedAt"`
|
|
Workflowversionid pgtype.UUID `json:"workflowversionid"`
|
|
GetConcurrencyGroupId pgtype.UUID `json:"getConcurrencyGroupId"`
|
|
MaxRuns pgtype.Int4 `json:"maxRuns"`
|
|
LimitStrategy NullConcurrencyLimitStrategy `json:"limitStrategy"`
|
|
ConcurrencyGroupExpression pgtype.Text `json:"concurrencyGroupExpression"`
|
|
}
|
|
|
|
func (q *Queries) CreateWorkflowConcurrency(ctx context.Context, db DBTX, arg CreateWorkflowConcurrencyParams) (*WorkflowConcurrency, error) {
|
|
row := db.QueryRow(ctx, createWorkflowConcurrency,
|
|
arg.CreatedAt,
|
|
arg.UpdatedAt,
|
|
arg.Workflowversionid,
|
|
arg.GetConcurrencyGroupId,
|
|
arg.MaxRuns,
|
|
arg.LimitStrategy,
|
|
arg.ConcurrencyGroupExpression,
|
|
)
|
|
var i WorkflowConcurrency
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.WorkflowVersionId,
|
|
&i.GetConcurrencyGroupId,
|
|
&i.MaxRuns,
|
|
&i.LimitStrategy,
|
|
&i.ConcurrencyGroupExpression,
|
|
)
|
|
return &i, err
|
|
}
|
|
|
|
const createWorkflowTriggerCronRef = `-- name: CreateWorkflowTriggerCronRef :one
|
|
INSERT INTO "WorkflowTriggerCronRef" (
|
|
"parentId",
|
|
"cron",
|
|
"name",
|
|
"input",
|
|
"additionalMetadata",
|
|
"id",
|
|
"method",
|
|
"priority"
|
|
) VALUES (
|
|
$1::uuid,
|
|
$2::text,
|
|
$3::text,
|
|
$4::jsonb,
|
|
$5::jsonb,
|
|
gen_random_uuid(),
|
|
COALESCE($6::"WorkflowTriggerCronRefMethods", 'DEFAULT'),
|
|
COALESCE($7::integer, 1)
|
|
) RETURNING "parentId", cron, "tickerId", input, enabled, "additionalMetadata", "createdAt", "deletedAt", "updatedAt", name, id, method, priority
|
|
`
|
|
|
|
type CreateWorkflowTriggerCronRefParams struct {
|
|
Workflowtriggersid pgtype.UUID `json:"workflowtriggersid"`
|
|
Crontrigger string `json:"crontrigger"`
|
|
Name pgtype.Text `json:"name"`
|
|
Input []byte `json:"input"`
|
|
AdditionalMetadata []byte `json:"additionalMetadata"`
|
|
Method NullWorkflowTriggerCronRefMethods `json:"method"`
|
|
Priority pgtype.Int4 `json:"priority"`
|
|
}
|
|
|
|
func (q *Queries) CreateWorkflowTriggerCronRef(ctx context.Context, db DBTX, arg CreateWorkflowTriggerCronRefParams) (*WorkflowTriggerCronRef, error) {
|
|
row := db.QueryRow(ctx, createWorkflowTriggerCronRef,
|
|
arg.Workflowtriggersid,
|
|
arg.Crontrigger,
|
|
arg.Name,
|
|
arg.Input,
|
|
arg.AdditionalMetadata,
|
|
arg.Method,
|
|
arg.Priority,
|
|
)
|
|
var i WorkflowTriggerCronRef
|
|
err := row.Scan(
|
|
&i.ParentId,
|
|
&i.Cron,
|
|
&i.TickerId,
|
|
&i.Input,
|
|
&i.Enabled,
|
|
&i.AdditionalMetadata,
|
|
&i.CreatedAt,
|
|
&i.DeletedAt,
|
|
&i.UpdatedAt,
|
|
&i.Name,
|
|
&i.ID,
|
|
&i.Method,
|
|
&i.Priority,
|
|
)
|
|
return &i, err
|
|
}
|
|
|
|
const createWorkflowTriggerCronRefForWorkflow = `-- name: CreateWorkflowTriggerCronRefForWorkflow :one
|
|
WITH latest_version AS (
|
|
SELECT "id" FROM "WorkflowVersion"
|
|
WHERE "workflowId" = $7::uuid
|
|
ORDER BY "order" DESC
|
|
LIMIT 1
|
|
),
|
|
latest_trigger AS (
|
|
SELECT "id" FROM "WorkflowTriggers"
|
|
WHERE "workflowVersionId" = (SELECT "id" FROM latest_version)
|
|
ORDER BY "createdAt" DESC
|
|
LIMIT 1
|
|
)
|
|
INSERT INTO "WorkflowTriggerCronRef" (
|
|
"parentId",
|
|
"cron",
|
|
"name",
|
|
"input",
|
|
"additionalMetadata",
|
|
"id",
|
|
"method",
|
|
"priority"
|
|
) VALUES (
|
|
(SELECT "id" FROM latest_trigger),
|
|
$1::text,
|
|
$2::text,
|
|
$3::jsonb,
|
|
$4::jsonb,
|
|
gen_random_uuid(),
|
|
COALESCE($5::"WorkflowTriggerCronRefMethods", 'DEFAULT'),
|
|
COALESCE($6::integer, 1)
|
|
) RETURNING "parentId", cron, "tickerId", input, enabled, "additionalMetadata", "createdAt", "deletedAt", "updatedAt", name, id, method, priority
|
|
`
|
|
|
|
type CreateWorkflowTriggerCronRefForWorkflowParams struct {
|
|
Crontrigger string `json:"crontrigger"`
|
|
Name pgtype.Text `json:"name"`
|
|
Input []byte `json:"input"`
|
|
AdditionalMetadata []byte `json:"additionalMetadata"`
|
|
Method NullWorkflowTriggerCronRefMethods `json:"method"`
|
|
Priority pgtype.Int4 `json:"priority"`
|
|
Workflowid pgtype.UUID `json:"workflowid"`
|
|
}
|
|
|
|
func (q *Queries) CreateWorkflowTriggerCronRefForWorkflow(ctx context.Context, db DBTX, arg CreateWorkflowTriggerCronRefForWorkflowParams) (*WorkflowTriggerCronRef, error) {
|
|
row := db.QueryRow(ctx, createWorkflowTriggerCronRefForWorkflow,
|
|
arg.Crontrigger,
|
|
arg.Name,
|
|
arg.Input,
|
|
arg.AdditionalMetadata,
|
|
arg.Method,
|
|
arg.Priority,
|
|
arg.Workflowid,
|
|
)
|
|
var i WorkflowTriggerCronRef
|
|
err := row.Scan(
|
|
&i.ParentId,
|
|
&i.Cron,
|
|
&i.TickerId,
|
|
&i.Input,
|
|
&i.Enabled,
|
|
&i.AdditionalMetadata,
|
|
&i.CreatedAt,
|
|
&i.DeletedAt,
|
|
&i.UpdatedAt,
|
|
&i.Name,
|
|
&i.ID,
|
|
&i.Method,
|
|
&i.Priority,
|
|
)
|
|
return &i, err
|
|
}
|
|
|
|
const createWorkflowTriggerEventRef = `-- name: CreateWorkflowTriggerEventRef :one
|
|
INSERT INTO "WorkflowTriggerEventRef" (
|
|
"parentId",
|
|
"eventKey"
|
|
) VALUES (
|
|
$1::uuid,
|
|
$2::text
|
|
) RETURNING "parentId", "eventKey"
|
|
`
|
|
|
|
type CreateWorkflowTriggerEventRefParams struct {
|
|
Workflowtriggersid pgtype.UUID `json:"workflowtriggersid"`
|
|
Eventtrigger string `json:"eventtrigger"`
|
|
}
|
|
|
|
func (q *Queries) CreateWorkflowTriggerEventRef(ctx context.Context, db DBTX, arg CreateWorkflowTriggerEventRefParams) (*WorkflowTriggerEventRef, error) {
|
|
row := db.QueryRow(ctx, createWorkflowTriggerEventRef, arg.Workflowtriggersid, arg.Eventtrigger)
|
|
var i WorkflowTriggerEventRef
|
|
err := row.Scan(&i.ParentId, &i.EventKey)
|
|
return &i, err
|
|
}
|
|
|
|
const createWorkflowTriggerScheduledRef = `-- name: CreateWorkflowTriggerScheduledRef :one
|
|
INSERT INTO "WorkflowTriggerScheduledRef" (
|
|
"id",
|
|
"parentId",
|
|
"triggerAt",
|
|
"input",
|
|
"additionalMetadata",
|
|
"priority"
|
|
) VALUES (
|
|
gen_random_uuid(),
|
|
$1::uuid,
|
|
$2::timestamp,
|
|
$3::jsonb,
|
|
$4::jsonb,
|
|
COALESCE($5::integer, 1)
|
|
) RETURNING id, "parentId", "triggerAt", "tickerId", input, "childIndex", "childKey", "parentStepRunId", "parentWorkflowRunId", "additionalMetadata", "createdAt", "deletedAt", "updatedAt", method, priority
|
|
`
|
|
|
|
type CreateWorkflowTriggerScheduledRefParams struct {
|
|
Workflowversionid pgtype.UUID `json:"workflowversionid"`
|
|
Scheduledtrigger pgtype.Timestamp `json:"scheduledtrigger"`
|
|
Input []byte `json:"input"`
|
|
Additionalmetadata []byte `json:"additionalmetadata"`
|
|
Priority pgtype.Int4 `json:"priority"`
|
|
}
|
|
|
|
func (q *Queries) CreateWorkflowTriggerScheduledRef(ctx context.Context, db DBTX, arg CreateWorkflowTriggerScheduledRefParams) (*WorkflowTriggerScheduledRef, error) {
|
|
row := db.QueryRow(ctx, createWorkflowTriggerScheduledRef,
|
|
arg.Workflowversionid,
|
|
arg.Scheduledtrigger,
|
|
arg.Input,
|
|
arg.Additionalmetadata,
|
|
arg.Priority,
|
|
)
|
|
var i WorkflowTriggerScheduledRef
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.ParentId,
|
|
&i.TriggerAt,
|
|
&i.TickerId,
|
|
&i.Input,
|
|
&i.ChildIndex,
|
|
&i.ChildKey,
|
|
&i.ParentStepRunId,
|
|
&i.ParentWorkflowRunId,
|
|
&i.AdditionalMetadata,
|
|
&i.CreatedAt,
|
|
&i.DeletedAt,
|
|
&i.UpdatedAt,
|
|
&i.Method,
|
|
&i.Priority,
|
|
)
|
|
return &i, err
|
|
}
|
|
|
|
const createWorkflowTriggerScheduledRefForWorkflow = `-- name: CreateWorkflowTriggerScheduledRefForWorkflow :one
|
|
WITH latest_version AS (
|
|
SELECT "id" FROM "WorkflowVersion"
|
|
WHERE "workflowId" = $6::uuid
|
|
ORDER BY "order" DESC
|
|
LIMIT 1
|
|
),
|
|
latest_trigger AS (
|
|
SELECT "id" FROM "WorkflowTriggers"
|
|
WHERE "workflowVersionId" = (SELECT "id" FROM latest_version)
|
|
ORDER BY "createdAt" DESC
|
|
LIMIT 1
|
|
)
|
|
INSERT INTO "WorkflowTriggerScheduledRef" (
|
|
"id",
|
|
"parentId",
|
|
"triggerAt",
|
|
"input",
|
|
"additionalMetadata",
|
|
"method",
|
|
"priority"
|
|
) VALUES (
|
|
gen_random_uuid(),
|
|
(SELECT "id" FROM latest_version),
|
|
$1::timestamp,
|
|
$2::jsonb,
|
|
$3::jsonb,
|
|
COALESCE($4::"WorkflowTriggerScheduledRefMethods", 'DEFAULT'),
|
|
COALESCE($5::integer, 1)
|
|
) RETURNING id, "parentId", "triggerAt", "tickerId", input, "childIndex", "childKey", "parentStepRunId", "parentWorkflowRunId", "additionalMetadata", "createdAt", "deletedAt", "updatedAt", method, priority
|
|
`
|
|
|
|
type CreateWorkflowTriggerScheduledRefForWorkflowParams struct {
|
|
Scheduledtrigger pgtype.Timestamp `json:"scheduledtrigger"`
|
|
Input []byte `json:"input"`
|
|
Additionalmetadata []byte `json:"additionalmetadata"`
|
|
Method NullWorkflowTriggerScheduledRefMethods `json:"method"`
|
|
Priority pgtype.Int4 `json:"priority"`
|
|
Workflowid pgtype.UUID `json:"workflowid"`
|
|
}
|
|
|
|
func (q *Queries) CreateWorkflowTriggerScheduledRefForWorkflow(ctx context.Context, db DBTX, arg CreateWorkflowTriggerScheduledRefForWorkflowParams) (*WorkflowTriggerScheduledRef, error) {
|
|
row := db.QueryRow(ctx, createWorkflowTriggerScheduledRefForWorkflow,
|
|
arg.Scheduledtrigger,
|
|
arg.Input,
|
|
arg.Additionalmetadata,
|
|
arg.Method,
|
|
arg.Priority,
|
|
arg.Workflowid,
|
|
)
|
|
var i WorkflowTriggerScheduledRef
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.ParentId,
|
|
&i.TriggerAt,
|
|
&i.TickerId,
|
|
&i.Input,
|
|
&i.ChildIndex,
|
|
&i.ChildKey,
|
|
&i.ParentStepRunId,
|
|
&i.ParentWorkflowRunId,
|
|
&i.AdditionalMetadata,
|
|
&i.CreatedAt,
|
|
&i.DeletedAt,
|
|
&i.UpdatedAt,
|
|
&i.Method,
|
|
&i.Priority,
|
|
)
|
|
return &i, err
|
|
}
|
|
|
|
const createWorkflowTriggers = `-- name: CreateWorkflowTriggers :one
|
|
INSERT INTO "WorkflowTriggers" (
|
|
"id",
|
|
"createdAt",
|
|
"updatedAt",
|
|
"deletedAt",
|
|
"workflowVersionId",
|
|
"tenantId"
|
|
) VALUES (
|
|
$1::uuid,
|
|
CURRENT_TIMESTAMP,
|
|
CURRENT_TIMESTAMP,
|
|
NULL,
|
|
$2::uuid,
|
|
$3::uuid
|
|
) RETURNING id, "createdAt", "updatedAt", "deletedAt", "workflowVersionId", "tenantId"
|
|
`
|
|
|
|
type CreateWorkflowTriggersParams struct {
|
|
ID pgtype.UUID `json:"id"`
|
|
Workflowversionid pgtype.UUID `json:"workflowversionid"`
|
|
Tenantid pgtype.UUID `json:"tenantid"`
|
|
}
|
|
|
|
func (q *Queries) CreateWorkflowTriggers(ctx context.Context, db DBTX, arg CreateWorkflowTriggersParams) (*WorkflowTriggers, error) {
|
|
row := db.QueryRow(ctx, createWorkflowTriggers, arg.ID, arg.Workflowversionid, arg.Tenantid)
|
|
var i WorkflowTriggers
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.DeletedAt,
|
|
&i.WorkflowVersionId,
|
|
&i.TenantId,
|
|
)
|
|
return &i, err
|
|
}
|
|
|
|
const createWorkflowVersion = `-- name: CreateWorkflowVersion :one
|
|
INSERT INTO "WorkflowVersion" (
|
|
"id",
|
|
"createdAt",
|
|
"updatedAt",
|
|
"deletedAt",
|
|
"checksum",
|
|
"version",
|
|
"workflowId",
|
|
"scheduleTimeout",
|
|
"sticky",
|
|
"kind",
|
|
"defaultPriority"
|
|
) VALUES (
|
|
$1::uuid,
|
|
coalesce($2::timestamp, CURRENT_TIMESTAMP),
|
|
coalesce($3::timestamp, CURRENT_TIMESTAMP),
|
|
$4::timestamp,
|
|
$5::text,
|
|
$6::text,
|
|
$7::uuid,
|
|
coalesce($8::text, '5m'),
|
|
$9::"StickyStrategy",
|
|
coalesce($10::"WorkflowKind", 'DAG'),
|
|
$11::integer
|
|
) RETURNING id, "createdAt", "updatedAt", "deletedAt", version, "order", "workflowId", checksum, "scheduleTimeout", "onFailureJobId", sticky, kind, "defaultPriority"
|
|
`
|
|
|
|
type CreateWorkflowVersionParams struct {
|
|
ID pgtype.UUID `json:"id"`
|
|
CreatedAt pgtype.Timestamp `json:"createdAt"`
|
|
UpdatedAt pgtype.Timestamp `json:"updatedAt"`
|
|
Deletedat pgtype.Timestamp `json:"deletedat"`
|
|
Checksum string `json:"checksum"`
|
|
Version pgtype.Text `json:"version"`
|
|
Workflowid pgtype.UUID `json:"workflowid"`
|
|
ScheduleTimeout pgtype.Text `json:"scheduleTimeout"`
|
|
Sticky NullStickyStrategy `json:"sticky"`
|
|
Kind NullWorkflowKind `json:"kind"`
|
|
DefaultPriority pgtype.Int4 `json:"defaultPriority"`
|
|
}
|
|
|
|
func (q *Queries) CreateWorkflowVersion(ctx context.Context, db DBTX, arg CreateWorkflowVersionParams) (*WorkflowVersion, error) {
|
|
row := db.QueryRow(ctx, createWorkflowVersion,
|
|
arg.ID,
|
|
arg.CreatedAt,
|
|
arg.UpdatedAt,
|
|
arg.Deletedat,
|
|
arg.Checksum,
|
|
arg.Version,
|
|
arg.Workflowid,
|
|
arg.ScheduleTimeout,
|
|
arg.Sticky,
|
|
arg.Kind,
|
|
arg.DefaultPriority,
|
|
)
|
|
var i WorkflowVersion
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.DeletedAt,
|
|
&i.Version,
|
|
&i.Order,
|
|
&i.WorkflowId,
|
|
&i.Checksum,
|
|
&i.ScheduleTimeout,
|
|
&i.OnFailureJobId,
|
|
&i.Sticky,
|
|
&i.Kind,
|
|
&i.DefaultPriority,
|
|
)
|
|
return &i, err
|
|
}
|
|
|
|
const deleteWorkflowTriggerCronRef = `-- name: DeleteWorkflowTriggerCronRef :exec
|
|
DELETE FROM "WorkflowTriggerCronRef"
|
|
WHERE
|
|
"id" = $1::uuid
|
|
`
|
|
|
|
func (q *Queries) DeleteWorkflowTriggerCronRef(ctx context.Context, db DBTX, id pgtype.UUID) error {
|
|
_, err := db.Exec(ctx, deleteWorkflowTriggerCronRef, id)
|
|
return err
|
|
}
|
|
|
|
const getLatestWorkflowVersionForWorkflows = `-- name: GetLatestWorkflowVersionForWorkflows :many
|
|
WITH latest_versions AS (
|
|
SELECT DISTINCT ON (workflowVersions."workflowId")
|
|
workflowVersions."id" AS workflowVersionId,
|
|
workflowVersions."workflowId",
|
|
workflowVersions."order"
|
|
FROM
|
|
"WorkflowVersion" as workflowVersions
|
|
WHERE
|
|
workflowVersions."workflowId" = ANY($2::uuid[]) AND
|
|
workflowVersions."deletedAt" IS NULL
|
|
ORDER BY
|
|
workflowVersions."workflowId", workflowVersions."order" DESC
|
|
)
|
|
SELECT
|
|
workflowVersions."id"
|
|
FROM
|
|
latest_versions
|
|
JOIN
|
|
"WorkflowVersion" as workflowVersions ON workflowVersions."id" = latest_versions.workflowVersionId
|
|
JOIN
|
|
"Workflow" as w ON w."id" = workflowVersions."workflowId"
|
|
LEFT JOIN
|
|
"WorkflowConcurrency" as wc ON wc."workflowVersionId" = workflowVersions."id"
|
|
WHERE
|
|
w."tenantId" = $1::uuid AND
|
|
w."deletedAt" IS NULL AND
|
|
workflowVersions."deletedAt" IS NULL
|
|
`
|
|
|
|
type GetLatestWorkflowVersionForWorkflowsParams struct {
|
|
Tenantid pgtype.UUID `json:"tenantid"`
|
|
Workflowids []pgtype.UUID `json:"workflowids"`
|
|
}
|
|
|
|
func (q *Queries) GetLatestWorkflowVersionForWorkflows(ctx context.Context, db DBTX, arg GetLatestWorkflowVersionForWorkflowsParams) ([]pgtype.UUID, error) {
|
|
rows, err := db.Query(ctx, getLatestWorkflowVersionForWorkflows, arg.Tenantid, arg.Workflowids)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []pgtype.UUID
|
|
for rows.Next() {
|
|
var id pgtype.UUID
|
|
if err := rows.Scan(&id); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, id)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getWorkflowById = `-- name: GetWorkflowById :one
|
|
SELECT
|
|
w.id, w."createdAt", w."updatedAt", w."deletedAt", w."tenantId", w.name, w.description, w."isPaused",
|
|
wv."id" as "workflowVersionId"
|
|
FROM
|
|
"Workflow" as w
|
|
LEFT JOIN "WorkflowVersion" as wv ON w."id" = wv."workflowId"
|
|
WHERE
|
|
w."id" = $1::uuid AND
|
|
w."deletedAt" IS NULL
|
|
ORDER BY
|
|
wv."order" DESC
|
|
LIMIT 1
|
|
`
|
|
|
|
type GetWorkflowByIdRow struct {
|
|
Workflow Workflow `json:"workflow"`
|
|
WorkflowVersionId pgtype.UUID `json:"workflowVersionId"`
|
|
}
|
|
|
|
func (q *Queries) GetWorkflowById(ctx context.Context, db DBTX, id pgtype.UUID) (*GetWorkflowByIdRow, error) {
|
|
row := db.QueryRow(ctx, getWorkflowById, id)
|
|
var i GetWorkflowByIdRow
|
|
err := row.Scan(
|
|
&i.Workflow.ID,
|
|
&i.Workflow.CreatedAt,
|
|
&i.Workflow.UpdatedAt,
|
|
&i.Workflow.DeletedAt,
|
|
&i.Workflow.TenantId,
|
|
&i.Workflow.Name,
|
|
&i.Workflow.Description,
|
|
&i.Workflow.IsPaused,
|
|
&i.WorkflowVersionId,
|
|
)
|
|
return &i, err
|
|
}
|
|
|
|
const getWorkflowByName = `-- name: GetWorkflowByName :one
|
|
SELECT
|
|
id, "createdAt", "updatedAt", "deletedAt", "tenantId", name, description, "isPaused"
|
|
FROM
|
|
"Workflow" as workflows
|
|
WHERE
|
|
workflows."tenantId" = $1::uuid AND
|
|
workflows."name" = $2::text AND
|
|
workflows."deletedAt" IS NULL
|
|
`
|
|
|
|
type GetWorkflowByNameParams struct {
|
|
Tenantid pgtype.UUID `json:"tenantid"`
|
|
Name string `json:"name"`
|
|
}
|
|
|
|
func (q *Queries) GetWorkflowByName(ctx context.Context, db DBTX, arg GetWorkflowByNameParams) (*Workflow, error) {
|
|
row := db.QueryRow(ctx, getWorkflowByName, arg.Tenantid, arg.Name)
|
|
var i Workflow
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.DeletedAt,
|
|
&i.TenantId,
|
|
&i.Name,
|
|
&i.Description,
|
|
&i.IsPaused,
|
|
)
|
|
return &i, err
|
|
}
|
|
|
|
const getWorkflowLatestVersion = `-- name: GetWorkflowLatestVersion :one
|
|
SELECT
|
|
"id"
|
|
FROM
|
|
"WorkflowVersion" as workflowVersions
|
|
WHERE
|
|
workflowVersions."workflowId" = $1::uuid AND
|
|
workflowVersions."deletedAt" IS NULL
|
|
ORDER BY
|
|
workflowVersions."order" DESC
|
|
LIMIT 1
|
|
`
|
|
|
|
func (q *Queries) GetWorkflowLatestVersion(ctx context.Context, db DBTX, workflowid pgtype.UUID) (pgtype.UUID, error) {
|
|
row := db.QueryRow(ctx, getWorkflowLatestVersion, workflowid)
|
|
var id pgtype.UUID
|
|
err := row.Scan(&id)
|
|
return id, err
|
|
}
|
|
|
|
const getWorkflowVersionById = `-- name: GetWorkflowVersionById :one
|
|
SELECT
|
|
wv.id, wv."createdAt", wv."updatedAt", wv."deletedAt", wv.version, wv."order", wv."workflowId", wv.checksum, wv."scheduleTimeout", wv."onFailureJobId", wv.sticky, wv.kind, wv."defaultPriority",
|
|
w.id, w."createdAt", w."updatedAt", w."deletedAt", w."tenantId", w.name, w.description, w."isPaused",
|
|
wc."id" as "concurrencyId",
|
|
wc."maxRuns" as "concurrencyMaxRuns",
|
|
wc."getConcurrencyGroupId" as "concurrencyGroupId",
|
|
wc."limitStrategy" as "concurrencyLimitStrategy"
|
|
FROM
|
|
"WorkflowVersion" as wv
|
|
JOIN "Workflow" as w on w."id" = wv."workflowId"
|
|
LEFT JOIN "WorkflowConcurrency" as wc ON wc."workflowVersionId" = wv."id"
|
|
WHERE
|
|
wv."id" = $1::uuid AND
|
|
wv."deletedAt" IS NULL
|
|
LIMIT 1
|
|
`
|
|
|
|
type GetWorkflowVersionByIdRow struct {
|
|
WorkflowVersion WorkflowVersion `json:"workflow_version"`
|
|
Workflow Workflow `json:"workflow"`
|
|
ConcurrencyId pgtype.UUID `json:"concurrencyId"`
|
|
ConcurrencyMaxRuns pgtype.Int4 `json:"concurrencyMaxRuns"`
|
|
ConcurrencyGroupId pgtype.UUID `json:"concurrencyGroupId"`
|
|
ConcurrencyLimitStrategy NullConcurrencyLimitStrategy `json:"concurrencyLimitStrategy"`
|
|
}
|
|
|
|
func (q *Queries) GetWorkflowVersionById(ctx context.Context, db DBTX, id pgtype.UUID) (*GetWorkflowVersionByIdRow, error) {
|
|
row := db.QueryRow(ctx, getWorkflowVersionById, id)
|
|
var i GetWorkflowVersionByIdRow
|
|
err := row.Scan(
|
|
&i.WorkflowVersion.ID,
|
|
&i.WorkflowVersion.CreatedAt,
|
|
&i.WorkflowVersion.UpdatedAt,
|
|
&i.WorkflowVersion.DeletedAt,
|
|
&i.WorkflowVersion.Version,
|
|
&i.WorkflowVersion.Order,
|
|
&i.WorkflowVersion.WorkflowId,
|
|
&i.WorkflowVersion.Checksum,
|
|
&i.WorkflowVersion.ScheduleTimeout,
|
|
&i.WorkflowVersion.OnFailureJobId,
|
|
&i.WorkflowVersion.Sticky,
|
|
&i.WorkflowVersion.Kind,
|
|
&i.WorkflowVersion.DefaultPriority,
|
|
&i.Workflow.ID,
|
|
&i.Workflow.CreatedAt,
|
|
&i.Workflow.UpdatedAt,
|
|
&i.Workflow.DeletedAt,
|
|
&i.Workflow.TenantId,
|
|
&i.Workflow.Name,
|
|
&i.Workflow.Description,
|
|
&i.Workflow.IsPaused,
|
|
&i.ConcurrencyId,
|
|
&i.ConcurrencyMaxRuns,
|
|
&i.ConcurrencyGroupId,
|
|
&i.ConcurrencyLimitStrategy,
|
|
)
|
|
return &i, err
|
|
}
|
|
|
|
const getWorkflowVersionCronTriggerRefs = `-- name: GetWorkflowVersionCronTriggerRefs :many
|
|
SELECT
|
|
wtc."parentId", wtc.cron, wtc."tickerId", wtc.input, wtc.enabled, wtc."additionalMetadata", wtc."createdAt", wtc."deletedAt", wtc."updatedAt", wtc.name, wtc.id, wtc.method, wtc.priority
|
|
FROM
|
|
"WorkflowTriggerCronRef" as wtc
|
|
JOIN "WorkflowTriggers" as wt ON wt."id" = wtc."parentId"
|
|
WHERE
|
|
wt."workflowVersionId" = $1::uuid
|
|
`
|
|
|
|
func (q *Queries) GetWorkflowVersionCronTriggerRefs(ctx context.Context, db DBTX, workflowversionid pgtype.UUID) ([]*WorkflowTriggerCronRef, error) {
|
|
rows, err := db.Query(ctx, getWorkflowVersionCronTriggerRefs, workflowversionid)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []*WorkflowTriggerCronRef
|
|
for rows.Next() {
|
|
var i WorkflowTriggerCronRef
|
|
if err := rows.Scan(
|
|
&i.ParentId,
|
|
&i.Cron,
|
|
&i.TickerId,
|
|
&i.Input,
|
|
&i.Enabled,
|
|
&i.AdditionalMetadata,
|
|
&i.CreatedAt,
|
|
&i.DeletedAt,
|
|
&i.UpdatedAt,
|
|
&i.Name,
|
|
&i.ID,
|
|
&i.Method,
|
|
&i.Priority,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, &i)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getWorkflowVersionEventTriggerRefs = `-- name: GetWorkflowVersionEventTriggerRefs :many
|
|
SELECT
|
|
wtc."parentId", wtc."eventKey"
|
|
FROM
|
|
"WorkflowTriggerEventRef" as wtc
|
|
JOIN "WorkflowTriggers" as wt ON wt."id" = wtc."parentId"
|
|
WHERE
|
|
wt."workflowVersionId" = $1::uuid
|
|
`
|
|
|
|
func (q *Queries) GetWorkflowVersionEventTriggerRefs(ctx context.Context, db DBTX, workflowversionid pgtype.UUID) ([]*WorkflowTriggerEventRef, error) {
|
|
rows, err := db.Query(ctx, getWorkflowVersionEventTriggerRefs, workflowversionid)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []*WorkflowTriggerEventRef
|
|
for rows.Next() {
|
|
var i WorkflowTriggerEventRef
|
|
if err := rows.Scan(&i.ParentId, &i.EventKey); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, &i)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getWorkflowVersionForEngine = `-- name: GetWorkflowVersionForEngine :many
|
|
SELECT
|
|
workflowversions.id, workflowversions."createdAt", workflowversions."updatedAt", workflowversions."deletedAt", workflowversions.version, workflowversions."order", workflowversions."workflowId", workflowversions.checksum, workflowversions."scheduleTimeout", workflowversions."onFailureJobId", workflowversions.sticky, workflowversions.kind, workflowversions."defaultPriority",
|
|
w."name" as "workflowName",
|
|
wc."limitStrategy" as "concurrencyLimitStrategy",
|
|
wc."maxRuns" as "concurrencyMaxRuns",
|
|
wc."getConcurrencyGroupId" as "concurrencyGroupId",
|
|
wc."concurrencyGroupExpression" as "concurrencyGroupExpression"
|
|
FROM
|
|
"WorkflowVersion" as workflowVersions
|
|
JOIN
|
|
"Workflow" as w ON w."id" = workflowVersions."workflowId"
|
|
LEFT JOIN
|
|
"WorkflowConcurrency" as wc ON wc."workflowVersionId" = workflowVersions."id"
|
|
WHERE
|
|
workflowVersions."id" = ANY($1::uuid[]) AND
|
|
w."tenantId" = $2::uuid AND
|
|
w."deletedAt" IS NULL AND
|
|
workflowVersions."deletedAt" IS NULL
|
|
`
|
|
|
|
type GetWorkflowVersionForEngineParams struct {
|
|
Ids []pgtype.UUID `json:"ids"`
|
|
Tenantid pgtype.UUID `json:"tenantid"`
|
|
}
|
|
|
|
type GetWorkflowVersionForEngineRow struct {
|
|
WorkflowVersion WorkflowVersion `json:"workflow_version"`
|
|
WorkflowName string `json:"workflowName"`
|
|
ConcurrencyLimitStrategy NullConcurrencyLimitStrategy `json:"concurrencyLimitStrategy"`
|
|
ConcurrencyMaxRuns pgtype.Int4 `json:"concurrencyMaxRuns"`
|
|
ConcurrencyGroupId pgtype.UUID `json:"concurrencyGroupId"`
|
|
ConcurrencyGroupExpression pgtype.Text `json:"concurrencyGroupExpression"`
|
|
}
|
|
|
|
func (q *Queries) GetWorkflowVersionForEngine(ctx context.Context, db DBTX, arg GetWorkflowVersionForEngineParams) ([]*GetWorkflowVersionForEngineRow, error) {
|
|
rows, err := db.Query(ctx, getWorkflowVersionForEngine, arg.Ids, arg.Tenantid)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []*GetWorkflowVersionForEngineRow
|
|
for rows.Next() {
|
|
var i GetWorkflowVersionForEngineRow
|
|
if err := rows.Scan(
|
|
&i.WorkflowVersion.ID,
|
|
&i.WorkflowVersion.CreatedAt,
|
|
&i.WorkflowVersion.UpdatedAt,
|
|
&i.WorkflowVersion.DeletedAt,
|
|
&i.WorkflowVersion.Version,
|
|
&i.WorkflowVersion.Order,
|
|
&i.WorkflowVersion.WorkflowId,
|
|
&i.WorkflowVersion.Checksum,
|
|
&i.WorkflowVersion.ScheduleTimeout,
|
|
&i.WorkflowVersion.OnFailureJobId,
|
|
&i.WorkflowVersion.Sticky,
|
|
&i.WorkflowVersion.Kind,
|
|
&i.WorkflowVersion.DefaultPriority,
|
|
&i.WorkflowName,
|
|
&i.ConcurrencyLimitStrategy,
|
|
&i.ConcurrencyMaxRuns,
|
|
&i.ConcurrencyGroupId,
|
|
&i.ConcurrencyGroupExpression,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, &i)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getWorkflowVersionScheduleTriggerRefs = `-- name: GetWorkflowVersionScheduleTriggerRefs :many
|
|
SELECT
|
|
wtc.id, wtc."parentId", wtc."triggerAt", wtc."tickerId", wtc.input, wtc."childIndex", wtc."childKey", wtc."parentStepRunId", wtc."parentWorkflowRunId", wtc."additionalMetadata", wtc."createdAt", wtc."deletedAt", wtc."updatedAt", wtc.method, wtc.priority
|
|
FROM
|
|
"WorkflowTriggerScheduledRef" as wtc
|
|
JOIN "WorkflowTriggers" as wt ON wt."id" = wtc."parentId"
|
|
WHERE
|
|
wt."workflowVersionId" = $1::uuid
|
|
`
|
|
|
|
func (q *Queries) GetWorkflowVersionScheduleTriggerRefs(ctx context.Context, db DBTX, workflowversionid pgtype.UUID) ([]*WorkflowTriggerScheduledRef, error) {
|
|
rows, err := db.Query(ctx, getWorkflowVersionScheduleTriggerRefs, workflowversionid)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []*WorkflowTriggerScheduledRef
|
|
for rows.Next() {
|
|
var i WorkflowTriggerScheduledRef
|
|
if err := rows.Scan(
|
|
&i.ID,
|
|
&i.ParentId,
|
|
&i.TriggerAt,
|
|
&i.TickerId,
|
|
&i.Input,
|
|
&i.ChildIndex,
|
|
&i.ChildKey,
|
|
&i.ParentStepRunId,
|
|
&i.ParentWorkflowRunId,
|
|
&i.AdditionalMetadata,
|
|
&i.CreatedAt,
|
|
&i.DeletedAt,
|
|
&i.UpdatedAt,
|
|
&i.Method,
|
|
&i.Priority,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, &i)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getWorkflowWorkerCount = `-- name: GetWorkflowWorkerCount :one
|
|
WITH UniqueWorkers AS (
|
|
SELECT DISTINCT w."id" AS workerId
|
|
FROM "Worker" w
|
|
JOIN "_ActionToWorker" atw ON w."id" = atw."B"
|
|
JOIN "Action" a ON atw."A" = a."id"
|
|
JOIN "Step" s ON a."actionId" = s."actionId"
|
|
JOIN "Job" j ON s."jobId" = j."id"
|
|
JOIN "WorkflowVersion" workflowVersion ON j."workflowVersionId" = workflowVersion."id"
|
|
WHERE
|
|
w."tenantId" = $1::uuid
|
|
AND workflowVersion."deletedAt" IS NULL
|
|
AND w."deletedAt" IS NULL
|
|
AND w."dispatcherId" IS NOT NULL
|
|
AND w."lastHeartbeatAt" > NOW() - INTERVAL '5 seconds'
|
|
AND w."isActive" = true
|
|
AND w."isPaused" = false
|
|
AND workflowVersion."workflowId" = $2::uuid
|
|
),
|
|
workers AS (
|
|
SELECT SUM("maxRuns") AS maxR
|
|
FROM "Worker"
|
|
WHERE "id" IN (SELECT workerId FROM UniqueWorkers)
|
|
),
|
|
slots AS (
|
|
SELECT COUNT(*) AS usedSlotCount
|
|
FROM "SemaphoreQueueItem" sqi
|
|
WHERE sqi."workerId" IN (SELECT workerId FROM UniqueWorkers)
|
|
)
|
|
SELECT
|
|
COALESCE(maxR, 0) AS totalSlotCount,
|
|
COALESCE(maxR, 0) - COALESCE(usedSlotCount, 0) AS freeSlotCount
|
|
FROM workers, slots
|
|
`
|
|
|
|
type GetWorkflowWorkerCountParams struct {
|
|
Tenantid pgtype.UUID `json:"tenantid"`
|
|
Workflowid pgtype.UUID `json:"workflowid"`
|
|
}
|
|
|
|
type GetWorkflowWorkerCountRow struct {
|
|
Totalslotcount int64 `json:"totalslotcount"`
|
|
Freeslotcount int32 `json:"freeslotcount"`
|
|
}
|
|
|
|
func (q *Queries) GetWorkflowWorkerCount(ctx context.Context, db DBTX, arg GetWorkflowWorkerCountParams) (*GetWorkflowWorkerCountRow, error) {
|
|
row := db.QueryRow(ctx, getWorkflowWorkerCount, arg.Tenantid, arg.Workflowid)
|
|
var i GetWorkflowWorkerCountRow
|
|
err := row.Scan(&i.Totalslotcount, &i.Freeslotcount)
|
|
return &i, err
|
|
}
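
// Usage sketch (illustrative, not generated): GetWorkflowWorkerCount sums the
// "maxRuns" of workers that are active (recent heartbeat, not paused) and
// registered for the workflow's actions, then subtracts the semaphore slots
// already in use. tenantID and workflowID are assumed pgtype.UUID values:
//
//	counts, err := q.GetWorkflowWorkerCount(ctx, pool, GetWorkflowWorkerCountParams{
//		Tenantid:   tenantID,
//		Workflowid: workflowID,
//	})
//	// counts.Totalslotcount is total capacity; counts.Freeslotcount is what remains free.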
|
|
|
|
const getWorkflowsByNames = `-- name: GetWorkflowsByNames :many
|
|
SELECT
|
|
workflows.id, workflows."createdAt", workflows."updatedAt", workflows."deletedAt", workflows."tenantId", workflows.name, workflows.description, workflows."isPaused"
|
|
FROM
|
|
"Workflow" as workflows
|
|
WHERE
|
|
workflows."tenantId" = $1::uuid AND
|
|
workflows."name" = ANY($2::text[]) AND
|
|
workflows."deletedAt" IS NULL
|
|
`
|
|
|
|
type GetWorkflowsByNamesParams struct {
|
|
Tenantid pgtype.UUID `json:"tenantid"`
|
|
Names []string `json:"names"`
|
|
}
|
|
|
|
func (q *Queries) GetWorkflowsByNames(ctx context.Context, db DBTX, arg GetWorkflowsByNamesParams) ([]*Workflow, error) {
|
|
rows, err := db.Query(ctx, getWorkflowsByNames, arg.Tenantid, arg.Names)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []*Workflow
|
|
for rows.Next() {
|
|
var i Workflow
|
|
if err := rows.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.DeletedAt,
|
|
&i.TenantId,
|
|
&i.Name,
|
|
&i.Description,
|
|
&i.IsPaused,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, &i)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const handleWorkflowUnpaused = `-- name: HandleWorkflowUnpaused :exec
|
|
WITH matching_qis AS (
|
|
-- We know that we're going to need to scan all the queue items in this queue
|
|
-- for the tenant, so we write this query in such a way that the index is used.
|
|
SELECT
|
|
qi."id"
|
|
FROM
|
|
"InternalQueueItem" qi
|
|
WHERE
|
|
qi."isQueued" = true
|
|
AND qi."tenantId" = $2::uuid
|
|
AND qi."queue" = 'WORKFLOW_RUN_PAUSED'
|
|
AND qi."priority" = 1
|
|
ORDER BY
|
|
qi."id" DESC
|
|
)
|
|
UPDATE "InternalQueueItem"
|
|
SET "priority" = 4
|
|
FROM
|
|
matching_qis
|
|
WHERE
|
|
"InternalQueueItem"."id" = matching_qis."id"
|
|
AND "data"->>'workflow_id' = $1::text
|
|
`
|
|
|
|
type HandleWorkflowUnpausedParams struct {
|
|
Workflowid string `json:"workflowid"`
|
|
Tenantid pgtype.UUID `json:"tenantid"`
|
|
}
|
|
|
|
// We update all the queue items to have a higher priority so we can unpause them
|
|
func (q *Queries) HandleWorkflowUnpaused(ctx context.Context, db DBTX, arg HandleWorkflowUnpausedParams) error {
|
|
_, err := db.Exec(ctx, handleWorkflowUnpaused, arg.Workflowid, arg.Tenantid)
|
|
return err
|
|
}
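
// Usage sketch (illustrative, not generated): HandleWorkflowUnpaused bumps the
// WORKFLOW_RUN_PAUSED internal queue items for one workflow from priority 1 to
// priority 4 so the queue picks them back up. Workflowid is a plain string
// because it is compared against the item's JSONB payload. workflowIDStr and
// tenantID are assumed placeholders:
//
//	err := q.HandleWorkflowUnpaused(ctx, tx, HandleWorkflowUnpausedParams{
//		Workflowid: workflowIDStr, // canonical UUID string
//		Tenantid:   tenantID,
//	})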
|
|
|
|
const linkOnFailureJob = `-- name: LinkOnFailureJob :one
|
|
UPDATE "WorkflowVersion"
|
|
SET "onFailureJobId" = $1::uuid
|
|
WHERE "id" = $2::uuid
|
|
RETURNING id, "createdAt", "updatedAt", "deletedAt", version, "order", "workflowId", checksum, "scheduleTimeout", "onFailureJobId", sticky, kind, "defaultPriority"
|
|
`
|
|
|
|
type LinkOnFailureJobParams struct {
|
|
Jobid pgtype.UUID `json:"jobid"`
|
|
Workflowversionid pgtype.UUID `json:"workflowversionid"`
|
|
}
|
|
|
|
func (q *Queries) LinkOnFailureJob(ctx context.Context, db DBTX, arg LinkOnFailureJobParams) (*WorkflowVersion, error) {
|
|
row := db.QueryRow(ctx, linkOnFailureJob, arg.Jobid, arg.Workflowversionid)
|
|
var i WorkflowVersion
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.DeletedAt,
|
|
&i.Version,
|
|
&i.Order,
|
|
&i.WorkflowId,
|
|
&i.Checksum,
|
|
&i.ScheduleTimeout,
|
|
&i.OnFailureJobId,
|
|
&i.Sticky,
|
|
&i.Kind,
|
|
&i.DefaultPriority,
|
|
)
|
|
return &i, err
|
|
}
|
|
|
|
const listCronWorkflows = `-- name: ListCronWorkflows :many
|
|
WITH latest_versions AS (
|
|
SELECT DISTINCT ON("workflowId")
|
|
workflowVersions."id" AS "workflowVersionId",
|
|
workflowVersions."workflowId"
|
|
FROM
|
|
"WorkflowVersion" as workflowVersions
|
|
JOIN
|
|
"Workflow" as workflow ON workflow."id" = workflowVersions."workflowId"
|
|
WHERE
|
|
workflow."tenantId" = $1::uuid
|
|
AND workflowVersions."deletedAt" IS NULL
|
|
ORDER BY "workflowId", "order" DESC
|
|
)
|
|
SELECT
|
|
latest_versions."workflowVersionId",
|
|
w."name" as "workflowName",
|
|
w."id" as "workflowId",
|
|
w."tenantId",
|
|
t."id" as "triggerId",
|
|
c."id" as "cronId",
|
|
t.id, t."createdAt", t."updatedAt", t."deletedAt", t."workflowVersionId", t."tenantId",
|
|
c."parentId", c.cron, c."tickerId", c.input, c.enabled, c."additionalMetadata", c."createdAt", c."deletedAt", c."updatedAt", c.name, c.id, c.method, c.priority
|
|
FROM
|
|
latest_versions
|
|
JOIN
|
|
"WorkflowTriggers" as t ON t."workflowVersionId" = latest_versions."workflowVersionId"
|
|
JOIN
|
|
"WorkflowTriggerCronRef" as c ON c."parentId" = t."id"
|
|
JOIN
|
|
"Workflow" w on w."id" = latest_versions."workflowId"
|
|
WHERE
|
|
t."deletedAt" IS NULL
|
|
AND w."tenantId" = $1::uuid
|
|
AND ($2::uuid IS NULL OR c."id" = $2::uuid)
|
|
AND ($3::uuid IS NULL OR w."id" = $3::uuid)
|
|
AND ($4::jsonb IS NULL OR
|
|
c."additionalMetadata" @> $4::jsonb)
|
|
AND ($5::TEXT IS NULL OR c."name" = $5::TEXT)
|
|
AND ($6::TEXT IS NULL OR w."name" = $6::TEXT)
|
|
ORDER BY
|
|
case when $7 = 'name ASC' THEN w."name" END ASC,
|
|
case when $7 = 'name DESC' THEN w."name" END DESC,
|
|
case when $7 = 'createdAt ASC' THEN c."createdAt" END ASC ,
|
|
case when $7 = 'createdAt DESC' THEN c."createdAt" END DESC,
|
|
t."id" ASC
|
|
OFFSET
|
|
COALESCE($8, 0)
|
|
LIMIT
|
|
COALESCE($9, 50)
|
|
`
|
|
|
|
type ListCronWorkflowsParams struct {
|
|
Tenantid pgtype.UUID `json:"tenantid"`
|
|
Crontriggerid pgtype.UUID `json:"crontriggerid"`
|
|
Workflowid pgtype.UUID `json:"workflowid"`
|
|
AdditionalMetadata []byte `json:"additionalMetadata"`
|
|
CronName pgtype.Text `json:"cronName"`
|
|
WorkflowName pgtype.Text `json:"workflowName"`
|
|
Orderby interface{} `json:"orderby"`
|
|
Offset interface{} `json:"offset"`
|
|
Limit interface{} `json:"limit"`
|
|
}
|
|
|
|
type ListCronWorkflowsRow struct {
|
|
WorkflowVersionId pgtype.UUID `json:"workflowVersionId"`
|
|
WorkflowName string `json:"workflowName"`
|
|
WorkflowId pgtype.UUID `json:"workflowId"`
|
|
TenantId pgtype.UUID `json:"tenantId"`
|
|
TriggerId pgtype.UUID `json:"triggerId"`
|
|
CronId pgtype.UUID `json:"cronId"`
|
|
ID pgtype.UUID `json:"id"`
|
|
CreatedAt pgtype.Timestamp `json:"createdAt"`
|
|
UpdatedAt pgtype.Timestamp `json:"updatedAt"`
|
|
DeletedAt pgtype.Timestamp `json:"deletedAt"`
|
|
WorkflowVersionId_2 pgtype.UUID `json:"workflowVersionId_2"`
|
|
TenantId_2 pgtype.UUID `json:"tenantId_2"`
|
|
ParentId pgtype.UUID `json:"parentId"`
|
|
Cron string `json:"cron"`
|
|
TickerId pgtype.UUID `json:"tickerId"`
|
|
Input []byte `json:"input"`
|
|
Enabled bool `json:"enabled"`
|
|
AdditionalMetadata []byte `json:"additionalMetadata"`
|
|
CreatedAt_2 pgtype.Timestamp `json:"createdAt_2"`
|
|
DeletedAt_2 pgtype.Timestamp `json:"deletedAt_2"`
|
|
UpdatedAt_2 pgtype.Timestamp `json:"updatedAt_2"`
|
|
Name pgtype.Text `json:"name"`
|
|
ID_2 pgtype.UUID `json:"id_2"`
|
|
Method WorkflowTriggerCronRefMethods `json:"method"`
|
|
Priority int32 `json:"priority"`
|
|
}
|
|
|
|
// Get all of the latest workflow versions for the tenant
|
|
func (q *Queries) ListCronWorkflows(ctx context.Context, db DBTX, arg ListCronWorkflowsParams) ([]*ListCronWorkflowsRow, error) {
|
|
rows, err := db.Query(ctx, listCronWorkflows,
|
|
arg.Tenantid,
|
|
arg.Crontriggerid,
|
|
arg.Workflowid,
|
|
arg.AdditionalMetadata,
|
|
arg.CronName,
|
|
arg.WorkflowName,
|
|
arg.Orderby,
|
|
arg.Offset,
|
|
arg.Limit,
|
|
)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []*ListCronWorkflowsRow
|
|
for rows.Next() {
|
|
var i ListCronWorkflowsRow
|
|
if err := rows.Scan(
|
|
&i.WorkflowVersionId,
|
|
&i.WorkflowName,
|
|
&i.WorkflowId,
|
|
&i.TenantId,
|
|
&i.TriggerId,
|
|
&i.CronId,
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.DeletedAt,
|
|
&i.WorkflowVersionId_2,
|
|
&i.TenantId_2,
|
|
&i.ParentId,
|
|
&i.Cron,
|
|
&i.TickerId,
|
|
&i.Input,
|
|
&i.Enabled,
|
|
&i.AdditionalMetadata,
|
|
&i.CreatedAt_2,
|
|
&i.DeletedAt_2,
|
|
&i.UpdatedAt_2,
|
|
&i.Name,
|
|
&i.ID_2,
|
|
&i.Method,
|
|
&i.Priority,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, &i)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const listPausedWorkflows = `-- name: ListPausedWorkflows :many
|
|
SELECT
|
|
"id"
|
|
FROM
|
|
"Workflow"
|
|
WHERE
|
|
"tenantId" = $1::uuid AND
|
|
"isPaused" = true AND
|
|
"deletedAt" IS NULL
|
|
`
|
|
|
|
func (q *Queries) ListPausedWorkflows(ctx context.Context, db DBTX, tenantid pgtype.UUID) ([]pgtype.UUID, error) {
|
|
rows, err := db.Query(ctx, listPausedWorkflows, tenantid)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []pgtype.UUID
|
|
for rows.Next() {
|
|
var id pgtype.UUID
|
|
if err := rows.Scan(&id); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, id)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const listWorkflows = `-- name: ListWorkflows :many
|
|
SELECT
|
|
workflows.id, workflows."createdAt", workflows."updatedAt", workflows."deletedAt", workflows."tenantId", workflows.name, workflows.description, workflows."isPaused"
|
|
FROM
|
|
"Workflow" as workflows
|
|
WHERE
|
|
workflows."tenantId" = $1::uuid AND
|
|
workflows."deletedAt" IS NULL AND
|
|
(
|
|
$2::text IS NULL OR
|
|
workflows.name like concat('%', $2::text, '%')
|
|
)
|
|
ORDER BY
|
|
case when $3 = 'createdAt ASC' THEN workflows."createdAt" END ASC ,
|
|
case when $3 = 'createdAt DESC' then workflows."createdAt" END DESC
|
|
OFFSET
|
|
COALESCE($4, 0)
|
|
LIMIT
|
|
COALESCE($5, 50)
|
|
`
|
|
|
|
type ListWorkflowsParams struct {
|
|
Tenantid pgtype.UUID `json:"tenantid"`
|
|
Search pgtype.Text `json:"search"`
|
|
Orderby interface{} `json:"orderby"`
|
|
Offset interface{} `json:"offset"`
|
|
Limit interface{} `json:"limit"`
|
|
}
|
|
|
|
type ListWorkflowsRow struct {
|
|
Workflow Workflow `json:"workflow"`
|
|
}
|
|
|
|
func (q *Queries) ListWorkflows(ctx context.Context, db DBTX, arg ListWorkflowsParams) ([]*ListWorkflowsRow, error) {
|
|
rows, err := db.Query(ctx, listWorkflows,
|
|
arg.Tenantid,
|
|
arg.Search,
|
|
arg.Orderby,
|
|
arg.Offset,
|
|
arg.Limit,
|
|
)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []*ListWorkflowsRow
|
|
for rows.Next() {
|
|
var i ListWorkflowsRow
|
|
if err := rows.Scan(
|
|
&i.Workflow.ID,
|
|
&i.Workflow.CreatedAt,
|
|
&i.Workflow.UpdatedAt,
|
|
&i.Workflow.DeletedAt,
|
|
&i.Workflow.TenantId,
|
|
&i.Workflow.Name,
|
|
&i.Workflow.Description,
|
|
&i.Workflow.IsPaused,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, &i)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
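
// Usage sketch (illustrative, not generated): Orderby, Offset and Limit are
// interface{} because the query compares/coalesces them dynamically; Orderby
// only takes effect when it equals one of the literals in the CASE arms
// ('createdAt ASC' or 'createdAt DESC'). tenantID is an assumed pgtype.UUID:
//
//	rows, err := q.ListWorkflows(ctx, pool, ListWorkflowsParams{
//		Tenantid: tenantID,
//		Search:   pgtype.Text{String: "order", Valid: true},
//		Orderby:  "createdAt DESC",
//		Offset:   0,
//		Limit:    20,
//	})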
|
|
|
|
const listWorkflowsForEvent = `-- name: ListWorkflowsForEvent :many
|
|
WITH latest_versions AS (
|
|
SELECT DISTINCT ON("workflowId")
|
|
workflowVersions."id" AS "workflowVersionId"
|
|
FROM
|
|
"WorkflowVersion" as workflowVersions
|
|
JOIN
|
|
"Workflow" as workflow ON workflow."id" = workflowVersions."workflowId"
|
|
WHERE
|
|
workflow."tenantId" = $2::uuid
|
|
AND workflowVersions."deletedAt" IS NULL
|
|
ORDER BY "workflowId", "order" DESC
|
|
)
|
|
SELECT
|
|
latest_versions."workflowVersionId"
|
|
FROM
|
|
latest_versions
|
|
JOIN
|
|
"WorkflowTriggers" as triggers ON triggers."workflowVersionId" = latest_versions."workflowVersionId"
|
|
JOIN
|
|
"WorkflowTriggerEventRef" as eventRef ON eventRef."parentId" = triggers."id"
|
|
WHERE
|
|
eventRef."eventKey" = $1::text
|
|
`
|
|
|
|
type ListWorkflowsForEventParams struct {
|
|
Eventkey string `json:"eventkey"`
|
|
Tenantid pgtype.UUID `json:"tenantid"`
|
|
}
|
|
|
|
// Get all of the latest workflow versions for the tenant
|
|
// select the workflow versions that have the event trigger
|
|
func (q *Queries) ListWorkflowsForEvent(ctx context.Context, db DBTX, arg ListWorkflowsForEventParams) ([]pgtype.UUID, error) {
|
|
rows, err := db.Query(ctx, listWorkflowsForEvent, arg.Eventkey, arg.Tenantid)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []pgtype.UUID
|
|
for rows.Next() {
|
|
var workflowVersionId pgtype.UUID
|
|
if err := rows.Scan(&workflowVersionId); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, workflowVersionId)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}

const listWorkflowsLatestRuns = `-- name: ListWorkflowsLatestRuns :many
SELECT
    DISTINCT ON (workflow."id") runs."createdAt", runs."updatedAt", runs."deletedAt", runs."tenantId", runs."workflowVersionId", runs.status, runs.error, runs."startedAt", runs."finishedAt", runs."concurrencyGroupId", runs."displayName", runs.id, runs."childIndex", runs."childKey", runs."parentId", runs."parentStepRunId", runs."additionalMetadata", runs.duration, runs.priority, runs."insertOrder", workflow."id" as "workflowId"
FROM
    "WorkflowRun" as runs
LEFT JOIN
    "WorkflowVersion" as workflowVersion ON runs."workflowVersionId" = workflowVersion."id"
LEFT JOIN
    "Workflow" as workflow ON workflowVersion."workflowId" = workflow."id"
WHERE
    runs."tenantId" = $1 AND
    runs."deletedAt" IS NULL AND
    workflow."deletedAt" IS NULL AND
    workflowVersion."deletedAt" IS NULL AND
    (
        $2::text IS NULL OR
        workflow."id" IN (
            SELECT
                DISTINCT ON(t1."workflowId") t1."workflowId"
            FROM
                "WorkflowVersion" AS t1
                LEFT JOIN "WorkflowTriggers" AS j2 ON j2."workflowVersionId" = t1."id"
            WHERE
                (
                    j2."id" IN (
                        SELECT
                            t3."parentId"
                        FROM
                            "WorkflowTriggerEventRef" AS t3
                        WHERE
                            t3."eventKey" = $2::text
                            AND t3."parentId" IS NOT NULL
                    )
                    AND j2."id" IS NOT NULL
                    AND t1."workflowId" IS NOT NULL
                )
            ORDER BY
                t1."workflowId" DESC, t1."order" DESC
        )
    )
ORDER BY
    workflow."id" DESC, runs."createdAt" DESC
`

type ListWorkflowsLatestRunsParams struct {
    TenantId pgtype.UUID `json:"tenantId"`
    EventKey pgtype.Text `json:"eventKey"`
}

type ListWorkflowsLatestRunsRow struct {
    WorkflowRun WorkflowRun `json:"workflow_run"`
    WorkflowId  pgtype.UUID `json:"workflowId"`
}

func (q *Queries) ListWorkflowsLatestRuns(ctx context.Context, db DBTX, arg ListWorkflowsLatestRunsParams) ([]*ListWorkflowsLatestRunsRow, error) {
    rows, err := db.Query(ctx, listWorkflowsLatestRuns, arg.TenantId, arg.EventKey)
    if err != nil {
        return nil, err
    }
    defer rows.Close()
    var items []*ListWorkflowsLatestRunsRow
    for rows.Next() {
        var i ListWorkflowsLatestRunsRow
        if err := rows.Scan(
            &i.WorkflowRun.CreatedAt,
            &i.WorkflowRun.UpdatedAt,
            &i.WorkflowRun.DeletedAt,
            &i.WorkflowRun.TenantId,
            &i.WorkflowRun.WorkflowVersionId,
            &i.WorkflowRun.Status,
            &i.WorkflowRun.Error,
            &i.WorkflowRun.StartedAt,
            &i.WorkflowRun.FinishedAt,
            &i.WorkflowRun.ConcurrencyGroupId,
            &i.WorkflowRun.DisplayName,
            &i.WorkflowRun.ID,
            &i.WorkflowRun.ChildIndex,
            &i.WorkflowRun.ChildKey,
            &i.WorkflowRun.ParentId,
            &i.WorkflowRun.ParentStepRunId,
            &i.WorkflowRun.AdditionalMetadata,
            &i.WorkflowRun.Duration,
            &i.WorkflowRun.Priority,
            &i.WorkflowRun.InsertOrder,
            &i.WorkflowId,
        ); err != nil {
            return nil, err
        }
        items = append(items, &i)
    }
    if err := rows.Err(); err != nil {
        return nil, err
    }
    return items, nil
}
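
// Illustrative sketch (editor-added, not part of the sqlc output): fetching the
// most recent run per workflow and indexing it by workflow id. The helper name
// is hypothetical; leaving EventKey invalid (NULL) includes every workflow in
// the tenant, per the $2::text IS NULL branch above.
func exampleLatestRunByWorkflow(ctx context.Context, q *Queries, db DBTX, tenantId pgtype.UUID) (map[pgtype.UUID]WorkflowRun, error) {
    rows, err := q.ListWorkflowsLatestRuns(ctx, db, ListWorkflowsLatestRunsParams{
        TenantId: tenantId,
    })
    if err != nil {
        return nil, err
    }
    latest := make(map[pgtype.UUID]WorkflowRun, len(rows))
    for _, row := range rows {
        latest[row.WorkflowId] = row.WorkflowRun
    }
    return latest, nil
}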

const lockWorkflowVersion = `-- name: LockWorkflowVersion :one
SELECT
    "id"
FROM
    "WorkflowVersion"
WHERE
    "workflowId" = $1::uuid AND
    "deletedAt" IS NULL
ORDER BY
    "order" DESC
LIMIT 1
FOR UPDATE
`

func (q *Queries) LockWorkflowVersion(ctx context.Context, db DBTX, workflowid pgtype.UUID) (pgtype.UUID, error) {
    row := db.QueryRow(ctx, lockWorkflowVersion, workflowid)
    var id pgtype.UUID
    err := row.Scan(&id)
    return id, err
}
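
// Illustrative sketch (editor-added, not part of the sqlc output):
// LockWorkflowVersion uses SELECT ... FOR UPDATE, so it only makes sense inside
// a transaction; pass the transaction as the DBTX so the row lock is held until
// commit or rollback. The surrounding mutation is hypothetical.
func exampleUpdateLatestVersionLocked(ctx context.Context, q *Queries, tx DBTX, workflowId pgtype.UUID) error {
    versionId, err := q.LockWorkflowVersion(ctx, tx, workflowId)
    if err != nil {
        return err
    }
    // ... mutate rows belonging to versionId while the lock is held ...
    _ = versionId
    return nil
}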

const moveCronTriggerToNewWorkflowTriggers = `-- name: MoveCronTriggerToNewWorkflowTriggers :exec
WITH triggersToUpdate AS (
    SELECT cronTrigger."id" FROM "WorkflowTriggerCronRef" cronTrigger
    JOIN "WorkflowTriggers" triggers ON triggers."id" = cronTrigger."parentId"
    WHERE triggers."workflowVersionId" = $2::uuid
    AND cronTrigger."method" = 'API'
)
UPDATE "WorkflowTriggerCronRef"
SET "parentId" = $1::uuid
WHERE "id" IN (SELECT "id" FROM triggersToUpdate)
`

type MoveCronTriggerToNewWorkflowTriggersParams struct {
    Newworkflowtriggerid pgtype.UUID `json:"newworkflowtriggerid"`
    Oldworkflowversionid pgtype.UUID `json:"oldworkflowversionid"`
}

func (q *Queries) MoveCronTriggerToNewWorkflowTriggers(ctx context.Context, db DBTX, arg MoveCronTriggerToNewWorkflowTriggersParams) error {
    _, err := db.Exec(ctx, moveCronTriggerToNewWorkflowTriggers, arg.Newworkflowtriggerid, arg.Oldworkflowversionid)
    return err
}

const moveScheduledTriggerToNewWorkflowTriggers = `-- name: MoveScheduledTriggerToNewWorkflowTriggers :exec
WITH triggersToUpdate AS (
    SELECT scheduledTrigger."id" FROM "WorkflowTriggerScheduledRef" scheduledTrigger
    JOIN "WorkflowTriggers" triggers ON triggers."id" = scheduledTrigger."parentId"
    WHERE triggers."workflowVersionId" = $2::uuid
    AND scheduledTrigger."method" = 'API'
)
UPDATE "WorkflowTriggerScheduledRef"
SET "parentId" = $1::uuid
WHERE "id" IN (SELECT "id" FROM triggersToUpdate)
`

type MoveScheduledTriggerToNewWorkflowTriggersParams struct {
    Newworkflowtriggerid pgtype.UUID `json:"newworkflowtriggerid"`
    Oldworkflowversionid pgtype.UUID `json:"oldworkflowversionid"`
}

func (q *Queries) MoveScheduledTriggerToNewWorkflowTriggers(ctx context.Context, db DBTX, arg MoveScheduledTriggerToNewWorkflowTriggersParams) error {
    _, err := db.Exec(ctx, moveScheduledTriggerToNewWorkflowTriggers, arg.Newworkflowtriggerid, arg.Oldworkflowversionid)
    return err
}
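
// Illustrative sketch (editor-added, not part of the sqlc output): both queries
// above re-parent API-created cron and scheduled triggers from an old workflow
// version's WorkflowTriggers row onto a new one, so they survive a version bump.
// The combined helper and its parameter names are hypothetical.
func exampleCarryOverApiTriggers(ctx context.Context, q *Queries, tx DBTX, newTriggersId, oldVersionId pgtype.UUID) error {
    if err := q.MoveCronTriggerToNewWorkflowTriggers(ctx, tx, MoveCronTriggerToNewWorkflowTriggersParams{
        Newworkflowtriggerid: newTriggersId,
        Oldworkflowversionid: oldVersionId,
    }); err != nil {
        return err
    }
    return q.MoveScheduledTriggerToNewWorkflowTriggers(ctx, tx, MoveScheduledTriggerToNewWorkflowTriggersParams{
        Newworkflowtriggerid: newTriggersId,
        Oldworkflowversionid: oldVersionId,
    })
}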

const softDeleteWorkflow = `-- name: SoftDeleteWorkflow :one
WITH versions AS (
    UPDATE "WorkflowVersion"
    SET "deletedAt" = CURRENT_TIMESTAMP
    WHERE "workflowId" = $1::uuid
)
UPDATE "Workflow"
SET
    -- set name to the current name plus a random suffix to avoid conflicts
    "name" = "name" || '-' || gen_random_uuid(),
    "deletedAt" = CURRENT_TIMESTAMP
WHERE "id" = $1::uuid
RETURNING id, "createdAt", "updatedAt", "deletedAt", "tenantId", name, description, "isPaused"
`

func (q *Queries) SoftDeleteWorkflow(ctx context.Context, db DBTX, id pgtype.UUID) (*Workflow, error) {
    row := db.QueryRow(ctx, softDeleteWorkflow, id)
    var i Workflow
    err := row.Scan(
        &i.ID,
        &i.CreatedAt,
        &i.UpdatedAt,
        &i.DeletedAt,
        &i.TenantId,
        &i.Name,
        &i.Description,
        &i.IsPaused,
    )
    return &i, err
}
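
// Illustrative sketch (editor-added, not part of the sqlc output): soft deletion
// marks the workflow and all of its versions as deleted and renames the workflow
// with a random suffix so the old name can be reused immediately. Helper name is
// hypothetical; it simply surfaces the renamed value from the RETURNING clause.
func exampleSoftDelete(ctx context.Context, q *Queries, db DBTX, workflowId pgtype.UUID) (string, error) {
    deleted, err := q.SoftDeleteWorkflow(ctx, db, workflowId)
    if err != nil {
        return "", err
    }
    // deleted.Name now carries the "-<uuid>" suffix appended by the query.
    return deleted.Name, nil
}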

const updateWorkflow = `-- name: UpdateWorkflow :one
UPDATE "Workflow"
SET
    "updatedAt" = CURRENT_TIMESTAMP,
    "isPaused" = coalesce($1::boolean, "isPaused")
WHERE "id" = $2::uuid
RETURNING id, "createdAt", "updatedAt", "deletedAt", "tenantId", name, description, "isPaused"
`

type UpdateWorkflowParams struct {
    IsPaused pgtype.Bool `json:"isPaused"`
    ID       pgtype.UUID `json:"id"`
}

func (q *Queries) UpdateWorkflow(ctx context.Context, db DBTX, arg UpdateWorkflowParams) (*Workflow, error) {
    row := db.QueryRow(ctx, updateWorkflow, arg.IsPaused, arg.ID)
    var i Workflow
    err := row.Scan(
        &i.ID,
        &i.CreatedAt,
        &i.UpdatedAt,
        &i.DeletedAt,
        &i.TenantId,
        &i.Name,
        &i.Description,
        &i.IsPaused,
    )
    return &i, err
}
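
// Illustrative sketch (editor-added, not part of the sqlc output): pausing or
// unpausing a workflow through the COALESCE'd isPaused parameter. Passing an
// invalid pgtype.Bool (NULL) would leave the stored value untouched.
func examplePauseWorkflow(ctx context.Context, q *Queries, db DBTX, workflowId pgtype.UUID, pause bool) (*Workflow, error) {
    return q.UpdateWorkflow(ctx, db, UpdateWorkflowParams{
        ID:       workflowId,
        IsPaused: pgtype.Bool{Bool: pause, Valid: true},
    })
}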

const upsertAction = `-- name: UpsertAction :one
INSERT INTO "Action" (
    "id",
    "actionId",
    "tenantId"
)
VALUES (
    gen_random_uuid(),
    LOWER($1::text),
    $2::uuid
)
ON CONFLICT ("tenantId", "actionId") DO UPDATE
SET
    "tenantId" = EXCLUDED."tenantId"
WHERE
    "Action"."tenantId" = $2 AND "Action"."actionId" = LOWER($1::text)
RETURNING description, "tenantId", "actionId", id
`

type UpsertActionParams struct {
    Action   string      `json:"action"`
    Tenantid pgtype.UUID `json:"tenantid"`
}

func (q *Queries) UpsertAction(ctx context.Context, db DBTX, arg UpsertActionParams) (*Action, error) {
    row := db.QueryRow(ctx, upsertAction, arg.Action, arg.Tenantid)
    var i Action
    err := row.Scan(
        &i.Description,
        &i.TenantId,
        &i.ActionId,
        &i.ID,
    )
    return &i, err
}
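
// Illustrative sketch (editor-added, not part of the sqlc output): action ids
// are lowercased on insert and de-duplicated per tenant via the
// (tenantId, actionId) conflict target, so repeated registrations resolve to
// the same row. The helper name and example action id are hypothetical.
func exampleRegisterAction(ctx context.Context, q *Queries, db DBTX, tenantId pgtype.UUID, actionId string) (*Action, error) {
    return q.UpsertAction(ctx, db, UpsertActionParams{
        Action:   actionId, // e.g. "default:process-order"; stored lowercased
        Tenantid: tenantId,
    })
}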

const upsertWorkflowTag = `-- name: UpsertWorkflowTag :exec
INSERT INTO "WorkflowTag" (
    "id",
    "tenantId",
    "name",
    "color"
)
VALUES (
    COALESCE($1::uuid, gen_random_uuid()),
    $2::uuid,
    $3::text,
    COALESCE($4::text, '#93C5FD')
)
ON CONFLICT ("tenantId", "name") DO UPDATE
SET
    "color" = COALESCE(EXCLUDED."color", "WorkflowTag"."color")
WHERE
    "WorkflowTag"."tenantId" = $2 AND "WorkflowTag"."name" = $3
`

type UpsertWorkflowTagParams struct {
    ID       pgtype.UUID `json:"id"`
    Tenantid pgtype.UUID `json:"tenantid"`
    Tagname  string      `json:"tagname"`
    TagColor pgtype.Text `json:"tagColor"`
}

func (q *Queries) UpsertWorkflowTag(ctx context.Context, db DBTX, arg UpsertWorkflowTagParams) error {
    _, err := db.Exec(ctx, upsertWorkflowTag,
        arg.ID,
        arg.Tenantid,
        arg.Tagname,
        arg.TagColor,
    )
    return err
}
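
// Illustrative sketch (editor-added, not part of the sqlc output): tags are
// keyed by (tenantId, name); an invalid ID and TagColor let the query fall back
// to gen_random_uuid() and the '#93C5FD' default, and upserting an existing name
// only updates its color. The helper name is hypothetical.
func exampleEnsureTag(ctx context.Context, q *Queries, db DBTX, tenantId pgtype.UUID, name string) error {
    return q.UpsertWorkflowTag(ctx, db, UpsertWorkflowTagParams{
        Tenantid: tenantId,
        Tagname:  name,
        // ID and TagColor are left invalid (NULL) so the defaults apply.
    })
}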