Files
hatchet/pkg/repository/v1/sqlcv1/tasks.sql.go
T
Mohammed Nafees b58359d7b3 Do not run cleanup on v1_workflow_concurrency_slot (#2463)
* do not run cleanup on v1_concurrency_slot

* fix health endpoints for engine
2025-10-30 15:34:50 +01:00

2158 lines
60 KiB
Go

// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.29.0
// source: tasks.sql
package sqlcv1
import (
"context"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgtype"
)
// analyzeV1Dag refreshes planner statistics for the v1_dag table.
const analyzeV1Dag = `-- name: AnalyzeV1Dag :exec
ANALYZE v1_dag
`

// AnalyzeV1Dag runs ANALYZE on v1_dag so the query planner has up-to-date
// statistics for that table.
func (q *Queries) AnalyzeV1Dag(ctx context.Context, db DBTX) error {
	if _, execErr := db.Exec(ctx, analyzeV1Dag); execErr != nil {
		return execErr
	}
	return nil
}
// analyzeV1Task refreshes planner statistics for the v1_task table.
const analyzeV1Task = `-- name: AnalyzeV1Task :exec
ANALYZE v1_task
`

// AnalyzeV1Task runs ANALYZE on v1_task so the query planner has up-to-date
// statistics for that table.
func (q *Queries) AnalyzeV1Task(ctx context.Context, db DBTX) error {
	if _, execErr := db.Exec(ctx, analyzeV1Task); execErr != nil {
		return execErr
	}
	return nil
}
// analyzeV1TaskEvent refreshes planner statistics for the v1_task_event table.
const analyzeV1TaskEvent = `-- name: AnalyzeV1TaskEvent :exec
ANALYZE v1_task_event
`

// AnalyzeV1TaskEvent runs ANALYZE on v1_task_event so the query planner has
// up-to-date statistics for that table.
func (q *Queries) AnalyzeV1TaskEvent(ctx context.Context, db DBTX) error {
	if _, execErr := db.Exec(ctx, analyzeV1TaskEvent); execErr != nil {
		return execErr
	}
	return nil
}
// cleanupV1ConcurrencySlot deletes concurrency slots whose owning v1_task row
// no longer exists (the NOT EXISTS anti-join). Candidate rows are locked with
// FOR UPDATE SKIP LOCKED so concurrent cleanup passes do not block each other,
// and the scan is bounded to $1 rows per invocation to keep each pass short.
const cleanupV1ConcurrencySlot = `-- name: CleanupV1ConcurrencySlot :execresult
WITH locked_cs AS (
SELECT cs.task_id, cs.task_inserted_at, cs.task_retry_count
FROM v1_concurrency_slot cs
WHERE NOT EXISTS (
SELECT 1
FROM v1_task vt
WHERE cs.task_id = vt.id
AND cs.task_inserted_at = vt.inserted_at
)
ORDER BY cs.task_id, cs.task_inserted_at, cs.task_retry_count, cs.strategy_id
LIMIT $1::int
FOR UPDATE SKIP LOCKED
)
DELETE FROM v1_concurrency_slot
WHERE (task_id, task_inserted_at, task_retry_count) IN (
SELECT task_id, task_inserted_at, task_retry_count
FROM locked_cs
)
`

// CleanupV1ConcurrencySlot removes a bounded batch of orphaned rows from
// v1_concurrency_slot. It returns the command tag so callers can inspect how
// many rows were deleted (e.g. to decide whether another pass is needed).
func (q *Queries) CleanupV1ConcurrencySlot(ctx context.Context, db DBTX, batchsize int32) (pgconn.CommandTag, error) {
	return db.Exec(ctx, cleanupV1ConcurrencySlot, batchsize)
}
// cleanupV1TaskRuntime deletes task runtime rows whose owning v1_task row no
// longer exists (the NOT EXISTS anti-join). Candidates are locked with
// FOR UPDATE SKIP LOCKED so concurrent cleanup passes do not block each other,
// and the scan is bounded to $1 rows per invocation.
const cleanupV1TaskRuntime = `-- name: CleanupV1TaskRuntime :execresult
WITH locked_trs AS (
SELECT vtr.task_id, vtr.task_inserted_at, vtr.retry_count
FROM v1_task_runtime vtr
WHERE NOT EXISTS (
SELECT 1
FROM v1_task vt
WHERE vtr.task_id = vt.id
AND vtr.task_inserted_at = vt.inserted_at
)
ORDER BY vtr.task_id ASC
LIMIT $1::int
FOR UPDATE SKIP LOCKED
)
DELETE FROM v1_task_runtime
WHERE (task_id, task_inserted_at, retry_count) IN (
SELECT task_id, task_inserted_at, retry_count
FROM locked_trs
)
`

// CleanupV1TaskRuntime removes a bounded batch of orphaned rows from
// v1_task_runtime. It returns the command tag so callers can inspect how many
// rows were deleted (e.g. to decide whether another pass is needed).
func (q *Queries) CleanupV1TaskRuntime(ctx context.Context, db DBTX, batchsize int32) (pgconn.CommandTag, error) {
	return db.Exec(ctx, cleanupV1TaskRuntime, batchsize)
}
// cleanupWorkflowConcurrencySlotsAfterInsert unnests the three parallel input
// arrays into rows and invokes the cleanup_workflow_concurrency_slots database
// function once per (parent_strategy_id, workflow_version_id, workflow_run_id)
// tuple. The ORDER BY gives a deterministic invocation order.
const cleanupWorkflowConcurrencySlotsAfterInsert = `-- name: CleanupWorkflowConcurrencySlotsAfterInsert :exec
WITH input AS (
SELECT
UNNEST($1::bigint[]) AS parent_strategy_id,
UNNEST($2::uuid[]) AS workflow_version_id,
UNNEST($3::uuid[]) AS workflow_run_id
ORDER BY parent_strategy_id, workflow_version_id, workflow_run_id
)
SELECT
cleanup_workflow_concurrency_slots(
rec.parent_strategy_id,
rec.workflow_version_id,
rec.workflow_run_id
)
FROM
input rec
`

// CleanupWorkflowConcurrencySlotsAfterInsertParams carries three parallel
// arrays; element i of each array forms one cleanup tuple.
type CleanupWorkflowConcurrencySlotsAfterInsertParams struct {
	Concurrencyparentstrategyids []int64       `json:"concurrencyparentstrategyids"`
	Workflowversionids           []pgtype.UUID `json:"workflowversionids"`
	Workflowrunids               []pgtype.UUID `json:"workflowrunids"`
}

// Cleans up workflow concurrency slots when tasks have been inserted in a non-QUEUED state.
// NOTE: this comes after the insert into v1_dag_to_task and v1_lookup_table, because we case on these tables for cleanup
func (q *Queries) CleanupWorkflowConcurrencySlotsAfterInsert(ctx context.Context, db DBTX, arg CleanupWorkflowConcurrencySlotsAfterInsertParams) error {
	_, err := db.Exec(ctx, cleanupWorkflowConcurrencySlotsAfterInsert, arg.Concurrencyparentstrategyids, arg.Workflowversionids, arg.Workflowrunids)
	return err
}
// createPartitions invokes the create_v1_range_partition database function for
// each partitioned v1 table, creating the range partition for the given date.
const createPartitions = `-- name: CreatePartitions :exec
SELECT
create_v1_range_partition('v1_task', $1::date),
create_v1_range_partition('v1_dag', $1::date),
create_v1_range_partition('v1_task_event', $1::date),
create_v1_range_partition('v1_log_line', $1::date),
create_v1_range_partition('v1_payload', $1::date)
`

// CreatePartitions creates the date-keyed range partitions for v1_task, v1_dag,
// v1_task_event, v1_log_line and v1_payload in a single statement.
func (q *Queries) CreatePartitions(ctx context.Context, db DBTX, date pgtype.Date) error {
	if _, execErr := db.Exec(ctx, createPartitions, date); execErr != nil {
		return execErr
	}
	return nil
}
// defaultTaskActivityGauge counts the tenant's v1_queue rows whose last_active
// timestamp is newer than the supplied cutoff.
const defaultTaskActivityGauge = `-- name: DefaultTaskActivityGauge :one
SELECT
COUNT(*)
FROM
v1_queue
WHERE
tenant_id = $1::uuid
AND last_active > $2::timestamptz
`

// DefaultTaskActivityGaugeParams holds the tenant and the activity cutoff for
// the gauge query.
type DefaultTaskActivityGaugeParams struct {
	Tenantid    pgtype.UUID        `json:"tenantid"`
	Activesince pgtype.Timestamptz `json:"activesince"`
}

// DefaultTaskActivityGauge returns the number of queues for the tenant that
// have been active since arg.Activesince.
func (q *Queries) DefaultTaskActivityGauge(ctx context.Context, db DBTX, arg DefaultTaskActivityGaugeParams) (int64, error) {
	var activeQueues int64
	scanErr := db.QueryRow(ctx, defaultTaskActivityGauge, arg.Tenantid, arg.Activesince).Scan(&activeQueues)
	return activeQueues, scanErr
}
// deleteMatchingSignalEvents unnests the parallel (task_id, task_inserted_at,
// event_key) arrays, locks the matching v1_task_event rows of the given tenant
// and event type in a stable order (ORDER BY e.id ... FOR UPDATE), and deletes
// them by composite key.
const deleteMatchingSignalEvents = `-- name: DeleteMatchingSignalEvents :exec
WITH input AS (
SELECT
task_id, task_inserted_at, event_key
FROM
(
SELECT
unnest($1::bigint[]) AS task_id,
unnest($2::timestamptz[]) AS task_inserted_at,
unnest($3::text[]) AS event_key
) AS subquery
), matching_events AS (
SELECT
e.task_id, e.task_inserted_at, e.id
FROM
v1_task_event e
JOIN
input i ON i.task_id = e.task_id AND i.task_inserted_at = e.task_inserted_at AND i.event_key = e.event_key
WHERE
e.tenant_id = $4::uuid
AND e.event_type = $5::v1_task_event_type
ORDER BY
e.id
FOR UPDATE
)
DELETE FROM
v1_task_event
WHERE
(task_id, task_inserted_at, id) IN (SELECT task_id, task_inserted_at, id FROM matching_events)
`

// DeleteMatchingSignalEventsParams carries three parallel arrays (element i of
// each forms one match tuple) plus the tenant and event type filters.
type DeleteMatchingSignalEventsParams struct {
	Taskids         []int64              `json:"taskids"`
	Taskinsertedats []pgtype.Timestamptz `json:"taskinsertedats"`
	Eventkeys       []string             `json:"eventkeys"`
	Tenantid        pgtype.UUID          `json:"tenantid"`
	Eventtype       V1TaskEventType      `json:"eventtype"`
}

// DeleteMatchingSignalEvents deletes the tenant's task events of the given type
// that match any of the supplied (task id, inserted-at, event key) tuples.
func (q *Queries) DeleteMatchingSignalEvents(ctx context.Context, db DBTX, arg DeleteMatchingSignalEventsParams) error {
	_, err := db.Exec(ctx, deleteMatchingSignalEvents,
		arg.Taskids,
		arg.Taskinsertedats,
		arg.Eventkeys,
		arg.Tenantid,
		arg.Eventtype,
	)
	return err
}
// ensureTablePartitionsExist builds the expected partition names for tomorrow's
// date (v1_task_/v1_dag_/v1_task_event_/v1_log_line_ + YYYYMMDD), left-joins
// them against pg_catalog.pg_tables, and reports whether all of them exist.
const ensureTablePartitionsExist = `-- name: EnsureTablePartitionsExist :one
WITH tomorrow_date AS (
SELECT (NOW() + INTERVAL '1 day')::date AS date
), expected_partitions AS (
SELECT
'v1_task_' || to_char((SELECT date FROM tomorrow_date), 'YYYYMMDD') AS expected_partition_name
UNION ALL
SELECT 'v1_dag_' || to_char((SELECT date FROM tomorrow_date), 'YYYYMMDD')
UNION ALL
SELECT 'v1_task_event_' || to_char((SELECT date FROM tomorrow_date), 'YYYYMMDD')
UNION ALL
SELECT 'v1_log_line_' || to_char((SELECT date FROM tomorrow_date), 'YYYYMMDD')
), partition_check AS (
SELECT
COUNT(*) AS total_tables,
COUNT(pt.tablename) AS existing_partitions
FROM expected_partitions ep
LEFT JOIN pg_catalog.pg_tables pt ON pt.tablename = ep.expected_partition_name
)
SELECT
CASE
WHEN existing_partitions = total_tables THEN TRUE
ELSE FALSE
END AS all_partitions_exist
FROM partition_check
`

// EnsureTablePartitionsExist reports whether every expected v1 partition for
// tomorrow's date is already present in the database catalog.
func (q *Queries) EnsureTablePartitionsExist(ctx context.Context, db DBTX) (bool, error) {
	var allPartitionsExist bool
	scanErr := db.QueryRow(ctx, ensureTablePartitionsExist).Scan(&allPartitionsExist)
	return allPartitionsExist, scanErr
}
// failTaskAppFailure locks the matching v1_task rows in a stable (id) order,
// joins each task to its Step to read the configured retry budget, and bumps
// retry_count/app_retry_count for tasks that are retryable (is_non_retryable =
// FALSE) and still under the step's "retries" limit. Only the updated rows are
// returned, so callers can tell which tasks will actually be retried.
const failTaskAppFailure = `-- name: FailTaskAppFailure :many
WITH input AS (
SELECT
task_id, task_inserted_at, task_retry_count, is_non_retryable
FROM
(
SELECT
unnest($1::bigint[]) AS task_id,
unnest($2::timestamptz[]) AS task_inserted_at,
unnest($3::integer[]) AS task_retry_count,
unnest($4::boolean[]) AS is_non_retryable
) AS subquery
), locked_tasks AS (
SELECT
t.id,
t.inserted_at,
t.step_id
FROM
v1_task t
JOIN
input i ON i.task_id = t.id AND i.task_inserted_at = t.inserted_at AND i.task_retry_count = t.retry_count
WHERE
t.tenant_id = $5::uuid
-- order by the task id to get a stable lock order
ORDER BY
id
FOR UPDATE
), tasks_to_steps AS (
SELECT
t.id,
t.step_id,
s."retries"
FROM
locked_tasks t
JOIN
"Step" s ON s."id" = t.step_id
)
UPDATE
v1_task
SET
retry_count = retry_count + 1,
app_retry_count = app_retry_count + 1
FROM
tasks_to_steps
WHERE
(v1_task.id, v1_task.inserted_at, v1_task.retry_count) IN (
SELECT task_id, task_inserted_at, task_retry_count
FROM input
WHERE is_non_retryable = FALSE
)
AND tasks_to_steps."retries" > v1_task.app_retry_count
RETURNING
v1_task.id,
v1_task.inserted_at,
v1_task.retry_count,
v1_task.app_retry_count,
v1_task.retry_backoff_factor,
v1_task.retry_max_backoff
`

// FailTaskAppFailureParams carries four parallel arrays (element i of each
// describes one failed task) plus the tenant filter.
type FailTaskAppFailureParams struct {
	Taskids         []int64              `json:"taskids"`
	Taskinsertedats []pgtype.Timestamptz `json:"taskinsertedats"`
	Taskretrycounts []int32              `json:"taskretrycounts"`
	Isnonretryables []bool               `json:"isnonretryables"`
	Tenantid        pgtype.UUID          `json:"tenantid"`
}

// FailTaskAppFailureRow describes a task that was bumped for retry, including
// the backoff settings needed to schedule the next attempt.
type FailTaskAppFailureRow struct {
	ID                 int64              `json:"id"`
	InsertedAt         pgtype.Timestamptz `json:"inserted_at"`
	RetryCount         int32              `json:"retry_count"`
	AppRetryCount      int32              `json:"app_retry_count"`
	RetryBackoffFactor pgtype.Float8      `json:"retry_backoff_factor"`
	RetryMaxBackoff    pgtype.Int4        `json:"retry_max_backoff"`
}

// Fails a task due to an application-level error. Returns only the tasks whose
// retry counters were actually incremented (retryable and under the step's
// retry limit).
func (q *Queries) FailTaskAppFailure(ctx context.Context, db DBTX, arg FailTaskAppFailureParams) ([]*FailTaskAppFailureRow, error) {
	rows, err := db.Query(ctx, failTaskAppFailure,
		arg.Taskids,
		arg.Taskinsertedats,
		arg.Taskretrycounts,
		arg.Isnonretryables,
		arg.Tenantid,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*FailTaskAppFailureRow
	for rows.Next() {
		var i FailTaskAppFailureRow
		if err := rows.Scan(
			&i.ID,
			&i.InsertedAt,
			&i.RetryCount,
			&i.AppRetryCount,
			&i.RetryBackoffFactor,
			&i.RetryMaxBackoff,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
// failTaskInternalFailure locks the matching v1_task rows in a stable (id)
// order and bumps retry_count/internal_retry_count for tasks still under the
// caller-supplied internal retry cap ($1). Only updated rows are returned.
const failTaskInternalFailure = `-- name: FailTaskInternalFailure :many
WITH input AS (
SELECT
task_id, task_inserted_at, task_retry_count
FROM
(
SELECT
unnest($2::bigint[]) AS task_id,
unnest($3::timestamptz[]) AS task_inserted_at,
unnest($4::integer[]) AS task_retry_count
) AS subquery
), locked_tasks AS (
SELECT
t.id
FROM
v1_task t
-- only fail tasks which have a v1_task_runtime equivalent to the current retry count. otherwise,
-- a cancellation which deletes the v1_task_runtime might lead to a future failure event, which triggers
-- a retry.
JOIN
input i ON i.task_id = t.id AND i.task_inserted_at = t.inserted_at AND i.task_retry_count = t.retry_count
WHERE
t.tenant_id = $5::uuid
-- order by the task id to get a stable lock order
ORDER BY
id
FOR UPDATE
)
UPDATE
v1_task
SET
retry_count = retry_count + 1,
internal_retry_count = internal_retry_count + 1
FROM
locked_tasks
WHERE
(v1_task.id, v1_task.inserted_at, v1_task.retry_count) IN (
SELECT task_id, task_inserted_at, task_retry_count
FROM input
)
AND $1::int > v1_task.internal_retry_count
RETURNING
v1_task.id,
v1_task.inserted_at,
v1_task.retry_count
`

// FailTaskInternalFailureParams carries the internal retry cap, three parallel
// arrays identifying the failed task attempts, and the tenant filter.
type FailTaskInternalFailureParams struct {
	Maxinternalretries int32                `json:"maxinternalretries"`
	Taskids            []int64              `json:"taskids"`
	Taskinsertedats    []pgtype.Timestamptz `json:"taskinsertedats"`
	Taskretrycounts    []int32              `json:"taskretrycounts"`
	Tenantid           pgtype.UUID          `json:"tenantid"`
}

// FailTaskInternalFailureRow identifies a task attempt whose retry counters
// were incremented.
type FailTaskInternalFailureRow struct {
	ID         int64              `json:"id"`
	InsertedAt pgtype.Timestamptz `json:"inserted_at"`
	RetryCount int32              `json:"retry_count"`
}

// Fails a task due to an internal (platform-level) error. Unlike
// FailTaskAppFailure, retries are capped by arg.Maxinternalretries rather than
// the step's configured retry budget, and only the tasks whose counters were
// actually incremented are returned.
func (q *Queries) FailTaskInternalFailure(ctx context.Context, db DBTX, arg FailTaskInternalFailureParams) ([]*FailTaskInternalFailureRow, error) {
	rows, err := db.Query(ctx, failTaskInternalFailure,
		arg.Maxinternalretries,
		arg.Taskids,
		arg.Taskinsertedats,
		arg.Taskretrycounts,
		arg.Tenantid,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*FailTaskInternalFailureRow
	for rows.Next() {
		var i FailTaskInternalFailureRow
		if err := rows.Scan(&i.ID, &i.InsertedAt, &i.RetryCount); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
// flattenExternalIds resolves a batch of external IDs through v1_lookup_table
// into concrete task rows. A lookup row can reference either a single task
// (task_id set) or a DAG (dag_id set); DAG references are expanded to all of
// the DAG's tasks via v1_dag_to_task, and both branches are combined with
// UNION ALL. For the single-task branch the task's own external_id is aliased
// as workflow_run_external_id; for the DAG branch it is the DAG's external_id.
const flattenExternalIds = `-- name: FlattenExternalIds :many
WITH lookup_rows AS (
SELECT
tenant_id, external_id, task_id, dag_id, inserted_at
FROM
v1_lookup_table l
WHERE
l.external_id = ANY($1::uuid[])
AND l.tenant_id = $2::uuid
), tasks_from_dags AS (
SELECT
t.id,
t.inserted_at,
t.retry_count,
t.external_id,
t.workflow_run_id,
t.additional_metadata,
t.dag_id,
t.dag_inserted_at,
t.parent_task_id,
t.child_index,
t.child_key,
d.external_id AS workflow_run_external_id
FROM
lookup_rows l
JOIN
v1_dag d ON d.id = l.dag_id AND d.inserted_at = l.inserted_at
JOIN
v1_dag_to_task dt ON dt.dag_id = d.id AND dt.dag_inserted_at = d.inserted_at
JOIN
v1_task t ON t.id = dt.task_id AND t.inserted_at = dt.task_inserted_at
WHERE
l.dag_id IS NOT NULL
)
SELECT
t.id,
t.inserted_at,
t.retry_count,
t.external_id,
t.workflow_run_id,
t.additional_metadata,
t.dag_id,
t.dag_inserted_at,
t.parent_task_id,
t.child_index,
t.child_key,
t.external_id AS workflow_run_external_id
FROM
lookup_rows l
JOIN
v1_task t ON t.id = l.task_id AND t.inserted_at = l.inserted_at
WHERE
l.task_id IS NOT NULL
UNION ALL
SELECT
id, inserted_at, retry_count, external_id, workflow_run_id, additional_metadata, dag_id, dag_inserted_at, parent_task_id, child_index, child_key, workflow_run_external_id
FROM
tasks_from_dags
`

// FlattenExternalIdsParams holds the external IDs to resolve and the tenant
// filter.
type FlattenExternalIdsParams struct {
	Externalids []pgtype.UUID `json:"externalids"`
	Tenantid    pgtype.UUID   `json:"tenantid"`
}

// FlattenExternalIdsRow is one resolved task, annotated with the external ID of
// the workflow run it belongs to.
type FlattenExternalIdsRow struct {
	ID                    int64              `json:"id"`
	InsertedAt            pgtype.Timestamptz `json:"inserted_at"`
	RetryCount            int32              `json:"retry_count"`
	ExternalID            pgtype.UUID        `json:"external_id"`
	WorkflowRunID         pgtype.UUID        `json:"workflow_run_id"`
	AdditionalMetadata    []byte             `json:"additional_metadata"`
	DagID                 pgtype.Int8        `json:"dag_id"`
	DagInsertedAt         pgtype.Timestamptz `json:"dag_inserted_at"`
	ParentTaskID          pgtype.Int8        `json:"parent_task_id"`
	ChildIndex            pgtype.Int8        `json:"child_index"`
	ChildKey              pgtype.Text        `json:"child_key"`
	WorkflowRunExternalID pgtype.UUID        `json:"workflow_run_external_id"`
}

// Union the tasks from the lookup table with the tasks from the DAGs
func (q *Queries) FlattenExternalIds(ctx context.Context, db DBTX, arg FlattenExternalIdsParams) ([]*FlattenExternalIdsRow, error) {
	rows, err := db.Query(ctx, flattenExternalIds, arg.Externalids, arg.Tenantid)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*FlattenExternalIdsRow
	for rows.Next() {
		var i FlattenExternalIdsRow
		if err := rows.Scan(
			&i.ID,
			&i.InsertedAt,
			&i.RetryCount,
			&i.ExternalID,
			&i.WorkflowRunID,
			&i.AdditionalMetadata,
			&i.DagID,
			&i.DagInsertedAt,
			&i.ParentTaskID,
			&i.ChildIndex,
			&i.ChildKey,
			&i.WorkflowRunExternalID,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
// getTenantTaskStats aggregates per-tenant task counts from five sources —
// the main queue, the retry queue, the rate-limited queue, unfilled
// concurrency slots, and currently running tasks (v1_task_runtime rows with a
// worker assigned) — and combines them with UNION ALL. Queue-derived rows are
// labeled 'queued' and running rows 'running'; expression/strategy/key are NULL
// (or '' / 'NONE' defaults) for sources that have no concurrency context.
const getTenantTaskStats = `-- name: GetTenantTaskStats :many
WITH queued_tasks AS (
SELECT
t.step_readable_id,
t.queue,
COUNT(*) as count
FROM
v1_queue_item qi
JOIN
v1_task t ON qi.task_id = t.id AND qi.task_inserted_at = t.inserted_at AND qi.retry_count = t.retry_count
WHERE
qi.tenant_id = $1::uuid
GROUP BY
t.step_readable_id,
t.queue
), retry_queued_tasks AS (
SELECT
t.step_readable_id,
t.queue,
COUNT(*) as count
FROM
v1_retry_queue_item rqi
JOIN
v1_task t ON rqi.task_id = t.id AND rqi.task_inserted_at = t.inserted_at AND rqi.task_retry_count = t.retry_count
WHERE
rqi.tenant_id = $1::uuid
GROUP BY
t.step_readable_id,
t.queue
), rate_limited_queued_tasks AS (
SELECT
t.step_readable_id,
t.queue,
COUNT(*) as count
FROM
v1_rate_limited_queue_items rqi
JOIN
v1_task t ON rqi.task_id = t.id AND rqi.task_inserted_at = t.inserted_at
WHERE
rqi.tenant_id = $1::uuid
GROUP BY
t.step_readable_id,
t.queue
), concurrency_queued_tasks AS (
SELECT
t.step_readable_id,
t.queue,
sc.expression,
sc.strategy,
cs.key,
COUNT(*) as count
FROM
v1_concurrency_slot cs
JOIN
v1_task t ON cs.task_id = t.id AND cs.task_inserted_at = t.inserted_at AND cs.task_retry_count = t.retry_count
JOIN
v1_step_concurrency sc ON sc.workflow_id = t.workflow_id AND sc.workflow_version_id = t.workflow_version_id AND sc.step_id = t.step_id AND cs.strategy_id = sc.id
WHERE
cs.tenant_id = $1::uuid
AND cs.is_filled = FALSE
AND sc.tenant_id = $1::uuid
AND sc.is_active = TRUE
AND sc.id = ANY(t.concurrency_strategy_ids)
GROUP BY
t.step_readable_id,
t.queue,
sc.expression,
sc.strategy,
cs.key
), running_tasks AS (
SELECT
t.step_readable_id,
COALESCE(sc.expression, '') as expression,
COALESCE(sc.strategy, 'NONE'::v1_concurrency_strategy) as strategy,
COALESCE(cs.key, '') as key,
COUNT(*) as count
FROM
v1_task_runtime tr
JOIN
v1_task t ON tr.task_id = t.id AND tr.task_inserted_at = t.inserted_at AND tr.retry_count = t.retry_count
LEFT JOIN
v1_concurrency_slot cs ON cs.task_id = t.id AND cs.task_inserted_at = t.inserted_at AND cs.task_retry_count = t.retry_count
LEFT JOIN
v1_step_concurrency sc ON sc.workflow_id = t.workflow_id AND sc.workflow_version_id = t.workflow_version_id AND sc.step_id = t.step_id
WHERE
t.tenant_id = $1::uuid
AND tr.tenant_id = $1::uuid
AND tr.worker_id IS NOT NULL
AND (t.concurrency_strategy_ids IS NULL OR array_length(t.concurrency_strategy_ids, 1) IS NULL OR sc.id = ANY(t.concurrency_strategy_ids))
GROUP BY
t.step_readable_id,
sc.expression,
sc.strategy,
cs.key
)
SELECT
'queued' as task_status,
step_readable_id,
queue,
NULL::text as expression,
NULL::text as strategy,
NULL::text as key,
count
FROM queued_tasks
UNION ALL
SELECT
'queued' as task_status,
step_readable_id,
queue,
NULL::text as expression,
NULL::text as strategy,
NULL::text as key,
count
FROM retry_queued_tasks
UNION ALL
SELECT
'queued' as task_status,
step_readable_id,
queue,
NULL::text as expression,
NULL::text as strategy,
NULL::text as key,
count
FROM rate_limited_queued_tasks
UNION ALL
SELECT
'queued' as task_status,
step_readable_id,
queue,
expression,
strategy::text,
key,
count
FROM concurrency_queued_tasks
UNION ALL
SELECT
'running' as task_status,
step_readable_id,
''::text as queue,
expression,
strategy::text,
key,
count
FROM running_tasks
`

// GetTenantTaskStatsRow is one aggregated bucket: a status ('queued' or
// 'running'), the step/queue it belongs to, optional concurrency context, and
// the number of tasks in the bucket.
type GetTenantTaskStatsRow struct {
	TaskStatus     string      `json:"task_status"`
	StepReadableID string      `json:"step_readable_id"`
	Queue          string      `json:"queue"`
	Expression     pgtype.Text `json:"expression"`
	Strategy       pgtype.Text `json:"strategy"`
	Key            pgtype.Text `json:"key"`
	Count          int64       `json:"count"`
}

// GetTenantTaskStats returns queued/running task counts for the tenant, broken
// down by step, queue, and (where applicable) concurrency strategy and key.
func (q *Queries) GetTenantTaskStats(ctx context.Context, db DBTX, tenantid pgtype.UUID) ([]*GetTenantTaskStatsRow, error) {
	rows, err := db.Query(ctx, getTenantTaskStats, tenantid)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*GetTenantTaskStatsRow
	for rows.Next() {
		var i GetTenantTaskStatsRow
		if err := rows.Scan(
			&i.TaskStatus,
			&i.StepReadableID,
			&i.Queue,
			&i.Expression,
			&i.Strategy,
			&i.Key,
			&i.Count,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
// listAllTasksInDags returns identifying columns for every task that belongs
// to any of the given DAG IDs, via the v1_dag_to_task join table, scoped to a
// tenant.
const listAllTasksInDags = `-- name: ListAllTasksInDags :many
SELECT
t.id,
t.inserted_at,
t.retry_count,
t.dag_id,
t.dag_inserted_at,
t.step_readable_id,
t.step_id,
t.workflow_id,
t.external_id
FROM
v1_task t
JOIN
v1_dag_to_task dt ON dt.task_id = t.id
WHERE
t.tenant_id = $1::uuid
AND dt.dag_id = ANY($2::bigint[])
`

// ListAllTasksInDagsParams holds the tenant filter and the DAG IDs to expand.
type ListAllTasksInDagsParams struct {
	Tenantid pgtype.UUID `json:"tenantid"`
	Dagids   []int64     `json:"dagids"`
}

// ListAllTasksInDagsRow is the identifying subset of v1_task columns returned
// for each task in the requested DAGs.
type ListAllTasksInDagsRow struct {
	ID             int64              `json:"id"`
	InsertedAt     pgtype.Timestamptz `json:"inserted_at"`
	RetryCount     int32              `json:"retry_count"`
	DagID          pgtype.Int8        `json:"dag_id"`
	DagInsertedAt  pgtype.Timestamptz `json:"dag_inserted_at"`
	StepReadableID string             `json:"step_readable_id"`
	StepID         pgtype.UUID        `json:"step_id"`
	WorkflowID     pgtype.UUID        `json:"workflow_id"`
	ExternalID     pgtype.UUID        `json:"external_id"`
}

// ListAllTasksInDags lists every task belonging to the given DAGs for a tenant.
func (q *Queries) ListAllTasksInDags(ctx context.Context, db DBTX, arg ListAllTasksInDagsParams) ([]*ListAllTasksInDagsRow, error) {
	rows, err := db.Query(ctx, listAllTasksInDags, arg.Tenantid, arg.Dagids)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*ListAllTasksInDagsRow
	for rows.Next() {
		var i ListAllTasksInDagsRow
		if err := rows.Scan(
			&i.ID,
			&i.InsertedAt,
			&i.RetryCount,
			&i.DagID,
			&i.DagInsertedAt,
			&i.StepReadableID,
			&i.StepID,
			&i.WorkflowID,
			&i.ExternalID,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
// listMatchingSignalEvents unnests the parallel (task_id, task_inserted_at,
// event_key) arrays and returns the tenant's v1_task_event rows of the given
// event type that match any tuple. Read-only counterpart of
// deleteMatchingSignalEvents.
const listMatchingSignalEvents = `-- name: ListMatchingSignalEvents :many
WITH input AS (
SELECT
task_id, task_inserted_at, event_key
FROM
(
SELECT
unnest($3::bigint[]) AS task_id,
unnest($4::timestamptz[]) AS task_inserted_at,
unnest($5::text[]) AS event_key
) AS subquery
)
SELECT
e.id, e.inserted_at, e.tenant_id, e.task_id, e.task_inserted_at, e.retry_count, e.event_type, e.event_key, e.created_at, e.data, e.external_id
FROM
v1_task_event e
JOIN
input i ON i.task_id = e.task_id AND i.task_inserted_at = e.task_inserted_at AND i.event_key = e.event_key
WHERE
e.tenant_id = $1::uuid
AND e.event_type = $2::v1_task_event_type
`

// ListMatchingSignalEventsParams carries the tenant/event-type filters plus
// three parallel arrays; element i of each array forms one match tuple.
type ListMatchingSignalEventsParams struct {
	Tenantid        pgtype.UUID          `json:"tenantid"`
	Eventtype       V1TaskEventType      `json:"eventtype"`
	Taskids         []int64              `json:"taskids"`
	Taskinsertedats []pgtype.Timestamptz `json:"taskinsertedats"`
	Eventkeys       []string             `json:"eventkeys"`
}

// ListMatchingSignalEvents returns the tenant's task events of the given type
// matching any of the supplied (task id, inserted-at, event key) tuples.
func (q *Queries) ListMatchingSignalEvents(ctx context.Context, db DBTX, arg ListMatchingSignalEventsParams) ([]*V1TaskEvent, error) {
	rows, err := db.Query(ctx, listMatchingSignalEvents,
		arg.Tenantid,
		arg.Eventtype,
		arg.Taskids,
		arg.Taskinsertedats,
		arg.Eventkeys,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*V1TaskEvent
	for rows.Next() {
		var i V1TaskEvent
		if err := rows.Scan(
			&i.ID,
			&i.InsertedAt,
			&i.TenantID,
			&i.TaskID,
			&i.TaskInsertedAt,
			&i.RetryCount,
			&i.EventType,
			&i.EventKey,
			&i.CreatedAt,
			&i.Data,
			&i.ExternalID,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
// listMatchingTaskEvents resolves task external IDs through v1_lookup_table
// and returns their task events whose type matches any of the per-task allowed
// event types ($3 is a 2-D array; unnest_nd_1d — presumably a project-defined
// helper that unnests one dimension — yields one event-type list per external
// ID). Events are restricted to the task's latest retry, except rows stored
// with retry_count = -1 which apply to all retries.
const listMatchingTaskEvents = `-- name: ListMatchingTaskEvents :many
WITH input AS (
SELECT
task_external_id, event_types
FROM
(
SELECT
unnest($2::uuid[]) AS task_external_id,
-- can match any of the event types
unnest_nd_1d($3::text[][]) AS event_types
) AS subquery
)
SELECT
t.external_id,
e.id, e.inserted_at, e.tenant_id, e.task_id, e.task_inserted_at, e.retry_count, e.event_type, e.event_key, e.created_at, e.data, e.external_id
FROM
v1_lookup_table l
JOIN
v1_task t ON t.id = l.task_id AND t.inserted_at = l.inserted_at
JOIN
v1_task_event e ON e.tenant_id = $1::uuid AND e.task_id = t.id AND e.task_inserted_at = t.inserted_at
JOIN
input i ON i.task_external_id = l.external_id AND e.event_type::text = ANY(i.event_types)
WHERE
l.tenant_id = $1::uuid
AND l.external_id = ANY($2::uuid[])
AND (e.retry_count = -1 OR e.retry_count = t.retry_count)
`

// ListMatchingTaskEventsParams pairs each external ID in Taskexternalids with
// the corresponding row of Eventtypes (the event types that may match it).
type ListMatchingTaskEventsParams struct {
	Tenantid        pgtype.UUID   `json:"tenantid"`
	Taskexternalids []pgtype.UUID `json:"taskexternalids"`
	Eventtypes      [][]string    `json:"eventtypes"`
}

// ListMatchingTaskEventsRow is a task event plus the external ID of the task it
// belongs to (ExternalID is the task's; ExternalID_2 is the event's own).
type ListMatchingTaskEventsRow struct {
	ExternalID     pgtype.UUID        `json:"external_id"`
	ID             int64              `json:"id"`
	InsertedAt     pgtype.Timestamptz `json:"inserted_at"`
	TenantID       pgtype.UUID        `json:"tenant_id"`
	TaskID         int64              `json:"task_id"`
	TaskInsertedAt pgtype.Timestamptz `json:"task_inserted_at"`
	RetryCount     int32              `json:"retry_count"`
	EventType      V1TaskEventType    `json:"event_type"`
	EventKey       pgtype.Text        `json:"event_key"`
	CreatedAt      pgtype.Timestamp   `json:"created_at"`
	Data           []byte             `json:"data"`
	ExternalID_2   pgtype.UUID        `json:"external_id_2"`
}

// Lists the task events for the **latest** retry of a task, or task events which intentionally
// aren't associated with a retry count (if the retry_count = -1).
func (q *Queries) ListMatchingTaskEvents(ctx context.Context, db DBTX, arg ListMatchingTaskEventsParams) ([]*ListMatchingTaskEventsRow, error) {
	rows, err := db.Query(ctx, listMatchingTaskEvents, arg.Tenantid, arg.Taskexternalids, arg.Eventtypes)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*ListMatchingTaskEventsRow
	for rows.Next() {
		var i ListMatchingTaskEventsRow
		if err := rows.Scan(
			&i.ExternalID,
			&i.ID,
			&i.InsertedAt,
			&i.TenantID,
			&i.TaskID,
			&i.TaskInsertedAt,
			&i.RetryCount,
			&i.EventType,
			&i.EventKey,
			&i.CreatedAt,
			&i.Data,
			&i.ExternalID_2,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
// listPartitionsBeforeDate calls the get_v1_partitions_before_date database
// function for each partitioned v1 table and unions the results, pairing each
// partition name with its parent table so callers know what to detach/drop.
const listPartitionsBeforeDate = `-- name: ListPartitionsBeforeDate :many
WITH task_partitions AS (
SELECT 'v1_task' AS parent_table, p::text as partition_name FROM get_v1_partitions_before_date('v1_task', $1::date) AS p
), dag_partitions AS (
SELECT 'v1_dag' AS parent_table, p::text as partition_name FROM get_v1_partitions_before_date('v1_dag', $1::date) AS p
), task_event_partitions AS (
SELECT 'v1_task_event' AS parent_table, p::text as partition_name FROM get_v1_partitions_before_date('v1_task_event', $1::date) AS p
), log_line_partitions AS (
SELECT 'v1_log_line' AS parent_table, p::text as partition_name FROM get_v1_partitions_before_date('v1_log_line', $1::date) AS p
), payload_partitions AS (
SELECT 'v1_payload' AS parent_table, p::text as partition_name FROM get_v1_partitions_before_date('v1_payload', $1::date) AS p
)
SELECT
parent_table, partition_name
FROM
task_partitions
UNION ALL
SELECT
parent_table, partition_name
FROM
dag_partitions
UNION ALL
SELECT
parent_table, partition_name
FROM
task_event_partitions
UNION ALL
SELECT
parent_table, partition_name
FROM
log_line_partitions
UNION ALL
SELECT
parent_table, partition_name
FROM
payload_partitions
`

// ListPartitionsBeforeDateRow names one partition and the parent table it
// belongs to.
type ListPartitionsBeforeDateRow struct {
	ParentTable   string `json:"parent_table"`
	PartitionName string `json:"partition_name"`
}

// ListPartitionsBeforeDate returns every v1 table partition older than the
// given date, across all five partitioned tables.
func (q *Queries) ListPartitionsBeforeDate(ctx context.Context, db DBTX, date pgtype.Date) ([]*ListPartitionsBeforeDateRow, error) {
	rows, err := db.Query(ctx, listPartitionsBeforeDate, date)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*ListPartitionsBeforeDateRow
	for rows.Next() {
		var i ListPartitionsBeforeDateRow
		if err := rows.Scan(&i.ParentTable, &i.PartitionName); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
// listTaskExpressionEvals unnests the parallel (task_id, task_inserted_at)
// arrays and returns all v1_task_expression_eval rows keyed by those pairs.
const listTaskExpressionEvals = `-- name: ListTaskExpressionEvals :many
WITH input AS (
SELECT
task_id, task_inserted_at
FROM
(
SELECT
unnest($1::bigint[]) AS task_id,
unnest($2::timestamptz[]) AS task_inserted_at
) AS subquery
)
SELECT
key, task_id, task_inserted_at, value_str, value_int, kind
FROM
v1_task_expression_eval te
WHERE
(task_id, task_inserted_at) IN (
SELECT
task_id,
task_inserted_at
FROM
input
)
`

// ListTaskExpressionEvalsParams carries two parallel arrays; element i of each
// identifies one task.
type ListTaskExpressionEvalsParams struct {
	Taskids         []int64              `json:"taskids"`
	Taskinsertedats []pgtype.Timestamptz `json:"taskinsertedats"`
}

// ListTaskExpressionEvals returns the stored expression evaluation results for
// the given tasks.
func (q *Queries) ListTaskExpressionEvals(ctx context.Context, db DBTX, arg ListTaskExpressionEvalsParams) ([]*V1TaskExpressionEval, error) {
	rows, err := db.Query(ctx, listTaskExpressionEvals, arg.Taskids, arg.Taskinsertedats)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*V1TaskExpressionEval
	for rows.Next() {
		var i V1TaskExpressionEval
		if err := rows.Scan(
			&i.Key,
			&i.TaskID,
			&i.TaskInsertedAt,
			&i.ValueStr,
			&i.ValueInt,
			&i.Kind,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
// listTaskMetas selects a small identifying subset of v1_task columns for a
// batch of task IDs within a tenant.
const listTaskMetas = `-- name: ListTaskMetas :many
SELECT
id,
inserted_at,
external_id,
retry_count,
workflow_id,
workflow_run_id
FROM
v1_task
WHERE
tenant_id = $1
AND id = ANY($2::bigint[])
`

// ListTaskMetasParams holds the tenant filter and the task IDs to look up.
type ListTaskMetasParams struct {
	TenantID pgtype.UUID `json:"tenant_id"`
	Ids      []int64     `json:"ids"`
}

// ListTaskMetasRow is the lightweight metadata returned per task.
type ListTaskMetasRow struct {
	ID            int64              `json:"id"`
	InsertedAt    pgtype.Timestamptz `json:"inserted_at"`
	ExternalID    pgtype.UUID        `json:"external_id"`
	RetryCount    int32              `json:"retry_count"`
	WorkflowID    pgtype.UUID        `json:"workflow_id"`
	WorkflowRunID pgtype.UUID        `json:"workflow_run_id"`
}

// ListTaskMetas returns identifying metadata for the given task IDs, scoped to
// a tenant.
func (q *Queries) ListTaskMetas(ctx context.Context, db DBTX, arg ListTaskMetasParams) ([]*ListTaskMetasRow, error) {
	rows, err := db.Query(ctx, listTaskMetas, arg.TenantID, arg.Ids)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*ListTaskMetasRow
	for rows.Next() {
		var i ListTaskMetasRow
		if err := rows.Scan(
			&i.ID,
			&i.InsertedAt,
			&i.ExternalID,
			&i.RetryCount,
			&i.WorkflowID,
			&i.WorkflowRunID,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
// listTaskParentOutputs joins each input task to all tasks in the same DAG
// (via v1_dag_to_task) and collects their COMPLETED task events. Because a
// task can complete on multiple retries, max_retry_counts keeps only the
// highest retry per sibling task, and the DISTINCT ON + ORDER BY ... DESC pick
// a single output row per (task, retry).
const listTaskParentOutputs = `-- name: ListTaskParentOutputs :many
WITH input AS (
SELECT
UNNEST($1::BIGINT[]) AS task_id,
UNNEST($2::TIMESTAMPTZ[]) AS task_inserted_at
), task_outputs AS (
SELECT
t.id,
t.inserted_at,
e.retry_count,
t.tenant_id,
t.dag_id,
t.dag_inserted_at,
t.step_readable_id,
t.workflow_run_id,
t.step_id,
t.workflow_id,
e.id AS task_event_id,
e.inserted_at AS task_event_inserted_at,
e.data AS output
FROM
v1_task t1
JOIN
v1_dag_to_task dt ON dt.dag_id = t1.dag_id AND dt.dag_inserted_at = t1.dag_inserted_at
JOIN
v1_task t ON t.id = dt.task_id AND t.inserted_at = dt.task_inserted_at
JOIN
v1_task_event e ON e.task_id = t.id AND e.task_inserted_at = t.inserted_at AND e.event_type = 'COMPLETED'
WHERE
(t1.id, t1.inserted_at) IN (
SELECT
task_id,
task_inserted_at
FROM
input
)
AND t1.tenant_id = $3::uuid
AND t1.dag_id IS NOT NULL
), max_retry_counts AS (
SELECT
id,
inserted_at,
MAX(retry_count) AS max_retry_count
FROM
task_outputs
GROUP BY
id, inserted_at
)
SELECT
DISTINCT ON (task_outputs.id, task_outputs.inserted_at, task_outputs.retry_count)
task_outputs.task_event_id,
task_outputs.task_event_inserted_at,
task_outputs.workflow_run_id,
task_outputs.output
FROM
task_outputs
JOIN
max_retry_counts mrc ON task_outputs.id = mrc.id
AND task_outputs.inserted_at = mrc.inserted_at
AND task_outputs.retry_count = mrc.max_retry_count
ORDER BY
task_outputs.id,
task_outputs.inserted_at,
task_outputs.retry_count DESC
`

// ListTaskParentOutputsParams carries two parallel arrays identifying the
// tasks whose DAG-sibling outputs are wanted, plus the tenant filter.
type ListTaskParentOutputsParams struct {
	Taskids         []int64              `json:"taskids"`
	Taskinsertedats []pgtype.Timestamptz `json:"taskinsertedats"`
	Tenantid        pgtype.UUID          `json:"tenantid"`
}

// ListTaskParentOutputsRow is one COMPLETED event's payload together with the
// event's identity and the workflow run it belongs to.
type ListTaskParentOutputsRow struct {
	TaskEventID         int64              `json:"task_event_id"`
	TaskEventInsertedAt pgtype.Timestamptz `json:"task_event_inserted_at"`
	WorkflowRunID       pgtype.UUID        `json:"workflow_run_id"`
	Output              []byte             `json:"output"`
}

// Lists the outputs of parent steps for a list of tasks. This is recursive because it looks at all grandparents
// of the tasks as well.
func (q *Queries) ListTaskParentOutputs(ctx context.Context, db DBTX, arg ListTaskParentOutputsParams) ([]*ListTaskParentOutputsRow, error) {
	rows, err := db.Query(ctx, listTaskParentOutputs, arg.Taskids, arg.Taskinsertedats, arg.Tenantid)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*ListTaskParentOutputsRow
	for rows.Next() {
		var i ListTaskParentOutputsRow
		if err := rows.Scan(
			&i.TaskEventID,
			&i.TaskEventInsertedAt,
			&i.WorkflowRunID,
			&i.Output,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
// listTasks selects full v1_task rows for a batch of task IDs within a tenant.
// The column list is kept in the table's declared order so Scan below maps
// positionally onto the V1Task model.
const listTasks = `-- name: ListTasks :many
SELECT id, inserted_at, tenant_id, queue, action_id, step_id, step_readable_id, workflow_id, workflow_version_id, workflow_run_id, schedule_timeout, step_timeout, priority, sticky, desired_worker_id, external_id, display_name, input, retry_count, internal_retry_count, app_retry_count, step_index, additional_metadata, dag_id, dag_inserted_at, parent_task_external_id, parent_task_id, parent_task_inserted_at, child_index, child_key, initial_state, initial_state_reason, concurrency_parent_strategy_ids, concurrency_strategy_ids, concurrency_keys, retry_backoff_factor, retry_max_backoff
FROM
v1_task
WHERE
tenant_id = $1
AND id = ANY($2::bigint[])
`

// ListTasksParams holds the tenant filter and the task IDs to fetch.
type ListTasksParams struct {
	TenantID pgtype.UUID `json:"tenant_id"`
	Ids      []int64     `json:"ids"`
}

// ListTasks returns the complete v1_task rows for the given task IDs, scoped
// to a tenant.
func (q *Queries) ListTasks(ctx context.Context, db DBTX, arg ListTasksParams) ([]*V1Task, error) {
	rows, err := db.Query(ctx, listTasks, arg.TenantID, arg.Ids)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*V1Task
	for rows.Next() {
		var i V1Task
		// Scan order must match the SELECT column list above exactly.
		if err := rows.Scan(
			&i.ID,
			&i.InsertedAt,
			&i.TenantID,
			&i.Queue,
			&i.ActionID,
			&i.StepID,
			&i.StepReadableID,
			&i.WorkflowID,
			&i.WorkflowVersionID,
			&i.WorkflowRunID,
			&i.ScheduleTimeout,
			&i.StepTimeout,
			&i.Priority,
			&i.Sticky,
			&i.DesiredWorkerID,
			&i.ExternalID,
			&i.DisplayName,
			&i.Input,
			&i.RetryCount,
			&i.InternalRetryCount,
			&i.AppRetryCount,
			&i.StepIndex,
			&i.AdditionalMetadata,
			&i.DagID,
			&i.DagInsertedAt,
			&i.ParentTaskExternalID,
			&i.ParentTaskID,
			&i.ParentTaskInsertedAt,
			&i.ChildIndex,
			&i.ChildKey,
			&i.InitialState,
			&i.InitialStateReason,
			&i.ConcurrencyParentStrategyIds,
			&i.ConcurrencyStrategyIds,
			&i.ConcurrencyKeys,
			&i.RetryBackoffFactor,
			&i.RetryMaxBackoff,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const listTasksForReplay = `-- name: ListTasksForReplay :many
WITH RECURSIVE augmented_tasks AS (
    -- First, select the tasks from the input
    SELECT
        id,
        inserted_at,
        tenant_id,
        dag_id,
        dag_inserted_at,
        step_id
    FROM
        v1_task
    WHERE
        (id, inserted_at) IN (
            SELECT
                unnest($1::bigint[]),
                unnest($2::timestamptz[])
        )
        AND tenant_id = $3::uuid
    UNION
    -- Then, select the tasks that are children of the input tasks
    SELECT
        t.id,
        t.inserted_at,
        t.tenant_id,
        t.dag_id,
        t.dag_inserted_at,
        t.step_id
    FROM
        augmented_tasks at
    JOIN
        "Step" s1 ON s1."id" = at.step_id
    JOIN
        v1_dag_to_task dt ON dt.dag_id = at.dag_id AND dt.dag_inserted_at = at.dag_inserted_at
    JOIN
        v1_task t ON t.id = dt.task_id AND t.inserted_at = dt.task_inserted_at
    JOIN
        "Step" s2 ON s2."id" = t.step_id
    JOIN
        "_StepOrder" so ON so."B" = s2."id" AND so."A" = s1."id"
), locked_tasks AS (
    SELECT
        t.id,
        t.inserted_at,
        t.retry_count,
        t.dag_id,
        t.dag_inserted_at,
        t.step_readable_id,
        t.step_id,
        t.workflow_id,
        t.external_id,
        t.input,
        t.additional_metadata,
        t.parent_task_external_id,
        t.parent_task_id,
        t.parent_task_inserted_at,
        t.step_index,
        t.child_index,
        t.child_key
    FROM
        v1_task t
    WHERE
        (t.id, t.inserted_at) IN (
            SELECT
                id, inserted_at
            FROM
                augmented_tasks
        )
        AND t.tenant_id = $3::uuid
    -- order by the task id to get a stable lock order
    ORDER BY
        id
    FOR UPDATE
), step_orders AS (
    SELECT
        t.step_id,
        array_agg(so."A")::uuid[] as "parents"
    FROM
        locked_tasks t
    JOIN
        "Step" s ON s."id" = t.step_id
    JOIN
        "_StepOrder" so ON so."B" = s."id"
    GROUP BY
        t.step_id
)
SELECT
    t.id,
    t.inserted_at,
    t.retry_count,
    t.dag_id,
    t.dag_inserted_at,
    t.step_readable_id,
    t.step_id,
    t.workflow_id,
    t.external_id,
    t.input,
    t.additional_metadata,
    t.parent_task_external_id,
    t.parent_task_id,
    t.parent_task_inserted_at,
    t.step_index,
    t.child_index,
    t.child_key,
    j."kind" as "jobKind",
    COALESCE(so."parents", '{}'::uuid[]) as "parents"
FROM
    locked_tasks t
JOIN
    "Step" s ON s."id" = t.step_id
JOIN
    "Job" j ON j."id" = s."jobId"
LEFT JOIN
    step_orders so ON so.step_id = t.step_id
`

// ListTasksForReplayParams holds the bind parameters for ListTasksForReplay.
// Taskids and Taskinsertedats are parallel arrays identifying (id, inserted_at)
// composite keys.
type ListTasksForReplayParams struct {
	Taskids         []int64              `json:"taskids"`
	Taskinsertedats []pgtype.Timestamptz `json:"taskinsertedats"`
	Tenantid        pgtype.UUID          `json:"tenantid"`
}

// ListTasksForReplayRow is one locked task selected for replay, along with its
// job kind and the step ids of its parent steps.
type ListTasksForReplayRow struct {
	ID                   int64              `json:"id"`
	InsertedAt           pgtype.Timestamptz `json:"inserted_at"`
	RetryCount           int32              `json:"retry_count"`
	DagID                pgtype.Int8        `json:"dag_id"`
	DagInsertedAt        pgtype.Timestamptz `json:"dag_inserted_at"`
	StepReadableID       string             `json:"step_readable_id"`
	StepID               pgtype.UUID        `json:"step_id"`
	WorkflowID           pgtype.UUID        `json:"workflow_id"`
	ExternalID           pgtype.UUID        `json:"external_id"`
	Input                []byte             `json:"input"`
	AdditionalMetadata   []byte             `json:"additional_metadata"`
	ParentTaskExternalID pgtype.UUID        `json:"parent_task_external_id"`
	ParentTaskID         pgtype.Int8        `json:"parent_task_id"`
	ParentTaskInsertedAt pgtype.Timestamptz `json:"parent_task_inserted_at"`
	StepIndex            int64              `json:"step_index"`
	ChildIndex           pgtype.Int8        `json:"child_index"`
	ChildKey             pgtype.Text        `json:"child_key"`
	JobKind              JobKind            `json:"jobKind"`
	Parents              []pgtype.UUID      `json:"parents"`
}

// Lists tasks for replay by recursively selecting all tasks that are children of the input tasks,
// then locks the tasks for replay.
func (q *Queries) ListTasksForReplay(ctx context.Context, db DBTX, arg ListTasksForReplayParams) ([]*ListTasksForReplayRow, error) {
	rows, err := db.Query(ctx, listTasksForReplay, arg.Taskids, arg.Taskinsertedats, arg.Tenantid)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*ListTasksForReplayRow
	for rows.Next() {
		var i ListTasksForReplayRow
		if err := rows.Scan(
			&i.ID,
			&i.InsertedAt,
			&i.RetryCount,
			&i.DagID,
			&i.DagInsertedAt,
			&i.StepReadableID,
			&i.StepID,
			&i.WorkflowID,
			&i.ExternalID,
			&i.Input,
			&i.AdditionalMetadata,
			&i.ParentTaskExternalID,
			&i.ParentTaskID,
			&i.ParentTaskInsertedAt,
			&i.StepIndex,
			&i.ChildIndex,
			&i.ChildKey,
			&i.JobKind,
			&i.Parents,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const listTasksToReassign = `-- name: ListTasksToReassign :many
WITH tasks_on_inactive_workers AS (
    SELECT
        runtime.task_id,
        runtime.task_inserted_at,
        runtime.retry_count
    FROM
        "Worker" w
    JOIN
        v1_task_runtime runtime ON w."id" = runtime.worker_id
    WHERE
        w."tenantId" = $1::uuid
        AND w."lastHeartbeatAt" < NOW() - INTERVAL '30 seconds'
    LIMIT
        COALESCE($2::integer, 1000)
)
SELECT
    v1_task.id,
    v1_task.inserted_at,
    v1_task.retry_count
FROM
    v1_task
JOIN
    tasks_on_inactive_workers lrs ON lrs.task_id = v1_task.id AND lrs.task_inserted_at = v1_task.inserted_at
`

// ListTasksToReassignParams holds the bind parameters for ListTasksToReassign.
// Limit defaults to 1000 when unset (see COALESCE in the query).
type ListTasksToReassignParams struct {
	Tenantid pgtype.UUID `json:"tenantid"`
	Limit    pgtype.Int4 `json:"limit"`
}

// ListTasksToReassignRow identifies a task (by composite key plus retry count)
// whose assigned worker has stopped heartbeating.
type ListTasksToReassignRow struct {
	ID         int64              `json:"id"`
	InsertedAt pgtype.Timestamptz `json:"inserted_at"`
	RetryCount int32              `json:"retry_count"`
}

// ListTasksToReassign returns tasks whose worker's last heartbeat is more than
// 30 seconds old, so the caller can reassign them to an active worker.
// NOTE(review): unlike ListTasksToTimeout, the CTE here has no ORDER BY or
// FOR UPDATE SKIP LOCKED — presumably concurrent reassign runs are serialized
// elsewhere; verify against the caller.
func (q *Queries) ListTasksToReassign(ctx context.Context, db DBTX, arg ListTasksToReassignParams) ([]*ListTasksToReassignRow, error) {
	rows, err := db.Query(ctx, listTasksToReassign, arg.Tenantid, arg.Limit)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*ListTasksToReassignRow
	for rows.Next() {
		var i ListTasksToReassignRow
		if err := rows.Scan(&i.ID, &i.InsertedAt, &i.RetryCount); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const listTasksToTimeout = `-- name: ListTasksToTimeout :many
WITH expired_runtimes AS (
    SELECT
        task_id,
        task_inserted_at,
        retry_count,
        worker_id
    FROM
        v1_task_runtime
    WHERE
        tenant_id = $1::uuid
        AND timeout_at <= NOW()
    ORDER BY
        task_id, task_inserted_at, retry_count
    LIMIT
        COALESCE($2::integer, 1000)
    FOR UPDATE SKIP LOCKED
)
SELECT
    v1_task.id,
    v1_task.inserted_at,
    v1_task.retry_count,
    v1_task.step_id,
    v1_task.external_id,
    v1_task.workflow_run_id,
    v1_task.step_timeout,
    v1_task.app_retry_count,
    v1_task.retry_backoff_factor,
    v1_task.retry_max_backoff,
    expired_runtimes.worker_id
FROM
    v1_task
JOIN
    expired_runtimes ON expired_runtimes.task_id = v1_task.id AND expired_runtimes.task_inserted_at = v1_task.inserted_at
`

// ListTasksToTimeoutParams holds the bind parameters for ListTasksToTimeout.
// Limit defaults to 1000 when unset (see COALESCE in the query).
type ListTasksToTimeoutParams struct {
	Tenantid pgtype.UUID `json:"tenantid"`
	Limit    pgtype.Int4 `json:"limit"`
}

// ListTasksToTimeoutRow is a timed-out task together with the retry/backoff
// configuration needed to decide whether and when to retry it.
type ListTasksToTimeoutRow struct {
	ID                 int64              `json:"id"`
	InsertedAt         pgtype.Timestamptz `json:"inserted_at"`
	RetryCount         int32              `json:"retry_count"`
	StepID             pgtype.UUID        `json:"step_id"`
	ExternalID         pgtype.UUID        `json:"external_id"`
	WorkflowRunID      pgtype.UUID        `json:"workflow_run_id"`
	StepTimeout        pgtype.Text        `json:"step_timeout"`
	AppRetryCount      int32              `json:"app_retry_count"`
	RetryBackoffFactor pgtype.Float8      `json:"retry_backoff_factor"`
	RetryMaxBackoff    pgtype.Int4        `json:"retry_max_backoff"`
	WorkerID           pgtype.UUID        `json:"worker_id"`
}

// ListTasksToTimeout returns tasks whose runtime timeout_at has elapsed. The
// expired runtime rows are locked with FOR UPDATE SKIP LOCKED (in a stable
// order) so concurrent timeout processors don't pick up the same tasks.
func (q *Queries) ListTasksToTimeout(ctx context.Context, db DBTX, arg ListTasksToTimeoutParams) ([]*ListTasksToTimeoutRow, error) {
	rows, err := db.Query(ctx, listTasksToTimeout, arg.Tenantid, arg.Limit)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*ListTasksToTimeoutRow
	for rows.Next() {
		var i ListTasksToTimeoutRow
		if err := rows.Scan(
			&i.ID,
			&i.InsertedAt,
			&i.RetryCount,
			&i.StepID,
			&i.ExternalID,
			&i.WorkflowRunID,
			&i.StepTimeout,
			&i.AppRetryCount,
			&i.RetryBackoffFactor,
			&i.RetryMaxBackoff,
			&i.WorkerID,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const lockDAGsForReplay = `-- name: LockDAGsForReplay :many
SELECT
    id
FROM
    v1_dag
WHERE
    id = ANY($1::bigint[])
    AND tenant_id = $2::uuid
ORDER BY id
FOR UPDATE SKIP LOCKED
`

// LockDAGsForReplayParams holds the bind parameters for LockDAGsForReplay.
type LockDAGsForReplayParams struct {
	Dagids   []int64     `json:"dagids"`
	Tenantid pgtype.UUID `json:"tenantid"`
}

// Locks a list of DAGs for replay. Returns successfully locked DAGs which can be replayed.
// We skip locked tasks because replays are the only thing that can lock a DAG for updates
func (q *Queries) LockDAGsForReplay(ctx context.Context, db DBTX, arg LockDAGsForReplayParams) ([]int64, error) {
	rows, err := db.Query(ctx, lockDAGsForReplay, arg.Dagids, arg.Tenantid)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []int64
	for rows.Next() {
		var id int64
		if err := rows.Scan(&id); err != nil {
			return nil, err
		}
		items = append(items, id)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const lockSignalCreatedEvents = `-- name: LockSignalCreatedEvents :many
WITH input AS (
    SELECT
        UNNEST($1::BIGINT[]) AS task_id,
        UNNEST($2::TIMESTAMPTZ[]) AS task_inserted_at,
        UNNEST($3::TEXT[]) AS event_key
), distinct_events AS (
    SELECT DISTINCT
        task_id, task_inserted_at
    FROM
        input
), events_to_lock AS (
    SELECT
        e.id,
        e.event_key,
        e.data,
        e.task_id,
        e.task_inserted_at,
        e.inserted_at
    FROM
        v1_task_event e
    JOIN
        distinct_events de
        ON e.task_id = de.task_id
        AND e.task_inserted_at = de.task_inserted_at
    WHERE
        e.tenant_id = $4::uuid
        AND e.event_type = 'SIGNAL_CREATED'
)
SELECT
    e.id,
    e.inserted_at,
    e.event_key,
    e.data
FROM
    events_to_lock e
WHERE
    e.event_key = ANY(SELECT event_key FROM input)
`

// LockSignalCreatedEventsParams holds the bind parameters for
// LockSignalCreatedEvents. Taskids, Taskinsertedats and Eventkeys are parallel
// arrays forming (task_id, task_inserted_at, event_key) tuples.
type LockSignalCreatedEventsParams struct {
	Taskids         []int64              `json:"taskids"`
	Taskinsertedats []pgtype.Timestamptz `json:"taskinsertedats"`
	Eventkeys       []string             `json:"eventkeys"`
	Tenantid        pgtype.UUID          `json:"tenantid"`
}

// LockSignalCreatedEventsRow is a matched SIGNAL_CREATED event with its key and
// raw data payload.
type LockSignalCreatedEventsRow struct {
	ID         int64              `json:"id"`
	InsertedAt pgtype.Timestamptz `json:"inserted_at"`
	EventKey   pgtype.Text        `json:"event_key"`
	Data       []byte             `json:"data"`
}

// Places a lock on the SIGNAL_CREATED events to make sure concurrent operations don't
// modify the events.
func (q *Queries) LockSignalCreatedEvents(ctx context.Context, db DBTX, arg LockSignalCreatedEventsParams) ([]*LockSignalCreatedEventsRow, error) {
	rows, err := db.Query(ctx, lockSignalCreatedEvents,
		arg.Taskids,
		arg.Taskinsertedats,
		arg.Eventkeys,
		arg.Tenantid,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*LockSignalCreatedEventsRow
	for rows.Next() {
		var i LockSignalCreatedEventsRow
		if err := rows.Scan(
			&i.ID,
			&i.InsertedAt,
			&i.EventKey,
			&i.Data,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const lookupExternalIds = `-- name: LookupExternalIds :many
SELECT
    tenant_id, external_id, task_id, dag_id, inserted_at
FROM
    v1_lookup_table
WHERE
    external_id = ANY($1::uuid[])
    AND tenant_id = $2::uuid
`

// LookupExternalIdsParams holds the bind parameters for LookupExternalIds.
type LookupExternalIdsParams struct {
	Externalids []pgtype.UUID `json:"externalids"`
	Tenantid    pgtype.UUID   `json:"tenantid"`
}

// LookupExternalIds resolves external UUIDs to their v1_lookup_table rows
// (task id or DAG id plus insertion time) within a tenant. Unknown external
// ids are simply absent from the result.
func (q *Queries) LookupExternalIds(ctx context.Context, db DBTX, arg LookupExternalIdsParams) ([]*V1LookupTable, error) {
	rows, err := db.Query(ctx, lookupExternalIds, arg.Externalids, arg.Tenantid)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*V1LookupTable
	for rows.Next() {
		var i V1LookupTable
		if err := rows.Scan(
			&i.TenantID,
			&i.ExternalID,
			&i.TaskID,
			&i.DagID,
			&i.InsertedAt,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const manualSlotRelease = `-- name: ManualSlotRelease :one
WITH task AS (
    SELECT
        t.id,
        t.inserted_at,
        t.retry_count,
        t.tenant_id
    FROM
        v1_lookup_table lt
    JOIN
        v1_task t ON t.id = lt.task_id AND t.inserted_at = lt.inserted_at
    WHERE
        lt.external_id = $1::uuid AND
        lt.tenant_id = $2::uuid
), locked_runtime AS (
    SELECT
        tr.task_id,
        tr.task_inserted_at,
        tr.retry_count,
        tr.worker_id
    FROM
        v1_task_runtime tr
    WHERE
        (tr.task_id, tr.task_inserted_at, tr.retry_count) IN (SELECT id, inserted_at, retry_count FROM task)
    ORDER BY
        task_id, task_inserted_at, retry_count
    FOR UPDATE
)
UPDATE
    v1_task_runtime
SET
    worker_id = NULL
FROM
    task
WHERE
    (v1_task_runtime.task_id, v1_task_runtime.task_inserted_at, v1_task_runtime.retry_count) IN (SELECT id, inserted_at, retry_count FROM task)
RETURNING
    v1_task_runtime.task_id, v1_task_runtime.task_inserted_at, v1_task_runtime.retry_count, v1_task_runtime.worker_id, v1_task_runtime.tenant_id, v1_task_runtime.timeout_at
`

// ManualSlotReleaseParams holds the bind parameters for ManualSlotRelease.
type ManualSlotReleaseParams struct {
	Externalid pgtype.UUID `json:"externalid"`
	Tenantid   pgtype.UUID `json:"tenantid"`
}

// ManualSlotRelease clears worker_id on the runtime row of the task identified
// by its external UUID, freeing the worker slot while the task keeps running.
// Returns the updated v1_task_runtime row (pgx.ErrNoRows if no runtime matched).
// NOTE(review): the locked_runtime CTE is not referenced by the UPDATE;
// PostgreSQL does not execute unreferenced CTEs, so its FOR UPDATE ordering may
// never take effect — verify in the source tasks.sql.
func (q *Queries) ManualSlotRelease(ctx context.Context, db DBTX, arg ManualSlotReleaseParams) (*V1TaskRuntime, error) {
	row := db.QueryRow(ctx, manualSlotRelease, arg.Externalid, arg.Tenantid)
	var i V1TaskRuntime
	err := row.Scan(
		&i.TaskID,
		&i.TaskInsertedAt,
		&i.RetryCount,
		&i.WorkerID,
		&i.TenantID,
		&i.TimeoutAt,
	)
	return &i, err
}
const preflightCheckDAGsForReplay = `-- name: PreflightCheckDAGsForReplay :many
WITH dags_to_step_counts AS (
    SELECT
        d.id,
        d.external_id,
        d.inserted_at,
        COUNT(DISTINCT s."id") as step_count,
        COUNT(DISTINCT dt.task_id) as task_count
    FROM
        v1_dag d
    JOIN
        v1_dag_to_task dt ON dt.dag_id = d.id
    JOIN
        "WorkflowVersion" wv ON wv."id" = d.workflow_version_id
    LEFT JOIN
        "Job" j ON j."workflowVersionId" = wv."id"
    LEFT JOIN
        "Step" s ON s."jobId" = j."id"
    WHERE
        d.id = ANY($1::bigint[])
        AND d.tenant_id = $2::uuid
    GROUP BY
        d.id,
        d.inserted_at
)
SELECT
    d.id,
    d.external_id,
    d.inserted_at,
    d.step_count,
    d.task_count
FROM
    dags_to_step_counts d
`

// PreflightCheckDAGsForReplayParams holds the bind parameters for
// PreflightCheckDAGsForReplay.
type PreflightCheckDAGsForReplayParams struct {
	Dagids   []int64     `json:"dagids"`
	Tenantid pgtype.UUID `json:"tenantid"`
}

// PreflightCheckDAGsForReplayRow carries a DAG's distinct step count and
// distinct written-task count for comparison by the caller.
type PreflightCheckDAGsForReplayRow struct {
	ID         int64              `json:"id"`
	ExternalID pgtype.UUID        `json:"external_id"`
	InsertedAt pgtype.Timestamptz `json:"inserted_at"`
	StepCount  int64              `json:"step_count"`
	TaskCount  int64              `json:"task_count"`
}

// Checks whether DAGs can be replayed by ensuring that the length of the tasks which have been written
// match the length of steps in the DAG. This assumes that we have a lock on DAGs so concurrent replays
// don't interfere with each other. It also does not check for whether the tasks are running, as that's
// checked in a different query. It returns DAGs which cannot be replayed.
func (q *Queries) PreflightCheckDAGsForReplay(ctx context.Context, db DBTX, arg PreflightCheckDAGsForReplayParams) ([]*PreflightCheckDAGsForReplayRow, error) {
	rows, err := db.Query(ctx, preflightCheckDAGsForReplay, arg.Dagids, arg.Tenantid)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*PreflightCheckDAGsForReplayRow
	for rows.Next() {
		var i PreflightCheckDAGsForReplayRow
		if err := rows.Scan(
			&i.ID,
			&i.ExternalID,
			&i.InsertedAt,
			&i.StepCount,
			&i.TaskCount,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const preflightCheckTasksForReplay = `-- name: PreflightCheckTasksForReplay :many
WITH input AS (
    SELECT
        UNNEST($3::bigint[]) AS task_id,
        UNNEST($4::timestamptz[]) AS task_inserted_at
), relevant_tasks AS (
    SELECT id, inserted_at, tenant_id, queue, action_id, step_id, step_readable_id, workflow_id, workflow_version_id, workflow_run_id, schedule_timeout, step_timeout, priority, sticky, desired_worker_id, external_id, display_name, input, retry_count, internal_retry_count, app_retry_count, step_index, additional_metadata, dag_id, dag_inserted_at, parent_task_external_id, parent_task_id, parent_task_inserted_at, child_index, child_key, initial_state, initial_state_reason, concurrency_parent_strategy_ids, concurrency_strategy_ids, concurrency_keys, retry_backoff_factor, retry_max_backoff, task_id, task_inserted_at
    FROM
        v1_task t
    JOIN
        input i ON i.task_id = t.id AND i.task_inserted_at = t.inserted_at
    WHERE
        -- prune partitions with minInsertedAt
        t.inserted_at >= $2::TIMESTAMPTZ
)
SELECT t.id, t.dag_id
FROM relevant_tasks t
LEFT JOIN
    v1_task_runtime tr ON tr.task_id = t.id AND tr.task_inserted_at = t.inserted_at AND tr.retry_count = t.retry_count
LEFT JOIN
    v1_concurrency_slot cs ON cs.task_id = t.id AND cs.task_inserted_at = t.inserted_at AND cs.task_retry_count = t.retry_count
LEFT JOIN
    v1_retry_queue_item rqi ON rqi.task_id = t.id AND rqi.task_inserted_at = t.inserted_at AND rqi.task_retry_count = t.retry_count
WHERE
    t.tenant_id = $1::uuid
    AND NOT EXISTS (
        SELECT 1
        FROM v1_task_event e
        WHERE
            -- prune partitions with minInsertedAt
            e.task_inserted_at >= $2::TIMESTAMPTZ
            AND (e.task_id, e.task_inserted_at, e.retry_count) = (t.id, t.inserted_at, t.retry_count)
            AND e.event_type = ANY('{COMPLETED, FAILED, CANCELLED}'::v1_task_event_type[])
    )
    AND (tr.task_id IS NOT NULL OR cs.task_id IS NOT NULL OR rqi.task_id IS NOT NULL)
`

// PreflightCheckTasksForReplayParams holds the bind parameters for
// PreflightCheckTasksForReplay. Mininsertedat is used purely to prune
// time-based partitions on v1_task and v1_task_event.
type PreflightCheckTasksForReplayParams struct {
	Tenantid        pgtype.UUID          `json:"tenantid"`
	Mininsertedat   pgtype.Timestamptz   `json:"mininsertedat"`
	Taskids         []int64              `json:"taskids"`
	Taskinsertedats []pgtype.Timestamptz `json:"taskinsertedats"`
}

// PreflightCheckTasksForReplayRow identifies a task (and its DAG, if any) that
// is still active and therefore cannot be replayed yet.
type PreflightCheckTasksForReplayRow struct {
	ID    int64       `json:"id"`
	DagID pgtype.Int8 `json:"dag_id"`
}

// Checks whether tasks can be replayed by ensuring that they don't have any active runtimes,
// concurrency slots, or retry queue items. Returns the tasks which cannot be replayed.
func (q *Queries) PreflightCheckTasksForReplay(ctx context.Context, db DBTX, arg PreflightCheckTasksForReplayParams) ([]*PreflightCheckTasksForReplayRow, error) {
	rows, err := db.Query(ctx, preflightCheckTasksForReplay,
		arg.Tenantid,
		arg.Mininsertedat,
		arg.Taskids,
		arg.Taskinsertedats,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*PreflightCheckTasksForReplayRow
	for rows.Next() {
		var i PreflightCheckTasksForReplayRow
		if err := rows.Scan(&i.ID, &i.DagID); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const processRetryQueueItems = `-- name: ProcessRetryQueueItems :many
WITH rqis_to_delete AS (
    SELECT
        task_id, task_inserted_at, task_retry_count, retry_after, tenant_id
    FROM
        v1_retry_queue_item rqi
    WHERE
        rqi.tenant_id = $1::uuid
        AND rqi.retry_after <= NOW()
    ORDER BY
        rqi.task_id, rqi.task_inserted_at, rqi.task_retry_count
    LIMIT
        COALESCE($2::integer, 1000)
    FOR UPDATE SKIP LOCKED
)
DELETE FROM
    v1_retry_queue_item
WHERE
    (task_id, task_inserted_at, task_retry_count) IN (SELECT task_id, task_inserted_at, task_retry_count FROM rqis_to_delete)
RETURNING task_id, task_inserted_at, task_retry_count, retry_after, tenant_id
`

// ProcessRetryQueueItemsParams holds the bind parameters for
// ProcessRetryQueueItems. Limit defaults to 1000 when unset.
type ProcessRetryQueueItemsParams struct {
	Tenantid pgtype.UUID `json:"tenantid"`
	Limit    pgtype.Int4 `json:"limit"`
}

// ProcessRetryQueueItems pops retry queue items whose retry_after has elapsed:
// due items are locked (SKIP LOCKED, stable order, so concurrent processors
// don't collide), deleted, and returned to the caller for re-enqueueing.
func (q *Queries) ProcessRetryQueueItems(ctx context.Context, db DBTX, arg ProcessRetryQueueItemsParams) ([]*V1RetryQueueItem, error) {
	rows, err := db.Query(ctx, processRetryQueueItems, arg.Tenantid, arg.Limit)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*V1RetryQueueItem
	for rows.Next() {
		var i V1RetryQueueItem
		if err := rows.Scan(
			&i.TaskID,
			&i.TaskInsertedAt,
			&i.TaskRetryCount,
			&i.RetryAfter,
			&i.TenantID,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const refreshTimeoutBy = `-- name: RefreshTimeoutBy :one
WITH task AS (
    SELECT
        t.id,
        t.inserted_at,
        t.retry_count,
        t.tenant_id
    FROM
        v1_lookup_table lt
    JOIN
        v1_task t ON t.id = lt.task_id AND t.inserted_at = lt.inserted_at
    WHERE
        lt.external_id = $2::uuid AND
        lt.tenant_id = $3::uuid
), locked_runtime AS (
    SELECT
        tr.task_id,
        tr.task_inserted_at,
        tr.retry_count,
        tr.worker_id
    FROM
        v1_task_runtime tr
    WHERE
        (tr.task_id, tr.task_inserted_at, tr.retry_count) IN (SELECT id, inserted_at, retry_count FROM task)
    ORDER BY
        task_id, task_inserted_at, retry_count
    FOR UPDATE
)
UPDATE
    v1_task_runtime
SET
    timeout_at = timeout_at + convert_duration_to_interval($1::text)
FROM
    task
WHERE
    (v1_task_runtime.task_id, v1_task_runtime.task_inserted_at, v1_task_runtime.retry_count) IN (SELECT id, inserted_at, retry_count FROM task)
RETURNING
    v1_task_runtime.task_id, v1_task_runtime.task_inserted_at, v1_task_runtime.retry_count, v1_task_runtime.worker_id, v1_task_runtime.tenant_id, v1_task_runtime.timeout_at
`

// RefreshTimeoutByParams holds the bind parameters for RefreshTimeoutBy.
// IncrementTimeoutBy is a duration string parsed server-side by
// convert_duration_to_interval.
type RefreshTimeoutByParams struct {
	IncrementTimeoutBy pgtype.Text `json:"incrementTimeoutBy"`
	Externalid         pgtype.UUID `json:"externalid"`
	Tenantid           pgtype.UUID `json:"tenantid"`
}

// RefreshTimeoutBy extends the runtime timeout of the task identified by its
// external UUID, adding the given duration to timeout_at. Returns the updated
// v1_task_runtime row (pgx.ErrNoRows if no runtime matched).
// NOTE(review): the locked_runtime CTE is not referenced by the UPDATE;
// PostgreSQL does not execute unreferenced CTEs, so its FOR UPDATE ordering may
// never take effect — verify in the source tasks.sql.
func (q *Queries) RefreshTimeoutBy(ctx context.Context, db DBTX, arg RefreshTimeoutByParams) (*V1TaskRuntime, error) {
	row := db.QueryRow(ctx, refreshTimeoutBy, arg.IncrementTimeoutBy, arg.Externalid, arg.Tenantid)
	var i V1TaskRuntime
	err := row.Scan(
		&i.TaskID,
		&i.TaskInsertedAt,
		&i.RetryCount,
		&i.WorkerID,
		&i.TenantID,
		&i.TimeoutAt,
	)
	return &i, err
}