// Code generated by sqlc. DO NOT EDIT.
// versions:
//   sqlc v1.29.0
// source: olap.sql

package sqlcv1

import (
	"context"

	"github.com/jackc/pgx/v5/pgtype"
)

const acquireOrExtendOLAPCutoverJobLease = `-- name: AcquireOrExtendOLAPCutoverJobLease :one
WITH inputs AS (
    SELECT
        $2::DATE AS key,
        $1::UUID AS lease_process_id,
        $3::TIMESTAMPTZ AS lease_expires_at,
        $4::UUID AS last_tenant_id,
        $5::UUID AS last_external_id,
        $6::TIMESTAMPTZ AS last_inserted_at
), any_lease_held_by_other_process AS (
    -- need coalesce here in case there are no rows that don't belong to this process
    SELECT COALESCE(BOOL_OR(lease_expires_at > NOW()), FALSE) AS lease_exists
    FROM v1_payloads_olap_cutover_job_offset
    WHERE lease_process_id != $1::UUID
), to_insert AS (
    SELECT key, lease_process_id, lease_expires_at, last_tenant_id, last_external_id, last_inserted_at
    FROM inputs
    -- if a lease is held by another process, we shouldn't try to insert a new row regardless
    -- of which key we're trying to acquire a lease on
    WHERE NOT (SELECT lease_exists FROM any_lease_held_by_other_process)
)

INSERT INTO v1_payloads_olap_cutover_job_offset (key, lease_process_id, lease_expires_at, last_tenant_id, last_external_id, last_inserted_at)
SELECT
    ti.key,
    ti.lease_process_id,
    ti.lease_expires_at,
    ti.last_tenant_id,
    ti.last_external_id,
    ti.last_inserted_at
FROM to_insert ti
ON CONFLICT (key)
DO UPDATE SET
    -- if the lease is held by this process, then we extend the offset to the new value
    -- otherwise it's a new process acquiring the lease, so we should keep the offset where it was before
    last_tenant_id = CASE
        WHEN EXCLUDED.lease_process_id = v1_payloads_olap_cutover_job_offset.lease_process_id THEN EXCLUDED.last_tenant_id
        ELSE v1_payloads_olap_cutover_job_offset.last_tenant_id
    END,
    last_external_id = CASE
        WHEN EXCLUDED.lease_process_id = v1_payloads_olap_cutover_job_offset.lease_process_id THEN EXCLUDED.last_external_id
        ELSE v1_payloads_olap_cutover_job_offset.last_external_id
    END,
    last_inserted_at = CASE
        WHEN EXCLUDED.lease_process_id = v1_payloads_olap_cutover_job_offset.lease_process_id THEN EXCLUDED.last_inserted_at
        ELSE v1_payloads_olap_cutover_job_offset.last_inserted_at
    END,

    lease_process_id = EXCLUDED.lease_process_id,
    lease_expires_at = EXCLUDED.lease_expires_at
WHERE v1_payloads_olap_cutover_job_offset.lease_expires_at < NOW() OR v1_payloads_olap_cutover_job_offset.lease_process_id = $1::UUID
RETURNING key, is_completed, lease_process_id, lease_expires_at, last_tenant_id, last_external_id, last_inserted_at
`

type AcquireOrExtendOLAPCutoverJobLeaseParams struct {
	Leaseprocessid pgtype.UUID        `json:"leaseprocessid"`
	Key            pgtype.Date        `json:"key"`
	Leaseexpiresat pgtype.Timestamptz `json:"leaseexpiresat"`
	Lasttenantid   pgtype.UUID        `json:"lasttenantid"`
	Lastexternalid pgtype.UUID        `json:"lastexternalid"`
	Lastinsertedat pgtype.Timestamptz `json:"lastinsertedat"`
}

func (q *Queries) AcquireOrExtendOLAPCutoverJobLease(ctx context.Context, db DBTX, arg AcquireOrExtendOLAPCutoverJobLeaseParams) (*V1PayloadsOlapCutoverJobOffset, error) {
	row := db.QueryRow(ctx, acquireOrExtendOLAPCutoverJobLease,
		arg.Leaseprocessid,
		arg.Key,
		arg.Leaseexpiresat,
		arg.Lasttenantid,
		arg.Lastexternalid,
		arg.Lastinsertedat,
	)
	var i V1PayloadsOlapCutoverJobOffset
	err := row.Scan(
		&i.Key,
		&i.IsCompleted,
		&i.LeaseProcessID,
		&i.LeaseExpiresAt,
		&i.LastTenantID,
		&i.LastExternalID,
		&i.LastInsertedAt,
	)
	return &i, err
}
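
// Illustrative usage (hand-written sketch, not sqlc output): a cutover worker
// would typically call this in a loop, re-acquiring before each batch so the
// lease is extended while work is in progress. The names pool, processID, and
// partitionDate below are assumptions for this sketch, not part of the API:
//
//	lease, err := q.AcquireOrExtendOLAPCutoverJobLease(ctx, pool, AcquireOrExtendOLAPCutoverJobLeaseParams{
//		Leaseprocessid: processID,
//		Key:            partitionDate,
//		Leaseexpiresat: pgtype.Timestamptz{Time: time.Now().Add(30 * time.Second), Valid: true},
//		// Lasttenantid / Lastexternalid / Lastinsertedat carry the checkpoint from the previous batch.
//	})
//	if errors.Is(err, pgx.ErrNoRows) {
//		// no row returned: another live process holds the lease, so back off
//	}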

const analyzeV1DAGToTaskOLAP = `-- name: AnalyzeV1DAGToTaskOLAP :exec
ANALYZE v1_dag_to_task_olap
`

func (q *Queries) AnalyzeV1DAGToTaskOLAP(ctx context.Context, db DBTX) error {
	_, err := db.Exec(ctx, analyzeV1DAGToTaskOLAP)
	return err
}

const analyzeV1DAGsOLAP = `-- name: AnalyzeV1DAGsOLAP :exec
ANALYZE v1_dags_olap
`

func (q *Queries) AnalyzeV1DAGsOLAP(ctx context.Context, db DBTX) error {
	_, err := db.Exec(ctx, analyzeV1DAGsOLAP)
	return err
}

const analyzeV1LookupTableOLAP = `-- name: AnalyzeV1LookupTableOLAP :exec
ANALYZE v1_lookup_table_olap
`

func (q *Queries) AnalyzeV1LookupTableOLAP(ctx context.Context, db DBTX) error {
	_, err := db.Exec(ctx, analyzeV1LookupTableOLAP)
	return err
}

const analyzeV1PayloadsOLAP = `-- name: AnalyzeV1PayloadsOLAP :exec
ANALYZE v1_payloads_olap
`

func (q *Queries) AnalyzeV1PayloadsOLAP(ctx context.Context, db DBTX) error {
	_, err := db.Exec(ctx, analyzeV1PayloadsOLAP)
	return err
}

const analyzeV1RunsOLAP = `-- name: AnalyzeV1RunsOLAP :exec
ANALYZE v1_runs_olap
`

func (q *Queries) AnalyzeV1RunsOLAP(ctx context.Context, db DBTX) error {
	_, err := db.Exec(ctx, analyzeV1RunsOLAP)
	return err
}

const analyzeV1TasksOLAP = `-- name: AnalyzeV1TasksOLAP :exec
ANALYZE v1_tasks_olap
`

func (q *Queries) AnalyzeV1TasksOLAP(ctx context.Context, db DBTX) error {
	_, err := db.Exec(ctx, analyzeV1TasksOLAP)
	return err
}

type BulkCreateEventTriggersParams struct {
	RunID         int64              `json:"run_id"`
	RunInsertedAt pgtype.Timestamptz `json:"run_inserted_at"`
	EventID       int64              `json:"event_id"`
	EventSeenAt   pgtype.Timestamptz `json:"event_seen_at"`
	FilterID      pgtype.UUID        `json:"filter_id"`
}

const countEvents = `-- name: CountEvents :one
WITH included_events AS (
    SELECT e.tenant_id, e.id, e.external_id, e.seen_at, e.key, e.payload, e.additional_metadata, e.scope, e.triggering_webhook_name
    FROM v1_event_lookup_table_olap elt
    JOIN v1_events_olap e ON (elt.tenant_id, elt.event_id, elt.event_seen_at) = (e.tenant_id, e.id, e.seen_at)
    WHERE
        e.tenant_id = $1
        AND (
            $2::TEXT[] IS NULL OR
            "key" = ANY($2::TEXT[])
        )
        AND e.seen_at >= $3::TIMESTAMPTZ
        AND (
            $4::TIMESTAMPTZ IS NULL OR
            e.seen_at <= $4::TIMESTAMPTZ
        )
        AND (
            $5::UUID[] IS NULL OR
            EXISTS (
                SELECT 1
                FROM v1_event_to_run_olap etr
                JOIN v1_runs_olap r ON (etr.run_id, etr.run_inserted_at) = (r.id, r.inserted_at)
                WHERE
                    (etr.event_id, etr.event_seen_at) = (e.id, e.seen_at)
                    AND r.workflow_id = ANY($5::UUID[]::UUID[])
            )
        )
        AND (
            $6::UUID[] IS NULL OR
            elt.external_id = ANY($6::UUID[])
        )
        AND (
            $7::JSONB IS NULL OR
            e.additional_metadata @> $7::JSONB
        )
        AND (
            CAST($8::TEXT[] AS v1_readable_status_olap[]) IS NULL OR
            EXISTS (
                SELECT 1
                FROM v1_event_to_run_olap etr
                JOIN v1_runs_olap r ON (etr.run_id, etr.run_inserted_at) = (r.id, r.inserted_at)
                WHERE
                    (etr.event_id, etr.event_seen_at) = (e.id, e.seen_at)
                    AND r.readable_status = ANY(CAST($8::text[]::TEXT[] AS v1_readable_status_olap[]))
            )
        )
        AND (
            $9::TEXT[] IS NULL OR
            e.scope = ANY($9::TEXT[])
        )
    ORDER BY e.seen_at DESC, e.id
    LIMIT 20000
)

SELECT COUNT(*)
FROM included_events e
`

type CountEventsParams struct {
	Tenantid           pgtype.UUID        `json:"tenantid"`
	Keys               []string           `json:"keys"`
	Since              pgtype.Timestamptz `json:"since"`
	Until              pgtype.Timestamptz `json:"until"`
	WorkflowIds        []pgtype.UUID      `json:"workflowIds"`
	EventIds           []pgtype.UUID      `json:"eventIds"`
	AdditionalMetadata []byte             `json:"additionalMetadata"`
	Statuses           []string           `json:"statuses"`
	Scopes             []string           `json:"scopes"`
}

func (q *Queries) CountEvents(ctx context.Context, db DBTX, arg CountEventsParams) (int64, error) {
	row := db.QueryRow(ctx, countEvents,
		arg.Tenantid,
		arg.Keys,
		arg.Since,
		arg.Until,
		arg.WorkflowIds,
		arg.EventIds,
		arg.AdditionalMetadata,
		arg.Statuses,
		arg.Scopes,
	)
	var count int64
	err := row.Scan(&count)
	return count, err
}

type CreateDAGsOLAPParams struct {
	TenantID             pgtype.UUID        `json:"tenant_id"`
	ID                   int64              `json:"id"`
	InsertedAt           pgtype.Timestamptz `json:"inserted_at"`
	ExternalID           pgtype.UUID        `json:"external_id"`
	DisplayName          string             `json:"display_name"`
	WorkflowID           pgtype.UUID        `json:"workflow_id"`
	WorkflowVersionID    pgtype.UUID        `json:"workflow_version_id"`
	Input                []byte             `json:"input"`
	AdditionalMetadata   []byte             `json:"additional_metadata"`
	ParentTaskExternalID pgtype.UUID        `json:"parent_task_external_id"`
	TotalTasks           int32              `json:"total_tasks"`
}

const createIncomingWebhookValidationFailureLogs = `-- name: CreateIncomingWebhookValidationFailureLogs :exec
WITH inputs AS (
    SELECT
        UNNEST($2::TEXT[]) AS incoming_webhook_name,
        UNNEST($3::TEXT[]) AS error
)
INSERT INTO v1_incoming_webhook_validation_failures_olap(
    tenant_id,
    incoming_webhook_name,
    error
)
SELECT
    $1::UUID,
    i.incoming_webhook_name,
    i.error
FROM inputs i
`

type CreateIncomingWebhookValidationFailureLogsParams struct {
	Tenantid             pgtype.UUID `json:"tenantid"`
	Incomingwebhooknames []string    `json:"incomingwebhooknames"`
	Errors               []string    `json:"errors"`
}

func (q *Queries) CreateIncomingWebhookValidationFailureLogs(ctx context.Context, db DBTX, arg CreateIncomingWebhookValidationFailureLogsParams) error {
	_, err := db.Exec(ctx, createIncomingWebhookValidationFailureLogs, arg.Tenantid, arg.Incomingwebhooknames, arg.Errors)
	return err
}

const createOLAPEventPartitions = `-- name: CreateOLAPEventPartitions :exec
SELECT
    create_v1_range_partition('v1_events_olap'::text, $1::date),
    create_v1_range_partition('v1_event_to_run_olap'::text, $1::date),
    create_v1_weekly_range_partition('v1_event_lookup_table_olap'::text, $1::date),
    create_v1_range_partition('v1_incoming_webhook_validation_failures_olap'::text, $1::date),
    create_v1_range_partition('v1_cel_evaluation_failures_olap'::text, $1::date)
`

func (q *Queries) CreateOLAPEventPartitions(ctx context.Context, db DBTX, date pgtype.Date) error {
	_, err := db.Exec(ctx, createOLAPEventPartitions, date)
	return err
}

const createOLAPPartitions = `-- name: CreateOLAPPartitions :exec
SELECT
    create_v1_hash_partitions('v1_task_events_olap_tmp'::text, $1::int),
    create_v1_hash_partitions('v1_task_status_updates_tmp'::text, $1::int),
    create_v1_olap_partition_with_date_and_status('v1_tasks_olap'::text, $2::date),
    create_v1_olap_partition_with_date_and_status('v1_runs_olap'::text, $2::date),
    create_v1_olap_partition_with_date_and_status('v1_dags_olap'::text, $2::date),
    create_v1_range_partition('v1_payloads_olap'::text, $2::date)
`

type CreateOLAPPartitionsParams struct {
	Partitions int32       `json:"partitions"`
	Date       pgtype.Date `json:"date"`
}

func (q *Queries) CreateOLAPPartitions(ctx context.Context, db DBTX, arg CreateOLAPPartitionsParams) error {
	_, err := db.Exec(ctx, createOLAPPartitions, arg.Partitions, arg.Date)
	return err
}

const createOLAPPayloadRangeChunks = `-- name: CreateOLAPPayloadRangeChunks :many
WITH payloads AS (
    SELECT
        (p).*
    FROM list_paginated_olap_payloads_for_offload(
        $2::DATE,
        $3::INTEGER,
        $4::UUID,
        $5::UUID,
        $6::TIMESTAMPTZ
    ) p
), with_rows AS (
    SELECT
        tenant_id::UUID,
        external_id::UUID,
        inserted_at::TIMESTAMPTZ,
        ROW_NUMBER() OVER (ORDER BY tenant_id, external_id, inserted_at) AS rn
    FROM payloads
)

SELECT tenant_id, external_id, inserted_at, rn
FROM with_rows
WHERE MOD(rn, $1::INTEGER) = 1
ORDER BY tenant_id, external_id, inserted_at
`

type CreateOLAPPayloadRangeChunksParams struct {
	Chunksize      int32              `json:"chunksize"`
	Partitiondate  pgtype.Date        `json:"partitiondate"`
	Windowsize     int32              `json:"windowsize"`
	Lasttenantid   pgtype.UUID        `json:"lasttenantid"`
	Lastexternalid pgtype.UUID        `json:"lastexternalid"`
	Lastinsertedat pgtype.Timestamptz `json:"lastinsertedat"`
}

type CreateOLAPPayloadRangeChunksRow struct {
	TenantID   pgtype.UUID        `json:"tenant_id"`
	ExternalID pgtype.UUID        `json:"external_id"`
	InsertedAt pgtype.Timestamptz `json:"inserted_at"`
	Rn         int64              `json:"rn"`
}

// row numbers are one-indexed
func (q *Queries) CreateOLAPPayloadRangeChunks(ctx context.Context, db DBTX, arg CreateOLAPPayloadRangeChunksParams) ([]*CreateOLAPPayloadRangeChunksRow, error) {
	rows, err := db.Query(ctx, createOLAPPayloadRangeChunks,
		arg.Chunksize,
		arg.Partitiondate,
		arg.Windowsize,
		arg.Lasttenantid,
		arg.Lastexternalid,
		arg.Lastinsertedat,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*CreateOLAPPayloadRangeChunksRow
	for rows.Next() {
		var i CreateOLAPPayloadRangeChunksRow
		if err := rows.Scan(
			&i.TenantID,
			&i.ExternalID,
			&i.InsertedAt,
			&i.Rn,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
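
// Hand-written note (not sqlc output): because the query keeps only rows where
// MOD(rn, chunk_size) = 1, each returned row is the *start* of a chunk of up
// to Chunksize payloads ordered by (tenant_id, external_id, inserted_at). A
// caller might zip consecutive rows into half-open ranges; the chunkRange type
// below is an assumption for this sketch:
//
//	type chunkRange struct {
//		start *CreateOLAPPayloadRangeChunksRow
//		end   *CreateOLAPPayloadRangeChunksRow // nil for the final, unbounded range
//	}
//	var ranges []chunkRange
//	for idx, row := range chunks {
//		r := chunkRange{start: row}
//		if idx+1 < len(chunks) {
//			r.end = chunks[idx+1] // exclusive upper bound
//		}
//		ranges = append(ranges, r)
//	}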

type CreateTaskEventsOLAPParams struct {
	TenantID               pgtype.UUID          `json:"tenant_id"`
	TaskID                 int64                `json:"task_id"`
	TaskInsertedAt         pgtype.Timestamptz   `json:"task_inserted_at"`
	EventType              V1EventTypeOlap      `json:"event_type"`
	WorkflowID             pgtype.UUID          `json:"workflow_id"`
	EventTimestamp         pgtype.Timestamptz   `json:"event_timestamp"`
	ReadableStatus         V1ReadableStatusOlap `json:"readable_status"`
	RetryCount             int32                `json:"retry_count"`
	ErrorMessage           pgtype.Text          `json:"error_message"`
	Output                 []byte               `json:"output"`
	WorkerID               pgtype.UUID          `json:"worker_id"`
	AdditionalEventData    pgtype.Text          `json:"additional__event_data"`
	AdditionalEventMessage pgtype.Text          `json:"additional__event_message"`
	ExternalID             pgtype.UUID          `json:"external_id"`
}

type CreateTaskEventsOLAPTmpParams struct {
	TenantID       pgtype.UUID          `json:"tenant_id"`
	TaskID         int64                `json:"task_id"`
	TaskInsertedAt pgtype.Timestamptz   `json:"task_inserted_at"`
	EventType      V1EventTypeOlap      `json:"event_type"`
	ReadableStatus V1ReadableStatusOlap `json:"readable_status"`
	RetryCount     int32                `json:"retry_count"`
	WorkerID       pgtype.UUID          `json:"worker_id"`
}

type CreateTasksOLAPParams struct {
	TenantID             pgtype.UUID          `json:"tenant_id"`
	ID                   int64                `json:"id"`
	InsertedAt           pgtype.Timestamptz   `json:"inserted_at"`
	Queue                string               `json:"queue"`
	ActionID             string               `json:"action_id"`
	StepID               pgtype.UUID          `json:"step_id"`
	WorkflowID           pgtype.UUID          `json:"workflow_id"`
	WorkflowVersionID    pgtype.UUID          `json:"workflow_version_id"`
	WorkflowRunID        pgtype.UUID          `json:"workflow_run_id"`
	ScheduleTimeout      string               `json:"schedule_timeout"`
	StepTimeout          pgtype.Text          `json:"step_timeout"`
	Priority             pgtype.Int4          `json:"priority"`
	Sticky               V1StickyStrategyOlap `json:"sticky"`
	DesiredWorkerID      pgtype.UUID          `json:"desired_worker_id"`
	ExternalID           pgtype.UUID          `json:"external_id"`
	DisplayName          string               `json:"display_name"`
	Input                []byte               `json:"input"`
	AdditionalMetadata   []byte               `json:"additional_metadata"`
	DagID                pgtype.Int8          `json:"dag_id"`
	DagInsertedAt        pgtype.Timestamptz   `json:"dag_inserted_at"`
	ParentTaskExternalID pgtype.UUID          `json:"parent_task_external_id"`
}

const createV1PayloadOLAPCutoverTemporaryTable = `-- name: CreateV1PayloadOLAPCutoverTemporaryTable :exec
SELECT copy_v1_payloads_olap_partition_structure($1::DATE)
`

func (q *Queries) CreateV1PayloadOLAPCutoverTemporaryTable(ctx context.Context, db DBTX, date pgtype.Date) error {
	_, err := db.Exec(ctx, createV1PayloadOLAPCutoverTemporaryTable, date)
	return err
}

const findMinInsertedAtForDAGStatusUpdates = `-- name: FindMinInsertedAtForDAGStatusUpdates :one
WITH tenants AS (
    SELECT UNNEST(
        find_matching_tenants_in_task_status_updates_tmp_partition(
            $1::int,
            $3::UUID[]
        )
    ) AS tenant_id
)

SELECT
    MIN(u.dag_inserted_at)::TIMESTAMPTZ AS min_inserted_at
FROM tenants t,
    LATERAL list_task_status_updates_tmp(
        $1::int,
        t.tenant_id,
        $2::int
    ) u
`

type FindMinInsertedAtForDAGStatusUpdatesParams struct {
	Partitionnumber int32         `json:"partitionnumber"`
	Eventlimit      int32         `json:"eventlimit"`
	Tenantids       []pgtype.UUID `json:"tenantids"`
}

func (q *Queries) FindMinInsertedAtForDAGStatusUpdates(ctx context.Context, db DBTX, arg FindMinInsertedAtForDAGStatusUpdatesParams) (pgtype.Timestamptz, error) {
	row := db.QueryRow(ctx, findMinInsertedAtForDAGStatusUpdates, arg.Partitionnumber, arg.Eventlimit, arg.Tenantids)
	var min_inserted_at pgtype.Timestamptz
	err := row.Scan(&min_inserted_at)
	return min_inserted_at, err
}

const findMinInsertedAtForTaskStatusUpdates = `-- name: FindMinInsertedAtForTaskStatusUpdates :one
WITH tenants AS (
    SELECT UNNEST(
        find_matching_tenants_in_task_events_tmp_partition(
            $1::int,
            $3::UUID[]
        )
    ) AS tenant_id
)

SELECT
    MIN(e.task_inserted_at)::TIMESTAMPTZ AS min_inserted_at
FROM tenants t,
    LATERAL list_task_events_tmp(
        $1::int,
        t.tenant_id,
        $2::int
    ) e
`

type FindMinInsertedAtForTaskStatusUpdatesParams struct {
	Partitionnumber int32         `json:"partitionnumber"`
	Eventlimit      int32         `json:"eventlimit"`
	Tenantids       []pgtype.UUID `json:"tenantids"`
}

func (q *Queries) FindMinInsertedAtForTaskStatusUpdates(ctx context.Context, db DBTX, arg FindMinInsertedAtForTaskStatusUpdatesParams) (pgtype.Timestamptz, error) {
	row := db.QueryRow(ctx, findMinInsertedAtForTaskStatusUpdates, arg.Partitionnumber, arg.Eventlimit, arg.Tenantids)
	var min_inserted_at pgtype.Timestamptz
	err := row.Scan(&min_inserted_at)
	return min_inserted_at, err
}

const flattenTasksByExternalIds = `-- name: FlattenTasksByExternalIds :many
WITH lookups AS (
    SELECT
        tenant_id, external_id, task_id, dag_id, inserted_at
    FROM
        v1_lookup_table_olap
    WHERE
        external_id = ANY($1::uuid[])
        AND tenant_id = $2::uuid
), tasks_from_dags AS (
    SELECT
        l.tenant_id,
        dt.task_id,
        dt.task_inserted_at
    FROM
        lookups l
    JOIN
        v1_dag_to_task_olap dt ON l.dag_id = dt.dag_id AND l.inserted_at = dt.dag_inserted_at
    WHERE
        l.dag_id IS NOT NULL
), unioned_tasks AS (
    SELECT
        l.tenant_id AS tenant_id,
        l.task_id AS task_id,
        l.inserted_at AS task_inserted_at
    FROM
        lookups l
    UNION ALL
    SELECT
        t.tenant_id AS tenant_id,
        t.task_id AS task_id,
        t.task_inserted_at AS task_inserted_at
    FROM
        tasks_from_dags t
)
SELECT
    t.tenant_id,
    t.id,
    t.inserted_at,
    t.external_id,
    t.latest_retry_count AS retry_count
FROM
    v1_tasks_olap t
JOIN
    unioned_tasks ut ON (t.inserted_at, t.id) = (ut.task_inserted_at, ut.task_id)
`

type FlattenTasksByExternalIdsParams struct {
	Externalids []pgtype.UUID `json:"externalids"`
	Tenantid    pgtype.UUID   `json:"tenantid"`
}

type FlattenTasksByExternalIdsRow struct {
	TenantID   pgtype.UUID        `json:"tenant_id"`
	ID         int64              `json:"id"`
	InsertedAt pgtype.Timestamptz `json:"inserted_at"`
	ExternalID pgtype.UUID        `json:"external_id"`
	RetryCount int32              `json:"retry_count"`
}

// Get retry counts for each task
func (q *Queries) FlattenTasksByExternalIds(ctx context.Context, db DBTX, arg FlattenTasksByExternalIdsParams) ([]*FlattenTasksByExternalIdsRow, error) {
	rows, err := db.Query(ctx, flattenTasksByExternalIds, arg.Externalids, arg.Tenantid)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*FlattenTasksByExternalIdsRow
	for rows.Next() {
		var i FlattenTasksByExternalIdsRow
		if err := rows.Scan(
			&i.TenantID,
			&i.ID,
			&i.InsertedAt,
			&i.ExternalID,
			&i.RetryCount,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}

const getDagDurations = `-- name: GetDagDurations :many
SELECT
    lt.external_id,
    MIN(e.event_timestamp) FILTER (WHERE e.readable_status = 'RUNNING')::TIMESTAMPTZ AS started_at,
    MAX(e.event_timestamp) FILTER (WHERE e.readable_status IN ('COMPLETED', 'FAILED', 'CANCELLED'))::TIMESTAMPTZ AS finished_at
FROM
    v1_lookup_table_olap lt
JOIN
    v1_dags_olap d ON (lt.dag_id, lt.inserted_at) = (d.id, d.inserted_at)
JOIN
    v1_dag_to_task_olap dt ON (d.id, d.inserted_at) = (dt.dag_id, dt.dag_inserted_at)
JOIN
    v1_task_events_olap e ON (dt.task_id, dt.task_inserted_at) = (e.task_id, e.task_inserted_at)
WHERE lt.external_id = ANY($1::UUID[])
    AND lt.tenant_id = $2::UUID
    AND d.inserted_at >= $3::TIMESTAMPTZ
GROUP BY lt.external_id
`

type GetDagDurationsParams struct {
	Externalids   []pgtype.UUID      `json:"externalids"`
	Tenantid      pgtype.UUID        `json:"tenantid"`
	Mininsertedat pgtype.Timestamptz `json:"mininsertedat"`
}

type GetDagDurationsRow struct {
	ExternalID pgtype.UUID        `json:"external_id"`
	StartedAt  pgtype.Timestamptz `json:"started_at"`
	FinishedAt pgtype.Timestamptz `json:"finished_at"`
}

func (q *Queries) GetDagDurations(ctx context.Context, db DBTX, arg GetDagDurationsParams) ([]*GetDagDurationsRow, error) {
	rows, err := db.Query(ctx, getDagDurations, arg.Externalids, arg.Tenantid, arg.Mininsertedat)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*GetDagDurationsRow
	for rows.Next() {
		var i GetDagDurationsRow
		if err := rows.Scan(&i.ExternalID, &i.StartedAt, &i.FinishedAt); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}

const getEventByExternalId = `-- name: GetEventByExternalId :one
SELECT e.tenant_id, e.id, e.external_id, e.seen_at, e.key, e.payload, e.additional_metadata, e.scope, e.triggering_webhook_name
FROM v1_event_lookup_table_olap elt
JOIN v1_events_olap e ON (elt.event_id, elt.event_seen_at) = (e.id, e.seen_at)
WHERE elt.external_id = $1::uuid
`

func (q *Queries) GetEventByExternalId(ctx context.Context, db DBTX, eventexternalid pgtype.UUID) (*V1EventsOlap, error) {
	row := db.QueryRow(ctx, getEventByExternalId, eventexternalid)
	var i V1EventsOlap
	err := row.Scan(
		&i.TenantID,
		&i.ID,
		&i.ExternalID,
		&i.SeenAt,
		&i.Key,
		&i.Payload,
		&i.AdditionalMetadata,
		&i.Scope,
		&i.TriggeringWebhookName,
	)
	return &i, err
}

const getRunsListRecursive = `-- name: GetRunsListRecursive :many
WITH RECURSIVE all_runs AS (
    -- seed term
    SELECT
        t.id,
        t.inserted_at,
        t.tenant_id,
        t.external_id,
        t.parent_task_external_id,
        0 AS depth
    FROM
        v1_lookup_table_olap lt
    JOIN v1_tasks_olap t
        ON t.inserted_at = lt.inserted_at
        AND t.id = lt.task_id
    WHERE
        lt.external_id = ANY($2::uuid[])

    UNION ALL

    -- single recursive term for both DAG- and TASK-driven children
    SELECT
        t.id,
        t.inserted_at,
        t.tenant_id,
        t.external_id,
        t.parent_task_external_id,
        ar.depth + 1 AS depth
    FROM
        v1_runs_olap r
    JOIN all_runs ar ON ar.external_id = r.parent_task_external_id

    -- only present when r.kind = 'DAG'
    LEFT JOIN v1_dag_to_task_olap dt ON r.kind = 'DAG' AND r.id = dt.dag_id AND r.inserted_at = dt.dag_inserted_at

    -- pick the correct task row for either branch
    JOIN v1_tasks_olap t
        ON (
            r.kind = 'DAG'
            AND t.id = dt.task_id
            AND t.inserted_at = dt.task_inserted_at
        )
        OR (
            r.kind = 'TASK'
            AND t.id = r.id
            AND t.inserted_at = r.inserted_at
        )
    WHERE
        r.tenant_id = $1::uuid
        AND ar.depth < $3::int
        AND r.inserted_at >= $4::timestamptz
        AND t.inserted_at >= $4::timestamptz
)
SELECT
    tenant_id,
    id,
    inserted_at,
    external_id,
    parent_task_external_id,
    depth
FROM
    all_runs
WHERE
    tenant_id = $1::uuid
`

type GetRunsListRecursiveParams struct {
	Tenantid        pgtype.UUID        `json:"tenantid"`
	Taskexternalids []pgtype.UUID      `json:"taskexternalids"`
	Depth           int32              `json:"depth"`
	Createdafter    pgtype.Timestamptz `json:"createdafter"`
}

type GetRunsListRecursiveRow struct {
	TenantID             pgtype.UUID        `json:"tenant_id"`
	ID                   int64              `json:"id"`
	InsertedAt           pgtype.Timestamptz `json:"inserted_at"`
	ExternalID           pgtype.UUID        `json:"external_id"`
	ParentTaskExternalID pgtype.UUID        `json:"parent_task_external_id"`
	Depth                int32              `json:"depth"`
}

func (q *Queries) GetRunsListRecursive(ctx context.Context, db DBTX, arg GetRunsListRecursiveParams) ([]*GetRunsListRecursiveRow, error) {
	rows, err := db.Query(ctx, getRunsListRecursive,
		arg.Tenantid,
		arg.Taskexternalids,
		arg.Depth,
		arg.Createdafter,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*GetRunsListRecursiveRow
	for rows.Next() {
		var i GetRunsListRecursiveRow
		if err := rows.Scan(
			&i.TenantID,
			&i.ID,
			&i.InsertedAt,
			&i.ExternalID,
			&i.ParentTaskExternalID,
			&i.Depth,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
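
// Hand-written note (not sqlc output): the recursive term fans out from each
// discovered run to its children, so Depth bounds how many levels below the
// seed runs are returned (seeds come back with depth 0). A typical call,
// where rootIDs and pool are assumptions for this sketch:
//
//	runs, err := q.GetRunsListRecursive(ctx, pool, GetRunsListRecursiveParams{
//		Tenantid:        tenantID,
//		Taskexternalids: rootIDs, // external IDs of the root tasks
//		Depth:           3,       // children, grandchildren, great-grandchildren
//		Createdafter:    since,
//	})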

const getTaskDurationsByTaskIds = `-- name: GetTaskDurationsByTaskIds :many
WITH input AS (
    SELECT
        UNNEST($1::bigint[]) AS task_id,
        UNNEST($2::timestamptz[]) AS inserted_at,
        UNNEST($3::v1_readable_status_olap[]) AS readable_status
), task_data AS (
    SELECT
        i.task_id,
        i.inserted_at,
        t.external_id,
        t.display_name,
        t.readable_status,
        t.latest_retry_count,
        t.tenant_id
    FROM
        input i
    JOIN
        v1_tasks_olap t ON (t.inserted_at, t.id, t.readable_status, t.tenant_id) = (i.inserted_at, i.task_id, i.readable_status, $4::uuid)
), task_events AS (
    SELECT
        td.task_id,
        td.inserted_at,
        e.event_type,
        e.event_timestamp,
        e.readable_status
    FROM
        task_data td
    JOIN
        v1_task_events_olap e ON (e.tenant_id, e.task_id, e.task_inserted_at, e.retry_count) = (td.tenant_id, td.task_id, td.inserted_at, td.latest_retry_count)
), task_times AS (
    SELECT
        task_id,
        inserted_at,
        MIN(CASE WHEN event_type = 'STARTED' THEN event_timestamp END) AS started_at,
        MAX(CASE WHEN readable_status = ANY(ARRAY['COMPLETED', 'FAILED', 'CANCELLED']::v1_readable_status_olap[])
            THEN event_timestamp END) AS finished_at
    FROM task_events
    GROUP BY task_id, inserted_at
)
SELECT
    tt.started_at::timestamptz AS started_at,
    tt.finished_at::timestamptz AS finished_at
FROM
    task_data td
LEFT JOIN
    task_times tt ON (td.task_id, td.inserted_at) = (tt.task_id, tt.inserted_at)
ORDER BY td.task_id, td.inserted_at
`

type GetTaskDurationsByTaskIdsParams struct {
	Taskids          []int64                `json:"taskids"`
	Taskinsertedats  []pgtype.Timestamptz   `json:"taskinsertedats"`
	Readablestatuses []V1ReadableStatusOlap `json:"readablestatuses"`
	Tenantid         pgtype.UUID            `json:"tenantid"`
}

type GetTaskDurationsByTaskIdsRow struct {
	StartedAt  pgtype.Timestamptz `json:"started_at"`
	FinishedAt pgtype.Timestamptz `json:"finished_at"`
}

func (q *Queries) GetTaskDurationsByTaskIds(ctx context.Context, db DBTX, arg GetTaskDurationsByTaskIdsParams) ([]*GetTaskDurationsByTaskIdsRow, error) {
	rows, err := db.Query(ctx, getTaskDurationsByTaskIds,
		arg.Taskids,
		arg.Taskinsertedats,
		arg.Readablestatuses,
		arg.Tenantid,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*GetTaskDurationsByTaskIdsRow
	for rows.Next() {
		var i GetTaskDurationsByTaskIdsRow
		if err := rows.Scan(&i.StartedAt, &i.FinishedAt); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}

const getTaskPointMetrics = `-- name: GetTaskPointMetrics :many
SELECT
    DATE_BIN(
        COALESCE($1::INTERVAL, '1 minute'),
        task_inserted_at,
        TIMESTAMPTZ '1970-01-01 00:00:00+00'
    ) :: TIMESTAMPTZ AS bucket_2,
    COUNT(*) FILTER (WHERE readable_status = 'COMPLETED') AS completed_count,
    COUNT(*) FILTER (WHERE readable_status = 'FAILED') AS failed_count
FROM
    v1_task_events_olap
WHERE
    tenant_id = $2::UUID
    AND task_inserted_at BETWEEN $3::TIMESTAMPTZ AND $4::TIMESTAMPTZ
GROUP BY bucket_2
ORDER BY bucket_2
`

type GetTaskPointMetricsParams struct {
	Interval      pgtype.Interval    `json:"interval"`
	Tenantid      pgtype.UUID        `json:"tenantid"`
	Createdafter  pgtype.Timestamptz `json:"createdafter"`
	Createdbefore pgtype.Timestamptz `json:"createdbefore"`
}

type GetTaskPointMetricsRow struct {
	Bucket2        pgtype.Timestamptz `json:"bucket_2"`
	CompletedCount int64              `json:"completed_count"`
	FailedCount    int64              `json:"failed_count"`
}

func (q *Queries) GetTaskPointMetrics(ctx context.Context, db DBTX, arg GetTaskPointMetricsParams) ([]*GetTaskPointMetricsRow, error) {
	rows, err := db.Query(ctx, getTaskPointMetrics,
		arg.Interval,
		arg.Tenantid,
		arg.Createdafter,
		arg.Createdbefore,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*GetTaskPointMetricsRow
	for rows.Next() {
		var i GetTaskPointMetricsRow
		if err := rows.Scan(&i.Bucket2, &i.CompletedCount, &i.FailedCount); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}

const getTenantStatusMetrics = `-- name: GetTenantStatusMetrics :one
WITH task_external_ids AS (
    SELECT external_id
    FROM v1_runs_olap
    WHERE (
        $5::UUID IS NULL OR parent_task_external_id = $5::UUID
    ) AND (
        $6::UUID IS NULL
        OR (id, inserted_at) IN (
            SELECT etr.run_id, etr.run_inserted_at
            FROM v1_event_lookup_table_olap lt
            JOIN v1_events_olap e ON (lt.tenant_id, lt.event_id, lt.event_seen_at) = (e.tenant_id, e.id, e.seen_at)
            JOIN v1_event_to_run_olap etr ON (e.id, e.seen_at) = (etr.event_id, etr.event_seen_at)
            WHERE
                lt.tenant_id = $1::uuid
                AND lt.external_id = $6::UUID
        )
    )
    AND (
        $7::text[] IS NULL
        OR $8::text[] IS NULL
        OR EXISTS (
            SELECT 1 FROM jsonb_each_text(additional_metadata) kv
            JOIN LATERAL (
                SELECT unnest($7::text[]) AS k,
                       unnest($8::text[]) AS v
            ) AS u ON kv.key = u.k AND kv.value = u.v
        )
    )
)
SELECT
    tenant_id,
    COUNT(*) FILTER (WHERE readable_status = 'QUEUED') AS total_queued,
    COUNT(*) FILTER (WHERE readable_status = 'RUNNING') AS total_running,
    COUNT(*) FILTER (WHERE readable_status = 'COMPLETED') AS total_completed,
    COUNT(*) FILTER (WHERE readable_status = 'CANCELLED') AS total_cancelled,
    COUNT(*) FILTER (WHERE readable_status = 'FAILED') AS total_failed
FROM v1_statuses_olap
WHERE
    tenant_id = $1::UUID
    AND inserted_at >= $2::TIMESTAMPTZ
    AND (
        $3::TIMESTAMPTZ IS NULL OR inserted_at <= $3::TIMESTAMPTZ
    )
    AND (
        $4::UUID[] IS NULL OR workflow_id = ANY($4::UUID[])
    )
    AND external_id IN (
        SELECT external_id
        FROM task_external_ids
    )
GROUP BY tenant_id
`

type GetTenantStatusMetricsParams struct {
	Tenantid                  pgtype.UUID        `json:"tenantid"`
	Createdafter              pgtype.Timestamptz `json:"createdafter"`
	CreatedBefore             pgtype.Timestamptz `json:"createdBefore"`
	WorkflowIds               []pgtype.UUID      `json:"workflowIds"`
	ParentTaskExternalId      pgtype.UUID        `json:"parentTaskExternalId"`
	TriggeringEventExternalId pgtype.UUID        `json:"triggeringEventExternalId"`
	AdditionalMetaKeys        []string           `json:"additionalMetaKeys"`
	AdditionalMetaValues      []string           `json:"additionalMetaValues"`
}

type GetTenantStatusMetricsRow struct {
	TenantID       pgtype.UUID `json:"tenant_id"`
	TotalQueued    int64       `json:"total_queued"`
	TotalRunning   int64       `json:"total_running"`
	TotalCompleted int64       `json:"total_completed"`
	TotalCancelled int64       `json:"total_cancelled"`
	TotalFailed    int64       `json:"total_failed"`
}

func (q *Queries) GetTenantStatusMetrics(ctx context.Context, db DBTX, arg GetTenantStatusMetricsParams) (*GetTenantStatusMetricsRow, error) {
	row := db.QueryRow(ctx, getTenantStatusMetrics,
		arg.Tenantid,
		arg.Createdafter,
		arg.CreatedBefore,
		arg.WorkflowIds,
		arg.ParentTaskExternalId,
		arg.TriggeringEventExternalId,
		arg.AdditionalMetaKeys,
		arg.AdditionalMetaValues,
	)
	var i GetTenantStatusMetricsRow
	err := row.Scan(
		&i.TenantID,
		&i.TotalQueued,
		&i.TotalRunning,
		&i.TotalCompleted,
		&i.TotalCancelled,
		&i.TotalFailed,
	)
	return &i, err
}
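
// Hand-written note (not sqlc output): the additional-metadata filter pairs
// the two arrays positionally -- element i of AdditionalMetaKeys is matched
// with element i of AdditionalMetaValues, and a run matches if any pair
// appears in its metadata. To filter on env=prod or team=core:
//
//	arg.AdditionalMetaKeys = []string{"env", "team"}
//	arg.AdditionalMetaValues = []string{"prod", "core"}
//
// The arrays should be the same length; on PostgreSQL 10+ a mismatched pair
// of UNNESTs is padded with NULLs rather than raising an error.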

const getWorkflowRunIdFromDagIdInsertedAt = `-- name: GetWorkflowRunIdFromDagIdInsertedAt :one
SELECT external_id
FROM v1_dags_olap
WHERE
    id = $1::bigint
    AND inserted_at = $2::timestamptz
`

type GetWorkflowRunIdFromDagIdInsertedAtParams struct {
	Dagid         int64              `json:"dagid"`
	Daginsertedat pgtype.Timestamptz `json:"daginsertedat"`
}

func (q *Queries) GetWorkflowRunIdFromDagIdInsertedAt(ctx context.Context, db DBTX, arg GetWorkflowRunIdFromDagIdInsertedAtParams) (pgtype.UUID, error) {
	row := db.QueryRow(ctx, getWorkflowRunIdFromDagIdInsertedAt, arg.Dagid, arg.Daginsertedat)
	var external_id pgtype.UUID
	err := row.Scan(&external_id)
	return external_id, err
}

const listEventKeys = `-- name: ListEventKeys :many
SELECT DISTINCT key
FROM
    v1_events_olap
WHERE
    tenant_id = $1::uuid
    AND seen_at > NOW() - INTERVAL '1 day'
`

func (q *Queries) ListEventKeys(ctx context.Context, db DBTX, tenantid pgtype.UUID) ([]string, error) {
	rows, err := db.Query(ctx, listEventKeys, tenantid)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []string
	for rows.Next() {
		var key string
		if err := rows.Scan(&key); err != nil {
			return nil, err
		}
		items = append(items, key)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}

const listEvents = `-- name: ListEvents :many
SELECT e.tenant_id, e.id, e.external_id, e.seen_at, e.key, e.payload, e.additional_metadata, e.scope, e.triggering_webhook_name
FROM v1_event_lookup_table_olap elt
JOIN v1_events_olap e ON (elt.tenant_id, elt.event_id, elt.event_seen_at) = (e.tenant_id, e.id, e.seen_at)
WHERE
    e.tenant_id = $1
    AND (
        $2::TEXT[] IS NULL OR
        "key" = ANY($2::TEXT[])
    )
    AND e.seen_at >= $3::TIMESTAMPTZ
    AND (
        $4::TIMESTAMPTZ IS NULL OR
        e.seen_at <= $4::TIMESTAMPTZ
    )
    AND (
        $5::UUID[] IS NULL OR
        EXISTS (
            SELECT 1
            FROM v1_event_to_run_olap etr
            JOIN v1_runs_olap r ON (etr.run_id, etr.run_inserted_at) = (r.id, r.inserted_at)
            WHERE
                (etr.event_id, etr.event_seen_at) = (e.id, e.seen_at)
                AND r.workflow_id = ANY($5::UUID[]::UUID[])
        )
    )
    AND (
        $6::UUID[] IS NULL OR
        elt.external_id = ANY($6::UUID[])
    )
    AND (
        $7::JSONB IS NULL OR
        e.additional_metadata @> $7::JSONB
    )
    AND (
        CAST($8::TEXT[] AS v1_readable_status_olap[]) IS NULL OR
        EXISTS (
            SELECT 1
            FROM v1_event_to_run_olap etr
            JOIN v1_runs_olap r ON (etr.run_id, etr.run_inserted_at) = (r.id, r.inserted_at)
            WHERE
                (etr.event_id, etr.event_seen_at) = (e.id, e.seen_at)
                AND r.readable_status = ANY(CAST($8::text[]::TEXT[] AS v1_readable_status_olap[]))
        )
    )
    AND (
        $9::TEXT[] IS NULL OR
        e.scope = ANY($9::TEXT[])
    )
ORDER BY e.seen_at DESC, e.id
OFFSET
    COALESCE($10::BIGINT, 0)
LIMIT
    COALESCE($11::BIGINT, 50)
`

type ListEventsParams struct {
	Tenantid           pgtype.UUID        `json:"tenantid"`
	Keys               []string           `json:"keys"`
	Since              pgtype.Timestamptz `json:"since"`
	Until              pgtype.Timestamptz `json:"until"`
	WorkflowIds        []pgtype.UUID      `json:"workflowIds"`
	EventIds           []pgtype.UUID      `json:"eventIds"`
	AdditionalMetadata []byte             `json:"additionalMetadata"`
	Statuses           []string           `json:"statuses"`
	Scopes             []string           `json:"scopes"`
	Offset             pgtype.Int8        `json:"offset"`
	Limit              pgtype.Int8        `json:"limit"`
}

func (q *Queries) ListEvents(ctx context.Context, db DBTX, arg ListEventsParams) ([]*V1EventsOlap, error) {
	rows, err := db.Query(ctx, listEvents,
		arg.Tenantid,
		arg.Keys,
		arg.Since,
		arg.Until,
		arg.WorkflowIds,
		arg.EventIds,
		arg.AdditionalMetadata,
		arg.Statuses,
		arg.Scopes,
		arg.Offset,
		arg.Limit,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*V1EventsOlap
	for rows.Next() {
		var i V1EventsOlap
		if err := rows.Scan(
			&i.TenantID,
			&i.ID,
			&i.ExternalID,
			&i.SeenAt,
			&i.Key,
			&i.Payload,
			&i.AdditionalMetadata,
			&i.Scope,
			&i.TriggeringWebhookName,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
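
// Hand-written note (not sqlc output): ListEvents pages with OFFSET/LIMIT,
// defaulting to offset 0 and limit 50 via COALESCE. A drain loop might look
// like the following, where pool and arg are assumptions for this sketch:
//
//	const pageSize = int64(100)
//	arg.Limit = pgtype.Int8{Int64: pageSize, Valid: true}
//	for offset := int64(0); ; offset += pageSize {
//		arg.Offset = pgtype.Int8{Int64: offset, Valid: true}
//		events, err := q.ListEvents(ctx, pool, arg)
//		if err != nil {
//			return err
//		}
//		// ... process events ...
//		if int64(len(events)) < pageSize {
//			break // short page: no more results
//		}
//	}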

const listOLAPPartitionsBeforeDate = `-- name: ListOLAPPartitionsBeforeDate :many
WITH task_partitions AS (
    SELECT 'v1_tasks_olap' AS parent_table, p::text as partition_name FROM get_v1_partitions_before_date('v1_tasks_olap'::text, $2::date) AS p
), dag_partitions AS (
    SELECT 'v1_dags_olap' AS parent_table, p::text as partition_name FROM get_v1_partitions_before_date('v1_dags_olap', $2::date) AS p
), runs_partitions AS (
    SELECT 'v1_runs_olap' AS parent_table, p::text as partition_name FROM get_v1_partitions_before_date('v1_runs_olap', $2::date) AS p
), events_partitions AS (
    SELECT 'v1_events_olap' AS parent_table, p::TEXT AS partition_name FROM get_v1_partitions_before_date('v1_events_olap', $2::date) AS p
), event_trigger_partitions AS (
    SELECT 'v1_event_to_run_olap' AS parent_table, p::TEXT AS partition_name FROM get_v1_partitions_before_date('v1_event_to_run_olap', $2::date) AS p
), events_lookup_table_partitions AS (
    SELECT 'v1_event_lookup_table_olap' AS parent_table, p::TEXT AS partition_name FROM get_v1_weekly_partitions_before_date('v1_event_lookup_table_olap', $2::date) AS p
), incoming_webhook_validation_failure_partitions AS (
    SELECT 'v1_incoming_webhook_validation_failures_olap' AS parent_table, p::TEXT AS partition_name FROM get_v1_partitions_before_date('v1_incoming_webhook_validation_failures_olap', $2::date) AS p
), cel_evaluation_failures_partitions AS (
    SELECT 'v1_cel_evaluation_failures_olap' AS parent_table, p::TEXT AS partition_name FROM get_v1_partitions_before_date('v1_cel_evaluation_failures_olap', $2::date) AS p
), payloads_partitions AS (
    SELECT 'v1_payloads_olap' AS parent_table, p::TEXT AS partition_name FROM get_v1_partitions_before_date('v1_payloads_olap', $2::date) AS p
), candidates AS (
    SELECT parent_table, partition_name FROM task_partitions
    UNION ALL
    SELECT parent_table, partition_name FROM dag_partitions
    UNION ALL
    SELECT parent_table, partition_name FROM runs_partitions
    UNION ALL
    SELECT parent_table, partition_name FROM events_partitions
    UNION ALL
    SELECT parent_table, partition_name FROM event_trigger_partitions
    UNION ALL
    SELECT parent_table, partition_name FROM events_lookup_table_partitions
    UNION ALL
    SELECT parent_table, partition_name FROM incoming_webhook_validation_failure_partitions
    UNION ALL
    SELECT parent_table, partition_name FROM cel_evaluation_failures_partitions
    UNION ALL
    SELECT parent_table, partition_name FROM payloads_partitions
)

SELECT parent_table, partition_name
FROM candidates
WHERE
    CASE
        WHEN $1::BOOLEAN THEN TRUE
        -- this is a list of all of the tables which are hypertables in timescale, so we should not manually drop their
        -- partitions if @shouldPartitionEventsTables is false
        ELSE parent_table NOT IN ('v1_events_olap', 'v1_event_to_run_olap', 'v1_cel_evaluation_failures_olap', 'v1_incoming_webhook_validation_failures_olap')
    END
`

type ListOLAPPartitionsBeforeDateParams struct {
	Shouldpartitioneventstables bool        `json:"shouldpartitioneventstables"`
	Date                        pgtype.Date `json:"date"`
}

type ListOLAPPartitionsBeforeDateRow struct {
	ParentTable   string `json:"parent_table"`
	PartitionName string `json:"partition_name"`
}

func (q *Queries) ListOLAPPartitionsBeforeDate(ctx context.Context, db DBTX, arg ListOLAPPartitionsBeforeDateParams) ([]*ListOLAPPartitionsBeforeDateRow, error) {
	rows, err := db.Query(ctx, listOLAPPartitionsBeforeDate, arg.Shouldpartitioneventstables, arg.Date)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*ListOLAPPartitionsBeforeDateRow
	for rows.Next() {
		var i ListOLAPPartitionsBeforeDateRow
		if err := rows.Scan(&i.ParentTable, &i.PartitionName); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}

const listPaginatedOLAPPayloadsForOffload = `-- name: ListPaginatedOLAPPayloadsForOffload :many
WITH payloads AS (
    SELECT
        (p).*
    FROM list_paginated_olap_payloads_for_offload(
        $1::DATE,
        $2::INT,
        $3::UUID,
        $4::UUID,
        $5::TIMESTAMPTZ
    ) p
)
SELECT
    tenant_id::UUID,
    external_id::UUID,
    location::v1_payload_location_olap,
    COALESCE(external_location_key, '')::TEXT AS external_location_key,
    inline_content::JSONB AS inline_content,
    inserted_at::TIMESTAMPTZ,
    updated_at::TIMESTAMPTZ
FROM payloads
`

type ListPaginatedOLAPPayloadsForOffloadParams struct {
	Partitiondate  pgtype.Date        `json:"partitiondate"`
	Limitparam     int32              `json:"limitparam"`
	Lasttenantid   pgtype.UUID        `json:"lasttenantid"`
	Lastexternalid pgtype.UUID        `json:"lastexternalid"`
	Lastinsertedat pgtype.Timestamptz `json:"lastinsertedat"`
}

type ListPaginatedOLAPPayloadsForOffloadRow struct {
	TenantID            pgtype.UUID           `json:"tenant_id"`
	ExternalID          pgtype.UUID           `json:"external_id"`
	Location            V1PayloadLocationOlap `json:"location"`
	ExternalLocationKey string                `json:"external_location_key"`
	InlineContent       []byte                `json:"inline_content"`
	InsertedAt          pgtype.Timestamptz    `json:"inserted_at"`
	UpdatedAt           pgtype.Timestamptz    `json:"updated_at"`
}

func (q *Queries) ListPaginatedOLAPPayloadsForOffload(ctx context.Context, db DBTX, arg ListPaginatedOLAPPayloadsForOffloadParams) ([]*ListPaginatedOLAPPayloadsForOffloadRow, error) {
	rows, err := db.Query(ctx, listPaginatedOLAPPayloadsForOffload,
		arg.Partitiondate,
		arg.Limitparam,
		arg.Lasttenantid,
		arg.Lastexternalid,
		arg.Lastinsertedat,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*ListPaginatedOLAPPayloadsForOffloadRow
	for rows.Next() {
		var i ListPaginatedOLAPPayloadsForOffloadRow
		if err := rows.Scan(
			&i.TenantID,
			&i.ExternalID,
			&i.Location,
			&i.ExternalLocationKey,
			&i.InlineContent,
			&i.InsertedAt,
			&i.UpdatedAt,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
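
// Hand-written note (not sqlc output): this query uses keyset pagination --
// each call resumes strictly after the (Lasttenantid, Lastexternalid,
// Lastinsertedat) cursor from the previous page, which stays cheap on large
// partitions where OFFSET would not. A drain loop, with pool, date, and
// batchSize as assumptions for this sketch:
//
//	arg := ListPaginatedOLAPPayloadsForOffloadParams{Partitiondate: date, Limitparam: batchSize}
//	for {
//		page, err := q.ListPaginatedOLAPPayloadsForOffload(ctx, pool, arg)
//		if err != nil || len(page) == 0 {
//			break // handle err in real code
//		}
//		// ... process page ...
//		last := page[len(page)-1]
//		arg.Lasttenantid, arg.Lastexternalid, arg.Lastinsertedat = last.TenantID, last.ExternalID, last.InsertedAt
//	}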
|
|
|
|
const listTaskEvents = `-- name: ListTaskEvents :many
|
|
WITH aggregated_events AS (
|
|
SELECT
|
|
tenant_id,
|
|
task_id,
|
|
task_inserted_at,
|
|
retry_count,
|
|
event_type,
|
|
MIN(event_timestamp) AS time_first_seen,
|
|
MAX(event_timestamp) AS time_last_seen,
|
|
COUNT(*) AS count,
|
|
MIN(id) AS first_id
|
|
FROM v1_task_events_olap
|
|
WHERE
|
|
tenant_id = $1::uuid
|
|
AND task_id = $2::bigint
|
|
AND task_inserted_at = $3::timestamptz
|
|
GROUP BY tenant_id, task_id, task_inserted_at, retry_count, event_type
|
|
)
|
|
SELECT
|
|
a.tenant_id,
|
|
a.task_id,
|
|
a.task_inserted_at,
|
|
a.retry_count,
|
|
a.event_type,
|
|
a.time_first_seen,
|
|
a.time_last_seen,
|
|
a.count,
|
|
t.id,
|
|
t.event_timestamp,
|
|
t.readable_status,
|
|
t.error_message,
|
|
t.output,
|
|
t.external_id AS event_external_id,
|
|
t.worker_id,
|
|
t.additional__event_data,
|
|
t.additional__event_message
|
|
FROM aggregated_events a
|
|
JOIN v1_task_events_olap t
|
|
ON t.tenant_id = a.tenant_id
|
|
AND t.task_id = a.task_id
|
|
AND t.task_inserted_at = a.task_inserted_at
|
|
AND t.id = a.first_id
|
|
ORDER BY a.time_first_seen DESC, t.event_timestamp DESC
|
|
`
|
|
|
|
type ListTaskEventsParams struct {
|
|
Tenantid pgtype.UUID `json:"tenantid"`
|
|
Taskid int64 `json:"taskid"`
|
|
Taskinsertedat pgtype.Timestamptz `json:"taskinsertedat"`
|
|
}
|
|
|
|
type ListTaskEventsRow struct {
|
|
TenantID pgtype.UUID `json:"tenant_id"`
|
|
TaskID int64 `json:"task_id"`
|
|
TaskInsertedAt pgtype.Timestamptz `json:"task_inserted_at"`
|
|
RetryCount int32 `json:"retry_count"`
|
|
EventType V1EventTypeOlap `json:"event_type"`
|
|
TimeFirstSeen interface{} `json:"time_first_seen"`
|
|
TimeLastSeen interface{} `json:"time_last_seen"`
|
|
Count int64 `json:"count"`
|
|
ID int64 `json:"id"`
|
|
EventTimestamp pgtype.Timestamptz `json:"event_timestamp"`
|
|
ReadableStatus V1ReadableStatusOlap `json:"readable_status"`
|
|
ErrorMessage pgtype.Text `json:"error_message"`
|
|
Output []byte `json:"output"`
|
|
EventExternalID pgtype.UUID `json:"event_external_id"`
|
|
WorkerID pgtype.UUID `json:"worker_id"`
|
|
AdditionalEventData pgtype.Text `json:"additional__event_data"`
|
|
AdditionalEventMessage pgtype.Text `json:"additional__event_message"`
|
|
}
|
|
|
|
func (q *Queries) ListTaskEvents(ctx context.Context, db DBTX, arg ListTaskEventsParams) ([]*ListTaskEventsRow, error) {
|
|
rows, err := db.Query(ctx, listTaskEvents, arg.Tenantid, arg.Taskid, arg.Taskinsertedat)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []*ListTaskEventsRow
|
|
for rows.Next() {
|
|
var i ListTaskEventsRow
|
|
if err := rows.Scan(
|
|
&i.TenantID,
|
|
&i.TaskID,
|
|
&i.TaskInsertedAt,
|
|
&i.RetryCount,
|
|
&i.EventType,
|
|
&i.TimeFirstSeen,
|
|
&i.TimeLastSeen,
|
|
&i.Count,
|
|
&i.ID,
|
|
&i.EventTimestamp,
|
|
&i.ReadableStatus,
|
|
&i.ErrorMessage,
|
|
&i.Output,
|
|
&i.EventExternalID,
|
|
&i.WorkerID,
|
|
&i.AdditionalEventData,
|
|
&i.AdditionalEventMessage,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, &i)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const listTaskEventsForWorkflowRun = `-- name: ListTaskEventsForWorkflowRun :many
|
|
WITH tasks AS (
|
|
SELECT dt.task_id, dt.task_inserted_at
|
|
FROM v1_lookup_table_olap lt
|
|
JOIN v1_dag_to_task_olap dt ON lt.dag_id = dt.dag_id AND lt.inserted_at = dt.dag_inserted_at
|
|
WHERE
|
|
lt.external_id = $1::uuid
|
|
AND lt.tenant_id = $2::uuid
|
|
), aggregated_events AS (
|
|
SELECT
|
|
tenant_id,
|
|
task_id,
|
|
task_inserted_at,
|
|
retry_count,
|
|
event_type,
|
|
MIN(event_timestamp)::timestamptz AS time_first_seen,
|
|
MAX(event_timestamp)::timestamptz AS time_last_seen,
|
|
COUNT(*) AS count,
|
|
MIN(id) AS first_id
|
|
FROM v1_task_events_olap
|
|
WHERE
|
|
tenant_id = $2::uuid
|
|
AND (task_id, task_inserted_at) IN (SELECT task_id, task_inserted_at FROM tasks)
|
|
GROUP BY tenant_id, task_id, task_inserted_at, retry_count, event_type
|
|
)
|
|
SELECT
|
|
a.tenant_id,
|
|
a.task_id,
|
|
a.task_inserted_at,
|
|
a.retry_count,
|
|
a.event_type,
|
|
a.time_first_seen,
|
|
a.time_last_seen,
|
|
a.count,
|
|
t.id,
|
|
t.event_timestamp,
|
|
t.readable_status,
|
|
t.error_message,
|
|
t.output,
|
|
t.external_id AS event_external_id,
|
|
t.worker_id,
|
|
t.additional__event_data,
|
|
t.additional__event_message,
|
|
tsk.display_name,
|
|
tsk.external_id AS task_external_id
|
|
FROM aggregated_events a
|
|
JOIN v1_task_events_olap t
|
|
ON t.tenant_id = a.tenant_id
|
|
AND t.task_id = a.task_id
|
|
AND t.task_inserted_at = a.task_inserted_at
|
|
AND t.id = a.first_id
|
|
JOIN v1_tasks_olap tsk
|
|
ON (tsk.tenant_id, tsk.id, tsk.inserted_at) = (t.tenant_id, t.task_id, t.task_inserted_at)
|
|
ORDER BY a.time_first_seen DESC, t.event_timestamp DESC
|
|
`
|
|
|
|
type ListTaskEventsForWorkflowRunParams struct {
|
|
Workflowrunid pgtype.UUID `json:"workflowrunid"`
|
|
Tenantid pgtype.UUID `json:"tenantid"`
|
|
}
|
|
|
|
type ListTaskEventsForWorkflowRunRow struct {
|
|
TenantID pgtype.UUID `json:"tenant_id"`
|
|
TaskID int64 `json:"task_id"`
|
|
TaskInsertedAt pgtype.Timestamptz `json:"task_inserted_at"`
|
|
RetryCount int32 `json:"retry_count"`
|
|
EventType V1EventTypeOlap `json:"event_type"`
|
|
TimeFirstSeen pgtype.Timestamptz `json:"time_first_seen"`
|
|
TimeLastSeen pgtype.Timestamptz `json:"time_last_seen"`
|
|
Count int64 `json:"count"`
|
|
ID int64 `json:"id"`
|
|
EventTimestamp pgtype.Timestamptz `json:"event_timestamp"`
|
|
ReadableStatus V1ReadableStatusOlap `json:"readable_status"`
|
|
ErrorMessage pgtype.Text `json:"error_message"`
|
|
Output []byte `json:"output"`
|
|
EventExternalID pgtype.UUID `json:"event_external_id"`
|
|
WorkerID pgtype.UUID `json:"worker_id"`
|
|
AdditionalEventData pgtype.Text `json:"additional__event_data"`
|
|
AdditionalEventMessage pgtype.Text `json:"additional__event_message"`
|
|
DisplayName string `json:"display_name"`
|
|
TaskExternalID pgtype.UUID `json:"task_external_id"`
|
|
}
|
|
|
|
func (q *Queries) ListTaskEventsForWorkflowRun(ctx context.Context, db DBTX, arg ListTaskEventsForWorkflowRunParams) ([]*ListTaskEventsForWorkflowRunRow, error) {
|
|
rows, err := db.Query(ctx, listTaskEventsForWorkflowRun, arg.Workflowrunid, arg.Tenantid)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []*ListTaskEventsForWorkflowRunRow
|
|
for rows.Next() {
|
|
var i ListTaskEventsForWorkflowRunRow
|
|
if err := rows.Scan(
|
|
&i.TenantID,
|
|
&i.TaskID,
|
|
&i.TaskInsertedAt,
|
|
&i.RetryCount,
|
|
&i.EventType,
|
|
&i.TimeFirstSeen,
|
|
&i.TimeLastSeen,
|
|
&i.Count,
|
|
&i.ID,
|
|
&i.EventTimestamp,
|
|
&i.ReadableStatus,
|
|
&i.ErrorMessage,
|
|
&i.Output,
|
|
&i.EventExternalID,
|
|
&i.WorkerID,
|
|
&i.AdditionalEventData,
|
|
&i.AdditionalEventMessage,
|
|
&i.DisplayName,
|
|
&i.TaskExternalID,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, &i)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const listTasksByDAGIds = `-- name: ListTasksByDAGIds :many
|
|
SELECT
|
|
DISTINCT ON (t.external_id)
|
|
dt.dag_id, dt.dag_inserted_at, dt.task_id, dt.task_inserted_at,
|
|
lt.external_id AS dag_external_id
|
|
FROM
|
|
v1_lookup_table_olap lt
|
|
JOIN
|
|
v1_dag_to_task_olap dt ON (lt.dag_id, lt.inserted_at)= (dt.dag_id, dt.dag_inserted_at)
|
|
JOIN
|
|
v1_tasks_olap t ON (t.id, t.inserted_at) = (dt.task_id, dt.task_inserted_at)
|
|
WHERE
|
|
lt.external_id = ANY($1::uuid[])
|
|
AND lt.tenant_id = $2::uuid
|
|
ORDER BY
|
|
t.external_id, t.inserted_at DESC
|
|
`
|
|
|
|
type ListTasksByDAGIdsParams struct {
|
|
Dagids []pgtype.UUID `json:"dagids"`
|
|
Tenantid pgtype.UUID `json:"tenantid"`
|
|
}
|
|
|
|
type ListTasksByDAGIdsRow struct {
|
|
DagID int64 `json:"dag_id"`
|
|
DagInsertedAt pgtype.Timestamptz `json:"dag_inserted_at"`
|
|
TaskID int64 `json:"task_id"`
|
|
TaskInsertedAt pgtype.Timestamptz `json:"task_inserted_at"`
|
|
DagExternalID pgtype.UUID `json:"dag_external_id"`
|
|
}
|
|
|
|
func (q *Queries) ListTasksByDAGIds(ctx context.Context, db DBTX, arg ListTasksByDAGIdsParams) ([]*ListTasksByDAGIdsRow, error) {
|
|
rows, err := db.Query(ctx, listTasksByDAGIds, arg.Dagids, arg.Tenantid)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []*ListTasksByDAGIdsRow
|
|
for rows.Next() {
|
|
var i ListTasksByDAGIdsRow
|
|
if err := rows.Scan(
|
|
&i.DagID,
|
|
&i.DagInsertedAt,
|
|
&i.TaskID,
|
|
&i.TaskInsertedAt,
|
|
&i.DagExternalID,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, &i)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const listTasksByExternalIds = `-- name: ListTasksByExternalIds :many
|
|
SELECT
|
|
tenant_id,
|
|
task_id,
|
|
inserted_at
|
|
FROM
|
|
v1_lookup_table_olap
|
|
WHERE
|
|
external_id = ANY($1::uuid[])
|
|
AND tenant_id = $2::uuid
|
|
`
|
|
|
|
type ListTasksByExternalIdsParams struct {
|
|
Externalids []pgtype.UUID `json:"externalids"`
|
|
Tenantid pgtype.UUID `json:"tenantid"`
|
|
}
|
|
|
|
type ListTasksByExternalIdsRow struct {
|
|
TenantID pgtype.UUID `json:"tenant_id"`
|
|
TaskID pgtype.Int8 `json:"task_id"`
|
|
InsertedAt pgtype.Timestamptz `json:"inserted_at"`
|
|
}
|
|
|
|
func (q *Queries) ListTasksByExternalIds(ctx context.Context, db DBTX, arg ListTasksByExternalIdsParams) ([]*ListTasksByExternalIdsRow, error) {
|
|
rows, err := db.Query(ctx, listTasksByExternalIds, arg.Externalids, arg.Tenantid)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []*ListTasksByExternalIdsRow
|
|
for rows.Next() {
|
|
var i ListTasksByExternalIdsRow
|
|
if err := rows.Scan(&i.TenantID, &i.TaskID, &i.InsertedAt); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, &i)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const listWorkflowRunDisplayNames = `-- name: ListWorkflowRunDisplayNames :many
|
|
SELECT
|
|
lt.external_id,
|
|
COALESCE(t.display_name, d.display_name) AS display_name,
|
|
COALESCE(t.inserted_at, d.inserted_at) AS inserted_at
|
|
FROM v1_lookup_table_olap lt
|
|
LEFT JOIN v1_dags_olap d ON (lt.dag_id, lt.inserted_at) = (d.id, d.inserted_at)
|
|
LEFT JOIN v1_tasks_olap t ON (lt.task_id, lt.inserted_at) = (t.id, t.inserted_at)
|
|
WHERE
|
|
lt.external_id = ANY($1::uuid[])
|
|
AND lt.tenant_id = $2::uuid
|
|
LIMIT 10000
|
|
`
|
|
|
|
type ListWorkflowRunDisplayNamesParams struct {
|
|
Externalids []pgtype.UUID `json:"externalids"`
|
|
Tenantid pgtype.UUID `json:"tenantid"`
|
|
}
|
|
|
|
type ListWorkflowRunDisplayNamesRow struct {
|
|
ExternalID pgtype.UUID `json:"external_id"`
|
|
DisplayName string `json:"display_name"`
|
|
InsertedAt pgtype.Timestamptz `json:"inserted_at"`
|
|
}
|
|
|
|
func (q *Queries) ListWorkflowRunDisplayNames(ctx context.Context, db DBTX, arg ListWorkflowRunDisplayNamesParams) ([]*ListWorkflowRunDisplayNamesRow, error) {
|
|
rows, err := db.Query(ctx, listWorkflowRunDisplayNames, arg.Externalids, arg.Tenantid)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []*ListWorkflowRunDisplayNamesRow
|
|
for rows.Next() {
|
|
var i ListWorkflowRunDisplayNamesRow
|
|
if err := rows.Scan(&i.ExternalID, &i.DisplayName, &i.InsertedAt); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, &i)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const listWorkflowRunExternalIds = `-- name: ListWorkflowRunExternalIds :many
SELECT external_id
FROM v1_runs_olap
WHERE
    tenant_id = $1::UUID
    AND inserted_at > $2::TIMESTAMPTZ
    AND (
        $3::TIMESTAMPTZ IS NULL
        OR inserted_at <= $3::TIMESTAMPTZ
    )
    AND readable_status = ANY(CAST($4::TEXT[] AS v1_readable_status_olap[]))
    AND (
        $5::text[] IS NULL
        OR $6::text[] IS NULL
        OR EXISTS (
            SELECT 1 FROM jsonb_each_text(additional_metadata) kv
            JOIN LATERAL (
                SELECT unnest($5::text[]) AS k,
                       unnest($6::text[]) AS v
            ) AS u ON kv.key = u.k AND kv.value = u.v
        )
    )
    AND (
        $7::UUID[] IS NULL OR workflow_id = ANY($7::UUID[])
    )
`

type ListWorkflowRunExternalIdsParams struct {
	Tenantid             pgtype.UUID        `json:"tenantid"`
	Since                pgtype.Timestamptz `json:"since"`
	Until                pgtype.Timestamptz `json:"until"`
	Statuses             []string           `json:"statuses"`
	AdditionalMetaKeys   []string           `json:"additionalMetaKeys"`
	AdditionalMetaValues []string           `json:"additionalMetaValues"`
	WorkflowIds          []pgtype.UUID      `json:"workflowIds"`
}

func (q *Queries) ListWorkflowRunExternalIds(ctx context.Context, db DBTX, arg ListWorkflowRunExternalIdsParams) ([]pgtype.UUID, error) {
	rows, err := db.Query(ctx, listWorkflowRunExternalIds,
		arg.Tenantid,
		arg.Since,
		arg.Until,
		arg.Statuses,
		arg.AdditionalMetaKeys,
		arg.AdditionalMetaValues,
		arg.WorkflowIds,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []pgtype.UUID
	for rows.Next() {
		var external_id pgtype.UUID
		if err := rows.Scan(&external_id); err != nil {
			return nil, err
		}
		items = append(items, external_id)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}

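// Example usage (illustrative sketch): the additional-metadata filter takes two
// parallel arrays, one of keys and one of values, which the query zips together
// with UNNEST; a run matches when any (key, value) pair appears in its
// additional_metadata JSONB. Passing nil for either array disables the filter,
// as does nil for WorkflowIds. All variable names below are hypothetical.
//
//	ids, err := q.ListWorkflowRunExternalIds(ctx, db, ListWorkflowRunExternalIdsParams{
//		Tenantid:             tenantID,
//		Since:                since, // exclusive lower bound on inserted_at
//		Until:                until, // optional; NULL disables the upper bound
//		Statuses:             []string{"FAILED", "CANCELLED"},
//		AdditionalMetaKeys:   []string{"env", "region"},
//		AdditionalMetaValues: []string{"prod", "us-east-1"},
//		WorkflowIds:          nil,
//	})
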
const markOLAPCutoverJobAsCompleted = `-- name: MarkOLAPCutoverJobAsCompleted :exec
UPDATE v1_payloads_olap_cutover_job_offset
SET is_completed = TRUE
WHERE key = $1::DATE
`

func (q *Queries) MarkOLAPCutoverJobAsCompleted(ctx context.Context, db DBTX, key pgtype.Date) error {
	_, err := db.Exec(ctx, markOLAPCutoverJobAsCompleted, key)
	return err
}

const offloadPayloads = `-- name: OffloadPayloads :exec
WITH inputs AS (
    SELECT
        UNNEST($1::UUID[]) AS external_id,
        UNNEST($2::UUID[]) AS tenant_id,
        UNNEST($3::TEXT[]) AS external_location_key
)

UPDATE v1_payloads_olap
SET
    location = 'EXTERNAL',
    external_location_key = i.external_location_key,
    inline_content = NULL,
    updated_at = NOW()
FROM inputs i
WHERE
    (v1_payloads_olap.tenant_id, v1_payloads_olap.external_id) = (i.tenant_id, i.external_id)
    AND v1_payloads_olap.location = 'INLINE'
    AND v1_payloads_olap.external_location_key IS NULL
`

type OffloadPayloadsParams struct {
	Externalids          []pgtype.UUID `json:"externalids"`
	Tenantids            []pgtype.UUID `json:"tenantids"`
	Externallocationkeys []string      `json:"externallocationkeys"`
}

func (q *Queries) OffloadPayloads(ctx context.Context, db DBTX, arg OffloadPayloadsParams) error {
	_, err := db.Exec(ctx, offloadPayloads, arg.Externalids, arg.Tenantids, arg.Externallocationkeys)
	return err
}

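// Example usage (illustrative sketch): marking a batch of payloads as offloaded
// to external storage. The three slices are parallel arrays, one element per
// payload, and the WHERE clause makes the update idempotent: only rows that are
// still INLINE with no external_location_key are touched, so re-running a
// partially failed offload batch is safe. Names below are hypothetical.
//
//	err := q.OffloadPayloads(ctx, db, OffloadPayloadsParams{
//		Externalids:          externalIDs,
//		Tenantids:            tenantIDs,
//		Externallocationkeys: objectKeys, // e.g. one blob-store key per payload
//	})
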
const populateDAGMetadata = `-- name: PopulateDAGMetadata :many
WITH input AS (
    SELECT
        UNNEST($2::bigint[]) AS id,
        UNNEST($3::timestamptz[]) AS inserted_at
), runs AS (
    SELECT
        d.id AS dag_id,
        r.id AS run_id,
        r.tenant_id,
        r.inserted_at,
        r.external_id,
        r.readable_status,
        r.kind,
        r.workflow_id,
        d.display_name,
        CASE
            WHEN $1::BOOLEAN THEN d.input
            ELSE '{}'::JSONB
        END::JSONB AS input,
        d.additional_metadata,
        d.workflow_version_id,
        d.parent_task_external_id
    FROM input i
    JOIN v1_runs_olap r ON (i.id, i.inserted_at) = (r.id, r.inserted_at)
    JOIN v1_dags_olap d ON (r.id, r.inserted_at) = (d.id, d.inserted_at)
    WHERE r.tenant_id = $4::uuid AND r.kind = 'DAG'
), relevant_events AS (
    SELECT r.run_id, e.tenant_id, e.id, e.inserted_at, e.external_id, e.task_id, e.task_inserted_at, e.event_type, e.workflow_id, e.event_timestamp, e.readable_status, e.retry_count, e.error_message, e.output, e.worker_id, e.additional__event_data, e.additional__event_message
    FROM runs r
    JOIN v1_dag_to_task_olap dt ON (r.dag_id, r.inserted_at) = (dt.dag_id, dt.dag_inserted_at)
    JOIN v1_task_events_olap e ON (e.task_id, e.task_inserted_at) = (dt.task_id, dt.task_inserted_at)
    WHERE e.tenant_id = $4::uuid
), max_retry_count AS (
    SELECT run_id, MAX(retry_count) AS max_retry_count
    FROM relevant_events
    GROUP BY run_id
), metadata AS (
    SELECT
        e.run_id,
        MIN(e.inserted_at)::timestamptz AS created_at,
        MIN(e.inserted_at) FILTER (WHERE e.readable_status = 'RUNNING')::timestamptz AS started_at,
        MAX(e.inserted_at) FILTER (WHERE e.readable_status IN ('COMPLETED', 'CANCELLED', 'FAILED'))::timestamptz AS finished_at
    FROM
        relevant_events e
    JOIN max_retry_count mrc ON (e.run_id, e.retry_count) = (mrc.run_id, mrc.max_retry_count)
    GROUP BY e.run_id
), error_message AS (
    SELECT
        DISTINCT ON (e.run_id) e.run_id::bigint,
        e.error_message
    FROM
        relevant_events e
    WHERE
        e.readable_status = 'FAILED'
    ORDER BY
        e.run_id, e.retry_count DESC
), task_output AS (
    SELECT
        run_id,
        output,
        external_id
    FROM
        relevant_events
    WHERE
        event_type = 'FINISHED'
)

SELECT
    r.dag_id, r.run_id, r.tenant_id, r.inserted_at, r.external_id, r.readable_status, r.kind, r.workflow_id, r.display_name, r.input, r.additional_metadata, r.workflow_version_id, r.parent_task_external_id,
    m.created_at,
    m.started_at,
    m.finished_at,
    e.error_message,
    CASE
        WHEN $1::BOOLEAN THEN o.output::JSONB
        ELSE '{}'::JSONB
    END::JSONB AS output,
    o.external_id AS output_event_external_id,
    COALESCE(mrc.max_retry_count, 0)::int as retry_count
FROM runs r
LEFT JOIN metadata m ON r.run_id = m.run_id
LEFT JOIN error_message e ON r.run_id = e.run_id
LEFT JOIN task_output o ON r.run_id = o.run_id
LEFT JOIN max_retry_count mrc ON r.run_id = mrc.run_id
ORDER BY r.inserted_at DESC, r.run_id DESC
`

type PopulateDAGMetadataParams struct {
	Includepayloads bool                 `json:"includepayloads"`
	Ids             []int64              `json:"ids"`
	Insertedats     []pgtype.Timestamptz `json:"insertedats"`
	Tenantid        pgtype.UUID          `json:"tenantid"`
}

type PopulateDAGMetadataRow struct {
	DagID                 int64                `json:"dag_id"`
	RunID                 int64                `json:"run_id"`
	TenantID              pgtype.UUID          `json:"tenant_id"`
	InsertedAt            pgtype.Timestamptz   `json:"inserted_at"`
	ExternalID            pgtype.UUID          `json:"external_id"`
	ReadableStatus        V1ReadableStatusOlap `json:"readable_status"`
	Kind                  V1RunKind            `json:"kind"`
	WorkflowID            pgtype.UUID          `json:"workflow_id"`
	DisplayName           string               `json:"display_name"`
	Input                 []byte               `json:"input"`
	AdditionalMetadata    []byte               `json:"additional_metadata"`
	WorkflowVersionID     pgtype.UUID          `json:"workflow_version_id"`
	ParentTaskExternalID  pgtype.UUID          `json:"parent_task_external_id"`
	CreatedAt             pgtype.Timestamptz   `json:"created_at"`
	StartedAt             pgtype.Timestamptz   `json:"started_at"`
	FinishedAt            pgtype.Timestamptz   `json:"finished_at"`
	ErrorMessage          pgtype.Text          `json:"error_message"`
	Output                []byte               `json:"output"`
	OutputEventExternalID pgtype.UUID          `json:"output_event_external_id"`
	RetryCount            int32                `json:"retry_count"`
}

func (q *Queries) PopulateDAGMetadata(ctx context.Context, db DBTX, arg PopulateDAGMetadataParams) ([]*PopulateDAGMetadataRow, error) {
	rows, err := db.Query(ctx, populateDAGMetadata,
		arg.Includepayloads,
		arg.Ids,
		arg.Insertedats,
		arg.Tenantid,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*PopulateDAGMetadataRow
	for rows.Next() {
		var i PopulateDAGMetadataRow
		if err := rows.Scan(
			&i.DagID,
			&i.RunID,
			&i.TenantID,
			&i.InsertedAt,
			&i.ExternalID,
			&i.ReadableStatus,
			&i.Kind,
			&i.WorkflowID,
			&i.DisplayName,
			&i.Input,
			&i.AdditionalMetadata,
			&i.WorkflowVersionID,
			&i.ParentTaskExternalID,
			&i.CreatedAt,
			&i.StartedAt,
			&i.FinishedAt,
			&i.ErrorMessage,
			&i.Output,
			&i.OutputEventExternalID,
			&i.RetryCount,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}

const populateEventData = `-- name: PopulateEventData :many
SELECT
    elt.external_id,
    COUNT(*) FILTER (WHERE r.readable_status = 'QUEUED') AS queued_count,
    COUNT(*) FILTER (WHERE r.readable_status = 'RUNNING') AS running_count,
    COUNT(*) FILTER (WHERE r.readable_status = 'COMPLETED') AS completed_count,
    COUNT(*) FILTER (WHERE r.readable_status = 'CANCELLED') AS cancelled_count,
    COUNT(*) FILTER (WHERE r.readable_status = 'FAILED') AS failed_count,
    JSON_AGG(JSON_BUILD_OBJECT('run_external_id', r.external_id, 'filter_id', etr.filter_id)) FILTER (WHERE r.external_id IS NOT NULL)::JSONB AS triggered_runs
FROM v1_event_lookup_table_olap elt
JOIN v1_events_olap e ON (elt.tenant_id, elt.event_id, elt.event_seen_at) = (e.tenant_id, e.id, e.seen_at)
JOIN v1_event_to_run_olap etr ON (e.id, e.seen_at) = (etr.event_id, etr.event_seen_at)
JOIN v1_runs_olap r ON (etr.run_id, etr.run_inserted_at) = (r.id, r.inserted_at)
WHERE
    elt.external_id = ANY($1::uuid[])
    AND elt.tenant_id = $2::uuid
GROUP BY elt.external_id
`

type PopulateEventDataParams struct {
	Eventexternalids []pgtype.UUID `json:"eventexternalids"`
	Tenantid         pgtype.UUID   `json:"tenantid"`
}

type PopulateEventDataRow struct {
	ExternalID     pgtype.UUID `json:"external_id"`
	QueuedCount    int64       `json:"queued_count"`
	RunningCount   int64       `json:"running_count"`
	CompletedCount int64       `json:"completed_count"`
	CancelledCount int64       `json:"cancelled_count"`
	FailedCount    int64       `json:"failed_count"`
	TriggeredRuns  []byte      `json:"triggered_runs"`
}

func (q *Queries) PopulateEventData(ctx context.Context, db DBTX, arg PopulateEventDataParams) ([]*PopulateEventDataRow, error) {
	rows, err := db.Query(ctx, populateEventData, arg.Eventexternalids, arg.Tenantid)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*PopulateEventDataRow
	for rows.Next() {
		var i PopulateEventDataRow
		if err := rows.Scan(
			&i.ExternalID,
			&i.QueuedCount,
			&i.RunningCount,
			&i.CompletedCount,
			&i.CancelledCount,
			&i.FailedCount,
			&i.TriggeredRuns,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}

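// Example usage (illustrative sketch): aggregating run-status counts for a page
// of events. TriggeredRuns comes back as raw JSONB, an array of
// {"run_external_id": ..., "filter_id": ...} objects, so callers typically
// unmarshal it with encoding/json. Variable names below are hypothetical.
//
//	rows, err := q.PopulateEventData(ctx, db, PopulateEventDataParams{
//		Eventexternalids: eventExternalIDs,
//		Tenantid:         tenantID,
//	})
//	if err != nil {
//		return err
//	}
//	for _, row := range rows {
//		var triggered []map[string]any
//		if row.TriggeredRuns != nil {
//			_ = json.Unmarshal(row.TriggeredRuns, &triggered)
//		}
//	}
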
const populateSingleTaskRunData = `-- name: PopulateSingleTaskRunData :one
WITH selected_retry_count AS (
    SELECT
        CASE
            WHEN $4::int IS NOT NULL THEN $4::int
            ELSE MAX(retry_count)::int
        END AS retry_count
    FROM
        v1_task_events_olap
    WHERE
        tenant_id = $1::uuid
        AND task_id = $2::bigint
        AND task_inserted_at = $3::timestamptz
    LIMIT 1
), relevant_events AS (
    SELECT
        tenant_id, id, inserted_at, external_id, task_id, task_inserted_at, event_type, workflow_id, event_timestamp, readable_status, retry_count, error_message, output, worker_id, additional__event_data, additional__event_message
    FROM
        v1_task_events_olap
    WHERE
        tenant_id = $1::uuid
        AND task_id = $2::bigint
        AND task_inserted_at = $3::timestamptz
        AND retry_count = (SELECT retry_count FROM selected_retry_count)
), finished_at AS (
    SELECT
        MAX(event_timestamp) AS finished_at
    FROM
        relevant_events
    WHERE
        readable_status = ANY(ARRAY['COMPLETED', 'FAILED', 'CANCELLED']::v1_readable_status_olap[])
), started_at AS (
    SELECT
        MAX(event_timestamp) AS started_at
    FROM
        relevant_events
    WHERE
        event_type = 'STARTED'
), queued_at AS (
    SELECT
        MAX(event_timestamp) AS queued_at
    FROM
        relevant_events
    WHERE
        event_type = 'QUEUED'
), task_output AS (
    SELECT
        external_id,
        output
    FROM
        relevant_events
    WHERE
        event_type = 'FINISHED'
    LIMIT 1
), status AS (
    SELECT
        readable_status
    FROM
        relevant_events
    ORDER BY
        readable_status DESC
    LIMIT 1
), error_message AS (
    SELECT
        error_message
    FROM
        relevant_events
    WHERE
        readable_status = 'FAILED'
    ORDER BY
        event_timestamp DESC
    LIMIT 1
), spawned_children AS (
    SELECT COUNT(*) AS spawned_children
    FROM v1_runs_olap
    WHERE parent_task_external_id = (
        SELECT external_id
        FROM v1_tasks_olap
        WHERE
            tenant_id = $1::uuid
            AND id = $2::bigint
            AND inserted_at = $3::timestamptz
        LIMIT 1
    )
)
SELECT
    t.tenant_id, t.id, t.inserted_at, t.external_id, t.queue, t.action_id, t.step_id, t.workflow_id, t.workflow_version_id, t.workflow_run_id, t.schedule_timeout, t.step_timeout, t.priority, t.sticky, t.desired_worker_id, t.display_name, t.input, t.additional_metadata, t.readable_status, t.latest_retry_count, t.latest_worker_id, t.dag_id, t.dag_inserted_at, t.parent_task_external_id,
    st.readable_status::v1_readable_status_olap as status,
    f.finished_at::timestamptz as finished_at,
    s.started_at::timestamptz as started_at,
    q.queued_at::timestamptz as queued_at,
    o.external_id::UUID AS output_event_external_id,
    o.output as output,
    e.error_message as error_message,
    sc.spawned_children,
    (SELECT retry_count FROM selected_retry_count) as retry_count
FROM
    v1_tasks_olap t
LEFT JOIN
    finished_at f ON true
LEFT JOIN
    started_at s ON true
LEFT JOIN
    queued_at q ON true
LEFT JOIN
    task_output o ON true
LEFT JOIN
    status st ON true
LEFT JOIN
    error_message e ON true
LEFT JOIN
    spawned_children sc ON true
WHERE
    (t.tenant_id, t.id, t.inserted_at) = ($1::uuid, $2::bigint, $3::timestamptz)
`

type PopulateSingleTaskRunDataParams struct {
	Tenantid       pgtype.UUID        `json:"tenantid"`
	Taskid         int64              `json:"taskid"`
	Taskinsertedat pgtype.Timestamptz `json:"taskinsertedat"`
	RetryCount     pgtype.Int4        `json:"retry_count"`
}

type PopulateSingleTaskRunDataRow struct {
	TenantID              pgtype.UUID          `json:"tenant_id"`
	ID                    int64                `json:"id"`
	InsertedAt            pgtype.Timestamptz   `json:"inserted_at"`
	ExternalID            pgtype.UUID          `json:"external_id"`
	Queue                 string               `json:"queue"`
	ActionID              string               `json:"action_id"`
	StepID                pgtype.UUID          `json:"step_id"`
	WorkflowID            pgtype.UUID          `json:"workflow_id"`
	WorkflowVersionID     pgtype.UUID          `json:"workflow_version_id"`
	WorkflowRunID         pgtype.UUID          `json:"workflow_run_id"`
	ScheduleTimeout       string               `json:"schedule_timeout"`
	StepTimeout           pgtype.Text          `json:"step_timeout"`
	Priority              pgtype.Int4          `json:"priority"`
	Sticky                V1StickyStrategyOlap `json:"sticky"`
	DesiredWorkerID       pgtype.UUID          `json:"desired_worker_id"`
	DisplayName           string               `json:"display_name"`
	Input                 []byte               `json:"input"`
	AdditionalMetadata    []byte               `json:"additional_metadata"`
	ReadableStatus        V1ReadableStatusOlap `json:"readable_status"`
	LatestRetryCount      int32                `json:"latest_retry_count"`
	LatestWorkerID        pgtype.UUID          `json:"latest_worker_id"`
	DagID                 pgtype.Int8          `json:"dag_id"`
	DagInsertedAt         pgtype.Timestamptz   `json:"dag_inserted_at"`
	ParentTaskExternalID  pgtype.UUID          `json:"parent_task_external_id"`
	Status                V1ReadableStatusOlap `json:"status"`
	FinishedAt            pgtype.Timestamptz   `json:"finished_at"`
	StartedAt             pgtype.Timestamptz   `json:"started_at"`
	QueuedAt              pgtype.Timestamptz   `json:"queued_at"`
	OutputEventExternalID pgtype.UUID          `json:"output_event_external_id"`
	Output                []byte               `json:"output"`
	ErrorMessage          pgtype.Text          `json:"error_message"`
	SpawnedChildren       pgtype.Int8          `json:"spawned_children"`
	RetryCount            int32                `json:"retry_count"`
}

func (q *Queries) PopulateSingleTaskRunData(ctx context.Context, db DBTX, arg PopulateSingleTaskRunDataParams) (*PopulateSingleTaskRunDataRow, error) {
	row := db.QueryRow(ctx, populateSingleTaskRunData,
		arg.Tenantid,
		arg.Taskid,
		arg.Taskinsertedat,
		arg.RetryCount,
	)
	var i PopulateSingleTaskRunDataRow
	err := row.Scan(
		&i.TenantID,
		&i.ID,
		&i.InsertedAt,
		&i.ExternalID,
		&i.Queue,
		&i.ActionID,
		&i.StepID,
		&i.WorkflowID,
		&i.WorkflowVersionID,
		&i.WorkflowRunID,
		&i.ScheduleTimeout,
		&i.StepTimeout,
		&i.Priority,
		&i.Sticky,
		&i.DesiredWorkerID,
		&i.DisplayName,
		&i.Input,
		&i.AdditionalMetadata,
		&i.ReadableStatus,
		&i.LatestRetryCount,
		&i.LatestWorkerID,
		&i.DagID,
		&i.DagInsertedAt,
		&i.ParentTaskExternalID,
		&i.Status,
		&i.FinishedAt,
		&i.StartedAt,
		&i.QueuedAt,
		&i.OutputEventExternalID,
		&i.Output,
		&i.ErrorMessage,
		&i.SpawnedChildren,
		&i.RetryCount,
	)
	return &i, err
}

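// Example usage (illustrative sketch): RetryCount is a nullable pgtype.Int4.
// When left invalid (SQL NULL), the selected_retry_count CTE falls back to
// MAX(retry_count), i.e. the latest attempt; when set, events for that specific
// attempt are returned instead. Variable names below are hypothetical.
//
//	row, err := q.PopulateSingleTaskRunData(ctx, db, PopulateSingleTaskRunDataParams{
//		Tenantid:       tenantID,
//		Taskid:         taskID,
//		Taskinsertedat: taskInsertedAt,
//		// Set Valid: true to pin a specific attempt; the zero value means
//		// "latest attempt".
//		RetryCount: pgtype.Int4{Int32: 2, Valid: true},
//	})
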
const populateTaskRunData = `-- name: PopulateTaskRunData :many
WITH input AS (
    SELECT
        UNNEST($2::bigint[]) AS id,
        UNNEST($3::timestamptz[]) AS inserted_at
), tasks AS (
    SELECT
        DISTINCT ON(t.tenant_id, t.id, t.inserted_at)
        t.tenant_id,
        t.id,
        t.inserted_at,
        t.queue,
        t.action_id,
        t.step_id,
        t.workflow_id,
        t.workflow_version_id,
        t.schedule_timeout,
        t.step_timeout,
        t.priority,
        t.sticky,
        t.desired_worker_id,
        t.external_id,
        t.display_name,
        t.input,
        t.additional_metadata,
        t.readable_status,
        t.parent_task_external_id,
        t.workflow_run_id,
        t.latest_retry_count
    FROM
        v1_tasks_olap t
    JOIN
        input i ON i.id = t.id AND i.inserted_at = t.inserted_at
    WHERE
        t.tenant_id = $4::uuid
), relevant_events AS (
    SELECT
        e.tenant_id, e.id, e.inserted_at, e.external_id, e.task_id, e.task_inserted_at, e.event_type, e.workflow_id, e.event_timestamp, e.readable_status, e.retry_count, e.error_message, e.output, e.worker_id, e.additional__event_data, e.additional__event_message
    FROM
        v1_task_events_olap e
    JOIN
        tasks t ON t.id = e.task_id AND t.tenant_id = e.tenant_id AND t.inserted_at = e.task_inserted_at
), max_retry_counts AS (
    SELECT
        e.tenant_id,
        e.task_id,
        e.task_inserted_at,
        MAX(e.retry_count) AS max_retry_count
    FROM
        relevant_events e
    GROUP BY
        e.tenant_id, e.task_id, e.task_inserted_at
), queued_ats AS (
    SELECT
        e.task_id::bigint,
        MAX(e.event_timestamp) AS queued_at
    FROM
        relevant_events e
    JOIN
        max_retry_counts mrc ON
            e.tenant_id = mrc.tenant_id
            AND e.task_id = mrc.task_id
            AND e.task_inserted_at = mrc.task_inserted_at
            AND e.retry_count = mrc.max_retry_count
    WHERE
        e.event_type = 'QUEUED'
    GROUP BY e.task_id
), started_ats AS (
    SELECT
        e.task_id::bigint,
        MAX(e.event_timestamp) AS started_at
    FROM
        relevant_events e
    JOIN
        max_retry_counts mrc ON
            e.tenant_id = mrc.tenant_id
            AND e.task_id = mrc.task_id
            AND e.task_inserted_at = mrc.task_inserted_at
            AND e.retry_count = mrc.max_retry_count
    WHERE
        e.event_type = 'STARTED'
    GROUP BY e.task_id
), finished_ats AS (
    SELECT
        e.task_id::bigint,
        MAX(e.event_timestamp) AS finished_at
    FROM
        relevant_events e
    JOIN
        max_retry_counts mrc ON
            e.tenant_id = mrc.tenant_id
            AND e.task_id = mrc.task_id
            AND e.task_inserted_at = mrc.task_inserted_at
            AND e.retry_count = mrc.max_retry_count
    WHERE
        e.readable_status = ANY(ARRAY['COMPLETED', 'FAILED', 'CANCELLED']::v1_readable_status_olap[])
    GROUP BY e.task_id
), error_message AS (
    SELECT
        DISTINCT ON (e.task_id) e.task_id::bigint,
        e.error_message
    FROM
        relevant_events e
    JOIN
        max_retry_counts mrc ON
            e.tenant_id = mrc.tenant_id
            AND e.task_id = mrc.task_id
            AND e.task_inserted_at = mrc.task_inserted_at
            AND e.retry_count = mrc.max_retry_count
    WHERE
        e.readable_status = 'FAILED'
    ORDER BY
        e.task_id, e.retry_count DESC
), task_output AS (
    SELECT
        task_id,
        MAX(output::TEXT) FILTER (WHERE readable_status = 'COMPLETED')::JSONB AS output,
        MAX(external_id::TEXT) FILTER (WHERE readable_status = 'COMPLETED')::UUID AS output_event_external_id
    FROM
        relevant_events
    WHERE
        readable_status = 'COMPLETED'
    GROUP BY
        task_id
)
SELECT
    t.tenant_id,
    t.id,
    t.inserted_at,
    t.external_id,
    t.queue,
    t.action_id,
    t.step_id,
    t.workflow_id,
    t.workflow_version_id,
    t.schedule_timeout,
    t.step_timeout,
    t.priority,
    t.sticky,
    t.display_name,
    t.additional_metadata,
    t.parent_task_external_id,
    CASE
        WHEN $1::BOOLEAN THEN t.input
        ELSE '{}'::JSONB
    END::JSONB AS input,
    t.readable_status::v1_readable_status_olap as status,
    t.workflow_run_id,
    f.finished_at::timestamptz as finished_at,
    s.started_at::timestamptz as started_at,
    q.queued_at::timestamptz as queued_at,
    e.error_message as error_message,
    COALESCE(t.latest_retry_count, 0)::int as retry_count,
    CASE
        WHEN $1::BOOLEAN THEN o.output::JSONB
        ELSE '{}'::JSONB
    END::JSONB as output,
    o.output_event_external_id::UUID AS output_event_external_id
FROM
    tasks t
LEFT JOIN
    finished_ats f ON f.task_id = t.id
LEFT JOIN
    started_ats s ON s.task_id = t.id
LEFT JOIN
    queued_ats q ON q.task_id = t.id
LEFT JOIN
    error_message e ON e.task_id = t.id
LEFT JOIN
    task_output o ON o.task_id = t.id
ORDER BY t.inserted_at DESC, t.id DESC
`

type PopulateTaskRunDataParams struct {
	Includepayloads bool                 `json:"includepayloads"`
	Taskids         []int64              `json:"taskids"`
	Taskinsertedats []pgtype.Timestamptz `json:"taskinsertedats"`
	Tenantid        pgtype.UUID          `json:"tenantid"`
}

type PopulateTaskRunDataRow struct {
	TenantID              pgtype.UUID          `json:"tenant_id"`
	ID                    int64                `json:"id"`
	InsertedAt            pgtype.Timestamptz   `json:"inserted_at"`
	ExternalID            pgtype.UUID          `json:"external_id"`
	Queue                 string               `json:"queue"`
	ActionID              string               `json:"action_id"`
	StepID                pgtype.UUID          `json:"step_id"`
	WorkflowID            pgtype.UUID          `json:"workflow_id"`
	WorkflowVersionID     pgtype.UUID          `json:"workflow_version_id"`
	ScheduleTimeout       string               `json:"schedule_timeout"`
	StepTimeout           pgtype.Text          `json:"step_timeout"`
	Priority              pgtype.Int4          `json:"priority"`
	Sticky                V1StickyStrategyOlap `json:"sticky"`
	DisplayName           string               `json:"display_name"`
	AdditionalMetadata    []byte               `json:"additional_metadata"`
	ParentTaskExternalID  pgtype.UUID          `json:"parent_task_external_id"`
	Input                 []byte               `json:"input"`
	Status                V1ReadableStatusOlap `json:"status"`
	WorkflowRunID         pgtype.UUID          `json:"workflow_run_id"`
	FinishedAt            pgtype.Timestamptz   `json:"finished_at"`
	StartedAt             pgtype.Timestamptz   `json:"started_at"`
	QueuedAt              pgtype.Timestamptz   `json:"queued_at"`
	ErrorMessage          pgtype.Text          `json:"error_message"`
	RetryCount            int32                `json:"retry_count"`
	Output                []byte               `json:"output"`
	OutputEventExternalID pgtype.UUID          `json:"output_event_external_id"`
}

func (q *Queries) PopulateTaskRunData(ctx context.Context, db DBTX, arg PopulateTaskRunDataParams) ([]*PopulateTaskRunDataRow, error) {
	rows, err := db.Query(ctx, populateTaskRunData,
		arg.Includepayloads,
		arg.Taskids,
		arg.Taskinsertedats,
		arg.Tenantid,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*PopulateTaskRunDataRow
	for rows.Next() {
		var i PopulateTaskRunDataRow
		if err := rows.Scan(
			&i.TenantID,
			&i.ID,
			&i.InsertedAt,
			&i.ExternalID,
			&i.Queue,
			&i.ActionID,
			&i.StepID,
			&i.WorkflowID,
			&i.WorkflowVersionID,
			&i.ScheduleTimeout,
			&i.StepTimeout,
			&i.Priority,
			&i.Sticky,
			&i.DisplayName,
			&i.AdditionalMetadata,
			&i.ParentTaskExternalID,
			&i.Input,
			&i.Status,
			&i.WorkflowRunID,
			&i.FinishedAt,
			&i.StartedAt,
			&i.QueuedAt,
			&i.ErrorMessage,
			&i.RetryCount,
			&i.Output,
			&i.OutputEventExternalID,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}

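// Example usage (illustrative sketch): hydrating a page of task runs from the
// (id, inserted_at) pairs returned by a list query. Taskids and Taskinsertedats
// are parallel arrays, and Includepayloads controls whether input/output JSONB
// is returned or replaced with '{}' to keep list responses small. Variable
// names below are hypothetical.
//
//	rows, err := q.PopulateTaskRunData(ctx, db, PopulateTaskRunDataParams{
//		Includepayloads: false, // skip potentially large payloads for list views
//		Taskids:         taskIDs,
//		Taskinsertedats: taskInsertedAts,
//		Tenantid:        tenantID,
//	})
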
const putPayloads = `-- name: PutPayloads :exec
WITH inputs AS (
    SELECT
        UNNEST($1::UUID[]) AS external_id,
        UNNEST($2::TIMESTAMPTZ[]) AS inserted_at,
        UNNEST($3::JSONB[]) AS payload,
        UNNEST($4::UUID[]) AS tenant_id,
        UNNEST(CAST($5::TEXT[] AS v1_payload_location_olap[])) AS location,
        UNNEST($6::TEXT[]) AS external_location_key
)

INSERT INTO v1_payloads_olap (
    tenant_id,
    external_id,
    inserted_at,
    location,
    external_location_key,
    inline_content
)

SELECT
    i.tenant_id,
    i.external_id,
    i.inserted_at,
    i.location,
    CASE
        WHEN i.location = 'EXTERNAL' THEN i.external_location_key
        ELSE NULL
    END,
    CASE
        WHEN i.location = 'INLINE' THEN i.payload
        ELSE NULL
    END AS inline_content
FROM inputs i
ON CONFLICT (tenant_id, external_id, inserted_at) DO UPDATE
SET
    location = EXCLUDED.location,
    external_location_key = EXCLUDED.external_location_key,
    inline_content = EXCLUDED.inline_content,
    updated_at = NOW()
`

type PutPayloadsParams struct {
	Externalids          []pgtype.UUID        `json:"externalids"`
	Insertedats          []pgtype.Timestamptz `json:"insertedats"`
	Payloads             [][]byte             `json:"payloads"`
	Tenantids            []pgtype.UUID        `json:"tenantids"`
	Locations            []string             `json:"locations"`
	Externallocationkeys []string             `json:"externallocationkeys"`
}

func (q *Queries) PutPayloads(ctx context.Context, db DBTX, arg PutPayloadsParams) error {
	_, err := db.Exec(ctx, putPayloads,
		arg.Externalids,
		arg.Insertedats,
		arg.Payloads,
		arg.Tenantids,
		arg.Locations,
		arg.Externallocationkeys,
	)
	return err
}

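// Example usage (illustrative sketch): upserting one INLINE and one EXTERNAL
// payload in a single round trip. All six slices are parallel arrays; the query
// itself nulls out inline_content for EXTERNAL rows and external_location_key
// for INLINE rows, so the unused column can carry a placeholder. The IDs,
// timestamps, and key below are hypothetical.
//
//	err := q.PutPayloads(ctx, db, PutPayloadsParams{
//		Externalids:          []pgtype.UUID{id1, id2},
//		Insertedats:          []pgtype.Timestamptz{ts1, ts2},
//		Payloads:             [][]byte{[]byte(`{"key":"value"}`), nil},
//		Tenantids:            []pgtype.UUID{tenantID, tenantID},
//		Locations:            []string{"INLINE", "EXTERNAL"},
//		Externallocationkeys: []string{"", "payloads/2025/01/02/id2.json"},
//	})
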
const readDAGByExternalID = `-- name: ReadDAGByExternalID :one
WITH lookup_task AS (
    SELECT
        tenant_id,
        dag_id,
        inserted_at
    FROM
        v1_lookup_table_olap
    WHERE
        external_id = $1::uuid
)
SELECT
    d.id, d.inserted_at, d.tenant_id, d.external_id, d.display_name, d.workflow_id, d.workflow_version_id, d.readable_status, d.input, d.additional_metadata, d.parent_task_external_id, d.total_tasks
FROM
    v1_dags_olap d
JOIN
    lookup_task lt ON lt.tenant_id = d.tenant_id AND lt.dag_id = d.id AND lt.inserted_at = d.inserted_at
`

func (q *Queries) ReadDAGByExternalID(ctx context.Context, db DBTX, externalid pgtype.UUID) (*V1DagsOlap, error) {
	row := db.QueryRow(ctx, readDAGByExternalID, externalid)
	var i V1DagsOlap
	err := row.Scan(
		&i.ID,
		&i.InsertedAt,
		&i.TenantID,
		&i.ExternalID,
		&i.DisplayName,
		&i.WorkflowID,
		&i.WorkflowVersionID,
		&i.ReadableStatus,
		&i.Input,
		&i.AdditionalMetadata,
		&i.ParentTaskExternalID,
		&i.TotalTasks,
	)
	return &i, err
}

const readPayloadsOLAP = `-- name: ReadPayloadsOLAP :many
SELECT tenant_id, external_id, location, external_location_key, inline_content, inserted_at, updated_at
FROM v1_payloads_olap
WHERE
    tenant_id = $1::UUID
    AND external_id = ANY($2::UUID[])
`

type ReadPayloadsOLAPParams struct {
	Tenantid    pgtype.UUID   `json:"tenantid"`
	Externalids []pgtype.UUID `json:"externalids"`
}

func (q *Queries) ReadPayloadsOLAP(ctx context.Context, db DBTX, arg ReadPayloadsOLAPParams) ([]*V1PayloadsOlap, error) {
	rows, err := db.Query(ctx, readPayloadsOLAP, arg.Tenantid, arg.Externalids)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*V1PayloadsOlap
	for rows.Next() {
		var i V1PayloadsOlap
		if err := rows.Scan(
			&i.TenantID,
			&i.ExternalID,
			&i.Location,
			&i.ExternalLocationKey,
			&i.InlineContent,
			&i.InsertedAt,
			&i.UpdatedAt,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}

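// Example usage (illustrative sketch): reading payloads back and branching on
// where the content lives. V1PayloadsOlap is defined in this package's
// generated models; the sketch assumes the Location enum is string-backed, as
// sqlc emits for Postgres enums. Variable names below are hypothetical.
//
//	payloads, err := q.ReadPayloadsOLAP(ctx, db, ReadPayloadsOLAPParams{
//		Tenantid:    tenantID,
//		Externalids: externalIDs,
//	})
//	if err != nil {
//		return err
//	}
//	for _, p := range payloads {
//		if string(p.Location) == "EXTERNAL" {
//			// content offloaded; fetch it via p.ExternalLocationKey
//		} else {
//			// content stored inline in p.InlineContent
//		}
//	}
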
const readTaskByExternalID = `-- name: ReadTaskByExternalID :one
WITH lookup_task AS (
    SELECT
        tenant_id,
        task_id,
        inserted_at
    FROM
        v1_lookup_table_olap
    WHERE
        external_id = $1::uuid
)
SELECT
    t.tenant_id, t.id, t.inserted_at, t.external_id, t.queue, t.action_id, t.step_id, t.workflow_id, t.workflow_version_id, t.workflow_run_id, t.schedule_timeout, t.step_timeout, t.priority, t.sticky, t.desired_worker_id, t.display_name, t.input, t.additional_metadata, t.readable_status, t.latest_retry_count, t.latest_worker_id, t.dag_id, t.dag_inserted_at, t.parent_task_external_id,
    e.output,
    e.external_id AS event_external_id,
    e.error_message
FROM
    v1_tasks_olap t
JOIN
    lookup_task lt ON lt.tenant_id = t.tenant_id AND lt.task_id = t.id AND lt.inserted_at = t.inserted_at
JOIN
    v1_task_events_olap e ON (e.tenant_id, e.task_id, e.readable_status, e.retry_count) = (t.tenant_id, t.id, t.readable_status, t.latest_retry_count)
`

type ReadTaskByExternalIDRow struct {
	TenantID             pgtype.UUID          `json:"tenant_id"`
	ID                   int64                `json:"id"`
	InsertedAt           pgtype.Timestamptz   `json:"inserted_at"`
	ExternalID           pgtype.UUID          `json:"external_id"`
	Queue                string               `json:"queue"`
	ActionID             string               `json:"action_id"`
	StepID               pgtype.UUID          `json:"step_id"`
	WorkflowID           pgtype.UUID          `json:"workflow_id"`
	WorkflowVersionID    pgtype.UUID          `json:"workflow_version_id"`
	WorkflowRunID        pgtype.UUID          `json:"workflow_run_id"`
	ScheduleTimeout      string               `json:"schedule_timeout"`
	StepTimeout          pgtype.Text          `json:"step_timeout"`
	Priority             pgtype.Int4          `json:"priority"`
	Sticky               V1StickyStrategyOlap `json:"sticky"`
	DesiredWorkerID      pgtype.UUID          `json:"desired_worker_id"`
	DisplayName          string               `json:"display_name"`
	Input                []byte               `json:"input"`
	AdditionalMetadata   []byte               `json:"additional_metadata"`
	ReadableStatus       V1ReadableStatusOlap `json:"readable_status"`
	LatestRetryCount     int32                `json:"latest_retry_count"`
	LatestWorkerID       pgtype.UUID          `json:"latest_worker_id"`
	DagID                pgtype.Int8          `json:"dag_id"`
	DagInsertedAt        pgtype.Timestamptz   `json:"dag_inserted_at"`
	ParentTaskExternalID pgtype.UUID          `json:"parent_task_external_id"`
	Output               []byte               `json:"output"`
	EventExternalID      pgtype.UUID          `json:"event_external_id"`
	ErrorMessage         pgtype.Text          `json:"error_message"`
}

func (q *Queries) ReadTaskByExternalID(ctx context.Context, db DBTX, externalid pgtype.UUID) (*ReadTaskByExternalIDRow, error) {
	row := db.QueryRow(ctx, readTaskByExternalID, externalid)
	var i ReadTaskByExternalIDRow
	err := row.Scan(
		&i.TenantID,
		&i.ID,
		&i.InsertedAt,
		&i.ExternalID,
		&i.Queue,
		&i.ActionID,
		&i.StepID,
		&i.WorkflowID,
		&i.WorkflowVersionID,
		&i.WorkflowRunID,
		&i.ScheduleTimeout,
		&i.StepTimeout,
		&i.Priority,
		&i.Sticky,
		&i.DesiredWorkerID,
		&i.DisplayName,
		&i.Input,
		&i.AdditionalMetadata,
		&i.ReadableStatus,
		&i.LatestRetryCount,
		&i.LatestWorkerID,
		&i.DagID,
		&i.DagInsertedAt,
		&i.ParentTaskExternalID,
		&i.Output,
		&i.EventExternalID,
		&i.ErrorMessage,
	)
	return &i, err
}

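// Example usage (illustrative sketch): point lookup of a task by its external
// UUID. pgx returns pgx.ErrNoRows from Scan when the lookup table has no entry,
// so callers usually check for that case explicitly. Variable names below are
// hypothetical.
//
//	task, err := q.ReadTaskByExternalID(ctx, db, externalID)
//	if errors.Is(err, pgx.ErrNoRows) {
//		// unknown external ID
//	} else if err != nil {
//		return err
//	}
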
const readWorkflowRunByExternalId = `-- name: ReadWorkflowRunByExternalId :one
WITH runs AS (
    SELECT
        lt.dag_id AS dag_id,
        lt.task_id AS task_id,
        r.id AS id,
        r.tenant_id,
        r.inserted_at,
        r.external_id,
        r.readable_status,
        r.kind,
        r.workflow_id,
        d.display_name AS display_name,
        d.input AS input,
        d.additional_metadata AS additional_metadata,
        d.workflow_version_id AS workflow_version_id,
        d.parent_task_external_id AS parent_task_external_id
    FROM
        v1_lookup_table_olap lt
    JOIN
        v1_runs_olap r ON r.inserted_at = lt.inserted_at AND r.id = lt.dag_id
    JOIN
        v1_dags_olap d ON (lt.tenant_id, lt.dag_id, lt.inserted_at) = (d.tenant_id, d.id, d.inserted_at)
    WHERE
        lt.external_id = $1::uuid
        AND lt.dag_id IS NOT NULL

    UNION ALL

    SELECT
        lt.dag_id AS dag_id,
        lt.task_id AS task_id,
        r.id AS id,
        r.tenant_id,
        r.inserted_at,
        r.external_id,
        r.readable_status,
        r.kind,
        r.workflow_id,
        t.display_name AS display_name,
        t.input AS input,
        t.additional_metadata AS additional_metadata,
        t.workflow_version_id AS workflow_version_id,
        NULL :: UUID AS parent_task_external_id
    FROM
        v1_lookup_table_olap lt
    JOIN
        v1_runs_olap r ON r.inserted_at = lt.inserted_at AND r.id = lt.task_id
    JOIN
        v1_tasks_olap t ON (lt.tenant_id, lt.task_id, lt.inserted_at) = (t.tenant_id, t.id, t.inserted_at)
    WHERE
        lt.external_id = $1::uuid
        AND lt.task_id IS NOT NULL
), relevant_events AS (
    SELECT
        e.tenant_id, e.id, e.inserted_at, e.external_id, e.task_id, e.task_inserted_at, e.event_type, e.workflow_id, e.event_timestamp, e.readable_status, e.retry_count, e.error_message, e.output, e.worker_id, e.additional__event_data, e.additional__event_message
    FROM runs r
    JOIN v1_dag_to_task_olap dt ON r.dag_id = dt.dag_id AND r.inserted_at = dt.dag_inserted_at
    JOIN v1_task_events_olap e ON (e.task_id, e.task_inserted_at) = (dt.task_id, dt.task_inserted_at)
    WHERE r.dag_id IS NOT NULL

    UNION ALL

    SELECT
        e.tenant_id, e.id, e.inserted_at, e.external_id, e.task_id, e.task_inserted_at, e.event_type, e.workflow_id, e.event_timestamp, e.readable_status, e.retry_count, e.error_message, e.output, e.worker_id, e.additional__event_data, e.additional__event_message
    FROM runs r
    JOIN v1_task_events_olap e ON e.task_id = r.task_id AND e.task_inserted_at = r.inserted_at
    WHERE r.task_id IS NOT NULL
), max_retry_counts AS (
    SELECT task_id, MAX(retry_count) AS max_retry_count
    FROM relevant_events
    GROUP BY task_id
), metadata AS (
    SELECT
        MIN(e.inserted_at)::timestamptz AS created_at,
        MIN(e.inserted_at) FILTER (WHERE e.readable_status = 'RUNNING')::timestamptz AS started_at,
        MAX(e.inserted_at) FILTER (WHERE e.readable_status IN ('COMPLETED', 'CANCELLED', 'FAILED'))::timestamptz AS finished_at,
        JSON_AGG(JSON_BUILD_OBJECT('task_id', e.task_id,'task_inserted_at', e.task_inserted_at)) AS task_metadata
    FROM
        relevant_events e
    JOIN max_retry_counts mrc ON (e.task_id, e.retry_count) = (mrc.task_id, mrc.max_retry_count)
), error_message AS (
    SELECT
        e.error_message
    FROM
        relevant_events e
    WHERE
        e.readable_status = 'FAILED'
    ORDER BY
        e.retry_count DESC
    LIMIT 1
)
SELECT
    r.dag_id, r.task_id, r.id, r.tenant_id, r.inserted_at, r.external_id, r.readable_status, r.kind, r.workflow_id, r.display_name, r.input, r.additional_metadata, r.workflow_version_id, r.parent_task_external_id,
    m.created_at,
    m.started_at,
    m.finished_at,
    e.error_message,
    m.task_metadata
FROM runs r
LEFT JOIN metadata m ON true
LEFT JOIN error_message e ON true
ORDER BY r.inserted_at DESC
`

type ReadWorkflowRunByExternalIdRow struct {
	DagID                pgtype.Int8          `json:"dag_id"`
	TaskID               pgtype.Int8          `json:"task_id"`
	ID                   int64                `json:"id"`
	TenantID             pgtype.UUID          `json:"tenant_id"`
	InsertedAt           pgtype.Timestamptz   `json:"inserted_at"`
	ExternalID           pgtype.UUID          `json:"external_id"`
	ReadableStatus       V1ReadableStatusOlap `json:"readable_status"`
	Kind                 V1RunKind            `json:"kind"`
	WorkflowID           pgtype.UUID          `json:"workflow_id"`
	DisplayName          string               `json:"display_name"`
	Input                []byte               `json:"input"`
	AdditionalMetadata   []byte               `json:"additional_metadata"`
	WorkflowVersionID    pgtype.UUID          `json:"workflow_version_id"`
	ParentTaskExternalID pgtype.UUID          `json:"parent_task_external_id"`
	CreatedAt            pgtype.Timestamptz   `json:"created_at"`
	StartedAt            pgtype.Timestamptz   `json:"started_at"`
	FinishedAt           pgtype.Timestamptz   `json:"finished_at"`
	ErrorMessage         pgtype.Text          `json:"error_message"`
	TaskMetadata         []byte               `json:"task_metadata"`
}

func (q *Queries) ReadWorkflowRunByExternalId(ctx context.Context, db DBTX, workflowrunexternalid pgtype.UUID) (*ReadWorkflowRunByExternalIdRow, error) {
	row := db.QueryRow(ctx, readWorkflowRunByExternalId, workflowrunexternalid)
	var i ReadWorkflowRunByExternalIdRow
	err := row.Scan(
		&i.DagID,
		&i.TaskID,
		&i.ID,
		&i.TenantID,
		&i.InsertedAt,
		&i.ExternalID,
		&i.ReadableStatus,
		&i.Kind,
		&i.WorkflowID,
		&i.DisplayName,
		&i.Input,
		&i.AdditionalMetadata,
		&i.WorkflowVersionID,
		&i.ParentTaskExternalID,
		&i.CreatedAt,
		&i.StartedAt,
		&i.FinishedAt,
		&i.ErrorMessage,
		&i.TaskMetadata,
	)
	return &i, err
}

const storeCELEvaluationFailures = `-- name: StoreCELEvaluationFailures :exec
WITH inputs AS (
    SELECT
        UNNEST(CAST($2::TEXT[] AS v1_cel_evaluation_failure_source[])) AS source,
        UNNEST($3::TEXT[]) AS error
)
INSERT INTO v1_cel_evaluation_failures_olap (
    tenant_id,
    source,
    error
)
SELECT $1::UUID, source, error
FROM inputs
`

type StoreCELEvaluationFailuresParams struct {
	Tenantid pgtype.UUID `json:"tenantid"`
	Sources  []string    `json:"sources"`
	Errors   []string    `json:"errors"`
}

func (q *Queries) StoreCELEvaluationFailures(ctx context.Context, db DBTX, arg StoreCELEvaluationFailuresParams) error {
	_, err := db.Exec(ctx, storeCELEvaluationFailures, arg.Tenantid, arg.Sources, arg.Errors)
	return err
}

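// Example usage (illustrative sketch): recording CEL evaluation failures in
// bulk for one tenant. Sources and Errors are parallel arrays, and each source
// must be a valid v1_cel_evaluation_failure_source enum value or the CAST in
// the query fails. Variable names below are hypothetical.
//
//	err := q.StoreCELEvaluationFailures(ctx, db, StoreCELEvaluationFailuresParams{
//		Tenantid: tenantID,
//		Sources:  sources,
//		Errors:   errorMessages,
//	})
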
const swapV1PayloadOLAPPartitionWithTemp = `-- name: SwapV1PayloadOLAPPartitionWithTemp :exec
SELECT swap_v1_payloads_olap_partition_with_temp($1::DATE)
`

func (q *Queries) SwapV1PayloadOLAPPartitionWithTemp(ctx context.Context, db DBTX, date pgtype.Date) error {
	_, err := db.Exec(ctx, swapV1PayloadOLAPPartitionWithTemp, date)
	return err
}

const updateDAGStatuses = `-- name: UpdateDAGStatuses :many
WITH tenants AS (
    SELECT UNNEST(
        find_matching_tenants_in_task_status_updates_tmp_partition(
            $1::int,
            $2::UUID[]
        )
    ) AS tenant_id
), locked_events AS (
    SELECT
        u.tenant_id, u.requeue_after, u.requeue_retries, u.id, u.dag_id, u.dag_inserted_at
    FROM tenants t,
    LATERAL list_task_status_updates_tmp(
        $1::int,
        t.tenant_id,
        $3::int
    ) u
), distinct_dags AS (
    SELECT
        DISTINCT ON (e.tenant_id, e.dag_id, e.dag_inserted_at)
        e.tenant_id,
        e.dag_id,
        e.dag_inserted_at
    FROM
        locked_events e
), locked_dags AS (
    SELECT
        d.id,
        d.inserted_at,
        d.readable_status,
        d.tenant_id,
        d.total_tasks
    FROM
        v1_dags_olap d
    WHERE
        d.inserted_at >= $4::TIMESTAMPTZ
        AND (d.inserted_at, d.id, d.tenant_id) IN (
            SELECT
                dd.dag_inserted_at, dd.dag_id, dd.tenant_id
            FROM
                distinct_dags dd
        )
    ORDER BY
        d.inserted_at, d.id
    FOR UPDATE
), relevant_tasks AS (
    SELECT
        t.tenant_id,
        t.id,
        d.id AS dag_id,
        d.inserted_at AS dag_inserted_at,
        t.readable_status
    FROM
        locked_dags d
    JOIN
        v1_dag_to_task_olap dt ON
            (d.id, d.inserted_at) = (dt.dag_id, dt.dag_inserted_at)
    JOIN
        v1_tasks_olap t ON
            (dt.task_id, dt.task_inserted_at) = (t.id, t.inserted_at)
    WHERE
        t.inserted_at >= $4::TIMESTAMPTZ
    -- Note that the ORDER BY seems to help the query planner by pruning partitions earlier. We
    -- have previously seen Postgres use an index-only scan on partitions older than the minInsertedAt,
    -- each of which can take a long time to scan. This can be very pathological since we partition on
    -- both the status and the date, so 14 days of data with 5 statuses is 70 partitions to index scan.
    ORDER BY t.inserted_at DESC
), dag_task_counts AS (
    SELECT
        d.id,
        d.inserted_at,
        d.total_tasks,
        COUNT(t.id) AS task_count,
        COUNT(t.id) FILTER (WHERE t.readable_status = 'COMPLETED') AS completed_count,
        COUNT(t.id) FILTER (WHERE t.readable_status = 'FAILED') AS failed_count,
        COUNT(t.id) FILTER (WHERE t.readable_status = 'CANCELLED') AS cancelled_count,
        COUNT(t.id) FILTER (WHERE t.readable_status = 'QUEUED') AS queued_count,
        COUNT(t.id) FILTER (WHERE t.readable_status = 'RUNNING') AS running_count
    FROM
        locked_dags d
    LEFT JOIN
        relevant_tasks t ON (d.tenant_id, d.id, d.inserted_at) = (t.tenant_id, t.dag_id, t.dag_inserted_at)
    GROUP BY
        d.id, d.inserted_at, d.total_tasks
), updated_dags AS (
    UPDATE
        v1_dags_olap d
    SET
        readable_status = CASE
            -- If we only have queued events, we should keep the status as is
            WHEN dtc.queued_count = dtc.task_count THEN d.readable_status
            -- If the task count is not equal to the total tasks, we should set the status to running
            WHEN dtc.task_count != dtc.total_tasks THEN 'RUNNING'
            -- If we have any running or queued tasks, we should set the status to running
            WHEN dtc.running_count > 0 OR dtc.queued_count > 0 THEN 'RUNNING'
            WHEN dtc.failed_count > 0 THEN 'FAILED'
            WHEN dtc.cancelled_count > 0 THEN 'CANCELLED'
            WHEN dtc.completed_count = dtc.task_count THEN 'COMPLETED'
            ELSE 'RUNNING'
        END
    FROM
        dag_task_counts dtc
    WHERE
        (d.id, d.inserted_at) = (dtc.id, dtc.inserted_at)
    RETURNING
        d.tenant_id, d.id, d.inserted_at, d.readable_status, d.external_id, d.workflow_id
), events_to_requeue AS (
    -- Get events which don't have a corresponding locked_task
    SELECT
        e.tenant_id,
        e.requeue_retries,
        e.dag_id,
        e.dag_inserted_at
    FROM
        locked_events e
    WHERE NOT EXISTS (
        SELECT 1
        FROM locked_dags d
        WHERE (e.dag_inserted_at, e.dag_id, e.tenant_id) = (d.inserted_at, d.id, d.tenant_id)
    )
), deleted_events AS (
    DELETE FROM
        v1_task_status_updates_tmp
    WHERE
        (tenant_id, requeue_after, dag_id, id) IN (SELECT tenant_id, requeue_after, dag_id, id FROM locked_events)
), requeued_events AS (
    INSERT INTO
        v1_task_status_updates_tmp (
            tenant_id,
            requeue_after,
            requeue_retries,
            dag_id,
            dag_inserted_at
        )
    SELECT
        tenant_id,
        -- Exponential backoff, we limit to 10 retries which is 2048 seconds/34 minutes
        CURRENT_TIMESTAMP + (2 ^ requeue_retries) * INTERVAL '2 seconds',
        requeue_retries + 1,
        dag_id,
        dag_inserted_at
    FROM
        events_to_requeue
    WHERE
        requeue_retries < 10
    RETURNING
        tenant_id, requeue_after, requeue_retries, id, dag_id, dag_inserted_at
), event_count AS (
    SELECT
        COUNT(*) as count
    FROM
        locked_events
)
SELECT
    -- Little wonky, but we return the count of events that were processed in each row. Potential edge case
    -- where there are no tasks updated with a non-zero count, but this should be very rare and we'll get
    -- updates on the next run.
    (SELECT count FROM event_count) AS count,
    d.tenant_id, d.id, d.inserted_at, d.readable_status, d.external_id, d.workflow_id
FROM
    updated_dags d
`

type UpdateDAGStatusesParams struct {
	Partitionnumber int32              `json:"partitionnumber"`
	Tenantids       []pgtype.UUID      `json:"tenantids"`
	Eventlimit      int32              `json:"eventlimit"`
	Mininsertedat   pgtype.Timestamptz `json:"mininsertedat"`
}

type UpdateDAGStatusesRow struct {
	Count          int64                `json:"count"`
	TenantID       pgtype.UUID          `json:"tenant_id"`
	ID             int64                `json:"id"`
	InsertedAt     pgtype.Timestamptz   `json:"inserted_at"`
	ReadableStatus V1ReadableStatusOlap `json:"readable_status"`
	ExternalID     pgtype.UUID          `json:"external_id"`
	WorkflowID     pgtype.UUID          `json:"workflow_id"`
}

func (q *Queries) UpdateDAGStatuses(ctx context.Context, db DBTX, arg UpdateDAGStatusesParams) ([]*UpdateDAGStatusesRow, error) {
	rows, err := db.Query(ctx, updateDAGStatuses,
		arg.Partitionnumber,
		arg.Tenantids,
		arg.Eventlimit,
		arg.Mininsertedat,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*UpdateDAGStatusesRow
	for rows.Next() {
		var i UpdateDAGStatusesRow
		if err := rows.Scan(
			&i.Count,
			&i.TenantID,
			&i.ID,
			&i.InsertedAt,
			&i.ReadableStatus,
			&i.ExternalID,
			&i.WorkflowID,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}

const updateTaskStatuses = `-- name: UpdateTaskStatuses :many
WITH tenants AS (
    SELECT UNNEST(
        find_matching_tenants_in_task_events_tmp_partition(
            $1::int,
            $2::UUID[]
        )
    ) AS tenant_id
), locked_events AS (
    SELECT
        e.tenant_id, e.requeue_after, e.requeue_retries, e.id, e.task_id, e.task_inserted_at, e.event_type, e.readable_status, e.retry_count, e.worker_id
    FROM tenants t,
    LATERAL list_task_events_tmp(
        $1::int,
        t.tenant_id,
        $3::int
    ) e
), max_retry_counts AS (
    SELECT
        tenant_id,
        task_id,
        task_inserted_at,
        MAX(retry_count) AS max_retry_count
    FROM
        locked_events
    GROUP BY
        tenant_id, task_id, task_inserted_at
), updatable_events AS (
    SELECT
        e.tenant_id,
        e.task_id,
        e.task_inserted_at,
        e.retry_count,
        MAX(e.readable_status) AS max_readable_status
    FROM
        locked_events e
    JOIN
        max_retry_counts mrc ON
            e.tenant_id = mrc.tenant_id
            AND e.task_id = mrc.task_id
            AND e.task_inserted_at = mrc.task_inserted_at
            AND e.retry_count = mrc.max_retry_count
    GROUP BY
        e.tenant_id, e.task_id, e.task_inserted_at, e.retry_count
), latest_worker_id AS (
    SELECT
        tenant_id,
        task_id,
        task_inserted_at,
        retry_count,
        MAX(worker_id::text) AS worker_id
    FROM
        locked_events
    WHERE
        worker_id IS NOT NULL
    GROUP BY
        tenant_id, task_id, task_inserted_at, retry_count
), locked_tasks AS (
    SELECT
        t.tenant_id,
        t.id,
        t.inserted_at,
        e.retry_count,
        e.max_readable_status
    FROM
        v1_tasks_olap t
    JOIN
        updatable_events e ON
            (t.tenant_id, t.id, t.inserted_at) = (e.tenant_id, e.task_id, e.task_inserted_at)
    WHERE t.inserted_at >= $4::TIMESTAMPTZ
    ORDER BY
        t.inserted_at, t.id
    FOR UPDATE
), updated_tasks AS (
    UPDATE
        v1_tasks_olap t
    SET
        readable_status = e.max_readable_status,
        latest_retry_count = e.retry_count,
        latest_worker_id = CASE WHEN lw.worker_id::uuid IS NOT NULL THEN lw.worker_id::uuid ELSE t.latest_worker_id END
    FROM
        updatable_events e
    LEFT JOIN
        latest_worker_id lw ON
            (e.tenant_id, e.task_id, e.task_inserted_at, e.retry_count) = (lw.tenant_id, lw.task_id, lw.task_inserted_at, lw.retry_count)
    WHERE
        (t.tenant_id, t.id, t.inserted_at) = (e.tenant_id, e.task_id, e.task_inserted_at)
        AND
        (
            -- if the retry count is greater than the latest retry count, update the status
            (
                e.retry_count > t.latest_retry_count
                AND e.max_readable_status != t.readable_status
            ) OR
            -- if the retry count is equal to the latest retry count, update the status if the status is greater
            (
                e.retry_count = t.latest_retry_count
                AND e.max_readable_status > t.readable_status
            )
        )
    RETURNING
        t.tenant_id, t.id, t.inserted_at, t.readable_status, t.external_id, t.latest_worker_id, t.workflow_id, (t.dag_id IS NOT NULL)::boolean AS is_dag_task
), events_to_requeue AS (
    -- Get events which don't have a corresponding locked_task
    SELECT
        e.tenant_id,
        e.requeue_retries,
        e.task_id,
        e.task_inserted_at,
        e.event_type,
        e.readable_status,
        e.retry_count
    FROM
        locked_events e
    WHERE NOT EXISTS (
        SELECT 1
        FROM locked_tasks t
        WHERE (e.tenant_id, e.task_id, e.task_inserted_at) = (t.tenant_id, t.id, t.inserted_at)
    )
), deleted_events AS (
    DELETE FROM
        v1_task_events_olap_tmp
    WHERE
        (tenant_id, requeue_after, task_id, id) IN (SELECT tenant_id, requeue_after, task_id, id FROM locked_events)
), requeued_events AS (
    INSERT INTO
        v1_task_events_olap_tmp (
            tenant_id,
            requeue_after,
            requeue_retries,
            task_id,
            task_inserted_at,
            event_type,
            readable_status,
            retry_count
        )
    SELECT
        tenant_id,
        -- Exponential backoff, we limit to 10 retries which is 2048 seconds/34 minutes
        CURRENT_TIMESTAMP + (2 ^ requeue_retries) * INTERVAL '2 seconds',
        requeue_retries + 1,
        task_id,
        task_inserted_at,
        event_type,
        readable_status,
        retry_count
    FROM
        events_to_requeue
    WHERE
        requeue_retries < 10
    RETURNING
        tenant_id, requeue_after, requeue_retries, id, task_id, task_inserted_at, event_type, readable_status, retry_count, worker_id
), event_count AS (
    SELECT
        COUNT(*) as count
    FROM
        locked_events
)
SELECT
    -- Little wonky, but we return the count of events that were processed in each row. Potential edge case
    -- where there are no tasks updated with a non-zero count, but this should be very rare and we'll get
    -- updates on the next run.
    (SELECT count FROM event_count) AS count,
    t.tenant_id, t.id, t.inserted_at, t.readable_status, t.external_id, t.latest_worker_id, t.workflow_id, t.is_dag_task
FROM
    updated_tasks t
`

type UpdateTaskStatusesParams struct {
	Partitionnumber int32              `json:"partitionnumber"`
	Tenantids       []pgtype.UUID      `json:"tenantids"`
	Eventlimit      int32              `json:"eventlimit"`
	Mininsertedat   pgtype.Timestamptz `json:"mininsertedat"`
}

type UpdateTaskStatusesRow struct {
	Count          int64                `json:"count"`
	TenantID       pgtype.UUID          `json:"tenant_id"`
	ID             int64                `json:"id"`
	InsertedAt     pgtype.Timestamptz   `json:"inserted_at"`
	ReadableStatus V1ReadableStatusOlap `json:"readable_status"`
	ExternalID     pgtype.UUID          `json:"external_id"`
	LatestWorkerID pgtype.UUID          `json:"latest_worker_id"`
	WorkflowID     pgtype.UUID          `json:"workflow_id"`
	IsDagTask      bool                 `json:"is_dag_task"`
}

func (q *Queries) UpdateTaskStatuses(ctx context.Context, db DBTX, arg UpdateTaskStatusesParams) ([]*UpdateTaskStatusesRow, error) {
	rows, err := db.Query(ctx, updateTaskStatuses,
		arg.Partitionnumber,
		arg.Tenantids,
		arg.Eventlimit,
		arg.Mininsertedat,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*UpdateTaskStatusesRow
	for rows.Next() {
		var i UpdateTaskStatusesRow
		if err := rows.Scan(
			&i.Count,
			&i.TenantID,
			&i.ID,
			&i.InsertedAt,
			&i.ReadableStatus,
			&i.ExternalID,
			&i.LatestWorkerID,
			&i.WorkflowID,
			&i.IsDagTask,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
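
// Example usage (illustrative sketch): draining the temporary task-event table
// in a polling loop. Every returned row carries the same Count (the number of
// events consumed in the batch), so inspecting the first row is enough; events
// whose tasks were not found are requeued by the query itself with exponential
// backoff, as noted in the SQL above. In the rare case where events were
// consumed but no tasks updated, the loop below simply exits and the next run
// picks the work up. Variable names are hypothetical.
//
//	for {
//		rows, err := q.UpdateTaskStatuses(ctx, db, UpdateTaskStatusesParams{
//			Partitionnumber: partition,
//			Tenantids:       tenantIDs,
//			Eventlimit:      1000,
//			Mininsertedat:   minInsertedAt,
//		})
//		if err != nil {
//			return err
//		}
//		if len(rows) == 0 || rows[0].Count == 0 {
//			break
//		}
//		// fan out status-change notifications for the updated tasks here
//	}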