mirror of
https://github.com/hatchet-dev/hatchet.git
synced 2026-02-25 03:21:00 -06:00
* Revert "Revert "Feat: Events in the OLAP Repo (#1633)" (#1706)"
This reverts commit bf29269a27.
* Feat: Events Frontend (#1678)
* feat: add events tables
* fix: tweak PK
* feat: migration
* feat: gen models
* fix: add external id col + index
* fix: uuid pk
* fix: types
* chore: gen
* feat: add index
* Feat: Write events into OLAP tables (#1634)
* feat: query for event creation
* feat: olap impl
* feat: wire up the olap event write
* feat: goroutine?
* feat: start wiring up inserts to triggers
* fix: no `RETURNING`
* fix: hack
* fix: inner join
* feat: attempt 2
* fix: return errors
* chore: lint
* fix: diff
* feat: add new partitions
* fix: eof
* fix: write external ids into table
* chore: gen
* fix: wiring
* fix: event deduping
* fix: insert in bulk
* fix: bug
* refactor: return type of trigger
* fix: unnest ids
* fix: unnest tenant ids
* fix: run ids in bulk insert
* feat: two bulk inserts, one tx
* fix: cruft
* fix: bug
* Update pkg/repository/v1/olap.go
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
* fix: rework to avoid n^2 loop
* fix: transaction timeout
* fix: lint
* fix: use error
* fix: rm penultimate version
* fix: rm penultimate test part ii
* Feat: CEL-based filtering of events (#1676)
* feat: add optional expression to workflow trigger event ref
* feat: proto field for expression
* feat: write and parse the expression
* feat: wire up through put workflow ver request
* feat: query
* fix: naming
* fix: cleanup
* fix: rebase
* Update pkg/repository/v1/trigger.go
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
* fix: skip workflow on cel eval failure
* fix: zero value
* fix: cel evaluator
* fix: usage
* fix: naming + type
* fix: rm event filter from v0 defn
* feat: tests + fix typing
* fix: usage
* fix: construct input
* feat: always write events
---------
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---------
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
* fix: select existing partitions
* feat: add prio to push event request
* feat: priority from pushed events
* fix: missed a spot
* fix: write events even if they're not tied to any workflows
* fix: revert cel event filtering
* fix: couple more
* fix: simplify
* feat: initial API work
* chore: gen ts
* feat: fe skeleton
* feat: wiring up skeleton data
* feat: hook
* fix: bugs
* fix: lint on gen
* fix: couple more
* feat: wire up counts
* feat: initial events cols + styling
* feat: layout
* feat: styling
* fix: cleanup
* feat: use external ids on the FE
* fix: separate openapi spec for new events route
* fix: required param
* fix: update queries and api
* feat: event detail
* fix: page
* fix: rebase
* tweak: table
* feat: add events page to sidebar
* feat: modify queries to allow fetching by triggering event
* feat: add triggering event id to api
* chore: lint
* feat: wire up events api
* fix: rm log
* fix: gen
* feat: wire up status counts
* fix: rm time series
* fix: rm state
* fix: lint
* fix: eof
* chore: lint
* feat: wire up filters
* fix: lint
* chore: api gen
* feat: add events tables
* fix: tweak PK
* feat: migration
* feat: gen models
* fix: add external id col + index
* fix: uuid pk
* fix: types
* chore: gen
* feat: add index
* Feat: Write events into OLAP tables (#1634)
* feat: query for event creation
* feat: olap impl
* feat: wire up the olap event write
* feat: goroutine?
* feat: start wiring up inserts to triggers
* fix: no `RETURNING`
* fix: hack
* fix: inner join
* feat: attempt 2
* fix: return errors
* chore: lint
* fix: diff
* feat: add new partitions
* fix: eof
* fix: write external ids into table
* chore: gen
* fix: wiring
* fix: event deduping
* fix: insert in bulk
* fix: bug
* refactor: return type of trigger
* fix: unnest ids
* fix: unnest tenant ids
* fix: run ids in bulk insert
* feat: two bulk inserts, one tx
* fix: cruft
* fix: bug
* Update pkg/repository/v1/olap.go
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
* fix: rework to avoid n^2 loop
* fix: transaction timeout
* fix: lint
* fix: use error
* fix: rm penultimate version
* fix: rm penultimate test part ii
* Feat: CEL-based filtering of events (#1676)
* feat: add optional expression to workflow trigger event ref
* feat: proto field for expression
* feat: write and parse the expression
* feat: wire up through put workflow ver request
* feat: query
* fix: naming
* fix: cleanup
* fix: rebase
* Update pkg/repository/v1/trigger.go
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
* fix: skip workflow on cel eval failure
* fix: zero value
* fix: cel evaluator
* fix: usage
* fix: naming + type
* fix: rm event filter from v0 defn
* feat: tests + fix typing
* fix: usage
* fix: construct input
* feat: always write events
---------
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---------
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
* fix: select existing partitions
* feat: add prio to push event request
* feat: priority from pushed events
* fix: missed a spot
* fix: write events even if they're not tied to any workflows
* fix: revert cel event filtering
* fix: couple more
* fix: simplify
* fix: gen api
* fix: gen
* fix: more merge issues
* chore: gen
* fix: lockfile
* fix: merge issues
* chore: gen again
* fix: rm unused fields from openapi spec
---------
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
* fix: migration ver
* fix: insert trigger, event types
* fix: bunch o refs
* fix: migration
* fix: queries
* fix: finish wiring up inserts
* fix: misc bugs
* fix: fe filtering
* chore: lint
* fix: formatting, gen
* fix: current_date
---------
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
682 lines
19 KiB
PL/PgSQL
-- Sticky-strategy values; used by v1_tasks_olap.sticky.
CREATE TYPE v1_sticky_strategy_olap AS ENUM ('NONE', 'SOFT', 'HARD');
-- Human-readable run status. Also serves as the LIST partition key on the
-- per-day OLAP partitions (see create_v1_partition_with_status below).
CREATE TYPE v1_readable_status_olap AS ENUM (
    'QUEUED',
    'RUNNING',
    'CANCELLED',
    'FAILED',
    'COMPLETED'
);
-- HELPER FUNCTIONS FOR PARTITIONED TABLES --

-- Returns the names of child partitions of targetTableName whose name ends
-- in a YYYYMMDD suffix strictly earlier than targetDate (candidates for
-- detaching/dropping old daily partitions).
CREATE OR REPLACE FUNCTION get_v1_partitions_before_date(
    targetTableName text,
    targetDate date
) RETURNS TABLE(partition_name text)
    LANGUAGE plpgsql AS
$$
BEGIN
    RETURN QUERY
    SELECT
        inhrelid::regclass::text AS partition_name
    FROM
        pg_inherits
    WHERE
        inhparent = targetTableName::regclass
        -- only consider children named <parent>_<YYYYMMDD>; the regex
        -- capture returns the 8-digit date suffix (NULL filters out
        -- non-matching children such as the per-status sub-partitions)
        AND substring(inhrelid::regclass::text, format('%s_(\d{8})', targetTableName)) ~ '^\d{8}'
        AND (substring(inhrelid::regclass::text, format('%s_(\d{8})', targetTableName))::date) < targetDate;
END;
$$;
-- Creates (if absent) the LIST sub-partition of newTableName that holds rows
-- with the given readable status, applies aggressive autovacuum settings,
-- and attaches it. Returns 1 when a new partition was created, 0 when it
-- already existed.
CREATE OR REPLACE FUNCTION create_v1_partition_with_status(
    newTableName text,
    status v1_readable_status_olap
) RETURNS integer
    LANGUAGE plpgsql AS
$$
DECLARE
    targetNameWithStatus varchar;
BEGIN
    SELECT lower(format('%s_%s', newTableName, status::text)) INTO targetNameWithStatus;

    -- exit if the table exists
    -- NOTE(review): pg_tables is not filtered by schemaname here; assumes
    -- partition names are unique across schemas — confirm.
    IF EXISTS (SELECT 1 FROM pg_tables WHERE tablename = targetNameWithStatus) THEN
        RETURN 0;
    END IF;

    -- %I (identifier) and %L (literal) are used instead of raw %s so the
    -- dynamic SQL is safely quoted, consistent with create_v1_hash_partitions.
    EXECUTE
        format('CREATE TABLE %I (LIKE %I INCLUDING INDEXES)', targetNameWithStatus, newTableName);
    EXECUTE
        format('ALTER TABLE %I SET (
            autovacuum_vacuum_scale_factor = ''0.1'',
            autovacuum_analyze_scale_factor = ''0.05'',
            autovacuum_vacuum_threshold = ''25'',
            autovacuum_analyze_threshold = ''25'',
            autovacuum_vacuum_cost_delay = ''10'',
            autovacuum_vacuum_cost_limit = ''1000''
        )', targetNameWithStatus);
    EXECUTE
        format('ALTER TABLE %I ATTACH PARTITION %I FOR VALUES IN (%L)', newTableName, targetNameWithStatus, status);
    RETURN 1;
END;
$$;
-- Creates (if absent) the daily RANGE partition <targetTableName>_<YYYYMMDD>
-- for targetDate, itself LIST-partitioned by readable_status, creates its
-- five per-status sub-partitions, and attaches it to the parent covering
-- [targetDate, targetDate + 1 day). Always returns 1.
CREATE OR REPLACE FUNCTION create_v1_olap_partition_with_date_and_status(
    targetTableName text,
    targetDate date
) RETURNS integer
    LANGUAGE plpgsql AS
$$
DECLARE
    targetDateStr varchar;
    targetDatePlusOneDayStr varchar;
    newTableName varchar;
BEGIN
    SELECT to_char(targetDate, 'YYYYMMDD') INTO targetDateStr;
    SELECT to_char(targetDate + INTERVAL '1 day', 'YYYYMMDD') INTO targetDatePlusOneDayStr;
    SELECT format('%s_%s', targetTableName, targetDateStr) INTO newTableName;

    -- NOTE(review): pg_tables is not filtered by schemaname; assumes names
    -- are unique across schemas — confirm.
    IF NOT EXISTS (SELECT 1 FROM pg_tables WHERE tablename = newTableName) THEN
        -- %I quotes identifiers safely (was raw %s), matching
        -- create_v1_hash_partitions.
        EXECUTE format('CREATE TABLE %I (LIKE %I INCLUDING INDEXES) PARTITION BY LIST (readable_status)', newTableName, targetTableName);
    END IF;

    -- one sub-partition per readable status; each call is a no-op when the
    -- sub-partition already exists
    PERFORM create_v1_partition_with_status(newTableName, 'QUEUED');
    PERFORM create_v1_partition_with_status(newTableName, 'RUNNING');
    PERFORM create_v1_partition_with_status(newTableName, 'COMPLETED');
    PERFORM create_v1_partition_with_status(newTableName, 'CANCELLED');
    PERFORM create_v1_partition_with_status(newTableName, 'FAILED');

    -- If it's not already attached, attach the partition
    IF NOT EXISTS (SELECT 1 FROM pg_inherits WHERE inhrelid = newTableName::regclass) THEN
        EXECUTE format('ALTER TABLE %I ATTACH PARTITION %I FOR VALUES FROM (%L) TO (%L)', targetTableName, newTableName, targetDateStr, targetDatePlusOneDayStr);
    END IF;

    RETURN 1;
END;
$$;
-- Ensures targetTableName has num_partitions hash partitions named
-- <target>_0 .. <target>_<n-1>, creating and attaching any that are missing
-- with tuned autovacuum settings. Raises when the table already has MORE
-- partitions than requested (shrinking is not supported). Returns the number
-- of partitions created by this call.
CREATE OR REPLACE FUNCTION create_v1_hash_partitions(
    targetTableName text,
    num_partitions INT
) RETURNS integer
    LANGUAGE plpgsql AS
$$
DECLARE
    existing_count INT;
    partition_name text;
    created_count INT := 0;
    i INT;
BEGIN
    SELECT count(*) INTO existing_count
    FROM pg_inherits
    WHERE inhparent = targetTableName::regclass;

    IF existing_count > num_partitions THEN
        RAISE EXCEPTION 'Cannot decrease the number of partitions: we already have % partitions which is more than the target %', existing_count, num_partitions;
    END IF;

    FOR i IN 0..(num_partitions - 1) LOOP
        partition_name := format('%s_%s', targetTableName, i);

        IF to_regclass(partition_name) IS NULL THEN
            -- %I used for BOTH identifiers (the original mixed %I and raw %s)
            EXECUTE format('CREATE TABLE %I (LIKE %I INCLUDING INDEXES)', partition_name, targetTableName);
            EXECUTE format('ALTER TABLE %I SET (
                autovacuum_vacuum_scale_factor = ''0.1'',
                autovacuum_analyze_scale_factor = ''0.05'',
                autovacuum_vacuum_threshold = ''25'',
                autovacuum_analyze_threshold = ''25'',
                autovacuum_vacuum_cost_delay = ''10'',
                autovacuum_vacuum_cost_limit = ''1000''
            )', partition_name);
            EXECUTE format('ALTER TABLE %I ATTACH PARTITION %I FOR VALUES WITH (modulus %s, remainder %s)', targetTableName, partition_name, num_partitions, i);
            created_count := created_count + 1;
        END IF;
    END LOOP;

    RETURN created_count;
END;
$$;
-- TASKS DEFINITIONS --

-- One row per task. Range-partitioned by day on inserted_at; each daily
-- partition is further LIST-partitioned by readable_status
-- (see create_v1_olap_partition_with_date_and_status).
CREATE TABLE v1_tasks_olap (
    tenant_id UUID NOT NULL,
    id BIGINT NOT NULL,
    inserted_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
    external_id UUID NOT NULL DEFAULT gen_random_uuid(),
    queue TEXT NOT NULL,
    action_id TEXT NOT NULL,
    step_id UUID NOT NULL,
    workflow_id UUID NOT NULL,
    workflow_version_id UUID NOT NULL,
    workflow_run_id UUID NOT NULL,
    schedule_timeout TEXT NOT NULL,
    step_timeout TEXT,
    priority INTEGER DEFAULT 1,
    sticky v1_sticky_strategy_olap NOT NULL,
    desired_worker_id UUID,
    display_name TEXT NOT NULL,
    input JSONB NOT NULL,
    additional_metadata JSONB,
    readable_status v1_readable_status_olap NOT NULL DEFAULT 'QUEUED',
    latest_retry_count INT NOT NULL DEFAULT 0,
    latest_worker_id UUID,
    -- populated only when the task belongs to a DAG run (see the insert
    -- trigger, which routes DAG-member tasks differently)
    dag_id BIGINT,
    dag_inserted_at TIMESTAMPTZ,
    parent_task_external_id UUID,

    PRIMARY KEY (inserted_at, id, readable_status)
) PARTITION BY RANGE(inserted_at);

CREATE INDEX v1_tasks_olap_workflow_id_idx ON v1_tasks_olap (tenant_id, workflow_id);

CREATE INDEX v1_tasks_olap_worker_id_idx ON v1_tasks_olap (tenant_id, latest_worker_id) WHERE latest_worker_id IS NOT NULL;

-- bootstrap today's daily partition
SELECT create_v1_olap_partition_with_date_and_status('v1_tasks_olap', CURRENT_DATE);
-- DAG DEFINITIONS --

-- One row per DAG run, partitioned the same way as v1_tasks_olap
-- (daily RANGE on inserted_at, then LIST by readable_status).
CREATE TABLE v1_dags_olap (
    id BIGINT NOT NULL,
    inserted_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
    tenant_id UUID NOT NULL,
    external_id UUID NOT NULL,
    display_name TEXT NOT NULL,
    workflow_id UUID NOT NULL,
    workflow_version_id UUID NOT NULL,
    readable_status v1_readable_status_olap NOT NULL DEFAULT 'QUEUED',
    input JSONB NOT NULL,
    additional_metadata JSONB,
    parent_task_external_id UUID,
    total_tasks INT NOT NULL DEFAULT 1,

    PRIMARY KEY (inserted_at, id, readable_status)
) PARTITION BY RANGE(inserted_at);

CREATE INDEX v1_dags_olap_workflow_id_idx ON v1_dags_olap (tenant_id, workflow_id);

-- bootstrap today's daily partition
SELECT create_v1_olap_partition_with_date_and_status('v1_dags_olap', CURRENT_DATE);
-- RUN DEFINITIONS --

CREATE TYPE v1_run_kind AS ENUM ('TASK', 'DAG');

-- v1_runs_olap represents an invocation of a workflow. it can either refer to a DAG or a task.
-- we partition this table on status to allow for efficient querying of tasks in different states.
CREATE TABLE v1_runs_olap (
    tenant_id UUID NOT NULL,
    id BIGINT NOT NULL,
    inserted_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
    external_id UUID NOT NULL DEFAULT gen_random_uuid(),
    readable_status v1_readable_status_olap NOT NULL DEFAULT 'QUEUED',
    -- discriminates whether (id, inserted_at) refers to a task or a DAG row
    kind v1_run_kind NOT NULL,
    workflow_id UUID NOT NULL,
    workflow_version_id UUID NOT NULL,
    additional_metadata JSONB,
    parent_task_external_id UUID,

    PRIMARY KEY (inserted_at, id, readable_status, kind)
) PARTITION BY RANGE(inserted_at);

-- bootstrap today's daily partition
SELECT create_v1_olap_partition_with_date_and_status('v1_runs_olap', CURRENT_DATE);

CREATE INDEX ix_v1_runs_olap_parent_task_external_id ON v1_runs_olap (parent_task_external_id) WHERE parent_task_external_id IS NOT NULL;
-- LOOKUP TABLES --

-- Maps an external UUID to the internal (task_id | dag_id, inserted_at)
-- identity; populated by the insert triggers on v1_tasks_olap and
-- v1_dags_olap. Exactly one of task_id / dag_id is set per row.
CREATE TABLE v1_lookup_table_olap (
    tenant_id UUID NOT NULL,
    external_id UUID NOT NULL,
    task_id BIGINT,
    dag_id BIGINT,
    inserted_at TIMESTAMPTZ NOT NULL,

    PRIMARY KEY (external_id)
);

-- Junction table linking a DAG run to its member tasks; both sides carry
-- their inserted_at so lookups can target the right daily partitions.
CREATE TABLE v1_dag_to_task_olap (
    dag_id BIGINT NOT NULL,
    dag_inserted_at TIMESTAMPTZ NOT NULL,
    task_id BIGINT NOT NULL,
    task_inserted_at TIMESTAMPTZ NOT NULL,

    PRIMARY KEY (dag_id, dag_inserted_at, task_id, task_inserted_at)
);
-- STATUS DEFINITION --

-- NOTE(review): v1_status_kind is declared but v1_statuses_olap.kind below
-- uses v1_run_kind; this type appears unused in this file — confirm whether
-- it is referenced elsewhere before removing.
CREATE TYPE v1_status_kind AS ENUM ('TASK', 'DAG');

-- Flat status mirror keyed by external id; maintained by the insert/update
-- triggers on v1_runs_olap.
CREATE TABLE v1_statuses_olap (
    external_id UUID NOT NULL,
    inserted_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
    tenant_id UUID NOT NULL,
    workflow_id UUID NOT NULL,
    kind v1_run_kind NOT NULL,
    readable_status v1_readable_status_olap NOT NULL DEFAULT 'QUEUED',

    PRIMARY KEY (external_id, inserted_at)
);
-- EVENT DEFINITIONS --

-- Lifecycle event types recorded for tasks (see v1_task_events_olap).
CREATE TYPE v1_event_type_olap AS ENUM (
    'RETRYING',
    'REASSIGNED',
    'RETRIED_BY_USER',
    'CREATED',
    'QUEUED',
    'REQUEUED_NO_WORKER',
    'REQUEUED_RATE_LIMIT',
    'ASSIGNED',
    'ACKNOWLEDGED',
    'SENT_TO_WORKER',
    'SLOT_RELEASED',
    'STARTED',
    'TIMEOUT_REFRESHED',
    'SCHEDULING_TIMED_OUT',
    'FINISHED',
    'FAILED',
    'CANCELLED',
    'TIMED_OUT',
    'RATE_LIMIT_ERROR',
    'SKIPPED'
);
-- this is a hash-partitioned table on the task_id, so that we can process batches of events in parallel
-- without needing to place conflicting locks on tasks.
CREATE TABLE v1_task_events_olap_tmp (
    tenant_id UUID NOT NULL,
    -- rows become visible to list_task_events_tmp once requeue_after has
    -- passed; requeue_retries counts processing attempts
    requeue_after TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
    requeue_retries INT NOT NULL DEFAULT 0,
    id bigint GENERATED ALWAYS AS IDENTITY,
    task_id BIGINT NOT NULL,
    task_inserted_at TIMESTAMPTZ NOT NULL,
    event_type v1_event_type_olap NOT NULL,
    readable_status v1_readable_status_olap NOT NULL,
    retry_count INT NOT NULL DEFAULT 0,
    worker_id UUID,

    PRIMARY KEY (tenant_id, requeue_after, task_id, id)
) PARTITION BY HASH(task_id);
-- Claims up to event_limit due events for one tenant from a single hash
-- partition of v1_task_events_olap_tmp. Targets the partition table directly
-- (by number) and uses FOR UPDATE SKIP LOCKED so concurrent consumers of the
-- same partition never block each other.
CREATE OR REPLACE FUNCTION list_task_events_tmp(
    partition_number INT,
    tenant_id UUID,
    event_limit INT
) RETURNS SETOF v1_task_events_olap_tmp
    LANGUAGE plpgsql AS
$$
DECLARE
    partition_table text;
BEGIN
    partition_table := 'v1_task_events_olap_tmp_' || partition_number::text;

    RETURN QUERY EXECUTE format(
        'SELECT e.*
        FROM %I e
        WHERE e.tenant_id = $1
        AND e.requeue_after <= CURRENT_TIMESTAMP
        ORDER BY e.requeue_after, e.task_id, e.id
        LIMIT $2
        FOR UPDATE SKIP LOCKED',
        partition_table)
    USING tenant_id, event_limit;
END;
$$;
-- Durable per-task lifecycle event log (the _tmp table above is the
-- transient processing queue).
CREATE TABLE v1_task_events_olap (
    tenant_id UUID NOT NULL,
    id bigint GENERATED ALWAYS AS IDENTITY,
    inserted_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
    task_id BIGINT NOT NULL,
    task_inserted_at TIMESTAMPTZ NOT NULL,
    event_type v1_event_type_olap NOT NULL,
    workflow_id UUID NOT NULL,
    event_timestamp TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
    readable_status v1_readable_status_olap NOT NULL,
    retry_count INT NOT NULL DEFAULT 0,
    error_message TEXT,
    output JSONB,
    worker_id UUID,
    additional__event_data TEXT,
    additional__event_message TEXT,

    PRIMARY KEY (task_id, task_inserted_at, id)
);

-- NOTE(review): the primary key already leads with task_id, so this index
-- may be redundant — confirm before relying on it.
CREATE INDEX v1_task_events_olap_task_id_idx ON v1_task_events_olap (task_id);
-- this is a hash-partitioned table on the dag_id, so that we can process batches of events in parallel
-- without needing to place conflicting locks on dags.
CREATE TABLE v1_task_status_updates_tmp (
    tenant_id UUID NOT NULL,
    -- rows become visible to list_task_status_updates_tmp once
    -- requeue_after has passed
    requeue_after TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
    requeue_retries INT NOT NULL DEFAULT 0,
    id bigint GENERATED ALWAYS AS IDENTITY,
    dag_id BIGINT NOT NULL,
    dag_inserted_at TIMESTAMPTZ NOT NULL,

    PRIMARY KEY (tenant_id, requeue_after, dag_id, id)
) PARTITION BY HASH(dag_id);
-- Claims up to event_limit due status-update rows for one tenant from a
-- single hash partition of v1_task_status_updates_tmp, skipping rows locked
-- by concurrent consumers.
-- NOTE(review): ORDER BY e.dag_id alone is not fully deterministic within a
-- dag; confirm callers do not depend on row order beyond dag grouping.
CREATE OR REPLACE FUNCTION list_task_status_updates_tmp(
    partition_number INT,
    tenant_id UUID,
    event_limit INT
) RETURNS SETOF v1_task_status_updates_tmp
    LANGUAGE plpgsql AS
$$
DECLARE
    partition_table text;
BEGIN
    partition_table := 'v1_task_status_updates_tmp_' || partition_number::text;

    RETURN QUERY EXECUTE format(
        'SELECT e.*
        FROM %I e
        WHERE e.tenant_id = $1
        AND e.requeue_after <= CURRENT_TIMESTAMP
        ORDER BY e.dag_id
        LIMIT $2
        FOR UPDATE SKIP LOCKED',
        partition_table)
    USING tenant_id, event_limit;
END;
$$;
-- Events tables

-- User-pushed events, range-partitioned on seen_at.
CREATE TABLE v1_events_olap (
    tenant_id UUID NOT NULL,
    id BIGINT NOT NULL GENERATED ALWAYS AS IDENTITY,
    external_id UUID NOT NULL DEFAULT gen_random_uuid(),
    seen_at TIMESTAMPTZ NOT NULL,
    key TEXT NOT NULL,
    payload JSONB NOT NULL,
    additional_metadata JSONB,

    PRIMARY KEY (tenant_id, id, seen_at)
) PARTITION BY RANGE(seen_at);

CREATE INDEX v1_events_olap_key_idx ON v1_events_olap (tenant_id, key);
-- Maps an event's external UUID to its internal (event_id, event_seen_at)
-- identity; populated by the insert trigger on v1_events_olap.
CREATE TABLE v1_event_lookup_table_olap (
    tenant_id UUID NOT NULL,
    external_id UUID NOT NULL,
    event_id BIGINT NOT NULL,
    event_seen_at TIMESTAMPTZ NOT NULL,

    PRIMARY KEY (tenant_id, external_id, event_seen_at)
) PARTITION BY RANGE(event_seen_at);
-- Junction table linking an event to the run(s) it triggered; both sides
-- carry their timestamps so joins can target the right partitions.
CREATE TABLE v1_event_to_run_olap (
    run_id BIGINT NOT NULL,
    run_inserted_at TIMESTAMPTZ NOT NULL,
    event_id BIGINT NOT NULL,
    event_seen_at TIMESTAMPTZ NOT NULL,

    PRIMARY KEY (event_id, event_seen_at, run_id, run_inserted_at)
) PARTITION BY RANGE(event_seen_at);
-- TRIGGERS TO LINK TASKS, DAGS AND EVENTS --

-- Statement-level AFTER INSERT trigger on v1_tasks_olap. Fans each batch of
-- inserted tasks out to:
--   * v1_runs_olap         — only tasks with no dag_id (DAG members are
--                            represented by their DAG's run row instead)
--   * v1_lookup_table_olap — external_id -> task id mapping
--   * v1_dag_to_task_olap  — only tasks that belong to a DAG
CREATE OR REPLACE FUNCTION v1_tasks_olap_insert_function()
RETURNS TRIGGER AS
$$
BEGIN
    INSERT INTO v1_runs_olap (
        tenant_id,
        id,
        inserted_at,
        external_id,
        readable_status,
        kind,
        workflow_id,
        workflow_version_id,
        additional_metadata,
        parent_task_external_id
    )
    SELECT
        tenant_id,
        id,
        inserted_at,
        external_id,
        readable_status,
        'TASK',
        workflow_id,
        workflow_version_id,
        additional_metadata,
        parent_task_external_id
    FROM new_rows
    WHERE dag_id IS NULL;

    INSERT INTO v1_lookup_table_olap (
        tenant_id,
        external_id,
        task_id,
        inserted_at
    )
    SELECT
        tenant_id,
        external_id,
        id,
        inserted_at
    FROM new_rows
    ON CONFLICT (external_id) DO NOTHING;

    -- If the task has a dag_id and dag_inserted_at, insert into the lookup table
    INSERT INTO v1_dag_to_task_olap (
        dag_id,
        dag_inserted_at,
        task_id,
        task_inserted_at
    )
    SELECT
        dag_id,
        dag_inserted_at,
        id,
        inserted_at
    FROM new_rows
    WHERE dag_id IS NOT NULL;

    RETURN NULL;
END;
$$
LANGUAGE plpgsql;

CREATE TRIGGER v1_tasks_olap_status_insert_trigger
AFTER INSERT ON v1_tasks_olap
REFERENCING NEW TABLE AS new_rows
FOR EACH STATEMENT
EXECUTE FUNCTION v1_tasks_olap_insert_function();
-- Statement-level AFTER UPDATE trigger on v1_tasks_olap. Propagates each
-- task's new readable_status to its matching TASK row in v1_runs_olap, and
-- queues a status-recomputation row for every DAG that owns an updated task.
CREATE OR REPLACE FUNCTION v1_tasks_olap_status_update_function()
RETURNS TRIGGER AS
$$
BEGIN
    UPDATE
        v1_runs_olap r
    SET
        readable_status = n.readable_status
    FROM new_rows n
    WHERE
        r.id = n.id
        AND r.inserted_at = n.inserted_at
        AND r.kind = 'TASK';

    -- insert tmp events into task status updates table if we have a dag_id
    INSERT INTO v1_task_status_updates_tmp (
        tenant_id,
        dag_id,
        dag_inserted_at
    )
    SELECT
        tenant_id,
        dag_id,
        dag_inserted_at
    FROM new_rows
    WHERE dag_id IS NOT NULL;

    RETURN NULL;
END;
$$
LANGUAGE plpgsql;

CREATE TRIGGER v1_tasks_olap_status_update_trigger
AFTER UPDATE ON v1_tasks_olap
REFERENCING NEW TABLE AS new_rows
FOR EACH STATEMENT
EXECUTE FUNCTION v1_tasks_olap_status_update_function();
-- Statement-level AFTER INSERT trigger on v1_dags_olap. Every inserted DAG
-- gets a DAG-kind row in v1_runs_olap and an external_id -> dag_id entry in
-- the lookup table.
CREATE OR REPLACE FUNCTION v1_dags_olap_insert_function()
RETURNS TRIGGER AS
$$
BEGIN
    INSERT INTO v1_runs_olap (
        tenant_id,
        id,
        inserted_at,
        external_id,
        readable_status,
        kind,
        workflow_id,
        workflow_version_id,
        additional_metadata,
        parent_task_external_id
    )
    SELECT
        tenant_id,
        id,
        inserted_at,
        external_id,
        readable_status,
        'DAG',
        workflow_id,
        workflow_version_id,
        additional_metadata,
        parent_task_external_id
    FROM new_rows;

    INSERT INTO v1_lookup_table_olap (
        tenant_id,
        external_id,
        dag_id,
        inserted_at
    )
    SELECT
        tenant_id,
        external_id,
        id,
        inserted_at
    FROM new_rows
    ON CONFLICT (external_id) DO NOTHING;

    RETURN NULL;
END;
$$
LANGUAGE plpgsql;

CREATE TRIGGER v1_dags_olap_status_insert_trigger
AFTER INSERT ON v1_dags_olap
REFERENCING NEW TABLE AS new_rows
FOR EACH STATEMENT
EXECUTE FUNCTION v1_dags_olap_insert_function();
-- Statement-level AFTER UPDATE trigger on v1_dags_olap. Propagates each
-- DAG's new readable_status to its matching DAG row in v1_runs_olap.
CREATE OR REPLACE FUNCTION v1_dags_olap_status_update_function()
RETURNS TRIGGER AS
$$
BEGIN
    UPDATE
        v1_runs_olap r
    SET
        readable_status = n.readable_status
    FROM new_rows n
    WHERE
        r.id = n.id
        AND r.inserted_at = n.inserted_at
        AND r.kind = 'DAG';

    RETURN NULL;
END;
$$
LANGUAGE plpgsql;

CREATE TRIGGER v1_dags_olap_status_update_trigger
AFTER UPDATE ON v1_dags_olap
REFERENCING NEW TABLE AS new_rows
FOR EACH STATEMENT
EXECUTE FUNCTION v1_dags_olap_status_update_function();
-- Statement-level AFTER INSERT trigger on v1_runs_olap. Mirrors every new
-- run into the flat v1_statuses_olap table keyed by external_id.
CREATE OR REPLACE FUNCTION v1_runs_olap_insert_function()
RETURNS TRIGGER AS
$$
BEGIN
    INSERT INTO v1_statuses_olap (
        external_id,
        inserted_at,
        tenant_id,
        workflow_id,
        kind,
        readable_status
    )
    SELECT
        external_id,
        inserted_at,
        tenant_id,
        workflow_id,
        kind,
        readable_status
    FROM new_rows;

    RETURN NULL;
END;
$$
LANGUAGE plpgsql;

CREATE TRIGGER v1_runs_olap_status_insert_trigger
AFTER INSERT ON v1_runs_olap
REFERENCING NEW TABLE AS new_rows
FOR EACH STATEMENT
EXECUTE FUNCTION v1_runs_olap_insert_function();
-- Statement-level AFTER UPDATE trigger on v1_runs_olap. Keeps the status
-- mirror in v1_statuses_olap in sync with the run's readable_status.
CREATE OR REPLACE FUNCTION v1_runs_olap_status_update_function()
RETURNS TRIGGER AS
$$
BEGIN
    UPDATE
        v1_statuses_olap s
    SET
        readable_status = n.readable_status
    FROM new_rows n
    WHERE
        s.external_id = n.external_id
        AND s.inserted_at = n.inserted_at;

    RETURN NULL;
END;
$$
LANGUAGE plpgsql;

CREATE TRIGGER v1_runs_olap_status_update_trigger
AFTER UPDATE ON v1_runs_olap
REFERENCING NEW TABLE AS new_rows
FOR EACH STATEMENT
EXECUTE FUNCTION v1_runs_olap_status_update_function();
-- Statement-level AFTER INSERT trigger on v1_events_olap. Records each new
-- event's external_id -> (event_id, event_seen_at) mapping in the event
-- lookup table, skipping duplicates.
CREATE OR REPLACE FUNCTION v1_events_lookup_table_olap_insert_function()
RETURNS TRIGGER AS
$$
BEGIN
    INSERT INTO v1_event_lookup_table_olap (
        tenant_id,
        external_id,
        event_id,
        event_seen_at
    )
    SELECT
        tenant_id,
        external_id,
        id,
        seen_at
    FROM new_rows
    ON CONFLICT (tenant_id, external_id, event_seen_at) DO NOTHING;

    RETURN NULL;
END;
$$
LANGUAGE plpgsql;

CREATE TRIGGER v1_event_lookup_table_olap_insert_trigger
AFTER INSERT ON v1_events_olap
REFERENCING NEW TABLE AS new_rows
FOR EACH STATEMENT
EXECUTE FUNCTION v1_events_lookup_table_olap_insert_function();