feat(v1): new gRPC API endpoints (#1367)
* wip: api contracts
* feat: implement put workflow version endpoint
* add support for match existing data, get scaffolding in place for additional triggers
* create additional matches
* feat: durable sleep, user event matching
* update protos
* fix: working poc of user events, durable sleep
* add migration
* fix: migration column
* feat: durable event listener
* fix: skip overrides
* fix: input -> output
@@ -1,6 +1,6 @@
 syntax = "proto3";

-option go_package = "github.com/hatchet-dev/hatchet/internal/services/dispatcher/contracts";
+option go_package = "github.com/hatchet-dev/hatchet/internal/services/ingestor/contracts";

 import "google/protobuf/timestamp.proto";
33 api-contracts/v1/dispatcher.proto Normal file
@@ -0,0 +1,33 @@
syntax = "proto3";

option go_package = "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1";

package v1;

import "v1/shared/condition.proto";

service V1Dispatcher {
  rpc RegisterDurableEvent(RegisterDurableEventRequest) returns (RegisterDurableEventResponse) {}

  rpc ListenForDurableEvent(stream ListenForDurableEventRequest) returns (stream DurableEvent) {}
}

message RegisterDurableEventRequest {
  string task_id = 1; // external uuid for the task run
  string signal_key = 2; // the signal key for the event
  DurableEventListenerConditions conditions = 3; // the conditions for firing the durable event
}

message RegisterDurableEventResponse {
}

message ListenForDurableEventRequest {
  string task_id = 1; // single listener per worker
  string signal_key = 2; // the match id for the listener
}

message DurableEvent {
  string task_id = 1;
  string signal_key = 2;
  bytes data = 3; // the data for the event
}
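A minimal Go sketch of a client driving these two RPCs, assuming the generated stubs in the shared proto/v1 package; the endpoint, the plaintext connection, and all ids are illustrative, not part of this change:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	contracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1"
)

func main() {
	// illustrative, unauthenticated local connection
	conn, err := grpc.Dial("localhost:7070", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := contracts.NewV1DispatcherClient(conn)
	ctx := context.Background()

	// register a durable event keyed by (task_id, signal_key)
	_, err = client.RegisterDurableEvent(ctx, &contracts.RegisterDurableEventRequest{
		TaskId:    "3f9a1c2e-0000-0000-0000-000000000000", // hypothetical external task run id
		SignalKey: "payment:completed",
		Conditions: &contracts.DurableEventListenerConditions{
			UserEventConditions: []*contracts.UserEventMatchCondition{{
				Base: &contracts.BaseMatchCondition{
					ReadableDataKey: "payment",
					Action:          contracts.Action_CREATE,
					OrGroupId:       "0d7c4a9b-0000-0000-0000-000000000000", // hypothetical OR group UUID
				},
				UserEventKey: "payment:completed",
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// open the bidirectional stream and wait for the event to fire
	stream, err := client.ListenForDurableEvent(ctx)
	if err != nil {
		log.Fatal(err)
	}

	if err := stream.Send(&contracts.ListenForDurableEventRequest{
		TaskId:    "3f9a1c2e-0000-0000-0000-000000000000",
		SignalKey: "payment:completed",
	}); err != nil {
		log.Fatal(err)
	}

	ev, err := stream.Recv()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("durable event for %s: %s", ev.TaskId, string(ev.Data))
}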
45 api-contracts/v1/shared/condition.proto Normal file
@@ -0,0 +1,45 @@
syntax = "proto3";

option go_package = "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1";

package v1;

enum Action {
  CREATE = 0;
  QUEUE = 1;
  CANCEL = 2;
  SKIP = 3;
}

message BaseMatchCondition {
  string readable_data_key = 1;
  Action action = 2;
  string or_group_id = 3; // a UUID defining the OR group for this condition
  string expression = 4;
}

message ParentOverrideMatchCondition {
  BaseMatchCondition base = 1;
  string parent_readable_id = 2;
}

message SleepMatchCondition {
  BaseMatchCondition base = 1;
  string sleep_for = 2; // a duration string indicating how long to sleep
}

message UserEventMatchCondition {
  BaseMatchCondition base = 1;
  string user_event_key = 2;
}

message TaskConditions {
  repeated ParentOverrideMatchCondition parent_override_conditions = 1;
  repeated SleepMatchCondition sleep_conditions = 2;
  repeated UserEventMatchCondition user_event_conditions = 3;
}

message DurableEventListenerConditions {
  repeated SleepMatchCondition sleep_conditions = 1;
  repeated UserEventMatchCondition user_event_conditions = 2;
}
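A sketch of constructing these conditions from Go using the generated types; the keys, the UUID handling, and the CEL expression are illustrative:

package main

import (
	contracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1"
)

// exampleConditions queues the task after a 10s sleep OR once a matching user
// event arrives; sharing or_group_id means either condition satisfies the group.
func exampleConditions(orGroup string) *contracts.TaskConditions {
	return &contracts.TaskConditions{
		SleepConditions: []*contracts.SleepMatchCondition{{
			Base: &contracts.BaseMatchCondition{
				ReadableDataKey: "sleep:10s", // illustrative key
				Action:          contracts.Action_QUEUE,
				OrGroupId:       orGroup, // a UUID shared across the OR group
			},
			SleepFor: "10s",
		}},
		UserEventConditions: []*contracts.UserEventMatchCondition{{
			Base: &contracts.BaseMatchCondition{
				ReadableDataKey: "event:payment", // illustrative key
				Action:          contracts.Action_QUEUE,
				OrGroupId:       orGroup,
				Expression:      "input.amount > 0", // illustrative CEL expression
			},
			UserEventKey: "payment:completed",
		}},
	}
}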
162 api-contracts/v1/workflows.proto Normal file
@@ -0,0 +1,162 @@
syntax = "proto3";

option go_package = "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1";

package v1;

import "google/protobuf/timestamp.proto";
import "v1/shared/condition.proto";

// AdminService represents a set of RPCs for admin management of tasks, workflows, etc.
service AdminService {
  rpc PutWorkflow(CreateWorkflowVersionRequest) returns (CreateWorkflowVersionResponse);
  rpc CancelTasks(CancelTasksRequest) returns (CancelTasksResponse);
  rpc ReplayTasks(ReplayTasksRequest) returns (ReplayTasksResponse);
  rpc TriggerWorkflowRun(TriggerWorkflowRunRequest) returns (TriggerWorkflowRunResponse);
}

message CancelTasksRequest {
  repeated string externalIds = 1; // a list of external UUIDs
  optional TasksFilter filter = 2;
}

message ReplayTasksRequest {
  repeated string externalIds = 1; // a list of external UUIDs
  optional TasksFilter filter = 2;
}

message TasksFilter {
  repeated string statuses = 1;
  google.protobuf.Timestamp since = 2;
  optional google.protobuf.Timestamp until = 3;
  repeated string workflow_ids = 4;
  repeated string additional_metadata = 5;
}

message CancelTasksResponse {
  repeated string cancelled_tasks = 1;
}

message ReplayTasksResponse {
  repeated string replayed_tasks = 1;
}

message TriggerWorkflowRunRequest {
  string workflow_name = 1;
  bytes input = 2;
  bytes additional_metadata = 3;
}

message TriggerWorkflowRunResponse {
  string external_id = 1;
}

enum StickyStrategy {
  SOFT = 0;
  HARD = 1;
}

enum RateLimitDuration {
  SECOND = 0;
  MINUTE = 1;
  HOUR = 2;
  DAY = 3;
  WEEK = 4;
  MONTH = 5;
  YEAR = 6;
}

// CreateWorkflowVersionRequest represents options to create a workflow version.
message CreateWorkflowVersionRequest {
  string name = 1; // (required) the workflow name
  string description = 2; // (optional) the workflow description
  string version = 3; // (optional) the workflow version
  repeated string event_triggers = 4; // (optional) event triggers for the workflow
  repeated string cron_triggers = 5; // (optional) cron triggers for the workflow
  repeated CreateTaskOpts tasks = 6; // (required) the workflow tasks
  Concurrency concurrency = 7; // (optional) the workflow concurrency options
  optional string cron_input = 8; // (optional) the input for the cron trigger
  optional CreateTaskOpts on_failure_task = 9; // (optional) the task to run on failure
  optional StickyStrategy sticky = 10; // (optional) the sticky strategy for assigning steps to workers
}

enum ConcurrencyLimitStrategy {
  CANCEL_IN_PROGRESS = 0;
  DROP_NEWEST = 1; // deprecated
  QUEUE_NEWEST = 2; // deprecated
  GROUP_ROUND_ROBIN = 3;
  CANCEL_NEWEST = 4;
}

message Concurrency {
  string expression = 1; // (required) the CEL expression to use for concurrency
  optional int32 max_runs = 2; // (optional) the maximum number of concurrent workflow runs, default 1
  optional ConcurrencyLimitStrategy limit_strategy = 3; // (optional) the strategy to use when the concurrency limit is reached, default CANCEL_IN_PROGRESS
}

enum WorkerLabelComparator {
  EQUAL = 0;
  NOT_EQUAL = 1;
  GREATER_THAN = 2;
  GREATER_THAN_OR_EQUAL = 3;
  LESS_THAN = 4;
  LESS_THAN_OR_EQUAL = 5;
}

message DesiredWorkerLabels {
  // value of the affinity
  optional string strValue = 1;
  optional int32 intValue = 2;

  /**
   * (optional) Specifies whether the affinity setting is required.
   * If required, the worker will not accept actions that do not have a truthy affinity setting.
   *
   * Defaults to false.
   */
  optional bool required = 3;

  /**
   * (optional) Specifies the comparator for the affinity setting.
   * If not set, the default is EQUAL.
   */
  optional WorkerLabelComparator comparator = 4;

  /**
   * (optional) Specifies the weight of the affinity setting.
   * If not set, the default is 100.
   */
  optional int32 weight = 5;
}

// CreateTaskOpts represents options to create a task.
message CreateTaskOpts {
  string readable_id = 1; // (required) the task name
  string action = 2; // (required) the task action id
  string timeout = 3; // (optional) the task timeout
  string inputs = 4; // (optional) the task inputs, assuming string representation of JSON
  repeated string parents = 5; // (optional) the task parents. if none are passed in, this is a root task
  int32 retries = 6; // (optional) the number of retries for the step, default 0
  repeated CreateTaskRateLimit rate_limits = 7; // (optional) the rate limits for the step
  map<string, DesiredWorkerLabels> worker_labels = 8; // (optional) the desired worker affinity state for the step
  optional float backoff_factor = 9; // (optional) the retry backoff factor for the step
  optional int32 backoff_max_seconds = 10; // (optional) the maximum backoff time for the step
  repeated Concurrency concurrency = 11; // (optional) the task concurrency options
  optional TaskConditions conditions = 12; // (optional) the task conditions for creating the task
  optional string schedule_timeout = 13; // (optional) the timeout for the schedule
}

message CreateTaskRateLimit {
  string key = 1; // (required) the key for the rate limit
  optional int32 units = 2; // (optional) the number of units this step consumes
  optional string key_expr = 3; // (optional) a CEL expression for determining the rate limit key
  optional string units_expr = 4; // (optional) a CEL expression for determining the number of units consumed
  optional string limit_values_expr = 5; // (optional) a CEL expression for determining the total amount of rate limit units
  optional RateLimitDuration duration = 6; // (optional) the default rate limit window to use for dynamic rate limits
}

// CreateWorkflowVersionResponse represents the response after creating a workflow version.
message CreateWorkflowVersionResponse {
  string id = 1;
  string workflow_id = 2;
}
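A sketch of registering a two-task workflow over the new PutWorkflow RPC, assuming the generated AdminService stubs; connection and auth setup are omitted, and the workflow name, event key, and action ids are illustrative:

package putworkflow

import (
	"context"
	"log"

	"google.golang.org/grpc"

	contracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1"
)

func putWorkflow(ctx context.Context, conn *grpc.ClientConn) {
	admin := contracts.NewAdminServiceClient(conn)

	resp, err := admin.PutWorkflow(ctx, &contracts.CreateWorkflowVersionRequest{
		Name:          "order-pipeline",
		EventTriggers: []string{"order:created"},
		Tasks: []*contracts.CreateTaskOpts{
			// "fulfill" names "validate" as a parent, forming a two-node DAG
			{ReadableId: "validate", Action: "default:validate"},
			{ReadableId: "fulfill", Action: "default:fulfill", Parents: []string{"validate"}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	log.Printf("workflow %s, version %s", resp.WorkflowId, resp.Id)
}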
@@ -1,48 +0,0 @@
syntax = "proto3";

option go_package = "github.com/hatchet-dev/hatchet/internal/services/admin/v1/contracts";

import "google/protobuf/timestamp.proto";

// AdminService represents a set of RPCs for admin management of tasks, workflows, etc.
service AdminService {
  rpc CancelTasks(CancelTasksRequest) returns (CancelTasksResponse);
  rpc ReplayTasks(ReplayTasksRequest) returns (ReplayTasksResponse);
  rpc TriggerWorkflowRun(TriggerWorkflowRunRequest) returns (TriggerWorkflowRunResponse);
}

message CancelTasksRequest {
  repeated string externalIds = 1; // a list of external UUIDs
  optional TasksFilter filter = 2;
}

message ReplayTasksRequest {
  repeated string externalIds = 1; // a list of external UUIDs
  optional TasksFilter filter = 2;
}

message TasksFilter {
  repeated string statuses = 1;
  google.protobuf.Timestamp since = 2;
  optional google.protobuf.Timestamp until = 3;
  repeated string workflow_ids = 4;
  repeated string additional_metadata = 5;
}

message CancelTasksResponse {
  repeated string cancelled_tasks = 1;
}

message ReplayTasksResponse {
  repeated string replayed_tasks = 1;
}

message TriggerWorkflowRunRequest {
  string workflow_name = 1;
  bytes input = 2;
  bytes additional_metadata = 3;
}

message TriggerWorkflowRunResponse {
  string external_id = 1;
}
@@ -5,7 +5,7 @@ import (
 	"google.golang.org/protobuf/types/known/timestamppb"

 	"github.com/hatchet-dev/hatchet/api/v1/server/oas/gen"
-	"github.com/hatchet-dev/hatchet/internal/services/admin/contracts/v1"
+	contracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1"
 	"github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc"
 )
@@ -5,7 +5,7 @@ import (
 	"google.golang.org/protobuf/types/known/timestamppb"

 	"github.com/hatchet-dev/hatchet/api/v1/server/oas/gen"
-	"github.com/hatchet-dev/hatchet/internal/services/admin/contracts/v1"
+	contracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1"
 	"github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc"
 )
@@ -4,7 +4,7 @@ import (
 	"context"

 	"github.com/hatchet-dev/hatchet/api/v1/server/handlers/v1/proxy"
-	admincontracts "github.com/hatchet-dev/hatchet/internal/services/admin/contracts/v1"
+	admincontracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1"
 	"github.com/hatchet-dev/hatchet/pkg/config/server"

 	client "github.com/hatchet-dev/hatchet/pkg/client/v1"
@@ -4,7 +4,7 @@ import (
 	"context"

 	"github.com/hatchet-dev/hatchet/api/v1/server/handlers/v1/proxy"
-	admincontracts "github.com/hatchet-dev/hatchet/internal/services/admin/contracts/v1"
+	admincontracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1"
 	"github.com/hatchet-dev/hatchet/pkg/config/server"

 	client "github.com/hatchet-dev/hatchet/pkg/client/v1"
@@ -13,7 +13,7 @@ import (

 	"github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors"
 	"github.com/hatchet-dev/hatchet/api/v1/server/oas/gen"
-	"github.com/hatchet-dev/hatchet/internal/services/admin/contracts/v1"
+	contracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1"
 	"github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc"
 	"github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers"
 	v1 "github.com/hatchet-dev/hatchet/pkg/repository/v1"
@@ -16,6 +16,7 @@ import (
 	"github.com/hatchet-dev/hatchet/internal/services/controllers/v1/task"
 	"github.com/hatchet-dev/hatchet/internal/services/controllers/workflows"
 	"github.com/hatchet-dev/hatchet/internal/services/dispatcher"
+	dispatcherv1 "github.com/hatchet-dev/hatchet/internal/services/dispatcher/v1"
 	"github.com/hatchet-dev/hatchet/internal/services/grpc"
 	"github.com/hatchet-dev/hatchet/internal/services/health"
 	"github.com/hatchet-dev/hatchet/internal/services/ingestor"
@@ -430,6 +431,16 @@ func runV0Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro
 		return nil, fmt.Errorf("could not start dispatcher: %w", err)
 	}

+	dv1, err := dispatcherv1.NewDispatcherService(
+		dispatcherv1.WithRepository(sc.V1),
+		dispatcherv1.WithMessageQueue(sc.MessageQueueV1),
+		dispatcherv1.WithLogger(sc.Logger),
+	)
+
+	if err != nil {
+		return nil, fmt.Errorf("could not create dispatcher (v1): %w", err)
+	}
+
 	// create the event ingestor
 	ei, err := ingestor.NewIngestor(
 		ingestor.WithEventRepository(
@@ -476,6 +487,7 @@ func runV0Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro
 		grpc.WithConfig(sc),
 		grpc.WithIngestor(ei),
 		grpc.WithDispatcher(d),
+		grpc.WithDispatcherV1(dv1),
 		grpc.WithAdmin(adminSvc),
 		grpc.WithAdminV1(adminv1Svc),
 		grpc.WithLogger(sc.Logger),
@@ -858,6 +870,16 @@ func runV1Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro
 		return nil, fmt.Errorf("could not start dispatcher: %w", err)
 	}

+	dv1, err := dispatcherv1.NewDispatcherService(
+		dispatcherv1.WithRepository(sc.V1),
+		dispatcherv1.WithMessageQueue(sc.MessageQueueV1),
+		dispatcherv1.WithLogger(sc.Logger),
+	)
+
+	if err != nil {
+		return nil, fmt.Errorf("could not create dispatcher (v1): %w", err)
+	}
+
 	// create the event ingestor
 	ei, err := ingestor.NewIngestor(
 		ingestor.WithEventRepository(
@@ -905,6 +927,7 @@ func runV1Config(ctx context.Context, sc *server.ServerConfig) ([]Teardown, erro
 		grpc.WithConfig(sc),
 		grpc.WithIngestor(ei),
 		grpc.WithDispatcher(d),
+		grpc.WithDispatcherV1(dv1),
 		grpc.WithAdmin(adminSvc),
 		grpc.WithAdminV1(adminv1Svc),
 		grpc.WithLogger(sc.Logger),
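NewDispatcherService follows the functional-options pattern used throughout these constructors. A self-contained sketch of the pattern itself; every name here is illustrative, not the actual dispatcher/v1 implementation:

package main

import "fmt"

type options struct {
	logger string
	queue  string
}

type Opt func(*options)

func WithLogger(l string) Opt { return func(o *options) { o.logger = l } }
func WithQueue(q string) Opt  { return func(o *options) { o.queue = q } }

func NewService(opts ...Opt) (*options, error) {
	o := &options{logger: "default", queue: "default"} // defaults first
	for _, fn := range opts {
		fn(o) // each option mutates the config struct
	}
	return o, nil
}

func main() {
	svc, _ := NewService(WithLogger("zerolog"), WithQueue("rabbitmq"))
	fmt.Println(svc.logger, svc.queue)
}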
@@ -0,0 +1,45 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE v1_match ADD COLUMN existing_data JSONB;

ALTER TYPE v1_match_condition_action ADD VALUE 'CREATE_MATCH';

CREATE TYPE v1_step_match_condition_kind AS ENUM ('PARENT_OVERRIDE', 'USER_EVENT', 'SLEEP');

CREATE TABLE v1_step_match_condition (
    id BIGINT GENERATED ALWAYS AS IDENTITY,
    tenant_id UUID NOT NULL,
    step_id UUID NOT NULL,
    readable_data_key TEXT NOT NULL,
    action v1_match_condition_action NOT NULL DEFAULT 'CREATE',
    or_group_id UUID NOT NULL,
    expression TEXT,
    kind v1_step_match_condition_kind NOT NULL,
    -- If this is a SLEEP condition, this will be set to the sleep duration
    sleep_duration TEXT,
    -- If this is a USER_EVENT condition, this will be set to the user event key
    event_key TEXT,
    -- If this is a PARENT_OVERRIDE condition, this will be set to the parent readable_id
    parent_readable_id TEXT,
    PRIMARY KEY (step_id, id)
);

CREATE TABLE v1_durable_sleep (
    id BIGINT GENERATED ALWAYS AS IDENTITY,
    tenant_id UUID NOT NULL,
    sleep_until TIMESTAMPTZ NOT NULL,
    sleep_duration TEXT NOT NULL,
    PRIMARY KEY (tenant_id, sleep_until, id)
);
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE v1_match DROP COLUMN existing_data;

-- Note: Removing the enum value 'CREATE_MATCH' from v1_match_condition_action is not supported by PostgreSQL.

DROP TABLE v1_durable_sleep;
DROP TABLE v1_step_match_condition;
DROP TYPE v1_step_match_condition_kind;
-- +goose StatementEnd
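The v1_durable_sleep primary key (tenant_id, sleep_until, id) supports an efficient per-tenant index scan for expired sleeps. A sketch of what the sleep emitter's poll could look like with database/sql; the query shape, limit, and driver are assumptions, not the repository's actual implementation:

package main

import (
	"context"
	"database/sql"
	"fmt"

	_ "github.com/jackc/pgx/v5/stdlib" // assumed Postgres driver, for illustration
)

// pollExpiredSleeps returns ids of durable sleeps whose deadline has passed.
func pollExpiredSleeps(ctx context.Context, db *sql.DB, tenantID string) ([]int64, error) {
	rows, err := db.QueryContext(ctx,
		`SELECT id
		   FROM v1_durable_sleep
		  WHERE tenant_id = $1
		    AND sleep_until <= now()
		  ORDER BY sleep_until
		  LIMIT 1000`, tenantID)
	if err != nil {
		return nil, fmt.Errorf("could not poll durable sleeps: %w", err)
	}
	defer rows.Close()

	var ids []int64
	for rows.Next() {
		var id int64
		if err := rows.Scan(&id); err != nil {
			return nil, err
		}
		ids = append(ids, id)
	}
	return ids, rows.Err()
}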
@@ -7,6 +7,27 @@ go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.2

 export PATH="$PATH:$(go env GOPATH)/bin"

+protoc --proto_path=api-contracts \
+    --go_out=./internal/services/shared/proto/v1 \
+    --go_opt=module=github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1 \
+    --go-grpc_out=./internal/services/shared/proto/v1 \
+    --go-grpc_opt=module=github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1 \
+    v1/shared/condition.proto
+
+protoc --proto_path=api-contracts \
+    --go_out=./internal/services/shared/proto/v1 \
+    --go_opt=module=github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1 \
+    --go-grpc_out=./internal/services/shared/proto/v1 \
+    --go-grpc_opt=module=github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1 \
+    v1/dispatcher.proto
+
+protoc --proto_path=api-contracts \
+    --go_out=./internal/services/shared/proto/v1 \
+    --go_opt=module=github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1 \
+    --go-grpc_out=./internal/services/shared/proto/v1 \
+    --go-grpc_opt=module=github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1 \
+    v1/workflows.proto
+
 protoc --proto_path=api-contracts/dispatcher --go_out=./internal/services/dispatcher/contracts --go_opt=paths=source_relative \
     --go-grpc_out=./internal/services/dispatcher/contracts --go-grpc_opt=paths=source_relative \
     dispatcher.proto
@@ -18,7 +39,3 @@ protoc --proto_path=api-contracts/events --go_out=./internal/services/ingestor/c
 protoc --proto_path=api-contracts/workflows --go_out=./internal/services/admin/contracts --go_opt=paths=source_relative \
     --go-grpc_out=./internal/services/admin/contracts --go-grpc_opt=paths=source_relative \
     workflows.proto
-
-protoc --proto_path=api-contracts/workflows --go_out=./internal/services/admin/contracts/v1 --go_opt=paths=source_relative \
-    --go-grpc_out=./internal/services/admin/contracts/v1 --go-grpc_opt=paths=source_relative \
-    v1-admin.proto
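Note the difference in output-path options: the new v1 invocations use --go_opt=module=..., which strips that module prefix from each file's go_package when computing output paths, so the generated files land directly under ./internal/services/shared/proto/v1. The older invocations use paths=source_relative, which instead mirrors the layout of the input .proto files.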
@@ -1,6 +1,8 @@
 package dagutils

-import "github.com/hatchet-dev/hatchet/pkg/repository"
+import (
+	"github.com/hatchet-dev/hatchet/pkg/repository"
+)

 func HasCycle(steps []repository.CreateWorkflowStepOpts) bool {
 	graph := make(map[string][]string)
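HasCycle builds an adjacency list from each step's parent references. A self-contained sketch of the standard three-color DFS such a check needs; this illustrates the technique, not the repository's code:

package main

import "fmt"

// hasCycle reports whether the directed graph (node -> children) contains a
// cycle, using the classic white/grey/black DFS coloring.
func hasCycle(graph map[string][]string) bool {
	const (
		white = 0 // unvisited
		grey  = 1 // on the current DFS path
		black = 2 // fully explored
	)
	color := make(map[string]int)

	var visit func(n string) bool
	visit = func(n string) bool {
		color[n] = grey
		for _, m := range graph[n] {
			if color[m] == grey {
				return true // back edge: cycle found
			}
			if color[m] == white && visit(m) {
				return true
			}
		}
		color[n] = black
		return false
	}

	for n := range graph {
		if color[n] == white && visit(n) {
			return true
		}
	}
	return false
}

func main() {
	g := map[string][]string{"a": {"b"}, "b": {"c"}, "c": {"a"}}
	fmt.Println(hasCycle(g)) // true
}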
@@ -1,646 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.28.1
// 	protoc        v5.29.3
// source: v1-admin.proto

package contracts

import (
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
	reflect "reflect"
	sync "sync"
)

const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

type CancelTasksRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	ExternalIds []string     `protobuf:"bytes,1,rep,name=externalIds,proto3" json:"externalIds,omitempty"` // a list of external UUIDs
	Filter      *TasksFilter `protobuf:"bytes,2,opt,name=filter,proto3,oneof" json:"filter,omitempty"`
}

func (x *CancelTasksRequest) Reset() {
	*x = CancelTasksRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_v1_admin_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *CancelTasksRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CancelTasksRequest) ProtoMessage() {}

func (x *CancelTasksRequest) ProtoReflect() protoreflect.Message {
	mi := &file_v1_admin_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CancelTasksRequest.ProtoReflect.Descriptor instead.
func (*CancelTasksRequest) Descriptor() ([]byte, []int) {
	return file_v1_admin_proto_rawDescGZIP(), []int{0}
}

func (x *CancelTasksRequest) GetExternalIds() []string {
	if x != nil {
		return x.ExternalIds
	}
	return nil
}

func (x *CancelTasksRequest) GetFilter() *TasksFilter {
	if x != nil {
		return x.Filter
	}
	return nil
}

type ReplayTasksRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	ExternalIds []string     `protobuf:"bytes,1,rep,name=externalIds,proto3" json:"externalIds,omitempty"` // a list of external UUIDs
	Filter      *TasksFilter `protobuf:"bytes,2,opt,name=filter,proto3,oneof" json:"filter,omitempty"`
}

func (x *ReplayTasksRequest) Reset() {
	*x = ReplayTasksRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_v1_admin_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ReplayTasksRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ReplayTasksRequest) ProtoMessage() {}

func (x *ReplayTasksRequest) ProtoReflect() protoreflect.Message {
	mi := &file_v1_admin_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ReplayTasksRequest.ProtoReflect.Descriptor instead.
func (*ReplayTasksRequest) Descriptor() ([]byte, []int) {
	return file_v1_admin_proto_rawDescGZIP(), []int{1}
}

func (x *ReplayTasksRequest) GetExternalIds() []string {
	if x != nil {
		return x.ExternalIds
	}
	return nil
}

func (x *ReplayTasksRequest) GetFilter() *TasksFilter {
	if x != nil {
		return x.Filter
	}
	return nil
}

type TasksFilter struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Statuses           []string               `protobuf:"bytes,1,rep,name=statuses,proto3" json:"statuses,omitempty"`
	Since              *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=since,proto3" json:"since,omitempty"`
	Until              *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=until,proto3,oneof" json:"until,omitempty"`
	WorkflowIds        []string               `protobuf:"bytes,4,rep,name=workflow_ids,json=workflowIds,proto3" json:"workflow_ids,omitempty"`
	AdditionalMetadata []string               `protobuf:"bytes,5,rep,name=additional_metadata,json=additionalMetadata,proto3" json:"additional_metadata,omitempty"`
}

func (x *TasksFilter) Reset() {
	*x = TasksFilter{}
	if protoimpl.UnsafeEnabled {
		mi := &file_v1_admin_proto_msgTypes[2]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *TasksFilter) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TasksFilter) ProtoMessage() {}

func (x *TasksFilter) ProtoReflect() protoreflect.Message {
	mi := &file_v1_admin_proto_msgTypes[2]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TasksFilter.ProtoReflect.Descriptor instead.
func (*TasksFilter) Descriptor() ([]byte, []int) {
	return file_v1_admin_proto_rawDescGZIP(), []int{2}
}

func (x *TasksFilter) GetStatuses() []string {
	if x != nil {
		return x.Statuses
	}
	return nil
}

func (x *TasksFilter) GetSince() *timestamppb.Timestamp {
	if x != nil {
		return x.Since
	}
	return nil
}

func (x *TasksFilter) GetUntil() *timestamppb.Timestamp {
	if x != nil {
		return x.Until
	}
	return nil
}

func (x *TasksFilter) GetWorkflowIds() []string {
	if x != nil {
		return x.WorkflowIds
	}
	return nil
}

func (x *TasksFilter) GetAdditionalMetadata() []string {
	if x != nil {
		return x.AdditionalMetadata
	}
	return nil
}

type CancelTasksResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	CancelledTasks []string `protobuf:"bytes,1,rep,name=cancelled_tasks,json=cancelledTasks,proto3" json:"cancelled_tasks,omitempty"`
}

func (x *CancelTasksResponse) Reset() {
	*x = CancelTasksResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_v1_admin_proto_msgTypes[3]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *CancelTasksResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*CancelTasksResponse) ProtoMessage() {}

func (x *CancelTasksResponse) ProtoReflect() protoreflect.Message {
	mi := &file_v1_admin_proto_msgTypes[3]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CancelTasksResponse.ProtoReflect.Descriptor instead.
func (*CancelTasksResponse) Descriptor() ([]byte, []int) {
	return file_v1_admin_proto_rawDescGZIP(), []int{3}
}

func (x *CancelTasksResponse) GetCancelledTasks() []string {
	if x != nil {
		return x.CancelledTasks
	}
	return nil
}

type ReplayTasksResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	ReplayedTasks []string `protobuf:"bytes,1,rep,name=replayed_tasks,json=replayedTasks,proto3" json:"replayed_tasks,omitempty"`
}

func (x *ReplayTasksResponse) Reset() {
	*x = ReplayTasksResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_v1_admin_proto_msgTypes[4]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ReplayTasksResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ReplayTasksResponse) ProtoMessage() {}

func (x *ReplayTasksResponse) ProtoReflect() protoreflect.Message {
	mi := &file_v1_admin_proto_msgTypes[4]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ReplayTasksResponse.ProtoReflect.Descriptor instead.
func (*ReplayTasksResponse) Descriptor() ([]byte, []int) {
	return file_v1_admin_proto_rawDescGZIP(), []int{4}
}

func (x *ReplayTasksResponse) GetReplayedTasks() []string {
	if x != nil {
		return x.ReplayedTasks
	}
	return nil
}

type TriggerWorkflowRunRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	WorkflowName       string `protobuf:"bytes,1,opt,name=workflow_name,json=workflowName,proto3" json:"workflow_name,omitempty"`
	Input              []byte `protobuf:"bytes,2,opt,name=input,proto3" json:"input,omitempty"`
	AdditionalMetadata []byte `protobuf:"bytes,3,opt,name=additional_metadata,json=additionalMetadata,proto3" json:"additional_metadata,omitempty"`
}

func (x *TriggerWorkflowRunRequest) Reset() {
	*x = TriggerWorkflowRunRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_v1_admin_proto_msgTypes[5]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *TriggerWorkflowRunRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TriggerWorkflowRunRequest) ProtoMessage() {}

func (x *TriggerWorkflowRunRequest) ProtoReflect() protoreflect.Message {
	mi := &file_v1_admin_proto_msgTypes[5]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TriggerWorkflowRunRequest.ProtoReflect.Descriptor instead.
func (*TriggerWorkflowRunRequest) Descriptor() ([]byte, []int) {
	return file_v1_admin_proto_rawDescGZIP(), []int{5}
}

func (x *TriggerWorkflowRunRequest) GetWorkflowName() string {
	if x != nil {
		return x.WorkflowName
	}
	return ""
}

func (x *TriggerWorkflowRunRequest) GetInput() []byte {
	if x != nil {
		return x.Input
	}
	return nil
}

func (x *TriggerWorkflowRunRequest) GetAdditionalMetadata() []byte {
	if x != nil {
		return x.AdditionalMetadata
	}
	return nil
}

type TriggerWorkflowRunResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	ExternalId string `protobuf:"bytes,1,opt,name=external_id,json=externalId,proto3" json:"external_id,omitempty"`
}

func (x *TriggerWorkflowRunResponse) Reset() {
	*x = TriggerWorkflowRunResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_v1_admin_proto_msgTypes[6]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *TriggerWorkflowRunResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TriggerWorkflowRunResponse) ProtoMessage() {}

func (x *TriggerWorkflowRunResponse) ProtoReflect() protoreflect.Message {
	mi := &file_v1_admin_proto_msgTypes[6]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TriggerWorkflowRunResponse.ProtoReflect.Descriptor instead.
func (*TriggerWorkflowRunResponse) Descriptor() ([]byte, []int) {
	return file_v1_admin_proto_rawDescGZIP(), []int{6}
}

func (x *TriggerWorkflowRunResponse) GetExternalId() string {
	if x != nil {
		return x.ExternalId
	}
	return ""
}

var File_v1_admin_proto protoreflect.FileDescriptor

var file_v1_admin_proto_rawDesc = []byte{
	0x0a, 0x0e, 0x76, 0x31, 0x2d, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
	0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
	0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74,
	0x6f, 0x22, 0x6c, 0x0a, 0x12, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x61, 0x73, 0x6b, 0x73,
	0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x65, 0x78, 0x74, 0x65, 0x72,
	0x6e, 0x61, 0x6c, 0x49, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x65, 0x78,
	0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x73, 0x12, 0x29, 0x0a, 0x06, 0x66, 0x69, 0x6c,
	0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x54, 0x61, 0x73, 0x6b,
	0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65,
	0x72, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22,
	0x6c, 0x0a, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65,
	0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61,
	0x6c, 0x49, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x65, 0x78, 0x74, 0x65,
	0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x73, 0x12, 0x29, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65,
	0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x46,
	0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x88,
	0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xf0, 0x01,
	0x0a, 0x0b, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a,
	0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
	0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x05, 0x73, 0x69, 0x6e,
	0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
	0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x12, 0x35, 0x0a, 0x05, 0x75,
	0x6e, 0x74, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
	0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x05, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x88,
	0x01, 0x01, 0x12, 0x21, 0x0a, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69,
	0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c,
	0x6f, 0x77, 0x49, 0x64, 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f,
	0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x03,
	0x28, 0x09, 0x52, 0x12, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65,
	0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x75, 0x6e, 0x74, 0x69, 0x6c,
	0x22, 0x3e, 0x0a, 0x13, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52,
	0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x61, 0x6e, 0x63, 0x65,
	0x6c, 0x6c, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09,
	0x52, 0x0e, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x6c, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x73,
	0x22, 0x3c, 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52,
	0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x70, 0x6c, 0x61,
	0x79, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
	0x0d, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x22, 0x87,
	0x01, 0x0a, 0x19, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c,
	0x6f, 0x77, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d,
	0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
	0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x61, 0x6d,
	0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c,
	0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74,
	0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03,
	0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c,
	0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x3d, 0x0a, 0x1a, 0x54, 0x72, 0x69, 0x67,
	0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x52, 0x65,
	0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e,
	0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x78, 0x74,
	0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x32, 0xd1, 0x01, 0x0a, 0x0c, 0x41, 0x64, 0x6d, 0x69,
	0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x43, 0x61, 0x6e, 0x63,
	0x65, 0x6c, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x13, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c,
	0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x43,
	0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
	0x73, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b,
	0x73, 0x12, 0x13, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52,
	0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x54,
	0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x12,
	0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52,
	0x75, 0x6e, 0x12, 0x1a, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b,
	0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b,
	0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77,
	0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x45, 0x5a, 0x43, 0x67,
	0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65,
	0x74, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2f, 0x69, 0x6e,
	0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f,
	0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63,
	0x74, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
	file_v1_admin_proto_rawDescOnce sync.Once
	file_v1_admin_proto_rawDescData = file_v1_admin_proto_rawDesc
)

func file_v1_admin_proto_rawDescGZIP() []byte {
	file_v1_admin_proto_rawDescOnce.Do(func() {
		file_v1_admin_proto_rawDescData = protoimpl.X.CompressGZIP(file_v1_admin_proto_rawDescData)
	})
	return file_v1_admin_proto_rawDescData
}

var file_v1_admin_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
var file_v1_admin_proto_goTypes = []interface{}{
	(*CancelTasksRequest)(nil),         // 0: CancelTasksRequest
	(*ReplayTasksRequest)(nil),         // 1: ReplayTasksRequest
	(*TasksFilter)(nil),                // 2: TasksFilter
	(*CancelTasksResponse)(nil),        // 3: CancelTasksResponse
	(*ReplayTasksResponse)(nil),        // 4: ReplayTasksResponse
	(*TriggerWorkflowRunRequest)(nil),  // 5: TriggerWorkflowRunRequest
	(*TriggerWorkflowRunResponse)(nil), // 6: TriggerWorkflowRunResponse
	(*timestamppb.Timestamp)(nil),      // 7: google.protobuf.Timestamp
}
var file_v1_admin_proto_depIdxs = []int32{
	2, // 0: CancelTasksRequest.filter:type_name -> TasksFilter
	2, // 1: ReplayTasksRequest.filter:type_name -> TasksFilter
	7, // 2: TasksFilter.since:type_name -> google.protobuf.Timestamp
	7, // 3: TasksFilter.until:type_name -> google.protobuf.Timestamp
	0, // 4: AdminService.CancelTasks:input_type -> CancelTasksRequest
	1, // 5: AdminService.ReplayTasks:input_type -> ReplayTasksRequest
	5, // 6: AdminService.TriggerWorkflowRun:input_type -> TriggerWorkflowRunRequest
	3, // 7: AdminService.CancelTasks:output_type -> CancelTasksResponse
	4, // 8: AdminService.ReplayTasks:output_type -> ReplayTasksResponse
	6, // 9: AdminService.TriggerWorkflowRun:output_type -> TriggerWorkflowRunResponse
	7, // [7:10] is the sub-list for method output_type
	4, // [4:7] is the sub-list for method input_type
	4, // [4:4] is the sub-list for extension type_name
	4, // [4:4] is the sub-list for extension extendee
	0, // [0:4] is the sub-list for field type_name
}

func init() { file_v1_admin_proto_init() }
func file_v1_admin_proto_init() {
	if File_v1_admin_proto != nil {
		return
	}
	if !protoimpl.UnsafeEnabled {
		file_v1_admin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*CancelTasksRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_v1_admin_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ReplayTasksRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_v1_admin_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*TasksFilter); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_v1_admin_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*CancelTasksResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_v1_admin_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ReplayTasksResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_v1_admin_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*TriggerWorkflowRunRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_v1_admin_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*TriggerWorkflowRunResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	file_v1_admin_proto_msgTypes[0].OneofWrappers = []interface{}{}
	file_v1_admin_proto_msgTypes[1].OneofWrappers = []interface{}{}
	file_v1_admin_proto_msgTypes[2].OneofWrappers = []interface{}{}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_v1_admin_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   7,
			NumExtensions: 0,
			NumServices:   1,
		},
		GoTypes:           file_v1_admin_proto_goTypes,
		DependencyIndexes: file_v1_admin_proto_depIdxs,
		MessageInfos:      file_v1_admin_proto_msgTypes,
	}.Build()
	File_v1_admin_proto = out.File
	file_v1_admin_proto_rawDesc = nil
	file_v1_admin_proto_goTypes = nil
	file_v1_admin_proto_depIdxs = nil
}
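The deleted bindings above were generated from the old api-contracts/workflows/v1-admin.proto; with the contract moved into api-contracts/v1/workflows.proto, the replacement code is generated into internal/services/shared/proto/v1 by the updated generate script shown earlier.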
@@ -4,7 +4,7 @@ import (
 	"fmt"

 	msgqueue "github.com/hatchet-dev/hatchet/internal/msgqueue/v1"
-	contracts "github.com/hatchet-dev/hatchet/internal/services/admin/contracts/v1"
+	contracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1"
 	v1 "github.com/hatchet-dev/hatchet/pkg/repository/v1"
 	"github.com/hatchet-dev/hatchet/pkg/validator"
 )
@@ -12,8 +12,9 @@ import (
 	"google.golang.org/grpc/status"

 	msgqueue "github.com/hatchet-dev/hatchet/internal/msgqueue/v1"
-	contracts "github.com/hatchet-dev/hatchet/internal/services/admin/contracts/v1"
+	contracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1"
+	tasktypes "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes/v1"
 	"github.com/hatchet-dev/hatchet/pkg/client/types"
 	"github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc"
 	"github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers"
 	v1 "github.com/hatchet-dev/hatchet/pkg/repository/v1"
@@ -364,3 +365,260 @@ func (i *AdminServiceImpl) ingest(ctx context.Context, tenantId string, opts ...

 	return nil
 }

func (a *AdminServiceImpl) PutWorkflow(ctx context.Context, req *contracts.CreateWorkflowVersionRequest) (*contracts.CreateWorkflowVersionResponse, error) {
	tenant := ctx.Value("tenant").(*dbsqlc.Tenant)
	tenantId := sqlchelpers.UUIDToStr(tenant.ID)

	createOpts, err := getCreateWorkflowOpts(req)

	if err != nil {
		return nil, err
	}

	// validate createOpts
	if apiErrors, err := a.v.ValidateAPI(createOpts); err != nil {
		return nil, err
	} else if apiErrors != nil {
		return nil, status.Error(
			codes.InvalidArgument,
			apiErrors.String(),
		)
	}

	currWorkflow, err := a.repo.Workflows().PutWorkflowVersion(
		ctx,
		tenantId,
		createOpts,
	)

	if err != nil {
		return nil, err
	}

	return &contracts.CreateWorkflowVersionResponse{
		Id:         sqlchelpers.UUIDToStr(currWorkflow.WorkflowVersion.ID),
		WorkflowId: sqlchelpers.UUIDToStr(currWorkflow.WorkflowVersion.WorkflowId),
	}, nil
}

func getCreateWorkflowOpts(req *contracts.CreateWorkflowVersionRequest) (*v1.CreateWorkflowVersionOpts, error) {
	tasks, err := getCreateTaskOpts(req.Tasks, "DEFAULT")

	if err != nil {
		if errors.Is(err, v1.ErrDagParentNotFound) {
			// Extract the additional error information
			return nil, status.Error(
				codes.InvalidArgument,
				err.Error(),
			)
		}

		return nil, err
	}

	var onFailureTask *v1.CreateStepOpts

	if req.OnFailureTask != nil {
		onFailureTasks, err := getCreateTaskOpts([]*contracts.CreateTaskOpts{req.OnFailureTask}, "ON_FAILURE")

		if err != nil {
			return nil, err
		}

		if len(onFailureTasks) != 1 {
			return nil, fmt.Errorf("expected 1 on failure task, got %d", len(onFailureTasks))
		}

		onFailureTask = &onFailureTasks[0]
	}

	var sticky *string

	if req.Sticky != nil {
		s := req.Sticky.String()
		sticky = &s
	}

	var concurrency *v1.CreateConcurrencyOpts

	if req.Concurrency != nil {
		if req.Concurrency.Expression == "" {
			return nil, status.Error(
				codes.InvalidArgument,
				"CEL expression is required for concurrency",
			)
		}

		var limitStrategy *string

		if req.Concurrency.LimitStrategy != nil && req.Concurrency.LimitStrategy.String() != "" {
			s := req.Concurrency.LimitStrategy.String()
			limitStrategy = &s
		}

		concurrency = &v1.CreateConcurrencyOpts{
			LimitStrategy: limitStrategy,
			Expression:    req.Concurrency.Expression,
			MaxRuns:       req.Concurrency.MaxRuns,
		}
	}

	var cronInput []byte

	if req.CronInput != nil {
		cronInput = []byte(*req.CronInput)
	}

	return &v1.CreateWorkflowVersionOpts{
		Name:          req.Name,
		Concurrency:   concurrency,
		Description:   &req.Description,
		EventTriggers: req.EventTriggers,
		CronTriggers:  req.CronTriggers,
		CronInput:     cronInput,
		Tasks:         tasks,
		OnFailure:     onFailureTask,
		Sticky:        sticky,
	}, nil
}

func getCreateTaskOpts(tasks []*contracts.CreateTaskOpts, kind string) ([]v1.CreateStepOpts, error) {
	steps := make([]v1.CreateStepOpts, len(tasks))

	stepReadableIdMap := make(map[string]bool)

	for j, step := range tasks {
		stepCp := step

		parsedAction, err := types.ParseActionID(step.Action)

		if err != nil {
			return nil, err
		}

		retries := int(stepCp.Retries)

		stepReadableIdMap[stepCp.ReadableId] = true

		var affinity map[string]v1.DesiredWorkerLabelOpts

		if stepCp.WorkerLabels != nil {
			affinity = map[string]v1.DesiredWorkerLabelOpts{}
			for k, v := range stepCp.WorkerLabels {
				var c *string

				if v.Comparator != nil {
					cPtr := v.Comparator.String()
					c = &cPtr
				}

				affinity[k] = v1.DesiredWorkerLabelOpts{
					Key:        k,
					StrValue:   v.StrValue,
					IntValue:   v.IntValue,
					Required:   v.Required,
					Weight:     v.Weight,
					Comparator: c,
				}
			}
		}

		steps[j] = v1.CreateStepOpts{
			ReadableId:          stepCp.ReadableId,
			Action:              parsedAction.String(),
			Parents:             stepCp.Parents,
			Retries:             &retries,
			DesiredWorkerLabels: affinity,
			TriggerConditions:   make([]v1.CreateStepMatchConditionOpt, 0),
		}

		if stepCp.BackoffFactor != nil {
			f64 := float64(*stepCp.BackoffFactor)
			steps[j].RetryBackoffFactor = &f64

			if stepCp.BackoffMaxSeconds != nil {
				maxInt := int(*stepCp.BackoffMaxSeconds)
				steps[j].RetryBackoffMaxSeconds = &maxInt
			} else {
				maxInt := 24 * 60 * 60
				steps[j].RetryBackoffMaxSeconds = &maxInt
			}
		}

		if stepCp.Timeout != "" {
			steps[j].Timeout = &stepCp.Timeout
		}

		for _, rateLimit := range stepCp.RateLimits {
			opt := v1.CreateWorkflowStepRateLimitOpts{
				Key:       rateLimit.Key,
				KeyExpr:   rateLimit.KeyExpr,
				LimitExpr: rateLimit.LimitValuesExpr,
				UnitsExpr: rateLimit.UnitsExpr,
			}

			if rateLimit.Duration != nil {
				dur := rateLimit.Duration.String()
				opt.Duration = &dur
			}

			if rateLimit.Units != nil {
				units := int(*rateLimit.Units)
				opt.Units = &units
			}

			steps[j].RateLimits = append(steps[j].RateLimits, opt)
		}

		// use the nil-safe generated getters, since conditions is an optional field
		for _, userEventCondition := range stepCp.GetConditions().GetUserEventConditions() {
			eventKey := userEventCondition.UserEventKey

			steps[j].TriggerConditions = append(steps[j].TriggerConditions, v1.CreateStepMatchConditionOpt{
				MatchConditionKind: "USER_EVENT",
				ReadableDataKey:    userEventCondition.Base.ReadableDataKey,
				Action:             userEventCondition.Base.Action.String(),
				OrGroupId:          userEventCondition.Base.OrGroupId,
				Expression:         userEventCondition.Base.Expression,
				EventKey:           &eventKey,
			})
		}

		for _, sleepCondition := range stepCp.GetConditions().GetSleepConditions() {
			duration := sleepCondition.SleepFor

			steps[j].TriggerConditions = append(steps[j].TriggerConditions, v1.CreateStepMatchConditionOpt{
				MatchConditionKind: "SLEEP",
				ReadableDataKey:    sleepCondition.Base.ReadableDataKey,
				Action:             sleepCondition.Base.Action.String(),
				OrGroupId:          sleepCondition.Base.OrGroupId,
				SleepDuration:      &duration,
			})
		}

		for _, parentOverrideCondition := range stepCp.GetConditions().GetParentOverrideConditions() {
			parentReadableId := parentOverrideCondition.ParentReadableId

			steps[j].TriggerConditions = append(steps[j].TriggerConditions, v1.CreateStepMatchConditionOpt{
				MatchConditionKind: "PARENT_OVERRIDE",
				ReadableDataKey:    parentReadableId,
				Action:             parentOverrideCondition.Base.Action.String(),
				Expression:         parentOverrideCondition.Base.Expression,
				OrGroupId:          parentOverrideCondition.Base.OrGroupId,
				ParentReadableId:   &parentReadableId,
			})
		}
	}

	// Check if parents are in the map
	for _, step := range steps {
		for _, parent := range step.Parents {
			if !stepReadableIdMap[parent] {
				return nil, fmt.Errorf("%w: parent step '%s' not found for step '%s'", v1.ErrDagParentNotFound, parent, step.ReadableId)
			}
		}
	}

	return steps, nil
}
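getCreateTaskOpts wraps v1.ErrDagParentNotFound with %w so that getCreateWorkflowOpts can detect it via errors.Is and map it to an InvalidArgument status. A minimal, self-contained illustration of that standard Go pattern:

package main

import (
	"errors"
	"fmt"
)

var ErrDagParentNotFound = errors.New("parent not found")

func build() error {
	// %w keeps the sentinel in the error chain while adding context
	return fmt.Errorf("%w: parent step 'a' not found for step 'b'", ErrDagParentNotFound)
}

func main() {
	err := build()
	fmt.Println(errors.Is(err, ErrDagParentNotFound)) // true
}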
@@ -52,6 +52,7 @@ type TasksControllerImpl struct {
 	timeoutTaskOperations  *queueutils.OperationPool
 	reassignTaskOperations *queueutils.OperationPool
 	retryTaskOperations    *queueutils.OperationPool
+	emitSleepOperations    *queueutils.OperationPool
 }

 type TasksControllerOpt func(*TasksControllerOpts)
@@ -193,6 +194,7 @@ func New(fs ...TasksControllerOpt) (*TasksControllerImpl, error) {
|
||||
}
|
||||
|
||||
t.timeoutTaskOperations = queueutils.NewOperationPool(opts.l, time.Second*5, "timeout step runs", t.processTaskTimeouts)
|
||||
t.emitSleepOperations = queueutils.NewOperationPool(opts.l, time.Second*5, "emit sleep step runs", t.processSleeps)
|
||||
t.reassignTaskOperations = queueutils.NewOperationPool(opts.l, time.Second*5, "reassign step runs", t.processTaskReassignments)
|
||||
t.retryTaskOperations = queueutils.NewOperationPool(opts.l, time.Second*5, "retry step runs", t.processTaskRetryQueueItems)
|
||||
|
||||
@@ -233,6 +235,18 @@ func (tc *TasksControllerImpl) Start() (func() error, error) {
|
||||
return nil, fmt.Errorf("could not schedule step run timeout: %w", err)
|
||||
}
|
||||
|
||||
_, err = tc.s.NewJob(
|
||||
gocron.DurationJob(time.Second*1),
|
||||
gocron.NewTask(
|
||||
tc.runTenantSleepEmitter(ctx),
|
||||
),
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
cancel()
|
||||
return nil, fmt.Errorf("could not schedule step run emit sleep: %w", err)
|
||||
}
|
||||
|
||||
_, err = tc.s.NewJob(
|
||||
gocron.DurationJob(time.Second*1),
|
||||
gocron.NewTask(
|
||||
@@ -732,7 +746,7 @@ func (tc *TasksControllerImpl) handleProcessUserEvents(ctx context.Context, tena
|
||||
|
||||
eg := &errgroup.Group{}
|
||||
|
||||
// TODO: RUN IN THE SAME TRANSACTION
|
||||
// TODO: run these in the same tx or send as separate messages?
|
||||
eg.Go(func() error {
|
||||
return tc.handleProcessUserEventTrigger(ctx, tenantId, msgs)
|
||||
})
|
||||
@@ -778,8 +792,7 @@ func (tc *TasksControllerImpl) handleProcessUserEventTrigger(ctx context.Context
|
||||
|
||||
// handleProcessUserEventMatches is responsible for signaling or creating tasks based on user event matches.
|
||||
func (tc *TasksControllerImpl) handleProcessUserEventMatches(ctx context.Context, tenantId string, payloads []*tasktypes.UserEventTaskPayload) error {
|
||||
// tc.l.Error().Msg("not implemented")
|
||||
return nil
|
||||
return tc.processUserEventMatches(ctx, tenantId, payloads)
|
||||
}
|
||||
|
||||
// handleProcessEventTrigger is responsible for inserting tasks into the database based on event triggers.
|
||||
@@ -829,7 +842,37 @@ func (tc *TasksControllerImpl) sendInternalEvents(ctx context.Context, tenantId
|
||||
)
|
||||
}
|
||||
|
||||
// handleProcessUserEventMatches is responsible for triggering tasks based on user event matches.
|
||||
// processUserEventMatches looks for user event matches
|
||||
func (tc *TasksControllerImpl) processUserEventMatches(ctx context.Context, tenantId string, events []*tasktypes.UserEventTaskPayload) error {
|
||||
candidateMatches := make([]v1.CandidateEventMatch, 0)
|
||||
|
||||
for _, event := range events {
|
||||
candidateMatches = append(candidateMatches, v1.CandidateEventMatch{
|
||||
ID: event.EventId,
|
||||
EventTimestamp: time.Now(),
|
||||
// NOTE: the event type of the V1TaskEvent is the event key for the match condition
|
||||
Key: event.EventKey,
|
||||
Data: event.EventData,
|
||||
})
|
||||
}
|
||||
|
||||
matchResult, err := tc.repov1.Matches().ProcessUserEventMatches(ctx, tenantId, candidateMatches)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not process user event matches: %w", err)
|
||||
}
|
||||
|
||||
if len(matchResult.CreatedTasks) > 0 {
|
||||
err = tc.signalTasksCreated(ctx, tenantId, matchResult.CreatedTasks)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not signal created tasks: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
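A usage sketch of how this is driven; the field names EventId, EventKey, and EventData mirror the reads above, while the id and payload are illustrative:

payloads := []*tasktypes.UserEventTaskPayload{
	{
		EventId:   "8a6e0000-0000-0000-0000-000000000000", // hypothetical external event id
		EventKey:  "payment:completed",
		EventData: []byte(`{"amount": 42}`),
	},
}

if err := tc.processUserEventMatches(ctx, tenantId, payloads); err != nil {
	tc.l.Error().Err(err).Msg("could not process user event matches")
}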

func (tc *TasksControllerImpl) processInternalEvents(ctx context.Context, tenantId string, events []*v1.InternalTaskEvent) error {
	candidateMatches := make([]v1.CandidateEventMatch, 0)

53
internal/services/controllers/v1/task/process_sleeps.go
Normal file
@@ -0,0 +1,53 @@
package task

import (
	"context"
	"fmt"

	"github.com/hatchet-dev/hatchet/internal/telemetry"
	"github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc"
	"github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers"
)

func (tc *TasksControllerImpl) runTenantSleepEmitter(ctx context.Context) func() {
	return func() {
		tc.l.Debug().Msgf("partition: running sleep emitter for tasks")

		// list all tenants
		tenants, err := tc.p.ListTenantsForController(ctx, dbsqlc.TenantMajorEngineVersionV1)

		if err != nil {
			tc.l.Error().Err(err).Msg("could not list tenants")
			return
		}

		tc.emitSleepOperations.SetTenants(tenants)

		for i := range tenants {
			tenantId := sqlchelpers.UUIDToStr(tenants[i].ID)

			tc.emitSleepOperations.RunOrContinue(tenantId)
		}
	}
}

func (tc *TasksControllerImpl) processSleeps(ctx context.Context, tenantId string) (bool, error) {
	ctx, span := telemetry.NewSpan(ctx, "process-sleep")
	defer span.End()

	matchResult, shouldContinue, err := tc.repov1.Tasks().ProcessDurableSleeps(ctx, tenantId)

	if err != nil {
		return false, fmt.Errorf("could not process durable sleeps for tenant %s: %w", tenantId, err)
	}

	if len(matchResult.CreatedTasks) > 0 {
		err = tc.signalTasksCreated(ctx, tenantId, matchResult.CreatedTasks)

		if err != nil {
			return false, fmt.Errorf("could not signal created tasks: %w", err)
		}
	}

	return shouldContinue, nil
}
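This mirrors the scheduling added to Start() above; a minimal sketch assuming the gocron v2 API (github.com/go-co-op/gocron/v2), with tc and ctx coming from the surrounding controller:

func scheduleSleepEmitter(ctx context.Context, tc *TasksControllerImpl) (gocron.Scheduler, error) {
	s, err := gocron.NewScheduler()
	if err != nil {
		return nil, err
	}

	// run the tenant-wide sleep emitter once per second
	if _, err := s.NewJob(
		gocron.DurationJob(time.Second),
		gocron.NewTask(tc.runTenantSleepEmitter(ctx)),
	); err != nil {
		return nil, err
	}

	s.Start()
	return s, nil
}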
91
internal/services/dispatcher/v1/dispatcher.go
Normal file
@@ -0,0 +1,91 @@
package v1

import (
	"fmt"

	msgqueue "github.com/hatchet-dev/hatchet/internal/msgqueue/v1"
	contracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1"
	"github.com/hatchet-dev/hatchet/pkg/logger"
	v1 "github.com/hatchet-dev/hatchet/pkg/repository/v1"
	"github.com/hatchet-dev/hatchet/pkg/validator"
	"github.com/rs/zerolog"
)

type DispatcherService interface {
	contracts.V1DispatcherServer
}

type DispatcherServiceImpl struct {
	contracts.UnimplementedV1DispatcherServer

	repo v1.Repository
	mq   msgqueue.MessageQueue
	v    validator.Validator
	l    *zerolog.Logger
}

type DispatcherServiceOpt func(*DispatcherServiceOpts)

type DispatcherServiceOpts struct {
	repo v1.Repository
	mq   msgqueue.MessageQueue
	v    validator.Validator
	l    *zerolog.Logger
}

func defaultDispatcherServiceOpts() *DispatcherServiceOpts {
	v := validator.NewDefaultValidator()
	logger := logger.NewDefaultLogger("dispatcher")

	return &DispatcherServiceOpts{
		v: v,
		l: &logger,
	}
}

func WithRepository(r v1.Repository) DispatcherServiceOpt {
	return func(opts *DispatcherServiceOpts) {
		opts.repo = r
	}
}

func WithMessageQueue(mq msgqueue.MessageQueue) DispatcherServiceOpt {
	return func(opts *DispatcherServiceOpts) {
		opts.mq = mq
	}
}

func WithValidator(v validator.Validator) DispatcherServiceOpt {
	return func(opts *DispatcherServiceOpts) {
		opts.v = v
	}
}

func WithLogger(l *zerolog.Logger) DispatcherServiceOpt {
	return func(opts *DispatcherServiceOpts) {
		opts.l = l
	}
}

func NewDispatcherService(fs ...DispatcherServiceOpt) (DispatcherService, error) {
	opts := defaultDispatcherServiceOpts()

	for _, f := range fs {
		f(opts)
	}

	if opts.repo == nil {
		return nil, fmt.Errorf("repository is required. use WithRepository")
	}

	if opts.mq == nil {
		return nil, fmt.Errorf("message queue is required. use WithMessageQueue")
	}

	return &DispatcherServiceImpl{
		repo: opts.repo,
		mq:   opts.mq,
		v:    opts.v,
		l:    opts.l,
	}, nil
}
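Callers wire the service with the functional options above; a minimal sketch, where the repo and mq values are assumed to come from the server's config:

svc, err := NewDispatcherService(
	WithRepository(repo),
	WithMessageQueue(mq),
)
if err != nil {
	return fmt.Errorf("could not create v1 dispatcher: %w", err)
}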
293
internal/services/dispatcher/v1/server.go
Normal file
@@ -0,0 +1,293 @@
package v1

import (
	"context"
	"errors"
	"fmt"
	"io"
	"sync"
	"time"

	contracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1"
	"github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc"
	"github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers"
	v1 "github.com/hatchet-dev/hatchet/pkg/repository/v1"
	"github.com/hatchet-dev/hatchet/pkg/repository/v1/sqlcv1"
	"github.com/jackc/pgx/v5/pgtype"
	"github.com/rs/zerolog"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func (d *DispatcherServiceImpl) RegisterDurableEvent(ctx context.Context, req *contracts.RegisterDurableEventRequest) (*contracts.RegisterDurableEventResponse, error) {
	tenant := ctx.Value("tenant").(*dbsqlc.Tenant)
	tenantId := sqlchelpers.UUIDToStr(tenant.ID)

	task, err := d.repo.Tasks().GetTaskByExternalId(ctx, tenantId, req.TaskId, false)

	if err != nil {
		return nil, err
	}

	createConditionOpts := make([]v1.CreateExternalSignalConditionOpt, 0)

	for _, condition := range req.Conditions.SleepConditions {
		createConditionOpts = append(createConditionOpts, v1.CreateExternalSignalConditionOpt{
			Kind:            v1.CreateExternalSignalConditionKindSLEEP,
			ReadableDataKey: condition.Base.ReadableDataKey,
			OrGroupId:       condition.Base.OrGroupId,
			SleepFor:        &condition.SleepFor,
		})
	}

	for _, condition := range req.Conditions.UserEventConditions {
		createConditionOpts = append(createConditionOpts, v1.CreateExternalSignalConditionOpt{
			Kind:            v1.CreateExternalSignalConditionKindUSEREVENT,
			ReadableDataKey: condition.Base.ReadableDataKey,
			OrGroupId:       condition.Base.OrGroupId,
			UserEventKey:    &condition.UserEventKey,
			Expression:      condition.Base.Expression,
		})
	}

	createMatchOpts := make([]v1.ExternalCreateSignalMatchOpts, 0)

	createMatchOpts = append(createMatchOpts, v1.ExternalCreateSignalMatchOpts{
		Conditions:           createConditionOpts,
		SignalTaskId:         task.ID,
		SignalTaskInsertedAt: task.InsertedAt,
		SignalExternalId:     sqlchelpers.UUIDToStr(task.ExternalID),
		SignalKey:            req.SignalKey,
	})

	err = d.repo.Matches().RegisterSignalMatchConditions(ctx, tenantId, createMatchOpts)

	if err != nil {
		return nil, err
	}

	return &contracts.RegisterDurableEventResponse{}, nil
}
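From the worker side, registration looks roughly like the sketch below, assuming a connected *grpc.ClientConn and the protoc-gen-go-grpc constructor NewV1DispatcherClient (the generated gRPC stubs are not shown in this diff); ids and keys are illustrative:

func registerTimeoutSignal(ctx context.Context, conn *grpc.ClientConn) error {
	client := contracts.NewV1DispatcherClient(conn)

	_, err := client.RegisterDurableEvent(ctx, &contracts.RegisterDurableEventRequest{
		TaskId:    "f3f50000-0000-0000-0000-000000000000", // hypothetical external uuid for the task run
		SignalKey: "signal-1",
		Conditions: &contracts.DurableEventListenerConditions{
			SleepConditions: []*contracts.SleepMatchCondition{{
				Base: &contracts.BaseMatchCondition{
					ReadableDataKey: "timeout",
					OrGroupId:       "0b0b0000-0000-0000-0000-000000000000", // hypothetical OR group uuid
				},
				SleepFor: "24h",
			}},
		},
	})

	return err
}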

// durableEventAcks tracks durable event listeners that have not yet been acked:
// it maps each (task id, inserted at, signal key) to the task's external id, and
// an entry is deleted once its event has been sent to the client.
type durableEventAcks struct {
	acks map[v1.TaskIdInsertedAtSignalKey]string
	mu   sync.RWMutex
}

func (w *durableEventAcks) addEvent(taskExternalId string, taskId int64, taskInsertedAt pgtype.Timestamptz, signalKey string) {
	w.mu.Lock()
	defer w.mu.Unlock()

	w.acks[v1.TaskIdInsertedAtSignalKey{
		Id:         taskId,
		InsertedAt: taskInsertedAt,
		SignalKey:  signalKey,
	}] = taskExternalId
}

func (w *durableEventAcks) getNonAckdEvents() []v1.TaskIdInsertedAtSignalKey {
	w.mu.RLock()
	defer w.mu.RUnlock()

	ids := make([]v1.TaskIdInsertedAtSignalKey, 0, len(w.acks))

	for id := range w.acks {
		if w.acks[id] != "" {
			ids = append(ids, id)
		}
	}

	return ids
}

func (w *durableEventAcks) getExternalId(taskId int64, taskInsertedAt pgtype.Timestamptz, signalKey string) string {
	w.mu.Lock()
	defer w.mu.Unlock()

	k := v1.TaskIdInsertedAtSignalKey{
		Id:         taskId,
		InsertedAt: taskInsertedAt,
		SignalKey:  signalKey,
	}

	res := w.acks[k]

	return res
}

func (w *durableEventAcks) ackEvent(taskId int64, taskInsertedAt pgtype.Timestamptz, signalKey string) {
	w.mu.Lock()
	defer w.mu.Unlock()

	k := v1.TaskIdInsertedAtSignalKey{
		Id:         taskId,
		InsertedAt: taskInsertedAt,
		SignalKey:  signalKey,
	}

	delete(w.acks, k)
}
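The intended lifecycle, sketched under the assumption that taskId, insertedAt, and taskExternalId come from a looked-up task row, as in the Recv loop below:

acks := &durableEventAcks{acks: make(map[v1.TaskIdInsertedAtSignalKey]string)}

// client subscribes: remember which external id is waiting on this signal
acks.addEvent(taskExternalId, taskId, insertedAt, "signal-1")

// the one-second poll below picks the key up until it is acked
pending := acks.getNonAckdEvents()

// after server.Send succeeds, the entry is deleted so it is not re-delivered
acks.ackEvent(taskId, insertedAt, "signal-1")
_ = pending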

func (d *DispatcherServiceImpl) ListenForDurableEvent(server contracts.V1Dispatcher_ListenForDurableEventServer) error {
	tenant := server.Context().Value("tenant").(*dbsqlc.Tenant)
	tenantId := sqlchelpers.UUIDToStr(tenant.ID)

	acks := &durableEventAcks{
		acks: make(map[v1.TaskIdInsertedAtSignalKey]string),
	}

	ctx, cancel := context.WithCancel(server.Context())
	defer cancel()

	wg := sync.WaitGroup{}
	sendMu := sync.Mutex{}
	iterMu := sync.Mutex{}

	sendEvent := func(e *sqlcv1.V1TaskEvent) error {
		// FIXME: check max size of msg
		// results := cleanResults(e.Results)

		// if results == nil {
		// 	s.l.Warn().Msgf("results size for workflow run %s exceeds 3MB and cannot be reduced", e.WorkflowRunId)
		// 	e.Results = nil
		// }

		externalId := acks.getExternalId(e.TaskID, e.TaskInsertedAt, e.EventKey.String)

		if externalId == "" {
			d.l.Warn().Msgf("could not find external id for task %d, signal key %s", e.TaskID, e.EventKey.String)
			return fmt.Errorf("could not find external id for task %d, signal key %s", e.TaskID, e.EventKey.String)
		}

		// send the task to the client
		sendMu.Lock()
		err := server.Send(&contracts.DurableEvent{
			TaskId:    externalId,
			SignalKey: e.EventKey.String,
			Data:      e.Data,
		})
		sendMu.Unlock()

		if err != nil {
			d.l.Error().Err(err).Msgf("could not send durable event for task %s, key %s", externalId, e.EventKey.String)
			return err
		}

		acks.ackEvent(e.TaskID, e.TaskInsertedAt, e.EventKey.String)

		return nil
	}

	iter := func(signalEvents []v1.TaskIdInsertedAtSignalKey) error {
		if len(signalEvents) == 0 {
			return nil
		}

		if !iterMu.TryLock() {
			d.l.Warn().Msg("could not acquire lock")
			return nil
		}

		defer iterMu.Unlock()

		signalEvents = signalEvents[:min(1000, len(signalEvents))]
		start := time.Now()

		dbEvents, err := d.repo.Tasks().ListSignalCompletedEvents(ctx, tenantId, signalEvents)

		if err != nil {
			d.l.Error().Err(err).Msg("could not list signal completed events")
			return err
		}

		for _, dbEvent := range dbEvents {
			err := sendEvent(dbEvent)

			if err != nil {
				return err
			}
		}

		if time.Since(start) > 100*time.Millisecond {
			d.l.Warn().Msgf("list durable events for %d signals took %s", len(signalEvents), time.Since(start))
		}

		return nil
	}

	// start a new goroutine to handle client-side streaming
	go func() {
		for {
			req, err := server.Recv()

			if err != nil {
				cancel()
				if errors.Is(err, io.EOF) || status.Code(err) == codes.Canceled {
					return
				}

				d.l.Error().Err(err).Msg("could not receive message from client")
				return
			}

			// FIXME: buffer/batch this to make it more efficient
			task, err := d.repo.Tasks().GetTaskByExternalId(ctx, tenantId, req.TaskId, false)

			if err != nil {
				d.l.Error().Err(err).Msg("could not get task by external id")
				continue
			}

			acks.addEvent(req.TaskId, task.ID, task.InsertedAt, req.SignalKey)
		}
	}()

	// new goroutine to poll every second for finished workflow runs which are not ackd
	go func() {
		ticker := time.NewTicker(1 * time.Second)

		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				signalEvents := acks.getNonAckdEvents()

				if len(signalEvents) == 0 {
					continue
				}

				if err := iter(signalEvents); err != nil {
					d.l.Error().Err(err).Msg("could not iterate over workflow runs")
				}
			}
		}
	}()

	<-ctx.Done()

	// if err := cleanupQueue(); err != nil {
	// 	return fmt.Errorf("could not cleanup queue: %w", err)
	// }

	waitFor(&wg, 60*time.Second, d.l)

	return nil
}

func waitFor(wg *sync.WaitGroup, timeout time.Duration, l *zerolog.Logger) {
	done := make(chan struct{})

	go func() {
		wg.Wait()
		defer close(done)
	}()

	select {
	case <-done:
	case <-time.After(timeout):
		l.Error().Msg("timed out waiting for wait group")
	}
}
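waitFor bounds shutdown: it returns as soon as the group drains, or logs and gives up after the timeout. A usage sketch:

var wg sync.WaitGroup
wg.Add(1)
go func() {
	defer wg.Done()
	// drain any in-flight sends before returning
}()

waitFor(&wg, 60*time.Second, l)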
@@ -145,11 +145,13 @@ func matchServiceName(name string) string {
	switch {
	case strings.HasPrefix(name, "/Dispatcher"):
		return "dispatcher"
+	case strings.HasPrefix(name, "/v1.V1Dispatcher"):
+		return "dispatcher"
	case strings.HasPrefix(name, "/EventsService"):
		return "events"
	case strings.HasPrefix(name, "/WorkflowService"):
		return "workflow"
-	case strings.HasPrefix(name, "/AdminService"):
+	case strings.HasPrefix(name, "/v1.AdminService"):
		return "admin"
	default:
		return "unknown"
@@ -23,13 +23,14 @@ import (

	"github.com/hatchet-dev/hatchet/internal/services/admin"
	admincontracts "github.com/hatchet-dev/hatchet/internal/services/admin/contracts"
-	adminv1contracts "github.com/hatchet-dev/hatchet/internal/services/admin/contracts/v1"
	adminv1 "github.com/hatchet-dev/hatchet/internal/services/admin/v1"
	"github.com/hatchet-dev/hatchet/internal/services/dispatcher"
	dispatchercontracts "github.com/hatchet-dev/hatchet/internal/services/dispatcher/contracts"
+	dispatcherv1 "github.com/hatchet-dev/hatchet/internal/services/dispatcher/v1"
	"github.com/hatchet-dev/hatchet/internal/services/grpc/middleware"
	"github.com/hatchet-dev/hatchet/internal/services/ingestor"
	eventcontracts "github.com/hatchet-dev/hatchet/internal/services/ingestor/contracts"
+	v1contracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1"
	"github.com/hatchet-dev/hatchet/pkg/analytics"
	"github.com/hatchet-dev/hatchet/pkg/config/server"
	"github.com/hatchet-dev/hatchet/pkg/errors"
@@ -40,7 +41,8 @@ type Server struct {
	eventcontracts.UnimplementedEventsServiceServer
	dispatchercontracts.UnimplementedDispatcherServer
	admincontracts.UnimplementedWorkflowServiceServer
-	adminv1contracts.UnimplementedAdminServiceServer
+	v1contracts.UnimplementedAdminServiceServer
+	v1contracts.UnimplementedV1DispatcherServer

	l *zerolog.Logger
	a errors.Alerter
@@ -48,30 +50,32 @@ type Server struct {
	port        int
	bindAddress string

-	config     *server.ServerConfig
-	ingestor   ingestor.Ingestor
-	dispatcher dispatcher.Dispatcher
-	admin      admin.AdminService
-	adminv1    adminv1.AdminService
-	tls        *tls.Config
-	insecure   bool
+	config       *server.ServerConfig
+	ingestor     ingestor.Ingestor
+	dispatcher   dispatcher.Dispatcher
+	dispatcherv1 dispatcherv1.DispatcherService
+	admin        admin.AdminService
+	adminv1      adminv1.AdminService
+	tls          *tls.Config
+	insecure     bool
}

type ServerOpt func(*ServerOpts)

type ServerOpts struct {
-	config      *server.ServerConfig
-	l           *zerolog.Logger
-	a           errors.Alerter
-	analytics   analytics.Analytics
-	port        int
-	bindAddress string
-	ingestor    ingestor.Ingestor
-	dispatcher  dispatcher.Dispatcher
-	admin       admin.AdminService
-	adminv1     adminv1.AdminService
-	tls         *tls.Config
-	insecure    bool
+	config       *server.ServerConfig
+	l            *zerolog.Logger
+	a            errors.Alerter
+	analytics    analytics.Analytics
+	port         int
+	bindAddress  string
+	ingestor     ingestor.Ingestor
+	dispatcher   dispatcher.Dispatcher
+	dispatcherv1 dispatcherv1.DispatcherService
+	admin        admin.AdminService
+	adminv1      adminv1.AdminService
+	tls          *tls.Config
+	insecure     bool
}

func defaultServerOpts() *ServerOpts {
@@ -148,6 +152,12 @@ func WithDispatcher(d dispatcher.Dispatcher) ServerOpt {
	}
}

+func WithDispatcherV1(d dispatcherv1.DispatcherService) ServerOpt {
+	return func(opts *ServerOpts) {
+		opts.dispatcherv1 = d
+	}
+}
+
func WithAdmin(a admin.AdminService) ServerOpt {
	return func(opts *ServerOpts) {
		opts.admin = a
@@ -179,18 +189,19 @@ func NewServer(fs ...ServerOpt) (*Server, error) {
	opts.l = &newLogger

	return &Server{
-		l:           opts.l,
-		a:           opts.a,
-		analytics:   opts.analytics,
-		config:      opts.config,
-		port:        opts.port,
-		bindAddress: opts.bindAddress,
-		ingestor:    opts.ingestor,
-		dispatcher:  opts.dispatcher,
-		admin:       opts.admin,
-		adminv1:     opts.adminv1,
-		tls:         opts.tls,
-		insecure:    opts.insecure,
+		l:            opts.l,
+		a:            opts.a,
+		analytics:    opts.analytics,
+		config:       opts.config,
+		port:         opts.port,
+		bindAddress:  opts.bindAddress,
+		ingestor:     opts.ingestor,
+		dispatcher:   opts.dispatcher,
+		dispatcherv1: opts.dispatcherv1,
+		admin:        opts.admin,
+		adminv1:      opts.adminv1,
+		tls:          opts.tls,
+		insecure:     opts.insecure,
	}, nil
}
@@ -299,12 +310,16 @@ func (s *Server) startGRPC() (func() error, error) {
		dispatchercontracts.RegisterDispatcherServer(grpcServer, s.dispatcher)
	}

+	if s.dispatcherv1 != nil {
+		v1contracts.RegisterV1DispatcherServer(grpcServer, s.dispatcherv1)
+	}
+
	if s.admin != nil {
		admincontracts.RegisterWorkflowServiceServer(grpcServer, s.admin)
	}

	if s.adminv1 != nil {
-		adminv1contracts.RegisterAdminServiceServer(grpcServer, s.adminv1)
+		v1contracts.RegisterAdminServiceServer(grpcServer, s.adminv1)
	}

	go func() {
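Taken together, a wiring sketch using only the options shown in this diff; the other required ServerOpts (config, ingestor, and so on) are assumed to be set elsewhere:

s, err := NewServer(
	WithDispatcher(d),     // legacy dispatcher
	WithDispatcherV1(dv1), // new v1 dispatcher, registered above
	WithAdmin(a),
)
if err != nil {
	return err
}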
@@ -652,12 +652,12 @@ var file_events_proto_rawDesc = []byte{
	0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x2e, 0x50, 0x75, 0x74,
	0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
	0x73, 0x74, 0x1a, 0x17, 0x2e, 0x50, 0x75, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x76,
-	0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x47, 0x5a,
-	0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x74, 0x63,
+	0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x45, 0x5a,
+	0x43, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x74, 0x63,
	0x68, 0x65, 0x74, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2f,
	0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
-	0x73, 0x2f, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x63, 0x6f, 0x6e,
-	0x74, 0x72, 0x61, 0x63, 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+	0x73, 0x2f, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+	0x61, 0x63, 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
641
internal/services/shared/proto/v1/condition.pb.go
Normal file
@@ -0,0 +1,641 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.28.1
// 	protoc        v5.29.3
// source: v1/shared/condition.proto

package v1

import (
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	reflect "reflect"
	sync "sync"
)

const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

type Action int32

const (
	Action_CREATE Action = 0
	Action_QUEUE  Action = 1
	Action_CANCEL Action = 2
	Action_SKIP   Action = 3
)

// Enum value maps for Action.
var (
	Action_name = map[int32]string{
		0: "CREATE",
		1: "QUEUE",
		2: "CANCEL",
		3: "SKIP",
	}
	Action_value = map[string]int32{
		"CREATE": 0,
		"QUEUE":  1,
		"CANCEL": 2,
		"SKIP":   3,
	}
)

func (x Action) Enum() *Action {
	p := new(Action)
	*p = x
	return p
}

func (x Action) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

func (Action) Descriptor() protoreflect.EnumDescriptor {
	return file_v1_shared_condition_proto_enumTypes[0].Descriptor()
}

func (Action) Type() protoreflect.EnumType {
	return &file_v1_shared_condition_proto_enumTypes[0]
}

func (x Action) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use Action.Descriptor instead.
func (Action) EnumDescriptor() ([]byte, []int) {
	return file_v1_shared_condition_proto_rawDescGZIP(), []int{0}
}

type BaseMatchCondition struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	ReadableDataKey string `protobuf:"bytes,1,opt,name=readable_data_key,json=readableDataKey,proto3" json:"readable_data_key,omitempty"`
	Action          Action `protobuf:"varint,2,opt,name=action,proto3,enum=v1.Action" json:"action,omitempty"`
	OrGroupId       string `protobuf:"bytes,3,opt,name=or_group_id,json=orGroupId,proto3" json:"or_group_id,omitempty"` // a UUID defining the OR group for this condition
	Expression      string `protobuf:"bytes,4,opt,name=expression,proto3" json:"expression,omitempty"`
}

func (x *BaseMatchCondition) Reset() {
	*x = BaseMatchCondition{}
	if protoimpl.UnsafeEnabled {
		mi := &file_v1_shared_condition_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *BaseMatchCondition) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*BaseMatchCondition) ProtoMessage() {}

func (x *BaseMatchCondition) ProtoReflect() protoreflect.Message {
	mi := &file_v1_shared_condition_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use BaseMatchCondition.ProtoReflect.Descriptor instead.
func (*BaseMatchCondition) Descriptor() ([]byte, []int) {
	return file_v1_shared_condition_proto_rawDescGZIP(), []int{0}
}

func (x *BaseMatchCondition) GetReadableDataKey() string {
	if x != nil {
		return x.ReadableDataKey
	}
	return ""
}

func (x *BaseMatchCondition) GetAction() Action {
	if x != nil {
		return x.Action
	}
	return Action_CREATE
}

func (x *BaseMatchCondition) GetOrGroupId() string {
	if x != nil {
		return x.OrGroupId
	}
	return ""
}

func (x *BaseMatchCondition) GetExpression() string {
	if x != nil {
		return x.Expression
	}
	return ""
}

type ParentOverrideMatchCondition struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Base             *BaseMatchCondition `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"`
	ParentReadableId string              `protobuf:"bytes,2,opt,name=parent_readable_id,json=parentReadableId,proto3" json:"parent_readable_id,omitempty"`
}

func (x *ParentOverrideMatchCondition) Reset() {
	*x = ParentOverrideMatchCondition{}
	if protoimpl.UnsafeEnabled {
		mi := &file_v1_shared_condition_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ParentOverrideMatchCondition) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ParentOverrideMatchCondition) ProtoMessage() {}

func (x *ParentOverrideMatchCondition) ProtoReflect() protoreflect.Message {
	mi := &file_v1_shared_condition_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ParentOverrideMatchCondition.ProtoReflect.Descriptor instead.
func (*ParentOverrideMatchCondition) Descriptor() ([]byte, []int) {
	return file_v1_shared_condition_proto_rawDescGZIP(), []int{1}
}

func (x *ParentOverrideMatchCondition) GetBase() *BaseMatchCondition {
	if x != nil {
		return x.Base
	}
	return nil
}

func (x *ParentOverrideMatchCondition) GetParentReadableId() string {
	if x != nil {
		return x.ParentReadableId
	}
	return ""
}

type SleepMatchCondition struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Base     *BaseMatchCondition `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"`
	SleepFor string              `protobuf:"bytes,2,opt,name=sleep_for,json=sleepFor,proto3" json:"sleep_for,omitempty"` // a duration string indicating how long to sleep
}

func (x *SleepMatchCondition) Reset() {
	*x = SleepMatchCondition{}
	if protoimpl.UnsafeEnabled {
		mi := &file_v1_shared_condition_proto_msgTypes[2]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *SleepMatchCondition) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*SleepMatchCondition) ProtoMessage() {}

func (x *SleepMatchCondition) ProtoReflect() protoreflect.Message {
	mi := &file_v1_shared_condition_proto_msgTypes[2]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SleepMatchCondition.ProtoReflect.Descriptor instead.
func (*SleepMatchCondition) Descriptor() ([]byte, []int) {
	return file_v1_shared_condition_proto_rawDescGZIP(), []int{2}
}

func (x *SleepMatchCondition) GetBase() *BaseMatchCondition {
	if x != nil {
		return x.Base
	}
	return nil
}

func (x *SleepMatchCondition) GetSleepFor() string {
	if x != nil {
		return x.SleepFor
	}
	return ""
}

type UserEventMatchCondition struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Base         *BaseMatchCondition `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"`
	UserEventKey string              `protobuf:"bytes,2,opt,name=user_event_key,json=userEventKey,proto3" json:"user_event_key,omitempty"`
}

func (x *UserEventMatchCondition) Reset() {
	*x = UserEventMatchCondition{}
	if protoimpl.UnsafeEnabled {
		mi := &file_v1_shared_condition_proto_msgTypes[3]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *UserEventMatchCondition) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*UserEventMatchCondition) ProtoMessage() {}

func (x *UserEventMatchCondition) ProtoReflect() protoreflect.Message {
	mi := &file_v1_shared_condition_proto_msgTypes[3]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UserEventMatchCondition.ProtoReflect.Descriptor instead.
func (*UserEventMatchCondition) Descriptor() ([]byte, []int) {
	return file_v1_shared_condition_proto_rawDescGZIP(), []int{3}
}

func (x *UserEventMatchCondition) GetBase() *BaseMatchCondition {
	if x != nil {
		return x.Base
	}
	return nil
}

func (x *UserEventMatchCondition) GetUserEventKey() string {
	if x != nil {
		return x.UserEventKey
	}
	return ""
}

type TaskConditions struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	ParentOverrideConditions []*ParentOverrideMatchCondition `protobuf:"bytes,1,rep,name=parent_override_conditions,json=parentOverrideConditions,proto3" json:"parent_override_conditions,omitempty"`
	SleepConditions          []*SleepMatchCondition          `protobuf:"bytes,2,rep,name=sleep_conditions,json=sleepConditions,proto3" json:"sleep_conditions,omitempty"`
	UserEventConditions      []*UserEventMatchCondition      `protobuf:"bytes,3,rep,name=user_event_conditions,json=userEventConditions,proto3" json:"user_event_conditions,omitempty"`
}

func (x *TaskConditions) Reset() {
	*x = TaskConditions{}
	if protoimpl.UnsafeEnabled {
		mi := &file_v1_shared_condition_proto_msgTypes[4]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *TaskConditions) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TaskConditions) ProtoMessage() {}

func (x *TaskConditions) ProtoReflect() protoreflect.Message {
	mi := &file_v1_shared_condition_proto_msgTypes[4]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TaskConditions.ProtoReflect.Descriptor instead.
func (*TaskConditions) Descriptor() ([]byte, []int) {
	return file_v1_shared_condition_proto_rawDescGZIP(), []int{4}
}

func (x *TaskConditions) GetParentOverrideConditions() []*ParentOverrideMatchCondition {
	if x != nil {
		return x.ParentOverrideConditions
	}
	return nil
}

func (x *TaskConditions) GetSleepConditions() []*SleepMatchCondition {
	if x != nil {
		return x.SleepConditions
	}
	return nil
}

func (x *TaskConditions) GetUserEventConditions() []*UserEventMatchCondition {
	if x != nil {
		return x.UserEventConditions
	}
	return nil
}

type DurableEventListenerConditions struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	SleepConditions     []*SleepMatchCondition     `protobuf:"bytes,1,rep,name=sleep_conditions,json=sleepConditions,proto3" json:"sleep_conditions,omitempty"`
	UserEventConditions []*UserEventMatchCondition `protobuf:"bytes,2,rep,name=user_event_conditions,json=userEventConditions,proto3" json:"user_event_conditions,omitempty"`
}

func (x *DurableEventListenerConditions) Reset() {
	*x = DurableEventListenerConditions{}
	if protoimpl.UnsafeEnabled {
		mi := &file_v1_shared_condition_proto_msgTypes[5]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *DurableEventListenerConditions) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DurableEventListenerConditions) ProtoMessage() {}

func (x *DurableEventListenerConditions) ProtoReflect() protoreflect.Message {
	mi := &file_v1_shared_condition_proto_msgTypes[5]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DurableEventListenerConditions.ProtoReflect.Descriptor instead.
func (*DurableEventListenerConditions) Descriptor() ([]byte, []int) {
	return file_v1_shared_condition_proto_rawDescGZIP(), []int{5}
}

func (x *DurableEventListenerConditions) GetSleepConditions() []*SleepMatchCondition {
	if x != nil {
		return x.SleepConditions
	}
	return nil
}

func (x *DurableEventListenerConditions) GetUserEventConditions() []*UserEventMatchCondition {
	if x != nil {
		return x.UserEventConditions
	}
	return nil
}

var File_v1_shared_condition_proto protoreflect.FileDescriptor

var file_v1_shared_condition_proto_rawDesc = []byte{
	0x0a, 0x19, 0x76, 0x31, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x64,
	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x76, 0x31, 0x22,
	0xa4, 0x01, 0x0a, 0x12, 0x42, 0x61, 0x73, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x43, 0x6f, 0x6e,
	0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x72, 0x65, 0x61, 0x64, 0x61, 0x62,
	0x6c, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
	0x09, 0x52, 0x0f, 0x72, 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x4b,
	0x65, 0x79, 0x12, 0x22, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01,
	0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06,
	0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0b, 0x6f, 0x72, 0x5f, 0x67, 0x72, 0x6f,
	0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x72, 0x47,
	0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73,
	0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x72,
	0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x78, 0x0a, 0x1c, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74,
	0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x43, 0x6f, 0x6e,
	0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x04, 0x62, 0x61, 0x73, 0x65, 0x18, 0x01,
	0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x73, 0x65, 0x4d, 0x61,
	0x74, 0x63, 0x68, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x62, 0x61,
	0x73, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x61,
	0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10,
	0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x64,
	0x22, 0x5e, 0x0a, 0x13, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x43, 0x6f,
	0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x04, 0x62, 0x61, 0x73, 0x65, 0x18,
	0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x73, 0x65, 0x4d,
	0x61, 0x74, 0x63, 0x68, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x62,
	0x61, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6c, 0x65, 0x65, 0x70, 0x5f, 0x66, 0x6f, 0x72,
	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x6c, 0x65, 0x65, 0x70, 0x46, 0x6f, 0x72,
	0x22, 0x6b, 0x0a, 0x17, 0x55, 0x73, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74,
	0x63, 0x68, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x04, 0x62,
	0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x31, 0x2e, 0x42,
	0x61, 0x73, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f,
	0x6e, 0x52, 0x04, 0x62, 0x61, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x75, 0x73, 0x65, 0x72, 0x5f,
	0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
	0x0c, 0x75, 0x73, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x22, 0x85, 0x02,
	0x0a, 0x0e, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73,
	0x12, 0x5e, 0x0a, 0x1a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72,
	0x69, 0x64, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01,
	0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74,
	0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x43, 0x6f, 0x6e,
	0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4f, 0x76,
	0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73,
	0x12, 0x42, 0x0a, 0x10, 0x73, 0x6c, 0x65, 0x65, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74,
	0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x31, 0x2e,
	0x53, 0x6c, 0x65, 0x65, 0x70, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74,
	0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x73, 0x6c, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74,
	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4f, 0x0a, 0x15, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x65, 0x76, 0x65,
	0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20,
	0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x45, 0x76, 0x65,
	0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
	0x52, 0x13, 0x75, 0x73, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x64, 0x69,
	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xb5, 0x01, 0x0a, 0x1e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c,
	0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f,
	0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x42, 0x0a, 0x10, 0x73, 0x6c, 0x65, 0x65,
	0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03,
	0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x4d, 0x61, 0x74,
	0x63, 0x68, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x73, 0x6c, 0x65,
	0x65, 0x70, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4f, 0x0a, 0x15,
	0x75, 0x73, 0x65, 0x72, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69,
	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x76, 0x31,
	0x2e, 0x55, 0x73, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x43,
	0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x73, 0x65, 0x72, 0x45, 0x76,
	0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2a, 0x35, 0x0a,
	0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x52, 0x45, 0x41, 0x54,
	0x45, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x51, 0x55, 0x45, 0x55, 0x45, 0x10, 0x01, 0x12, 0x0a,
	0x0a, 0x06, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x4b,
	0x49, 0x50, 0x10, 0x03, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
	0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x68,
	0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f,
	0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2f,
	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
	file_v1_shared_condition_proto_rawDescOnce sync.Once
	file_v1_shared_condition_proto_rawDescData = file_v1_shared_condition_proto_rawDesc
)

func file_v1_shared_condition_proto_rawDescGZIP() []byte {
	file_v1_shared_condition_proto_rawDescOnce.Do(func() {
		file_v1_shared_condition_proto_rawDescData = protoimpl.X.CompressGZIP(file_v1_shared_condition_proto_rawDescData)
	})
	return file_v1_shared_condition_proto_rawDescData
}

var file_v1_shared_condition_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_v1_shared_condition_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
var file_v1_shared_condition_proto_goTypes = []interface{}{
	(Action)(0),                            // 0: v1.Action
	(*BaseMatchCondition)(nil),             // 1: v1.BaseMatchCondition
	(*ParentOverrideMatchCondition)(nil),   // 2: v1.ParentOverrideMatchCondition
	(*SleepMatchCondition)(nil),            // 3: v1.SleepMatchCondition
	(*UserEventMatchCondition)(nil),        // 4: v1.UserEventMatchCondition
	(*TaskConditions)(nil),                 // 5: v1.TaskConditions
	(*DurableEventListenerConditions)(nil), // 6: v1.DurableEventListenerConditions
}
var file_v1_shared_condition_proto_depIdxs = []int32{
	0, // 0: v1.BaseMatchCondition.action:type_name -> v1.Action
	1, // 1: v1.ParentOverrideMatchCondition.base:type_name -> v1.BaseMatchCondition
	1, // 2: v1.SleepMatchCondition.base:type_name -> v1.BaseMatchCondition
	1, // 3: v1.UserEventMatchCondition.base:type_name -> v1.BaseMatchCondition
	2, // 4: v1.TaskConditions.parent_override_conditions:type_name -> v1.ParentOverrideMatchCondition
	3, // 5: v1.TaskConditions.sleep_conditions:type_name -> v1.SleepMatchCondition
	4, // 6: v1.TaskConditions.user_event_conditions:type_name -> v1.UserEventMatchCondition
	3, // 7: v1.DurableEventListenerConditions.sleep_conditions:type_name -> v1.SleepMatchCondition
	4, // 8: v1.DurableEventListenerConditions.user_event_conditions:type_name -> v1.UserEventMatchCondition
	9, // [9:9] is the sub-list for method output_type
	9, // [9:9] is the sub-list for method input_type
	9, // [9:9] is the sub-list for extension type_name
	9, // [9:9] is the sub-list for extension extendee
	0, // [0:9] is the sub-list for field type_name
}

func init() { file_v1_shared_condition_proto_init() }
func file_v1_shared_condition_proto_init() {
	if File_v1_shared_condition_proto != nil {
		return
	}
	if !protoimpl.UnsafeEnabled {
		file_v1_shared_condition_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*BaseMatchCondition); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_v1_shared_condition_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ParentOverrideMatchCondition); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_v1_shared_condition_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*SleepMatchCondition); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_v1_shared_condition_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*UserEventMatchCondition); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_v1_shared_condition_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*TaskConditions); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_v1_shared_condition_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*DurableEventListenerConditions); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_v1_shared_condition_proto_rawDesc,
			NumEnums:      1,
			NumMessages:   6,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_v1_shared_condition_proto_goTypes,
		DependencyIndexes: file_v1_shared_condition_proto_depIdxs,
		EnumInfos:         file_v1_shared_condition_proto_enumTypes,
		MessageInfos:      file_v1_shared_condition_proto_msgTypes,
	}.Build()
	File_v1_shared_condition_proto = out.File
	file_v1_shared_condition_proto_rawDesc = nil
	file_v1_shared_condition_proto_goTypes = nil
	file_v1_shared_condition_proto_depIdxs = nil
}
398
internal/services/shared/proto/v1/dispatcher.pb.go
Normal file
@@ -0,0 +1,398 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.28.1
// 	protoc        v5.29.3
// source: v1/dispatcher.proto

package v1

import (
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	reflect "reflect"
	sync "sync"
)

const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

type RegisterDurableEventRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	TaskId     string                          `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`          // external uuid for the task run
	SignalKey  string                          `protobuf:"bytes,2,opt,name=signal_key,json=signalKey,proto3" json:"signal_key,omitempty"` // the signal key for the event
	Conditions *DurableEventListenerConditions `protobuf:"bytes,3,opt,name=conditions,proto3" json:"conditions,omitempty"`                // the task conditions for creating the task
}

func (x *RegisterDurableEventRequest) Reset() {
	*x = RegisterDurableEventRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_v1_dispatcher_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *RegisterDurableEventRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*RegisterDurableEventRequest) ProtoMessage() {}

func (x *RegisterDurableEventRequest) ProtoReflect() protoreflect.Message {
	mi := &file_v1_dispatcher_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RegisterDurableEventRequest.ProtoReflect.Descriptor instead.
func (*RegisterDurableEventRequest) Descriptor() ([]byte, []int) {
	return file_v1_dispatcher_proto_rawDescGZIP(), []int{0}
}

func (x *RegisterDurableEventRequest) GetTaskId() string {
	if x != nil {
		return x.TaskId
	}
	return ""
}

func (x *RegisterDurableEventRequest) GetSignalKey() string {
	if x != nil {
		return x.SignalKey
	}
	return ""
}

func (x *RegisterDurableEventRequest) GetConditions() *DurableEventListenerConditions {
	if x != nil {
		return x.Conditions
	}
	return nil
}

type RegisterDurableEventResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
}

func (x *RegisterDurableEventResponse) Reset() {
	*x = RegisterDurableEventResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_v1_dispatcher_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *RegisterDurableEventResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*RegisterDurableEventResponse) ProtoMessage() {}

func (x *RegisterDurableEventResponse) ProtoReflect() protoreflect.Message {
	mi := &file_v1_dispatcher_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RegisterDurableEventResponse.ProtoReflect.Descriptor instead.
func (*RegisterDurableEventResponse) Descriptor() ([]byte, []int) {
	return file_v1_dispatcher_proto_rawDescGZIP(), []int{1}
}

type ListenForDurableEventRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	TaskId    string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`          // single listener per worker
	SignalKey string `protobuf:"bytes,2,opt,name=signal_key,json=signalKey,proto3" json:"signal_key,omitempty"` // the match id for the listener
}

func (x *ListenForDurableEventRequest) Reset() {
	*x = ListenForDurableEventRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_v1_dispatcher_proto_msgTypes[2]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ListenForDurableEventRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ListenForDurableEventRequest) ProtoMessage() {}

func (x *ListenForDurableEventRequest) ProtoReflect() protoreflect.Message {
	mi := &file_v1_dispatcher_proto_msgTypes[2]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListenForDurableEventRequest.ProtoReflect.Descriptor instead.
func (*ListenForDurableEventRequest) Descriptor() ([]byte, []int) {
	return file_v1_dispatcher_proto_rawDescGZIP(), []int{2}
}

func (x *ListenForDurableEventRequest) GetTaskId() string {
	if x != nil {
		return x.TaskId
	}
	return ""
}

func (x *ListenForDurableEventRequest) GetSignalKey() string {
	if x != nil {
		return x.SignalKey
	}
	return ""
}

type DurableEvent struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	TaskId    string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
	SignalKey string `protobuf:"bytes,2,opt,name=signal_key,json=signalKey,proto3" json:"signal_key,omitempty"`
	Data      []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` // the data for the event
}

func (x *DurableEvent) Reset() {
	*x = DurableEvent{}
	if protoimpl.UnsafeEnabled {
		mi := &file_v1_dispatcher_proto_msgTypes[3]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *DurableEvent) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DurableEvent) ProtoMessage() {}

func (x *DurableEvent) ProtoReflect() protoreflect.Message {
	mi := &file_v1_dispatcher_proto_msgTypes[3]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DurableEvent.ProtoReflect.Descriptor instead.
func (*DurableEvent) Descriptor() ([]byte, []int) {
	return file_v1_dispatcher_proto_rawDescGZIP(), []int{3}
}

func (x *DurableEvent) GetTaskId() string {
	if x != nil {
		return x.TaskId
	}
	return ""
}

func (x *DurableEvent) GetSignalKey() string {
	if x != nil {
		return x.SignalKey
	}
	return ""
}

func (x *DurableEvent) GetData() []byte {
	if x != nil {
		return x.Data
	}
	return nil
}

var File_v1_dispatcher_proto protoreflect.FileDescriptor

var file_v1_dispatcher_proto_rawDesc = []byte{
	0x0a, 0x13, 0x76, 0x31, 0x2f, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x76, 0x31, 0x1a, 0x19, 0x76, 0x31, 0x2f, 0x73, 0x68,
|
||||
0x61, 0x72, 0x65, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x22, 0x99, 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65,
|
||||
0x72, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x1d, 0x0a,
|
||||
0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x12, 0x42, 0x0a, 0x0a,
|
||||
0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
|
||||
0x32, 0x22, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65,
|
||||
0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73,
|
||||
0x22, 0x1e, 0x0a, 0x1c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x44, 0x75, 0x72, 0x61,
|
||||
0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x22, 0x56, 0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x46, 0x6f, 0x72, 0x44, 0x75, 0x72,
|
||||
0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x67,
|
||||
0x6e, 0x61, 0x6c, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73,
|
||||
0x69, 0x67, 0x6e, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x22, 0x5a, 0x0a, 0x0c, 0x44, 0x75, 0x72, 0x61,
|
||||
0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b,
|
||||
0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49,
|
||||
0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x5f, 0x6b, 0x65, 0x79, 0x18,
|
||||
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x4b, 0x65, 0x79,
|
||||
0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04,
|
||||
0x64, 0x61, 0x74, 0x61, 0x32, 0xbe, 0x01, 0x0a, 0x0c, 0x56, 0x31, 0x44, 0x69, 0x73, 0x70, 0x61,
|
||||
0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x14, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65,
|
||||
0x72, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1f, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x44, 0x75, 0x72, 0x61, 0x62,
|
||||
0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x44, 0x75, 0x72, 0x61,
|
||||
0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x22, 0x00, 0x12, 0x51, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x46, 0x6f, 0x72, 0x44,
|
||||
0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x20, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x46, 0x6f, 0x72, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c,
|
||||
0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22,
|
||||
0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
|
||||
0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2d, 0x64, 0x65, 0x76, 0x2f,
|
||||
0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
|
||||
0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64,
|
||||
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_v1_dispatcher_proto_rawDescOnce sync.Once
|
||||
file_v1_dispatcher_proto_rawDescData = file_v1_dispatcher_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_v1_dispatcher_proto_rawDescGZIP() []byte {
|
||||
file_v1_dispatcher_proto_rawDescOnce.Do(func() {
|
||||
file_v1_dispatcher_proto_rawDescData = protoimpl.X.CompressGZIP(file_v1_dispatcher_proto_rawDescData)
|
||||
})
|
||||
return file_v1_dispatcher_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_v1_dispatcher_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
|
||||
var file_v1_dispatcher_proto_goTypes = []interface{}{
|
||||
(*RegisterDurableEventRequest)(nil), // 0: v1.RegisterDurableEventRequest
|
||||
(*RegisterDurableEventResponse)(nil), // 1: v1.RegisterDurableEventResponse
|
||||
(*ListenForDurableEventRequest)(nil), // 2: v1.ListenForDurableEventRequest
|
||||
(*DurableEvent)(nil), // 3: v1.DurableEvent
|
||||
(*DurableEventListenerConditions)(nil), // 4: v1.DurableEventListenerConditions
|
||||
}
|
||||
var file_v1_dispatcher_proto_depIdxs = []int32{
|
||||
4, // 0: v1.RegisterDurableEventRequest.conditions:type_name -> v1.DurableEventListenerConditions
|
||||
0, // 1: v1.V1Dispatcher.RegisterDurableEvent:input_type -> v1.RegisterDurableEventRequest
|
||||
2, // 2: v1.V1Dispatcher.ListenForDurableEvent:input_type -> v1.ListenForDurableEventRequest
|
||||
1, // 3: v1.V1Dispatcher.RegisterDurableEvent:output_type -> v1.RegisterDurableEventResponse
|
||||
3, // 4: v1.V1Dispatcher.ListenForDurableEvent:output_type -> v1.DurableEvent
|
||||
3, // [3:5] is the sub-list for method output_type
|
||||
1, // [1:3] is the sub-list for method input_type
|
||||
1, // [1:1] is the sub-list for extension type_name
|
||||
1, // [1:1] is the sub-list for extension extendee
|
||||
0, // [0:1] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_v1_dispatcher_proto_init() }
|
||||
func file_v1_dispatcher_proto_init() {
|
||||
if File_v1_dispatcher_proto != nil {
|
||||
return
|
||||
}
|
||||
file_v1_shared_condition_proto_init()
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_v1_dispatcher_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*RegisterDurableEventRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_v1_dispatcher_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*RegisterDurableEventResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_v1_dispatcher_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ListenForDurableEventRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_v1_dispatcher_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DurableEvent); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_v1_dispatcher_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 4,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
GoTypes: file_v1_dispatcher_proto_goTypes,
|
||||
DependencyIndexes: file_v1_dispatcher_proto_depIdxs,
|
||||
MessageInfos: file_v1_dispatcher_proto_msgTypes,
|
||||
}.Build()
|
||||
File_v1_dispatcher_proto = out.File
|
||||
file_v1_dispatcher_proto_rawDesc = nil
|
||||
file_v1_dispatcher_proto_goTypes = nil
|
||||
file_v1_dispatcher_proto_depIdxs = nil
|
||||
}
|
||||
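For orientation, a minimal sketch of how these generated types compose; the values below are placeholders, and the condition types (`BaseMatchCondition`, `SleepMatchCondition`, `Action_CREATE`) come from the shared condition contract's generated code:

// Sketch only: register a durable listener that wakes the task after a
// 10-minute sleep. Field values are illustrative placeholders.
func buildRegisterRequest(taskExternalId, signalKey, orGroupId string) *RegisterDurableEventRequest {
	return &RegisterDurableEventRequest{
		TaskId:    taskExternalId, // external UUID for the task run
		SignalKey: signalKey,
		Conditions: &DurableEventListenerConditions{
			SleepConditions: []*SleepMatchCondition{{
				Base: &BaseMatchCondition{
					ReadableDataKey: "sleep",
					Action:          Action_CREATE,
					OrGroupId:       orGroupId, // UUID defining the OR group for this condition
				},
				SleepFor: "10m", // duration string
			}},
		},
	}
}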
174
internal/services/shared/proto/v1/dispatcher_grpc.pb.go
Normal file
@@ -0,0 +1,174 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc v5.29.3
// source: v1/dispatcher.proto

package v1

import (
	context "context"
	grpc "google.golang.org/grpc"
	codes "google.golang.org/grpc/codes"
	status "google.golang.org/grpc/status"
)

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7

// V1DispatcherClient is the client API for V1Dispatcher service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type V1DispatcherClient interface {
	RegisterDurableEvent(ctx context.Context, in *RegisterDurableEventRequest, opts ...grpc.CallOption) (*RegisterDurableEventResponse, error)
	ListenForDurableEvent(ctx context.Context, opts ...grpc.CallOption) (V1Dispatcher_ListenForDurableEventClient, error)
}

type v1DispatcherClient struct {
	cc grpc.ClientConnInterface
}

func NewV1DispatcherClient(cc grpc.ClientConnInterface) V1DispatcherClient {
	return &v1DispatcherClient{cc}
}

func (c *v1DispatcherClient) RegisterDurableEvent(ctx context.Context, in *RegisterDurableEventRequest, opts ...grpc.CallOption) (*RegisterDurableEventResponse, error) {
	out := new(RegisterDurableEventResponse)
	err := c.cc.Invoke(ctx, "/v1.V1Dispatcher/RegisterDurableEvent", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *v1DispatcherClient) ListenForDurableEvent(ctx context.Context, opts ...grpc.CallOption) (V1Dispatcher_ListenForDurableEventClient, error) {
	stream, err := c.cc.NewStream(ctx, &V1Dispatcher_ServiceDesc.Streams[0], "/v1.V1Dispatcher/ListenForDurableEvent", opts...)
	if err != nil {
		return nil, err
	}
	x := &v1DispatcherListenForDurableEventClient{stream}
	return x, nil
}

type V1Dispatcher_ListenForDurableEventClient interface {
	Send(*ListenForDurableEventRequest) error
	Recv() (*DurableEvent, error)
	grpc.ClientStream
}

type v1DispatcherListenForDurableEventClient struct {
	grpc.ClientStream
}

func (x *v1DispatcherListenForDurableEventClient) Send(m *ListenForDurableEventRequest) error {
	return x.ClientStream.SendMsg(m)
}

func (x *v1DispatcherListenForDurableEventClient) Recv() (*DurableEvent, error) {
	m := new(DurableEvent)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}

// V1DispatcherServer is the server API for V1Dispatcher service.
// All implementations must embed UnimplementedV1DispatcherServer
// for forward compatibility
type V1DispatcherServer interface {
	RegisterDurableEvent(context.Context, *RegisterDurableEventRequest) (*RegisterDurableEventResponse, error)
	ListenForDurableEvent(V1Dispatcher_ListenForDurableEventServer) error
	mustEmbedUnimplementedV1DispatcherServer()
}

// UnimplementedV1DispatcherServer must be embedded to have forward compatible implementations.
type UnimplementedV1DispatcherServer struct {
}

func (UnimplementedV1DispatcherServer) RegisterDurableEvent(context.Context, *RegisterDurableEventRequest) (*RegisterDurableEventResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method RegisterDurableEvent not implemented")
}
func (UnimplementedV1DispatcherServer) ListenForDurableEvent(V1Dispatcher_ListenForDurableEventServer) error {
	return status.Errorf(codes.Unimplemented, "method ListenForDurableEvent not implemented")
}
func (UnimplementedV1DispatcherServer) mustEmbedUnimplementedV1DispatcherServer() {}

// UnsafeV1DispatcherServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to V1DispatcherServer will
// result in compilation errors.
type UnsafeV1DispatcherServer interface {
	mustEmbedUnimplementedV1DispatcherServer()
}

func RegisterV1DispatcherServer(s grpc.ServiceRegistrar, srv V1DispatcherServer) {
	s.RegisterService(&V1Dispatcher_ServiceDesc, srv)
}

func _V1Dispatcher_RegisterDurableEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(RegisterDurableEventRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(V1DispatcherServer).RegisterDurableEvent(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/v1.V1Dispatcher/RegisterDurableEvent",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(V1DispatcherServer).RegisterDurableEvent(ctx, req.(*RegisterDurableEventRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _V1Dispatcher_ListenForDurableEvent_Handler(srv interface{}, stream grpc.ServerStream) error {
	return srv.(V1DispatcherServer).ListenForDurableEvent(&v1DispatcherListenForDurableEventServer{stream})
}

type V1Dispatcher_ListenForDurableEventServer interface {
	Send(*DurableEvent) error
	Recv() (*ListenForDurableEventRequest, error)
	grpc.ServerStream
}

type v1DispatcherListenForDurableEventServer struct {
	grpc.ServerStream
}

func (x *v1DispatcherListenForDurableEventServer) Send(m *DurableEvent) error {
	return x.ServerStream.SendMsg(m)
}

func (x *v1DispatcherListenForDurableEventServer) Recv() (*ListenForDurableEventRequest, error) {
	m := new(ListenForDurableEventRequest)
	if err := x.ServerStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}

// V1Dispatcher_ServiceDesc is the grpc.ServiceDesc for V1Dispatcher service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var V1Dispatcher_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "v1.V1Dispatcher",
	HandlerType: (*V1DispatcherServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "RegisterDurableEvent",
			Handler:    _V1Dispatcher_RegisterDurableEvent_Handler,
		},
	},
	Streams: []grpc.StreamDesc{
		{
			StreamName:    "ListenForDurableEvent",
			Handler:       _V1Dispatcher_ListenForDurableEvent_Handler,
			ServerStreams: true,
			ClientStreams: true,
		},
	},
	Metadata: "v1/dispatcher.proto",
}
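Since ListenForDurableEvent is a bidirectional stream, a minimal client sketch helps show the protocol: the worker sends (task, signal) subscriptions on the stream and receives DurableEvent messages back as conditions are satisfied. The address, credentials, and IDs below are placeholders, and the import assumes in-repo usage of the internal package:

package main

import (
	"context"
	"log"

	grpc "google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	v1 "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1"
)

func main() {
	// Placeholder endpoint; real deployments would use TLS and auth.
	conn, err := grpc.Dial("localhost:7070", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := v1.NewV1DispatcherClient(conn)
	ctx := context.Background()

	stream, err := client.ListenForDurableEvent(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Subscribe to a (task, signal) pair over the stream.
	if err := stream.Send(&v1.ListenForDurableEventRequest{
		TaskId:    "task-uuid", // placeholder
		SignalKey: "signal-1",  // placeholder
	}); err != nil {
		log.Fatal(err)
	}

	// The server streams back durable events as conditions are satisfied.
	ev, err := stream.Recv()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("durable event for %s/%s: %s", ev.TaskId, ev.SignalKey, string(ev.Data))
}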
1696
internal/services/shared/proto/v1/workflows.pb.go
Normal file
File diff suppressed because it is too large
@@ -2,9 +2,9 @@
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc v5.29.3
// source: v1-admin.proto
// source: v1/workflows.proto

package contracts
package v1

import (
	context "context"
@@ -22,6 +22,7 @@ const _ = grpc.SupportPackageIsVersion7
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type AdminServiceClient interface {
	PutWorkflow(ctx context.Context, in *CreateWorkflowVersionRequest, opts ...grpc.CallOption) (*CreateWorkflowVersionResponse, error)
	CancelTasks(ctx context.Context, in *CancelTasksRequest, opts ...grpc.CallOption) (*CancelTasksResponse, error)
	ReplayTasks(ctx context.Context, in *ReplayTasksRequest, opts ...grpc.CallOption) (*ReplayTasksResponse, error)
	TriggerWorkflowRun(ctx context.Context, in *TriggerWorkflowRunRequest, opts ...grpc.CallOption) (*TriggerWorkflowRunResponse, error)
@@ -35,9 +36,18 @@ func NewAdminServiceClient(cc grpc.ClientConnInterface) AdminServiceClient {
	return &adminServiceClient{cc}
}

func (c *adminServiceClient) PutWorkflow(ctx context.Context, in *CreateWorkflowVersionRequest, opts ...grpc.CallOption) (*CreateWorkflowVersionResponse, error) {
	out := new(CreateWorkflowVersionResponse)
	err := c.cc.Invoke(ctx, "/v1.AdminService/PutWorkflow", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *adminServiceClient) CancelTasks(ctx context.Context, in *CancelTasksRequest, opts ...grpc.CallOption) (*CancelTasksResponse, error) {
	out := new(CancelTasksResponse)
	err := c.cc.Invoke(ctx, "/AdminService/CancelTasks", in, out, opts...)
	err := c.cc.Invoke(ctx, "/v1.AdminService/CancelTasks", in, out, opts...)
	if err != nil {
		return nil, err
	}
@@ -46,7 +56,7 @@ func (c *adminServiceClient) CancelTasks(ctx context.Context, in *CancelTasksReq

func (c *adminServiceClient) ReplayTasks(ctx context.Context, in *ReplayTasksRequest, opts ...grpc.CallOption) (*ReplayTasksResponse, error) {
	out := new(ReplayTasksResponse)
	err := c.cc.Invoke(ctx, "/AdminService/ReplayTasks", in, out, opts...)
	err := c.cc.Invoke(ctx, "/v1.AdminService/ReplayTasks", in, out, opts...)
	if err != nil {
		return nil, err
	}
@@ -55,7 +65,7 @@ func (c *adminServiceClient) ReplayTasks(ctx context.Context, in *ReplayTasksReq

func (c *adminServiceClient) TriggerWorkflowRun(ctx context.Context, in *TriggerWorkflowRunRequest, opts ...grpc.CallOption) (*TriggerWorkflowRunResponse, error) {
	out := new(TriggerWorkflowRunResponse)
	err := c.cc.Invoke(ctx, "/AdminService/TriggerWorkflowRun", in, out, opts...)
	err := c.cc.Invoke(ctx, "/v1.AdminService/TriggerWorkflowRun", in, out, opts...)
	if err != nil {
		return nil, err
	}
@@ -66,6 +76,7 @@ func (c *adminServiceClient) TriggerWorkflowRun(ctx context.Context, in *Trigger
// All implementations must embed UnimplementedAdminServiceServer
// for forward compatibility
type AdminServiceServer interface {
	PutWorkflow(context.Context, *CreateWorkflowVersionRequest) (*CreateWorkflowVersionResponse, error)
	CancelTasks(context.Context, *CancelTasksRequest) (*CancelTasksResponse, error)
	ReplayTasks(context.Context, *ReplayTasksRequest) (*ReplayTasksResponse, error)
	TriggerWorkflowRun(context.Context, *TriggerWorkflowRunRequest) (*TriggerWorkflowRunResponse, error)
@@ -76,6 +87,9 @@ type AdminServiceServer interface {
type UnimplementedAdminServiceServer struct {
}

func (UnimplementedAdminServiceServer) PutWorkflow(context.Context, *CreateWorkflowVersionRequest) (*CreateWorkflowVersionResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method PutWorkflow not implemented")
}
func (UnimplementedAdminServiceServer) CancelTasks(context.Context, *CancelTasksRequest) (*CancelTasksResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method CancelTasks not implemented")
}
@@ -98,6 +112,24 @@ func RegisterAdminServiceServer(s grpc.ServiceRegistrar, srv AdminServiceServer)
	s.RegisterService(&AdminService_ServiceDesc, srv)
}

func _AdminService_PutWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(CreateWorkflowVersionRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(AdminServiceServer).PutWorkflow(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/v1.AdminService/PutWorkflow",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(AdminServiceServer).PutWorkflow(ctx, req.(*CreateWorkflowVersionRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _AdminService_CancelTasks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(CancelTasksRequest)
	if err := dec(in); err != nil {
@@ -108,7 +140,7 @@ func _AdminService_CancelTasks_Handler(srv interface{}, ctx context.Context, dec
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/AdminService/CancelTasks",
		FullMethod: "/v1.AdminService/CancelTasks",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(AdminServiceServer).CancelTasks(ctx, req.(*CancelTasksRequest))
@@ -126,7 +158,7 @@ func _AdminService_ReplayTasks_Handler(srv interface{}, ctx context.Context, dec
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/AdminService/ReplayTasks",
		FullMethod: "/v1.AdminService/ReplayTasks",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(AdminServiceServer).ReplayTasks(ctx, req.(*ReplayTasksRequest))
@@ -144,7 +176,7 @@ func _AdminService_TriggerWorkflowRun_Handler(srv interface{}, ctx context.Conte
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/AdminService/TriggerWorkflowRun",
		FullMethod: "/v1.AdminService/TriggerWorkflowRun",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(AdminServiceServer).TriggerWorkflowRun(ctx, req.(*TriggerWorkflowRunRequest))
@@ -156,9 +188,13 @@ func _AdminService_TriggerWorkflowRun_Handler(srv interface{}, ctx context.Conte
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var AdminService_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "AdminService",
	ServiceName: "v1.AdminService",
	HandlerType: (*AdminServiceServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "PutWorkflow",
			Handler:    _AdminService_PutWorkflow_Handler,
		},
		{
			MethodName: "CancelTasks",
			Handler:    _AdminService_CancelTasks_Handler,
@@ -173,5 +209,5 @@ var AdminService_ServiceDesc = grpc.ServiceDesc{
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "v1-admin.proto",
	Metadata: "v1/workflows.proto",
}
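Worth noting for mixed-version deployments: moving the service into the `v1` proto package changes the wire-level method paths, so a pre-change client invoking the bare `/AdminService/...` route would get Unimplemented from a server registered under `v1.AdminService`. A hedged sketch of the raw path the generated client now targets (the connection, context, and messages are placeholders):

// Sketch only: the namespaced method path after this change.
var cc grpc.ClientConnInterface // assumed established connection
req := &CreateWorkflowVersionRequest{}
resp := new(CreateWorkflowVersionResponse)
err := cc.Invoke(ctx, "/v1.AdminService/PutWorkflow", req, resp)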
@@ -13,7 +13,7 @@ import (
	"google.golang.org/grpc/keepalive"
	grpcMetadata "google.golang.org/grpc/metadata"

	admincontracts "github.com/hatchet-dev/hatchet/internal/services/admin/contracts/v1"
	admincontracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1"
	"github.com/hatchet-dev/hatchet/pkg/logger"

	"context"

@@ -71,6 +71,7 @@ func (s *sharedRepository) ToV1StepRunData(t *TaskInput) *V1StepRunData {
	}

	parents := make(map[string]map[string]interface{})
	triggers := make(map[string]map[string]interface{})
	stepRunErrors := make(map[string]string)

	if t.TriggerData != nil {
@@ -92,12 +93,19 @@ func (s *sharedRepository) ToV1StepRunData(t *TaskInput) *V1StepRunData {
			stepRunErrors[stepReadableId] = data.ErrorMessage
		}
	}

	for _, key := range t.TriggerData.TriggerDataKeys() {
		dataMap := t.TriggerData.TriggerDataValue(key)

		triggers[key] = dataMap
	}
	}

	return &V1StepRunData{
		Input:         t.Input,
		TriggeredBy:   "manual",
		Parents:       parents,
		Triggers:      triggers,
		StepRunErrors: stepRunErrors,
	}
}
@@ -107,6 +115,8 @@ type V1StepRunData struct {
	TriggeredBy string                            `json:"triggered_by"`
	Parents     map[string]map[string]interface{} `json:"parents"`

	Triggers map[string]map[string]interface{} `json:"triggers"`

	// custom-set user data for the step
	UserData map[string]interface{} `json:"user_data"`
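To make the new field concrete, a sketch of the struct as a step might see it; the keys and values are invented for illustration, with `Triggers` keyed by the readable data key of the condition that fired:

// Illustrative only: V1StepRunData with the new triggers map populated.
data := &V1StepRunData{
	TriggeredBy: "manual",
	Parents:     map[string]map[string]interface{}{},
	Triggers: map[string]map[string]interface{}{
		// readable data key -> payload of the user event / sleep that fired
		"payment:completed": {"amount": 42},
	},
	StepRunErrors: map[string]string{},
}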
@@ -31,9 +31,44 @@ type CandidateEventMatch struct {
	Data []byte
}

type ExternalCreateSignalMatchOpts struct {
	Conditions []CreateExternalSignalConditionOpt `validate:"required,min=1,dive"`

	SignalTaskId int64 `validate:"required,gt=0"`

	SignalTaskInsertedAt pgtype.Timestamptz

	SignalExternalId string `validate:"required,uuid"`

	SignalKey string `validate:"required"`
}

type CreateExternalSignalConditionKind string

const (
	CreateExternalSignalConditionKindSLEEP     CreateExternalSignalConditionKind = "SLEEP"
	CreateExternalSignalConditionKindUSEREVENT CreateExternalSignalConditionKind = "USER_EVENT"
)

type CreateExternalSignalConditionOpt struct {
	Kind CreateExternalSignalConditionKind `validate:"required,oneof=SLEEP USER_EVENT"`

	ReadableDataKey string `validate:"required"`

	OrGroupId string `validate:"required,uuid"`

	UserEventKey *string

	SleepFor *string `validate:"omitempty,duration"`

	Expression string
}

type CreateMatchOpts struct {
	Kind sqlcv1.V1MatchKind

	ExistingMatchData []byte

	Conditions []GroupMatchCondition

	TriggerDAGId *int64
@@ -71,7 +106,7 @@ type CreateMatchOpts struct {
	SignalKey *string
}

type InternalEventMatchResults struct {
type EventMatchResults struct {
	// The list of tasks which were created from the matches
	CreatedTasks []*sqlcv1.V1Task

@@ -101,7 +136,10 @@ type GroupMatchCondition struct {
}

type MatchRepository interface {
	ProcessInternalEventMatches(ctx context.Context, tenantId string, events []CandidateEventMatch) (*InternalEventMatchResults, error)
	RegisterSignalMatchConditions(ctx context.Context, tenantId string, eventMatches []ExternalCreateSignalMatchOpts) error

	ProcessUserEventMatches(ctx context.Context, tenantId string, events []CandidateEventMatch) (*EventMatchResults, error)
	ProcessInternalEventMatches(ctx context.Context, tenantId string, events []CandidateEventMatch) (*EventMatchResults, error)
}

type MatchRepositoryImpl struct {
@@ -114,8 +152,87 @@ func newMatchRepository(s *sharedRepository) (MatchRepository, error) {
	}, nil
}

func (m *MatchRepositoryImpl) RegisterSignalMatchConditions(ctx context.Context, tenantId string, signalMatches []ExternalCreateSignalMatchOpts) error {
	// TODO: ADD BACK VALIDATION
	// if err := m.v.Validate(signalMatches); err != nil {
	// 	return err
	// }

	tx, commit, rollback, err := sqlchelpers.PrepareTx(ctx, m.pool, m.l, 5000)

	if err != nil {
		return err
	}

	defer rollback()

	eventMatches := make([]CreateMatchOpts, 0, len(signalMatches))

	for _, signalMatch := range signalMatches {
		conditions := make([]GroupMatchCondition, 0, len(signalMatch.Conditions))

		for _, condition := range signalMatch.Conditions {
			switch condition.Kind {
			case CreateExternalSignalConditionKindSLEEP:
				if condition.SleepFor == nil {
					return fmt.Errorf("sleep condition requires a duration")
				}

				c, err := m.durableSleepCondition(
					ctx,
					tx,
					tenantId,
					condition.OrGroupId,
					condition.ReadableDataKey,
					*condition.SleepFor,
					sqlcv1.V1MatchConditionActionCREATE,
				)

				if err != nil {
					return err
				}

				conditions = append(conditions, *c)
			case CreateExternalSignalConditionKindUSEREVENT:
				if condition.UserEventKey == nil {
					return fmt.Errorf("user event condition requires a user event key")
				}

				conditions = append(conditions, m.userEventCondition(
					condition.OrGroupId,
					condition.ReadableDataKey,
					*condition.UserEventKey,
					condition.Expression,
					sqlcv1.V1MatchConditionActionCREATE,
				))
			}
		}

		eventMatches = append(eventMatches, CreateMatchOpts{
			Kind:                 sqlcv1.V1MatchKindSIGNAL,
			Conditions:           conditions,
			SignalTaskId:         &signalMatch.SignalTaskId,
			SignalTaskInsertedAt: signalMatch.SignalTaskInsertedAt,
			SignalExternalId:     &signalMatch.SignalExternalId,
			SignalKey:            &signalMatch.SignalKey,
		})
	}

	err = m.createEventMatches(ctx, tx, tenantId, eventMatches)

	if err != nil {
		return err
	}

	if err := commit(ctx); err != nil {
		return err
	}

	return nil
}
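A hedged usage sketch for the method above; the IDs, keys, and CEL expression are placeholders, and each condition sits in its own OR group so either one satisfying is enough to fire the signal:

// Sketch: wake the durable signal after 5 minutes OR when a matching
// "order:shipped" user event arrives.
sleepFor := "5m"
eventKey := "order:shipped"

opts := ExternalCreateSignalMatchOpts{
	SignalTaskId:     taskId,         // internal task ID (placeholder)
	SignalExternalId: taskExternalId, // external UUID (placeholder)
	SignalKey:        "signal-1",
	Conditions: []CreateExternalSignalConditionOpt{
		{
			Kind:            CreateExternalSignalConditionKindSLEEP,
			ReadableDataKey: "sleep",
			OrGroupId:       groupA, // UUID placeholder
			SleepFor:        &sleepFor,
		},
		{
			Kind:            CreateExternalSignalConditionKindUSEREVENT,
			ReadableDataKey: "shipped",
			OrGroupId:       groupB, // UUID placeholder
			UserEventKey:    &eventKey,
			Expression:      "input.order_id == 123",
		},
	},
}

err := matchRepo.RegisterSignalMatchConditions(ctx, tenantId, []ExternalCreateSignalMatchOpts{opts})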

// ProcessInternalEventMatches processes a list of internal events
func (m *MatchRepositoryImpl) ProcessInternalEventMatches(ctx context.Context, tenantId string, events []CandidateEventMatch) (*InternalEventMatchResults, error) {
func (m *MatchRepositoryImpl) ProcessInternalEventMatches(ctx context.Context, tenantId string, events []CandidateEventMatch) (*EventMatchResults, error) {
	tx, commit, rollback, err := sqlchelpers.PrepareTx(ctx, m.pool, m.l, 5000)

	if err != nil {
@@ -124,7 +241,7 @@ func (m *MatchRepositoryImpl) ProcessInternalEventMatches(ctx context.Context, t

	defer rollback()

	res, err := m.processInternalEventMatches(ctx, tx, tenantId, events)
	res, err := m.processEventMatches(ctx, tx, tenantId, events, sqlcv1.V1EventTypeINTERNAL)

	if err != nil {
		return nil, err
@@ -137,10 +254,33 @@ func (m *MatchRepositoryImpl) ProcessInternalEventMatches(ctx context.Context, t
	return res, nil
}

func (m *sharedRepository) processInternalEventMatches(ctx context.Context, tx sqlcv1.DBTX, tenantId string, events []CandidateEventMatch) (*InternalEventMatchResults, error) {
// ProcessUserEventMatches processes a list of user events
func (m *MatchRepositoryImpl) ProcessUserEventMatches(ctx context.Context, tenantId string, events []CandidateEventMatch) (*EventMatchResults, error) {
	tx, commit, rollback, err := sqlchelpers.PrepareTx(ctx, m.pool, m.l, 5000)

	if err != nil {
		return nil, err
	}

	defer rollback()

	res, err := m.processEventMatches(ctx, tx, tenantId, events, sqlcv1.V1EventTypeUSER)

	if err != nil {
		return nil, err
	}

	if err := commit(ctx); err != nil {
		return nil, err
	}

	return res, nil
}

func (m *sharedRepository) processEventMatches(ctx context.Context, tx sqlcv1.DBTX, tenantId string, events []CandidateEventMatch, eventType sqlcv1.V1EventType) (*EventMatchResults, error) {
	start := time.Now()

	res := &InternalEventMatchResults{}
	res := &EventMatchResults{}

	eventKeys := make([]string, 0, len(events))
	resourceHints := make([]pgtype.Text, 0, len(events))
@@ -172,7 +312,7 @@ func (m *sharedRepository) processInternalEventMatches(ctx context.Context, tx s
		tx,
		sqlcv1.ListMatchConditionsForEventParams{
			Tenantid:  sqlchelpers.UUIDFromStr(tenantId),
			Eventtype: sqlcv1.V1EventTypeINTERNAL,
			Eventtype: eventType,
			Eventkeys: eventKeys,
			Eventresourcehints: resourceHints,
		},
@@ -183,7 +323,7 @@ func (m *sharedRepository) processInternalEventMatches(ctx context.Context, tx s
	}

	// pass match conditions through CEL expressions parser
	matches, err := m.processCELExpressions(ctx, events, matchConditions)
	matches, err := m.processCELExpressions(ctx, events, matchConditions, eventType)

	if err != nil {
		return nil, err
@@ -282,8 +422,15 @@ func (m *sharedRepository) processInternalEventMatches(ctx context.Context, tx s
	createTaskOpts := make([]CreateTaskOpts, 0, len(satisfiedMatches))
	replayTaskOpts := make([]ReplayTaskOpts, 0, len(satisfiedMatches))

	dependentMatches := make([]*sqlcv1.SaveSatisfiedMatchConditionsRow, 0)

	for _, match := range satisfiedMatches {
		if match.TriggerStepID.Valid && match.TriggerExternalID.Valid {
			if match.Action == sqlcv1.V1MatchConditionActionCREATEMATCH {
				dependentMatches = append(dependentMatches, match)
				continue
			}

			var input, additionalMetadata []byte

			if match.TriggerDagID.Valid {
@@ -369,6 +516,15 @@ func (m *sharedRepository) processInternalEventMatches(ctx context.Context, tx s
		}
	}

	// create dependent matches
	if len(dependentMatches) > 0 {
		err = m.createAdditionalMatches(ctx, tx, tenantId, dependentMatches)

		if err != nil {
			return nil, fmt.Errorf("failed to create additional matches: %w", err)
		}
	}

	// create tasks
	tasks, err = m.createTasks(ctx, tx, tenantId, createTaskOpts)

@@ -401,6 +557,7 @@ func (m *sharedRepository) processInternalEventMatches(ctx context.Context, tx s
		taskIds = append(taskIds, TaskIdInsertedAtRetryCount{
			Id:         match.SignalTaskID.Int64,
			InsertedAt: match.SignalTaskInsertedAt,
			// signals are durable, meaning they persist between retries, so a retryCount of -1 is used
			RetryCount: -1,
		})
		externalIds = append(externalIds, sqlchelpers.UUIDToStr(match.SignalExternalID))
@@ -434,13 +591,19 @@ func (m *sharedRepository) processInternalEventMatches(ctx context.Context, tx s
	return res, nil
}

func (m *sharedRepository) processCELExpressions(ctx context.Context, events []CandidateEventMatch, conditions []*sqlcv1.ListMatchConditionsForEventRow) (map[string][]*sqlcv1.ListMatchConditionsForEventRow, error) {
func (m *sharedRepository) processCELExpressions(ctx context.Context, events []CandidateEventMatch, conditions []*sqlcv1.ListMatchConditionsForEventRow, eventType sqlcv1.V1EventType) (map[string][]*sqlcv1.ListMatchConditionsForEventRow, error) {
	// parse CEL expressions
	programs := make(map[int64]cel.Program)
	conditionIdsToConditions := make(map[int64]*sqlcv1.ListMatchConditionsForEventRow)

	for _, condition := range conditions {
		ast, issues := m.env.Compile(condition.Expression.String)
		expr := condition.Expression.String

		if expr == "" {
			expr = "true"
		}

		ast, issues := m.env.Compile(expr)

		if issues != nil {
			return nil, issues.Err()
@@ -461,13 +624,43 @@ func (m *sharedRepository) processCELExpressions(ctx context.Context, events []C

	for _, event := range events {
		inputData := map[string]interface{}{}
		outputData := map[string]interface{}{}

		if len(event.Data) > 0 {
			err := json.Unmarshal(event.Data, &inputData)
			switch eventType {
			case sqlcv1.V1EventTypeINTERNAL:
				// first unmarshal to event data, then parse the output data
				outputEventData := &TaskOutputEvent{}

			if err != nil {
				m.l.Error().Err(err).Msgf("failed to unmarshal event data %s", string(event.Data))
				return nil, err
				err := json.Unmarshal(event.Data, &outputEventData)

				if err != nil {
					m.l.Warn().Err(err).Msgf("[0] failed to unmarshal output event data %s", string(event.Data))
					continue
				}

				if len(outputEventData.Output) > 0 {
					err = json.Unmarshal(outputEventData.Output, &outputData)

					if err != nil {
						m.l.Warn().Err(err).Msgf("failed to unmarshal output event data, output subfield %s", string(event.Data))
						continue
					}
				} else {
					err = json.Unmarshal(event.Data, &inputData)

					if err != nil {
						m.l.Warn().Err(err).Msgf("[1] failed to unmarshal output event data %s", string(event.Data))
						continue
					}
				}
			case sqlcv1.V1EventTypeUSER:
				err := json.Unmarshal(event.Data, &inputData)

				if err != nil {
					m.l.Warn().Err(err).Msgf("failed to unmarshal user event data %s", string(event.Data))
					continue
				}
			}
		}

@@ -484,14 +677,22 @@ func (m *sharedRepository) processCELExpressions(ctx context.Context, events []C
		}

		out, _, err := program.ContextEval(ctx, map[string]interface{}{
			"input": inputData,
			"input":  inputData,
			"output": outputData,
		})

		if err != nil {
			return nil, err
			// FIXME: we'd like to display this error to the user somehow, which is difficult as the
			// task hasn't necessarily been created yet. Additionally, we might have other conditions
			// which are valid, so we don't necessarily want to fail the entire match process. At the
			// same time, we need to remove it from the database, so we'll want to mark the condition as
			// satisfied and write an error to it. If the relevant conditions have errors, the task
			// should be created in a failed state.
			// How should we handle signals?
			m.l.Warn().Err(err).Msgf("failed to eval CEL program")
		}

		if out.Value().(bool) {
		if b, ok := out.Value().(bool); ok && b {
			matches[event.ID] = append(matches[event.ID], conditionIdsToConditions[conditionId])
		}
	}
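So, under the new scheme, what a CEL expression can reference depends on the event type: internal events expose the task's output (unwrapped from TaskOutputEvent) under `output`, while user events expose the raw payload under `input`. A sketch of expressions as they might be stored on match conditions; the field names are illustrative:

// Sketch: condition expressions bound against the two variables.
const (
	// user event: evaluated against the event payload bound to "input"
	userEventExpr = `input.order_id == 123 && input.total > 50`

	// internal event: evaluated against the parent task's output bound to "output"
	internalExpr = `output.status == "succeeded"`
)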
@@ -504,6 +705,7 @@ func (m *sharedRepository) createEventMatches(ctx context.Context, tx sqlcv1.DBT
	// create the event matches first
	dagTenantIds := make([]pgtype.UUID, 0, len(eventMatches))
	dagKinds := make([]string, 0, len(eventMatches))
	dagExistingDatas := make([][]byte, 0, len(eventMatches))
	triggerDagIds := make([]int64, 0, len(eventMatches))
	triggerDagInsertedAts := make([]pgtype.Timestamptz, 0, len(eventMatches))
	triggerStepIds := make([]pgtype.UUID, 0, len(eventMatches))
@@ -529,6 +731,7 @@ func (m *sharedRepository) createEventMatches(ctx context.Context, tx sqlcv1.DBT
		if match.TriggerDAGId != nil && match.TriggerDAGInsertedAt.Valid && match.TriggerStepId != nil && match.TriggerExternalId != nil {
			dagTenantIds = append(dagTenantIds, sqlchelpers.UUIDFromStr(tenantId))
			dagKinds = append(dagKinds, string(match.Kind))
			dagExistingDatas = append(dagExistingDatas, match.ExistingMatchData)
			triggerDagIds = append(triggerDagIds, *match.TriggerDAGId)
			triggerDagInsertedAts = append(triggerDagInsertedAts, match.TriggerDAGInsertedAt)
			triggerStepIds = append(triggerStepIds, sqlchelpers.UUIDFromStr(*match.TriggerStepId))
@@ -571,6 +774,7 @@ func (m *sharedRepository) createEventMatches(ctx context.Context, tx sqlcv1.DBT
		sqlcv1.CreateMatchesForDAGTriggersParams{
			Tenantids:             dagTenantIds,
			Kinds:                 dagKinds,
			ExistingDatas:         dagExistingDatas,
			Triggerdagids:         triggerDagIds,
			Triggerdaginsertedats: triggerDagInsertedAts,
			Triggerstepids:        triggerStepIds,
@@ -654,3 +858,167 @@ func (m *sharedRepository) createEventMatches(ctx context.Context, tx sqlcv1.DBT

	return nil
}

func (m *sharedRepository) createAdditionalMatches(ctx context.Context, tx sqlcv1.DBTX, tenantId string, satisfiedMatches []*sqlcv1.SaveSatisfiedMatchConditionsRow) error { // nolint: unused
	additionalMatchStepIds := make([]pgtype.UUID, 0, len(satisfiedMatches))

	for _, match := range satisfiedMatches {
		if match.Action == sqlcv1.V1MatchConditionActionCREATEMATCH {
			additionalMatchStepIds = append(additionalMatchStepIds, match.TriggerStepID)
		}
	}

	// get the configs for the additional matches
	stepMatchConditions, err := m.queries.ListStepMatchConditions(
		ctx,
		tx,
		sqlcv1.ListStepMatchConditionsParams{
			Tenantid: sqlchelpers.UUIDFromStr(tenantId),
			Stepids:  additionalMatchStepIds,
		},
	)

	if err != nil {
		return err
	}

	stepIdsToConditions := make(map[string][]*sqlcv1.V1StepMatchCondition)

	for _, condition := range stepMatchConditions {
		stepId := sqlchelpers.UUIDToStr(condition.StepID)
		if _, ok := stepIdsToConditions[stepId]; !ok {
			stepIdsToConditions[stepId] = make([]*sqlcv1.V1StepMatchCondition, 0)
		}

		stepIdsToConditions[stepId] = append(stepIdsToConditions[stepId], condition)
	}

	additionalMatches := make([]CreateMatchOpts, 0, len(satisfiedMatches))

	for _, match := range satisfiedMatches {
		if match.TriggerStepID.Valid && match.Action == sqlcv1.V1MatchConditionActionCREATEMATCH {
			conditions, ok := stepIdsToConditions[sqlchelpers.UUIDToStr(match.TriggerStepID)]

			if !ok {
				continue
			}

			triggerExternalId := sqlchelpers.UUIDToStr(match.TriggerExternalID)
			triggerWorkflowRunId := sqlchelpers.UUIDToStr(match.TriggerWorkflowRunID)
			triggerStepId := sqlchelpers.UUIDToStr(match.TriggerStepID)
			var triggerExistingTaskId *int64

			if match.TriggerExistingTaskID.Valid {
				triggerExistingTaskId = &match.TriggerExistingTaskID.Int64
			}

			// copy over the match data
			opt := CreateMatchOpts{
				Kind:                          sqlcv1.V1MatchKindTRIGGER,
				ExistingMatchData:             match.McAggregatedData,
				Conditions:                    make([]GroupMatchCondition, 0),
				TriggerDAGId:                  &match.TriggerDagID.Int64,
				TriggerDAGInsertedAt:          match.TriggerDagInsertedAt,
				TriggerExternalId:             &triggerExternalId,
				TriggerWorkflowRunId:          &triggerWorkflowRunId,
				TriggerStepId:                 &triggerStepId,
				TriggerStepIndex:              match.TriggerStepIndex,
				TriggerExistingTaskId:         triggerExistingTaskId,
				TriggerExistingTaskInsertedAt: match.TriggerExistingTaskInsertedAt,
				TriggerParentTaskExternalId:   match.TriggerParentTaskExternalID,
				TriggerParentTaskId:           match.TriggerParentTaskID,
				TriggerParentTaskInsertedAt:   match.TriggerParentTaskInsertedAt,
				TriggerChildIndex:             match.TriggerChildIndex,
				TriggerChildKey:               match.TriggerChildKey,
			}

			for _, condition := range conditions {
				switch condition.Kind {
				case sqlcv1.V1StepMatchConditionKindSLEEP:
					c, err := m.durableSleepCondition(
						ctx,
						tx,
						tenantId,
						sqlchelpers.UUIDToStr(condition.OrGroupID),
						condition.ReadableDataKey,
						condition.SleepDuration.String,
						condition.Action,
					)

					if err != nil {
						return err
					}

					opt.Conditions = append(opt.Conditions, *c)
				case sqlcv1.V1StepMatchConditionKindUSEREVENT:
					opt.Conditions = append(opt.Conditions, m.userEventCondition(
						sqlchelpers.UUIDToStr(condition.OrGroupID),
						condition.ReadableDataKey,
						condition.EventKey.String,
						condition.Expression.String,
						condition.Action,
					))
				default:
					// PARENT_OVERRIDE is another kind, but it isn't processed here
					continue
				}
			}

			additionalMatches = append(additionalMatches, opt)
		}
	}

	if len(additionalMatches) > 0 {
		err := m.createEventMatches(ctx, tx, tenantId, additionalMatches)

		if err != nil {
			return err
		}
	}

	return nil
}

func (m *sharedRepository) durableSleepCondition(ctx context.Context, tx sqlcv1.DBTX, tenantId, orGroupId, readableDataKey, sleepDuration string, action sqlcv1.V1MatchConditionAction) (*GroupMatchCondition, error) {
	// FIXME: make this a proper bulk write
	sleep, err := m.queries.CreateDurableSleep(ctx, tx, sqlcv1.CreateDurableSleepParams{
		TenantID:       sqlchelpers.UUIDFromStr(tenantId),
		SleepDurations: []string{sleepDuration},
	})

	if err != nil {
		return nil, err
	}

	if len(sleep) != 1 {
		return nil, fmt.Errorf("expected 1 sleep to be created, but got %d", len(sleep))
	}

	eventKey := getDurableSleepEventKey(sleep[0].ID)
	eventType := sqlcv1.V1EventTypeINTERNAL
	expression := "true"

	return &GroupMatchCondition{
		GroupId:         orGroupId,
		EventType:       eventType,
		EventKey:        eventKey,
		ReadableDataKey: readableDataKey,
		Expression:      expression,
		Action:          action,
	}, nil
}

func (m *sharedRepository) userEventCondition(orGroupId, readableDataKey, eventKey, expression string, action sqlcv1.V1MatchConditionAction) GroupMatchCondition {
	return GroupMatchCondition{
		GroupId:         orGroupId,
		EventType:       sqlcv1.V1EventTypeUSER,
		EventKey:        eventKey,
		ReadableDataKey: readableDataKey,
		Expression:      expression,
		Action:          action,
	}
}

func getDurableSleepEventKey(sleepId int64) string {
	return fmt.Sprintf("sleep-%d", sleepId)
}
@@ -13,6 +13,8 @@ type MatchData struct {

	// maps readable data keys to a list of data values
	dataKeys map[string][]interface{}

	triggerDataKeys map[string][]interface{}
}

func (m *MatchData) Action() sqlcv1.V1MatchConditionAction {
@@ -33,6 +35,45 @@ func (m *MatchData) DataKeys() []string {
	return keys
}

func (m *MatchData) TriggerDataKeys() []string {
	if len(m.triggerDataKeys) == 0 {
		return []string{}
	}

	keys := make([]string, 0, len(m.triggerDataKeys))

	for k := range m.triggerDataKeys {
		keys = append(keys, k)
	}

	return keys
}

func (m *MatchData) TriggerDataValue(key string) map[string]interface{} {
	values := m.triggerDataKeys[key]

	for _, v := range values {
		// convert the values to a byte array, then to a map
		vBytes, err := json.Marshal(v)

		if err != nil {
			continue
		}

		data := map[string]interface{}{}

		err = json.Unmarshal(vBytes, &data)

		if err != nil {
			continue
		}

		return data
	}

	return nil
}

// Helper function for internal events
func (m *MatchData) DataValueAsTaskOutputEvent(key string) *TaskOutputEvent {
	values := m.dataKeys[key]
@@ -72,6 +113,17 @@ func NewMatchData(mcAggregatedData []byte) (*MatchData, error) {
		return nil, fmt.Errorf("no match condition aggregated data")
	}

	// look for any CREATE_MATCH data which should be merged into the match data
	existingDataKeys := make(map[string][]interface{})

	for k, v := range triggerDataMap {
		if k == "CREATE_MATCH" {
			for key, values := range v {
				existingDataKeys[key] = values
			}
		}
	}

	for k, v := range triggerDataMap {
		var action sqlcv1.V1MatchConditionAction

@@ -86,9 +138,18 @@ func NewMatchData(mcAggregatedData []byte) (*MatchData, error) {
			action = sqlcv1.V1MatchConditionActionSKIP
		}

		triggerDataKeys := map[string][]interface{}{}

		if len(existingDataKeys) == 0 {
			existingDataKeys = v
		} else {
			triggerDataKeys = v
		}

		return &MatchData{
			action:   action,
			dataKeys: v,
			action:          action,
			dataKeys:        existingDataKeys,
			triggerDataKeys: triggerDataKeys,
		}, nil
	}
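To illustrate the merge above, a sketch of aggregated match-condition data as this code appears to expect it. The JSON shape is inferred from the loop, so treat it as an assumption: the top level maps an action key (or the special `CREATE_MATCH` marker) to readable data keys, each holding the values collected from satisfied conditions, and `CREATE_MATCH` entries are lifted into `dataKeys` so a re-created match keeps its parent's data:

// Inferred shape, illustrative values only.
raw := []byte(`{
	"CREATE_MATCH": {"parent": [{"output": {"status": "succeeded"}}]},
	"CREATE":       {"user-event": [{"order_id": 123}]}
}`)

md, err := NewMatchData(raw)
if err != nil {
	panic(err)
}
// With CREATE_MATCH present, its values populate DataKeys(), and the
// remaining action's values land in TriggerDataKeys().
_ = md.DataKeys()
_ = md.TriggerDataKeys()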
@@ -14,7 +14,6 @@ import (
	"github.com/hatchet-dev/hatchet/pkg/validator"

	celgo "github.com/google/cel-go/cel"
	"github.com/google/cel-go/checker/decls"
)

// implements comparable for the lru cache
@@ -45,9 +44,8 @@ func newSharedRepository(pool *pgxpool.Pool, v validator.Validator, l *zerolog.L
	celParser := cel.NewCELParser()

	env, err := celgo.NewEnv(
		celgo.Declarations(
			decls.NewVar("input", decls.NewMapType(decls.String, decls.Dyn)),
		),
		celgo.Variable("input", celgo.MapType(celgo.StringType, celgo.DynType)),
		celgo.Variable("output", celgo.MapType(celgo.StringType, celgo.DynType)),
	)

	if err != nil {
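A small end-to-end sketch of the migrated environment, assuming cel-go's builder-style API (`celgo.Variable` replaces the deprecated `Declarations`/`decls` form); the added `output` variable is what lets match expressions reference task output:

// Sketch: compile and evaluate one expression against both variables.
env, err := celgo.NewEnv(
	celgo.Variable("input", celgo.MapType(celgo.StringType, celgo.DynType)),
	celgo.Variable("output", celgo.MapType(celgo.StringType, celgo.DynType)),
)
if err != nil {
	panic(err)
}

ast, issues := env.Compile(`output.status == "succeeded"`)
if issues != nil && issues.Err() != nil {
	panic(issues.Err())
}

prg, err := env.Program(ast)
if err != nil {
	panic(err)
}

out, _, err := prg.Eval(map[string]interface{}{
	"input":  map[string]interface{}{},
	"output": map[string]interface{}{"status": "succeeded"},
})
if err != nil {
	panic(err)
}
_ = out // evaluates to true in this sketch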
@@ -22,8 +22,8 @@ SELECT
|
||||
FROM
|
||||
v1_match_condition m
|
||||
JOIN
|
||||
input i ON (m.tenant_id, m.event_type, m.event_key, m.is_satisfied, m.event_resource_hint) =
|
||||
(@tenantId::uuid, @eventType::v1_event_type, i.event_key, FALSE, i.event_resource_hint);
|
||||
input i ON (m.tenant_id, m.event_type, m.event_key, m.is_satisfied, COALESCE(m.event_resource_hint, '')::text) =
|
||||
(@tenantId::uuid, @eventType::v1_event_type, i.event_key, FALSE, COALESCE(i.event_resource_hint, '')::text);
|
||||
|
||||
-- name: CreateMatchesForDAGTriggers :many
|
||||
WITH input AS (
|
||||
|
||||
@@ -30,8 +30,8 @@ SELECT
|
||||
FROM
|
||||
v1_match_condition m
|
||||
JOIN
|
||||
input i ON (m.tenant_id, m.event_type, m.event_key, m.is_satisfied, m.event_resource_hint) =
|
||||
($1::uuid, $2::v1_event_type, i.event_key, FALSE, i.event_resource_hint)
|
||||
input i ON (m.tenant_id, m.event_type, m.event_key, m.is_satisfied, COALESCE(m.event_resource_hint, '')::text) =
|
||||
($1::uuid, $2::v1_event_type, i.event_key, FALSE, COALESCE(i.event_resource_hint, '')::text)
|
||||
`

type ListMatchConditionsForEventParams struct {
@@ -89,7 +89,7 @@ func (q *Queries) ListMatchConditionsForEvent(ctx context.Context, db DBTX, arg
const createMatchesForDAGTriggers = `-- name: CreateMatchesForDAGTriggers :many
WITH input AS (
SELECT
tenant_id, kind, trigger_dag_id, trigger_dag_inserted_at, trigger_step_id, trigger_step_index, trigger_external_id, trigger_workflow_run_id, trigger_existing_task_id, trigger_existing_task_inserted_at, trigger_parent_task_external_id, trigger_parent_task_id, trigger_parent_task_inserted_at, trigger_child_index, trigger_child_key
tenant_id, kind, existing_data, trigger_dag_id, trigger_dag_inserted_at, trigger_step_id, trigger_step_index, trigger_external_id, trigger_workflow_run_id, trigger_existing_task_id, trigger_existing_task_inserted_at, trigger_parent_task_external_id, trigger_parent_task_id, trigger_parent_task_inserted_at, trigger_child_index, trigger_child_key
FROM
(
SELECT
@@ -107,12 +107,14 @@ WITH input AS (
unnest($12::bigint[]) AS trigger_parent_task_id,
unnest($13::timestamptz[]) AS trigger_parent_task_inserted_at,
unnest($14::bigint[]) AS trigger_child_index,
unnest($15::text[]) AS trigger_child_key
unnest($15::text[]) AS trigger_child_key,
unnest($16::jsonb[]) AS existing_data
) AS subquery
)
INSERT INTO v1_match (
tenant_id,
kind,
existing_data,
trigger_dag_id,
trigger_dag_inserted_at,
trigger_step_id,
@@ -130,6 +132,7 @@ INSERT INTO v1_match (
SELECT
i.tenant_id,
i.kind,
i.existing_data,
i.trigger_dag_id,
i.trigger_dag_inserted_at,
i.trigger_step_id,
@@ -146,12 +149,13 @@ SELECT
FROM
input i
RETURNING
id, tenant_id, kind, is_satisfied, signal_task_id, signal_task_inserted_at, signal_external_id, signal_key, trigger_dag_id, trigger_dag_inserted_at, trigger_step_id, trigger_step_index, trigger_external_id, trigger_workflow_run_id, trigger_existing_task_id, trigger_existing_task_inserted_at, trigger_parent_task_external_id, trigger_parent_task_id, trigger_parent_task_inserted_at, trigger_child_index, trigger_child_key
id, tenant_id, kind, existing_data, is_satisfied, signal_task_id, signal_task_inserted_at, signal_external_id, signal_key, trigger_dag_id, trigger_dag_inserted_at, trigger_step_id, trigger_step_index, trigger_external_id, trigger_workflow_run_id, trigger_existing_task_id, trigger_existing_task_inserted_at, trigger_parent_task_external_id, trigger_parent_task_id, trigger_parent_task_inserted_at, trigger_child_index, trigger_child_key
`

type CreateMatchesForDAGTriggersParams struct {
Tenantids []pgtype.UUID `json:"tenantids"`
Kinds []string `json:"kinds"`
ExistingDatas [][]byte `json:"existingDatas"`
Triggerdagids []int64 `json:"triggerdagids"`
Triggerdaginsertedats []pgtype.Timestamptz `json:"triggerdaginsertedats"`
Triggerstepids []pgtype.UUID `json:"triggerstepids"`
@@ -184,6 +188,7 @@ func (q *Queries) CreateMatchesForDAGTriggers(ctx context.Context, db DBTX, arg
arg.TriggerParentTaskInsertedAt,
arg.TriggerChildIndex,
arg.TriggerChildKey,
arg.ExistingDatas,
)
if err != nil {
return nil, err
@@ -196,6 +201,7 @@ func (q *Queries) CreateMatchesForDAGTriggers(ctx context.Context, db DBTX, arg
&i.ID,
&i.TenantID,
&i.Kind,
&i.ExistingData,
&i.IsSatisfied,
&i.SignalTaskID,
&i.SignalTaskInsertedAt,

@@ -128,7 +128,9 @@ WITH match_counts AS (
COUNT(DISTINCT CASE WHEN action = 'CANCEL' THEN or_group_id END) AS total_cancel_groups,
COUNT(DISTINCT CASE WHEN is_satisfied AND action = 'CANCEL' THEN or_group_id END) AS satisfied_cancel_groups,
COUNT(DISTINCT CASE WHEN action = 'SKIP' THEN or_group_id END) AS total_skip_groups,
COUNT(DISTINCT CASE WHEN is_satisfied AND action = 'SKIP' THEN or_group_id END) AS satisfied_skip_groups
COUNT(DISTINCT CASE WHEN is_satisfied AND action = 'SKIP' THEN or_group_id END) AS satisfied_skip_groups,
COUNT(DISTINCT CASE WHEN action = 'CREATE_MATCH' THEN or_group_id END) AS total_create_match_groups,
COUNT(DISTINCT CASE WHEN is_satisfied AND action = 'CREATE_MATCH' THEN or_group_id END) AS satisfied_create_match_groups
FROM v1_match_condition main
WHERE v1_match_id = ANY(@matchIds::bigint[])
GROUP BY v1_match_id
@@ -136,10 +138,11 @@ WITH match_counts AS (
SELECT
m.*,
CASE WHEN
(mc.total_create_groups > 0 AND mc.total_create_groups = mc.satisfied_create_groups) THEN 'CREATE'
WHEN (mc.total_queue_groups > 0 AND mc.total_queue_groups = mc.satisfied_queue_groups) THEN 'QUEUE'
(mc.total_skip_groups > 0 AND mc.total_skip_groups = mc.satisfied_skip_groups) THEN 'SKIP'
WHEN (mc.total_cancel_groups > 0 AND mc.total_cancel_groups = mc.satisfied_cancel_groups) THEN 'CANCEL'
WHEN (mc.total_skip_groups > 0 AND mc.total_skip_groups = mc.satisfied_skip_groups) THEN 'SKIP'
WHEN (mc.total_create_groups > 0 AND mc.total_create_groups = mc.satisfied_create_groups) THEN 'CREATE'
WHEN (mc.total_queue_groups > 0 AND mc.total_queue_groups = mc.satisfied_queue_groups) THEN 'QUEUE'
WHEN (mc.total_create_match_groups > 0 AND mc.total_create_match_groups = mc.satisfied_create_match_groups) THEN 'CREATE_MATCH'
END::v1_match_condition_action AS action
FROM
v1_match m
@@ -151,6 +154,7 @@ WITH match_counts AS (
OR (mc.total_queue_groups > 0 AND mc.total_queue_groups = mc.satisfied_queue_groups)
OR (mc.total_cancel_groups > 0 AND mc.total_cancel_groups = mc.satisfied_cancel_groups)
OR (mc.total_skip_groups > 0 AND mc.total_skip_groups = mc.satisfied_skip_groups)
OR (mc.total_create_match_groups > 0 AND mc.total_create_match_groups = mc.satisfied_create_match_groups)
)
), locked_conditions AS (
SELECT
@@ -198,12 +202,12 @@ WITH match_counts AS (
id IN (SELECT id FROM deleted_conditions)
)
SELECT
*,
d.mc_aggregated_data
rm.*,
COALESCE(rm.existing_data || d.mc_aggregated_data, d.mc_aggregated_data)::jsonb AS mc_aggregated_data
FROM
result_matches
result_matches rm
LEFT JOIN
matches_with_data d ON result_matches.id = d.id;
matches_with_data d ON rm.id = d.id;
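
The merged column above leans on Postgres jsonb concatenation: in `existing_data || mc_aggregated_data` keys from the right operand win, and because `||` returns NULL when either operand is NULL, the COALESCE falls back to the aggregated data alone. A minimal plain-Go sketch of the same merge semantics; mergeMatchData is a hypothetical helper, not part of the generated code:

package sqlcv1

import "encoding/json"

// mergeMatchData mirrors COALESCE(existing_data || mc_aggregated_data, mc_aggregated_data):
// keys from the aggregated data overwrite keys in existing_data, and a missing
// existing_data falls back to the aggregated data alone.
func mergeMatchData(existing, aggregated []byte) ([]byte, error) {
	if aggregated == nil {
		return nil, nil // jsonb x || NULL is NULL, and the COALESCE fallback is also NULL
	}
	if existing == nil {
		return aggregated, nil
	}
	merged := map[string]interface{}{}
	if err := json.Unmarshal(existing, &merged); err != nil {
		return nil, err
	}
	overlay := map[string]interface{}{}
	if err := json.Unmarshal(aggregated, &overlay); err != nil {
		return nil, err
	}
	for k, v := range overlay {
		merged[k] = v // the right-hand operand wins, as with jsonb ||
	}
	return json.Marshal(merged)
}
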

-- name: ResetMatchConditions :many
-- NOTE: we have to break this into a separate query because CTEs can't see modified rows

@@ -58,7 +58,7 @@ SELECT
FROM
input i
RETURNING
id, tenant_id, kind, is_satisfied, signal_task_id, signal_task_inserted_at, signal_external_id, signal_key, trigger_dag_id, trigger_dag_inserted_at, trigger_step_id, trigger_step_index, trigger_external_id, trigger_workflow_run_id, trigger_parent_task_external_id, trigger_parent_task_id, trigger_parent_task_inserted_at, trigger_child_index, trigger_child_key, trigger_existing_task_id, trigger_existing_task_inserted_at
id, tenant_id, kind, is_satisfied, existing_data, signal_task_id, signal_task_inserted_at, signal_external_id, signal_key, trigger_dag_id, trigger_dag_inserted_at, trigger_step_id, trigger_step_index, trigger_external_id, trigger_workflow_run_id, trigger_parent_task_external_id, trigger_parent_task_id, trigger_parent_task_inserted_at, trigger_child_index, trigger_child_key, trigger_existing_task_id, trigger_existing_task_inserted_at
`

type CreateMatchesForSignalTriggersParams struct {
@@ -91,6 +91,7 @@ func (q *Queries) CreateMatchesForSignalTriggers(ctx context.Context, db DBTX, a
&i.TenantID,
&i.Kind,
&i.IsSatisfied,
&i.ExistingData,
&i.SignalTaskID,
&i.SignalTaskInsertedAt,
&i.SignalExternalID,
@@ -292,18 +293,21 @@ WITH match_counts AS (
COUNT(DISTINCT CASE WHEN action = 'CANCEL' THEN or_group_id END) AS total_cancel_groups,
COUNT(DISTINCT CASE WHEN is_satisfied AND action = 'CANCEL' THEN or_group_id END) AS satisfied_cancel_groups,
COUNT(DISTINCT CASE WHEN action = 'SKIP' THEN or_group_id END) AS total_skip_groups,
COUNT(DISTINCT CASE WHEN is_satisfied AND action = 'SKIP' THEN or_group_id END) AS satisfied_skip_groups
COUNT(DISTINCT CASE WHEN is_satisfied AND action = 'SKIP' THEN or_group_id END) AS satisfied_skip_groups,
COUNT(DISTINCT CASE WHEN action = 'CREATE_MATCH' THEN or_group_id END) AS total_create_match_groups,
COUNT(DISTINCT CASE WHEN is_satisfied AND action = 'CREATE_MATCH' THEN or_group_id END) AS satisfied_create_match_groups
FROM v1_match_condition main
WHERE v1_match_id = ANY($1::bigint[])
GROUP BY v1_match_id
), result_matches AS (
SELECT
m.id, m.tenant_id, m.kind, m.is_satisfied, m.signal_task_id, m.signal_task_inserted_at, m.signal_external_id, m.signal_key, m.trigger_dag_id, m.trigger_dag_inserted_at, m.trigger_step_id, m.trigger_step_index, m.trigger_external_id, m.trigger_workflow_run_id, m.trigger_parent_task_external_id, m.trigger_parent_task_id, m.trigger_parent_task_inserted_at, m.trigger_child_index, m.trigger_child_key, m.trigger_existing_task_id, m.trigger_existing_task_inserted_at,
m.id, m.tenant_id, m.kind, m.is_satisfied, m.existing_data, m.signal_task_id, m.signal_task_inserted_at, m.signal_external_id, m.signal_key, m.trigger_dag_id, m.trigger_dag_inserted_at, m.trigger_step_id, m.trigger_step_index, m.trigger_external_id, m.trigger_workflow_run_id, m.trigger_parent_task_external_id, m.trigger_parent_task_id, m.trigger_parent_task_inserted_at, m.trigger_child_index, m.trigger_child_key, m.trigger_existing_task_id, m.trigger_existing_task_inserted_at,
CASE WHEN
(mc.total_create_groups > 0 AND mc.total_create_groups = mc.satisfied_create_groups) THEN 'CREATE'
WHEN (mc.total_queue_groups > 0 AND mc.total_queue_groups = mc.satisfied_queue_groups) THEN 'QUEUE'
(mc.total_skip_groups > 0 AND mc.total_skip_groups = mc.satisfied_skip_groups) THEN 'SKIP'
WHEN (mc.total_cancel_groups > 0 AND mc.total_cancel_groups = mc.satisfied_cancel_groups) THEN 'CANCEL'
WHEN (mc.total_skip_groups > 0 AND mc.total_skip_groups = mc.satisfied_skip_groups) THEN 'SKIP'
WHEN (mc.total_create_groups > 0 AND mc.total_create_groups = mc.satisfied_create_groups) THEN 'CREATE'
WHEN (mc.total_queue_groups > 0 AND mc.total_queue_groups = mc.satisfied_queue_groups) THEN 'QUEUE'
WHEN (mc.total_create_match_groups > 0 AND mc.total_create_match_groups = mc.satisfied_create_match_groups) THEN 'CREATE_MATCH'
END::v1_match_condition_action AS action
FROM
v1_match m
@@ -315,6 +319,7 @@ WITH match_counts AS (
OR (mc.total_queue_groups > 0 AND mc.total_queue_groups = mc.satisfied_queue_groups)
OR (mc.total_cancel_groups > 0 AND mc.total_cancel_groups = mc.satisfied_cancel_groups)
OR (mc.total_skip_groups > 0 AND mc.total_skip_groups = mc.satisfied_skip_groups)
OR (mc.total_create_match_groups > 0 AND mc.total_create_match_groups = mc.satisfied_create_match_groups)
)
), locked_conditions AS (
SELECT
@@ -362,41 +367,39 @@ WITH match_counts AS (
id IN (SELECT id FROM deleted_conditions)
)
SELECT
result_matches.id, tenant_id, kind, is_satisfied, signal_task_id, signal_task_inserted_at, signal_external_id, signal_key, trigger_dag_id, trigger_dag_inserted_at, trigger_step_id, trigger_step_index, trigger_external_id, trigger_workflow_run_id, trigger_parent_task_external_id, trigger_parent_task_id, trigger_parent_task_inserted_at, trigger_child_index, trigger_child_key, trigger_existing_task_id, trigger_existing_task_inserted_at, result_matches.action, d.id, d.action, mc_aggregated_data,
d.mc_aggregated_data
rm.id, rm.tenant_id, rm.kind, rm.is_satisfied, rm.existing_data, rm.signal_task_id, rm.signal_task_inserted_at, rm.signal_external_id, rm.signal_key, rm.trigger_dag_id, rm.trigger_dag_inserted_at, rm.trigger_step_id, rm.trigger_step_index, rm.trigger_external_id, rm.trigger_workflow_run_id, rm.trigger_parent_task_external_id, rm.trigger_parent_task_id, rm.trigger_parent_task_inserted_at, rm.trigger_child_index, rm.trigger_child_key, rm.trigger_existing_task_id, rm.trigger_existing_task_inserted_at, rm.action,
COALESCE(rm.existing_data || d.mc_aggregated_data, d.mc_aggregated_data)::jsonb AS mc_aggregated_data
FROM
result_matches
result_matches rm
LEFT JOIN
matches_with_data d ON result_matches.id = d.id
matches_with_data d ON rm.id = d.id
`

type SaveSatisfiedMatchConditionsRow struct {
ID int64 `json:"id"`
TenantID pgtype.UUID `json:"tenant_id"`
Kind V1MatchKind `json:"kind"`
IsSatisfied bool `json:"is_satisfied"`
SignalTaskID pgtype.Int8 `json:"signal_task_id"`
SignalTaskInsertedAt pgtype.Timestamptz `json:"signal_task_inserted_at"`
SignalExternalID pgtype.UUID `json:"signal_external_id"`
SignalKey pgtype.Text `json:"signal_key"`
TriggerDagID pgtype.Int8 `json:"trigger_dag_id"`
TriggerDagInsertedAt pgtype.Timestamptz `json:"trigger_dag_inserted_at"`
TriggerStepID pgtype.UUID `json:"trigger_step_id"`
TriggerStepIndex pgtype.Int8 `json:"trigger_step_index"`
TriggerExternalID pgtype.UUID `json:"trigger_external_id"`
TriggerWorkflowRunID pgtype.UUID `json:"trigger_workflow_run_id"`
TriggerParentTaskExternalID pgtype.UUID `json:"trigger_parent_task_external_id"`
TriggerParentTaskID pgtype.Int8 `json:"trigger_parent_task_id"`
TriggerParentTaskInsertedAt pgtype.Timestamptz `json:"trigger_parent_task_inserted_at"`
TriggerChildIndex pgtype.Int8 `json:"trigger_child_index"`
TriggerChildKey pgtype.Text `json:"trigger_child_key"`
TriggerExistingTaskID pgtype.Int8 `json:"trigger_existing_task_id"`
TriggerExistingTaskInsertedAt pgtype.Timestamptz `json:"trigger_existing_task_inserted_at"`
Action V1MatchConditionAction `json:"action"`
ID_2 pgtype.Int8 `json:"id_2"`
Action_2 NullV1MatchConditionAction `json:"action_2"`
McAggregatedData []byte `json:"mc_aggregated_data"`
McAggregatedData_2 []byte `json:"mc_aggregated_data_2"`
ID int64 `json:"id"`
TenantID pgtype.UUID `json:"tenant_id"`
Kind V1MatchKind `json:"kind"`
IsSatisfied bool `json:"is_satisfied"`
ExistingData []byte `json:"existing_data"`
SignalTaskID pgtype.Int8 `json:"signal_task_id"`
SignalTaskInsertedAt pgtype.Timestamptz `json:"signal_task_inserted_at"`
SignalExternalID pgtype.UUID `json:"signal_external_id"`
SignalKey pgtype.Text `json:"signal_key"`
TriggerDagID pgtype.Int8 `json:"trigger_dag_id"`
TriggerDagInsertedAt pgtype.Timestamptz `json:"trigger_dag_inserted_at"`
TriggerStepID pgtype.UUID `json:"trigger_step_id"`
TriggerStepIndex pgtype.Int8 `json:"trigger_step_index"`
TriggerExternalID pgtype.UUID `json:"trigger_external_id"`
TriggerWorkflowRunID pgtype.UUID `json:"trigger_workflow_run_id"`
TriggerParentTaskExternalID pgtype.UUID `json:"trigger_parent_task_external_id"`
TriggerParentTaskID pgtype.Int8 `json:"trigger_parent_task_id"`
TriggerParentTaskInsertedAt pgtype.Timestamptz `json:"trigger_parent_task_inserted_at"`
TriggerChildIndex pgtype.Int8 `json:"trigger_child_index"`
TriggerChildKey pgtype.Text `json:"trigger_child_key"`
TriggerExistingTaskID pgtype.Int8 `json:"trigger_existing_task_id"`
TriggerExistingTaskInsertedAt pgtype.Timestamptz `json:"trigger_existing_task_inserted_at"`
Action V1MatchConditionAction `json:"action"`
McAggregatedData []byte `json:"mc_aggregated_data"`
}

// NOTE: we have to break this into a separate query because CTEs can't see modified rows
@@ -417,6 +420,7 @@ func (q *Queries) SaveSatisfiedMatchConditions(ctx context.Context, db DBTX, mat
&i.TenantID,
&i.Kind,
&i.IsSatisfied,
&i.ExistingData,
&i.SignalTaskID,
&i.SignalTaskInsertedAt,
&i.SignalExternalID,
@@ -435,10 +439,7 @@ func (q *Queries) SaveSatisfiedMatchConditions(ctx context.Context, db DBTX, mat
&i.TriggerExistingTaskID,
&i.TriggerExistingTaskInsertedAt,
&i.Action,
&i.ID_2,
&i.Action_2,
&i.McAggregatedData,
&i.McAggregatedData_2,
); err != nil {
return nil, err
}

@@ -1005,10 +1005,11 @@ func (ns NullV1LogLineLevel) Value() (driver.Value, error) {
type V1MatchConditionAction string

const (
V1MatchConditionActionCREATE V1MatchConditionAction = "CREATE"
V1MatchConditionActionQUEUE V1MatchConditionAction = "QUEUE"
V1MatchConditionActionCANCEL V1MatchConditionAction = "CANCEL"
V1MatchConditionActionSKIP V1MatchConditionAction = "SKIP"
V1MatchConditionActionCREATE V1MatchConditionAction = "CREATE"
V1MatchConditionActionQUEUE V1MatchConditionAction = "QUEUE"
V1MatchConditionActionCANCEL V1MatchConditionAction = "CANCEL"
V1MatchConditionActionSKIP V1MatchConditionAction = "SKIP"
V1MatchConditionActionCREATEMATCH V1MatchConditionAction = "CREATE_MATCH"
)
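
For reference, a minimal sketch of branching on the widened enum; the returned strings are illustrative placeholders, not Hatchet's actual handling of each action:

func describeMatchAction(action V1MatchConditionAction) string {
	switch action {
	case V1MatchConditionActionCREATE:
		return "create the task"
	case V1MatchConditionActionQUEUE:
		return "queue the task"
	case V1MatchConditionActionCANCEL:
		return "cancel the task"
	case V1MatchConditionActionSKIP:
		return "skip the task"
	case V1MatchConditionActionCREATEMATCH:
		return "create a follow-up match instead of queueing"
	default:
		return "unknown action"
	}
}
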

func (e *V1MatchConditionAction) Scan(src interface{}) error {
@@ -1217,6 +1218,49 @@ func (ns NullV1StatusKind) Value() (driver.Value, error) {
return string(ns.V1StatusKind), nil
}

type V1StepMatchConditionKind string

const (
V1StepMatchConditionKindPARENTOVERRIDE V1StepMatchConditionKind = "PARENT_OVERRIDE"
V1StepMatchConditionKindUSEREVENT V1StepMatchConditionKind = "USER_EVENT"
V1StepMatchConditionKindSLEEP V1StepMatchConditionKind = "SLEEP"
)

func (e *V1StepMatchConditionKind) Scan(src interface{}) error {
switch s := src.(type) {
case []byte:
*e = V1StepMatchConditionKind(s)
case string:
*e = V1StepMatchConditionKind(s)
default:
return fmt.Errorf("unsupported scan type for V1StepMatchConditionKind: %T", src)
}
return nil
}

type NullV1StepMatchConditionKind struct {
V1StepMatchConditionKind V1StepMatchConditionKind `json:"v1_step_match_condition_kind"`
Valid bool `json:"valid"` // Valid is true if V1StepMatchConditionKind is not NULL
}

// Scan implements the Scanner interface.
func (ns *NullV1StepMatchConditionKind) Scan(value interface{}) error {
if value == nil {
ns.V1StepMatchConditionKind, ns.Valid = "", false
return nil
}
ns.Valid = true
return ns.V1StepMatchConditionKind.Scan(value)
}

// Value implements the driver Valuer interface.
func (ns NullV1StepMatchConditionKind) Value() (driver.Value, error) {
if !ns.Valid {
return nil, nil
}
return string(ns.V1StepMatchConditionKind), nil
}

type V1StickyStrategy string

const (
@@ -2425,6 +2469,13 @@ type V1DagsOlap struct {
ParentTaskExternalID pgtype.UUID `json:"parent_task_external_id"`
}

type V1DurableSleep struct {
ID int64 `json:"id"`
TenantID pgtype.UUID `json:"tenant_id"`
SleepUntil pgtype.Timestamptz `json:"sleep_until"`
SleepDuration string `json:"sleep_duration"`
}

type V1LogLine struct {
ID int64 `json:"id"`
CreatedAt pgtype.Timestamptz `json:"created_at"`
@@ -2457,6 +2508,7 @@ type V1Match struct {
TenantID pgtype.UUID `json:"tenant_id"`
Kind V1MatchKind `json:"kind"`
IsSatisfied bool `json:"is_satisfied"`
ExistingData []byte `json:"existing_data"`
SignalTaskID pgtype.Int8 `json:"signal_task_id"`
SignalTaskInsertedAt pgtype.Timestamptz `json:"signal_task_inserted_at"`
SignalExternalID pgtype.UUID `json:"signal_external_id"`
@@ -2560,6 +2612,20 @@ type V1StepConcurrency struct {
MaxConcurrency int32 `json:"max_concurrency"`
}

type V1StepMatchCondition struct {
ID int64 `json:"id"`
TenantID pgtype.UUID `json:"tenant_id"`
StepID pgtype.UUID `json:"step_id"`
ReadableDataKey string `json:"readable_data_key"`
Action V1MatchConditionAction `json:"action"`
OrGroupID pgtype.UUID `json:"or_group_id"`
Expression pgtype.Text `json:"expression"`
Kind V1StepMatchConditionKind `json:"kind"`
SleepDuration pgtype.Text `json:"sleep_duration"`
EventKey pgtype.Text `json:"event_key"`
ParentReadableID pgtype.Text `json:"parent_readable_id"`
}

type V1Task struct {
ID int64 `json:"id"`
InsertedAt pgtype.Timestamptz `json:"inserted_at"`

39
pkg/repository/v1/sqlcv1/sleep.sql
Normal file
@@ -0,0 +1,39 @@

-- name: CreateDurableSleep :many
WITH input AS (
SELECT
sleep_duration
FROM (
SELECT
unnest(@sleep_durations::text[]) as sleep_duration
) as subquery
)
INSERT INTO
v1_durable_sleep (tenant_id, sleep_until, sleep_duration)
SELECT
@tenant_id::uuid,
CURRENT_TIMESTAMP + convert_duration_to_interval(sleep_duration),
sleep_duration
FROM
input
RETURNING *;

-- name: PopDurableSleep :many
WITH to_delete AS (
SELECT
*
FROM
v1_durable_sleep
WHERE
tenant_id = @tenant_id::uuid
AND sleep_until <= CURRENT_TIMESTAMP
ORDER BY
id ASC
LIMIT
COALESCE(sqlc.narg('limit')::integer, 1000)
FOR UPDATE
)
DELETE FROM
v1_durable_sleep
WHERE
(tenant_id, sleep_until, id) IN (SELECT tenant_id, sleep_until, id FROM to_delete)
RETURNING *;

114
pkg/repository/v1/sqlcv1/sleep.sql.go
Normal file
@@ -0,0 +1,114 @@

// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.24.0
// source: sleep.sql

package sqlcv1

import (
"context"

"github.com/jackc/pgx/v5/pgtype"
)

const createDurableSleep = `-- name: CreateDurableSleep :many
WITH input AS (
SELECT
sleep_duration
FROM (
SELECT
unnest($2::text[]) as sleep_duration
) as subquery
)
INSERT INTO
v1_durable_sleep (tenant_id, sleep_until, sleep_duration)
SELECT
$1::uuid,
CURRENT_TIMESTAMP + convert_duration_to_interval(sleep_duration),
sleep_duration
FROM
input
RETURNING id, tenant_id, sleep_until, sleep_duration
`

type CreateDurableSleepParams struct {
TenantID pgtype.UUID `json:"tenant_id"`
SleepDurations []string `json:"sleep_durations"`
}

func (q *Queries) CreateDurableSleep(ctx context.Context, db DBTX, arg CreateDurableSleepParams) ([]*V1DurableSleep, error) {
rows, err := db.Query(ctx, createDurableSleep, arg.TenantID, arg.SleepDurations)
if err != nil {
return nil, err
}
defer rows.Close()
var items []*V1DurableSleep
for rows.Next() {
var i V1DurableSleep
if err := rows.Scan(
&i.ID,
&i.TenantID,
&i.SleepUntil,
&i.SleepDuration,
); err != nil {
return nil, err
}
items = append(items, &i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}

const popDurableSleep = `-- name: PopDurableSleep :many
WITH to_delete AS (
SELECT
id, tenant_id, sleep_until, sleep_duration
FROM
v1_durable_sleep
WHERE
tenant_id = $1::uuid
AND sleep_until <= CURRENT_TIMESTAMP
ORDER BY
id ASC
LIMIT
COALESCE($2::integer, 1000)
FOR UPDATE
)
DELETE FROM
v1_durable_sleep
WHERE
(tenant_id, sleep_until, id) IN (SELECT tenant_id, sleep_until, id FROM to_delete)
RETURNING id, tenant_id, sleep_until, sleep_duration
`

type PopDurableSleepParams struct {
TenantID pgtype.UUID `json:"tenant_id"`
Limit pgtype.Int4 `json:"limit"`
}

func (q *Queries) PopDurableSleep(ctx context.Context, db DBTX, arg PopDurableSleepParams) ([]*V1DurableSleep, error) {
rows, err := db.Query(ctx, popDurableSleep, arg.TenantID, arg.Limit)
if err != nil {
return nil, err
}
defer rows.Close()
var items []*V1DurableSleep
for rows.Next() {
var i V1DurableSleep
if err := rows.Scan(
&i.ID,
&i.TenantID,
&i.SleepUntil,
&i.SleepDuration,
); err != nil {
return nil, err
}
items = append(items, &i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
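
A minimal usage sketch for the two generated queries, assuming it lives alongside them in package sqlcv1 with a DBTX in hand; the durations and limit are illustrative:

func scheduleAndDrainSleeps(ctx context.Context, q *Queries, db DBTX, tenantID pgtype.UUID) error {
	// enqueue durable sleeps; each duration string is parsed server-side by
	// convert_duration_to_interval to compute sleep_until
	if _, err := q.CreateDurableSleep(ctx, db, CreateDurableSleepParams{
		TenantID:       tenantID,
		SleepDurations: []string{"15s", "1m"},
	}); err != nil {
		return err
	}

	// later: atomically delete and return every sleep whose deadline has passed
	expired, err := q.PopDurableSleep(ctx, db, PopDurableSleepParams{
		TenantID: tenantID,
		Limit:    pgtype.Int4{Int32: 100, Valid: true},
	})
	if err != nil {
		return err
	}

	for _, s := range expired {
		_ = s.SleepDuration // hand each expired sleep to the event-matching layer
	}

	return nil
}
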

@@ -16,6 +16,7 @@ sql:
- olap.sql
- rate_limits.sql
- log_line.sql
- sleep.sql
- ticker.sql
schema:
- ../../../../sql/schema/v0.sql

@@ -5,7 +5,8 @@ WITH steps AS (
wv."id" as "workflowVersionId",
w."name" as "workflowName",
w."id" as "workflowId",
j."kind" as "jobKind"
j."kind" as "jobKind",
COUNT(mc.id) as "matchConditionCount"
FROM
"WorkflowVersion" as wv
JOIN
@@ -14,11 +15,15 @@ WITH steps AS (
"Job" j ON j."workflowVersionId" = wv."id"
JOIN
"Step" s ON s."jobId" = j."id"
LEFT JOIN
v1_step_match_condition mc ON mc.step_id = s."id"
WHERE
wv."id" = ANY(@ids::uuid[])
AND w."tenantId" = @tenantId::uuid
AND w."deletedAt" IS NULL
AND wv."deletedAt" IS NULL
GROUP BY
s."id", wv."id", w."name", w."id", j."kind"
), step_orders AS (
SELECT
so."B" as "stepId",
@@ -80,3 +85,405 @@ SELECT id, name
FROM "Workflow"
WHERE id = ANY(@ids::uuid[])
;


-- name: CreateWorkflow :one
INSERT INTO "Workflow" (
"id",
"createdAt",
"updatedAt",
"deletedAt",
"tenantId",
"name",
"description"
) VALUES (
@id::uuid,
coalesce(sqlc.narg('createdAt')::timestamp, CURRENT_TIMESTAMP),
coalesce(sqlc.narg('updatedAt')::timestamp, CURRENT_TIMESTAMP),
@deletedAt::timestamp,
@tenantId::uuid,
@name::text,
@description::text
) RETURNING *;

-- name: CreateWorkflowVersion :one
INSERT INTO "WorkflowVersion" (
"id",
"createdAt",
"updatedAt",
"deletedAt",
"checksum",
"version",
"workflowId",
"scheduleTimeout",
"sticky",
"kind"
) VALUES (
@id::uuid,
coalesce(sqlc.narg('createdAt')::timestamp, CURRENT_TIMESTAMP),
coalesce(sqlc.narg('updatedAt')::timestamp, CURRENT_TIMESTAMP),
@deletedAt::timestamp,
@checksum::text,
sqlc.narg('version')::text,
@workflowId::uuid,
-- Deprecated: this is set but unused
'5m',
sqlc.narg('sticky')::"StickyStrategy",
coalesce(sqlc.narg('kind')::"WorkflowKind", 'DAG')
) RETURNING *;

-- name: CreateJob :one
INSERT INTO "Job" (
"id",
"createdAt",
"updatedAt",
"deletedAt",
"tenantId",
"workflowVersionId",
"name",
"description",
"timeout",
"kind"
) VALUES (
@id::uuid,
coalesce(sqlc.narg('createdAt')::timestamp, CURRENT_TIMESTAMP),
coalesce(sqlc.narg('updatedAt')::timestamp, CURRENT_TIMESTAMP),
@deletedAt::timestamp,
@tenantId::uuid,
@workflowVersionId::uuid,
@name::text,
@description::text,
-- Deprecated: this is set but unused
'5m',
coalesce(sqlc.narg('kind')::"JobKind", 'DEFAULT')
) RETURNING *;

-- name: UpsertAction :one
INSERT INTO "Action" (
"id",
"actionId",
"tenantId"
)
VALUES (
gen_random_uuid(),
LOWER(@action::text),
@tenantId::uuid
)
ON CONFLICT ("tenantId", "actionId") DO UPDATE
SET
"tenantId" = EXCLUDED."tenantId"
WHERE
"Action"."tenantId" = @tenantId AND "Action"."actionId" = LOWER(@action::text)
RETURNING *;

-- name: CreateWorkflowTriggers :one
INSERT INTO "WorkflowTriggers" (
"id",
"createdAt",
"updatedAt",
"deletedAt",
"workflowVersionId",
"tenantId"
) VALUES (
@id::uuid,
CURRENT_TIMESTAMP,
CURRENT_TIMESTAMP,
NULL,
@workflowVersionId::uuid,
@tenantId::uuid
) RETURNING *;

-- name: CreateWorkflowTriggerEventRef :one
INSERT INTO "WorkflowTriggerEventRef" (
"parentId",
"eventKey"
) VALUES (
@workflowTriggersId::uuid,
@eventTrigger::text
) RETURNING *;

-- name: CreateWorkflowTriggerCronRef :one
INSERT INTO "WorkflowTriggerCronRef" (
"parentId",
"cron",
"name",
"input",
"additionalMetadata",
"id",
"method"
) VALUES (
@workflowTriggersId::uuid,
@cronTrigger::text,
sqlc.narg('name')::text,
sqlc.narg('input')::jsonb,
sqlc.narg('additionalMetadata')::jsonb,
gen_random_uuid(),
COALESCE(sqlc.narg('method')::"WorkflowTriggerCronRefMethods", 'DEFAULT')
) RETURNING *;

-- name: CreateWorkflowConcurrency :one
INSERT INTO "WorkflowConcurrency" (
"id",
"createdAt",
"updatedAt",
"workflowVersionId",
"getConcurrencyGroupId",
"maxRuns",
"limitStrategy",
"concurrencyGroupExpression"
) VALUES (
gen_random_uuid(),
coalesce(sqlc.narg('createdAt')::timestamp, CURRENT_TIMESTAMP),
coalesce(sqlc.narg('updatedAt')::timestamp, CURRENT_TIMESTAMP),
@workflowVersionId::uuid,
sqlc.narg('getConcurrencyGroupId')::uuid,
coalesce(sqlc.narg('maxRuns')::integer, 1),
coalesce(sqlc.narg('limitStrategy')::"ConcurrencyLimitStrategy", 'CANCEL_IN_PROGRESS'),
sqlc.narg('concurrencyGroupExpression')::text
) RETURNING *;

-- name: LinkOnFailureJob :one
UPDATE "WorkflowVersion"
SET "onFailureJobId" = @jobId::uuid
WHERE "id" = @workflowVersionId::uuid
RETURNING *;

-- name: CreateStep :one
INSERT INTO "Step" (
"id",
"createdAt",
"updatedAt",
"deletedAt",
"readableId",
"tenantId",
"jobId",
"actionId",
"timeout",
"customUserData",
"retries",
"scheduleTimeout",
"retryBackoffFactor",
"retryMaxBackoff"
) VALUES (
@id::uuid,
coalesce(sqlc.narg('createdAt')::timestamp, CURRENT_TIMESTAMP),
coalesce(sqlc.narg('updatedAt')::timestamp, CURRENT_TIMESTAMP),
@deletedAt::timestamp,
@readableId::text,
@tenantId::uuid,
@jobId::uuid,
@actionId::text,
sqlc.narg('timeout')::text,
coalesce(sqlc.narg('customUserData')::jsonb, '{}'),
coalesce(sqlc.narg('retries')::integer, 0),
coalesce(sqlc.narg('scheduleTimeout')::text, '5m'),
sqlc.narg('retryBackoffFactor'),
sqlc.narg('retryMaxBackoff')
) RETURNING *;

-- name: AddStepParents :exec
INSERT INTO "_StepOrder" ("A", "B")
SELECT
step."id",
@id::uuid
FROM
unnest(@parents::text[]) AS parent_readable_id
JOIN
"Step" AS step ON step."readableId" = parent_readable_id AND step."jobId" = @jobId::uuid;

-- name: CreateStepRateLimit :one
INSERT INTO "StepRateLimit" (
"units",
"stepId",
"rateLimitKey",
"tenantId",
"kind"
) VALUES (
@units::integer,
@stepId::uuid,
@rateLimitKey::text,
@tenantId::uuid,
@kind
) RETURNING *;

-- name: CreateStepExpressions :exec
INSERT INTO "StepExpression" (
"key",
"stepId",
"expression",
"kind"
) VALUES (
unnest(@keys::text[]),
@stepId::uuid,
unnest(@expressions::text[]),
unnest(cast(@kinds::text[] as "StepExpressionKind"[]))
) ON CONFLICT ("key", "stepId", "kind") DO UPDATE
SET
"expression" = EXCLUDED."expression";

-- name: UpsertDesiredWorkerLabel :one
INSERT INTO "StepDesiredWorkerLabel" (
"createdAt",
"updatedAt",
"stepId",
"key",
"intValue",
"strValue",
"required",
"weight",
"comparator"
) VALUES (
CURRENT_TIMESTAMP,
CURRENT_TIMESTAMP,
@stepId::uuid,
@key::text,
COALESCE(sqlc.narg('intValue')::int, NULL),
COALESCE(sqlc.narg('strValue')::text, NULL),
COALESCE(sqlc.narg('required')::boolean, false),
COALESCE(sqlc.narg('weight')::int, 100),
COALESCE(sqlc.narg('comparator')::"WorkerLabelComparator", 'EQUAL')
) ON CONFLICT ("stepId", "key") DO UPDATE
SET
"updatedAt" = CURRENT_TIMESTAMP,
"intValue" = COALESCE(sqlc.narg('intValue')::int, null),
"strValue" = COALESCE(sqlc.narg('strValue')::text, null),
"required" = COALESCE(sqlc.narg('required')::boolean, false),
"weight" = COALESCE(sqlc.narg('weight')::int, 100),
"comparator" = COALESCE(sqlc.narg('comparator')::"WorkerLabelComparator", 'EQUAL')
RETURNING *;

-- name: GetWorkflowVersionForEngine :many
SELECT
sqlc.embed(workflowVersions),
w."name" as "workflowName",
wc."limitStrategy" as "concurrencyLimitStrategy",
wc."maxRuns" as "concurrencyMaxRuns",
wc."getConcurrencyGroupId" as "concurrencyGroupId",
wc."concurrencyGroupExpression" as "concurrencyGroupExpression"
FROM
"WorkflowVersion" as workflowVersions
JOIN
"Workflow" as w ON w."id" = workflowVersions."workflowId"
LEFT JOIN
"WorkflowConcurrency" as wc ON wc."workflowVersionId" = workflowVersions."id"
WHERE
workflowVersions."id" = ANY(@ids::uuid[]) AND
w."tenantId" = @tenantId::uuid AND
w."deletedAt" IS NULL AND
workflowVersions."deletedAt" IS NULL;

-- name: GetWorkflowByName :one
SELECT
*
FROM
"Workflow" as workflows
WHERE
workflows."tenantId" = @tenantId::uuid AND
workflows."name" = @name::text AND
workflows."deletedAt" IS NULL;

-- name: MoveCronTriggerToNewWorkflowTriggers :exec
WITH triggersToUpdate AS (
SELECT cronTrigger."id" FROM "WorkflowTriggerCronRef" cronTrigger
JOIN "WorkflowTriggers" triggers ON triggers."id" = cronTrigger."parentId"
WHERE triggers."workflowVersionId" = @oldWorkflowVersionId::uuid
AND cronTrigger."method" = 'API'
)
UPDATE "WorkflowTriggerCronRef"
SET "parentId" = @newWorkflowTriggerId::uuid
WHERE "id" IN (SELECT "id" FROM triggersToUpdate);

-- name: MoveScheduledTriggerToNewWorkflowTriggers :exec
WITH triggersToUpdate AS (
SELECT scheduledTrigger."id" FROM "WorkflowTriggerScheduledRef" scheduledTrigger
JOIN "WorkflowTriggers" triggers ON triggers."id" = scheduledTrigger."parentId"
WHERE triggers."workflowVersionId" = @oldWorkflowVersionId::uuid
AND scheduledTrigger."method" = 'API'
)
UPDATE "WorkflowTriggerScheduledRef"
SET "parentId" = @newWorkflowTriggerId::uuid
WHERE "id" IN (SELECT "id" FROM triggersToUpdate);

-- name: GetLatestWorkflowVersionForWorkflows :many
WITH latest_versions AS (
SELECT DISTINCT ON (workflowVersions."workflowId")
workflowVersions."id" AS workflowVersionId,
workflowVersions."workflowId",
workflowVersions."order"
FROM
"WorkflowVersion" as workflowVersions
WHERE
workflowVersions."workflowId" = ANY(@workflowIds::uuid[]) AND
workflowVersions."deletedAt" IS NULL
ORDER BY
workflowVersions."workflowId", workflowVersions."order" DESC
)
SELECT
workflowVersions."id"
FROM
latest_versions
JOIN
"WorkflowVersion" as workflowVersions ON workflowVersions."id" = latest_versions.workflowVersionId
JOIN
"Workflow" as w ON w."id" = workflowVersions."workflowId"
LEFT JOIN
"WorkflowConcurrency" as wc ON wc."workflowVersionId" = workflowVersions."id"
WHERE
w."tenantId" = @tenantId::uuid AND
w."deletedAt" IS NULL AND
workflowVersions."deletedAt" IS NULL;

-- name: CreateStepConcurrency :one
INSERT INTO v1_step_concurrency (
workflow_id,
workflow_version_id,
step_id,
strategy,
expression,
tenant_id,
max_concurrency
)
VALUES (
@workflowId::uuid,
@workflowVersionId::uuid,
@stepId::uuid,
@strategy::text,
@expression::text,
@tenantId::uuid,
@maxConcurrency::integer
) RETURNING *;

-- name: CreateStepMatchCondition :one
INSERT INTO v1_step_match_condition (
tenant_id,
step_id,
readable_data_key,
action,
or_group_id,
expression,
kind,
sleep_duration,
event_key,
parent_readable_id
)
VALUES (
@tenantId::uuid,
@stepId::uuid,
@readableDataKey::text,
@action::v1_match_condition_action,
@orGroupId::uuid,
sqlc.narg('expression')::text,
@kind::v1_step_match_condition_kind,
sqlc.narg('sleepDuration')::text,
sqlc.narg('eventKey')::text,
sqlc.narg('parentReadableId')::text
) RETURNING *;

-- name: ListStepMatchConditions :many
SELECT
*
FROM
v1_step_match_condition
WHERE
step_id = ANY(@stepIds::uuid[])
AND tenant_id = @tenantId::uuid;

File diff suppressed because it is too large
@@ -70,7 +70,7 @@ type ReplayTasksResult struct {

UpsertedTasks []*sqlcv1.V1Task

InternalEventResults *InternalEventMatchResults
InternalEventResults *EventMatchResults
}

type ReplayTaskOpts struct {
@@ -107,6 +107,17 @@ type TaskIdInsertedAtRetryCount struct {
RetryCount int32
}

type TaskIdInsertedAtSignalKey struct {
// (required) the external id
Id int64 `validate:"required"`

// (required) the inserted at time
InsertedAt pgtype.Timestamptz

// (required) the signal key for the event
SignalKey string
}

type CompleteTaskOpts struct {
*TaskIdInsertedAtRetryCount

@@ -216,6 +227,8 @@ type TaskRepository interface {

ProcessTaskRetryQueueItems(ctx context.Context, tenantId string) ([]*sqlcv1.V1RetryQueueItem, bool, error)

ProcessDurableSleeps(ctx context.Context, tenantId string) (*EventMatchResults, bool, error)

GetQueueCounts(ctx context.Context, tenantId string) (map[string]int, error)

ReplayTasks(ctx context.Context, tenantId string, tasks []TaskIdInsertedAtRetryCount) (*ReplayTasksResult, error)
@@ -223,6 +236,8 @@ type TaskRepository interface {
RefreshTimeoutBy(ctx context.Context, tenantId string, opt RefreshTimeoutBy) (*sqlcv1.V1TaskRuntime, error)

ReleaseSlot(ctx context.Context, tenantId string, externalId string) (*sqlcv1.V1TaskRuntime, error)

ListSignalCompletedEvents(ctx context.Context, tenantId string, tasks []TaskIdInsertedAtSignalKey) ([]*sqlcv1.V1TaskEvent, error)
}

type TaskRepositoryImpl struct {
@@ -1097,6 +1112,63 @@ func (r *TaskRepositoryImpl) ProcessTaskRetryQueueItems(ctx context.Context, ten
return res, len(res) == limit, nil
}

type durableSleepEventData struct {
SleepDuration string `json:"sleep_duration"`
}

func (r *TaskRepositoryImpl) ProcessDurableSleeps(ctx context.Context, tenantId string) (*EventMatchResults, bool, error) {
tx, commit, rollback, err := sqlchelpers.PrepareTx(ctx, r.pool, r.l, 5000)

if err != nil {
return nil, false, err
}

defer rollback()

limit := 1000

emitted, err := r.queries.PopDurableSleep(ctx, tx, sqlcv1.PopDurableSleepParams{
TenantID: sqlchelpers.UUIDFromStr(tenantId),
Limit: pgtype.Int4{Int32: int32(limit), Valid: true},
})

if err != nil {
return nil, false, err
}

// each emitted item becomes a candidate event match for internal events
events := make([]CandidateEventMatch, 0, len(emitted))

for _, sleep := range emitted {
data, err := json.Marshal(durableSleepEventData{
SleepDuration: sleep.SleepDuration,
})

if err != nil {
return nil, false, err
}

events = append(events, CandidateEventMatch{
ID: uuid.New().String(),
EventTimestamp: time.Now(),
Key: getDurableSleepEventKey(sleep.ID),
Data: data,
})
}

results, err := r.processEventMatches(ctx, tx, tenantId, events, sqlcv1.V1EventTypeINTERNAL)

if err != nil {
return nil, false, err
}

if err := commit(ctx); err != nil {
return nil, false, err
}

return results, len(emitted) == limit, nil
}
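
Since the second return value reports whether a full batch was popped, a caller can loop until the backlog of expired sleeps is drained. A minimal sketch, assuming `repo` implements the TaskRepository interface above:

func drainDurableSleeps(ctx context.Context, repo TaskRepository, tenantId string) error {
	for {
		// hasMore is true when a full batch (1000) was popped, meaning more
		// expired sleeps may still be waiting
		_, hasMore, err := repo.ProcessDurableSleeps(ctx, tenantId)
		if err != nil {
			return err
		}
		if !hasMore {
			return nil
		}
	}
}
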

func (r *TaskRepositoryImpl) GetQueueCounts(ctx context.Context, tenantId string) (map[string]int, error) {
counts, err := r.queries.GetQueuedCounts(ctx, r.pool, sqlchelpers.UUIDFromStr(tenantId))

@@ -2410,12 +2482,15 @@ func (r *TaskRepositoryImpl) ReplayTasks(ctx context.Context, tenantId string, t
}

dagIdsToAllTasks := make(map[int64][]*sqlcv1.ListAllTasksInDagsRow)
stepIdsInDAGs := make([]pgtype.UUID, 0)

for _, task := range allTasksInDAGs {
if _, ok := dagIdsToAllTasks[task.DagID.Int64]; !ok {
dagIdsToAllTasks[task.DagID.Int64] = make([]*sqlcv1.ListAllTasksInDagsRow, 0)
}

stepIdsInDAGs = append(stepIdsInDAGs, task.StepID)

dagIdsToAllTasks[task.DagID.Int64] = append(dagIdsToAllTasks[task.DagID.Int64], task)
}

@@ -2494,6 +2569,26 @@ func (r *TaskRepositoryImpl) ReplayTasks(ctx context.Context, tenantId string, t
// we do not reset other match conditions (for example, ones which refer to completed events for tasks
// which are outside of this subtree). otherwise, we would end up in a state where these events would
// never be matched.
// if any steps have additional match conditions, query for the additional matches
stepsToAdditionalMatches := make(map[string][]*sqlcv1.V1StepMatchCondition)

if len(stepIdsInDAGs) > 0 {
additionalMatches, err := r.queries.ListStepMatchConditions(ctx, r.pool, sqlcv1.ListStepMatchConditionsParams{
Stepids: sqlchelpers.UniqueSet(stepIdsInDAGs),
Tenantid: sqlchelpers.UUIDFromStr(tenantId),
})

if err != nil {
return nil, fmt.Errorf("failed to list step match conditions: %w", err)
}

for _, match := range additionalMatches {
stepId := sqlchelpers.UUIDToStr(match.StepID)

stepsToAdditionalMatches[stepId] = append(stepsToAdditionalMatches[stepId], match)
}
}

for dagId, tasks := range dagIdsToChildTasks {
allTasks := dagIdsToAllTasks[dagId]

@@ -2537,6 +2632,12 @@ func (r *TaskRepositoryImpl) ReplayTasks(ctx context.Context, tenantId string, t

cancelGroupId := uuid.NewString()

additionalMatches, ok := stepsToAdditionalMatches[stepId]

if !ok {
additionalMatches = make([]*sqlcv1.V1StepMatchCondition, 0)
}

for _, parent := range task.Parents {
// FIXME: n^2 complexity here, fix it.
for _, otherTask := range allTasks {
@@ -2544,7 +2645,21 @@ func (r *TaskRepositoryImpl) ReplayTasks(ctx context.Context, tenantId string, t
parentExternalId := sqlchelpers.UUIDToStr(otherTask.ExternalID)
readableId := otherTask.StepReadableID

conditions = append(conditions, getParentInDAGGroupMatch(cancelGroupId, parentExternalId, readableId)...)
hasUserEventOrSleepMatches := false

parentOverrideMatches := make([]*sqlcv1.V1StepMatchCondition, 0)

for _, match := range additionalMatches {
if match.Kind == sqlcv1.V1StepMatchConditionKindPARENTOVERRIDE {
if match.ParentReadableID.String == readableId {
parentOverrideMatches = append(parentOverrideMatches, match)
}
} else {
hasUserEventOrSleepMatches = true
}
}

conditions = append(conditions, getParentInDAGGroupMatch(cancelGroupId, parentExternalId, readableId, parentOverrideMatches, hasUserEventOrSleepMatches)...)
}
}
}
@@ -2586,7 +2701,7 @@ func (r *TaskRepositoryImpl) ReplayTasks(ctx context.Context, tenantId string, t

// process event matches
// TODO: signal the event matches to the caller
internalMatchResults, err := r.processInternalEventMatches(ctx, tx, tenantId, candidateEvents)
internalMatchResults, err := r.processEventMatches(ctx, tx, tenantId, candidateEvents, sqlcv1.V1EventTypeINTERNAL)

if err != nil {
return nil, fmt.Errorf("failed to process internal event matches: %w", err)
@@ -2854,3 +2969,23 @@ func (r *TaskRepositoryImpl) ListTaskParentOutputs(ctx context.Context, tenantId

return resMap, nil
}

func (r *TaskRepositoryImpl) ListSignalCompletedEvents(ctx context.Context, tenantId string, tasks []TaskIdInsertedAtSignalKey) ([]*sqlcv1.V1TaskEvent, error) {
taskIds := make([]int64, 0)
taskInsertedAts := make([]pgtype.Timestamptz, 0)
eventKeys := make([]string, 0)

for _, task := range tasks {
taskIds = append(taskIds, task.Id)
taskInsertedAts = append(taskInsertedAts, task.InsertedAt)
eventKeys = append(eventKeys, task.SignalKey)
}

return r.queries.ListMatchingSignalEvents(ctx, r.pool, sqlcv1.ListMatchingSignalEventsParams{
Tenantid: sqlchelpers.UUIDFromStr(tenantId),
Eventtype: sqlcv1.V1TaskEventTypeSIGNALCOMPLETED,
Taskids: taskIds,
Taskinsertedats: taskInsertedAts,
Eventkeys: eventKeys,
})
}
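
A usage sketch for the new lookup, assuming the package's existing imports plus "time"; the task id, timestamp, and signal key are illustrative:

func lookupSignalEvents(ctx context.Context, repo TaskRepository, tenantId string) ([]*sqlcv1.V1TaskEvent, error) {
	return repo.ListSignalCompletedEvents(ctx, tenantId, []TaskIdInsertedAtSignalKey{
		{
			Id:         42, // task id (illustrative)
			InsertedAt: pgtype.Timestamptz{Time: time.Now(), Valid: true},
			SignalKey:  "signal-key-1", // signal key (illustrative)
		},
	})
}
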

@@ -335,6 +335,34 @@ func (r *TriggerRepositoryImpl) triggerWorkflows(ctx context.Context, tenantId s
stepIdsToReadableIds[sqlchelpers.UUIDToStr(step.ID)] = step.ReadableId.String
}

// if any steps have additional match conditions, query for the additional matches
stepsWithAdditionalMatchConditions := make([]pgtype.UUID, 0)

for _, step := range steps {
if step.MatchConditionCount > 0 {
stepsWithAdditionalMatchConditions = append(stepsWithAdditionalMatchConditions, step.ID)
}
}

stepsToAdditionalMatches := make(map[string][]*sqlcv1.V1StepMatchCondition)

if len(stepsWithAdditionalMatchConditions) > 0 {
additionalMatches, err := r.queries.ListStepMatchConditions(ctx, r.pool, sqlcv1.ListStepMatchConditionsParams{
Stepids: stepsWithAdditionalMatchConditions,
Tenantid: sqlchelpers.UUIDFromStr(tenantId),
})

if err != nil {
return nil, nil, fmt.Errorf("failed to list step match conditions: %w", err)
}

for _, match := range additionalMatches {
stepId := sqlchelpers.UUIDToStr(match.StepID)

stepsToAdditionalMatches[stepId] = append(stepsToAdditionalMatches[stepId], match)
}
}

// start constructing options for creating tasks, DAGs, and triggers. logic is as follows:
//
// 1. if a step does not have any parent steps, create a task
@@ -540,11 +568,31 @@ func (r *TriggerRepositoryImpl) triggerWorkflows(ctx context.Context, tenantId s

cancelGroupId := uuid.NewString()

additionalMatches, ok := stepsToAdditionalMatches[stepId]

if !ok {
additionalMatches = make([]*sqlcv1.V1StepMatchCondition, 0)
}

for _, parent := range step.Parents {
parentExternalId := stepsToExternalIds[i][sqlchelpers.UUIDToStr(parent)]
readableId := stepIdsToReadableIds[sqlchelpers.UUIDToStr(parent)]

conditions = append(conditions, getParentInDAGGroupMatch(cancelGroupId, parentExternalId, readableId)...)
hasUserEventOrSleepMatches := false

parentOverrideMatches := make([]*sqlcv1.V1StepMatchCondition, 0)

for _, match := range additionalMatches {
if match.Kind == sqlcv1.V1StepMatchConditionKindPARENTOVERRIDE {
if match.ParentReadableID.String == readableId {
parentOverrideMatches = append(parentOverrideMatches, match)
}
} else {
hasUserEventOrSleepMatches = true
}
}

conditions = append(conditions, getParentInDAGGroupMatch(cancelGroupId, parentExternalId, readableId, parentOverrideMatches, hasUserEventOrSleepMatches)...)
}

var (
@@ -950,27 +998,97 @@ func (r *TriggerRepositoryImpl) registerChildWorkflows(
return tuplesToSkip, nil
}

func getParentInDAGGroupMatch(cancelGroupId, parentExternalId, parentReadableId string) []GroupMatchCondition {
return []GroupMatchCondition{
{
// getParentInDAGGroupMatch encodes the following default behavior:
// - If all parents complete, the child task is created
// - If all parents are skipped, the child task is skipped
// - If parents are both created and skipped, the child is created
// - If any parent is cancelled, the child is cancelled
// - If any parent fails, the child is cancelled
//
// Users can override this behavior by setting their own skip and creation conditions.
func getParentInDAGGroupMatch(
cancelGroupId, parentExternalId, parentReadableId string,
parentOverrideMatches []*sqlcv1.V1StepMatchCondition,
hasUserEventOrSleepMatches bool,
) []GroupMatchCondition {
completeAction := sqlcv1.V1MatchConditionActionQUEUE

if hasUserEventOrSleepMatches {
completeAction = sqlcv1.V1MatchConditionActionCREATEMATCH
}

actionsToOverrides := make(map[sqlcv1.V1MatchConditionAction][]*sqlcv1.V1StepMatchCondition)

for _, match := range parentOverrideMatches {
actionsToOverrides[match.Action] = append(actionsToOverrides[match.Action], match)
}

res := []GroupMatchCondition{}

if len(actionsToOverrides[sqlcv1.V1MatchConditionActionQUEUE]) > 0 {
for _, override := range actionsToOverrides[sqlcv1.V1MatchConditionActionQUEUE] {
res = append(res, GroupMatchCondition{
GroupId: sqlchelpers.UUIDToStr(override.OrGroupID),
EventType: sqlcv1.V1EventTypeINTERNAL,
EventKey: string(sqlcv1.V1TaskEventTypeCOMPLETED),
ReadableDataKey: parentReadableId,
EventResourceHint: &parentExternalId,
Expression: override.Expression.String,
Action: completeAction,
})
}
} else {
res = append(res, GroupMatchCondition{
GroupId: uuid.NewString(),
EventType: sqlcv1.V1EventTypeINTERNAL,
EventKey: string(sqlcv1.V1TaskEventTypeCOMPLETED),
ReadableDataKey: parentReadableId,
EventResourceHint: &parentExternalId,
Expression: "!has(input.skipped) || (has(input.skipped) && !input.skipped)",
Action: sqlcv1.V1MatchConditionActionQUEUE,
},
{
// NOTE: complete match on skip takes precedence over queue, so we might meet all QUEUE conditions with a skipped
// parent but end up skipping anyway
Expression: "true",
Action: completeAction,
})
}

if len(actionsToOverrides[sqlcv1.V1MatchConditionActionSKIP]) > 0 {
for _, override := range actionsToOverrides[sqlcv1.V1MatchConditionActionSKIP] {
res = append(res, GroupMatchCondition{
GroupId: sqlchelpers.UUIDToStr(override.OrGroupID),
EventType: sqlcv1.V1EventTypeINTERNAL,
EventKey: string(sqlcv1.V1TaskEventTypeCOMPLETED),
ReadableDataKey: parentReadableId,
EventResourceHint: &parentExternalId,
Expression: override.Expression.String,
Action: sqlcv1.V1MatchConditionActionSKIP,
})
}
} else {
res = append(res, GroupMatchCondition{
GroupId: uuid.NewString(),
EventType: sqlcv1.V1EventTypeINTERNAL,
EventKey: string(sqlcv1.V1TaskEventTypeCOMPLETED),
ReadableDataKey: parentReadableId,
EventResourceHint: &parentExternalId,
Expression: "has(input.skipped) && input.skipped",
Expression: "has(output.skipped) && output.skipped",
Action: sqlcv1.V1MatchConditionActionSKIP,
},
{
})
}

if len(actionsToOverrides[sqlcv1.V1MatchConditionActionCANCEL]) > 0 {
for _, override := range actionsToOverrides[sqlcv1.V1MatchConditionActionCANCEL] {
res = append(res, GroupMatchCondition{
GroupId: sqlchelpers.UUIDToStr(override.OrGroupID),
EventType: sqlcv1.V1EventTypeINTERNAL,
EventKey: string(sqlcv1.V1TaskEventTypeFAILED),
ReadableDataKey: parentReadableId,
EventResourceHint: &parentExternalId,
Expression: override.Expression.String,
Action: sqlcv1.V1MatchConditionActionCANCEL,
})
}
} else {
res = append(res, GroupMatchCondition{
GroupId: cancelGroupId,
EventType: sqlcv1.V1EventTypeINTERNAL,
EventKey: string(sqlcv1.V1TaskEventTypeFAILED),
@@ -978,8 +1096,7 @@ func getParentInDAGGroupMatch(cancelGroupId, parentExternalId, parentReadableId
EventResourceHint: &parentExternalId,
Expression: "true",
Action: sqlcv1.V1MatchConditionActionCANCEL,
},
{
}, GroupMatchCondition{
GroupId: cancelGroupId,
EventType: sqlcv1.V1EventTypeINTERNAL,
EventKey: string(sqlcv1.V1TaskEventTypeCANCELLED),
@@ -987,8 +1104,10 @@ func getParentInDAGGroupMatch(cancelGroupId, parentExternalId, parentReadableId
EventResourceHint: &parentExternalId,
Expression: "true",
Action: sqlcv1.V1MatchConditionActionCANCEL,
},
})
}

return res
}
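
A sketch of the default call path, assuming it sits in the same package: with no parent overrides and no user-event or sleep conditions, parent completions resolve to the QUEUE action rather than CREATE_MATCH; "step-1" is an illustrative parent readable id:

func defaultParentConditions(parentExternalId string) []GroupMatchCondition {
	return getParentInDAGGroupMatch(
		uuid.NewString(), // cancel group shared by the FAILED and CANCELLED conditions
		parentExternalId,
		"step-1",
		nil,   // no PARENT_OVERRIDE conditions
		false, // no USER_EVENT or SLEEP conditions, so completions use QUEUE
	)
}
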

func getChildWorkflowGroupMatches(taskExternalId, stepReadableId string) []GroupMatchCondition {

@@ -2,12 +2,199 @@ package v1

import (
"context"
"errors"
"fmt"

"github.com/google/uuid"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgtype"

"github.com/hatchet-dev/hatchet/internal/cel"
"github.com/hatchet-dev/hatchet/internal/datautils"
"github.com/hatchet-dev/hatchet/internal/digest"
"github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers"
"github.com/hatchet-dev/hatchet/pkg/repository/v1/sqlcv1"
)

var ErrDagParentNotFound = errors.New("dag parent not found")

type CreateWorkflowVersionOpts struct {
// (required) the workflow name
Name string `validate:"required,hatchetName"`

// (optional) the workflow description
Description *string `json:"description,omitempty"`

// (optional) event triggers for the workflow
EventTriggers []string

// (optional) cron triggers for the workflow
CronTriggers []string `validate:"dive,cron"`

// (optional) the input bytes for the cron triggers
CronInput []byte

// (required) the tasks in the workflow
Tasks []CreateStepOpts `validate:"required,min=1,dive"`

OnFailure *CreateStepOpts `json:"onFailureJob,omitempty" validate:"omitempty"`

// (optional) the workflow concurrency groups
Concurrency *CreateConcurrencyOpts `json:"concurrency,omitempty" validator:"omitnil"`

// (optional) sticky strategy
Sticky *string `validate:"omitempty,oneof=SOFT HARD"`
}

type CreateCronWorkflowTriggerOpts struct {
// (required) the workflow id
WorkflowId string `validate:"required,uuid"`

// (required) the workflow name
Name string `validate:"required"`

Cron string `validate:"required,cron"`

Input map[string]interface{}
AdditionalMetadata map[string]interface{}
}

type CreateConcurrencyOpts struct {
// (optional) the maximum number of concurrent workflow runs, default 1
MaxRuns *int32

// (optional) the strategy to use when the concurrency limit is reached, default CANCEL_IN_PROGRESS
LimitStrategy *string `validate:"omitnil,oneof=CANCEL_IN_PROGRESS GROUP_ROUND_ROBIN CANCEL_NEWEST"`

// (required) a concurrency expression for evaluating the concurrency key
Expression string `validate:"celworkflowrunstr"`
}

func (o *CreateWorkflowVersionOpts) Checksum() (string, error) {
// compute a checksum for the workflow
declaredValues, err := datautils.ToJSONMap(o)

if err != nil {
return "", err
}

workflowChecksum, err := digest.DigestValues(declaredValues)

if err != nil {
return "", err
}

return workflowChecksum.String(), nil
}
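
Because identical declared values digest to the same checksum, a caller can compare against a stored version before writing. A minimal sketch; the helper and the `existing` argument are hypothetical:

func needsNewVersion(opts *CreateWorkflowVersionOpts, existing string) (bool, error) {
	sum, err := opts.Checksum()
	if err != nil {
		return false, err
	}
	// identical declared values digest identically, so a matching checksum
	// means the stored version can be reused as-is
	return sum != existing, nil
}
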

type CreateStepOpts struct {
	// (required) the task name
	ReadableId string `validate:"hatchetName"`

	// (required) the task action id
	Action string `validate:"required,actionId"`

	// (optional) the task timeout
	Timeout *string `validate:"omitnil,duration"`

	// (optional) the task scheduling timeout
	ScheduleTimeout *string `validate:"omitnil,duration"`

	// (optional) the parents that this step depends on
	Parents []string `validate:"dive,hatchetName"`

	// (optional) the maximum number of retries for this step
	Retries *int `validate:"omitempty,min=0"`

	// (optional) rate limits for this step
	RateLimits []CreateWorkflowStepRateLimitOpts `validate:"dive"`

	// (optional) desired worker affinity state for this step
	DesiredWorkerLabels map[string]DesiredWorkerLabelOpts `validate:"omitempty"`

	// (optional) the step retry backoff factor
	RetryBackoffFactor *float64 `validate:"omitnil,min=1,max=1000"`

	// (optional) the step retry backoff max seconds (can't be greater than 86400)
	RetryBackoffMaxSeconds *int `validate:"omitnil,min=1,max=86400"`

	// (optional) a list of additional trigger conditions
	TriggerConditions []CreateStepMatchConditionOpt `validate:"omitempty,dive"`

	// (optional) the step concurrency options
	Concurrency []CreateConcurrencyOpts `json:"concurrency,omitempty" validator:"omitnil"`
}

type CreateStepMatchConditionOpt struct {
	// (required) the kind of match condition for triggering the step
	MatchConditionKind string `validate:"required,oneof=PARENT_OVERRIDE USER_EVENT SLEEP"`

	// (required) the key for the event data when the workflow is triggered
	ReadableDataKey string `validate:"required"`

	// (required) the initial state for the task when the match condition is satisfied
	Action string `validate:"required,oneof=QUEUE CANCEL SKIP"`

	// (required) the OR group id for the match condition
	OrGroupId string `validate:"required,uuid"`

	// (optional) the expression for the match condition
	Expression string `validate:"omitempty"`

	// (optional) the sleep duration for the match condition, only set if this is a SLEEP condition
	SleepDuration *string `validate:"omitempty,duration"`

	// (optional) the event key for the match condition, only set if this is a USER_EVENT condition
	EventKey *string `validate:"omitempty"`

	// (optional) if this is a PARENT_OVERRIDE condition, this will be set to the parent readable_id
	// of the parent whose trigger behavior we're overriding
	ParentReadableId *string `validate:"omitempty"`
}
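
A hedged sketch of two conditions for one step: a 10-second durable sleep OR a payment event, either of which queues the task. Sharing the OrGroupId to express the OR follows the or_group_id comments in the condition proto; the keys and durations here are illustrative.

	sleepFor := "10s"
	eventKey := "payment:received"
	group := uuid.New().String()

	conditions := []CreateStepMatchConditionOpt{
		{
			MatchConditionKind: "SLEEP",
			ReadableDataKey:    "slept",
			Action:             "QUEUE",
			OrGroupId:          group,
			SleepDuration:      &sleepFor,
		},
		{
			MatchConditionKind: "USER_EVENT",
			ReadableDataKey:    "payment",
			Action:             "QUEUE",
			OrGroupId:          group,
			EventKey:           &eventKey,
		},
	}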

type DesiredWorkerLabelOpts struct {
	// (required) the label key
	Key string `validate:"required"`

	// (required if StrValue is nil) the label integer value
	IntValue *int32 `validate:"omitnil,required_without=StrValue"`

	// (required if IntValue is nil) the label string value
	StrValue *string `validate:"omitnil,required_without=IntValue"`

	// (optional) whether the label is required
	Required *bool `validate:"omitempty"`

	// (optional) the weight of the label for scheduling (default: 100)
	Weight *int32 `validate:"omitempty"`

	// (optional) the label comparator for scheduling (default: EQUAL)
	Comparator *string `validate:"omitempty,oneof=EQUAL NOT_EQUAL GREATER_THAN LESS_THAN GREATER_THAN_OR_EQUAL LESS_THAN_OR_EQUAL"`
}
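
As an illustration, a label asking the scheduler to prefer (and require) workers reporting at least 16 for a hypothetical gpu-memory key:

	gpuMem := int32(16)
	comparator := "GREATER_THAN_OR_EQUAL"
	weight := int32(200) // above the default of 100
	required := true

	labels := map[string]DesiredWorkerLabelOpts{
		"gpu-memory": {
			Key:        "gpu-memory",
			IntValue:   &gpuMem,
			Comparator: &comparator,
			Weight:     &weight,
			Required:   &required,
		},
	}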

type CreateWorkflowStepRateLimitOpts struct {
	// (required) the rate limit key
	Key string `validate:"required"`

	// (optional) a CEL expression for the rate limit key
	KeyExpr *string `validate:"omitnil,celsteprunstr,required_without=Key"`

	// (optional) the rate limit units to consume
	Units *int `validate:"omitnil,required_without=UnitsExpr"`

	// (optional) a CEL expression for the rate limit units
	UnitsExpr *string `validate:"omitnil,celsteprunstr,required_without=Units"`

	// (optional) a CEL expression for a dynamic limit value for the rate limit
	LimitExpr *string `validate:"omitnil,celsteprunstr"`

	// (optional) the rate limit duration, defaults to MINUTE
	Duration *string `validate:"omitnil,oneof=SECOND MINUTE HOUR DAY WEEK MONTH YEAR"`
}
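
Two hedged examples: a static limit consuming one unit per run against a shared key, and a dynamic per-customer key via a CEL expression. The `input` variable in the expression is an assumption about what the celsteprunstr validator exposes, not something this commit defines.

	units := 1
	keyExpr := `"customer:" + input.customer_id` // assumed CEL variable

	static := CreateWorkflowStepRateLimitOpts{Key: "external-api", Units: &units}

	dynamic := CreateWorkflowStepRateLimitOpts{
		Key:     "per-customer",
		KeyExpr: &keyExpr,
		Units:   &units,
	}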

type WorkflowRepository interface {
	ListWorkflowNamesByIds(ctx context.Context, tenantId string, workflowIds []pgtype.UUID) (map[pgtype.UUID]string, error)
	PutWorkflowVersion(ctx context.Context, tenantId string, opts *CreateWorkflowVersionOpts) (*sqlcv1.GetWorkflowVersionForEngineRow, error)
}

type workflowRepository struct {
@@ -35,3 +222,723 @@ func (w *workflowRepository) ListWorkflowNamesByIds(ctx context.Context, tenantI

	return workflowIdToNameMap, nil
}

type JobRunHasCycleError struct {
	JobName string
}

func (e *JobRunHasCycleError) Error() string {
	return fmt.Sprintf("job %s has a cycle", e.JobName)
}

func (r *workflowRepository) PutWorkflowVersion(ctx context.Context, tenantId string, opts *CreateWorkflowVersionOpts) (*sqlcv1.GetWorkflowVersionForEngineRow, error) {
	if err := r.v.Validate(opts); err != nil {
		return nil, err
	}

	if hasCycleV1(opts.Tasks) {
		return nil, &JobRunHasCycleError{
			JobName: opts.Name,
		}
	}

	var err error
	opts.Tasks, err = orderWorkflowStepsV1(opts.Tasks)

	if err != nil {
		return nil, err
	}

	tx, commit, rollback, err := sqlchelpers.PrepareTx(ctx, r.pool, r.l, 10000)

	if err != nil {
		return nil, err
	}

	defer rollback()

	pgTenantId := sqlchelpers.UUIDFromStr(tenantId)
	var workflowId pgtype.UUID
	var oldWorkflowVersion *sqlcv1.GetWorkflowVersionForEngineRow

	// check whether the workflow exists
	existingWorkflow, err := r.queries.GetWorkflowByName(ctx, r.pool, sqlcv1.GetWorkflowByNameParams{
		Tenantid: sqlchelpers.UUIDFromStr(tenantId),
		Name:     opts.Name,
	})

	switch {
	case err != nil && errors.Is(err, pgx.ErrNoRows):
		// create the workflow
		workflowId = sqlchelpers.UUIDFromStr(uuid.New().String())

		_, err = r.queries.CreateWorkflow(
			ctx,
			tx,
			sqlcv1.CreateWorkflowParams{
				ID:          workflowId,
				Tenantid:    pgTenantId,
				Name:        opts.Name,
				Description: *opts.Description,
			},
		)

		if err != nil {
			return nil, err
		}
	case err != nil:
		return nil, err
	case !existingWorkflow.ID.Valid:
		return nil, fmt.Errorf("invalid id for workflow %s", opts.Name)
	default:
		workflowId = existingWorkflow.ID

		// fetch the latest workflow version
		workflowVersionIds, err := r.queries.GetLatestWorkflowVersionForWorkflows(ctx, tx, sqlcv1.GetLatestWorkflowVersionForWorkflowsParams{
			Tenantid:    pgTenantId,
			Workflowids: []pgtype.UUID{workflowId},
		})

		if err != nil {
			return nil, err
		}

		if len(workflowVersionIds) != 1 {
			return nil, fmt.Errorf("expected 1 workflow version, got %d", len(workflowVersionIds))
		}

		workflowVersions, err := r.queries.GetWorkflowVersionForEngine(ctx, tx, sqlcv1.GetWorkflowVersionForEngineParams{
			Tenantid: pgTenantId,
			Ids:      []pgtype.UUID{workflowVersionIds[0]},
		})

		if err != nil {
			return nil, err
		}

		if len(workflowVersions) != 1 {
			return nil, fmt.Errorf("expected 1 workflow version, got %d", len(workflowVersions))
		}

		oldWorkflowVersion = workflowVersions[0]
	}

	workflowVersionId, err := r.createWorkflowVersionTxs(ctx, tx, pgTenantId, workflowId, opts, oldWorkflowVersion)

	if err != nil {
		return nil, err
	}

	workflowVersion, err := r.queries.GetWorkflowVersionForEngine(ctx, tx, sqlcv1.GetWorkflowVersionForEngineParams{
		Tenantid: pgTenantId,
		Ids:      []pgtype.UUID{sqlchelpers.UUIDFromStr(workflowVersionId)},
	})

	if err != nil {
		return nil, fmt.Errorf("failed to fetch workflow version: %w", err)
	}

	if len(workflowVersion) != 1 {
		return nil, fmt.Errorf("expected 1 workflow version when creating new, got %d", len(workflowVersion))
	}

	err = commit(ctx)

	if err != nil {
		return nil, err
	}

	return workflowVersion[0], nil
}
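
A hypothetical call site, sketching how the cycle error could be handled; repo, ctx, tenantId, and opts are assumed to be in scope:

	row, err := repo.PutWorkflowVersion(ctx, tenantId, opts)

	if err != nil {
		var cycleErr *JobRunHasCycleError
		if errors.As(err, &cycleErr) {
			// the declared parents form a cycle; reject the request
		}
		return err
	}

	_ = row.WorkflowVersion.ID // id of the newly created version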

func (r *workflowRepository) createWorkflowVersionTxs(ctx context.Context, tx sqlcv1.DBTX, tenantId, workflowId pgtype.UUID, opts *CreateWorkflowVersionOpts, oldWorkflowVersion *sqlcv1.GetWorkflowVersionForEngineRow) (string, error) {
	workflowVersionId := uuid.New().String()

	cs, err := opts.Checksum()

	if err != nil {
		return "", err
	}

	createParams := sqlcv1.CreateWorkflowVersionParams{
		ID:         sqlchelpers.UUIDFromStr(workflowVersionId),
		Checksum:   cs,
		Workflowid: workflowId,
	}

	if opts.Sticky != nil {
		createParams.Sticky = sqlcv1.NullStickyStrategy{
			StickyStrategy: sqlcv1.StickyStrategy(*opts.Sticky),
			Valid:          true,
		}
	}

	sqlcWorkflowVersion, err := r.queries.CreateWorkflowVersion(
		ctx,
		tx,
		createParams,
	)

	if err != nil {
		return "", err
	}

	_, err = r.createJobTx(ctx, tx, tenantId, workflowId, sqlcWorkflowVersion.ID, sqlcv1.JobKindDEFAULT, opts.Tasks)

	if err != nil {
		return "", err
	}

	// create the onFailure job if it exists
	if opts.OnFailure != nil {
		jobId, err := r.createJobTx(ctx, tx, tenantId, workflowId, sqlcWorkflowVersion.ID, sqlcv1.JobKindONFAILURE, []CreateStepOpts{*opts.OnFailure})

		if err != nil {
			return "", err
		}

		_, err = r.queries.LinkOnFailureJob(ctx, tx, sqlcv1.LinkOnFailureJobParams{
			Workflowversionid: sqlcWorkflowVersion.ID,
			Jobid:             sqlchelpers.UUIDFromStr(jobId),
		})

		if err != nil {
			return "", err
		}
	}

	// create the concurrency group
	// NOTE: we do this AFTER the creation of steps/jobs because we have a trigger which depends on the existence
	// of the jobs/steps to create the v1 concurrency groups
	if opts.Concurrency != nil {
		params := sqlcv1.CreateWorkflowConcurrencyParams{
			Workflowversionid:          sqlcWorkflowVersion.ID,
			ConcurrencyGroupExpression: sqlchelpers.TextFromStr(opts.Concurrency.Expression),
		}

		if opts.Concurrency.MaxRuns != nil {
			params.MaxRuns = sqlchelpers.ToInt(*opts.Concurrency.MaxRuns)
		}

		var ls sqlcv1.ConcurrencyLimitStrategy

		if opts.Concurrency.LimitStrategy != nil && *opts.Concurrency.LimitStrategy != "" {
			ls = sqlcv1.ConcurrencyLimitStrategy(*opts.Concurrency.LimitStrategy)
		} else {
			ls = sqlcv1.ConcurrencyLimitStrategyCANCELINPROGRESS
		}

		params.LimitStrategy = sqlcv1.NullConcurrencyLimitStrategy{
			Valid:                    true,
			ConcurrencyLimitStrategy: ls,
		}

		_, err = r.queries.CreateWorkflowConcurrency(
			ctx,
			tx,
			params,
		)

		if err != nil {
			return "", fmt.Errorf("could not create concurrency group: %w", err)
		}
	}

	// create the workflow triggers
	workflowTriggersId := uuid.New().String()

	sqlcWorkflowTriggers, err := r.queries.CreateWorkflowTriggers(
		ctx,
		tx,
		sqlcv1.CreateWorkflowTriggersParams{
			ID:                sqlchelpers.UUIDFromStr(workflowTriggersId),
			Workflowversionid: sqlcWorkflowVersion.ID,
			Tenantid:          tenantId,
		},
	)

	if err != nil {
		return "", err
	}

	for _, eventTrigger := range opts.EventTriggers {
		_, err := r.queries.CreateWorkflowTriggerEventRef(
			ctx,
			tx,
			sqlcv1.CreateWorkflowTriggerEventRefParams{
				Workflowtriggersid: sqlcWorkflowTriggers.ID,
				Eventtrigger:       eventTrigger,
			},
		)

		if err != nil {
			return "", err
		}
	}

	for _, cronTrigger := range opts.CronTriggers {
		_, err := r.queries.CreateWorkflowTriggerCronRef(
			ctx,
			tx,
			sqlcv1.CreateWorkflowTriggerCronRefParams{
				Workflowtriggersid: sqlcWorkflowTriggers.ID,
				Crontrigger:        cronTrigger,
				Input:              opts.CronInput,
				Name: pgtype.Text{
					String: "",
					Valid:  true,
				},
			},
		)

		if err != nil {
			return "", err
		}
	}

	if oldWorkflowVersion != nil {
		// move existing api crons to the new workflow version
		err = r.queries.MoveCronTriggerToNewWorkflowTriggers(ctx, tx, sqlcv1.MoveCronTriggerToNewWorkflowTriggersParams{
			Oldworkflowversionid: oldWorkflowVersion.WorkflowVersion.ID,
			Newworkflowtriggerid: sqlcWorkflowTriggers.ID,
		})

		if err != nil {
			return "", fmt.Errorf("could not move existing cron triggers to new workflow triggers: %w", err)
		}

		// move existing scheduled triggers to the new workflow version
		err = r.queries.MoveScheduledTriggerToNewWorkflowTriggers(ctx, tx, sqlcv1.MoveScheduledTriggerToNewWorkflowTriggersParams{
			Oldworkflowversionid: oldWorkflowVersion.WorkflowVersion.ID,
			Newworkflowtriggerid: sqlcWorkflowTriggers.ID,
		})

		if err != nil {
			return "", fmt.Errorf("could not move existing scheduled triggers to new workflow triggers: %w", err)
		}
	}

	return workflowVersionId, nil
}

func (r *workflowRepository) createJobTx(ctx context.Context, tx sqlcv1.DBTX, tenantId, workflowId, workflowVersionId pgtype.UUID, jobKind sqlcv1.JobKind, steps []CreateStepOpts) (string, error) {
	if len(steps) == 0 {
		return "", errors.New("no steps provided")
	}

	jobName := steps[0].ReadableId
	jobId := uuid.New().String()

	sqlcJob, err := r.queries.CreateJob(
		ctx,
		tx,
		sqlcv1.CreateJobParams{
			ID:                sqlchelpers.UUIDFromStr(jobId),
			Tenantid:          tenantId,
			Workflowversionid: workflowVersionId,
			Name:              jobName,
			Kind: sqlcv1.NullJobKind{
				Valid:   true,
				JobKind: jobKind,
			},
		},
	)

	if err != nil {
		return "", err
	}

	for _, stepOpts := range steps {
		stepId := uuid.New().String()

		var (
			timeout        pgtype.Text
			customUserData []byte
			retries        pgtype.Int4
		)

		if stepOpts.Timeout != nil {
			timeout = sqlchelpers.TextFromStr(*stepOpts.Timeout)
		}

		if stepOpts.Retries != nil {
			retries = pgtype.Int4{
				Valid: true,
				Int32: int32(*stepOpts.Retries), // nolint: gosec
			}
		}

		// upsert the action
		_, err := r.queries.UpsertAction(
			ctx,
			tx,
			sqlcv1.UpsertActionParams{
				Action:   stepOpts.Action,
				Tenantid: tenantId,
			},
		)

		if err != nil {
			return "", err
		}

		createStepParams := sqlcv1.CreateStepParams{
			ID:             sqlchelpers.UUIDFromStr(stepId),
			Tenantid:       tenantId,
			Jobid:          sqlchelpers.UUIDFromStr(jobId),
			Actionid:       stepOpts.Action,
			Timeout:        timeout,
			Readableid:     stepOpts.ReadableId,
			CustomUserData: customUserData,
			Retries:        retries,
		}

		if stepOpts.ScheduleTimeout != nil {
			createStepParams.ScheduleTimeout = sqlchelpers.TextFromStr(*stepOpts.ScheduleTimeout)
		}

		if stepOpts.RetryBackoffFactor != nil {
			createStepParams.RetryBackoffFactor = pgtype.Float8{
				Float64: *stepOpts.RetryBackoffFactor,
				Valid:   true,
			}
		}

		if stepOpts.RetryBackoffMaxSeconds != nil {
			createStepParams.RetryMaxBackoff = pgtype.Int4{
				Int32: int32(*stepOpts.RetryBackoffMaxSeconds), // nolint: gosec
				Valid: true,
			}
		}

		_, err = r.queries.CreateStep(
			ctx,
			tx,
			createStepParams,
		)

		if err != nil {
			return "", err
		}

		if len(stepOpts.DesiredWorkerLabels) > 0 {
			for i := range stepOpts.DesiredWorkerLabels {
				key := (stepOpts.DesiredWorkerLabels)[i].Key
				value := (stepOpts.DesiredWorkerLabels)[i]

				if key == "" {
					continue
				}

				opts := sqlcv1.UpsertDesiredWorkerLabelParams{
					Stepid: sqlchelpers.UUIDFromStr(stepId),
					Key:    key,
				}

				if value.IntValue != nil {
					opts.IntValue = sqlchelpers.ToInt(*value.IntValue)
				}

				if value.StrValue != nil {
					opts.StrValue = sqlchelpers.TextFromStr(*value.StrValue)
				}

				if value.Weight != nil {
					opts.Weight = sqlchelpers.ToInt(*value.Weight)
				}

				if value.Required != nil {
					opts.Required = sqlchelpers.BoolFromBoolean(*value.Required)
				}

				if value.Comparator != nil {
					opts.Comparator = sqlcv1.NullWorkerLabelComparator{
						WorkerLabelComparator: sqlcv1.WorkerLabelComparator(*value.Comparator),
						Valid:                 true,
					}
				}

				_, err = r.queries.UpsertDesiredWorkerLabel(
					ctx,
					tx,
					opts,
				)

				if err != nil {
					return "", err
				}
			}
		}

		if len(stepOpts.Parents) > 0 {
			err := r.queries.AddStepParents(
				ctx,
				tx,
				sqlcv1.AddStepParentsParams{
					ID:      sqlchelpers.UUIDFromStr(stepId),
					Parents: stepOpts.Parents,
					Jobid:   sqlcJob.ID,
				},
			)

			if err != nil {
				return "", err
			}
		}

		if len(stepOpts.RateLimits) > 0 {
			createStepExprParams := sqlcv1.CreateStepExpressionsParams{
				Stepid: sqlchelpers.UUIDFromStr(stepId),
			}

			for _, rateLimit := range stepOpts.RateLimits {
				// if ANY of the step expressions are not nil, we create ALL options as expressions, but with static
				// keys for any nil expressions.
				if rateLimit.KeyExpr != nil || rateLimit.LimitExpr != nil || rateLimit.UnitsExpr != nil {
					var keyExpr, limitExpr, unitsExpr string

					windowExpr := cel.Str("MINUTE")

					if rateLimit.Duration != nil {
						windowExpr = fmt.Sprintf(`"%s"`, *rateLimit.Duration)
					}

					if rateLimit.KeyExpr != nil {
						keyExpr = *rateLimit.KeyExpr
					} else {
						keyExpr = cel.Str(rateLimit.Key)
					}

					if rateLimit.UnitsExpr != nil {
						unitsExpr = *rateLimit.UnitsExpr
					} else {
						unitsExpr = cel.Int(*rateLimit.Units)
					}

					// create the key expression
					createStepExprParams.Kinds = append(createStepExprParams.Kinds, string(sqlcv1.StepExpressionKindDYNAMICRATELIMITKEY))
					createStepExprParams.Keys = append(createStepExprParams.Keys, rateLimit.Key)
					createStepExprParams.Expressions = append(createStepExprParams.Expressions, keyExpr)

					// create the limit value expression, if it's set
					if rateLimit.LimitExpr != nil {
						limitExpr = *rateLimit.LimitExpr

						createStepExprParams.Kinds = append(createStepExprParams.Kinds, string(sqlcv1.StepExpressionKindDYNAMICRATELIMITVALUE))
						createStepExprParams.Keys = append(createStepExprParams.Keys, rateLimit.Key)
						createStepExprParams.Expressions = append(createStepExprParams.Expressions, limitExpr)
					}

					// create the units value expression
					createStepExprParams.Kinds = append(createStepExprParams.Kinds, string(sqlcv1.StepExpressionKindDYNAMICRATELIMITUNITS))
					createStepExprParams.Keys = append(createStepExprParams.Keys, rateLimit.Key)
					createStepExprParams.Expressions = append(createStepExprParams.Expressions, unitsExpr)

					// create the window expression
					createStepExprParams.Kinds = append(createStepExprParams.Kinds, string(sqlcv1.StepExpressionKindDYNAMICRATELIMITWINDOW))
					createStepExprParams.Keys = append(createStepExprParams.Keys, rateLimit.Key)
					createStepExprParams.Expressions = append(createStepExprParams.Expressions, windowExpr)
				} else {
					_, err := r.queries.CreateStepRateLimit(
						ctx,
						tx,
						sqlcv1.CreateStepRateLimitParams{
							Stepid:       sqlchelpers.UUIDFromStr(stepId),
							Ratelimitkey: rateLimit.Key,
							Units:        int32(*rateLimit.Units), // nolint: gosec
							Tenantid:     tenantId,
							Kind:         sqlcv1.StepRateLimitKindSTATIC,
						},
					)

					if err != nil {
						return "", fmt.Errorf("could not create step rate limit: %w", err)
					}
				}
			}

			if len(createStepExprParams.Kinds) > 0 {
				err := r.queries.CreateStepExpressions(
					ctx,
					tx,
					createStepExprParams,
				)

				if err != nil {
					return "", err
				}
			}
		}

		if len(stepOpts.Concurrency) > 0 {
			for _, concurrency := range stepOpts.Concurrency {
				var maxRuns int32 = 1

				if concurrency.MaxRuns != nil {
					maxRuns = *concurrency.MaxRuns
				}

				strategy := sqlcv1.ConcurrencyLimitStrategyCANCELINPROGRESS

				if concurrency.LimitStrategy != nil {
					strategy = sqlcv1.ConcurrencyLimitStrategy(*concurrency.LimitStrategy)
				}

				_, err := r.queries.CreateStepConcurrency(
					ctx,
					tx,
					sqlcv1.CreateStepConcurrencyParams{
						Workflowid:        workflowId,
						Workflowversionid: workflowVersionId,
						Stepid:            sqlchelpers.UUIDFromStr(stepId),
						Tenantid:          tenantId,
						Expression:        concurrency.Expression,
						Maxconcurrency:    maxRuns,
						Strategy:          string(strategy),
					},
				)

				if err != nil {
					return "", err
				}
			}
		}

		if len(stepOpts.TriggerConditions) > 0 {
			for _, condition := range stepOpts.TriggerConditions {
				var parentReadableId pgtype.Text

				if condition.ParentReadableId != nil {
					parentReadableId = sqlchelpers.TextFromStr(*condition.ParentReadableId)
				}

				var eventKey pgtype.Text

				if condition.EventKey != nil {
					eventKey = sqlchelpers.TextFromStr(*condition.EventKey)
				}

				var sleepDuration pgtype.Text

				if condition.SleepDuration != nil {
					sleepDuration = sqlchelpers.TextFromStr(*condition.SleepDuration)
				}

				_, err := r.queries.CreateStepMatchCondition(
					ctx,
					tx,
					sqlcv1.CreateStepMatchConditionParams{
						Tenantid:         tenantId,
						Stepid:           sqlchelpers.UUIDFromStr(stepId),
						Readabledatakey:  condition.ReadableDataKey,
						Action:           sqlcv1.V1MatchConditionAction(condition.Action),
						Orgroupid:        sqlchelpers.UUIDFromStr(condition.OrGroupId),
						Expression:       sqlchelpers.TextFromStr(condition.Expression),
						Kind:             sqlcv1.V1StepMatchConditionKind(condition.MatchConditionKind),
						ParentReadableId: parentReadableId,
						EventKey:         eventKey,
						SleepDuration:    sleepDuration,
					},
				)

				if err != nil {
					return "", err
				}
			}
		}
	}

	return jobId, nil
}
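
To make the rate-limit branch above concrete, a hedged sketch of the flattened params it builds for a step whose key is a CEL expression while units stay static; stepId is assumed in scope, and the key name and expression are illustrative:

	params := sqlcv1.CreateStepExpressionsParams{
		Stepid: sqlchelpers.UUIDFromStr(stepId),
		Kinds: []string{
			string(sqlcv1.StepExpressionKindDYNAMICRATELIMITKEY),
			string(sqlcv1.StepExpressionKindDYNAMICRATELIMITUNITS),
			string(sqlcv1.StepExpressionKindDYNAMICRATELIMITWINDOW),
		},
		Keys: []string{"per-customer", "per-customer", "per-customer"},
		Expressions: []string{
			`"customer:" + input.customer_id`, // caller-provided CEL key
			cel.Int(1),                        // static units wrapped as a CEL literal
			cel.Str("MINUTE"),                 // default window
		},
	}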

func hasCycleV1(steps []CreateStepOpts) bool {
	graph := make(map[string][]string)
	for _, step := range steps {
		graph[step.ReadableId] = step.Parents
	}

	visited := make(map[string]bool)
	var dfs func(string) bool

	dfs = func(node string) bool {
		if seen, ok := visited[node]; ok && seen {
			return true
		}
		if _, ok := graph[node]; !ok {
			return false
		}
		visited[node] = true
		for _, parent := range graph[node] {
			if dfs(parent) {
				return true
			}
		}
		visited[node] = false
		return false
	}

	for _, step := range steps {
		if dfs(step.ReadableId) {
			return true
		}
	}
	return false
}
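
A quick sketch of the check on a deliberately cyclic pair of steps:

	cyclic := []CreateStepOpts{
		{ReadableId: "a", Parents: []string{"b"}},
		{ReadableId: "b", Parents: []string{"a"}},
	}

	acyclic := []CreateStepOpts{
		{ReadableId: "a"},
		{ReadableId: "b", Parents: []string{"a"}},
	}

	fmt.Println(hasCycleV1(cyclic))  // true
	fmt.Println(hasCycleV1(acyclic)) // false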

func orderWorkflowStepsV1(steps []CreateStepOpts) ([]CreateStepOpts, error) {
	// Build a map of step id to step for quick lookup.
	stepMap := make(map[string]CreateStepOpts)
	for _, step := range steps {
		stepMap[step.ReadableId] = step
	}

	// Initialize in-degree map and adjacency list graph.
	inDegree := make(map[string]int)
	graph := make(map[string][]string)
	for _, step := range steps {
		inDegree[step.ReadableId] = 0
	}

	// Build the graph and compute in-degrees.
	for _, step := range steps {
		for _, parent := range step.Parents {
			if _, exists := stepMap[parent]; !exists {
				return nil, fmt.Errorf("unknown parent step: %s", parent)
			}
			graph[parent] = append(graph[parent], step.ReadableId)
			inDegree[step.ReadableId]++
		}
	}

	// Queue for steps with no incoming edges.
	var queue []string
	for id, degree := range inDegree {
		if degree == 0 {
			queue = append(queue, id)
		}
	}

	var ordered []CreateStepOpts
	// Process the steps in topological order.
	for len(queue) > 0 {
		id := queue[0]
		queue = queue[1:]
		ordered = append(ordered, stepMap[id])
		for _, child := range graph[id] {
			inDegree[child]--
			if inDegree[child] == 0 {
				queue = append(queue, child)
			}
		}
	}

	// If not all steps are processed, there is a cycle.
	if len(ordered) != len(steps) {
		return nil, fmt.Errorf("cycle detected in workflow steps")
	}

	return ordered, nil
}
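
Since this is Kahn's algorithm, parents always precede their children in the returned slice, though ties between independent roots depend on map iteration order; a sketch:

	steps := []CreateStepOpts{
		{ReadableId: "charge", Action: "order:charge", Parents: []string{"validate"}},
		{ReadableId: "validate", Action: "order:validate"},
	}

	ordered, err := orderWorkflowStepsV1(steps)
	if err != nil {
		// a cycle or an unknown parent was declared
	}

	for _, s := range ordered {
		fmt.Println(s.ReadableId) // "validate" is guaranteed to print before "charge"
	}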

@@ -379,6 +379,11 @@ CREATE TABLE v1_match (
    tenant_id UUID NOT NULL,
    kind v1_match_kind NOT NULL,
    is_satisfied BOOLEAN NOT NULL DEFAULT FALSE,
    -- existing_data is data from previous match conditions that we'd like to propagate when the
    -- new match condition is met. this is used when this match was created from a previous match, for
    -- example when we've satisfied trigger conditions and would like to register durable sleep or user
    -- events before triggering the DAG.
    existing_data JSONB,
    signal_task_id bigint,
    signal_task_inserted_at timestamptz,
    signal_external_id UUID,
@@ -409,7 +414,7 @@ CREATE TYPE v1_event_type AS ENUM ('USER', 'INTERNAL');
-- negative conditions from positive conditions. For example, if a task is waiting for a set of
-- tasks to fail, the success of all tasks would be a CANCEL condition, and the failure of any
-- task would be a QUEUE condition. Different actions are implicitly different groups of conditions.
CREATE TYPE v1_match_condition_action AS ENUM ('CREATE', 'QUEUE', 'CANCEL', 'SKIP');
CREATE TYPE v1_match_condition_action AS ENUM ('CREATE', 'QUEUE', 'CANCEL', 'SKIP', 'CREATE_MATCH');

CREATE TABLE v1_match_condition (
    v1_match_id bigint NOT NULL,
@@ -1413,3 +1418,31 @@ CREATE TABLE v1_log_line (

    PRIMARY KEY (task_id, task_inserted_at, id)
) PARTITION BY RANGE(task_inserted_at);

CREATE TYPE v1_step_match_condition_kind AS ENUM ('PARENT_OVERRIDE', 'USER_EVENT', 'SLEEP');

CREATE TABLE v1_step_match_condition (
    id BIGINT GENERATED ALWAYS AS IDENTITY,
    tenant_id UUID NOT NULL,
    step_id UUID NOT NULL,
    readable_data_key TEXT NOT NULL,
    action v1_match_condition_action NOT NULL DEFAULT 'CREATE',
    or_group_id UUID NOT NULL,
    expression TEXT,
    kind v1_step_match_condition_kind NOT NULL,
    -- If this is a SLEEP condition, this will be set to the sleep duration
    sleep_duration TEXT,
    -- If this is a USER_EVENT condition, this will be set to the user event key
    event_key TEXT,
    -- If this is a PARENT_OVERRIDE condition, this will be set to the parent readable_id
    parent_readable_id TEXT,
    PRIMARY KEY (step_id, id)
);
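
For cross-reference with the repository code above, a hedged sketch of the params that populate one SLEEP row in this table; event_key and parent_readable_id stay NULL for this kind, and the values are illustrative:

	params := sqlcv1.CreateStepMatchConditionParams{
		Tenantid:        tenantId,
		Stepid:          sqlchelpers.UUIDFromStr(stepId),
		Readabledatakey: "slept",
		Action:          sqlcv1.V1MatchConditionAction("QUEUE"),
		Orgroupid:       sqlchelpers.UUIDFromStr(uuid.New().String()),
		Expression:      sqlchelpers.TextFromStr("true"),
		Kind:            sqlcv1.V1StepMatchConditionKind("SLEEP"),
		SleepDuration:   sqlchelpers.TextFromStr("10s"),
	}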

CREATE TABLE v1_durable_sleep (
    id BIGINT GENERATED ALWAYS AS IDENTITY,
    tenant_id UUID NOT NULL,
    sleep_until TIMESTAMPTZ NOT NULL,
    sleep_duration TEXT NOT NULL,
    PRIMARY KEY (tenant_id, sleep_until, id)
);