Fix: Scheduled runs race with idempotency key check (#2077)

* feat: create table for storing key

* feat: is_filled col

* feat: idempotency repo

* fix: handle filling

* fix: improve queries

* feat: check if was created already before triggering

* fix: handle partitions

* feat: improve schema

* feat: initial idempotency key claiming impl

* fix: db

* fix: sql fmt

* feat: crazy query

* fix: downstream

* fix: queries

* fix: query bug

* fix: migration rename

* fix: couple small issues

* feat: eviction job

* fix: copilot comments

* fix: index name

* fix: rm comment
This commit is contained in:
matt
2025-09-12 07:54:42 -04:00
committed by GitHub
parent de08285517
commit f385964fcc
15 changed files with 510 additions and 44 deletions
@@ -0,0 +1,23 @@
-- +goose Up
-- +goose StatementBegin
-- Idempotency keys used to deduplicate workflow triggers (e.g. scheduled runs
-- racing between tickers). A key is claimed by setting claimed_by_external_id.
CREATE TABLE v1_idempotency_key (
tenant_id UUID NOT NULL,
key TEXT NOT NULL,
-- keys past expires_at are ignored by claims and removed by the eviction job
expires_at TIMESTAMPTZ NOT NULL,
-- external id of the run that successfully claimed this key; NULL = unclaimed
claimed_by_external_id UUID,
inserted_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
-- NOTE(review): expires_at in the PK presumably supports time-ordered pruning — confirm
PRIMARY KEY (tenant_id, expires_at, key)
);
-- a key may exist at most once per tenant regardless of expiry
CREATE UNIQUE INDEX v1_idempotency_key_unique_tenant_key ON v1_idempotency_key (tenant_id, key);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
DROP TABLE v1_idempotency_key;
-- +goose StatementEnd
@@ -41,25 +41,26 @@ type TasksController interface {
}
type TasksControllerImpl struct {
mq msgqueue.MessageQueue
pubBuffer *msgqueue.MQPubBuffer
l *zerolog.Logger
queueLogger *zerolog.Logger
pgxStatsLogger *zerolog.Logger
repo repository.EngineRepository
repov1 v1.Repository
dv datautils.DataDecoderValidator
s gocron.Scheduler
a *hatcheterrors.Wrapped
p *partition.Partition
celParser *cel.CELParser
opsPoolPollInterval time.Duration
opsPoolJitter time.Duration
timeoutTaskOperations *queueutils.OperationPool
reassignTaskOperations *queueutils.OperationPool
retryTaskOperations *queueutils.OperationPool
emitSleepOperations *queueutils.OperationPool
replayEnabled bool
mq msgqueue.MessageQueue
pubBuffer *msgqueue.MQPubBuffer
l *zerolog.Logger
queueLogger *zerolog.Logger
pgxStatsLogger *zerolog.Logger
repo repository.EngineRepository
repov1 v1.Repository
dv datautils.DataDecoderValidator
s gocron.Scheduler
a *hatcheterrors.Wrapped
p *partition.Partition
celParser *cel.CELParser
opsPoolPollInterval time.Duration
opsPoolJitter time.Duration
timeoutTaskOperations *queueutils.OperationPool
reassignTaskOperations *queueutils.OperationPool
retryTaskOperations *queueutils.OperationPool
emitSleepOperations *queueutils.OperationPool
evictExpiredIdempotencyKeysOperations *queueutils.OperationPool
replayEnabled bool
}
type TasksControllerOpt func(*TasksControllerOpts)
@@ -229,6 +230,7 @@ func New(fs ...TasksControllerOpt) (*TasksControllerImpl, error) {
t.emitSleepOperations = queueutils.NewOperationPool(opts.l, timeout, "emit sleep step runs", t.processSleeps).WithJitter(jitter)
t.reassignTaskOperations = queueutils.NewOperationPool(opts.l, timeout, "reassign step runs", t.processTaskReassignments).WithJitter(jitter)
t.retryTaskOperations = queueutils.NewOperationPool(opts.l, timeout, "retry step runs", t.processTaskRetryQueueItems).WithJitter(jitter)
t.evictExpiredIdempotencyKeysOperations = queueutils.NewOperationPool(opts.l, timeout, "evict expired idempotency keys", t.evictExpiredIdempotencyKeys).WithJitter(jitter)
return t, nil
}
@@ -369,6 +371,24 @@ func (tc *TasksControllerImpl) Start() (func() error, error) {
return nil, wrappedErr
}
_, err = tc.s.NewJob(
gocron.DurationJob(tc.opsPoolPollInterval),
gocron.NewTask(
tc.runTenantEvictExpiredIdempotencyKeys(spanContext),
),
)
if err != nil {
wrappedErr := fmt.Errorf("failed to evict expired idempotency keys for tenant: %w", err)
cancel()
span.RecordError(err)
span.SetStatus(codes.Error, "failed to evict expired idempotency keys for tenant")
span.End()
return nil, wrappedErr
}
cleanup := func() error {
cancel()
@@ -0,0 +1,46 @@
package task
import (
"context"
"fmt"
"github.com/hatchet-dev/hatchet/internal/telemetry"
"github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc"
"github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers"
)
// runTenantEvictExpiredIdempotencyKeys returns a closure suitable for a gocron
// task: it lists every v1-engine tenant on this controller's partition and
// kicks off (or continues) the expired-idempotency-key eviction operation for
// each one.
func (tc *TasksControllerImpl) runTenantEvictExpiredIdempotencyKeys(ctx context.Context) func() {
	return func() {
		tc.l.Debug().Msgf("idempotency: evicting expired idempotency keys for tasks")

		// list all tenants
		tenants, err := tc.p.ListTenantsForController(ctx, dbsqlc.TenantMajorEngineVersionV1)

		if err != nil {
			tc.l.Error().Err(err).Msg("could not list tenants")
			return
		}

		pool := tc.evictExpiredIdempotencyKeysOperations
		pool.SetTenants(tenants)

		for _, tenant := range tenants {
			pool.RunOrContinue(sqlchelpers.UUIDToStr(tenant.ID))
		}
	}
}
// evictExpiredIdempotencyKeys deletes all expired idempotency keys for a
// single tenant. It always reports false for "should continue" because the
// repository call removes every expired row in one shot.
func (tc *TasksControllerImpl) evictExpiredIdempotencyKeys(ctx context.Context, tenantId string) (bool, error) {
	ctx, span := telemetry.NewSpan(ctx, "evict-expired-idempotency-keys")
	defer span.End()

	if err := tc.repov1.Idempotency().EvictExpiredIdempotencyKeys(ctx, sqlchelpers.UUIDFromStr(tenantId)); err != nil {
		return false, fmt.Errorf("failed to evict expired idempotency keys for tenant %s: %w", tenantId, err)
	}

	// hard-coded false here since the EvictExpiredIdempotencyKeys method deletes everything in one shot
	return false, nil
}
@@ -3,16 +3,27 @@ package ticker
import (
"context"
"fmt"
"time"
"github.com/google/uuid"
msgqueuev1 "github.com/hatchet-dev/hatchet/internal/msgqueue/v1"
tasktypes "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes/v1"
"github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc"
"github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers"
v1 "github.com/hatchet-dev/hatchet/pkg/repository/v1"
)
func (t *TickerImpl) runScheduledWorkflowV1(ctx context.Context, tenantId string, workflowVersion *dbsqlc.GetWorkflowVersionForEngineRow, scheduledWorkflowId string, scheduled *dbsqlc.PollScheduledWorkflowsRow) error {
expiresAt := scheduled.TriggerAt.Time.Add(time.Second * 30)
err := t.repov1.Idempotency().CreateIdempotencyKey(ctx, tenantId, scheduledWorkflowId, sqlchelpers.TimestamptzFromTime(expiresAt))
if err != nil {
return fmt.Errorf("could not create idempotency key: %w", err)
}
key := v1.IdempotencyKey(scheduledWorkflowId)
// send workflow run to task controller
opt := &v1.WorkflowNameTriggerOpts{
TriggerTaskData: &v1.TriggerTaskData{
@@ -21,8 +32,9 @@ func (t *TickerImpl) runScheduledWorkflowV1(ctx context.Context, tenantId string
AdditionalMetadata: scheduled.AdditionalMetadata,
Priority: &scheduled.Priority,
},
ExternalId: uuid.NewString(),
ShouldSkip: false,
IdempotencyKey: &key,
ExternalId: uuid.NewString(),
ShouldSkip: false,
}
msg, err := tasktypes.TriggerTaskMessage(
+77
View File
@@ -0,0 +1,77 @@
package v1
import (
"context"
"github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers"
"github.com/hatchet-dev/hatchet/pkg/repository/v1/sqlcv1"
"github.com/jackc/pgx/v5/pgtype"
"github.com/jackc/pgx/v5/pgxpool"
)
// WasSuccessfullyClaimed reports whether a claim attempt won its idempotency key.
type WasSuccessfullyClaimed bool

// IdempotencyKey is an opaque, tenant-scoped deduplication key for triggers.
type IdempotencyKey string

// IdempotencyRepository manages the lifecycle of idempotency keys.
type IdempotencyRepository interface {
	// CreateIdempotencyKey inserts an unclaimed key that is claimable until expiresAt.
	// (params renamed ctx to avoid shadowing the context package)
	CreateIdempotencyKey(ctx context.Context, tenantId, key string, expiresAt pgtype.Timestamptz) error
	// EvictExpiredIdempotencyKeys deletes every key for the tenant whose expiry has passed.
	EvictExpiredIdempotencyKeys(ctx context.Context, tenantId pgtype.UUID) error
}
// idempotencyRepository is the postgres-backed implementation of
// IdempotencyRepository, reusing the shared repository's pool and queries.
type idempotencyRepository struct {
	*sharedRepository
}

// newIdempotencyRepository wires an IdempotencyRepository on top of the
// shared repository state.
func newIdempotencyRepository(shared *sharedRepository) IdempotencyRepository {
	return &idempotencyRepository{
		sharedRepository: shared,
	}
}
// CreateIdempotencyKey inserts a new, unclaimed idempotency key for the tenant
// that remains claimable until expiresAt.
// Parameter renamed from `context` to `ctx`: the old name shadowed the context
// package inside the body.
func (r *idempotencyRepository) CreateIdempotencyKey(ctx context.Context, tenantId, key string, expiresAt pgtype.Timestamptz) error {
	return r.queries.CreateIdempotencyKey(ctx, r.pool, sqlcv1.CreateIdempotencyKeyParams{
		Tenantid:  sqlchelpers.UUIDFromStr(tenantId),
		Key:       key,
		Expiresat: expiresAt,
	})
}
// EvictExpiredIdempotencyKeys deletes every idempotency key for the tenant
// whose expires_at has passed, in a single statement.
// Parameter renamed from `context` to `ctx`: the old name shadowed the context
// package inside the body.
func (r *idempotencyRepository) EvictExpiredIdempotencyKeys(ctx context.Context, tenantId pgtype.UUID) error {
	return r.queries.CleanUpExpiredIdempotencyKeys(ctx, r.pool, tenantId)
}
// KeyClaimantPair associates an idempotency key with the external id of the
// run attempting to claim it.
type KeyClaimantPair struct {
	IdempotencyKey      IdempotencyKey
	ClaimedByExternalId pgtype.UUID
}
// claimIdempotencyKeys attempts to atomically claim each key in claims for its
// paired external id. It returns, for every pair the database reports on,
// whether the claim won; pairs whose key was missing, expired, or already
// claimed come back false.
//
// Changes from the original: the parameter is named ctx (the old `context`
// shadowed the context package), an empty claims slice short-circuits without
// a database round trip, and the result map is pre-sized.
func claimIdempotencyKeys(ctx context.Context, queries *sqlcv1.Queries, pool *pgxpool.Pool, tenantId string, claims []KeyClaimantPair) (map[KeyClaimantPair]WasSuccessfullyClaimed, error) {
	// nothing to claim: skip the database round trip entirely
	if len(claims) == 0 {
		return map[KeyClaimantPair]WasSuccessfullyClaimed{}, nil
	}

	keys := make([]string, len(claims))
	claimedByExternalIds := make([]pgtype.UUID, len(claims))

	for i, claim := range claims {
		keys[i] = string(claim.IdempotencyKey)
		claimedByExternalIds[i] = claim.ClaimedByExternalId
	}

	claimResults, err := queries.ClaimIdempotencyKeys(ctx, pool, sqlcv1.ClaimIdempotencyKeysParams{
		Tenantid:             sqlchelpers.UUIDFromStr(tenantId),
		Keys:                 keys,
		Claimedbyexternalids: claimedByExternalIds,
	})

	if err != nil {
		return nil, err
	}

	// at most one result row per distinct input pair
	keyToClaimStatus := make(map[KeyClaimantPair]WasSuccessfullyClaimed, len(claimResults))

	for _, claimResult := range claimResults {
		keyClaimantPair := KeyClaimantPair{
			IdempotencyKey:      IdempotencyKey(claimResult.Key),
			ClaimedByExternalId: claimResult.ClaimedByExternalID,
		}

		keyToClaimStatus[keyClaimantPair] = WasSuccessfullyClaimed(claimResult.WasSuccessfullyClaimed)
	}

	return keyToClaimStatus, nil
}
+3
View File
@@ -27,6 +27,9 @@ type WorkflowNameTriggerOpts struct {
ExternalId string
// (optional) The idempotency key to use for debouncing this task
IdempotencyKey *IdempotencyKey
// Whether to skip the creation of the child workflow
ShouldSkip bool
}
+29 -22
View File
@@ -31,20 +31,22 @@ type Repository interface {
Ticker() TickerRepository
Filters() FilterRepository
Webhooks() WebhookRepository
Idempotency() IdempotencyRepository
}
type repositoryImpl struct {
triggers TriggerRepository
tasks TaskRepository
scheduler SchedulerRepository
matches MatchRepository
olap OLAPRepository
logs LogLineRepository
workers WorkerRepository
workflows WorkflowRepository
ticker TickerRepository
filters FilterRepository
webhooks WebhookRepository
triggers TriggerRepository
tasks TaskRepository
scheduler SchedulerRepository
matches MatchRepository
olap OLAPRepository
logs LogLineRepository
workers WorkerRepository
workflows WorkflowRepository
ticker TickerRepository
filters FilterRepository
webhooks WebhookRepository
idempotency IdempotencyRepository
}
func NewRepository(pool *pgxpool.Pool, l *zerolog.Logger, taskRetentionPeriod, olapRetentionPeriod time.Duration, maxInternalRetryCount int32, entitlements repository.EntitlementsRepository, taskLimits TaskOperationLimits) (Repository, func() error) {
@@ -59,17 +61,18 @@ func NewRepository(pool *pgxpool.Pool, l *zerolog.Logger, taskRetentionPeriod, o
}
impl := &repositoryImpl{
triggers: newTriggerRepository(shared),
tasks: newTaskRepository(shared, taskRetentionPeriod, maxInternalRetryCount, taskLimits.TimeoutLimit, taskLimits.ReassignLimit, taskLimits.RetryQueueLimit, taskLimits.DurableSleepLimit),
scheduler: newSchedulerRepository(shared),
matches: matchRepo,
olap: newOLAPRepository(shared, olapRetentionPeriod, true),
logs: newLogLineRepository(shared),
workers: newWorkerRepository(shared),
workflows: newWorkflowRepository(shared),
ticker: newTickerRepository(shared),
filters: newFilterRepository(shared),
webhooks: newWebhookRepository(shared),
triggers: newTriggerRepository(shared),
tasks: newTaskRepository(shared, taskRetentionPeriod, maxInternalRetryCount, taskLimits.TimeoutLimit, taskLimits.ReassignLimit, taskLimits.RetryQueueLimit, taskLimits.DurableSleepLimit),
scheduler: newSchedulerRepository(shared),
matches: matchRepo,
olap: newOLAPRepository(shared, olapRetentionPeriod, true),
logs: newLogLineRepository(shared),
workers: newWorkerRepository(shared),
workflows: newWorkflowRepository(shared),
ticker: newTickerRepository(shared),
filters: newFilterRepository(shared),
webhooks: newWebhookRepository(shared),
idempotency: newIdempotencyRepository(shared),
}
return impl, func() error {
@@ -128,3 +131,7 @@ func (r *repositoryImpl) Filters() FilterRepository {
func (r *repositoryImpl) Webhooks() WebhookRepository {
return r.webhooks
}
// Idempotency returns the repository for managing idempotency keys.
func (r *repositoryImpl) Idempotency() IdempotencyRepository {
	return r.idempotency
}
@@ -0,0 +1,73 @@
-- name: CreateIdempotencyKey :exec
-- Inserts a new, unclaimed idempotency key for a tenant. Fails on conflict
-- with the unique (tenant_id, key) index if the key already exists.
INSERT INTO v1_idempotency_key (
tenant_id,
key,
expires_at
)
VALUES (
@tenantId::UUID,
@key::TEXT,
@expiresAt::TIMESTAMPTZ
);
-- name: CleanUpExpiredIdempotencyKeys :exec
-- Deletes every expired key for the tenant in one statement; run periodically
-- by the eviction job.
DELETE FROM v1_idempotency_key
WHERE
tenant_id = @tenantId::UUID
AND expires_at < NOW()
;
-- name: ClaimIdempotencyKeys :many
-- Atomically claims a batch of idempotency keys for the given tenant.
-- Returns one row per distinct input (key, claimant) pair with
-- was_successfully_claimed indicating whether that pair won its key.
WITH inputs AS (
-- pair up the parallel arrays; DISTINCT drops duplicate (key, claimant) pairs
SELECT DISTINCT
UNNEST(@keys::TEXT[]) AS key,
UNNEST(@claimedByExternalIds::UUID[]) AS claimed_by_external_id
), incoming_claims AS (
-- number competing claimants per key so each claimant targets a distinct row
SELECT
*,
ROW_NUMBER() OVER(PARTITION BY key ORDER BY claimed_by_external_id) AS claim_index
FROM inputs
), candidate_keys AS (
-- Grab all of the keys that are attempting to be claimed
SELECT
tenant_id,
expires_at,
key,
ROW_NUMBER() OVER(PARTITION BY tenant_id, key ORDER BY expires_at) AS key_index
FROM v1_idempotency_key
WHERE
tenant_id = @tenantId::UUID
AND key IN (
SELECT key
FROM incoming_claims
)
AND claimed_by_external_id IS NULL
AND expires_at > NOW()
), to_update AS (
-- match the i-th claimant to the i-th unclaimed row for each key
SELECT
ck.tenant_id,
ck.expires_at,
ck.key,
ic.claimed_by_external_id
FROM candidate_keys ck
JOIN incoming_claims ic ON (ck.key, ck.key_index) = (ic.key, ic.claim_index)
WHERE ck.tenant_id = @tenantId::UUID
-- NOTE(review): this FROM clause references CTEs, not the base table — confirm
-- the row locks actually reach v1_idempotency_key; FOR UPDATE does not
-- propagate through a materialized CTE, so concurrent claimers may still race.
FOR UPDATE SKIP LOCKED
), claims AS (
UPDATE v1_idempotency_key k
SET
claimed_by_external_id = u.claimed_by_external_id,
updated_at = NOW()
FROM to_update u
WHERE (u.tenant_id, u.expires_at, u.key) = (k.tenant_id, k.expires_at, k.key)
RETURNING k.*
)
SELECT
i.key::TEXT AS key,
c.expires_at::TIMESTAMPTZ AS expires_at,
-- NOTE(review): prefer (c.claimed_by_external_id IS NOT NULL)::BOOLEAN in a
-- follow-up regen — explicit parens make the intended precedence unambiguous
c.claimed_by_external_id IS NOT NULL::BOOLEAN AS was_successfully_claimed,
c.claimed_by_external_id
FROM inputs i
-- inputs with no matching claim (missing, expired, or already-claimed keys)
-- come back with NULL claim columns, i.e. was_successfully_claimed = false
LEFT JOIN claims c ON (i.key = c.key AND i.claimed_by_external_id = c.claimed_by_external_id)
;
@@ -0,0 +1,141 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.29.0
// source: idempotency-keys.sql
package sqlcv1
import (
"context"
"github.com/jackc/pgx/v5/pgtype"
)
const claimIdempotencyKeys = `-- name: ClaimIdempotencyKeys :many
WITH inputs AS (
SELECT DISTINCT
UNNEST($1::TEXT[]) AS key,
UNNEST($2::UUID[]) AS claimed_by_external_id
), incoming_claims AS (
SELECT
key, claimed_by_external_id,
ROW_NUMBER() OVER(PARTITION BY key ORDER BY claimed_by_external_id) AS claim_index
FROM inputs
), candidate_keys AS (
-- Grab all of the keys that are attempting to be claimed
SELECT
tenant_id,
expires_at,
key,
ROW_NUMBER() OVER(PARTITION BY tenant_id, key ORDER BY expires_at) AS key_index
FROM v1_idempotency_key
WHERE
tenant_id = $3::UUID
AND key IN (
SELECT key
FROM incoming_claims
)
AND claimed_by_external_id IS NULL
AND expires_at > NOW()
), to_update AS (
SELECT
ck.tenant_id,
ck.expires_at,
ck.key,
ic.claimed_by_external_id
FROM candidate_keys ck
JOIN incoming_claims ic ON (ck.key, ck.key_index) = (ic.key, ic.claim_index)
WHERE ck.tenant_id = $3::UUID
FOR UPDATE SKIP LOCKED
), claims AS (
UPDATE v1_idempotency_key k
SET
claimed_by_external_id = u.claimed_by_external_id,
updated_at = NOW()
FROM to_update u
WHERE (u.tenant_id, u.expires_at, u.key) = (k.tenant_id, k.expires_at, k.key)
RETURNING k.tenant_id, k.key, k.expires_at, k.claimed_by_external_id, k.inserted_at, k.updated_at
)
SELECT
i.key::TEXT AS key,
c.expires_at::TIMESTAMPTZ AS expires_at,
c.claimed_by_external_id IS NOT NULL::BOOLEAN AS was_successfully_claimed,
c.claimed_by_external_id
FROM inputs i
LEFT JOIN claims c ON (i.key = c.key AND i.claimed_by_external_id = c.claimed_by_external_id)
`
type ClaimIdempotencyKeysParams struct {
Keys []string `json:"keys"`
Claimedbyexternalids []pgtype.UUID `json:"claimedbyexternalids"`
Tenantid pgtype.UUID `json:"tenantid"`
}
type ClaimIdempotencyKeysRow struct {
Key string `json:"key"`
ExpiresAt pgtype.Timestamptz `json:"expires_at"`
WasSuccessfullyClaimed bool `json:"was_successfully_claimed"`
ClaimedByExternalID pgtype.UUID `json:"claimed_by_external_id"`
}
func (q *Queries) ClaimIdempotencyKeys(ctx context.Context, db DBTX, arg ClaimIdempotencyKeysParams) ([]*ClaimIdempotencyKeysRow, error) {
rows, err := db.Query(ctx, claimIdempotencyKeys, arg.Keys, arg.Claimedbyexternalids, arg.Tenantid)
if err != nil {
return nil, err
}
defer rows.Close()
var items []*ClaimIdempotencyKeysRow
for rows.Next() {
var i ClaimIdempotencyKeysRow
if err := rows.Scan(
&i.Key,
&i.ExpiresAt,
&i.WasSuccessfullyClaimed,
&i.ClaimedByExternalID,
); err != nil {
return nil, err
}
items = append(items, &i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const cleanUpExpiredIdempotencyKeys = `-- name: CleanUpExpiredIdempotencyKeys :exec
DELETE FROM v1_idempotency_key
WHERE
tenant_id = $1::UUID
AND expires_at < NOW()
`
func (q *Queries) CleanUpExpiredIdempotencyKeys(ctx context.Context, db DBTX, tenantid pgtype.UUID) error {
_, err := db.Exec(ctx, cleanUpExpiredIdempotencyKeys, tenantid)
return err
}
const createIdempotencyKey = `-- name: CreateIdempotencyKey :exec
INSERT INTO v1_idempotency_key (
tenant_id,
key,
expires_at
)
VALUES (
$1::UUID,
$2::TEXT,
$3::TIMESTAMPTZ
)
`
type CreateIdempotencyKeyParams struct {
Tenantid pgtype.UUID `json:"tenantid"`
Key string `json:"key"`
Expiresat pgtype.Timestamptz `json:"expiresat"`
}
func (q *Queries) CreateIdempotencyKey(ctx context.Context, db DBTX, arg CreateIdempotencyKeyParams) error {
_, err := db.Exec(ctx, createIdempotencyKey, arg.Tenantid, arg.Key, arg.Expiresat)
return err
}
+9
View File
@@ -2834,6 +2834,15 @@ type V1Filter struct {
UpdatedAt pgtype.Timestamptz `json:"updated_at"`
}
type V1IdempotencyKey struct {
TenantID pgtype.UUID `json:"tenant_id"`
Key string `json:"key"`
ExpiresAt pgtype.Timestamptz `json:"expires_at"`
ClaimedByExternalID pgtype.UUID `json:"claimed_by_external_id"`
InsertedAt pgtype.Timestamptz `json:"inserted_at"`
UpdatedAt pgtype.Timestamptz `json:"updated_at"`
}
type V1IncomingWebhook struct {
TenantID pgtype.UUID `json:"tenant_id"`
Name string `json:"name"`
+1
View File
@@ -20,6 +20,7 @@ sql:
- ticker.sql
- filters.sql
- webhooks.sql
- idempotency-keys.sql
schema:
- ../../../../sql/schema/v0.sql
- ../../../../sql/schema/v1-core.sql
+3 -1
View File
@@ -41,6 +41,7 @@ WITH task_partitions AS (
), log_line_partitions AS (
SELECT 'v1_log_line' AS parent_table, p::text as partition_name FROM get_v1_partitions_before_date('v1_log_line', @date::date) AS p
)
SELECT
*
FROM
@@ -65,7 +66,8 @@ UNION ALL
SELECT
*
FROM
log_line_partitions;
log_line_partitions
;
-- name: FlattenExternalIds :many
WITH lookup_rows AS (
+1
View File
@@ -697,6 +697,7 @@ WITH task_partitions AS (
), log_line_partitions AS (
SELECT 'v1_log_line' AS parent_table, p::text as partition_name FROM get_v1_partitions_before_date('v1_log_line', $1::date) AS p
)
SELECT
parent_table, partition_name
FROM
+35
View File
@@ -63,6 +63,7 @@ type TriggerTaskData struct {
// (optional) the child key
ChildKey *string `json:"child_key"`
// (optional) the priority of the task
Priority *int32 `json:"priority"`
}
@@ -440,8 +441,13 @@ func (r *TriggerRepositoryImpl) TriggerFromWorkflowNames(ctx context.Context, te
workflowNames := make([]string, 0, len(opts))
uniqueNames := make(map[string]struct{})
namesToOpts := make(map[string][]*WorkflowNameTriggerOpts)
idempotencyKeyToExternalIds := make(map[IdempotencyKey]pgtype.UUID)
for _, opt := range opts {
if opt.IdempotencyKey != nil {
idempotencyKeyToExternalIds[*opt.IdempotencyKey] = sqlchelpers.UUIDFromStr(opt.ExternalId)
}
namesToOpts[opt.WorkflowName] = append(namesToOpts[opt.WorkflowName], opt)
if _, ok := uniqueNames[opt.WorkflowName]; ok {
@@ -452,6 +458,21 @@ func (r *TriggerRepositoryImpl) TriggerFromWorkflowNames(ctx context.Context, te
workflowNames = append(workflowNames, opt.WorkflowName)
}
keyClaimantPairs := make([]KeyClaimantPair, 0, len(idempotencyKeyToExternalIds))
for idempotencyKey, runExternalId := range idempotencyKeyToExternalIds {
keyClaimantPairs = append(keyClaimantPairs, KeyClaimantPair{
IdempotencyKey: idempotencyKey,
ClaimedByExternalId: runExternalId,
})
}
keyClaimantPairToWasClaimed, err := claimIdempotencyKeys(ctx, r.queries, r.pool, tenantId, keyClaimantPairs)
if err != nil {
return nil, nil, fmt.Errorf("failed to claim idempotency keys: %w", err)
}
// we don't run this in a transaction because workflow versions won't change during the course of this operation
workflowVersionsByNames, err := r.queries.ListWorkflowsByNames(ctx, r.pool, sqlcv1.ListWorkflowsByNamesParams{
Tenantid: sqlchelpers.UUIDFromStr(tenantId),
@@ -473,6 +494,20 @@ func (r *TriggerRepositoryImpl) TriggerFromWorkflowNames(ctx context.Context, te
}
for _, opt := range opts {
if opt.IdempotencyKey != nil {
keyClaimantPair := KeyClaimantPair{
IdempotencyKey: *opt.IdempotencyKey,
ClaimedByExternalId: sqlchelpers.UUIDFromStr(opt.ExternalId),
}
wasSuccessfullyClaimed := keyClaimantPairToWasClaimed[keyClaimantPair]
// if we did not successfully claim the idempotency key, we should not trigger the workflow
if !wasSuccessfullyClaimed {
continue
}
}
triggerOpts = append(triggerOpts, triggerTuple{
workflowVersionId: sqlchelpers.UUIDToStr(workflowVersion.WorkflowVersionId),
workflowId: sqlchelpers.UUIDToStr(workflowVersion.WorkflowId),
+16
View File
@@ -1628,3 +1628,19 @@ CREATE TABLE v1_durable_sleep (
sleep_duration TEXT NOT NULL,
PRIMARY KEY (tenant_id, sleep_until, id)
);
-- Idempotency keys used to deduplicate workflow triggers (e.g. scheduled runs
-- racing between tickers); mirrors the goose migration for this table.
CREATE TABLE v1_idempotency_key (
tenant_id UUID NOT NULL,
key TEXT NOT NULL,
-- keys past expires_at are ignored by claims and removed by the eviction job
expires_at TIMESTAMPTZ NOT NULL,
-- external id of the run that successfully claimed this key; NULL = unclaimed
claimed_by_external_id UUID,
inserted_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (tenant_id, expires_at, key)
);
-- a key may exist at most once per tenant regardless of expiry
CREATE UNIQUE INDEX v1_idempotency_key_unique_tenant_key ON v1_idempotency_key (tenant_id, key);