Feat: Non-determinism errors (#3041)

* fix: retrieve payloads in bulk

* fix: hash -> idempotency key

* feat: initial hashing work

* feat: check idempotency key if entry exists

* fix: panic

* feat: initial work on custom error for non-determinism

* fix: handle nondeterminism error properly

* feat: add error response, pub message to task controller

* chore: lint

* feat: add node id field to error proto

* chore: rm a bunch of unhelpful cancellation logs

* fix: conflict issues

* fix: rm another log

* fix: send node id properly

* fix: improve what we hash

* fix: improve error handling

* fix: python issues

* fix: don't hash or group id

* fix: rm print

* feat: add python test

* fix: add timeout

* fix: improve handling of non determinism error

* fix: propagate node id through

* fix: types, test

* fix: make serializable

* fix: no need to cancel internally anymore

* fix: hide another internal log

* fix: add link to docs

* fix: copilot

* fix: use sha256

* fix: test cleanup

* fix: add error type enum

* fix: handle exceptions on the worker

* fix: clean up a bunch of cursor imports

* fix: cursor docstring formatting

* fix: simplify idempotency key func

* fix: add back cancellation logs

* feat: tests for idempotency keys

* fix: add a couple more for priority and metadata

* chore: gen

* fix: python reconnect

* fix: noisy error

* fix: improve log

* fix: don't run durable listener if no durable tasks are registered

* fix: non-null idempotency keys
This commit is contained in:
matt
2026-02-18 11:27:02 -05:00
committed by GitHub
parent eaf6bba824
commit 7e3e3b8fc0
39 changed files with 1058 additions and 597 deletions
+14
View File
@@ -84,11 +84,25 @@ message DurableTaskRequest {
}
}
enum DurableTaskErrorType {
DURABLE_TASK_ERROR_TYPE_UNSPECIFIED = 0;
DURABLE_TASK_ERROR_TYPE_NONDETERMINISM = 1;
}
message DurableTaskErrorResponse {
string durable_task_external_id = 1;
int64 invocation_count = 2;
int64 node_id = 3;
DurableTaskErrorType error_type = 4;
string error_message = 5;
}
message DurableTaskResponse {
oneof message {
DurableTaskResponseRegisterWorker register_worker = 1;
DurableTaskEventAckResponse trigger_ack = 2;
DurableTaskEventLogEntryCompletedResponse entry_completed = 3;
DurableTaskErrorResponse error = 4;
}
}
@@ -93,14 +93,9 @@ CREATE TABLE v1_durable_event_log_entry (
parent_node_id BIGINT,
-- The branch id when this event was first seen. A durable event log can be a part of many branches.
branch_id BIGINT NOT NULL,
-- Todo: Associated data for this event should be stored in the v1_payload table!
-- data JSONB,
-- The hash of the data stored in the v1_payload table to check non-determinism violations.
-- This can be null for event types that don't have associated data.
-- TODO: we can add CHECK CONSTRAINT for event types that require data_hash to be non-null.
data_hash BYTEA,
-- Can discuss: adds some flexibility for future hash algorithms
data_hash_alg TEXT,
-- An idempotency key generated from the incoming data (using the type of event + wait for conditions or the trigger event payload + options)
-- to determine whether or not there's been a non-determinism error
idempotency_key BYTEA NOT NULL,
-- Access patterns:
-- Definite: we'll query directly for the node_id when a durable task is replaying its log
-- Possible: we may want to query a range of node_ids for a durable task
+33
View File
@@ -3,6 +3,7 @@ import time
from datetime import timedelta
from typing import Any
from uuid import uuid4
from pydantic import BaseModel
from hatchet_sdk import (
Context,
@@ -13,6 +14,7 @@ from hatchet_sdk import (
UserEventCondition,
or_,
)
from hatchet_sdk.exceptions import NonDeterminismError
hatchet = Hatchet(debug=True)
@@ -209,6 +211,36 @@ async def durable_sleep_event_spawn(
}
class NonDeterminismOutput(BaseModel):
attempt_number: int
sleep_time: int
non_determinism_detected: bool = False
node_id: int | None = None
@hatchet.durable_task(execution_timeout=timedelta(seconds=10))
async def durable_non_determinism(
input: EmptyModel, ctx: DurableContext
) -> NonDeterminismOutput:
sleep_time = ctx.attempt_number * 2
try:
await ctx.aio_sleep_for(timedelta(seconds=sleep_time))
except NonDeterminismError as e:
return NonDeterminismOutput(
attempt_number=ctx.attempt_number,
sleep_time=sleep_time,
non_determinism_detected=True,
node_id=e.node_id,
)
return NonDeterminismOutput(
attempt_number=ctx.attempt_number,
sleep_time=sleep_time,
)
def main() -> None:
worker = hatchet.worker(
"durable-worker",
@@ -219,6 +251,7 @@ def main() -> None:
spawn_child_task,
durable_with_spawn,
durable_sleep_event_spawn,
durable_non_determinism,
],
)
worker.start()
+1 -1
View File
@@ -49,7 +49,7 @@ def simple(input: EmptyModel, ctx: Context) -> dict[str, str]:
@hatchet.durable_task()
def simple_durable(input: EmptyModel, ctx: Context) -> dict[str, str]:
async def simple_durable(input: EmptyModel, ctx: Context) -> dict[str, str]:
print("Executing durable task!")
return {"result": "Hello from durable!"}
+2 -3
View File
@@ -1,4 +1,4 @@
# This is a worker script that will introduce chaos to test
# This is a worker script that will introduce chaos to test
# complex deployments and migrations.
import argparse
import asyncio
@@ -48,7 +48,7 @@ def simple(input: EmptyModel, ctx: Context) -> dict[str, str]:
@hatchet.durable_task()
def simple_durable(input: EmptyModel, ctx: Context) -> dict[str, str]:
async def simple_durable(input: EmptyModel, ctx: Context) -> dict[str, str]:
print("Executing durable task!")
return {"result": "Hello from durable!"}
@@ -149,6 +149,5 @@ def main() -> None:
print("Bye!")
if __name__ == "__main__":
main()
@@ -10,9 +10,6 @@ from examples.unit_testing.workflows import (
durable_async_complex_workflow,
durable_async_simple_workflow,
durable_async_standalone,
durable_sync_complex_workflow,
durable_sync_simple_workflow,
durable_sync_standalone,
start,
sync_complex_workflow,
sync_simple_workflow,
@@ -25,11 +22,8 @@ from hatchet_sdk import Task
"func",
[
sync_standalone,
durable_sync_standalone,
sync_simple_workflow,
durable_sync_simple_workflow,
sync_complex_workflow,
durable_sync_complex_workflow,
],
)
def test_simple_unit_sync(func: Task[UnitTestInput, UnitTestOutput]) -> None:
-35
View File
@@ -44,19 +44,6 @@ async def async_standalone(input: UnitTestInput, ctx: Context) -> UnitTestOutput
)
@hatchet.durable_task(input_validator=UnitTestInput)
def durable_sync_standalone(
input: UnitTestInput, ctx: DurableContext
) -> UnitTestOutput:
return UnitTestOutput(
key=input.key,
number=input.number,
additional_metadata=ctx.additional_metadata,
retry_count=ctx.retry_count,
mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url,
)
@hatchet.durable_task(input_validator=UnitTestInput)
async def durable_async_standalone(
input: UnitTestInput, ctx: DurableContext
@@ -97,19 +84,6 @@ async def async_simple_workflow(input: UnitTestInput, ctx: Context) -> UnitTestO
)
@simple_workflow.durable_task()
def durable_sync_simple_workflow(
input: UnitTestInput, ctx: DurableContext
) -> UnitTestOutput:
return UnitTestOutput(
key=input.key,
number=input.number,
additional_metadata=ctx.additional_metadata,
retry_count=ctx.retry_count,
mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url,
)
@simple_workflow.durable_task()
async def durable_async_simple_workflow(
input: UnitTestInput, ctx: DurableContext
@@ -153,15 +127,6 @@ async def async_complex_workflow(input: UnitTestInput, ctx: Context) -> UnitTest
return ctx.task_output(start)
@complex_workflow.durable_task(
parents=[start],
)
def durable_sync_complex_workflow(
input: UnitTestInput, ctx: DurableContext
) -> UnitTestOutput:
return ctx.task_output(start)
@complex_workflow.durable_task(
parents=[start],
)
+2
View File
@@ -38,6 +38,7 @@ from examples.durable.worker import (
wait_for_sleep_twice,
dag_child_workflow,
durable_spawn_dag,
durable_non_determinism,
)
from examples.events.worker import event_workflow
from examples.fanout.worker import child_wf, parent_wf
@@ -121,6 +122,7 @@ def main() -> None:
serde_workflow,
durable_spawn_dag,
dag_child_workflow,
durable_non_determinism,
],
lifespan=lifespan,
)
+20 -1
View File
@@ -542,7 +542,26 @@ func (d *DispatcherServiceImpl) handleDurableTaskEvent(
TriggerOpts: triggerOpts,
})
if err != nil {
var nde *v1.NonDeterminismError
if err != nil && errors.As(err, &nde) {
sendErr := invocation.send(&contracts.DurableTaskResponse{
Message: &contracts.DurableTaskResponse_Error{
Error: &contracts.DurableTaskErrorResponse{
DurableTaskExternalId: taskExternalId.String(),
NodeId: nde.NodeId,
InvocationCount: req.InvocationCount,
ErrorType: contracts.DurableTaskErrorType_DURABLE_TASK_ERROR_TYPE_NONDETERMINISM,
ErrorMessage: nde.Error(),
},
},
})
if sendErr != nil {
return fmt.Errorf("failed to send non-determinism error to worker: %w", sendErr)
}
return nil
} else if err != nil {
return status.Errorf(codes.Internal, "failed to ingest durable task event: %v", err)
}
+314 -132
View File
@@ -72,6 +72,52 @@ func (DurableTaskEventKind) EnumDescriptor() ([]byte, []int) {
return file_v1_dispatcher_proto_rawDescGZIP(), []int{0}
}
type DurableTaskErrorType int32
const (
DurableTaskErrorType_DURABLE_TASK_ERROR_TYPE_UNSPECIFIED DurableTaskErrorType = 0
DurableTaskErrorType_DURABLE_TASK_ERROR_TYPE_NONDETERMINISM DurableTaskErrorType = 1
)
// Enum value maps for DurableTaskErrorType.
var (
DurableTaskErrorType_name = map[int32]string{
0: "DURABLE_TASK_ERROR_TYPE_UNSPECIFIED",
1: "DURABLE_TASK_ERROR_TYPE_NONDETERMINISM",
}
DurableTaskErrorType_value = map[string]int32{
"DURABLE_TASK_ERROR_TYPE_UNSPECIFIED": 0,
"DURABLE_TASK_ERROR_TYPE_NONDETERMINISM": 1,
}
)
func (x DurableTaskErrorType) Enum() *DurableTaskErrorType {
p := new(DurableTaskErrorType)
*p = x
return p
}
func (x DurableTaskErrorType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (DurableTaskErrorType) Descriptor() protoreflect.EnumDescriptor {
return file_v1_dispatcher_proto_enumTypes[1].Descriptor()
}
func (DurableTaskErrorType) Type() protoreflect.EnumType {
return &file_v1_dispatcher_proto_enumTypes[1]
}
func (x DurableTaskErrorType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use DurableTaskErrorType.Descriptor instead.
func (DurableTaskErrorType) EnumDescriptor() ([]byte, []int) {
return file_v1_dispatcher_proto_rawDescGZIP(), []int{1}
}
type DurableTaskRequestRegisterWorker struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -675,6 +721,85 @@ func (*DurableTaskRequest_EvictInvocation) isDurableTaskRequest_Message() {}
func (*DurableTaskRequest_WorkerStatus) isDurableTaskRequest_Message() {}
type DurableTaskErrorResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
DurableTaskExternalId string `protobuf:"bytes,1,opt,name=durable_task_external_id,json=durableTaskExternalId,proto3" json:"durable_task_external_id,omitempty"`
InvocationCount int64 `protobuf:"varint,2,opt,name=invocation_count,json=invocationCount,proto3" json:"invocation_count,omitempty"`
NodeId int64 `protobuf:"varint,3,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
ErrorType DurableTaskErrorType `protobuf:"varint,4,opt,name=error_type,json=errorType,proto3,enum=v1.DurableTaskErrorType" json:"error_type,omitempty"`
ErrorMessage string `protobuf:"bytes,5,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
}
func (x *DurableTaskErrorResponse) Reset() {
*x = DurableTaskErrorResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_v1_dispatcher_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DurableTaskErrorResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DurableTaskErrorResponse) ProtoMessage() {}
func (x *DurableTaskErrorResponse) ProtoReflect() protoreflect.Message {
mi := &file_v1_dispatcher_proto_msgTypes[9]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DurableTaskErrorResponse.ProtoReflect.Descriptor instead.
func (*DurableTaskErrorResponse) Descriptor() ([]byte, []int) {
return file_v1_dispatcher_proto_rawDescGZIP(), []int{9}
}
func (x *DurableTaskErrorResponse) GetDurableTaskExternalId() string {
if x != nil {
return x.DurableTaskExternalId
}
return ""
}
func (x *DurableTaskErrorResponse) GetInvocationCount() int64 {
if x != nil {
return x.InvocationCount
}
return 0
}
func (x *DurableTaskErrorResponse) GetNodeId() int64 {
if x != nil {
return x.NodeId
}
return 0
}
func (x *DurableTaskErrorResponse) GetErrorType() DurableTaskErrorType {
if x != nil {
return x.ErrorType
}
return DurableTaskErrorType_DURABLE_TASK_ERROR_TYPE_UNSPECIFIED
}
func (x *DurableTaskErrorResponse) GetErrorMessage() string {
if x != nil {
return x.ErrorMessage
}
return ""
}
type DurableTaskResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -685,13 +810,14 @@ type DurableTaskResponse struct {
// *DurableTaskResponse_RegisterWorker
// *DurableTaskResponse_TriggerAck
// *DurableTaskResponse_EntryCompleted
// *DurableTaskResponse_Error
Message isDurableTaskResponse_Message `protobuf_oneof:"message"`
}
func (x *DurableTaskResponse) Reset() {
*x = DurableTaskResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_v1_dispatcher_proto_msgTypes[9]
mi := &file_v1_dispatcher_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -704,7 +830,7 @@ func (x *DurableTaskResponse) String() string {
func (*DurableTaskResponse) ProtoMessage() {}
func (x *DurableTaskResponse) ProtoReflect() protoreflect.Message {
mi := &file_v1_dispatcher_proto_msgTypes[9]
mi := &file_v1_dispatcher_proto_msgTypes[10]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -717,7 +843,7 @@ func (x *DurableTaskResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use DurableTaskResponse.ProtoReflect.Descriptor instead.
func (*DurableTaskResponse) Descriptor() ([]byte, []int) {
return file_v1_dispatcher_proto_rawDescGZIP(), []int{9}
return file_v1_dispatcher_proto_rawDescGZIP(), []int{10}
}
func (m *DurableTaskResponse) GetMessage() isDurableTaskResponse_Message {
@@ -748,6 +874,13 @@ func (x *DurableTaskResponse) GetEntryCompleted() *DurableTaskEventLogEntryCompl
return nil
}
func (x *DurableTaskResponse) GetError() *DurableTaskErrorResponse {
if x, ok := x.GetMessage().(*DurableTaskResponse_Error); ok {
return x.Error
}
return nil
}
type isDurableTaskResponse_Message interface {
isDurableTaskResponse_Message()
}
@@ -764,12 +897,18 @@ type DurableTaskResponse_EntryCompleted struct {
EntryCompleted *DurableTaskEventLogEntryCompletedResponse `protobuf:"bytes,3,opt,name=entry_completed,json=entryCompleted,proto3,oneof"`
}
type DurableTaskResponse_Error struct {
Error *DurableTaskErrorResponse `protobuf:"bytes,4,opt,name=error,proto3,oneof"`
}
func (*DurableTaskResponse_RegisterWorker) isDurableTaskResponse_Message() {}
func (*DurableTaskResponse_TriggerAck) isDurableTaskResponse_Message() {}
func (*DurableTaskResponse_EntryCompleted) isDurableTaskResponse_Message() {}
func (*DurableTaskResponse_Error) isDurableTaskResponse_Message() {}
type RegisterDurableEventRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -783,7 +922,7 @@ type RegisterDurableEventRequest struct {
func (x *RegisterDurableEventRequest) Reset() {
*x = RegisterDurableEventRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_v1_dispatcher_proto_msgTypes[10]
mi := &file_v1_dispatcher_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -796,7 +935,7 @@ func (x *RegisterDurableEventRequest) String() string {
func (*RegisterDurableEventRequest) ProtoMessage() {}
func (x *RegisterDurableEventRequest) ProtoReflect() protoreflect.Message {
mi := &file_v1_dispatcher_proto_msgTypes[10]
mi := &file_v1_dispatcher_proto_msgTypes[11]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -809,7 +948,7 @@ func (x *RegisterDurableEventRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RegisterDurableEventRequest.ProtoReflect.Descriptor instead.
func (*RegisterDurableEventRequest) Descriptor() ([]byte, []int) {
return file_v1_dispatcher_proto_rawDescGZIP(), []int{10}
return file_v1_dispatcher_proto_rawDescGZIP(), []int{11}
}
func (x *RegisterDurableEventRequest) GetTaskId() string {
@@ -842,7 +981,7 @@ type RegisterDurableEventResponse struct {
func (x *RegisterDurableEventResponse) Reset() {
*x = RegisterDurableEventResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_v1_dispatcher_proto_msgTypes[11]
mi := &file_v1_dispatcher_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -855,7 +994,7 @@ func (x *RegisterDurableEventResponse) String() string {
func (*RegisterDurableEventResponse) ProtoMessage() {}
func (x *RegisterDurableEventResponse) ProtoReflect() protoreflect.Message {
mi := &file_v1_dispatcher_proto_msgTypes[11]
mi := &file_v1_dispatcher_proto_msgTypes[12]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -868,7 +1007,7 @@ func (x *RegisterDurableEventResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RegisterDurableEventResponse.ProtoReflect.Descriptor instead.
func (*RegisterDurableEventResponse) Descriptor() ([]byte, []int) {
return file_v1_dispatcher_proto_rawDescGZIP(), []int{11}
return file_v1_dispatcher_proto_rawDescGZIP(), []int{12}
}
type ListenForDurableEventRequest struct {
@@ -883,7 +1022,7 @@ type ListenForDurableEventRequest struct {
func (x *ListenForDurableEventRequest) Reset() {
*x = ListenForDurableEventRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_v1_dispatcher_proto_msgTypes[12]
mi := &file_v1_dispatcher_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -896,7 +1035,7 @@ func (x *ListenForDurableEventRequest) String() string {
func (*ListenForDurableEventRequest) ProtoMessage() {}
func (x *ListenForDurableEventRequest) ProtoReflect() protoreflect.Message {
mi := &file_v1_dispatcher_proto_msgTypes[12]
mi := &file_v1_dispatcher_proto_msgTypes[13]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -909,7 +1048,7 @@ func (x *ListenForDurableEventRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ListenForDurableEventRequest.ProtoReflect.Descriptor instead.
func (*ListenForDurableEventRequest) Descriptor() ([]byte, []int) {
return file_v1_dispatcher_proto_rawDescGZIP(), []int{12}
return file_v1_dispatcher_proto_rawDescGZIP(), []int{13}
}
func (x *ListenForDurableEventRequest) GetTaskId() string {
@@ -939,7 +1078,7 @@ type DurableEvent struct {
func (x *DurableEvent) Reset() {
*x = DurableEvent{}
if protoimpl.UnsafeEnabled {
mi := &file_v1_dispatcher_proto_msgTypes[13]
mi := &file_v1_dispatcher_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -952,7 +1091,7 @@ func (x *DurableEvent) String() string {
func (*DurableEvent) ProtoMessage() {}
func (x *DurableEvent) ProtoReflect() protoreflect.Message {
mi := &file_v1_dispatcher_proto_msgTypes[13]
mi := &file_v1_dispatcher_proto_msgTypes[14]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -965,7 +1104,7 @@ func (x *DurableEvent) ProtoReflect() protoreflect.Message {
// Deprecated: Use DurableEvent.ProtoReflect.Descriptor instead.
func (*DurableEvent) Descriptor() ([]byte, []int) {
return file_v1_dispatcher_proto_rawDescGZIP(), []int{13}
return file_v1_dispatcher_proto_rawDescGZIP(), []int{14}
}
func (x *DurableEvent) GetTaskId() string {
@@ -1099,79 +1238,105 @@ var file_v1_dispatcher_proto_rawDesc = []byte{
0x54, 0x61, 0x73, 0x6b, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x65,
0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61,
0x67, 0x65, 0x22, 0x90, 0x02, 0x0a, 0x13, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61,
0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x0f, 0x72, 0x65,
0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65,
0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x65, 0x67, 0x69,
0x73, 0x74, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0e, 0x72, 0x65,
0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0b,
0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x5f, 0x61, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1f, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61,
0x73, 0x6b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x41, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x41, 0x63, 0x6b,
0x12, 0x58, 0x0a, 0x0f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65,
0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x76, 0x31, 0x2e, 0x44,
0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c,
0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x6e, 0x74, 0x72,
0x79, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x42, 0x09, 0x0a, 0x07, 0x6d, 0x65,
0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x99, 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74,
0x65, 0x72, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x1d,
0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x12, 0x42, 0x0a,
0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x22, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76,
0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x64, 0x69,
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
0x73, 0x22, 0x1e, 0x0a, 0x1c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x44, 0x75, 0x72,
0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x22, 0x56, 0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x46, 0x6f, 0x72, 0x44, 0x75,
0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69,
0x67, 0x6e, 0x61, 0x6c, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x22, 0x5a, 0x0a, 0x0c, 0x44, 0x75, 0x72,
0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73,
0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b,
0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x5f, 0x6b, 0x65, 0x79,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x4b, 0x65,
0x79, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52,
0x04, 0x64, 0x61, 0x74, 0x61, 0x2a, 0xb0, 0x01, 0x0a, 0x14, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c,
0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x29,
0x0a, 0x25, 0x44, 0x55, 0x52, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54,
0x52, 0x49, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50,
0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x21, 0x0a, 0x1d, 0x44, 0x55, 0x52,
0x41, 0x42, 0x4c, 0x45, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x52, 0x49, 0x47, 0x47, 0x45,
0x52, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x52, 0x55, 0x4e, 0x10, 0x01, 0x12, 0x26, 0x0a, 0x22,
0x67, 0x65, 0x22, 0xf5, 0x01, 0x0a, 0x18, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61,
0x73, 0x6b, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
0x37, 0x0a, 0x18, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f,
0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x15, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78,
0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x6e, 0x76, 0x6f,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01,
0x28, 0x03, 0x52, 0x0f, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f,
0x75, 0x6e, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03,
0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x37, 0x0a, 0x0a,
0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e,
0x32, 0x18, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73,
0x6b, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f,
0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72,
0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xc6, 0x02, 0x0a, 0x13, 0x44,
0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x12, 0x50, 0x0a, 0x0f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x77,
0x6f, 0x72, 0x6b, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, 0x31,
0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b,
0x65, 0x72, 0x48, 0x00, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x57, 0x6f,
0x72, 0x6b, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0b, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x5f,
0x61, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x76, 0x31, 0x2e, 0x44,
0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x41,
0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x74, 0x72,
0x69, 0x67, 0x67, 0x65, 0x72, 0x41, 0x63, 0x6b, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x6e, 0x74, 0x72,
0x79, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x2d, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61,
0x73, 0x6b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x43,
0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x48, 0x00, 0x52, 0x0e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74,
0x65, 0x64, 0x12, 0x34, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1c, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61,
0x73, 0x6b, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48,
0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x42, 0x09, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73,
0x61, 0x67, 0x65, 0x22, 0x99, 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72,
0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a,
0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x12, 0x42, 0x0a, 0x0a, 0x63,
0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x22, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e,
0x74, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22,
0x1e, 0x0a, 0x1c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x44, 0x75, 0x72, 0x61, 0x62,
0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
0x56, 0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x46, 0x6f, 0x72, 0x44, 0x75, 0x72, 0x61,
0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e,
0x61, 0x6c, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69,
0x67, 0x6e, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x22, 0x5a, 0x0a, 0x0c, 0x44, 0x75, 0x72, 0x61, 0x62,
0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f,
0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64,
0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02,
0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x12,
0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64,
0x61, 0x74, 0x61, 0x2a, 0xb0, 0x01, 0x0a, 0x14, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54,
0x61, 0x73, 0x6b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x29, 0x0a, 0x25,
0x44, 0x55, 0x52, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x52, 0x49,
0x47, 0x47, 0x45, 0x52, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x57, 0x41, 0x49, 0x54, 0x5f, 0x46,
0x4f, 0x52, 0x10, 0x02, 0x12, 0x22, 0x0a, 0x1e, 0x44, 0x55, 0x52, 0x41, 0x42, 0x4c, 0x45, 0x5f,
0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x52, 0x49, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x4b, 0x49, 0x4e,
0x44, 0x5f, 0x4d, 0x45, 0x4d, 0x4f, 0x10, 0x03, 0x32, 0x84, 0x02, 0x0a, 0x0c, 0x56, 0x31, 0x44,
0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0b, 0x44, 0x75, 0x72,
0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x16, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75,
0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x17, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73,
0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12,
0x5b, 0x0a, 0x14, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x44, 0x75, 0x72, 0x61, 0x62,
0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x67,
0x69, 0x73, 0x74, 0x65, 0x72, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e,
0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65,
0x47, 0x47, 0x45, 0x52, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x21, 0x0a, 0x1d, 0x44, 0x55, 0x52, 0x41, 0x42,
0x4c, 0x45, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x52, 0x49, 0x47, 0x47, 0x45, 0x52, 0x5f,
0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x52, 0x55, 0x4e, 0x10, 0x01, 0x12, 0x26, 0x0a, 0x22, 0x44, 0x55,
0x52, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x52, 0x49, 0x47, 0x47,
0x45, 0x52, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x57, 0x41, 0x49, 0x54, 0x5f, 0x46, 0x4f, 0x52,
0x10, 0x02, 0x12, 0x22, 0x0a, 0x1e, 0x44, 0x55, 0x52, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x54, 0x41,
0x53, 0x4b, 0x5f, 0x54, 0x52, 0x49, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f,
0x4d, 0x45, 0x4d, 0x4f, 0x10, 0x03, 0x2a, 0x6b, 0x0a, 0x14, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c,
0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x27,
0x0a, 0x23, 0x44, 0x55, 0x52, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x45,
0x52, 0x52, 0x4f, 0x52, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x2a, 0x0a, 0x26, 0x44, 0x55, 0x52, 0x41, 0x42,
0x4c, 0x45, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x54, 0x59,
0x50, 0x45, 0x5f, 0x4e, 0x4f, 0x4e, 0x44, 0x45, 0x54, 0x45, 0x52, 0x4d, 0x49, 0x4e, 0x49, 0x53,
0x4d, 0x10, 0x01, 0x32, 0x84, 0x02, 0x0a, 0x0c, 0x56, 0x31, 0x44, 0x69, 0x73, 0x70, 0x61, 0x74,
0x63, 0x68, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0b, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54,
0x61, 0x73, 0x6b, 0x12, 0x16, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65,
0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x76, 0x31,
0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x5b, 0x0a, 0x14, 0x52, 0x65,
0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65,
0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x15,
0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x46, 0x6f, 0x72, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65,
0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x20, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65,
0x6e, 0x74, 0x12, 0x1f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72,
0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65,
0x72, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x65,
0x6e, 0x46, 0x6f, 0x72, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72,
0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42,
0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61,
0x74, 0x63, 0x68, 0x65, 0x74, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65,
0x74, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x73, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x12, 0x20, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x46, 0x6f, 0x72, 0x44,
0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x1a, 0x10, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x45,
0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69,
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74,
0x2d, 0x64, 0x65, 0x76, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2f, 0x69, 0x6e, 0x74,
0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x73,
0x68, 0x61, 0x72, 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -1186,51 +1351,55 @@ func file_v1_dispatcher_proto_rawDescGZIP() []byte {
return file_v1_dispatcher_proto_rawDescData
}
var file_v1_dispatcher_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_v1_dispatcher_proto_msgTypes = make([]protoimpl.MessageInfo, 14)
var file_v1_dispatcher_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
var file_v1_dispatcher_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
var file_v1_dispatcher_proto_goTypes = []interface{}{
(DurableTaskEventKind)(0), // 0: v1.DurableTaskEventKind
(*DurableTaskRequestRegisterWorker)(nil), // 1: v1.DurableTaskRequestRegisterWorker
(*DurableTaskResponseRegisterWorker)(nil), // 2: v1.DurableTaskResponseRegisterWorker
(*DurableTaskEventRequest)(nil), // 3: v1.DurableTaskEventRequest
(*DurableTaskEventAckResponse)(nil), // 4: v1.DurableTaskEventAckResponse
(*DurableTaskEventLogEntryCompletedResponse)(nil), // 5: v1.DurableTaskEventLogEntryCompletedResponse
(*DurableTaskEvictInvocationRequest)(nil), // 6: v1.DurableTaskEvictInvocationRequest
(*DurableTaskAwaitedCompletedEntry)(nil), // 7: v1.DurableTaskAwaitedCompletedEntry
(*DurableTaskWorkerStatusRequest)(nil), // 8: v1.DurableTaskWorkerStatusRequest
(*DurableTaskRequest)(nil), // 9: v1.DurableTaskRequest
(*DurableTaskResponse)(nil), // 10: v1.DurableTaskResponse
(*RegisterDurableEventRequest)(nil), // 11: v1.RegisterDurableEventRequest
(*RegisterDurableEventResponse)(nil), // 12: v1.RegisterDurableEventResponse
(*ListenForDurableEventRequest)(nil), // 13: v1.ListenForDurableEventRequest
(*DurableEvent)(nil), // 14: v1.DurableEvent
(*DurableEventListenerConditions)(nil), // 15: v1.DurableEventListenerConditions
(*TriggerWorkflowRequest)(nil), // 16: v1.TriggerWorkflowRequest
(DurableTaskErrorType)(0), // 1: v1.DurableTaskErrorType
(*DurableTaskRequestRegisterWorker)(nil), // 2: v1.DurableTaskRequestRegisterWorker
(*DurableTaskResponseRegisterWorker)(nil), // 3: v1.DurableTaskResponseRegisterWorker
(*DurableTaskEventRequest)(nil), // 4: v1.DurableTaskEventRequest
(*DurableTaskEventAckResponse)(nil), // 5: v1.DurableTaskEventAckResponse
(*DurableTaskEventLogEntryCompletedResponse)(nil), // 6: v1.DurableTaskEventLogEntryCompletedResponse
(*DurableTaskEvictInvocationRequest)(nil), // 7: v1.DurableTaskEvictInvocationRequest
(*DurableTaskAwaitedCompletedEntry)(nil), // 8: v1.DurableTaskAwaitedCompletedEntry
(*DurableTaskWorkerStatusRequest)(nil), // 9: v1.DurableTaskWorkerStatusRequest
(*DurableTaskRequest)(nil), // 10: v1.DurableTaskRequest
(*DurableTaskErrorResponse)(nil), // 11: v1.DurableTaskErrorResponse
(*DurableTaskResponse)(nil), // 12: v1.DurableTaskResponse
(*RegisterDurableEventRequest)(nil), // 13: v1.RegisterDurableEventRequest
(*RegisterDurableEventResponse)(nil), // 14: v1.RegisterDurableEventResponse
(*ListenForDurableEventRequest)(nil), // 15: v1.ListenForDurableEventRequest
(*DurableEvent)(nil), // 16: v1.DurableEvent
(*DurableEventListenerConditions)(nil), // 17: v1.DurableEventListenerConditions
(*TriggerWorkflowRequest)(nil), // 18: v1.TriggerWorkflowRequest
}
var file_v1_dispatcher_proto_depIdxs = []int32{
0, // 0: v1.DurableTaskEventRequest.kind:type_name -> v1.DurableTaskEventKind
15, // 1: v1.DurableTaskEventRequest.wait_for_conditions:type_name -> v1.DurableEventListenerConditions
16, // 2: v1.DurableTaskEventRequest.trigger_opts:type_name -> v1.TriggerWorkflowRequest
7, // 3: v1.DurableTaskWorkerStatusRequest.waiting_entries:type_name -> v1.DurableTaskAwaitedCompletedEntry
1, // 4: v1.DurableTaskRequest.register_worker:type_name -> v1.DurableTaskRequestRegisterWorker
3, // 5: v1.DurableTaskRequest.event:type_name -> v1.DurableTaskEventRequest
6, // 6: v1.DurableTaskRequest.evict_invocation:type_name -> v1.DurableTaskEvictInvocationRequest
8, // 7: v1.DurableTaskRequest.worker_status:type_name -> v1.DurableTaskWorkerStatusRequest
2, // 8: v1.DurableTaskResponse.register_worker:type_name -> v1.DurableTaskResponseRegisterWorker
4, // 9: v1.DurableTaskResponse.trigger_ack:type_name -> v1.DurableTaskEventAckResponse
5, // 10: v1.DurableTaskResponse.entry_completed:type_name -> v1.DurableTaskEventLogEntryCompletedResponse
15, // 11: v1.RegisterDurableEventRequest.conditions:type_name -> v1.DurableEventListenerConditions
9, // 12: v1.V1Dispatcher.DurableTask:input_type -> v1.DurableTaskRequest
11, // 13: v1.V1Dispatcher.RegisterDurableEvent:input_type -> v1.RegisterDurableEventRequest
13, // 14: v1.V1Dispatcher.ListenForDurableEvent:input_type -> v1.ListenForDurableEventRequest
10, // 15: v1.V1Dispatcher.DurableTask:output_type -> v1.DurableTaskResponse
12, // 16: v1.V1Dispatcher.RegisterDurableEvent:output_type -> v1.RegisterDurableEventResponse
14, // 17: v1.V1Dispatcher.ListenForDurableEvent:output_type -> v1.DurableEvent
15, // [15:18] is the sub-list for method output_type
12, // [12:15] is the sub-list for method input_type
12, // [12:12] is the sub-list for extension type_name
12, // [12:12] is the sub-list for extension extendee
0, // [0:12] is the sub-list for field type_name
17, // 1: v1.DurableTaskEventRequest.wait_for_conditions:type_name -> v1.DurableEventListenerConditions
18, // 2: v1.DurableTaskEventRequest.trigger_opts:type_name -> v1.TriggerWorkflowRequest
8, // 3: v1.DurableTaskWorkerStatusRequest.waiting_entries:type_name -> v1.DurableTaskAwaitedCompletedEntry
2, // 4: v1.DurableTaskRequest.register_worker:type_name -> v1.DurableTaskRequestRegisterWorker
4, // 5: v1.DurableTaskRequest.event:type_name -> v1.DurableTaskEventRequest
7, // 6: v1.DurableTaskRequest.evict_invocation:type_name -> v1.DurableTaskEvictInvocationRequest
9, // 7: v1.DurableTaskRequest.worker_status:type_name -> v1.DurableTaskWorkerStatusRequest
1, // 8: v1.DurableTaskErrorResponse.error_type:type_name -> v1.DurableTaskErrorType
3, // 9: v1.DurableTaskResponse.register_worker:type_name -> v1.DurableTaskResponseRegisterWorker
5, // 10: v1.DurableTaskResponse.trigger_ack:type_name -> v1.DurableTaskEventAckResponse
6, // 11: v1.DurableTaskResponse.entry_completed:type_name -> v1.DurableTaskEventLogEntryCompletedResponse
11, // 12: v1.DurableTaskResponse.error:type_name -> v1.DurableTaskErrorResponse
17, // 13: v1.RegisterDurableEventRequest.conditions:type_name -> v1.DurableEventListenerConditions
10, // 14: v1.V1Dispatcher.DurableTask:input_type -> v1.DurableTaskRequest
13, // 15: v1.V1Dispatcher.RegisterDurableEvent:input_type -> v1.RegisterDurableEventRequest
15, // 16: v1.V1Dispatcher.ListenForDurableEvent:input_type -> v1.ListenForDurableEventRequest
12, // 17: v1.V1Dispatcher.DurableTask:output_type -> v1.DurableTaskResponse
14, // 18: v1.V1Dispatcher.RegisterDurableEvent:output_type -> v1.RegisterDurableEventResponse
16, // 19: v1.V1Dispatcher.ListenForDurableEvent:output_type -> v1.DurableEvent
17, // [17:20] is the sub-list for method output_type
14, // [14:17] is the sub-list for method input_type
14, // [14:14] is the sub-list for extension type_name
14, // [14:14] is the sub-list for extension extendee
0, // [0:14] is the sub-list for field type_name
}
func init() { file_v1_dispatcher_proto_init() }
@@ -1350,7 +1519,7 @@ func file_v1_dispatcher_proto_init() {
}
}
file_v1_dispatcher_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DurableTaskResponse); i {
switch v := v.(*DurableTaskErrorResponse); i {
case 0:
return &v.state
case 1:
@@ -1362,7 +1531,7 @@ func file_v1_dispatcher_proto_init() {
}
}
file_v1_dispatcher_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RegisterDurableEventRequest); i {
switch v := v.(*DurableTaskResponse); i {
case 0:
return &v.state
case 1:
@@ -1374,7 +1543,7 @@ func file_v1_dispatcher_proto_init() {
}
}
file_v1_dispatcher_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RegisterDurableEventResponse); i {
switch v := v.(*RegisterDurableEventRequest); i {
case 0:
return &v.state
case 1:
@@ -1386,7 +1555,7 @@ func file_v1_dispatcher_proto_init() {
}
}
file_v1_dispatcher_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ListenForDurableEventRequest); i {
switch v := v.(*RegisterDurableEventResponse); i {
case 0:
return &v.state
case 1:
@@ -1398,6 +1567,18 @@ func file_v1_dispatcher_proto_init() {
}
}
file_v1_dispatcher_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ListenForDurableEventRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_v1_dispatcher_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DurableEvent); i {
case 0:
return &v.state
@@ -1417,18 +1598,19 @@ func file_v1_dispatcher_proto_init() {
(*DurableTaskRequest_EvictInvocation)(nil),
(*DurableTaskRequest_WorkerStatus)(nil),
}
file_v1_dispatcher_proto_msgTypes[9].OneofWrappers = []interface{}{
file_v1_dispatcher_proto_msgTypes[10].OneofWrappers = []interface{}{
(*DurableTaskResponse_RegisterWorker)(nil),
(*DurableTaskResponse_TriggerAck)(nil),
(*DurableTaskResponse_EntryCompleted)(nil),
(*DurableTaskResponse_Error)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_v1_dispatcher_proto_rawDesc,
NumEnums: 1,
NumMessages: 14,
NumEnums: 2,
NumMessages: 15,
NumExtensions: 0,
NumServices: 1,
},
+154 -29
View File
@@ -1,9 +1,13 @@
package repository
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"sort"
"github.com/google/uuid"
"github.com/jackc/pgx/v5"
@@ -67,19 +71,44 @@ func newDurableEventsRepository(shared *sharedRepository) DurableEventsRepositor
}
}
type NonDeterminismError struct {
NodeId int64
TaskExternalId uuid.UUID
ExpectedIdempotencyKey []byte
ActualIdempotencyKey []byte
}
func (m *NonDeterminismError) Error() string {
return fmt.Sprintf("non-determinism detected for durable event log entry in task %s at node id %d", m.TaskExternalId.String(), m.NodeId)
}
type GetOrCreateLogEntryOpts struct {
TenantId uuid.UUID
DurableTaskExternalId uuid.UUID
DurableTaskId int64
DurableTaskInsertedAt pgtype.Timestamptz
Kind sqlcv1.V1DurableEventLogKind
NodeId int64
ParentNodeId pgtype.Int8
BranchId int64
IdempotencyKey []byte
IsSatisfied bool
}
func (r *durableEventsRepository) getOrCreateEventLogEntry(
ctx context.Context,
tx sqlcv1.DBTX,
tenantId uuid.UUID,
params sqlcv1.CreateDurableEventLogEntryParams,
params GetOrCreateLogEntryOpts,
inputPayload []byte,
resultPayload []byte,
) (*EventLogEntryWithPayloads, error) {
entryExternalId := uuid.New()
alreadyExisted := true
entry, err := r.queries.GetDurableEventLogEntry(ctx, tx, sqlcv1.GetDurableEventLogEntryParams{
Durabletaskid: params.Durabletaskid,
Durabletaskinsertedat: params.Durabletaskinsertedat,
Nodeid: params.Nodeid,
Durabletaskid: params.DurableTaskId,
Durabletaskinsertedat: params.DurableTaskInsertedAt,
Nodeid: params.NodeId,
})
if err != nil && !errors.Is(err, pgx.ErrNoRows) {
@@ -87,16 +116,16 @@ func (r *durableEventsRepository) getOrCreateEventLogEntry(
} else if errors.Is(err, pgx.ErrNoRows) {
alreadyExisted = false
entry, err := r.queries.CreateDurableEventLogEntry(ctx, tx, sqlcv1.CreateDurableEventLogEntryParams{
Tenantid: params.Tenantid,
Externalid: params.Externalid,
Durabletaskid: params.Durabletaskid,
Durabletaskinsertedat: params.Durabletaskinsertedat,
Tenantid: params.TenantId,
Externalid: entryExternalId,
Durabletaskid: params.DurableTaskId,
Durabletaskinsertedat: params.DurableTaskInsertedAt,
Kind: params.Kind,
Nodeid: params.Nodeid,
Nodeid: params.NodeId,
ParentNodeId: params.ParentNodeId,
Branchid: params.Branchid,
Datahash: params.Datahash,
Datahashalg: params.Datahashalg,
Branchid: params.BranchId,
Idempotencykey: params.IdempotencyKey,
Issatisfied: params.IsSatisfied,
})
if err != nil {
@@ -131,7 +160,18 @@ func (r *durableEventsRepository) getOrCreateEventLogEntry(
if err != nil {
return nil, err
}
} else {
incomingIdempotencyKey := params.IdempotencyKey
existingIdempotencyKey := entry.IdempotencyKey
if !bytes.Equal(incomingIdempotencyKey, existingIdempotencyKey) {
return nil, &NonDeterminismError{
NodeId: params.NodeId,
TaskExternalId: params.DurableTaskExternalId,
ExpectedIdempotencyKey: existingIdempotencyKey,
ActualIdempotencyKey: incomingIdempotencyKey,
}
}
}
if alreadyExisted {
@@ -183,19 +223,34 @@ func (r *durableEventsRepository) GetSatisfiedDurableEvents(ctx context.Context,
return nil, fmt.Errorf("failed to list satisfied entries: %w", err)
}
result := make([]*SatisfiedEventWithPayload, 0, len(rows))
retrievePayloadOpts := make([]RetrievePayloadOpts, len(rows))
for _, row := range rows {
payload, err := r.payloadStore.RetrieveSingle(ctx, r.pool, RetrievePayloadOpts{
for i, row := range rows {
retrievePayloadOpts[i] = RetrievePayloadOpts{
Id: row.ID,
InsertedAt: row.InsertedAt,
Type: sqlcv1.V1PayloadTypeDURABLEEVENTLOGENTRYRESULTDATA,
TenantId: tenantId,
})
if err != nil {
r.l.Warn().Err(err).Msgf("failed to retrieve payload for entry %d", row.NodeID)
payload = nil
}
}
payloads, err := r.payloadStore.Retrieve(ctx, r.pool, retrievePayloadOpts...)
if err != nil {
return nil, fmt.Errorf("failed to retrieve payloads for satisfied callbacks: %w", err)
}
result := make([]*SatisfiedEventWithPayload, 0, len(rows))
for _, row := range rows {
retrieveOpt := RetrievePayloadOpts{
Id: row.ID,
InsertedAt: row.InsertedAt,
Type: sqlcv1.V1PayloadTypeDURABLEEVENTLOGENTRYRESULTDATA,
TenantId: tenantId,
}
payload := payloads[retrieveOpt]
result = append(result, &SatisfiedEventWithPayload{
TaskExternalId: row.TaskExternalID,
@@ -211,6 +266,71 @@ func getDurableTaskSignalKey(taskExternalId uuid.UUID, nodeId int64) string {
return fmt.Sprintf("durable:%s:%d", taskExternalId.String(), nodeId)
}
func (r *durableEventsRepository) createIdempotencyKey(opts IngestDurableTaskEventOpts) ([]byte, error) {
// todo: be more intentional about how we construct this key (e.g. do we want to marshal all of the opts?)
dataToHash := []byte(opts.Kind)
if opts.TriggerOpts != nil {
dataToHash = append(dataToHash, opts.TriggerOpts.Data...)
dataToHash = append(dataToHash, []byte(opts.TriggerOpts.WorkflowName)...)
}
if opts.WaitForConditions != nil {
sort.Slice(opts.WaitForConditions, func(i, j int) bool {
condI := opts.WaitForConditions[i]
condJ := opts.WaitForConditions[j]
if condI.Expression != condJ.Expression {
return condI.Expression < condJ.Expression
}
if condI.ReadableDataKey != condJ.ReadableDataKey {
return condI.ReadableDataKey < condJ.ReadableDataKey
}
if condI.Kind != condJ.Kind {
return condI.Kind < condJ.Kind
}
if condI.SleepFor != nil && condJ.SleepFor != nil {
if *condI.SleepFor != *condJ.SleepFor {
return *condI.SleepFor < *condJ.SleepFor
}
}
if condI.UserEventKey != nil && condJ.UserEventKey != nil {
if *condI.UserEventKey != *condJ.UserEventKey {
return *condI.UserEventKey < *condJ.UserEventKey
}
}
return false
})
for _, cond := range opts.WaitForConditions {
toHash := cond.Expression + cond.ReadableDataKey + string(cond.Kind)
if cond.SleepFor != nil {
toHash += *cond.SleepFor
}
if cond.UserEventKey != nil {
toHash += *cond.UserEventKey
}
dataToHash = append(dataToHash, []byte(toHash)...)
}
}
h := sha256.New()
h.Write(dataToHash)
hashBytes := h.Sum(nil)
idempotencyKey := make([]byte, hex.EncodedLen(len(hashBytes)))
hex.Encode(idempotencyKey, hashBytes)
return idempotencyKey, nil
}
func (r *durableEventsRepository) IngestDurableTaskEvent(ctx context.Context, opts IngestDurableTaskEventOpts) (*IngestDurableTaskEventResult, error) {
if err := r.v.Validate(opts); err != nil {
return nil, fmt.Errorf("invalid opts: %w", err)
@@ -297,22 +417,27 @@ func (r *durableEventsRepository) IngestDurableTaskEvent(ctx context.Context, op
return nil, fmt.Errorf("unsupported durable event log entry kind: %s", opts.Kind)
}
idempotencyKey, err := r.createIdempotencyKey(opts)
if err != nil {
return nil, fmt.Errorf("failed to create idempotency key: %w", err)
}
logEntry, err := r.getOrCreateEventLogEntry(
ctx,
tx,
opts.TenantId,
sqlcv1.CreateDurableEventLogEntryParams{
Tenantid: opts.TenantId,
Externalid: uuid.New(),
Durabletaskid: task.ID,
Durabletaskinsertedat: task.InsertedAt,
GetOrCreateLogEntryOpts{
TenantId: opts.TenantId,
DurableTaskExternalId: task.ExternalID,
DurableTaskId: task.ID,
DurableTaskInsertedAt: task.InsertedAt,
Kind: opts.Kind,
Nodeid: nodeId,
NodeId: nodeId,
ParentNodeId: parentNodeId,
Branchid: branchId,
Issatisfied: isSatisfied,
Datahash: nil, // todo: implement this for nondeterminism check
Datahashalg: "",
BranchId: branchId,
IsSatisfied: isSatisfied,
IdempotencyKey: idempotencyKey,
},
opts.Payload,
resultPayload,
+160
View File
@@ -0,0 +1,160 @@
//go:build !e2e && !load && !rampup && !integration
package repository
import (
"testing"
"github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
"github.com/stretchr/testify/assert"
)
func strPtr(s string) *string { return &s }
func key(t *testing.T, opts IngestDurableTaskEventOpts) string {
t.Helper()
r := &durableEventsRepository{}
k, err := r.createIdempotencyKey(opts)
assert.NoError(t, err)
return string(k)
}
func TestCreateIdempotencyKey_ConditionOrderInvariant(t *testing.T) {
condA := CreateExternalSignalConditionOpt{
Kind: CreateExternalSignalConditionKindSLEEP,
Expression: "aaa",
ReadableDataKey: "output",
SleepFor: strPtr("10s"),
}
condB := CreateExternalSignalConditionOpt{
Kind: CreateExternalSignalConditionKindUSEREVENT,
Expression: "bbb",
ReadableDataKey: "output",
UserEventKey: strPtr("some-event"),
}
optsAB := IngestDurableTaskEventOpts{
Kind: sqlcv1.V1DurableEventLogKindWAITFOR,
WaitForConditions: []CreateExternalSignalConditionOpt{condA, condB},
}
optsBA := IngestDurableTaskEventOpts{
Kind: sqlcv1.V1DurableEventLogKindWAITFOR,
WaitForConditions: []CreateExternalSignalConditionOpt{condB, condA},
}
assert.Equal(t, key(t, optsAB), key(t, optsBA))
}
func TestCreateIdempotencyKey_DifferentConditions(t *testing.T) {
base := IngestDurableTaskEventOpts{
Kind: sqlcv1.V1DurableEventLogKindWAITFOR,
WaitForConditions: []CreateExternalSignalConditionOpt{
{Kind: CreateExternalSignalConditionKindSLEEP, Expression: "true", ReadableDataKey: "output", SleepFor: strPtr("5s")},
},
}
different := IngestDurableTaskEventOpts{
Kind: sqlcv1.V1DurableEventLogKindWAITFOR,
WaitForConditions: []CreateExternalSignalConditionOpt{
{Kind: CreateExternalSignalConditionKindSLEEP, Expression: "true", ReadableDataKey: "output", SleepFor: strPtr("30s")},
},
}
assert.NotEqual(t, key(t, base), key(t, different))
}
func TestCreateIdempotencyKey_DifferentKind(t *testing.T) {
run := IngestDurableTaskEventOpts{Kind: sqlcv1.V1DurableEventLogKindRUN}
waitFor := IngestDurableTaskEventOpts{Kind: sqlcv1.V1DurableEventLogKindWAITFOR}
memo := IngestDurableTaskEventOpts{Kind: sqlcv1.V1DurableEventLogKindMEMO}
assert.NotEqual(t, key(t, run), key(t, waitFor))
assert.NotEqual(t, key(t, run), key(t, memo))
assert.NotEqual(t, key(t, waitFor), key(t, memo))
}
func TestCreateIdempotencyKey_DifferentWorkflowName(t *testing.T) {
optsA := IngestDurableTaskEventOpts{
Kind: sqlcv1.V1DurableEventLogKindRUN,
TriggerOpts: &WorkflowNameTriggerOpts{
TriggerTaskData: &TriggerTaskData{WorkflowName: "workflow-a"},
},
}
optsB := IngestDurableTaskEventOpts{
Kind: sqlcv1.V1DurableEventLogKindRUN,
TriggerOpts: &WorkflowNameTriggerOpts{
TriggerTaskData: &TriggerTaskData{WorkflowName: "workflow-b"},
},
}
assert.NotEqual(t, key(t, optsA), key(t, optsB))
}
func TestCreateIdempotencyKey_DifferentTriggerData(t *testing.T) {
optsA := IngestDurableTaskEventOpts{
Kind: sqlcv1.V1DurableEventLogKindRUN,
TriggerOpts: &WorkflowNameTriggerOpts{
TriggerTaskData: &TriggerTaskData{WorkflowName: "my-workflow", Data: []byte(`{"x":1}`)},
},
}
optsB := IngestDurableTaskEventOpts{
Kind: sqlcv1.V1DurableEventLogKindRUN,
TriggerOpts: &WorkflowNameTriggerOpts{
TriggerTaskData: &TriggerTaskData{WorkflowName: "my-workflow", Data: []byte(`{"x":2}`)},
},
}
assert.NotEqual(t, key(t, optsA), key(t, optsB))
}
func TestCreateIdempotencyKey_WithAndWithoutTriggerOpts(t *testing.T) {
without := IngestDurableTaskEventOpts{Kind: sqlcv1.V1DurableEventLogKindRUN}
with := IngestDurableTaskEventOpts{
Kind: sqlcv1.V1DurableEventLogKindRUN,
TriggerOpts: &WorkflowNameTriggerOpts{
TriggerTaskData: &TriggerTaskData{WorkflowName: "my-workflow"},
},
}
assert.NotEqual(t, key(t, without), key(t, with))
}
func int32Ptr(i int32) *int32 { return &i }
func TestCreateIdempotencyKey_PriorityIgnored(t *testing.T) {
base := IngestDurableTaskEventOpts{
Kind: sqlcv1.V1DurableEventLogKindRUN,
TriggerOpts: &WorkflowNameTriggerOpts{
TriggerTaskData: &TriggerTaskData{WorkflowName: "my-workflow", Data: []byte(`{"x":1}`)},
},
}
withPriority := IngestDurableTaskEventOpts{
Kind: sqlcv1.V1DurableEventLogKindRUN,
TriggerOpts: &WorkflowNameTriggerOpts{
TriggerTaskData: &TriggerTaskData{WorkflowName: "my-workflow", Data: []byte(`{"x":1}`), Priority: int32Ptr(3)},
},
}
assert.Equal(t, key(t, base), key(t, withPriority))
}
func TestCreateIdempotencyKey_AdditionalMetadataIgnored(t *testing.T) {
base := IngestDurableTaskEventOpts{
Kind: sqlcv1.V1DurableEventLogKindRUN,
TriggerOpts: &WorkflowNameTriggerOpts{
TriggerTaskData: &TriggerTaskData{WorkflowName: "my-workflow", Data: []byte(`{"x":1}`)},
},
}
withMeta := IngestDurableTaskEventOpts{
Kind: sqlcv1.V1DurableEventLogKindRUN,
TriggerOpts: &WorkflowNameTriggerOpts{
TriggerTaskData: &TriggerTaskData{
WorkflowName: "my-workflow",
Data: []byte(`{"x":1}`),
AdditionalMetadata: []byte(`{"env":"prod"}`),
},
},
}
assert.Equal(t, key(t, base), key(t, withMeta))
}
+4 -6
View File
@@ -60,9 +60,8 @@ INSERT INTO v1_durable_event_log_entry (
node_id,
parent_node_id,
branch_id,
is_satisfied,
data_hash,
data_hash_alg
idempotency_key,
is_satisfied
)
VALUES (
@tenantId::UUID,
@@ -74,9 +73,8 @@ VALUES (
@nodeId::BIGINT,
sqlc.narg('parentNodeId')::BIGINT,
@branchId::BIGINT,
@isSatisfied::BOOLEAN,
@dataHash::BYTEA,
@dataHashAlg::TEXT
@idempotencyKey::BYTEA,
@isSatisfied::BOOLEAN
)
ON CONFLICT (durable_task_id, durable_task_inserted_at, node_id) DO NOTHING
RETURNING *
+15 -24
View File
@@ -23,9 +23,8 @@ INSERT INTO v1_durable_event_log_entry (
node_id,
parent_node_id,
branch_id,
is_satisfied,
data_hash,
data_hash_alg
idempotency_key,
is_satisfied
)
VALUES (
$1::UUID,
@@ -37,12 +36,11 @@ VALUES (
$6::BIGINT,
$7::BIGINT,
$8::BIGINT,
$9::BOOLEAN,
$10::BYTEA,
$11::TEXT
$9::BYTEA,
$10::BOOLEAN
)
ON CONFLICT (durable_task_id, durable_task_inserted_at, node_id) DO NOTHING
RETURNING tenant_id, external_id, inserted_at, id, durable_task_id, durable_task_inserted_at, kind, node_id, parent_node_id, branch_id, data_hash, data_hash_alg, is_satisfied
RETURNING tenant_id, external_id, inserted_at, id, durable_task_id, durable_task_inserted_at, kind, node_id, parent_node_id, branch_id, idempotency_key, is_satisfied
`
type CreateDurableEventLogEntryParams struct {
@@ -54,9 +52,8 @@ type CreateDurableEventLogEntryParams struct {
Nodeid int64 `json:"nodeid"`
ParentNodeId pgtype.Int8 `json:"parentNodeId"`
Branchid int64 `json:"branchid"`
Idempotencykey []byte `json:"idempotencykey"`
Issatisfied bool `json:"issatisfied"`
Datahash []byte `json:"datahash"`
Datahashalg string `json:"datahashalg"`
}
func (q *Queries) CreateDurableEventLogEntry(ctx context.Context, db DBTX, arg CreateDurableEventLogEntryParams) (*V1DurableEventLogEntry, error) {
@@ -69,9 +66,8 @@ func (q *Queries) CreateDurableEventLogEntry(ctx context.Context, db DBTX, arg C
arg.Nodeid,
arg.ParentNodeId,
arg.Branchid,
arg.Idempotencykey,
arg.Issatisfied,
arg.Datahash,
arg.Datahashalg,
)
var i V1DurableEventLogEntry
err := row.Scan(
@@ -85,8 +81,7 @@ func (q *Queries) CreateDurableEventLogEntry(ctx context.Context, db DBTX, arg C
&i.NodeID,
&i.ParentNodeID,
&i.BranchID,
&i.DataHash,
&i.DataHashAlg,
&i.IdempotencyKey,
&i.IsSatisfied,
)
return &i, err
@@ -172,7 +167,7 @@ func (q *Queries) GetAndLockLogFile(ctx context.Context, db DBTX, arg GetAndLock
}
const getDurableEventLogEntry = `-- name: GetDurableEventLogEntry :one
SELECT tenant_id, external_id, inserted_at, id, durable_task_id, durable_task_inserted_at, kind, node_id, parent_node_id, branch_id, data_hash, data_hash_alg, is_satisfied
SELECT tenant_id, external_id, inserted_at, id, durable_task_id, durable_task_inserted_at, kind, node_id, parent_node_id, branch_id, idempotency_key, is_satisfied
FROM v1_durable_event_log_entry
WHERE durable_task_id = $1::BIGINT
AND durable_task_inserted_at = $2::TIMESTAMPTZ
@@ -199,8 +194,7 @@ func (q *Queries) GetDurableEventLogEntry(ctx context.Context, db DBTX, arg GetD
&i.NodeID,
&i.ParentNodeID,
&i.BranchID,
&i.DataHash,
&i.DataHashAlg,
&i.IdempotencyKey,
&i.IsSatisfied,
)
return &i, err
@@ -214,7 +208,7 @@ WITH tasks AS (
WHERE lt.external_id = ANY($2::UUID[])
)
SELECT e.tenant_id, e.external_id, e.inserted_at, e.id, e.durable_task_id, e.durable_task_inserted_at, e.kind, e.node_id, e.parent_node_id, e.branch_id, e.data_hash, e.data_hash_alg, e.is_satisfied, t.external_id AS task_external_id
SELECT e.tenant_id, e.external_id, e.inserted_at, e.id, e.durable_task_id, e.durable_task_inserted_at, e.kind, e.node_id, e.parent_node_id, e.branch_id, e.idempotency_key, e.is_satisfied, t.external_id AS task_external_id
FROM v1_durable_event_log_entry e
JOIN tasks t ON (t.id, t.inserted_at) = (e.durable_task_id, e.durable_task_inserted_at)
WHERE
@@ -238,8 +232,7 @@ type ListSatisfiedEntriesRow struct {
NodeID int64 `json:"node_id"`
ParentNodeID pgtype.Int8 `json:"parent_node_id"`
BranchID int64 `json:"branch_id"`
DataHash []byte `json:"data_hash"`
DataHashAlg pgtype.Text `json:"data_hash_alg"`
IdempotencyKey []byte `json:"idempotency_key"`
IsSatisfied bool `json:"is_satisfied"`
TaskExternalID uuid.UUID `json:"task_external_id"`
}
@@ -264,8 +257,7 @@ func (q *Queries) ListSatisfiedEntries(ctx context.Context, db DBTX, arg ListSat
&i.NodeID,
&i.ParentNodeID,
&i.BranchID,
&i.DataHash,
&i.DataHashAlg,
&i.IdempotencyKey,
&i.IsSatisfied,
&i.TaskExternalID,
); err != nil {
@@ -293,7 +285,7 @@ FROM inputs
WHERE v1_durable_event_log_entry.durable_task_id = inputs.durable_task_id
AND v1_durable_event_log_entry.durable_task_inserted_at = inputs.durable_task_inserted_at
AND v1_durable_event_log_entry.node_id = inputs.node_id
RETURNING v1_durable_event_log_entry.tenant_id, v1_durable_event_log_entry.external_id, v1_durable_event_log_entry.inserted_at, v1_durable_event_log_entry.id, v1_durable_event_log_entry.durable_task_id, v1_durable_event_log_entry.durable_task_inserted_at, v1_durable_event_log_entry.kind, v1_durable_event_log_entry.node_id, v1_durable_event_log_entry.parent_node_id, v1_durable_event_log_entry.branch_id, v1_durable_event_log_entry.data_hash, v1_durable_event_log_entry.data_hash_alg, v1_durable_event_log_entry.is_satisfied
RETURNING v1_durable_event_log_entry.tenant_id, v1_durable_event_log_entry.external_id, v1_durable_event_log_entry.inserted_at, v1_durable_event_log_entry.id, v1_durable_event_log_entry.durable_task_id, v1_durable_event_log_entry.durable_task_inserted_at, v1_durable_event_log_entry.kind, v1_durable_event_log_entry.node_id, v1_durable_event_log_entry.parent_node_id, v1_durable_event_log_entry.branch_id, v1_durable_event_log_entry.idempotency_key, v1_durable_event_log_entry.is_satisfied
`
type UpdateDurableEventLogEntriesSatisfiedParams struct {
@@ -322,8 +314,7 @@ func (q *Queries) UpdateDurableEventLogEntriesSatisfied(ctx context.Context, db
&i.NodeID,
&i.ParentNodeID,
&i.BranchID,
&i.DataHash,
&i.DataHashAlg,
&i.IdempotencyKey,
&i.IsSatisfied,
); err != nil {
return nil, err
+1 -2
View File
@@ -3073,8 +3073,7 @@ type V1DurableEventLogEntry struct {
NodeID int64 `json:"node_id"`
ParentNodeID pgtype.Int8 `json:"parent_node_id"`
BranchID int64 `json:"branch_id"`
DataHash []byte `json:"data_hash"`
DataHashAlg pgtype.Text `json:"data_hash_alg"`
IdempotencyKey []byte `json:"idempotency_key"`
IsSatisfied bool `json:"is_satisfied"`
}
@@ -11,6 +11,8 @@ from examples.durable.worker import (
durable_workflow,
wait_for_sleep_twice,
durable_spawn_dag,
durable_non_determinism,
hatchet,
)
from hatchet_sdk import Hatchet
@@ -134,3 +136,23 @@ async def test_durable_spawn_dag() -> None:
assert result["sleep_duration"] <= 2
assert result["spawn_duration"] >= 5
assert result["spawn_duration"] <= 10
@pytest.mark.asyncio(loop_scope="session")
async def test_durable_non_determinism() -> None:
    """First execution completes normally; replaying the run must surface a
    NonDeterminismError on node 1 during attempt 2."""
    run_ref = await durable_non_determinism.aio_run_no_wait()
    first = await run_ref.aio_result()

    # sleep_time scales with the attempt number; the 3x upper bound is
    # headroom to prevent flakiness.
    assert first.attempt_number < first.sleep_time < first.attempt_number * 3
    assert not first.non_determinism_detected

    # Replay the same run and confirm the non-determinism is detected.
    await hatchet.runs.aio_replay(run_ref.workflow_run_id)
    replayed = await run_ref.aio_result()

    assert replayed.non_determinism_detected
    assert replayed.node_id == 1
    assert replayed.attempt_number == 2
+33
View File
@@ -3,6 +3,7 @@ import time
from datetime import timedelta
from typing import Any
from uuid import uuid4
from pydantic import BaseModel
from hatchet_sdk import (
Context,
@@ -13,6 +14,7 @@ from hatchet_sdk import (
UserEventCondition,
or_,
)
from hatchet_sdk.exceptions import NonDeterminismError
hatchet = Hatchet(debug=True)
@@ -212,6 +214,36 @@ async def durable_sleep_event_spawn(
}
class NonDeterminismOutput(BaseModel):
    """Result payload returned by the durable_non_determinism example task."""

    # Attempt counter read from the task context at return time.
    attempt_number: int
    # Seconds the task intended to durably sleep (attempt_number * 2).
    sleep_time: int
    # True only when aio_sleep_for raised NonDeterminismError.
    non_determinism_detected: bool = False
    # Event-log node id carried by the NonDeterminismError, if one was raised.
    node_id: int | None = None
@hatchet.durable_task(execution_timeout=timedelta(seconds=10))
async def durable_non_determinism(
    input: EmptyModel, ctx: DurableContext
) -> NonDeterminismOutput:
    """Durable task whose sleep duration depends on the attempt number.

    Because the sleep duration changes between attempts, replaying this task
    produces a different event log and raises NonDeterminismError, which is
    reported back through the output model instead of failing the task.
    """
    planned_sleep = ctx.attempt_number * 2

    try:
        await ctx.aio_sleep_for(timedelta(seconds=planned_sleep))
    except NonDeterminismError as err:
        # Replay diverged from the recorded event log; report where.
        return NonDeterminismOutput(
            attempt_number=ctx.attempt_number,
            sleep_time=planned_sleep,
            non_determinism_detected=True,
            node_id=err.node_id,
        )

    return NonDeterminismOutput(
        attempt_number=ctx.attempt_number,
        sleep_time=planned_sleep,
    )
def main() -> None:
worker = hatchet.worker(
"durable-worker",
@@ -222,6 +254,7 @@ def main() -> None:
spawn_child_task,
durable_with_spawn,
durable_sleep_event_spawn,
durable_non_determinism,
],
)
worker.start()
+1 -1
View File
@@ -49,7 +49,7 @@ def simple(input: EmptyModel, ctx: Context) -> dict[str, str]:
@hatchet.durable_task()
async def simple_durable(input: EmptyModel, ctx: Context) -> dict[str, str]:
    """Minimal durable task example.

    Fix: the diff residue left both the old synchronous ``def`` header and the
    new ``async def`` header for this function, which is a syntax error and a
    conflicting definition; only the async form is kept.
    """
    print("Executing durable task!")
    return {"result": "Hello from durable!"}
@@ -10,9 +10,6 @@ from examples.unit_testing.workflows import (
durable_async_complex_workflow,
durable_async_simple_workflow,
durable_async_standalone,
durable_sync_complex_workflow,
durable_sync_simple_workflow,
durable_sync_standalone,
start,
sync_complex_workflow,
sync_simple_workflow,
@@ -25,11 +22,8 @@ from hatchet_sdk import Task
"func",
[
sync_standalone,
durable_sync_standalone,
sync_simple_workflow,
durable_sync_simple_workflow,
sync_complex_workflow,
durable_sync_complex_workflow,
],
)
def test_simple_unit_sync(func: Task[UnitTestInput, UnitTestOutput]) -> None:
@@ -44,19 +44,6 @@ async def async_standalone(input: UnitTestInput, ctx: Context) -> UnitTestOutput
)
@hatchet.durable_task(input_validator=UnitTestInput)
def durable_sync_standalone(
    input: UnitTestInput, ctx: DurableContext
) -> UnitTestOutput:
    """Synchronous standalone durable task used by the unit-testing examples.

    Echoes the validated input plus selected context fields back to the caller.
    """
    lifespan = cast(Lifespan, ctx.lifespan)
    return UnitTestOutput(
        key=input.key,
        number=input.number,
        additional_metadata=ctx.additional_metadata,
        retry_count=ctx.retry_count,
        mock_db_url=lifespan.mock_db_url,
    )
@hatchet.durable_task(input_validator=UnitTestInput)
async def durable_async_standalone(
input: UnitTestInput, ctx: DurableContext
@@ -97,19 +84,6 @@ async def async_simple_workflow(input: UnitTestInput, ctx: Context) -> UnitTestO
)
@simple_workflow.durable_task()
def durable_sync_simple_workflow(
    input: UnitTestInput, ctx: DurableContext
) -> UnitTestOutput:
    """Synchronous durable task registered on ``simple_workflow``.

    Mirrors the standalone variant: returns the input together with context
    metadata so unit tests can assert on every field.
    """
    lifespan = cast(Lifespan, ctx.lifespan)
    return UnitTestOutput(
        key=input.key,
        number=input.number,
        additional_metadata=ctx.additional_metadata,
        retry_count=ctx.retry_count,
        mock_db_url=lifespan.mock_db_url,
    )
@simple_workflow.durable_task()
async def durable_async_simple_workflow(
input: UnitTestInput, ctx: DurableContext
@@ -153,15 +127,6 @@ async def async_complex_workflow(input: UnitTestInput, ctx: Context) -> UnitTest
return ctx.task_output(start)
@complex_workflow.durable_task(
    parents=[start],
)
def durable_sync_complex_workflow(
    input: UnitTestInput, ctx: DurableContext
) -> UnitTestOutput:
    """Synchronous durable task that depends on ``start`` and forwards its output."""
    parent_output = ctx.task_output(start)
    return parent_output
@complex_workflow.durable_task(
parents=[start],
)
+2
View File
@@ -38,6 +38,7 @@ from examples.durable.worker import (
wait_for_sleep_twice,
dag_child_workflow,
durable_spawn_dag,
durable_non_determinism,
)
from examples.events.worker import event_workflow
from examples.fanout.worker import child_wf, parent_wf
@@ -121,6 +122,7 @@ def main() -> None:
serde_workflow,
durable_spawn_dag,
dag_child_workflow,
durable_non_determinism,
],
lifespan=lifespan,
)
+2
View File
@@ -157,6 +157,7 @@ from hatchet_sdk.exceptions import (
CancelledError,
DedupeViolationError,
FailedTaskRunExceptionGroup,
NonDeterminismError,
NonRetryableException,
TaskRunError,
)
@@ -239,6 +240,7 @@ __all__ = [
"LogLineList",
"LogLineOrderByDirection",
"LogLineOrderByField",
"NonDeterminismError",
"NonRetryableException",
"OTelAttribute",
"OpenTelemetryConfig",
+7 -41
View File
@@ -5,14 +5,10 @@ from __future__ import annotations
import asyncio
import threading
from collections.abc import Callable
from typing import TYPE_CHECKING
from hatchet_sdk.exceptions import CancellationReason
from hatchet_sdk.logger import logger
if TYPE_CHECKING:
pass
class CancellationToken:
"""
@@ -70,22 +66,13 @@ class CancellationToken:
- Signal both async and sync events
- Invoke all registered callbacks
Args:
reason: The reason for cancellation.
:param reason: The reason for cancellation.
"""
with self._lock:
if self._cancelled:
logger.debug(
f"CancellationToken: cancel() called but already cancelled, "
f"reason={self._reason.value if self._reason else 'none'}"
)
return
logger.debug(
f"CancellationToken: cancel() called, reason={reason.value}, "
f"{len(self._child_run_ids)} children registered"
)
self._cancelled = True
self._reason = reason
@@ -99,13 +86,10 @@ class CancellationToken:
for callback in callbacks:
try:
logger.debug(f"CancellationToken: invoking callback {callback}")
callback()
except Exception as e: # noqa: PERF203
logger.warning(f"CancellationToken: callback raised exception: {e}")
logger.debug(f"CancellationToken: cancel() complete, reason={reason.value}")
@property
def is_cancelled(self) -> bool:
"""Check if cancellation has been triggered."""
@@ -123,28 +107,16 @@ class CancellationToken:
This will block until cancel() is called.
"""
await self._get_async_event().wait()
logger.debug(
f"CancellationToken: async wait completed (cancelled), "
f"reason={self._reason.value if self._reason else 'none'}"
)
def wait(self, timeout: float | None = None) -> bool:
"""
Block until cancelled (for use in sync code).
Args:
timeout: Maximum time to wait in seconds. None means wait forever.
:param timeout: Maximum time to wait in seconds. None means wait forever.
Returns:
True if the token was cancelled (event was set), False if timeout expired.
:returns: True if the token was cancelled (event was set), False if timeout expired.
"""
result = self._sync_event.wait(timeout)
if result:
logger.debug(
f"CancellationToken: sync wait interrupted by cancellation, "
f"reason={self._reason.value if self._reason else 'none'}"
)
return result
return self._sync_event.wait(timeout)
def register_child(self, run_id: str) -> None:
"""
@@ -153,11 +125,9 @@ class CancellationToken:
When the parent is cancelled, these child run IDs can be used to cancel
the child workflows as well.
Args:
run_id: The workflow run ID of the child workflow.
:param run_id: The workflow run ID of the child workflow.
"""
with self._lock:
logger.debug(f"CancellationToken: registering child workflow {run_id}")
self._child_run_ids.append(run_id)
@property
@@ -171,8 +141,7 @@ class CancellationToken:
If the token is already cancelled, the callback will be invoked immediately.
Args:
callback: A callable that takes no arguments.
:param callback: A callable that takes no arguments.
"""
with self._lock:
if self._cancelled:
@@ -182,9 +151,6 @@ class CancellationToken:
self._callbacks.append(callback)
if invoke_now:
logger.debug(
f"CancellationToken: invoking callback immediately (already cancelled): {callback}"
)
try:
callback()
except Exception as e:
@@ -2,7 +2,7 @@ import asyncio
import json
import time
from collections.abc import AsyncGenerator
from typing import TYPE_CHECKING, cast
from typing import cast
import grpc
import grpc.aio
@@ -17,6 +17,7 @@ from hatchet_sdk.clients.events import proto_timestamp_now
from hatchet_sdk.clients.listeners.run_event_listener import (
DEFAULT_ACTION_LISTENER_RETRY_INTERVAL,
)
from hatchet_sdk.config import ClientConfig
from hatchet_sdk.connection import new_conn
from hatchet_sdk.contracts.dispatcher_pb2 import ActionType as ActionTypeProto
from hatchet_sdk.contracts.dispatcher_pb2 import (
@@ -34,10 +35,6 @@ from hatchet_sdk.utils.backoff import exp_backoff_sleep
from hatchet_sdk.utils.proto_enums import convert_proto_enum_to_python
from hatchet_sdk.utils.typing import JSONSerializableMapping
if TYPE_CHECKING:
from hatchet_sdk.config import ClientConfig
DEFAULT_ACTION_TIMEOUT = 600 # seconds
DEFAULT_ACTION_LISTENER_RETRY_COUNT = 15
@@ -118,7 +115,7 @@ class ActionListener:
)
if self.last_heartbeat_succeeded is False:
logger.info("listener established")
logger.info("action listener established")
now = time.time()
diff = now - self.time_last_hb_succeeded
@@ -275,12 +272,7 @@ class ActionListener:
self.run_heartbeat = False
logger.info("ListenV2 not available, falling back to Listen")
else:
# TODO retry
if e.code() == grpc.StatusCode.UNAVAILABLE:
logger.exception("action listener error")
else:
# Unknown error, report and break
logger.exception("action listener error")
logger.error("action listener error - reconnecting")
self.retries = self.retries + 1
@@ -12,6 +12,7 @@ from hatchet_sdk.config import ClientConfig
from hatchet_sdk.connection import new_conn
from hatchet_sdk.contracts.v1.dispatcher_pb2 import (
DurableTaskAwaitedCompletedEntry,
DurableTaskErrorType,
DurableTaskEventKind,
DurableTaskEventLogEntryCompletedResponse,
DurableTaskEventRequest,
@@ -22,10 +23,13 @@ from hatchet_sdk.contracts.v1.dispatcher_pb2 import (
)
from hatchet_sdk.contracts.v1.dispatcher_pb2_grpc import V1DispatcherStub
from hatchet_sdk.contracts.v1.shared.condition_pb2 import DurableEventListenerConditions
from hatchet_sdk.exceptions import NonDeterminismError
from hatchet_sdk.logger import logger
from hatchet_sdk.metadata import get_metadata
from hatchet_sdk.utils.typing import JSONSerializableMapping
DEFAULT_RECONNECT_INTERVAL = 3 # seconds
class DurableTaskEventAck(BaseModel):
invocation_count: int
@@ -33,7 +37,7 @@ class DurableTaskEventAck(BaseModel):
node_id: int
class DurableTaskCallbackResult(BaseModel):
class DurableTaskEventLogEntryResult(BaseModel):
durable_task_external_id: str
node_id: int
payload: JSONSerializableMapping | None
@@ -70,7 +74,7 @@ class DurableEventListener:
tuple[str, int], asyncio.Future[DurableTaskEventAck]
] = {}
self._pending_callbacks: dict[
tuple[str, int], asyncio.Future[DurableTaskCallbackResult]
tuple[str, int], asyncio.Future[DurableTaskEventLogEntryResult]
] = {}
self._receive_task: asyncio.Task[None] | None = None
@@ -82,6 +86,28 @@ class DurableEventListener:
def worker_id(self) -> str | None:
return self._worker_id
async def _connect(self) -> None:
    """(Re)establish the bidirectional DurableTask gRPC stream.

    Closes any previous connection, then creates a fresh channel, stub,
    request queue, and stream before re-registering the worker. The order
    matters: the request queue must exist before the request iterator backing
    the stream starts consuming it, and registration is sent only once the
    stream is up.
    """
    if self._conn is not None:
        # Best-effort teardown of the stale channel; failures here are
        # irrelevant since we are about to replace it.
        with suppress(Exception):
            await self._conn.close()

    logger.info("durable event listener connecting...")

    self._conn = new_conn(self.config, aio=True)
    self._stub = V1DispatcherStub(self._conn)
    # Fresh queue: requests queued on the dead stream are abandoned.
    self._request_queue = asyncio.Queue()

    self._stream = cast(
        grpc.aio.StreamStreamCall[DurableTaskRequest, DurableTaskResponse],
        self._stub.DurableTask(
            self._request_iterator(),  # type: ignore[arg-type]
            metadata=get_metadata(self.token),
        ),
    )

    # Announce this worker on the new stream before any events are sent.
    await self._register_worker()

    logger.info("durable event listener connected")
async def start(self, worker_id: str) -> None:
async with self._start_lock:
if self._running:
@@ -89,24 +115,12 @@ class DurableEventListener:
self._worker_id = worker_id
self._running = True
self._request_queue = asyncio.Queue()
self._conn = new_conn(self.config, aio=True)
self._stub = V1DispatcherStub(self._conn)
self._stream = cast(
grpc.aio.StreamStreamCall[DurableTaskRequest, DurableTaskResponse],
self._stub.DurableTask(
self._request_iterator(), # type: ignore[arg-type]
metadata=get_metadata(self.token),
),
)
await self._connect()
self._receive_task = asyncio.create_task(self._receive_loop())
self._send_task = asyncio.create_task(self._send_loop())
await self._register_worker()
async def ensure_started(self, worker_id: str) -> None:
    """Idempotently start the listener: a no-op when it is already running."""
    if self._running:
        return
    await self.start(worker_id)
@@ -163,28 +177,66 @@ class DurableEventListener:
)
await self._request_queue.put(request)
async def _receive_loop(self) -> None:
if not self._stream:
return
def _fail_pending_acks(self, exc: Exception) -> None:
for future in self._pending_event_acks.values():
if not future.done():
future.set_exception(exc)
self._pending_event_acks.clear()
try:
async for response in self._stream:
await self._handle_response(response)
except grpc.aio.AioRpcError as e:
if e.code() != grpc.StatusCode.CANCELLED:
logger.error(
f"DurableTask stream error: code={e.code()}, details={e.details()}"
async def _receive_loop(self) -> None:
while self._running:
if not self._stream:
await asyncio.sleep(DEFAULT_RECONNECT_INTERVAL)
continue
try:
async for response in self._stream:
await self._handle_response(response)
if self._running:
logger.warning(
f"durable event listener disconnected (EOF), reconnecting in {DEFAULT_RECONNECT_INTERVAL}s..."
)
self._fail_pending_acks(
ConnectionResetError("durable stream disconnected")
)
await asyncio.sleep(DEFAULT_RECONNECT_INTERVAL)
await self._connect()
except grpc.aio.AioRpcError as e:
if e.code() == grpc.StatusCode.CANCELLED:
break
logger.warning(
f"durable event listener disconnected: code={e.code()}, details={e.details()}, reconnecting in {DEFAULT_RECONNECT_INTERVAL}s..."
)
except asyncio.CancelledError:
logger.debug("Receive loop cancelled")
except Exception as e:
logger.exception(f"Unexpected error in receive loop: {e}")
if self._running:
self._fail_pending_acks(
ConnectionResetError(
f"durable stream error: {e.code()} {e.details()}"
)
)
await asyncio.sleep(DEFAULT_RECONNECT_INTERVAL)
try:
await self._connect()
except Exception:
logger.exception("failed to reconnect durable event listener")
except asyncio.CancelledError:
break
except Exception as e:
logger.exception(f"unexpected error in durable event listener: {e}")
if self._running:
self._fail_pending_acks(e)
await asyncio.sleep(DEFAULT_RECONNECT_INTERVAL)
try:
await self._connect()
except Exception:
logger.exception("failed to reconnect durable event listener")
async def _handle_response(self, response: DurableTaskResponse) -> None:
if response.HasField("register_worker"):
logger.info(
f"Registered durable task worker: {response.register_worker.worker_id}"
)
pass
elif response.HasField("trigger_ack"):
trigger_ack = response.trigger_ack
event_key = (
@@ -207,10 +259,48 @@ class DurableEventListener:
completed.node_id,
)
if completed_key in self._pending_callbacks:
future = self._pending_callbacks[completed_key]
if not future.done():
future.set_result(DurableTaskCallbackResult.from_proto(completed))
completed_future = self._pending_callbacks[completed_key]
if not completed_future.done():
completed_future.set_result(
DurableTaskEventLogEntryResult.from_proto(completed)
)
del self._pending_callbacks[completed_key]
elif response.HasField("error"):
error = response.error
exc: Exception
if (
error.error_type
== DurableTaskErrorType.DURABLE_TASK_ERROR_TYPE_NONDETERMINISM
):
exc = NonDeterminismError(
task_external_id=error.durable_task_external_id,
invocation_count=error.invocation_count,
message=error.error_message,
node_id=error.node_id,
)
else:
## fallthrough, this shouldn't happen unless we add an error type to the engine and the SDK
## hasn't been updated to handle it
exc = Exception(
"Unspecified durable task error: "
+ error.error_message
+ f" (type: {error.error_type})"
)
event_key = (error.durable_task_external_id, error.invocation_count)
if event_key in self._pending_event_acks:
error_pending_ack_future = self._pending_event_acks.pop(event_key)
if not error_pending_ack_future.done():
error_pending_ack_future.set_exception(exc)
callback_key = (error.durable_task_external_id, error.node_id)
if callback_key in self._pending_callbacks:
error_pending_callback_future = self._pending_callbacks.pop(
callback_key
)
if not error_pending_callback_future.done():
error_pending_callback_future.set_exception(exc)
async def _register_worker(self) -> None:
if self._request_queue is None or self._worker_id is None:
@@ -271,11 +361,11 @@ class DurableEventListener:
self,
durable_task_external_id: str,
node_id: int,
) -> DurableTaskCallbackResult:
) -> DurableTaskEventLogEntryResult:
key = (durable_task_external_id, node_id)
if key not in self._pending_callbacks:
future: asyncio.Future[DurableTaskCallbackResult] = asyncio.Future()
future: asyncio.Future[DurableTaskEventLogEntryResult] = asyncio.Future()
self._pending_callbacks[key] = future
return await self._pending_callbacks[key]
@@ -3,11 +3,12 @@ from __future__ import annotations
import asyncio
from abc import ABC, abstractmethod
from collections.abc import AsyncIterator
from typing import TYPE_CHECKING, Generic, Literal, TypeVar
from typing import Generic, Literal, TypeVar
import grpc
import grpc.aio
from hatchet_sdk.cancellation import CancellationToken
from hatchet_sdk.clients.event_ts import (
ThreadSafeEvent,
UnexpectedEOF,
@@ -18,9 +19,6 @@ from hatchet_sdk.logger import logger
from hatchet_sdk.metadata import get_metadata
from hatchet_sdk.utils.cancellation import race_against_token
if TYPE_CHECKING:
from hatchet_sdk.cancellation import CancellationToken
DEFAULT_LISTENER_RETRY_INTERVAL = 3 # seconds
DEFAULT_LISTENER_RETRY_COUNT = 5
DEFAULT_LISTENER_INTERRUPT_INTERVAL = 1800 # 30 minutes
@@ -237,17 +235,11 @@ class PooledListener(Generic[R, T, L], ABC):
if not self.listener_task or self.listener_task.done():
self.listener_task = asyncio.create_task(self._init_producer())
logger.debug(
f"PooledListener.subscribe: waiting for event on id={id}, "
f"subscription_id={subscription_id}, token={cancellation_token is not None}"
)
if cancellation_token:
result_task = asyncio.create_task(self.events[subscription_id].get())
return await race_against_token(result_task, cancellation_token)
return await self.events[subscription_id].get()
except asyncio.CancelledError:
logger.debug(f"PooledListener.subscribe: externally cancelled for id={id}")
raise
finally:
if subscription_id:
+1 -4
View File
@@ -209,8 +209,7 @@ class Context:
- Set the exit_flag property to True
- Allow child workflow cancellation
Args:
reason: The reason for cancellation.
:param reason: The reason for cancellation.
"""
self.cancellation_token.cancel(reason)
@@ -535,8 +534,6 @@ class DurableContext(Context):
if self.durable_event_listener is None:
raise ValueError("Durable task client is not available")
from hatchet_sdk.contracts.v1.dispatcher_pb2 import DurableTaskEventKind
await self._ensure_stream_started()
flat_conditions = flatten_conditions(list(conditions))
@@ -26,7 +26,7 @@ from hatchet_sdk.contracts.v1.shared import condition_pb2 as v1_dot_shared_dot_c
from hatchet_sdk.contracts.v1.shared import trigger_pb2 as v1_dot_shared_dot_trigger__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13v1/dispatcher.proto\x12\x02v1\x1a\x19v1/shared/condition.proto\x1a\x17v1/shared/trigger.proto\"5\n DurableTaskRequestRegisterWorker\x12\x11\n\tworker_id\x18\x01 \x01(\t\"6\n!DurableTaskResponseRegisterWorker\x12\x11\n\tworker_id\x18\x01 \x01(\t\"\xc5\x02\n\x17\x44urableTaskEventRequest\x12\x18\n\x10invocation_count\x18\x01 \x01(\x03\x12 \n\x18\x64urable_task_external_id\x18\x02 \x01(\t\x12&\n\x04kind\x18\x03 \x01(\x0e\x32\x18.v1.DurableTaskEventKind\x12\x14\n\x07payload\x18\x04 \x01(\x0cH\x00\x88\x01\x01\x12\x44\n\x13wait_for_conditions\x18\x05 \x01(\x0b\x32\".v1.DurableEventListenerConditionsH\x01\x88\x01\x01\x12\x35\n\x0ctrigger_opts\x18\x06 \x01(\x0b\x32\x1a.v1.TriggerWorkflowRequestH\x02\x88\x01\x01\x42\n\n\x08_payloadB\x16\n\x14_wait_for_conditionsB\x0f\n\r_trigger_opts\"j\n\x1b\x44urableTaskEventAckResponse\x12\x18\n\x10invocation_count\x18\x01 \x01(\x03\x12 \n\x18\x64urable_task_external_id\x18\x02 \x01(\t\x12\x0f\n\x07node_id\x18\x03 \x01(\x03\"o\n)DurableTaskEventLogEntryCompletedResponse\x12 \n\x18\x64urable_task_external_id\x18\x01 \x01(\t\x12\x0f\n\x07node_id\x18\x02 \x01(\x03\x12\x0f\n\x07payload\x18\x03 \x01(\x0c\"_\n!DurableTaskEvictInvocationRequest\x12\x18\n\x10invocation_count\x18\x01 \x01(\x03\x12 \n\x18\x64urable_task_external_id\x18\x02 \x01(\t\"U\n DurableTaskAwaitedCompletedEntry\x12 \n\x18\x64urable_task_external_id\x18\x01 \x01(\t\x12\x0f\n\x07node_id\x18\x02 \x01(\x03\"\x96\x01\n\x1e\x44urableTaskWorkerStatusRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x0f\n\x07node_id\x18\x02 \x01(\x03\x12\x11\n\tbranch_id\x18\x03 \x01(\x03\x12=\n\x0fwaiting_entries\x18\x04 \x03(\x0b\x32$.v1.DurableTaskAwaitedCompletedEntry\"\x8e\x02\n\x12\x44urableTaskRequest\x12?\n\x0fregister_worker\x18\x01 \x01(\x0b\x32$.v1.DurableTaskRequestRegisterWorkerH\x00\x12,\n\x05\x65vent\x18\x02 \x01(\x0b\x32\x1b.v1.DurableTaskEventRequestH\x00\x12\x41\n\x10\x65vict_invocation\x18\x03 
\x01(\x0b\x32%.v1.DurableTaskEvictInvocationRequestH\x00\x12;\n\rworker_status\x18\x04 \x01(\x0b\x32\".v1.DurableTaskWorkerStatusRequestH\x00\x42\t\n\x07message\"\xe4\x01\n\x13\x44urableTaskResponse\x12@\n\x0fregister_worker\x18\x01 \x01(\x0b\x32%.v1.DurableTaskResponseRegisterWorkerH\x00\x12\x36\n\x0btrigger_ack\x18\x02 \x01(\x0b\x32\x1f.v1.DurableTaskEventAckResponseH\x00\x12H\n\x0f\x65ntry_completed\x18\x03 \x01(\x0b\x32-.v1.DurableTaskEventLogEntryCompletedResponseH\x00\x42\t\n\x07message\"z\n\x1bRegisterDurableEventRequest\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x12\n\nsignal_key\x18\x02 \x01(\t\x12\x36\n\nconditions\x18\x03 \x01(\x0b\x32\".v1.DurableEventListenerConditions\"\x1e\n\x1cRegisterDurableEventResponse\"C\n\x1cListenForDurableEventRequest\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x12\n\nsignal_key\x18\x02 \x01(\t\"A\n\x0c\x44urableEvent\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x12\n\nsignal_key\x18\x02 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c*\xb0\x01\n\x14\x44urableTaskEventKind\x12)\n%DURABLE_TASK_TRIGGER_KIND_UNSPECIFIED\x10\x00\x12!\n\x1d\x44URABLE_TASK_TRIGGER_KIND_RUN\x10\x01\x12&\n\"DURABLE_TASK_TRIGGER_KIND_WAIT_FOR\x10\x02\x12\"\n\x1e\x44URABLE_TASK_TRIGGER_KIND_MEMO\x10\x03\x32\x84\x02\n\x0cV1Dispatcher\x12\x44\n\x0b\x44urableTask\x12\x16.v1.DurableTaskRequest\x1a\x17.v1.DurableTaskResponse\"\x00(\x01\x30\x01\x12[\n\x14RegisterDurableEvent\x12\x1f.v1.RegisterDurableEventRequest\x1a .v1.RegisterDurableEventResponse\"\x00\x12Q\n\x15ListenForDurableEvent\x12 .v1.ListenForDurableEventRequest\x1a\x10.v1.DurableEvent\"\x00(\x01\x30\x01\x42\x42Z@github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1b\x06proto3')
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13v1/dispatcher.proto\x12\x02v1\x1a\x19v1/shared/condition.proto\x1a\x17v1/shared/trigger.proto\"5\n DurableTaskRequestRegisterWorker\x12\x11\n\tworker_id\x18\x01 \x01(\t\"6\n!DurableTaskResponseRegisterWorker\x12\x11\n\tworker_id\x18\x01 \x01(\t\"\xc5\x02\n\x17\x44urableTaskEventRequest\x12\x18\n\x10invocation_count\x18\x01 \x01(\x03\x12 \n\x18\x64urable_task_external_id\x18\x02 \x01(\t\x12&\n\x04kind\x18\x03 \x01(\x0e\x32\x18.v1.DurableTaskEventKind\x12\x14\n\x07payload\x18\x04 \x01(\x0cH\x00\x88\x01\x01\x12\x44\n\x13wait_for_conditions\x18\x05 \x01(\x0b\x32\".v1.DurableEventListenerConditionsH\x01\x88\x01\x01\x12\x35\n\x0ctrigger_opts\x18\x06 \x01(\x0b\x32\x1a.v1.TriggerWorkflowRequestH\x02\x88\x01\x01\x42\n\n\x08_payloadB\x16\n\x14_wait_for_conditionsB\x0f\n\r_trigger_opts\"j\n\x1b\x44urableTaskEventAckResponse\x12\x18\n\x10invocation_count\x18\x01 \x01(\x03\x12 \n\x18\x64urable_task_external_id\x18\x02 \x01(\t\x12\x0f\n\x07node_id\x18\x03 \x01(\x03\"o\n)DurableTaskEventLogEntryCompletedResponse\x12 \n\x18\x64urable_task_external_id\x18\x01 \x01(\t\x12\x0f\n\x07node_id\x18\x02 \x01(\x03\x12\x0f\n\x07payload\x18\x03 \x01(\x0c\"_\n!DurableTaskEvictInvocationRequest\x12\x18\n\x10invocation_count\x18\x01 \x01(\x03\x12 \n\x18\x64urable_task_external_id\x18\x02 \x01(\t\"U\n DurableTaskAwaitedCompletedEntry\x12 \n\x18\x64urable_task_external_id\x18\x01 \x01(\t\x12\x0f\n\x07node_id\x18\x02 \x01(\x03\"\x96\x01\n\x1e\x44urableTaskWorkerStatusRequest\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x0f\n\x07node_id\x18\x02 \x01(\x03\x12\x11\n\tbranch_id\x18\x03 \x01(\x03\x12=\n\x0fwaiting_entries\x18\x04 \x03(\x0b\x32$.v1.DurableTaskAwaitedCompletedEntry\"\x8e\x02\n\x12\x44urableTaskRequest\x12?\n\x0fregister_worker\x18\x01 \x01(\x0b\x32$.v1.DurableTaskRequestRegisterWorkerH\x00\x12,\n\x05\x65vent\x18\x02 \x01(\x0b\x32\x1b.v1.DurableTaskEventRequestH\x00\x12\x41\n\x10\x65vict_invocation\x18\x03 
\x01(\x0b\x32%.v1.DurableTaskEvictInvocationRequestH\x00\x12;\n\rworker_status\x18\x04 \x01(\x0b\x32\".v1.DurableTaskWorkerStatusRequestH\x00\x42\t\n\x07message\"\xac\x01\n\x18\x44urableTaskErrorResponse\x12 \n\x18\x64urable_task_external_id\x18\x01 \x01(\t\x12\x18\n\x10invocation_count\x18\x02 \x01(\x03\x12\x0f\n\x07node_id\x18\x03 \x01(\x03\x12,\n\nerror_type\x18\x04 \x01(\x0e\x32\x18.v1.DurableTaskErrorType\x12\x15\n\rerror_message\x18\x05 \x01(\t\"\x93\x02\n\x13\x44urableTaskResponse\x12@\n\x0fregister_worker\x18\x01 \x01(\x0b\x32%.v1.DurableTaskResponseRegisterWorkerH\x00\x12\x36\n\x0btrigger_ack\x18\x02 \x01(\x0b\x32\x1f.v1.DurableTaskEventAckResponseH\x00\x12H\n\x0f\x65ntry_completed\x18\x03 \x01(\x0b\x32-.v1.DurableTaskEventLogEntryCompletedResponseH\x00\x12-\n\x05\x65rror\x18\x04 \x01(\x0b\x32\x1c.v1.DurableTaskErrorResponseH\x00\x42\t\n\x07message\"z\n\x1bRegisterDurableEventRequest\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x12\n\nsignal_key\x18\x02 \x01(\t\x12\x36\n\nconditions\x18\x03 \x01(\x0b\x32\".v1.DurableEventListenerConditions\"\x1e\n\x1cRegisterDurableEventResponse\"C\n\x1cListenForDurableEventRequest\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x12\n\nsignal_key\x18\x02 \x01(\t\"A\n\x0c\x44urableEvent\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x12\n\nsignal_key\x18\x02 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c*\xb0\x01\n\x14\x44urableTaskEventKind\x12)\n%DURABLE_TASK_TRIGGER_KIND_UNSPECIFIED\x10\x00\x12!\n\x1d\x44URABLE_TASK_TRIGGER_KIND_RUN\x10\x01\x12&\n\"DURABLE_TASK_TRIGGER_KIND_WAIT_FOR\x10\x02\x12\"\n\x1e\x44URABLE_TASK_TRIGGER_KIND_MEMO\x10\x03*k\n\x14\x44urableTaskErrorType\x12\'\n#DURABLE_TASK_ERROR_TYPE_UNSPECIFIED\x10\x00\x12*\n&DURABLE_TASK_ERROR_TYPE_NONDETERMINISM\x10\x01\x32\x84\x02\n\x0cV1Dispatcher\x12\x44\n\x0b\x44urableTask\x12\x16.v1.DurableTaskRequest\x1a\x17.v1.DurableTaskResponse\"\x00(\x01\x30\x01\x12[\n\x14RegisterDurableEvent\x12\x1f.v1.RegisterDurableEventRequest\x1a 
.v1.RegisterDurableEventResponse\"\x00\x12Q\n\x15ListenForDurableEvent\x12 .v1.ListenForDurableEventRequest\x1a\x10.v1.DurableEvent\"\x00(\x01\x30\x01\x42\x42Z@github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1b\x06proto3')
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
@@ -34,8 +34,10 @@ _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'v1.dispatcher_pb2', _global
if not _descriptor._USE_C_DESCRIPTORS:
_globals['DESCRIPTOR']._loaded_options = None
_globals['DESCRIPTOR']._serialized_options = b'Z@github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1'
_globals['_DURABLETASKEVENTKIND']._serialized_start=1873
_globals['_DURABLETASKEVENTKIND']._serialized_end=2049
_globals['_DURABLETASKEVENTKIND']._serialized_start=2095
_globals['_DURABLETASKEVENTKIND']._serialized_end=2271
_globals['_DURABLETASKERRORTYPE']._serialized_start=2273
_globals['_DURABLETASKERRORTYPE']._serialized_end=2380
_globals['_DURABLETASKREQUESTREGISTERWORKER']._serialized_start=79
_globals['_DURABLETASKREQUESTREGISTERWORKER']._serialized_end=132
_globals['_DURABLETASKRESPONSEREGISTERWORKER']._serialized_start=134
@@ -54,16 +56,18 @@ if not _descriptor._USE_C_DESCRIPTORS:
_globals['_DURABLETASKWORKERSTATUSREQUEST']._serialized_end=1074
_globals['_DURABLETASKREQUEST']._serialized_start=1077
_globals['_DURABLETASKREQUEST']._serialized_end=1347
_globals['_DURABLETASKRESPONSE']._serialized_start=1350
_globals['_DURABLETASKRESPONSE']._serialized_end=1578
_globals['_REGISTERDURABLEEVENTREQUEST']._serialized_start=1580
_globals['_REGISTERDURABLEEVENTREQUEST']._serialized_end=1702
_globals['_REGISTERDURABLEEVENTRESPONSE']._serialized_start=1704
_globals['_REGISTERDURABLEEVENTRESPONSE']._serialized_end=1734
_globals['_LISTENFORDURABLEEVENTREQUEST']._serialized_start=1736
_globals['_LISTENFORDURABLEEVENTREQUEST']._serialized_end=1803
_globals['_DURABLEEVENT']._serialized_start=1805
_globals['_DURABLEEVENT']._serialized_end=1870
_globals['_V1DISPATCHER']._serialized_start=2052
_globals['_V1DISPATCHER']._serialized_end=2312
_globals['_DURABLETASKERRORRESPONSE']._serialized_start=1350
_globals['_DURABLETASKERRORRESPONSE']._serialized_end=1522
_globals['_DURABLETASKRESPONSE']._serialized_start=1525
_globals['_DURABLETASKRESPONSE']._serialized_end=1800
_globals['_REGISTERDURABLEEVENTREQUEST']._serialized_start=1802
_globals['_REGISTERDURABLEEVENTREQUEST']._serialized_end=1924
_globals['_REGISTERDURABLEEVENTRESPONSE']._serialized_start=1926
_globals['_REGISTERDURABLEEVENTRESPONSE']._serialized_end=1956
_globals['_LISTENFORDURABLEEVENTREQUEST']._serialized_start=1958
_globals['_LISTENFORDURABLEEVENTREQUEST']._serialized_end=2025
_globals['_DURABLEEVENT']._serialized_start=2027
_globals['_DURABLEEVENT']._serialized_end=2092
_globals['_V1DISPATCHER']._serialized_start=2383
_globals['_V1DISPATCHER']._serialized_end=2643
# @@protoc_insertion_point(module_scope)
@@ -15,10 +15,17 @@ class DurableTaskEventKind(int, metaclass=_enum_type_wrapper.EnumTypeWrapper):
DURABLE_TASK_TRIGGER_KIND_RUN: _ClassVar[DurableTaskEventKind]
DURABLE_TASK_TRIGGER_KIND_WAIT_FOR: _ClassVar[DurableTaskEventKind]
DURABLE_TASK_TRIGGER_KIND_MEMO: _ClassVar[DurableTaskEventKind]
class DurableTaskErrorType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper):
    """Generated type stub for the ``v1.DurableTaskErrorType`` proto enum.

    Values mirror the .proto definition: 0 = unspecified, 1 = non-determinism
    detected during durable task replay.
    """
    __slots__ = ()
    DURABLE_TASK_ERROR_TYPE_UNSPECIFIED: _ClassVar[DurableTaskErrorType]
    DURABLE_TASK_ERROR_TYPE_NONDETERMINISM: _ClassVar[DurableTaskErrorType]
DURABLE_TASK_TRIGGER_KIND_UNSPECIFIED: DurableTaskEventKind
DURABLE_TASK_TRIGGER_KIND_RUN: DurableTaskEventKind
DURABLE_TASK_TRIGGER_KIND_WAIT_FOR: DurableTaskEventKind
DURABLE_TASK_TRIGGER_KIND_MEMO: DurableTaskEventKind
DURABLE_TASK_ERROR_TYPE_UNSPECIFIED: DurableTaskErrorType
DURABLE_TASK_ERROR_TYPE_NONDETERMINISM: DurableTaskErrorType
class DurableTaskRequestRegisterWorker(_message.Message):
__slots__ = ("worker_id",)
@@ -108,15 +115,31 @@ class DurableTaskRequest(_message.Message):
worker_status: DurableTaskWorkerStatusRequest
def __init__(self, register_worker: _Optional[_Union[DurableTaskRequestRegisterWorker, _Mapping]] = ..., event: _Optional[_Union[DurableTaskEventRequest, _Mapping]] = ..., evict_invocation: _Optional[_Union[DurableTaskEvictInvocationRequest, _Mapping]] = ..., worker_status: _Optional[_Union[DurableTaskWorkerStatusRequest, _Mapping]] = ...) -> None: ...
class DurableTaskErrorResponse(_message.Message):
    """Generated protobuf message stub for a durable-task error response.

    Identifies the failing durable task (``durable_task_external_id``), the
    invocation and event-log node at which the error was detected, the error
    classification (``error_type``, e.g. nondeterminism), and a human-readable
    ``error_message``. NOTE: generated file — edits are lost on regeneration.
    """
    __slots__ = ("durable_task_external_id", "invocation_count", "node_id", "error_type", "error_message")
    DURABLE_TASK_EXTERNAL_ID_FIELD_NUMBER: _ClassVar[int]
    INVOCATION_COUNT_FIELD_NUMBER: _ClassVar[int]
    NODE_ID_FIELD_NUMBER: _ClassVar[int]
    ERROR_TYPE_FIELD_NUMBER: _ClassVar[int]
    ERROR_MESSAGE_FIELD_NUMBER: _ClassVar[int]
    durable_task_external_id: str
    invocation_count: int
    node_id: int
    error_type: DurableTaskErrorType
    error_message: str
    def __init__(self, durable_task_external_id: _Optional[str] = ..., invocation_count: _Optional[int] = ..., node_id: _Optional[int] = ..., error_type: _Optional[_Union[DurableTaskErrorType, str]] = ..., error_message: _Optional[str] = ...) -> None: ...
class DurableTaskResponse(_message.Message):
__slots__ = ("register_worker", "trigger_ack", "entry_completed")
__slots__ = ("register_worker", "trigger_ack", "entry_completed", "error")
REGISTER_WORKER_FIELD_NUMBER: _ClassVar[int]
TRIGGER_ACK_FIELD_NUMBER: _ClassVar[int]
ENTRY_COMPLETED_FIELD_NUMBER: _ClassVar[int]
ERROR_FIELD_NUMBER: _ClassVar[int]
register_worker: DurableTaskResponseRegisterWorker
trigger_ack: DurableTaskEventAckResponse
entry_completed: DurableTaskEventLogEntryCompletedResponse
def __init__(self, register_worker: _Optional[_Union[DurableTaskResponseRegisterWorker, _Mapping]] = ..., trigger_ack: _Optional[_Union[DurableTaskEventAckResponse, _Mapping]] = ..., entry_completed: _Optional[_Union[DurableTaskEventLogEntryCompletedResponse, _Mapping]] = ...) -> None: ...
error: DurableTaskErrorResponse
def __init__(self, register_worker: _Optional[_Union[DurableTaskResponseRegisterWorker, _Mapping]] = ..., trigger_ack: _Optional[_Union[DurableTaskEventAckResponse, _Mapping]] = ..., entry_completed: _Optional[_Union[DurableTaskEventLogEntryCompletedResponse, _Mapping]] = ..., error: _Optional[_Union[DurableTaskErrorResponse, _Mapping]] = ...) -> None: ...
class RegisterDurableEventRequest(_message.Message):
__slots__ = ("task_id", "signal_key", "conditions")
@@ -53,19 +53,13 @@ def emit_deprecation_notice(
) -> None:
"""Emit a time-aware deprecation notice.
Args:
feature: A short identifier for the deprecated feature (used for
deduplication so each feature only logs once per process).
message: The human-readable deprecation message.
start: The UTC datetime when the deprecation window began.
warn_days: Days after *start* during which a warning is logged (default 90).
error_days: Days after *start* during which an error is logged.
After this window, calls have a 20% chance of raising.
If None (default), the error/raise phase is never reached;
the notice stays at error-level logging indefinitely.
:param feature: A short identifier for the deprecated feature (used for deduplication so each feature only logs once per process).
:param message: The human-readable deprecation message.
:param start: The UTC datetime when the deprecation window began.
:param warn_days: Days after *start* during which a warning is logged (default 90).
:param error_days: Days after *start* during which an error is logged. After this window, calls have a 20% chance of raising. If None (default), the error/raise phase is never reached; the notice stays at error-level logging indefinitely.
Raises:
DeprecationError: After the error_days window, raised ~20% of the time.
:raises: DeprecationError: After the error_days window, raised ~20% of the time.
"""
now = datetime.now(tz=timezone.utc)
days_since = (now - start).days
@@ -80,7 +74,7 @@ def emit_deprecation_notice(
# Phase 2: error-level log (indefinite when error_days is None)
if feature not in _already_logged:
logger.error(
f"{message} " "This fallback will be removed soon. Upgrade immediately."
f"{message} This fallback will be removed soon. Upgrade immediately."
)
_already_logged.add(feature)
+14
View File
@@ -4,6 +4,20 @@ from enum import Enum
from typing import cast
class NonDeterminismError(Exception):
    """Raised when a durable task's execution diverges from its recorded log.

    Carries enough context to pinpoint where the divergence was detected:
    the task's external id, the invocation count, and the event-log node id.
    The raw ``message`` argument is stored on the instance but is not part of
    the formatted exception text, which is built locally (including a link to
    the durable-task best-practices documentation).
    """

    def __init__(
        self, task_external_id: str, invocation_count: int, message: str, node_id: int
    ) -> None:
        self.task_external_id = task_external_id
        self.invocation_count = invocation_count
        self.message = message
        self.node_id = node_id

        # Two-part text: where the divergence happened, then a docs pointer.
        location = (
            f"Non-determinism detected in task {task_external_id} "
            f"on invocation {invocation_count} at node {node_id}."
        )
        docs_hint = (
            "Check out our documentation for more details on expectations of "
            "durable tasks: https://docs.hatchet.run/home/durable-best-practices"
        )
        super().__init__(f"{location}\n{docs_hint}")
class InvalidDependencyError(Exception):
    # NOTE(review): raise sites are outside this view — semantics inferred
    # from the name; presumably an invalid task dependency declaration.
    # TODO confirm against callers.
    """Raised for an invalid task dependency."""
    pass
+2 -4
View File
@@ -1,16 +1,14 @@
import json
from dataclasses import field
from enum import Enum
from typing import TYPE_CHECKING, Any
from typing import Any
from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
from hatchet_sdk.config import ClientConfig
from hatchet_sdk.utils.opentelemetry import OTelAttribute
from hatchet_sdk.utils.typing import JSONSerializableMapping
if TYPE_CHECKING:
from hatchet_sdk.config import ClientConfig
ActionKey = str
@@ -6,11 +6,11 @@ from collections import Counter
from contextvars import ContextVar
from typing import TYPE_CHECKING
from hatchet_sdk.cancellation import CancellationToken
from hatchet_sdk.runnables.action import ActionKey
from hatchet_sdk.utils.typing import JSONSerializableMapping
if TYPE_CHECKING:
from hatchet_sdk.cancellation import CancellationToken
from hatchet_sdk.clients.admin import AdminClient
from hatchet_sdk.context.context import DurableContext
+14 -6
View File
@@ -51,12 +51,20 @@ class ConcurrencyLimitStrategy(str, Enum):
class ConcurrencyExpression(BaseModel):
"""
Defines concurrency limits for a workflow using a CEL expression.
Args:
expression (str): CEL expression to determine concurrency grouping. (i.e. "input.user_id")
max_runs (int): Maximum number of concurrent workflow runs.
limit_strategy (ConcurrencyLimitStrategy): Strategy for handling limit violations.
Example:
ConcurrencyExpression("input.user_id", 5, ConcurrencyLimitStrategy.CANCEL_IN_PROGRESS)
:ivar expression: CEL expression to determine concurrency grouping. (i.e. "input.user_id")
:ivar max_runs: Maximum number of concurrent workflow runs.
:ivar limit_strategy: Strategy for handling limit violations.
**Example**
```python
ConcurrencyExpression(
"input.user_id",
5,
ConcurrencyLimitStrategy.CANCEL_IN_PROGRESS
)
```
"""
expression: str
+1 -47
View File
@@ -21,6 +21,7 @@ from typing import (
from google.protobuf import timestamp_pb2
from pydantic import BaseModel, ConfigDict, SkipValidation, TypeAdapter, model_validator
from hatchet_sdk.cancellation import CancellationToken
from hatchet_sdk.clients.admin import (
ScheduleTriggerWorkflowOptions,
TriggerWorkflowOptions,
@@ -41,7 +42,6 @@ from hatchet_sdk.contracts.v1.workflows_pb2 import StickyStrategy as StickyStrat
from hatchet_sdk.contracts.workflows.workflows_pb2 import WorkflowVersion
from hatchet_sdk.exceptions import CancellationReason, CancelledError
from hatchet_sdk.labels import DesiredWorkerLabel
from hatchet_sdk.logger import logger
from hatchet_sdk.rate_limit import RateLimit
from hatchet_sdk.runnables.contextvars import (
ctx_cancellation_token,
@@ -68,7 +68,6 @@ from hatchet_sdk.workflow_run import WorkflowRunRef
if TYPE_CHECKING:
from hatchet_sdk import Hatchet
from hatchet_sdk.cancellation import CancellationToken
T = TypeVar("T")
@@ -686,11 +685,6 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
"""
cancellation_token = self._resolve_check_cancellation_token()
logger.debug(
f"Workflow.run_no_wait: triggering {self.config.name}, "
f"token={cancellation_token is not None}"
)
ref = self.client._client.admin.run_workflow(
workflow_name=self.config.name,
input=self._serialize_input(input),
@@ -722,11 +716,6 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
"""
cancellation_token = self._resolve_check_cancellation_token()
logger.debug(
f"Workflow.run: triggering {self.config.name}, "
f"token={cancellation_token is not None}"
)
ref = self.client._client.admin.run_workflow(
workflow_name=self.config.name,
input=self._serialize_input(input),
@@ -738,8 +727,6 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
ref.workflow_run_id,
)
logger.debug(f"Workflow.run: awaiting result for {ref.workflow_run_id}")
return ref.result(cancellation_token=cancellation_token)
async def aio_run_no_wait(
@@ -761,11 +748,6 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
"""
cancellation_token = self._resolve_check_cancellation_token()
logger.debug(
f"Workflow.aio_run_no_wait: triggering {self.config.name}, "
f"token={cancellation_token is not None}"
)
ref = await self.client._client.admin.aio_run_workflow(
workflow_name=self.config.name,
input=self._serialize_input(input),
@@ -801,11 +783,6 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
cancellation_token = self._resolve_check_cancellation_token()
logger.debug(
f"Workflow.aio_run: triggering {self.config.name}, "
f"token={cancellation_token is not None}"
)
ref = await self.client._client.admin.aio_run_workflow(
workflow_name=self.config.name,
input=self._serialize_input(input),
@@ -817,8 +794,6 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
ref.workflow_run_id,
)
logger.debug(f"Workflow.aio_run: awaiting result for {ref.workflow_run_id}")
return await await_with_cancellation(
ref.aio_result(),
cancellation_token,
@@ -881,17 +856,11 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
refs,
)
# Pass cancellation_token through to each result() call
# The cancellation check happens INSIDE result()'s polling loop
results: list[dict[str, Any] | BaseException] = []
for ref in refs:
try:
results.append(ref.result(cancellation_token=cancellation_token))
except CancelledError: # noqa: PERF203
logger.debug(
f"Workflow.run_many: cancellation detected, stopping wait, "
f"reason={CancellationReason.PARENT_CANCELLED.value}"
)
if return_exceptions:
results.append(
CancelledError(
@@ -940,11 +909,6 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
"""
cancellation_token = self._resolve_check_cancellation_token()
logger.debug(
f"Workflow.aio_run_many: triggering {len(workflows)} workflows, "
f"token={cancellation_token is not None}"
)
refs = await self.client._client.admin.aio_run_workflows(
workflows=workflows,
)
@@ -978,11 +942,6 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
"""
cancellation_token = self._resolve_check_cancellation_token()
logger.debug(
f"Workflow.run_many_no_wait: triggering {len(workflows)} workflows, "
f"token={cancellation_token is not None}"
)
refs = self.client._client.admin.run_workflows(
workflows=workflows,
)
@@ -1012,11 +971,6 @@ class Workflow(BaseWorkflow[TWorkflowInput]):
"""
cancellation_token = self._resolve_check_cancellation_token()
logger.debug(
f"Workflow.aio_run_many_no_wait: triggering {len(workflows)} workflows, "
f"token={cancellation_token is not None}"
)
refs = await self.client._client.admin.aio_run_workflows(
workflows=workflows,
)
+30 -45
View File
@@ -1,24 +1,19 @@
"""Utilities for cancellation-aware operations."""
from __future__ import annotations
import asyncio
import contextlib
from collections.abc import Awaitable, Callable
from typing import TYPE_CHECKING, TypeVar
from typing import TypeVar
from hatchet_sdk.cancellation import CancellationToken
from hatchet_sdk.logger import logger
if TYPE_CHECKING:
from hatchet_sdk.cancellation import CancellationToken
T = TypeVar("T")
async def _invoke_cancel_callback(
cancel_callback: Callable[[], Awaitable[None]] | None,
) -> None:
"""Invoke a cancel callback."""
if not cancel_callback:
return
@@ -35,15 +30,12 @@ async def race_against_token(
Waits for either the task to complete or the token to be cancelled. Cleans up
whichever side loses the race.
Args:
main_task: The asyncio task to race.
token: The cancellation token to race against.
:param main_task: The asyncio task to race.
:param token: The cancellation token to race against.
Returns:
The result of the main task if it completes first.
:returns: The result of the main task if it completes first.
Raises:
asyncio.CancelledError: If the token fires before the task completes.
:raises: asyncio.CancelledError: If the token fires before the task completes.
"""
cancel_task = asyncio.create_task(token.aio_wait())
@@ -87,50 +79,44 @@ async def await_with_cancellation(
token is cancelled before the awaitable completes, the awaitable is cancelled
and an asyncio.CancelledError is raised.
Args:
coro: The awaitable to await (coroutine, Future, or asyncio.Task).
token: The cancellation token to check. If None, the coroutine is awaited directly.
cancel_callback: An optional async callback to invoke when cancellation occurs
(e.g., to cancel child workflows).
:param coro: The awaitable to await (coroutine, Future, or asyncio.Task).
:param token: The cancellation token to check. If None, the coroutine is awaited directly.
:param cancel_callback: An optional async callback to invoke when cancellation occurs (e.g., to cancel child workflows).
Returns:
The result of the coroutine.
:returns: The result of the coroutine.
Raises:
asyncio.CancelledError: If the token is cancelled before the coroutine completes.
:raises: asyncio.CancelledError: If the token is cancelled before the coroutine completes.
Example:
```python
async def cleanup() -> None:
print("cleaning up...")
**Example**
async def long_running_task():
await asyncio.sleep(10)
return "done"
```python
async def cleanup() -> None:
print("cleaning up...")
token = CancellationToken()
async def long_running_task():
await asyncio.sleep(10)
return "done"
# This will raise asyncio.CancelledError if token.cancel() is called
result = await await_with_cancellation(
long_running_task(),
token,
cancel_callback=cleanup,
)
```
token = CancellationToken()
# This will raise asyncio.CancelledError if token.cancel() is called
result = await await_with_cancellation(
long_running_task(),
token,
cancel_callback=cleanup,
)
```
"""
if token is None:
logger.debug("await_with_cancellation: no token provided, awaiting directly")
return await coro
logger.debug("await_with_cancellation: starting with cancellation token")
# Check if already cancelled
if token.is_cancelled:
logger.debug("await_with_cancellation: token already cancelled")
if cancel_callback:
logger.debug("await_with_cancellation: invoking cancel callback")
await _invoke_cancel_callback(cancel_callback)
raise asyncio.CancelledError("Operation cancelled by cancellation token")
main_task = asyncio.ensure_future(coro)
@@ -138,12 +124,11 @@ async def await_with_cancellation(
try:
result = await race_against_token(main_task, token)
logger.debug("await_with_cancellation: completed successfully")
return result
return result
except asyncio.CancelledError:
logger.debug("await_with_cancellation: cancelled")
if cancel_callback:
logger.debug("await_with_cancellation: invoking cancel callback")
with contextlib.suppress(asyncio.CancelledError):
await asyncio.shield(_invoke_cancel_callback(cancel_callback))
raise
+17 -32
View File
@@ -126,8 +126,14 @@ class Runner:
admin_client=self.admin_client,
)
self.event_client = EventClient(self.config)
self.durable_event_listener = DurableEventListener(
self.config, admin_client=self.admin_client
has_durable_tasks = any(
task.is_durable for task in self.action_registry.values()
)
self.durable_event_listener: DurableEventListener | None = (
DurableEventListener(self.config, admin_client=self.admin_client)
if has_durable_tasks
else None
)
self.worker_context = WorkerContext(
@@ -147,10 +153,10 @@ class Runner:
if self.worker_context.id() is None:
self.worker_context._worker_id = action.worker_id
## fixme: only do this if durable tasks are registered
self.durable_event_listener_task = asyncio.create_task(
self.durable_event_listener.ensure_started(action.worker_id)
)
if self.durable_event_listener is not None:
self.durable_event_listener_task = asyncio.create_task(
self.durable_event_listener.ensure_started(action.worker_id)
)
t: asyncio.Task[Exception | None] | None = None
match action.action_type:
@@ -509,26 +515,18 @@ class Runner:
start_time = time.monotonic()
logger.info(
f"Cancellation: received cancel action for {action.action_id}, "
f"received cancel action for {action.action_id}, "
f"reason={CancellationReason.WORKFLOW_CANCELLED.value}"
)
try:
# Trigger the cancellation token to signal the context to stop
if key in self.contexts:
ctx = self.contexts[key]
child_count = len(ctx.cancellation_token.child_run_ids)
logger.debug(
f"Cancellation: triggering token for {action.action_id}, "
f"reason={CancellationReason.WORKFLOW_CANCELLED.value}, "
f"{child_count} children registered"
)
ctx._set_cancellation_flag(CancellationReason.WORKFLOW_CANCELLED)
self.cancellations[key] = True
# Note: Child workflows are not cancelled here - they run independently
# and are managed by Hatchet's normal cancellation mechanisms
else:
logger.debug(f"Cancellation: no context found for {action.action_id}")
# Wait with supervision (using timedelta configs)
grace_period = self.config.cancellation_grace_period.total_seconds()
@@ -548,7 +546,7 @@ class Runner:
if task_still_running:
logger.warning(
f"Cancellation: task {action.action_id} has not cancelled after "
f"task {action.action_id} has not cancelled after "
f"{elapsed_ms}ms (warning threshold {warning_threshold_ms}ms). "
f"Consider checking for blocking operations. "
f"See https://docs.hatchet.run/home/cancellation"
@@ -559,25 +557,18 @@ class Runner:
await asyncio.sleep(remaining)
if key in self.tasks and not self.tasks[key].done():
logger.debug(
f"Cancellation: force-cancelling task {action.action_id} "
f"after grace period ({grace_period_ms}ms)"
)
self.tasks[key].cancel()
if key in self.threads:
thread = self.threads[key]
if self.config.enable_force_kill_sync_threads:
logger.debug(
f"Cancellation: force-killing thread for {action.action_id}"
)
self.force_kill_thread(thread)
await asyncio.sleep(1)
if thread.is_alive():
logger.warning(
f"Cancellation: thread {thread.ident} with key {key} is still running "
f"thread {thread.ident} with key {key} is still running "
f"after cancellation. This could cause the thread pool to get blocked "
f"and prevent new tasks from running."
)
@@ -586,15 +577,9 @@ class Runner:
total_elapsed_ms = round(total_elapsed * 1000)
if total_elapsed > grace_period:
logger.warning(
f"Cancellation: cancellation of {action.action_id} took {total_elapsed_ms}ms "
f"cancellation of {action.action_id} took {total_elapsed_ms}ms "
f"(exceeded grace period of {grace_period_ms}ms)"
)
else:
logger.debug(
f"Cancellation: task {action.action_id} eventually completed in {total_elapsed_ms}ms"
)
else:
logger.info(f"Cancellation: task {action.action_id} completed")
finally:
self.cleanup_run_id(key)
+1 -31
View File
@@ -3,6 +3,7 @@ from __future__ import annotations
import time
from typing import TYPE_CHECKING, Any
from hatchet_sdk.cancellation import CancellationToken
from hatchet_sdk.clients.listeners.run_event_listener import (
RunEventListener,
RunEventListenerClient,
@@ -14,11 +15,9 @@ from hatchet_sdk.exceptions import (
FailedTaskRunExceptionGroup,
TaskRunError,
)
from hatchet_sdk.logger import logger
from hatchet_sdk.utils.cancellation import await_with_cancellation
if TYPE_CHECKING:
from hatchet_sdk.cancellation import CancellationToken
from hatchet_sdk.clients.admin import AdminClient
@@ -50,11 +49,6 @@ class WorkflowRunRef:
:param cancellation_token: Optional cancellation token to abort the wait.
:return: A dictionary mapping task names to their outputs.
"""
logger.debug(
f"WorkflowRunRef.aio_result: waiting for {self.workflow_run_id}, "
f"token={cancellation_token is not None}"
)
if cancellation_token:
return await await_with_cancellation(
self.workflow_run_listener.aio_result(self.workflow_run_id),
@@ -88,20 +82,11 @@ class WorkflowRunRef:
"""
from hatchet_sdk.clients.admin import RunStatus
logger.debug(
f"WorkflowRunRef.result: waiting for {self.workflow_run_id}, "
f"token={cancellation_token is not None}"
)
retries = 0
while True:
# Check cancellation at start of each iteration
if cancellation_token and cancellation_token.is_cancelled:
logger.debug(
f"WorkflowRunRef.result: cancellation detected for {self.workflow_run_id}, "
f"reason={CancellationReason.PARENT_CANCELLED.value}"
)
raise CancelledError(
"Operation cancelled by cancellation token",
reason=CancellationReason.PARENT_CANCELLED,
@@ -120,10 +105,6 @@ class WorkflowRunRef:
# Use interruptible sleep via token.wait()
if cancellation_token:
if cancellation_token.wait(timeout=1.0):
logger.debug(
f"WorkflowRunRef.result: cancellation during retry sleep for {self.workflow_run_id}, "
f"reason={CancellationReason.PARENT_CANCELLED.value}"
)
raise CancelledError(
"Operation cancelled by cancellation token",
reason=CancellationReason.PARENT_CANCELLED,
@@ -132,10 +113,6 @@ class WorkflowRunRef:
time.sleep(1)
continue
logger.debug(
f"WorkflowRunRef.result: {self.workflow_run_id} status={details.status}"
)
if (
details.status in [RunStatus.QUEUED, RunStatus.RUNNING]
or details.done is False
@@ -143,10 +120,6 @@ class WorkflowRunRef:
# Use interruptible sleep via token.wait()
if cancellation_token:
if cancellation_token.wait(timeout=1.0):
logger.debug(
f"WorkflowRunRef.result: cancellation during poll sleep for {self.workflow_run_id}, "
f"reason={CancellationReason.PARENT_CANCELLED.value}"
)
raise CancelledError(
"Operation cancelled by cancellation token",
reason=CancellationReason.PARENT_CANCELLED,
@@ -166,9 +139,6 @@ class WorkflowRunRef:
)
if details.status == RunStatus.COMPLETED:
logger.debug(
f"WorkflowRunRef.result: {self.workflow_run_id} completed successfully"
)
return {
readable_id: run.output
for readable_id, run in details.task_runs.items()
+3 -8
View File
@@ -2318,14 +2318,9 @@ CREATE TABLE v1_durable_event_log_entry (
parent_node_id BIGINT,
-- The branch id when this event was first seen. A durable event log can be a part of many branches.
branch_id BIGINT NOT NULL,
-- Todo: Associated data for this event should be stored in the v1_payload table!
-- data JSONB,
-- The hash of the data stored in the v1_payload table to check non-determinism violations.
-- This can be null for event types that don't have associated data.
-- TODO: we can add CHECK CONSTRAINT for event types that require data_hash to be non-null.
data_hash BYTEA,
-- Can discuss: adds some flexibility for future hash algorithms
data_hash_alg TEXT,
-- An idempotency key generated from the incoming data (using the type of event + wait for conditions or the trigger event payload + options)
-- to determine whether or not there's been a non-determinism error
idempotency_key BYTEA NOT NULL,
-- Access patterns:
-- Definite: we'll query directly for the node_id when a durable task is replaying its log
-- Possible: we may want to query a range of node_ids for a durable task