mirror of
https://github.com/hatchet-dev/hatchet.git
synced 2026-04-23 18:49:47 -05:00
Merge branch 'main' into feat-durable-execution
This commit is contained in:
@@ -14,7 +14,7 @@ permissions:
|
||||
jobs:
|
||||
scan-scheduled:
|
||||
if: ${{ github.event_name == 'push' }}
|
||||
uses: "google/osv-scanner-action/.github/workflows/osv-scanner-reusable.yml@v2.3.2"
|
||||
uses: "google/osv-scanner-action/.github/workflows/osv-scanner-reusable.yml@v2.3.3"
|
||||
with:
|
||||
scan-args: |-
|
||||
-r
|
||||
|
||||
@@ -349,7 +349,7 @@ jobs:
|
||||
matrix:
|
||||
migrate-strategy: ["latest"]
|
||||
rabbitmq-enabled: ["true"]
|
||||
pg-version: ["15-alpine"]
|
||||
pg-version: ["17-alpine"]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
@@ -59,6 +59,7 @@ V1WebhookSourceName:
|
||||
- STRIPE
|
||||
- SLACK
|
||||
- LINEAR
|
||||
- SVIX
|
||||
|
||||
V1WebhookHMACAlgorithm:
|
||||
type: string
|
||||
|
||||
@@ -356,22 +356,14 @@ func computeHMACSignature(payload []byte, secret []byte, algorithm sqlcv1.V1Inco
|
||||
}
|
||||
}
|
||||
|
||||
type HttpResponseCode int
|
||||
|
||||
const (
|
||||
Http400 HttpResponseCode = iota
|
||||
Http403
|
||||
Http500
|
||||
)
|
||||
|
||||
type ValidationError struct {
|
||||
Code HttpResponseCode
|
||||
Code int
|
||||
ErrorText string
|
||||
}
|
||||
|
||||
func (vr ValidationError) ToResponse() (gen.V1WebhookReceiveResponseObject, error) {
|
||||
switch vr.Code {
|
||||
case Http400:
|
||||
case http.StatusBadRequest:
|
||||
return gen.V1WebhookReceive400JSONResponse{
|
||||
Errors: []gen.APIError{
|
||||
{
|
||||
@@ -379,7 +371,7 @@ func (vr ValidationError) ToResponse() (gen.V1WebhookReceiveResponseObject, erro
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
case Http403:
|
||||
case http.StatusForbidden:
|
||||
return gen.V1WebhookReceive403JSONResponse{
|
||||
Errors: []gen.APIError{
|
||||
{
|
||||
@@ -387,7 +379,7 @@ func (vr ValidationError) ToResponse() (gen.V1WebhookReceiveResponseObject, erro
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
case Http500:
|
||||
case http.StatusInternalServerError:
|
||||
return nil, errors.New(vr.ErrorText)
|
||||
default:
|
||||
return nil, fmt.Errorf("no validation error set")
|
||||
@@ -429,161 +421,11 @@ func (w *V1WebhooksService) validateWebhook(webhookPayload []byte, webhook sqlcv
|
||||
) {
|
||||
switch webhook.SourceName {
|
||||
case sqlcv1.V1IncomingWebhookSourceNameSLACK:
|
||||
timestampHeader := request.Header.Get("X-Slack-Request-Timestamp")
|
||||
|
||||
if timestampHeader == "" {
|
||||
return false, &ValidationError{
|
||||
Code: Http403,
|
||||
ErrorText: "missing or invalid timestamp header: X-Slack-Request-Timestamp",
|
||||
}
|
||||
}
|
||||
|
||||
timestamp, err := strconv.ParseInt(strings.TrimSpace(timestampHeader), 10, 64)
|
||||
|
||||
if err != nil {
|
||||
return false, &ValidationError{
|
||||
Code: Http403,
|
||||
ErrorText: "Invalid timestamp in header",
|
||||
}
|
||||
}
|
||||
|
||||
// qq: should this be utc?
|
||||
if time.Unix(timestamp, 0).UTC().Before(time.Now().Add(-5 * time.Minute)) {
|
||||
return false, &ValidationError{
|
||||
Code: Http403,
|
||||
ErrorText: "timestamp in header is out of range",
|
||||
}
|
||||
}
|
||||
|
||||
algorithm := webhook.AuthHmacAlgorithm.V1IncomingWebhookHmacAlgorithm
|
||||
encoding := webhook.AuthHmacEncoding.V1IncomingWebhookHmacEncoding
|
||||
decryptedSigningSecret, err := w.config.Encryption.Decrypt(webhook.AuthHmacWebhookSigningSecret, "v1_webhook_hmac_signing_secret")
|
||||
|
||||
if err != nil {
|
||||
return false, &ValidationError{
|
||||
Code: Http500,
|
||||
ErrorText: fmt.Sprintf("failed to decrypt HMAC signing secret: %s", err),
|
||||
}
|
||||
}
|
||||
|
||||
sigBaseString := fmt.Sprintf("v0:%d:%s", timestamp, webhookPayload)
|
||||
|
||||
hash, err := computeHMACSignature([]byte(sigBaseString), decryptedSigningSecret, algorithm, encoding)
|
||||
|
||||
if err != nil {
|
||||
return false, &ValidationError{
|
||||
Code: Http500,
|
||||
ErrorText: fmt.Sprintf("failed to compute HMAC signature: %s", err),
|
||||
}
|
||||
}
|
||||
|
||||
expectedSignature := fmt.Sprintf("v0=%s", hash)
|
||||
|
||||
signatureHeader := request.Header.Get(webhook.AuthHmacSignatureHeaderName.String)
|
||||
|
||||
if signatureHeader == "" {
|
||||
return false, &ValidationError{
|
||||
Code: Http403,
|
||||
ErrorText: fmt.Sprintf("missing or invalid signature header: %s", webhook.AuthHmacSignatureHeaderName.String),
|
||||
}
|
||||
}
|
||||
|
||||
if !signaturesMatch(signatureHeader, expectedSignature) {
|
||||
return false, &ValidationError{
|
||||
Code: Http403,
|
||||
ErrorText: "invalid HMAC signature",
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
return w.validateSlackWebhook(webhookPayload, webhook, request)
|
||||
case sqlcv1.V1IncomingWebhookSourceNameSTRIPE:
|
||||
signatureHeader := request.Header.Get(webhook.AuthHmacSignatureHeaderName.String)
|
||||
|
||||
if signatureHeader == "" {
|
||||
return false, &ValidationError{
|
||||
Code: Http400,
|
||||
ErrorText: fmt.Sprintf("missing or invalid signature header: %s", webhook.AuthHmacSignatureHeaderName.String),
|
||||
}
|
||||
}
|
||||
|
||||
splitHeader := strings.Split(signatureHeader, ",")
|
||||
headersMap := make(map[string]string)
|
||||
|
||||
for _, header := range splitHeader {
|
||||
parts := strings.Split(header, "=")
|
||||
if len(parts) != 2 {
|
||||
return false, &ValidationError{
|
||||
Code: Http400,
|
||||
ErrorText: fmt.Sprintf("invalid signature header format: %s", webhook.AuthHmacSignatureHeaderName.String),
|
||||
}
|
||||
}
|
||||
headersMap[strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
|
||||
}
|
||||
|
||||
timestampHeader, hasTimestampHeader := headersMap["t"]
|
||||
v1SignatureHeader, hasV1SignatureHeader := headersMap["v1"]
|
||||
|
||||
if timestampHeader == "" || v1SignatureHeader == "" || !hasTimestampHeader || !hasV1SignatureHeader {
|
||||
return false, &ValidationError{
|
||||
Code: Http400,
|
||||
ErrorText: fmt.Sprintf("missing or invalid signature header: %s", webhook.AuthHmacSignatureHeaderName.String),
|
||||
}
|
||||
}
|
||||
|
||||
timestamp := strings.TrimPrefix(timestampHeader, "t=")
|
||||
signature := strings.TrimPrefix(v1SignatureHeader, "v1=")
|
||||
|
||||
if timestamp == "" || signature == "" {
|
||||
return false, &ValidationError{
|
||||
Code: Http400,
|
||||
ErrorText: fmt.Sprintf("missing or invalid signature header: %s", webhook.AuthHmacSignatureHeaderName.String),
|
||||
}
|
||||
}
|
||||
|
||||
parsedTimestamp, err := strconv.ParseInt(timestamp, 10, 64)
|
||||
|
||||
if err != nil {
|
||||
return false, &ValidationError{
|
||||
Code: Http400,
|
||||
ErrorText: "Invalid timestamp in signature header",
|
||||
}
|
||||
}
|
||||
|
||||
if time.Unix(parsedTimestamp, 0).UTC().Before(time.Now().Add(-10 * time.Minute)) {
|
||||
return false, &ValidationError{
|
||||
Code: Http400,
|
||||
ErrorText: "timestamp in signature header is out of range",
|
||||
}
|
||||
}
|
||||
|
||||
decryptedSigningSecret, err := w.config.Encryption.Decrypt(webhook.AuthHmacWebhookSigningSecret, "v1_webhook_hmac_signing_secret")
|
||||
if err != nil {
|
||||
return false, &ValidationError{
|
||||
Code: Http500,
|
||||
ErrorText: fmt.Sprintf("failed to decrypt HMAC signing secret: %s", err),
|
||||
}
|
||||
}
|
||||
|
||||
algorithm := webhook.AuthHmacAlgorithm.V1IncomingWebhookHmacAlgorithm
|
||||
encoding := webhook.AuthHmacEncoding.V1IncomingWebhookHmacEncoding
|
||||
|
||||
signedPayload := fmt.Sprintf("%s.%s", timestamp, webhookPayload)
|
||||
|
||||
expectedSignature, err := computeHMACSignature([]byte(signedPayload), decryptedSigningSecret, algorithm, encoding)
|
||||
|
||||
if err != nil {
|
||||
return false, &ValidationError{
|
||||
Code: Http403,
|
||||
ErrorText: fmt.Sprintf("failed to compute HMAC signature: %s", err),
|
||||
}
|
||||
}
|
||||
|
||||
if !signaturesMatch(signature, expectedSignature) {
|
||||
return false, &ValidationError{
|
||||
Code: Http403,
|
||||
ErrorText: "invalid HMAC signature",
|
||||
}
|
||||
}
|
||||
return w.validateStripeWebhook(webhookPayload, webhook, request)
|
||||
case sqlcv1.V1IncomingWebhookSourceNameSVIX:
|
||||
return w.validateSvixWebhook(webhookPayload, webhook, request)
|
||||
case sqlcv1.V1IncomingWebhookSourceNameGITHUB:
|
||||
fallthrough
|
||||
case sqlcv1.V1IncomingWebhookSourceNameLINEAR:
|
||||
@@ -595,7 +437,7 @@ func (w *V1WebhooksService) validateWebhook(webhookPayload []byte, webhook sqlcv
|
||||
|
||||
if !ok {
|
||||
return false, &ValidationError{
|
||||
Code: Http403,
|
||||
Code: http.StatusForbidden,
|
||||
ErrorText: "missing or invalid authorization header",
|
||||
}
|
||||
}
|
||||
@@ -604,14 +446,14 @@ func (w *V1WebhooksService) validateWebhook(webhookPayload []byte, webhook sqlcv
|
||||
|
||||
if err != nil {
|
||||
return false, &ValidationError{
|
||||
Code: Http500,
|
||||
Code: http.StatusInternalServerError,
|
||||
ErrorText: fmt.Sprintf("failed to decrypt basic auth password: %s", err),
|
||||
}
|
||||
}
|
||||
|
||||
if username != webhook.AuthBasicUsername.String || password != string(decryptedPassword) {
|
||||
return false, &ValidationError{
|
||||
Code: Http403,
|
||||
Code: http.StatusForbidden,
|
||||
ErrorText: "invalid basic auth credentials",
|
||||
}
|
||||
}
|
||||
@@ -620,7 +462,7 @@ func (w *V1WebhooksService) validateWebhook(webhookPayload []byte, webhook sqlcv
|
||||
|
||||
if apiKey == "" {
|
||||
return false, &ValidationError{
|
||||
Code: Http403,
|
||||
Code: http.StatusForbidden,
|
||||
ErrorText: fmt.Sprintf("missing or invalid api key header: %s", webhook.AuthApiKeyHeaderName.String),
|
||||
}
|
||||
}
|
||||
@@ -629,14 +471,14 @@ func (w *V1WebhooksService) validateWebhook(webhookPayload []byte, webhook sqlcv
|
||||
|
||||
if err != nil {
|
||||
return false, &ValidationError{
|
||||
Code: Http500,
|
||||
Code: http.StatusInternalServerError,
|
||||
ErrorText: fmt.Sprintf("failed to decrypt api key: %s", err),
|
||||
}
|
||||
}
|
||||
|
||||
if apiKey != string(decryptedApiKey) {
|
||||
return false, &ValidationError{
|
||||
Code: Http403,
|
||||
Code: http.StatusForbidden,
|
||||
ErrorText: fmt.Sprintf("invalid api key: %s", webhook.AuthApiKeyHeaderName.String),
|
||||
}
|
||||
}
|
||||
@@ -645,7 +487,7 @@ func (w *V1WebhooksService) validateWebhook(webhookPayload []byte, webhook sqlcv
|
||||
|
||||
if signature == "" {
|
||||
return false, &ValidationError{
|
||||
Code: Http403,
|
||||
Code: http.StatusForbidden,
|
||||
ErrorText: fmt.Sprintf("missing or invalid signature header: %s", webhook.AuthHmacSignatureHeaderName.String),
|
||||
}
|
||||
}
|
||||
@@ -654,7 +496,7 @@ func (w *V1WebhooksService) validateWebhook(webhookPayload []byte, webhook sqlcv
|
||||
|
||||
if err != nil {
|
||||
return false, &ValidationError{
|
||||
Code: Http500,
|
||||
Code: http.StatusInternalServerError,
|
||||
ErrorText: fmt.Sprintf("failed to decrypt HMAC signing secret: %s", err),
|
||||
}
|
||||
}
|
||||
@@ -666,26 +508,26 @@ func (w *V1WebhooksService) validateWebhook(webhookPayload []byte, webhook sqlcv
|
||||
|
||||
if err != nil {
|
||||
return false, &ValidationError{
|
||||
Code: Http500,
|
||||
Code: http.StatusInternalServerError,
|
||||
ErrorText: fmt.Sprintf("failed to compute HMAC signature: %s", err),
|
||||
}
|
||||
}
|
||||
|
||||
if !signaturesMatch(signature, expectedSignature) {
|
||||
return false, &ValidationError{
|
||||
Code: Http403,
|
||||
Code: http.StatusForbidden,
|
||||
ErrorText: "invalid HMAC signature",
|
||||
}
|
||||
}
|
||||
default:
|
||||
return false, &ValidationError{
|
||||
Code: Http400,
|
||||
Code: http.StatusBadRequest,
|
||||
ErrorText: fmt.Sprintf("unsupported auth type: %s", webhook.AuthMethod),
|
||||
}
|
||||
}
|
||||
default:
|
||||
return false, &ValidationError{
|
||||
Code: Http400,
|
||||
Code: http.StatusBadRequest,
|
||||
ErrorText: fmt.Sprintf("unsupported source name: %+v", webhook.SourceName),
|
||||
}
|
||||
}
|
||||
@@ -693,6 +535,263 @@ func (w *V1WebhooksService) validateWebhook(webhookPayload []byte, webhook sqlcv
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (w *V1WebhooksService) validateSlackWebhook(webhookPayload []byte, webhook sqlcv1.V1IncomingWebhook, request http.Request) (
|
||||
IsValid,
|
||||
*ValidationError,
|
||||
) {
|
||||
timestampHeader := request.Header.Get("X-Slack-Request-Timestamp")
|
||||
|
||||
if timestampHeader == "" {
|
||||
return false, &ValidationError{
|
||||
Code: http.StatusForbidden,
|
||||
ErrorText: "missing or invalid timestamp header: X-Slack-Request-Timestamp",
|
||||
}
|
||||
}
|
||||
|
||||
timestamp, err := strconv.ParseInt(strings.TrimSpace(timestampHeader), 10, 64)
|
||||
|
||||
if err != nil {
|
||||
return false, &ValidationError{
|
||||
Code: http.StatusForbidden,
|
||||
ErrorText: "Invalid timestamp in header",
|
||||
}
|
||||
}
|
||||
|
||||
// qq: should this be utc?
|
||||
if time.Unix(timestamp, 0).UTC().Before(time.Now().Add(-5 * time.Minute)) {
|
||||
return false, &ValidationError{
|
||||
Code: http.StatusForbidden,
|
||||
ErrorText: "timestamp in header is out of range",
|
||||
}
|
||||
}
|
||||
|
||||
algorithm := webhook.AuthHmacAlgorithm.V1IncomingWebhookHmacAlgorithm
|
||||
encoding := webhook.AuthHmacEncoding.V1IncomingWebhookHmacEncoding
|
||||
decryptedSigningSecret, err := w.config.Encryption.Decrypt(webhook.AuthHmacWebhookSigningSecret, "v1_webhook_hmac_signing_secret")
|
||||
|
||||
if err != nil {
|
||||
return false, &ValidationError{
|
||||
Code: http.StatusInternalServerError,
|
||||
ErrorText: fmt.Sprintf("failed to decrypt HMAC signing secret: %s", err),
|
||||
}
|
||||
}
|
||||
|
||||
sigBaseString := fmt.Sprintf("v0:%d:%s", timestamp, webhookPayload)
|
||||
|
||||
hash, err := computeHMACSignature([]byte(sigBaseString), decryptedSigningSecret, algorithm, encoding)
|
||||
|
||||
if err != nil {
|
||||
return false, &ValidationError{
|
||||
Code: http.StatusInternalServerError,
|
||||
ErrorText: fmt.Sprintf("failed to compute HMAC signature: %s", err),
|
||||
}
|
||||
}
|
||||
|
||||
expectedSignature := fmt.Sprintf("v0=%s", hash)
|
||||
|
||||
signatureHeader := request.Header.Get(webhook.AuthHmacSignatureHeaderName.String)
|
||||
|
||||
if signatureHeader == "" {
|
||||
return false, &ValidationError{
|
||||
Code: http.StatusForbidden,
|
||||
ErrorText: fmt.Sprintf("missing or invalid signature header: %s", webhook.AuthHmacSignatureHeaderName.String),
|
||||
}
|
||||
}
|
||||
|
||||
if !signaturesMatch(signatureHeader, expectedSignature) {
|
||||
return false, &ValidationError{
|
||||
Code: http.StatusForbidden,
|
||||
ErrorText: "invalid HMAC signature",
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (w *V1WebhooksService) validateStripeWebhook(webhookPayload []byte, webhook sqlcv1.V1IncomingWebhook, request http.Request) (
|
||||
IsValid,
|
||||
*ValidationError,
|
||||
) {
|
||||
signatureHeader := request.Header.Get(webhook.AuthHmacSignatureHeaderName.String)
|
||||
|
||||
if signatureHeader == "" {
|
||||
return false, &ValidationError{
|
||||
Code: http.StatusBadRequest,
|
||||
ErrorText: fmt.Sprintf("missing or invalid signature header: %s", webhook.AuthHmacSignatureHeaderName.String),
|
||||
}
|
||||
}
|
||||
|
||||
splitHeader := strings.Split(signatureHeader, ",")
|
||||
headersMap := make(map[string]string)
|
||||
|
||||
for _, header := range splitHeader {
|
||||
parts := strings.Split(header, "=")
|
||||
if len(parts) != 2 {
|
||||
return false, &ValidationError{
|
||||
Code: http.StatusBadRequest,
|
||||
ErrorText: fmt.Sprintf("invalid signature header format: %s", webhook.AuthHmacSignatureHeaderName.String),
|
||||
}
|
||||
}
|
||||
headersMap[strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
|
||||
}
|
||||
|
||||
timestampHeader, hasTimestampHeader := headersMap["t"]
|
||||
v1SignatureHeader, hasV1SignatureHeader := headersMap["v1"]
|
||||
|
||||
if timestampHeader == "" || v1SignatureHeader == "" || !hasTimestampHeader || !hasV1SignatureHeader {
|
||||
return false, &ValidationError{
|
||||
Code: http.StatusBadRequest,
|
||||
ErrorText: fmt.Sprintf("missing or invalid signature header: %s", webhook.AuthHmacSignatureHeaderName.String),
|
||||
}
|
||||
}
|
||||
|
||||
timestamp := strings.TrimPrefix(timestampHeader, "t=")
|
||||
signature := strings.TrimPrefix(v1SignatureHeader, "v1=")
|
||||
|
||||
if timestamp == "" || signature == "" {
|
||||
return false, &ValidationError{
|
||||
Code: http.StatusBadRequest,
|
||||
ErrorText: fmt.Sprintf("missing or invalid signature header: %s", webhook.AuthHmacSignatureHeaderName.String),
|
||||
}
|
||||
}
|
||||
|
||||
parsedTimestamp, err := strconv.ParseInt(timestamp, 10, 64)
|
||||
|
||||
if err != nil {
|
||||
return false, &ValidationError{
|
||||
Code: http.StatusBadRequest,
|
||||
ErrorText: "Invalid timestamp in signature header",
|
||||
}
|
||||
}
|
||||
|
||||
if time.Unix(parsedTimestamp, 0).UTC().Before(time.Now().Add(-10 * time.Minute)) {
|
||||
return false, &ValidationError{
|
||||
Code: http.StatusBadRequest,
|
||||
ErrorText: "timestamp in signature header is out of range",
|
||||
}
|
||||
}
|
||||
|
||||
decryptedSigningSecret, err := w.config.Encryption.Decrypt(webhook.AuthHmacWebhookSigningSecret, "v1_webhook_hmac_signing_secret")
|
||||
if err != nil {
|
||||
return false, &ValidationError{
|
||||
Code: http.StatusInternalServerError,
|
||||
ErrorText: fmt.Sprintf("failed to decrypt HMAC signing secret: %s", err),
|
||||
}
|
||||
}
|
||||
|
||||
algorithm := webhook.AuthHmacAlgorithm.V1IncomingWebhookHmacAlgorithm
|
||||
encoding := webhook.AuthHmacEncoding.V1IncomingWebhookHmacEncoding
|
||||
|
||||
signedPayload := fmt.Sprintf("%s.%s", timestamp, webhookPayload)
|
||||
|
||||
expectedSignature, err := computeHMACSignature([]byte(signedPayload), decryptedSigningSecret, algorithm, encoding)
|
||||
|
||||
if err != nil {
|
||||
return false, &ValidationError{
|
||||
Code: http.StatusForbidden,
|
||||
ErrorText: fmt.Sprintf("failed to compute HMAC signature: %s", err),
|
||||
}
|
||||
}
|
||||
|
||||
if !signaturesMatch(signature, expectedSignature) {
|
||||
return false, &ValidationError{
|
||||
Code: http.StatusForbidden,
|
||||
ErrorText: "invalid HMAC signature",
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (w *V1WebhooksService) validateSvixWebhook(webhookPayload []byte, webhook sqlcv1.V1IncomingWebhook, request http.Request) (
|
||||
IsValid,
|
||||
*ValidationError,
|
||||
) {
|
||||
decryptedSigningSecret, err := w.config.Encryption.Decrypt(webhook.AuthHmacWebhookSigningSecret, "v1_webhook_hmac_signing_secret")
|
||||
|
||||
if err != nil {
|
||||
return false, &ValidationError{
|
||||
Code: http.StatusInternalServerError,
|
||||
ErrorText: fmt.Sprintf("failed to decrypt SVIX signing secret: %s", err),
|
||||
}
|
||||
}
|
||||
|
||||
key, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(string(decryptedSigningSecret), "whsec_"))
|
||||
if err != nil {
|
||||
return false, &ValidationError{
|
||||
Code: http.StatusInternalServerError,
|
||||
ErrorText: fmt.Sprintf("failed to decode SVIX signing secret: %s", err),
|
||||
}
|
||||
}
|
||||
|
||||
headers := request.Header
|
||||
|
||||
msgId := headers.Get("svix-id")
|
||||
msgSignature := headers.Get("svix-signature")
|
||||
msgTimestamp := headers.Get("svix-timestamp")
|
||||
|
||||
if msgId == "" || msgSignature == "" || msgTimestamp == "" {
|
||||
msgId = headers.Get("webhook-id")
|
||||
msgSignature = headers.Get("webhook-signature")
|
||||
msgTimestamp = headers.Get("webhook-timestamp")
|
||||
if msgId == "" || msgSignature == "" || msgTimestamp == "" {
|
||||
return false, &ValidationError{
|
||||
Code: http.StatusBadRequest,
|
||||
ErrorText: "missing or invalid headers",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
timestamp, err := svixParseTimestampHeader(msgTimestamp)
|
||||
if err != nil {
|
||||
return false, &ValidationError{
|
||||
Code: http.StatusBadRequest,
|
||||
ErrorText: fmt.Sprintf("invalid timestamp header: %s", err),
|
||||
}
|
||||
}
|
||||
|
||||
err = svixVerifyTimestamp(timestamp)
|
||||
if err != nil {
|
||||
return false, &ValidationError{
|
||||
Code: http.StatusBadRequest,
|
||||
ErrorText: fmt.Sprintf("invalid timestamp header: %s", err),
|
||||
}
|
||||
}
|
||||
|
||||
computedSignature, err := svixSign(key, msgId, timestamp, webhookPayload)
|
||||
if err != nil {
|
||||
return false, &ValidationError{
|
||||
Code: http.StatusInternalServerError,
|
||||
ErrorText: fmt.Sprintf("failed to sign SVIX payload: %s", err),
|
||||
}
|
||||
}
|
||||
expectedSignature := []byte(strings.Split(computedSignature, ",")[1])
|
||||
|
||||
passedSignatures := strings.SplitSeq(msgSignature, " ")
|
||||
|
||||
for versionedSignature := range passedSignatures {
|
||||
sigParts := strings.Split(versionedSignature, ",")
|
||||
if len(sigParts) < 2 {
|
||||
continue
|
||||
}
|
||||
version := sigParts[0]
|
||||
signature := []byte(sigParts[1])
|
||||
|
||||
if version != "v1" {
|
||||
continue
|
||||
}
|
||||
|
||||
if hmac.Equal(signature, expectedSignature) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, &ValidationError{
|
||||
Code: http.StatusForbidden,
|
||||
ErrorText: "invalid SVIX signature",
|
||||
}
|
||||
}
|
||||
|
||||
func signaturesMatch(providedSignature, expectedSignature string) bool {
|
||||
providedSignature = strings.TrimSpace(providedSignature)
|
||||
expectedSignature = strings.TrimSpace(expectedSignature)
|
||||
@@ -711,3 +810,40 @@ func removePrefixesFromSignature(signature string) string {
|
||||
|
||||
return signature
|
||||
}
|
||||
|
||||
var errInvalidHeaders = errors.New("invalid headers")
|
||||
var errMessageTooOld = errors.New("message too old")
|
||||
var errMessageTooNew = errors.New("message too new")
|
||||
var tolerance = 5 * time.Minute
|
||||
|
||||
func svixParseTimestampHeader(timestampHeader string) (time.Time, error) {
|
||||
timeInt, err := strconv.ParseInt(timestampHeader, 10, 64)
|
||||
if err != nil {
|
||||
return time.Time{}, errInvalidHeaders
|
||||
}
|
||||
timestamp := time.Unix(timeInt, 0)
|
||||
return timestamp, nil
|
||||
}
|
||||
|
||||
func svixVerifyTimestamp(timestamp time.Time) error {
|
||||
now := time.Now()
|
||||
|
||||
if now.Sub(timestamp) > tolerance {
|
||||
return errMessageTooOld
|
||||
}
|
||||
if timestamp.Unix() > now.Add(tolerance).Unix() {
|
||||
return errMessageTooNew
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func svixSign(key []byte, msgId string, timestamp time.Time, payload []byte) (string, error) {
|
||||
toSign := fmt.Sprintf("%s.%d.%s", msgId, timestamp.Unix(), payload)
|
||||
|
||||
h := hmac.New(sha256.New, key)
|
||||
h.Write([]byte(toSign))
|
||||
sig := make([]byte, base64.StdEncoding.EncodedLen(h.Size()))
|
||||
base64.StdEncoding.Encode(sig, h.Sum(nil))
|
||||
return fmt.Sprintf("v1,%s", sig), nil
|
||||
}
|
||||
|
||||
@@ -299,6 +299,7 @@ const (
|
||||
LINEAR V1WebhookSourceName = "LINEAR"
|
||||
SLACK V1WebhookSourceName = "SLACK"
|
||||
STRIPE V1WebhookSourceName = "STRIPE"
|
||||
SVIX V1WebhookSourceName = "SVIX"
|
||||
)
|
||||
|
||||
// Defines values for V1WorkflowType.
|
||||
@@ -16137,13 +16138,13 @@ func (sh *strictHandler) WorkflowVersionGet(ctx echo.Context, workflow openapi_t
|
||||
// Base64 encoded, gzipped, json marshaled Swagger object
|
||||
var swaggerSpec = []string{
|
||||
|
||||
"H4sIAAAAAAAC/+y9e3PbOLI4+lVYurfqN1Ml+ZXJnDmp+v2h2EqiiWN7JTm5e+akvBAJSxhTJJcA7WhT",
|
||||
"H4sIAAAAAAAC/+y9e3PbOLIo/lVY+v2q7kyV5Fcmc+ak6v6h2EqiiWN7JTm5e+ekvBAJSxhTJJcA7WhT",
|
||||
"/u638CJBEiBBvSwlrNracUQ8Go3uRqPRj+8dN1xEYQADgjtvvnewO4cLwP7s3wwHcRzG9O8oDiMYEwTZ",
|
||||
"Fzf0IP2vB7Ebo4igMOi86QDHTTAJF84HQNw5JA6kvR3WuNuB38Ai8mHnzelvJyfdzn0YLwDpvOkkKCC/",
|
||||
"/9bpdsgygp03HRQQOINx57mbH748m/Jv5z6MHTJHmM+pTtfpZw0foYBpATEGM5jNikmMghmbNHTxnY+C",
|
||||
"B92U9HeHhA6ZQ8cL3WQBAwI0AHQddO8g4sBvCBOcA2eGyDyZHrnh4njO8dTz4KP8WwfRPYK+V4aGwsA+",
|
||||
"OWQOiDK5g7ADMA5dBAj0nCdE5gweEEU+csHUz21HJwALDSKeu50Y/jtBMfQ6b/7KTf01bRxO/4YuoTBK",
|
||||
"WsFlYoHp74jABfvj/43hfedN5/85zmjvWBDecUp1z+k0II7BsgSSGNcAzSdIQBkW4Pvh0/kcBDN4AzB+",
|
||||
"WsFlYoHp74jABfvj/4/hfedN5/87zmjvWBDecUp1z+k0II7BsgSSGNcAzSdIQBkW4Pvh0/kcBDN4AzB+",
|
||||
"CmMNYp/mkMxh7ISxE4TESTCMseOCwHFZR7r5KHYi2V/BJYkTmIIzDUMfgoDCw6eNISBwAgMQkCaTsm5O",
|
||||
"AJ8cwvpi6xmHwSMifOGWkyHWwwnZV/4zo3aEHRRgAgIXWs8+RrMgiRpMjtEscJIoY6VGUyZkbkFalCz6",
|
||||
"tOlztxOFmMzDmWWvG9Gadlz6YdCPoqGBK2/od8puzvCCrSbBkPWhXE+piDg4iaIwJjlGPD179dvr3//r",
|
||||
@@ -16166,7 +16167,7 @@ var swaggerSpec = []string{
|
||||
"GM1mMDbuY0ZlnxS1pzSwG4fBoJpuaZMrsQFlpZmKPe3IUYzCGJFlkbSZeBHSqfPmFTu8+N+nZZIvKQh0",
|
||||
"tq5ucQqcpVV9TTFYfVbrcVYgurRNKupTCmQnqbLNGTL0YzGGshvgQXeJo/3ZKWTonm2TuhnlMeRXKXrT",
|
||||
"cZocC+Vh2ScGHBvQuUc+gRSiek7g11GGtWzzxldjxbpg3EUSRsjtxyZ2XID/hIEjFXyHUozzS3909atc",
|
||||
"/fhq7LAx1hFjqaa7QMH/Pe0uwLf/e/b697LKmwJr5npudOz7MCaDBUD++zhMIrP8pk2wTlj6CBO6Rt5C",
|
||||
"/fhq7LAx1hFjqaa7QMH/Pu0uwLf/ffb697LKmwJr5npudOz7MCaDBUD++zhMIrP8pk2wTlj6CBO6Rt5C",
|
||||
"mrZieiJa2n1WWL6HHmGXzVheuwC1buU1lxw+uHav2Se5rXStVJ/gl4yN7K1cV7cTh36tbsRX8wlSfWxE",
|
||||
"22vx0RGD1WHFjI9ghgL4GcZSoNfDJBs/dzsweERxGCwgN3PX9x0oHawvytwWvok9YEgMg2kIYg8Fswsh",
|
||||
"Z/U6Fjc/G+V5NgyXyiR0MAljyB5h9HBne4P9ZGYQg34y2/zCu+LNiZ14zwYTJQNKT0mZJoFtD8IqpGoV",
|
||||
@@ -16225,7 +16226,7 @@ var swaggerSpec = []string{
|
||||
"EXUT+sjVpAvlGn/VJTitnMebapSGBmI3z351PqACOPPqhAXUVqAaBZJSDUDKo88nnW7n86lelHC3yR3E",
|
||||
"5BnDPLk/cJOCNxV55Z/TAceauAVzIZE1Xb2rvbw5QD9jcQ915TVR9xupq2HU3FRARP+XB8TMagdtKW4N",
|
||||
"TS9paNqiAWgrpd0aGPJXtsMbuPAL83kyJyzANyDButxgKptwxykHYSdirR0QeI4LgiAkDmClW1lNeJnX",
|
||||
"unRg6aDDuvt4rT0KeF4MMVbtUjktWho6yuYp+uEDwHPdcTMHeK4O+X9wYTpxAHFFlJdUH/Pq5M75nBVM",
|
||||
"unRg6aDDuvt4rT0KeF4MMVbtUjktWho6yuYp+uEDwHPdcTMHeK4O+b9wYTpxAHFFlJdUH/Pq5M75nBVM",
|
||||
"1k/4GcboHtWhl1nXqAx6FM1FWf8cDHpOmAN8I4v/280BnEh0cDAkBv7axkuWh3Dkg2WOEeT+NTZk5bH7",
|
||||
"1UBg53MQzKBEkJEJAvhkRiLjXfiUYU1q1HrYV9A75Mhs3VElICkQlfhbD4ZSBlbxpZvDkwnll+EMBatX",
|
||||
"N1uNv9cqdrZ3GJdrjOpwLfNeHRS67U5Ig2DYw92S9c1tN01Vq/EcRfhQjawlo/MOT/NtnDJ8Mt22fT49",
|
||||
@@ -16268,192 +16269,192 @@ var swaggerSpec = []string{
|
||||
"KTN15ToqRVRfEUj5UNNuGoreNcVLKuPkwnGLaqpVziu61CkdhSFUh8YEUyXahET5tXagAsrSUWuyX+XD",
|
||||
"U/0Z1QLnCxVv4w/9006X/ufs9e/8j9enZ51u59PF62rspRGvmjyzykT20bNpL5bi1A09i4p2uREGshNz",
|
||||
"uJkFgCQx/LA2HdOhnXQ8rcBEs4CVX3JjaLjeYvaNsWEqy9AssJqgGOKbIkrBk37FRdBqaWSg4D0NPB78",
|
||||
"f6yg4XjAwmb4H7ejy2ry2AvnOqnTWLrMlA8zBQ3vB1eDEZMx74eTD7dvmdPcaHgzYP5u/fOPnW7ncng1",
|
||||
"6Jv8y5SL4OZjmSudPZq7SEhrX+sm0bpJ/FhuEq0nQ/n9YU175n7b4w/GHNzwqbnmbVdjOBbPvWsZj1nr",
|
||||
"zHKc3W3yL725h9f0UVc19ymn4QUkshpBwVc2Cewf90UmAzwH9aYKNaSbtn8Xxhp45LsLS2RhEz3DGmaX",
|
||||
"2Pyj/frxABwcvLmsLLV+EOWQ6E4OJxLdErLy1ubVgfz2ejVBKFsoC6lOWQXsSz1eqNpRg9cLA8Y39ZLx",
|
||||
"Rec5IVFkXsyOYugK/j1qsGX/vaiMplXJhbrP6zVstFJhI7ugqPWgVX6T2JDNWfZNYr+RNUpYDei4ur3O",
|
||||
"oYTnyjLn8N/UIrHdvZnKVZEonJ5izvDeCULiRHH4iDzodR3gxCDwwoXs9IR835lCZwYDGMtrjEpdZ1vD",
|
||||
"eHM0e/tJgKvtza5JOYWzFtlUapkTR+/UPJEXP1YmilwXI2OKS/sdMOwbe1kEgZcVSoz5UKtd+ReQzEOv",
|
||||
"0WoF6J94z1S3Pw89A9V+mExuZApqN/RSCo4F8u3D9u8Aj9tnM+cm/mqJ8GoSEqisOeclzcvW1vm7tBSw",
|
||||
"Mu18Srcus3JNOt3OzfWY/ed2wrQk0wnJY5lwVaATFg8nvJyRCwIngjGlqyP7wnLP3Q54BIhdZs3J03J5",
|
||||
"hcrTwm/QTQh03DAQRS/9pcE5EeGI3ay1ibIo1aE0BR/AGM0C6DlZJ2Z5ur0dXjiCfXZ/o/TBFPq4uuIn",
|
||||
"a8NYKufGwY8BO1LkApWOo9syH2DyAYKYTCEgVbaB3FaxAq6s9AJw5rJ3/lZ+dnJ21js9652+mpy+fnPy",
|
||||
"+5vf/jj6448/Xr3+o3fy+s3JiX1mE8CZmaoHA0zA1GfGtj2EdAG+mQl/Ab6hRbLYHANsX+8w6xsxdGFa",
|
||||
"thSb0rfQNjw8hJetC+NVCHiUn0tDw7Eo95OV+8S1qZawk/VywkDdhwaQFefVQpcElGCGwX1ox6sjpQM9",
|
||||
"dP3QdE5huADRPIyhQxsJMbEimsdyrDGbTxcdb113Ips6TShzPhl+5jWu0z9v+rdjQ+yuTcAIR1YaLMLP",
|
||||
"TWOSLXGSc3lfALLemMd739bpxrejS83wTVVl1l6r5iiivHTKVybDlemTaNdNe9dU1K3m9aprJq/O/VmB",
|
||||
"h5d/ujReClIgR3nmLxStBsEsEU9a1mJhfPER82ORd1ZqPpcz1ujVNiGRBt9IDLQNsPdgHra0OAaRqpxe",
|
||||
"X/ZZ0P7NPycf2APJ5J83g/H5aHgz0Vt4Mk5WfR4Gl+8+XI95zP+n/lWf5xKpqo3/Ran8XzBiqrSpD2hJ",
|
||||
"f7Hwl+02qA/KDxxZIVRfV/LvcGoQrPSLDiAr+vwznOoE+U40ByPmZDU5jfIGZquvNbUuAu3VpPqBSXiT",
|
||||
"ZTeGyhWIF5pmckJ5DJLIrLT6as6F1IffIBPFIwHXG3VV8meQKN9ZDXKN/0Igs1/wZGwzSLBwUEy7OjPa",
|
||||
"Nz3rFMuzFmGsTPKYxIDAWW2aaAXCy1y/5hp2pkTnKygXE82+Oqs3TMipi6vparFatUXDC10+vBTA4YUW",
|
||||
"h7L3RxTkTAHvbq/OJ0MmZi9uR/23l1S1uui/rxSQdBB5fjaiYDa7hr3kd/2hvFYw3o7Pc70a/1yxn8Yk",
|
||||
"QoxJPsKquDoSEuDrKDblsQe4NHjFyOEpWdqF7slbGHBwBF10j9xsEueXCGAMPecRAeGW/KueK4yIaOAy",
|
||||
"lf16o7QmcQI149e9QKq+R+m1/vTk5MToS6QdJu/909CRp9GC/g6nUozZnuOGCgBrh7nyE3HXpi8+t7jT",
|
||||
"vwwIOXeYTbq2qF4LWv8Wc82Jt8sGg0+UXmWHk4YqidFlZZ0k0tlAqjOKAvbXamGyJzc8xW3F/lAYJcEa",
|
||||
"CXbLo7xD0M+d+2r+hoyWc1JMkYw1k4ylO04ru1vZ3crul5Ldhjl+QNFe4c+3gmhmow0JXJg9BA33lfrO",
|
||||
"xrJrY5YTqzrz6po+U1narY1n09rAgAaZXszNWkxSIBbVLSFSGbWOekopQ28GVxc8U2iWM1STDjafPDTN",
|
||||
"M/q2f/7x+t272lOSTbvSvTkvUMzEOMmLk6LHSBjcKJK/BCttMHbn0Ev8ipTphs5rH0dfigkzLAVMzWZj",
|
||||
"Xk7b6EeTy9OxRXasKkyFaxdhNBKw1LtN6EgOdc471mmhheal+TOG0GYZrkroLJlO+1Ewl/ab5NHmaaKr",
|
||||
"FjsBMx16fa4yrm/yDzacZUOYdTmEVfQjhMJ5TC8y93q5oGVpzpd3yMCNdRMy93HtjEyO3Iknx01Pi/Ur",
|
||||
"bK4ZFPCmkbwwDRpYZeAUP5tV7rm6pUdfpoHdiVeI5mjmuUaM8nSTL1tVYCjabJFlc08YNhuivnqwxHX3",
|
||||
"IPHJTWW6HdHImHbH6pFA3CL/xPzgXRhqIf05vr5yONDlwBM2gjYiVD4LvtBjXxh73J/QAg1YqB0TtICh",
|
||||
"oUoKJsh9WJocSOg3B4tnFbuXREVeNGBbpoM9nhZeyqxwrPQZ8yxAOpQ/ZpRtzuZps8An5T3b9t2icdZU",
|
||||
"62ugXJYkjNxAX+s5nZHVJt+GmtDnXuzJrhDOHSqyR6FCedkYMievc3NtiQX4VtPiqZmybyowwWMXEip/",
|
||||
"mfzkEE4hiGEs01YwjLJjhf2cbcqckIhde8LwAUHZHNFd5T/Jt/M3HRGEm/UVGUxo7wSTcGE52TOT+NyZ",
|
||||
"R+P/zmdx+jdDVveIMJtY/teUEDunRydHJ4yOeRhy503n1dHp0YmIKGaYYFHDvqgXOtOFeLyXz/O0VQAx",
|
||||
"dlJ7DN10IKtcdC7F9/cMDdIln81ydnJSHvgDBD6ZMxS95t/dMCCi7pcoLEybHv+NOV/h9ACs4eNBHIdU",
|
||||
"Cj+zo1qd8yok6TpyxNF589fXbgfLYh501VlD6VPyl4DZnUP3ofOV9mf4iyHwlvUIpM1QFQZHssG+o5At",
|
||||
"2CGhA1wXRsQhMbi/R24tRlMM1KL08fQY+FSkBLMeXADk99hDMj7+zn5Wf3vmePEh0dyeLtjv2AFpQifa",
|
||||
"3WHd+dt0aRf6tMWANmCuFnwExjMxWEDC9IG/Kpx8SjM4IuF15w2P5E+FRmkpHVWo8feBbMfWK876tURP",
|
||||
"v5WxNU5cF2J8n/j+0uEo9XLZsErIe+52ftsV5fWdBfApFqDnsERJngyc4WC82jgYOijehfEUeR7kt4+M",
|
||||
"vjmdVJGZpPgJa0IPq2+9WKgc7APv2+lqCOMru/YSV5Mum1+31iFxPsKPQeKMHt6GXB5vhBg4dvimFRCX",
|
||||
"Rl6VyaQSWyR0EonzPDae9WJ/IwvRLkEHe04McEBbMWApBji1bE8MqAdkhHokfIABPRXl3+w0jEJdUP4I",
|
||||
"PoYP0AEBy8nHWgtvrXTGgpiI0IS2kgYd2t1GSqTDG2SChHWvjruYLU/QOYPuxyZq3ISqBenQjZ2InZNk",
|
||||
"nP1WRcnpluco2PXDxDtWb+hmDbqU60xee9ggDgowAYELS0R8Tj9L9xKzYr193DJAnCRIg1j3hsBqtHaO",
|
||||
"YPW9Xmz9J+WF7VtPDtELI+7sIk40Zb+5Ofz4O/vvc9V+UynFWh2VNpRZxflG1koingvYpJywrzsVQpvb",
|
||||
"bJEcqObw5sU0HoVY49hgO9bKthyJK5jJyJujuEKqcfr5aqbw4zqxxrYllWo1NH+RCrCfne4vGAm3tL9f",
|
||||
"tL+AK5/hxtN7dwe3yBnWhKbSI/FADvJNHOF0jGNmp+e7hI07fokwvQD5Tq61aYNp62G+4dZ2m84ldlyZ",
|
||||
"suHmyxwuudXtEyGkW882orAJ5f3PbXIYIBJSaX78nXP883EUh1NovlzKt08H5AoMMLsuL1CQi+A3M3w6",
|
||||
"9U2IySgJbti89rYp06GXSq4dn3oVBCVycXB6Yvg92umpcBUSlmg+jNF/eDJykZWHZ6vgUZolMycByIee",
|
||||
"w+32Dtse552Q58NsW/UHR47MsA/ch+Pv7D8WVnxnTBsqdTTylMO+ivRG9kb73JhG4mEg7qV1Po+TfVJt",
|
||||
"TncDxm2QkTCf+PVuJuZZs1jyQeD74ROdXvciUKRaKXrZ71UqFie6PMcE+Pg7DrAVt1yNValf5pcAN2CT",
|
||||
"/GBmRhEn996xSQEZLaPsIaOUCDZllatxJaMEWMMmUnFRrE161YXOK6/EJRZp/Db2YvpH12wI4PV3VrIE",
|
||||
"KDCcvX6dA+J0EzpQFIf0H9Brz7A9Yk3TJZJVIHBAFElqLx9rvE2BHwmY+vDYAzN8nCYvN14aMbs1snYO",
|
||||
"mQPiTKEfBjM1q0CaKBvMylfKz6cXgNUdnYha2vXmMpmiOkvQwpNGM5b5dwLjZcYzHpjdIa/6mNtWhIiV",
|
||||
"3CnA+1IXH2vq3Vgx9AswS4vIa3NmVcghOqV8/WOz/txWwm7n9a6EH72FokXkwwUMSEk3YMYLSQfp0znA",
|
||||
"D1oJwxoef6f/qXle4rUapkvON0UBQiewNLXz4vSmQ58CuuMjP1+F3yAUZB1/FZZSLNQ27fiFqhSNTG8M",
|
||||
"qz87f/7G7z7bn3WiFmKnmsJ9mPAkTXsiIjJ+LokI852B2IiQYz+c1ekqfjhzfBRAmflIwFGUKJfh7BIF",
|
||||
"vKLIIUoVkeWJhA5LT+ZMlwbJwj53tNCggLDageWgS0PK15iIhMuhM4OEopph2TAzRtzyqJm5InWD4d6U",
|
||||
"5sW3mjoJCPI3MHXfofKuR+A34mAIYnfusJmUUr4V62cddCK9eq2MguEj9H/Bv9KJUOD6iQdN+0tb4o5W",
|
||||
"260W+JIF6AC2yq0nk9tQwFiUipny2Oe76fIu7ZSD0gq4Uk4dq0PWanv24MhVhVADhVhEsbbv5nmtNJX8",
|
||||
"yrFzGc7WP3Xo//ey0GHz66pSasx48KSVxH6Aowc/oMjE/Pf3GG7k3NnqSbd9lTrb6xUcZNprb6tW52Sc",
|
||||
"TsKsr2KzFoqJ3oX+sQenycxspB88Aj9hFYuc88GlA9M6+Q6YARTgrAKYqHDrAQKONPLwHPoXbKpDcSnY",
|
||||
"fETL59PzwSVDQk0AC8MkpqKQVbylYkKP/J3Gsajgy7SLNaIOCurxNGto9Rr1JW6azEospvD8+eDSzPJW",
|
||||
"vG6h1/AHgLzoSevyFvm5mW6zj290P5J+o7nRSmP+A1xi5aJknJa2a369ZGQgIu7rLpbnYYARvUoKEmOP",
|
||||
"TKHLMm94DrgnLAcNwo64tm/T2FANyxTehzGsBWZT5od3fGtImIMGxKzCWegiJkGfEJmrb3HFAsca+LK0",
|
||||
"Eoad3fIzmf26cmn8nQUg7hyxp0cXxgSgIAvdr1pnmo0PrmQoKZQut15cuiVildMlPe5Q7PDnSh3EImHf",
|
||||
"i27LdOlkGXIzH3FWNCu9lxhsKuUEwtqFaIo5yGke4LLH6whFAMXY+cWDTPBR7ls6wPnXm3/9WhRblU4Q",
|
||||
"doYt7IYRtJKHvKXtuljr9eDd7h3V/n7aWqDqLFApb1iGbTRQ0I7ZMWyppfGz3UpT+wiXh6KsbT2MSeKi",
|
||||
"KSMwdLfMoGMGR2iPW2CI74+nvQaBq8y3gGC9f0GTGNY99is0wSQxdaDMKfanPaA2ElqIm4QVppRjxZlc",
|
||||
"x7E5pkTL2jOKq6StOWFfzQmlKrsWCnTt7bNyitIVkV3G+ZxH65eaaHZXwMkUQ+K4IPAQyzMj6Xqjt4eq",
|
||||
"FTu3GHqMjTgshF6Py/AAIm2u7O3eUDRjpxcPhbUbCHYpYlrJnte2JF4y2c7xW6VrdQ1vO+es1JADnAA+",
|
||||
"iYGNopm3/bkfbxgKODpsHnDY+01Kyg4r7MSt+rt8sxHkUcd6ouyUAnD7JL2rJ+mr7BU6x/Apf6a8ac/z",
|
||||
"9locu2Dxv23CG0GdpGicuHO/1DjBrYjFZXtyLfrLVoqJw7xtWYoGGcvZioWXFAu2rN9VCJMe/RWhGKkC",
|
||||
"bzaY8NkO2WKS8vNPzsWzkLSHu9FissIZW2S0yjTB9cfmgQc8547NNMnuSzLcNq4AfJNWvgK8QPJha/kg",
|
||||
"8w238uHwTnkLZZ/5ti+yYnUVaoGQjDIQ2ImTwBE9q/MWcw+KS4QJ96KQtfEOVaaVI6EUNNT4J1kAunZw",
|
||||
"VD00m3JQKtpmmfU38Li3jnn6tKYZeiFPFwo3r1rHSPn/YDXlgAFoUeWOtr+Tre9Y660SW5YCgb/xMVep",
|
||||
"tPJuFn9rSDbAG6Jgdsdr+O0I8r7Ggeih9yh8eiweCTJPortFpSvRyxqxqWAbJYGUaM3jplUp2uY42J8A",
|
||||
"ZrY3i/SgsouxsD9xoxAFxPLcXaAgIZBex+VfMQQPXvgUpEdxg2P4PSQ3dPJDP4TZgSd9g5XQHWGw7nSV",
|
||||
"KvVnJ2envRP6v8nJyRv2v/8xyB3RvX/PbyKbOCAZpKnnsApqSOFbA9h7FCA8h95bNnhzcLcvG3OktoJ0",
|
||||
"ZHzSysc9lY/53dm4lMTHLisFbo5C46XC03w0OnnHm/zcD5QMBUxVqSmQxHN8hY4rkbbTKDI2qQ89nies",
|
||||
"9mVSNm+TRLXRsiUZVZAMG5dMMYx8sKwq7kS/V0om3uSnlkwcBU0kUyyRtkvJxMG0FUyxaN3KpVYuleRS",
|
||||
"QS5sUC6J1J823rcyvXqd963I3t663+6z+y0nF4cOaxe/xtpf0earBEMKmhino9jaWyXRWQMqOlRAWj3J",
|
||||
"i3u4quzTwMU1ZeT2LT7v45oiJpObAsVre7mailikm9j6uQo/V4GPJq/ckilfyNNV0kgTV9d9TH7+c/u6",
|
||||
"ljObW/B+A7WJubuKf9j5u9bKjAP3eKWTy7dHycL1vq8ZVszA7tYObcv/0p+15f29cHWpZe+uSm41Lq2S",
|
||||
"foVPq1APDXx7yG6tBQX4R+NR6a3a8qjBXbXmmIQBPQV7MSCwx26gdHPF3ltyWZ0/a+2xeOAerdvlsO15",
|
||||
"p/64irt0UW0Fwx4p7hp5sPrJrr/B34SY5fdAgRsuUDBL6XUBMQazihN+BF2IHlsZ1EQGBYnvlyg/WDoR",
|
||||
"WPoh8BwUOCBYOmK13Q6B38hx5ANUoLTilOvKkMxT8Cam200QHYcvVMwVTv+GbpUNLoeje+Bj2CoWhppj",
|
||||
"nOk0rLYqd9vc0YW/cC9Ogrr3jXzGwNoXjixDYPvKsf85S7HI4mj1zrGzjI/MBx/EPoKY5bmGVuBtMSDA",
|
||||
"B6QJKBurlrI3Tt+WeWoOJFKBApHG0Nlk14Hxlt37v8whmXMBIKrTOBf995ieXmHgL9Xf05qBOoEU+Ms7",
|
||||
"2aBWSZmGoQ9BYBHPkSsgaYGzFwrt0JS5NMZ4WGT1fbFYD+feBzN21D4Jughj5nyhkkF6twSB54QJoX8K",
|
||||
"1RFT3ZE2kHrgkXMB70Hi81z3/6L08C8H3TtJgCE7xnXLFzPdyUE7lSS0s1p6TV9/W4ehfau5kdMoVUVX",
|
||||
"/j6iv6/5CqVquMcewpEPlj3mKlGj74q2dFjhWhHeVyjB1TrwBR+MuVwctD6siFacvmHlkCJiJQX6BOrM",
|
||||
"ioAiS1+k1PCWze9aEmhFVyu6moouySc9yifVkivHo0x70Cf7z1LbVUiugRhs6B2u4Grvue099ye55+7s",
|
||||
"OMvkQnua/UinWe702MnJJq7X5pCfCW8gPUrzF/aKo6t1LT0VqFOQUvNMnSMFEgrfzV2/TytaMyQA+biZ",
|
||||
"j6lKIe17U9Hls8BAG2DwPD8zf0/ll5oyEnmSA4HHHMnS85+E6VVSFEr6347HiOJ/O05keIzO6MfS5SwH",
|
||||
"A7dtzlhPwwuwsryDzWO4Ape1p/gen+LF0DdLhu6WCHoFFj8W5eKqOJ3wDF8kYYajPN8f1XLxWNajW5GX",
|
||||
"1ekVdf3HZG31+tmy9J46eJ2Hie/xWFp6kdRpLnuUlyTHVWlxyBeRNSzRk0V5XRaSywPcuaXe/uqQFpi3",
|
||||
"Nnr9PNVoMrGqNYD8uBJ1pYqOrVBt9aSi7CJogYJZvbYk2jWWXu8hmYgpDvbuo5VBHozInGcr4RnNHHeO",
|
||||
"fC+GJtcN1qGh9Nu+IOGb00qSg5ckVfy5afECIyFT5J/PxyB25+gR1mlBopUAk3bXipAxgZFw1+3LgS3E",
|
||||
"hxzPaD2V8Lauu6trZNuUSWLfxZ5bSaV8Qsm2JujuczGlXFfIx1QWUjn2V5hfyie6/VQ2VYmmlIXrZZLN",
|
||||
"vUyU7beXRwNZX7WVRj+JNLK/a7Wy6HBkkcL425dEfjir85Tyw5njo6CkG5XN0Zfh7BIF0NYa1Iqhl41n",
|
||||
"8uEj9K1chnjL3MxVzCDpgPZ6h6DvGbPHQXrwOmw2BY6KQiasQ1NAxryXNpQEsECBMPaq1s8+v13ytTSc",
|
||||
"/Frta8ADn95DMXRFpHsFFBdKs1Ugyfpv95BSpUFbPH/d9HOpFFbOgstw1vwYEI5GFWnNmQcEFp5EBsf9",
|
||||
"Cfv5XHV82bRjDh+cT1SXoJe7Jr2MKw6HsJHzjUDqj03jK3jdpMSWZqYV/jRFItdRdOo6V2sy5q4x4oW9",
|
||||
"ksCbJmNKAzvEDMYnn914y70sxct0SS217/a2wYnRCyG/aMBv/AQuFdGwZbZcNtPq/EsBnw0Fs2q+Opws",
|
||||
"TFvyOuUIaHK4RWl+kVwx0/acO6RzTvDJCqxXcd4dA58SRjDrwQVAfm8Wh0lU+XBKlTt5CxTkxcZw2ACO",
|
||||
"GKDIun3aZEBbvKcNDiXSafsnoQ4xDctNGTeh5Z38a2IFtTY6x6yvPuW56hjjpw+pUG9uBdzYnXUllDe6",
|
||||
"2p1ul71XOAE1NNTytfbup+W2zZ6SxxgSUudahNnuyS6O7FKdzUAhFxTMxqLPgST03dExqSBmjTNS3ZOW",
|
||||
"lTTXOg2aNsZHEeqR8AHWJMNz+jdDh7er5pp+hCa0WatP4mPmV3QzZPjAIzFLQz6R/lGtDb2oPFKK5KhV",
|
||||
"mCH9cZ0yLkFG7XbE3uqIDAGS1hW1cJsmjOKkLX9tOGw2Y6aGDFZ14Fh4S/HKcjmXKVPa1cxppk23utfu",
|
||||
"CQ9waeWcQNs1Tz/DyOAjXNrkNclgSt2XhxfYNh8mlxWNAZQu0cOLFUHMYtDWSOVjA+EoCXgcpTB8vYir",
|
||||
"B9vPl3H0YFPvgZuHCofq5FFBLFkGIbh0HoGfQH0eIfgNLCIfUpH9AJenb1jT006X/uuM/+uMivfqfEOf",
|
||||
"NptuKFsGT1yaZhyqpnPWeHj4mYZWirRrvWsCs8+lorQw5K5vQmbjGnSQ9grAEMBwUWMWFomJX8S9h1NC",
|
||||
"E5sv5D1+du/qs//ezawjwZ9CPYXfXAg9aCjlyPemAZ/XX0yOp4n/YHane5v4ooYRxJlMwJVCgfb5iQUD",
|
||||
"XX5D4YBfUjrg5uKhjb7YM/nA2FQVEnjDUsIFgQv9Crdb9p0bMpTE2TkV1yQ1uFsJH+FnVigYAuwVCnFh",
|
||||
"iGHkg+XGxUakFIT6nloCRkkw5MmJt1XEw7rulBBNDGkwy1HSCqm9FVIjRqnbkU/MjGZpY+W2OQs760e4",
|
||||
"bJ/1MmPjSrd1huz2xq67sTvC9rtJPhCngfGc5jyImx3NI3nE/KxHM0fAvhzNmzGrceBarf4nPTC/s//2",
|
||||
"nhCZ9+QnZt2uDT8CBPDDM6g0EF4AAt5D8gWR+USyfa38kOyjFx8lkHf9dvnDn/J001ZJx8Cooj3l875s",
|
||||
"CmaseberIfJqfkbBIyKwacCE7KV3Ah2yr63uK30/FXys5PUpsd36eurCITJa3FIMBJ+gktbb5ywl6oGj",
|
||||
"xC7YgeP2RSMcOLirBDYIwvjZY3vPznak9QJi985V5FudXIABmPqwFwMCe2xMyh6C11bRi4UUkj/0+L+f",
|
||||
"uYjxIYFlYXPBfsepGclG0PA+B+u9l+f6ath6KToO/eSvlS2cQvZZtuTYjBNhRq4mXTS/j7UR9M044XCi",
|
||||
"6A+FE7Yb6L+aVvBiof6WnMvhOxjOFSH4jTm36uRbwMWUMV+jG6TspWfxT+xre4OU1KjgY6UbpMR2e4PU",
|
||||
"3SAzWtxMkKAY7/g7/8NCCXSAAMK5j8NFXZAtp4YfQxUUyzbBxj/vlHd/2wrvrqID/hxcu0e5aq8MqWlT",
|
||||
"Js1tTAN50ZWEbJFGqjSJWQT8GDrwXoiA7Sq/fLvslF+Bjj1JeWUpvTR6sNi3Vni9sPAyypUVhFeV1hPF",
|
||||
"4QKSOUxwb0F1ULe+fFHWxRFdUh+8usyUN2nXT2KyH+KiQOA3chz5ABWoojhSkztAGcstU740U1IO0OzL",
|
||||
"pm4g/05gAq3ZkLVuzIH/oL0OiPkOO7L5kIJVt28PydHeahksnEcYYxQGrUzcJ5mY7k5ZIkrOWVUmZk99",
|
||||
"Nq7ecfrYWOfrPQIEXtKGbV6Nfa5Ou4kcDLWY3GamhZTO9iDbQhGWXZXVyPNag2AChZ1bP8OCFVzFTSZu",
|
||||
"mbfFJf91VYkrevSi0Efusj7lpOzg8A42CSelK/QN69GmmzzWoWW1R6PCbrSPRzvP2op94D5UJ5oc0ybO",
|
||||
"E5zOw/Ch/JzKPn/hX9vnVJ5jUsVJk9tDAdX7xA47qnh8G4CEzMMY/Qd6fOLXu5n4EyTz0GMVPYDvh0/6",
|
||||
"ast8g5geyFlAPc/Yx7UY8RgTEBMjO47pV36OXfcTMnfYZaXIkLdYPtswgK4pQlnPQ+TMVydnGjyo3MNQ",
|
||||
"Jo6VHFbmEHjCa8QPOcHUWDzZhkM3iRFZMvy4YfiAIB2UFUX6qtIDQ2l+RkkIdAdWpoO6vL/jq3GRAAsC",
|
||||
"OcCtHBZy+Go8VFHVQBIXsdzK4r2TxWVGSCXx1XiNdMOFgXUM1kZjMATk+asyy/DmaDY/qXVURXFXW4be",
|
||||
"I4Y2cp4lR1eeqKJOZ28XT1aidPihvVxt31ygQ0wzm0Fazzq3M+2jyj48qqR7s+lnZl1V9UrWzQqoO9Ml",
|
||||
"Z6jC6c0J8UDseN19rey+TYkhtmhF+dBKhJ2VQlVp8Qnweqh1IkI91OlPdKNXrbJdLSdqcwL2CYGLSCS3",
|
||||
"ZG0V8WESHIeWDLCVIFUu8QgzX2khQjgR+Pt3QXjhR7w6RtkVQ8eQdqzIHcaSLNryMGvesvA+ZjOLk0Bs",
|
||||
"VY1HOwqihPlD8Mdd3XKf90JTaXOZVcgXtuEvIVCyNVXaAngz4SxQJ1zeQzLmw7ai5eW0g2ZZeg2WBjFc",
|
||||
"e6HY5wuF3KWtSA0C8EMPE0BqDIYAP7BqUMJSWGMlnAD8MGaD2ouI4cWPaBtMEdGAQ7W4bnl0D8yAJjbY",
|
||||
"RXok4TXTewrjh6pkEZkDttGlqfVmyoJJOCq+MKRShFRV9aTISANeeEdHbkf73LZv7+cK+a+exFAMYmKh",
|
||||
"n/6dPMc/HBs7KsarmdlrlIJQbm3Lufv3UK4y3kqHJaOK6oc0ekJy4V3tJZ+dDT/9YZlhoq15vZEM1VJ7",
|
||||
"yMfore5dKRHNDUHNa1Go1X81JSmUkr1tYQqlMIWCF1xj0M3VV365MhU6uK3L2Su23hzBtJfUvSxfkd+j",
|
||||
"cjhwtSmpicD5rv6zzo8lxwm1J7Ag00N2aymwvh40FYMHrCaI7Vo1s0Dr5mKO68+/INXH9HfzNLU6Px+z",
|
||||
"x8jaxyT+ZMkZWgX6qIavh2z0lrlfnrmzLCY3ShFKDuM67055HLHtbs3aOzJrf1FxH9jkD8k2qanKsDmJ",
|
||||
"g+cgglvSI8Zs7FbeHIwywTes1Sh+II0ijV0RPkOVkaGiUjtjcd9P38exRteoYn0WOMldWQaysF8rAzYO",
|
||||
"4CXAxBlesIT1c+j4QO6gKU0RwGToGfMUvTrT5SnagY9tk4KepbJ8rUlk/3xrVpAl9o43drIQW71MsJZ2",
|
||||
"Gs1PmTjNg/cg8UnnzUk3Jyp2kUItnfv1KpOPeSa16dJhE+gnFZ/M+Rx2oXa1jz2b17c2mZIxHbM2GOhc",
|
||||
"xjVMAXHnpceeKo3pcIKBtuXloLyTcGTYuu2LaJLyU8mmH3sixVLzPVX6Rkkw9HAu9exaCC7n221oEBIR",
|
||||
"SO3rUU16NE42u3i5wcduHAb1Gglt5fwdTjOgSIxms1r3ifM4DH5qNeVg8rumG4s8Ou0MklQlPqpJ4226",
|
||||
"uG3hrktnbgreVZ0qpZ2SUXyT6WiH5lMdZobyipy506VzL/Lybix1rypFsH363ulyexl8FaVgxzl8c8hY",
|
||||
"Q0Nvj12Nll4657akrtND9/g7/U9P/mpX5q58EFs/fFDCOfCid+nqTWDlMLr7sneW9em0m9jmBy7Wi9Oj",
|
||||
"qdlbRZ4gvj53qx4T12SuQ3ZP2mPO2tLR2R6bh2DYb3RYb0Q+1JWXZLOmM1oLhwOvNblf8mFb1SZVATHh",
|
||||
"Bg4rWx+lAl7C0ca2V6cqqMUgW1WhWg4IttyGKLBT5dlxYPugp74y1rsptQazfTaYsUfkBtYy1n6HprJ9",
|
||||
"tONFIKZIM7iuFMDijb+ojxk7gk+TIkYLm3AS2S5cfW18FktEkGBoVW9Rtl3FujVmfYWdyQa4BxR4VlCx",
|
||||
"ho1B+ogCrx6agzemErSADringJacp58AlrHM6hI6Zydnp70T+r/Jyckb9r//MRqrWfc+nUBPvPRY7VEo",
|
||||
"OrbVyCnEU3gfxnCbIL9lM2wS5gos36MA4fnqMMv+O8XzpoDeKKa39zhQtsT/tE8DRd2xtXBsxV16O28C",
|
||||
"zEPaJn8/cARo9KDLs7+a0N8yEOKQK1C3anirhu9eDW91y1a3fJEQKLxmxXYmgNrKIvXn+xaqp2fnPAXV",
|
||||
"S3x6PNZYDdOWq9gPx7Jza0XcZyvi9u5FKQEclOdUq0y1ytTBKFPZMjJRvRHbbAqSFYOnVloNzFuNkSxJ",
|
||||
"mNbqsFmtxKABbFcvOZ4m/kMv80TURxS9TfwH4dS2IUWFjng4/olb8kMo81SGFtuwo2n91uy2jkjlmsyJ",
|
||||
"51QSi9N2rYSQEuKt1T5vXVJwd5UaScEbOb/EUPb+dYNi43Ccq3YqNmSazgZiQ+zT/ooNuaYasSHW0YoN",
|
||||
"g9io3edtio3v6Z+9Us7I2ggIPcgNhcaBx0FocGCsZqRF9d6GRuh3t3V4LMZGGPDUzOPRQBs1URIbYcCD",
|
||||
"rlB8UNy3zQO5vesfegzFtuVIdTRF7jqwIcly4IEWey9cthV7UZIuDeqjZmRUzvv4sleWWgmpBnv8lMrP",
|
||||
"AVR/u626LG1KVtpdotIUms9Z5paqMlYOcAL4ZM7fYp++RcRDHU7Rq/pMItU5MytB25Fo5NheNSxNVI42",
|
||||
"bv5OZWOz4Fu1VpcZ/lYy7l4y7l2hEyHoqqh8O6mzFFmcc+rRy2OpGwiJbK/h6hSjVgrvUgrLHVhBM61Q",
|
||||
"6/ZcMVUlcKuYtuLXJH6FQlKnE29c5PLqeT03TAJSEy/B2shc5LLsI3gEyAdTHzLpq4gbvX3hPSS8Oh8+",
|
||||
"ZzMevOitSxl/4CUjcpu1opmSkwonn/YF0eAwnUPSaoUk8uyfYBjjYzeJY1jN2ZjfDnhDh3Yrce8thvF7",
|
||||
"SM7FYFukOzpTQzpjELcFiF++ADF0kxiRJRPjbhg+INhPqOz66ysVVYWkQ3lyk+TOtl9DxjNE5sn02AW+",
|
||||
"PwXug5Gcz8NF5EMCOU1f0/kd7XlEJ+L2qPds6GuKy3M5fIHAX52c1by9umJerzzvHAKPHW7fO37INyO/",
|
||||
"D0Wx/lxAZg53coH5OSzRhwmIzaJgTL+uhjjWtTnWGDzbxxmDriHCwnDmw+3QGxv6B6c3jr4N01uGuB+O",
|
||||
"3lDwiAisrt2EWTST1IZ5B6Z0Wx3fdIQJ6zsUc23xFFcnsnJm9xGWG5NfYKsvWh+rrCZPAXsZ5U00N8Qc",
|
||||
"7R0D14URMVve+uw7Ti1sYpIStambz/t0tmNP4oPziRRDksEAVEF9fOU6+ms9plLy4tgu7b09fcWQVbeo",
|
||||
"qKRPvzejL96ns6269HTwDdAXX3lLX5X0xbG9An354QwFZrK6DGfYQYED2Nl4VKFgXLKBtuScQY9gOn49",
|
||||
"Ie3uHu2Hsxn0HBS01+cXvj53O7+dne1q3VEcUhpgRttBQBBZOj3nEfjIY5PRTRFNUDBzoBzJrPAywtZf",
|
||||
"5budbz0Y0Kl6MSCwx2zgVIfmbzU6Zg4TUsPNYULs2DlMXt5YJZgs3LNC3a2RqkabZtRja59awMUUxniO",
|
||||
"ogZ3OKWT3T2On4Gfsm4iKcVWCVw/afMLnYqi9lK3yqVOxWA9SUYA46cwrnClSHOx0w6ObF8lUm/kmNtT",
|
||||
"ks7nIJilE+2TtuQyyLwUUa04b5WmZkpTNatzys8z49r6VAxnVBLHVddu3gJXqlSpp9S2+F6CsU8cL5HX",
|
||||
"PjS2TL+Zm5Kk8s1clrAP3IetPFKN6ch7/EZVI0kbPlo9whgLEIzuT3QNop10gcIwftRo6cPgPnwPyWcx",
|
||||
"6EZrEiuQZhkaT49Ojk50OSAVz6O/0q5fLcoNTyoWW/C2rCD2L9CJIUniIIe8wk2HitkkCCj/pFN868kh",
|
||||
"e2HEU06VWeAJTudh+NATjmjH38UPFuHv9KgTrcuOavx3+8h2MZDZESydaMd+YJah4hK+9mB7eeNEMTxd",
|
||||
"JVOj95do8dWKOY4Fnm3MFLKp8Kuv4RihuGHbRJl7yzeb8Z/k0HP3SYEaipmqjCsUK2kdEIGddLta9twj",
|
||||
"9mRWmdIWNeXRlDfZH8813te8ldaxmjlnWvEcdzKt8lnWnPGH47Hc2HdUrLi1R5ackksBX/KCYvZBZmp1",
|
||||
"feXHSkK2TzuwF7S8rSj+3LlhOisEBhKJst3FQVnymhqU33KaoebiOsxWOE2KwT1WicCa1WBtcC/aywiZ",
|
||||
"Jkm0UgDbAL0XzhwhiFWhmBXjY7p1GpY9JzRQuX6GQLEVg8Na3npp3lKj0NZhLBu1z567mumBe8Fgm9cF",
|
||||
"88iwjZUXOUlzXLZr5dBKIhTVw1YeGBXE9ZizRk20KpdHNylfFy9lvMf0pcN4UjYoj7cP/KwpUcELTGyg",
|
||||
"fvDq1YP1gM3iMIlY3Y8MBLlRRlBYp49w2alNA7JlIbFmLS75qNSW49pDbWKl+l+NBJdMTWR0bpFZNZom",
|
||||
"C1opR9BeSq6Jhl2OnOE9s27jhFIH9LqMq3xAICYpTyHs3EPizqFnqg6VCf49V6QEGayYeOjF0g0p8DbK",
|
||||
"M9RmF2qzC20hu1Aj0SxkA7Z41cqd5FZiWfjWHJAJ5keQy1uWctJhaj1VsJV3e6UCZqS4qgpYdPybQhDD",
|
||||
"OHX862pdAZknGZcHSex33nQ6z1+f//8AAAD//8trZ2RWNQMA",
|
||||
"H1bQcDxgYTP8j9vRZTV57IVzndRpLF1myoeZgob3g6vBiMmY98PJh9u3zGluNLwZMH+3/vnHTrdzObwa",
|
||||
"9Jkr2+fh/zHhJ7sPbj6kudLno7mnhDT6td4SrbfEj+Ut0To0lJ8h1jRr7rdZ/mCswg1fnGueeDX2Y/Hq",
|
||||
"u5YNmbXODMjZFSf/4Jt7f03fdlWrn3IaXkAiixIUXGaTwP6NXyQ0wHNQb7FQI7tp+3dhrIFHPr+wfBY2",
|
||||
"QTSsYXaXzb/drx8WwMHBm0vOUusOUY6M7uRwItEtIStvbV4dyG+vVxOLsoXqkOqUVcC+1BuGqh01eMQw",
|
||||
"YHxTDxpfdA4UEkXmxewolK7g5qPGXPbfiwJpWpVcaP28bMNGCxY2Mg+Kkg9a5TeJDUmdZd8k9hsZpYTx",
|
||||
"gI6r2+scSnjKLHMq/00tEttdn6lcFfnC6SnmDO+dICROFIePyINe1wFODAIvXMhOT8j3nSl0ZjCAsbzG",
|
||||
"qNR1tjWMN0ezt58EuNre7JqUUzhrkU2lljl/9E6tFHnxY2WpyHUxMqa4tN8Bw76xB0YQeFm9xJgPtdqV",
|
||||
"fwHJPPQarVaA/on3THX789AzUO2HyeRGZqJ2Qy+l4Fgg3z56/w7w8H02c27ir5YIryYhgcqac17SvGxt",
|
||||
"ncZLSwEr086ndOsyY9ek0+3cXI/Zf24nTEsynZA8pAlXxTth8X7Cqxq5IHAiGFO6OrKvL/fc7YBHgNhl",
|
||||
"1pxDLZdeqDwt/AbdhEDHDQNR+9JfGnwUEY7YzVqbL4tSHUoz8QGM0SyAnpN1Ypan29vhhSPYZ/c3Sh9M",
|
||||
"oY+rC3+yNoylct4c/BiwI0UuUOk4ui3zASYfIIjJFAJSZRvIbRWr48oqMABnLnvnb+VnJ2dnvdOz3umr",
|
||||
"yenrNye/v/ntj6M//vjj1es/eiev35yc2Cc4AZyZqXowwARMfWZs20NIF+CbmfAX4BtaJIvNMcD29Q6z",
|
||||
"vhFDF6bVS7Epiwttw6NEePW6MF6FgEf5uTQ0HIuqP1nVT1ybcQk7WS8nDNR9aABZcV4tdElACWYY3Id2",
|
||||
"vDpSOtBD1w9N5xSGCxDNwxg6tJEQEyuieSzHGrP5dEHy1uUnsqnTvDLnk+FnXuo6/fOmfzs2hPDaxI1w",
|
||||
"ZKUxI/zcNObaEic5l/cFIOuNebz3bZ1ufDu61AzfVFVm7bVqjiLKS6d8ZU5cmUWJdt20k01F+Wpetrpm",
|
||||
"8uoUoBV4ePkXTOOlIAVylGf+Qu1qEMwS8aRlLRbGFx8xPxZ5Z6X0czlxjV5tExJp8I3EQNsAew/mYUuL",
|
||||
"YxCpyun1ZZ/F7t/8c/KBPZBM/nkzGJ+PhjcTvYUn42TV9WFw+e7D9ZiH/n/qX/V5SpGqEvmpGC4bMVXa",
|
||||
"1Me1pL9YuM12G5QJ5QeOLBSqLy/5dzg1CFb6RQeQFX3+GU51gnwnmoMRc7KonEZ5A7PV15paF4H2alL9",
|
||||
"wCScyrIbQ+UKxAtNMzmhPAZJZFZafTXnQurKb5CJ4pGA6426YvkzSJTvrBS5xn8hkEkweE62GSRY+Cmm",
|
||||
"XZ0Z7ZuedYrlWYswVi15TGJA4Kw2W7QC4WWuX3MNO1Oi84WUi/lmX53VGybk1MXVdLVYrdqi4YUuLV4K",
|
||||
"4PBCi0PZ+yMKcqaAd7dX55MhE7MXt6P+20uqWl3031cKSDqIPD8bUTCbXcNe8rv+UF4rJm/H57lejX+u",
|
||||
"2E9jLiHGJB9hVXgdCQnwdRSb8tgDXBq8YuTwlCztIvjkLQw4OIIuukduNonzSwQwhp7ziIDwTv5VzxVG",
|
||||
"RDRwmcp+vVFakziBmvHrXiBV36P0Wn96cnJi9CXSDpP3/mnoyNNoQX+HUynGbM9xQyGAtaNd+Ym4a9MX",
|
||||
"n1vc6V8GhJw7zCZdW1SvBa1/i7n0xNtlg8EnSq+yw0lDlcTosrJOLulsINUZRQH7a7Uw2ZMbnuK2Yn8o",
|
||||
"jJJgjTy75VHeIejnzn01jUNGyzkppkjGmknG0h2nld2t7G5l90vJbsMcP6Bor/DnW0E0s9GGBC7MHoKG",
|
||||
"+0p9Z2P1tTFLjVWdgHVNn6ks+9bGk2ptYECDTC+maC3mKhCL6pYQqYxaRz2lzKE3g6sLnjA0Sx2qyQqb",
|
||||
"zyGapht92z//eP3uXe0pyaZd6d6cFyhmYpzkxUnRYyQMbhTJX4KVNhi7c+glfkXmdEPntY+jL8W8GZYC",
|
||||
"pmazMa+qbfSjyaXr2CI7VtWnwrWLMBoJWAbeJnQkhzrnHeu00ELz0vwZQ2iTDVfldZZMp/0omEv7TfJo",
|
||||
"82zRVYudgJkOvT5XGdc3+QcbTrYhzLocwir6EULhPKYXmXu9XNCyNOfLO2TgxroJmfu4dkYmR+7Ek+Om",
|
||||
"p8X6FTbXDAp400hemAYNrDJwip/NKvdc3dKjL9PA7sQrRHM085QjRnm6yZetKjAUbbbIsrknDJsNUV89",
|
||||
"WP66e5D45KYy645oZMy+Y/VIIG6Rf2J+8C4MJZH+HF9fORzocuAJG0EbESqfBV/osS+MPe5PaIEGLNSO",
|
||||
"CVrA0FAsBRPkPixNDiT0m4PFs4rdS6IiLxqwLdPBHk8LL2VWOFb6jHkyIB3KHzPKNif1tFngk/Kebftu",
|
||||
"0Th5qvU1UC5LEkZuoK/1nM7IapNvQ03ocy/2ZFcI5w4V2aNQocpsDJmT17m5xMQCfKtp8dRM2TfVmeCx",
|
||||
"CwmVv0x+cginEMQwltkrGEbZscJ+zjZlTkjErj1h+ICgbI7orvKf5Nv5m44Iws36ikQmtHeCSbiwnOyZ",
|
||||
"SXzuzKPxf+ezOP2bISt/RJhNLP9rSoid06OToxNGxzwMufOm8+ro9OhERBQzTLCoYV+UDZ3pQjzey+d5",
|
||||
"2iqAGDupPYZuOpDFLjqX4vt7hgbpks9mOTs5KQ/8AQKfzBmKXvPvbhgQUf5L1BemTY//xpyvcHoA1vDx",
|
||||
"II5DKoWf2VGtznkVknQdOeLovPnra7eDZU0PuuqsofQp+UvA7M6h+9D5Svsz/MUQeMt6BNJmqAqDI9lg",
|
||||
"31HIFuyQ0AGuCyPikBjc3yO3FqMpBmpR+nh6DHwqUoJZDy4A8nvsIRkff2c/q789c7z4kGhuTxfsd+yA",
|
||||
"NK8T7e6w7vxturQLfdpiQBswVws+AuOZGCwgYfrAXxVOPqUZHJH3uvOGR/KnQqO0lI4q1Pj7QLZj69Vo",
|
||||
"/Vqip9/K2Bonrgsxvk98f+lwlHq5pFgl5D13O7/tivL6zgL4FAvQc1i+JE8GznAwXm0cDB0U78J4ijwP",
|
||||
"8ttHRt+cTqrITFL8hDWhh9W3XixUDvaB9+10NYTxlV17iavJms2vW+uQOB/hxyBxRg9vQy6PN0IMHDt8",
|
||||
"0wqISyOvymRSiS0SOonEeR4bz3qxv5GFaJeggz0nBjigrRiwFAOcWrYnBtQDMkI9Ej7AgJ6K8m92Gkah",
|
||||
"Lih/BB/DB+iAgKXmY62Ft1Y6Y0FMRGhCW0mDDu1uIyXS4Q0yQcK6V8ddzJYn6JxB92MTNW5C1YJ06MZO",
|
||||
"xM5JMs5+q6LkdMtzFOz6YeIdqzd0swZdynUmrz1sEAcFmIDAhSUiPqefpXuJWbHePm4ZIE4SpEGse0Ng",
|
||||
"NVo7R7D6Xi+2/pPywvatJ4fohRF3dhEnmrLf3Bx+/J3997lqv6mUYq2OShvKrOJ8I2slEU8JbFJO2Ned",
|
||||
"CqHNbbZIDlRzePOaGo9CrHFssB1rZVuOxBXMZOTNUVwh1Tj9fDVT+HGdWGPbkkq1Gpq/SAXYz073F4yE",
|
||||
"W9rfL9pfwJXPcOPpvbuDW+QMa0JT6ZF4IAf5Jo5wOsYxs9PzXcLGHb9EmF6AfCfX2rTBtPUw33Bru03n",
|
||||
"EjuuTNlw82UOl9zq9okQ0q1nG1HYhPL+5zY5DBAJqTQ//s45/vk4isMpNF8u5dunA3J1Bphdl9cpyEXw",
|
||||
"mxk+nfomxGSUBDdsXnvblOnQSyXXjk+9CoISuTg4PTH8Hu30VLgKCcs3H8boPzwnucjKw7NV8CjNkpmT",
|
||||
"AORDz+F2e4dtj/NOyPNhtq36gyNHZtgH7sPxd/YfCyu+M6YNlXIaecphX0V6I3ujfW5MI/EwEPfSOp/H",
|
||||
"yT6pNqe7AeM2yEiYT/x6NxPzrFks+SDw/fCJTq97EShSrRS97PcqFYsTXZ5jAnz8HQfYiluuxqrUL/NL",
|
||||
"gBuwSX4wM6OIk3vv2KSAjJZR9pBRSgSbssrVuJJRAqxhE6m4KNYmvepC55VX4hKLNH4bezH9o2s2BPAy",
|
||||
"PCtZAhQYzl6/zgFxugkdKIpD+g/otWfYHrGm6RLJKhA4IIoktZePNd6mwI8ETH147IEZPk6TlxsvjZjd",
|
||||
"Glk7h8wBcabQD4OZmlUgTZQNZuUr5efTC8DKj05ESe16c5lMUZ0laOFJoxnL/DuB8TLjGQ/M7pBXfcxt",
|
||||
"K0LESu4U4H2pi4819W6sJvoFmKW15LU5syrkEJ1Svv6xWX9uK2G383pXwo/eQtEi8uECBqSkGzDjhaSD",
|
||||
"9Okc4AethGENj7/T/9Q8L/FaDdMl55uiAKETWJraeY1606FPAd3xkZ8vxm8QCrKcvwpLKRZqm3b8QlWK",
|
||||
"RqY3htWfnT9/43ef7c86UeuxU03hPkx4kqY9EREZP5dEhPnOQGxEyLEfzup0FT+cOT4KoMx8JOAoSpTL",
|
||||
"cHaJAl5R5BClisjyREKHpSdzpkuDZGGfO1poUEBYCcFy0KUh5WtMRMLl0JlBQlHNsGyYGSNuedTMXJG6",
|
||||
"wXBvSvPiW02dBAT5G5i671B51yPwG3EwBLE7d9hMSkXfivWzDjqRXr1WRsHwEfq/4F/pRChw/cSDpv2l",
|
||||
"LXFHq+1WC3zJAnQAW+XWk8ltKGAsSsVMeezz3XR5l3bKQWkFXCmnjtUha7U9e3DkqkKogUIsoljbd/O8",
|
||||
"VppKfuXYuQxn65869P97Weiw+XVVKTVmPHjSSmI/wNGDH1BkYv77eww3cu5s9aTbvkqd7fUKDjLttbdV",
|
||||
"q3MyTidh1lexWQvFRO9C/9iD02RmNtIPHoGfsIpFzvng0oFpuXwHzAAKcFYBTFS49QABRxp5eA79CzbV",
|
||||
"obgUbD6i5fPp+eCSIaEmgIVhElNRyCreUjGhR/5O41hU8GXaxRpRBwX1eJo1tHqN+hI3TWYlFlN4/nxw",
|
||||
"aWZ5K1630Gv4A0Be9KR1eYv83Ey32cc3uh9Jv9HcaKUx/wEusXJRMk5L2zW/XjIyEBH3dRfL8zDAiF4l",
|
||||
"BYmxR6bQZZk3PAfcE5aDBmFHXNu3aWyohmUK78MY1gKzKfPDO741JMxBA2JW4Sx0EZOgT4jM1be4YoFj",
|
||||
"DXxZWgnDzm75mcx+Xbk0/s4CEHeO2NOjC2MCUJCF7letM83GB1cylBRKl1svLt0Sscrpkh53KHb4c6UO",
|
||||
"YpGw70W3Zbp0sgy5mY84K5qV3ksMNpVyAmHtQjTFHOQ0D3DZ43WEIoBi7PziQSb4KPctHeD8682/fi2K",
|
||||
"rUonCDvDFnbDCFrJQ97Sdl2s9XrwbveOan8/bS1QdRaolDcswzYaKGjH7Bi21NL42W6lqX2Ey0NR1rYe",
|
||||
"xiRx0ZQRGLpbZtAxgyO0xy0wxPfH016DwFXmW0Cw3r+gSQzrHvsVmmCSmDpQ5hT70x5QGwktxE3CClPK",
|
||||
"seJMruPYHFOiZe0ZxVXS1pywr+aEUpVdCwW69vZZOUXpisgu43zOo/VLTTS7K+BkiiFxXBB4iOWZkXS9",
|
||||
"0dtD1YqdWww9xkYcFkKvx2V4AJE2V/Z2byiasdOLh8LaDQS7FDGtZM9rWxIvmWzn+K3StbqGt51zVmrI",
|
||||
"AU4An8TARtHM2/7cjzcMBRwdNg847P0mJWWHFXbiVv1dvtkI8qhjPVF2SgG4fZLe1ZP0VfYKnWP4lD9T",
|
||||
"3rTneXstjl2w+N824Y2gTlI0Tty5X2qc4FbE4rI9uRb9ZSvFxGHetixFg4zlbMXCS4oFW9bvKoRJj/6K",
|
||||
"UIxUgTcbTPhsh2wxSfn5J+fiWUjaw91oMVnhjC0yWmWa4Ppj88ADnnPHZppk9yUZbhtXAL5JK18BXiD5",
|
||||
"sLV8kPmGW/lweKe8hbLPfNsXWbG6CrVASEYZCOzESeCIntV5i7kHxSXChHtRyNp4hyrTypFQChpq/JMs",
|
||||
"AF07OKoemk05KBVts8z6G3jcW8c8fVrTDL2QpwuFm1etY6T8v7CacsAAtKhyR9vfydZ3rPVWiS1LgcDf",
|
||||
"+JirVFp5N4u/NSQb4A1RMLvjNfx2BHlf40D00HsUPj0WjwSZJ9HdotKV6GWN2FSwjZJASrTmcdOqFG1z",
|
||||
"HOxPADPbm0V6UNnFWNifuFGIAmJ57i5QkBBIr+PyrxiCBy98CtKjuMEx/B6SGzr5oR/C7MCTvsFK6I4w",
|
||||
"WHe6SpX6s5Oz094J/d/k5OQN+9//Ncgd0b1/z28imzggGaSp57AKakjhWwPYexQgPIfeWzZ4c3C3Lxtz",
|
||||
"pLaCdGR80srHPZWP+d3ZuJTExy4rBW6OQuOlwtN8NDp5x5v83A+UDAVMVakpkMRzfIWOK5G20ygyNqkP",
|
||||
"PZ4nrPZlUjZvk0S10bIlGVWQDBuXTDGMfLCsKu5Ev1dKJt7kp5ZMHAVNJFMskbZLycTBtBVMsWjdyqVW",
|
||||
"LpXkUkEubFAuidSfNt63Mr16nfetyN7eut/us/stJxeHDmsXv8baX9HmqwRDCpoYp6PY2lsl0VkDKjpU",
|
||||
"QFo9yYt7uKrs08DFNWXk9i0+7+OaIiaTmwLFa3u5mopYpJvY+rkKP1eBjyav3JIpX8jTVdJIE1fXfUx+",
|
||||
"/nP7upYzm1vwfgO1ibm7in/Y+bvWyowD93ilk8u3R8nC9b6vGVbMwO7WDm3L/9KfteX9vXB1qWXvrkpu",
|
||||
"NS6tkn6FT6tQDw18e8hurQUF+EfjUemt2vKowV215piEAT0FezEgsMduoHRzxd5bclmdP2vtsXjgHq3b",
|
||||
"5bDteaf+uIq7dFFtBcMeKe4aebD6ya6/wd+EmOX3QIEbLlAwS+l1ATEGs4oTfgRdiB5bGdREBgWJ75co",
|
||||
"P1g6EVj6IfAcFDggWDpitd0Ogd/IceQDVKC04pTrypDMU/AmpttNEB2HL1TMFU7/hm6VDS6Ho3vgY9gq",
|
||||
"FoaaY5zpNKy2Knfb3NGFv3AvToK69418xsDaF44sQ2D7yrH/OUuxyOJo9c6xs4yPzAcfxD6CmOW5hlbg",
|
||||
"bTEgwAekCSgbq5ayN07flnlqDiRSgQKRxtDZZNeB8Zbd+7/MIZlzASCq0zgX/feYnl5h4C/V39OagTqB",
|
||||
"FPjLO9mgVkmZhqEPQWARz5ErIGmBsxcK7dCUuTTGeFhk9X2xWA/n3gczdtQ+CboIY+Z8oZJBercEgeeE",
|
||||
"CaF/CtURU92RNpB64JFzAe9B4vNc9/+i9PAvB907SYAhO8Z1yxcz3clBO5UktLNaek1ff1uHoX2ruZHT",
|
||||
"KFVFV/4+or+v+QqlarjHHsKRD5Y95ipRo++KtnRY4VoR3lcowdU68AUfjLlcHLQ+rIhWnL5h5ZAiYiUF",
|
||||
"+gTqzIqAIktfpNTwls3vWhJoRVcrupqKLsknPcon1ZIrx6NMe9An+89S21VIroEYbOgdruBq77ntPfcn",
|
||||
"uefu7DjL5EJ7mv1Ip1nu9NjJySau1+aQnwlvID1K8xf2iqOrdS09FahTkFLzTJ0jBRIK381dv08rWjMk",
|
||||
"APm4mY+pSiHte1PR5bPAQBtg8Dw/M39P5ZeaMhJ5kgOBxxzJ0vOfhOlVUhRK+p+Ox4jifzpOZHiMzujH",
|
||||
"0uUsBwO3bc5YT8MLsLK8g81juAKXtaf4Hp/ixdA3S4bulgh6BRY/FuXiqjid8AxfJGGGozzfH9Vy8VjW",
|
||||
"o1uRl9XpFXX9x2Rt9frZsvSeOnidh4nv8VhaepHUaS57lJckx1VpccgXkTUs0ZNFeV0WkssD3Lml3v7q",
|
||||
"kBaYtzZ6/TzVaDKxqjWA/LgSdaWKjq1QbfWkouwiaIGCWb22JNo1ll7vIZmIKQ727qOVQR6MyJxnK+EZ",
|
||||
"zRx3jnwvhibXDdahofTbviDhm9NKkoOXJFX8uWnxAiMhU+Sfz8cgdufoEdZpQaKVAJN214qQMYGRcNft",
|
||||
"y4EtxIccz2g9lfC2rrura2TblEli38WeW0mlfELJtibo7nMxpVxXyMdUFlI59leYX8onuv1UNlWJppSF",
|
||||
"62WSzb1MlO23l0cDWV+1lUY/iTSyv2u1suhwZJHC+NuXRH44q/OU8sOZ46OgpBuVzdGX4ewSBdDWGtSK",
|
||||
"oZeNZ/LhI/StXIZ4y9zMVcwg6YD2eoeg7xmzx0F68DpsNgWOikImrENTQMa8lzaUBLBAgTD2qtbPPr9d",
|
||||
"8rU0nPxa7WvAA5/eQzF0RaR7BRQXSrNVIMn6b/eQUqVBWzx/3fRzqRRWzoLLcNb8GBCORhVpzZkHBBae",
|
||||
"RAbH/Qn7+Vx1fNm0Yw4fnE9Ul6CXuya9jCsOh7CR841A6o9N4yt43aTElmamFf40RSLXUXTqOldrMuau",
|
||||
"MeKFvZLAmyZjSgM7xAzGJ5/deMu9LMXLdEktte/2tsGJ0Qshv2jAb/wELhXRsGW2XDbT6vxLAZ8NBbNq",
|
||||
"vjqcLExb8jrlCGhyuEVpfpFcMdP2nDukc07wyQqsV3HeHQOfEkYw68EFQH5vFodJVPlwSpU7eQsU5MXG",
|
||||
"cNgAjhigyLp92mRAW7ynDQ4l0mn7J6EOMQ3LTRk3oeWd/GtiBbU2Osesrz7lueoY46cPqVBvbgXc2J11",
|
||||
"JZQ3utqdbpe9VzgBNTTU8rX27qflts2ekscYElLnWoTZ7skujuxSnc1AIRcUzMaiz4Ek9N3RMakgZo0z",
|
||||
"Ut2TlpU01zoNmjbGRxHqkfAB1iTDc/o3Q4e3q+aafoQmtFmrT+Jj5ld0M2T4wCMxS0M+kf5RrQ29qDxS",
|
||||
"iuSoVZgh/XGdMi5BRu12xN7qiAwBktYVtXCbJozipC1/bThsNmOmhgxWdeBYeEvxynI5lylT2tXMaaZN",
|
||||
"t7rX7gkPcGnlnEDbNU8/w8jgI1za5DXJYErdl4cX2DYfJpcVjQGULtHDixVBzGLQ1kjlYwPhKAl4HKUw",
|
||||
"fL2Iqwfbz5dx9GBT74GbhwqH6uRRQSxZBiG4dB6Bn0B9HiH4DSwiH1KR/QCXp29Y09NOl/7rjP/rjIr3",
|
||||
"6nxDnzabbihbBk9cmmYcqqZz1nh4+JmGVoq0a71rArPPpaK0MOSub0Jm4xp0kPYKwBDAcFFjFhaJiV/E",
|
||||
"vYdTQhObL+Q9fnbv6rP/3s2sI8GfQj2F31wIPWgo5cj3pgGf119MjqeJ/2B2p3ub+KKGEcSZTMCVQoH2",
|
||||
"+YkFA11+Q+GAX1I64ObioY2+2DP5wNhUFRJ4w1LCBYEL/Qq3W/adGzKUxNk5FdckNbhbCR/hZ1YoGALs",
|
||||
"FQpxYYhh5IPlxsVGpBSE+p5aAkZJMOTJibdVxMO67pQQTQxpMMtR0gqpvRVSI0ap25FPzIxmaWPltjkL",
|
||||
"O+tHuGyf9TJj40q3dYbs9sauu7E7wva7ST4Qp4HxnOY8iJsdzSN5xPysRzNHwL4czZsxq3HgWq3+Jz0w",
|
||||
"v7P/9p4QmffkJ2bdrg0/AgTwwzOoNBBeAALeQ/IFkflEsn2t/JDsoxcfJZB3/Xb5w5/ydNNWScfAqKI9",
|
||||
"5fO+bApmrHm3qyHyan5GwSMisGnAhOyldwIdsq+t7it9PxV8rOT1KbHd+nrqwiEyWtxSDASfoJLW2+cs",
|
||||
"JeqBo8Qu2IHj9kUjHDi4qwQ2CML42WN7z852pPUCYvfOVeRbnVyAAZj6sBcDAntsTMoegtdW0YuFFJI/",
|
||||
"9Pi/n7mI8SGBZWFzwX7HqRnJRtDwPgfrvZfn+mrYeik6Dv3kr5UtnEL2Wbbk2IwTYUauJl00v4+1EfTN",
|
||||
"OOFwougPhRO2G+i/mlbwYqH+lpzL4TsYzhUh+I05t+rkW8DFlDFfoxuk7KVn8U/sa3uDlNSo4GOlG6TE",
|
||||
"dnuD1N0gM1rcTJCgGO/4O//DQgl0gADCuY/DRV2QLaeGH0MVFMs2wcY/75R3f9sK766iA/4cXLtHuWqv",
|
||||
"DKlpUybNbUwDedGVhGyRRqo0iVkE/Bg68F6IgO0qv3y77JRfgY49SXllKb00erDYt1Z4vbDwMsqVFYRX",
|
||||
"ldYTxeECkjlMcG9BdVC3vnxR1sURXVIfvLrMlDdp109ish/iokDgN3Ic+QAVqKI4UpM7QBnLLVO+NFNS",
|
||||
"DtDsy6ZuIP9OYAKt2ZC1bsyB/6C9Doj5Djuy+ZCCVbdvD8nR3moZLJxHGGMUBq1M3CeZmO5OWSJKzllV",
|
||||
"JmZPfTau3nH62Fjn6z0CBF7Shm1ejX2uTruJHAy1mNxmpoWUzvYg20IRll2V1cjzWoNgAoWdWz/DghVc",
|
||||
"xU0mbpm3xSX/dVWJK3r0otBH7rI+5aTs4PAONgknpSv0DevRpps81qFltUejwm60j0c7z9qKfeA+VCea",
|
||||
"HNMmzhOczsPwofycyj5/4V/b51SeY1LFSZPbQwHV+8QOO6p4fBuAhMzDGP0Henzi17uZ+BMk89BjFT2A",
|
||||
"74dP+mrLfIOYHshZQD3P2Me1GPEYExATIzuO6Vd+jl33EzJ32GWlyJC3WD7bMICuKUJZz0PkzFcnZxo8",
|
||||
"qNzDUCaOlRxW5hB4wmvEDznB1Fg82YZDN4kRWTL8uGH4gCAdlBVF+qrSA0NpfkZJCHQHVqaDury/46tx",
|
||||
"kQALAjnArRwWcvhqPFRR1UASF7HcyuK9k8VlRkgl8dV4jXTDhYF1DNZGYzAE5PmrMsvw5mg2P6l1VEVx",
|
||||
"V1uG3iOGNnKeJUdXnqiiTmdvF09WonT4ob1cbd9coENMM5tBWs86tzPto8o+PKqke7PpZ2ZdVfVK1s0K",
|
||||
"qDvTJWeowunNCfFA7Hjdfa3svk2JIbZoRfnQSoSdlUJVafEJ8HqodSJCPdTpT3SjV62yXS0nanMC9gmB",
|
||||
"i0gkt2RtFfFhEhyHlgywlSBVLvEIM19pIUI4Efj7d0F44Ue8OkbZFUPHkHasyB3Gkiza8jBr3rLwPmYz",
|
||||
"i5NAbFWNRzsKooT5Q/DHXd1yn/dCU2lzmVXIF7bhLyFQsjVV2gJ4M+EsUCdc3kMy5sO2ouXltINmWXoN",
|
||||
"lgYxXHuh2OcLhdylrUgNAvBDDxNAagyGAD+walDCUlhjJZwA/DBmg9qLiOHFj2gbTBHRgEO1uG55dA/M",
|
||||
"gCY22EV6JOE103sK44eqZBGZA7bRpan1ZsqCSTgqvjCkUoRUVfWkyEgDXnhHR25H+9y2b+/nCvmvnsRQ",
|
||||
"DGJioZ/+nTzHPxwbOyrGq5nZa5SCUG5ty7n791CuMt5KhyWjiuqHNHpCcuFd7SWfnQ0//WGZYaKteb2R",
|
||||
"DNVSe8jH6K3uXSkRzQ1BzWtRqNV/NSUplJK9bWEKpTCFghdcY9DN1Vd+uTIVOrity9krtt4cwbSX1L0s",
|
||||
"X5Hfo3I4cLUpqYnA+a7+s86PJccJtSewINNDdmspsL4eNBWDB6wmiO1aNbNA6+ZijuvPvyDVx/R38zS1",
|
||||
"Oj8fs8fI2sck/mTJGVoF+qiGr4ds9Ja5X565sywmN0oRSg7jOu9OeRyx7W7N2jsya39RcR/Y5A/JNqmp",
|
||||
"yrA5iYPnIIJb0iPGbOxW3hyMMsE3rNUofiCNIo1dET5DlZGholI7Y3HfT9/HsUbXqGJ9FjjJXVkGsrBf",
|
||||
"KwM2DuAlwMQZXrCE9XPo+EDuoClNEcBk6BnzFL060+Up2oGPbZOCnqWyfK1JZP98a1aQJfaON3ayEFu9",
|
||||
"TLCWdhrNT5k4zYP3IPFJ581JNycqdpFCLZ379SqTj3kmtenSYRPoJxWfzPkcdqF2tY89m9e3NpmSMR2z",
|
||||
"NhjoXMY1TAFx56XHniqN6XCCgbbl5aC8k3Bk2Lrti2iS8lPJph97IsVS8z1V+kZJMPRwLvXsWggu59tt",
|
||||
"aBASEUjt61FNejRONrt4ucHHbhwG9RoJbeX8HU4zoEiMZrNa94nzOAx+ajXlYPK7phuLPDrtDJJUJT6q",
|
||||
"SeNturht4a5LZ24K3lWdKqWdklF8k+loh+ZTHWaG8oqcudOlcy/y8m4sda8qRbB9+t7pcnsZfBWlYMc5",
|
||||
"fHPIWENDb49djZZeOue2pK7TQ/f4O/1PT/5qV+aufBBbP3xQwjnwonfp6k1g5TC6+7J3lvXptJvY5gcu",
|
||||
"1ovTo6nZW0WeIL4+d6seE9dkrkN2T9pjztrS0dkem4dg2G90WG9EPtSVl2SzpjNaC4cDrzW5X/JhW9Um",
|
||||
"VQEx4QYOK1sfpQJewtHGtlenKqjFIFtVoVoOCLbchiiwU+XZcWD7oKe+Mta7KbUGs302mLFH5AbWMtZ+",
|
||||
"h6ayfbTjRSCmSDO4rhTA4o2/qI8ZO4JPkyJGC5twEtkuXH1tfBZLRJBgaFVvUbZdxbo1Zn2FnckGuAcU",
|
||||
"eFZQsYaNQfqIAq8emoM3phK0gA64p4CWnKefAJaxzOoSOmcnZ6e9E/q/ycnJG/a//2s0VrPufTqBnnjp",
|
||||
"sdqjUHRsq5FTiKfwPozhNkF+y2bYJMwVWL5HAcLz1WGW/XeK500BvVFMb+9xoGyJ/2mfBoq6Y2vh2Iq7",
|
||||
"9HbeBJiHtE3+fuAI0OhBl2d/NaG/ZSDEIVegbtXwVg3fvRre6patbvkiIVB4zYrtTAC1lUXqz/ctVE/P",
|
||||
"znkKqpf49HissRqmLVexH45l59aKuM9WxO3di1ICOCjPqVaZapWpg1GmsmVkonojttkUJCsGT620Gpi3",
|
||||
"GiNZkjCt1WGzWolBA9iuXnI8TfyHXuaJqI8oepv4D8KpbUOKCh3xcPwTt+SHUOapDC22YUfT+q3ZbR2R",
|
||||
"yjWZE8+pJBan7VoJISXEW6t93rqk4O4qNZKCN3J+iaHs/esGxcbhOFftVGzINJ0NxIbYp/0VG3JNNWJD",
|
||||
"rKMVGwaxUbvP2xQb39M/e6WckbUREHqQGwqNA4+D0ODAWM1Ii+q9DY3Q727r8FiMjTDgqZnHo4E2aqIk",
|
||||
"NsKAB12h+KC4b5sHcnvXP/QYim3Lkepoitx1YEOS5cADLfZeuGwr9qIkXRrUR83IqJz38WWvLLUSUg32",
|
||||
"+CmVnwOo/nZbdVnalKy0u0SlKTSfs8wtVWWsHOAE8Mmcv8U+fYuIhzqcolf1mUSqc2ZWgrYj0cixvWpY",
|
||||
"mqgcbdz8ncrGZsG3aq0uM/ytZNy9ZNy7QidC0FVR+XZSZymyOOfUo5fHUjcQEtlew9UpRq0U3qUUljuw",
|
||||
"gmZaodbtuWKqSuBWMW3Fr0n8CoWkTifeuMjl1fN6bpgEpCZegrWRuchl2UfwCJAPpj5k0lcRN3r7wntI",
|
||||
"eHU+fM5mPHjRW5cy/sBLRuQ2a0UzJScVTj7tC6LBYTqHpNUKSeTZP8EwxsduEsewmrMxvx3whg7tVuLe",
|
||||
"Wwzj95Cci8G2SHd0poZ0xiBuCxC/fAFi6CYxIksmxt0wfECwn1DZ9ddXKqoKSYfy5CbJnW2/hoxniMyT",
|
||||
"6bELfH8K3AcjOZ+Hi8iHBHKavqbzO9rziE7E7VHv2dDXFJfncvgCgb86Oat5e3XFvF553jkEHjvcvnf8",
|
||||
"kG9Gfh+KYv25gMwc7uQC83NYog8TEJtFwZh+XQ1xrGtzrDF4to8zBl1DhIXhzIfboTc29A9Obxx9G6a3",
|
||||
"DHE/HL2h4BERWF27CbNoJqkN8w5M6bY6vukIE9Z3KOba4imuTmTlzO4jLDcmv8BWX7Q+VllNngL2Msqb",
|
||||
"aG6IOdo7Bq4LI2K2vPXZd5xa2MQkJWpTN5/36WzHnsQH5xMphiSDAaiC+vjKdfTXekyl5MWxXdp7e/qK",
|
||||
"IatuUVFJn35vRl+8T2dbdenp4BugL77ylr4q6YtjewX68sMZCsxkdRnOsIMCB7Cz8ahCwbhkA23JOYMe",
|
||||
"wXT8ekLa3T3aD2cz6DkoaK/PL3x97nZ+Ozvb1bqjOKQ0wIy2g4AgsnR6ziPwkccmo5simqBg5kA5klnh",
|
||||
"ZYStv8p3O996MKBT9WJAYI/ZwKkOzd9qdMwcJqSGm8OE2LFzmLy8sUowWbhnhbpbI1WNNs2ox9Y+tYCL",
|
||||
"KYzxHEUN7nBKJ7t7HD8DP2XdRFKKrRK4ftLmFzoVRe2lbpVLnYrBepKMAMZPYVzhSpHmYqcdHNm+SqTe",
|
||||
"yDG3pySdz0EwSyfaJ23JZZB5KaJacd4qTc2UpmpW55SfZ8a19akYzqgkjquu3bwFrlSpUk+pbfG9BGOf",
|
||||
"OF4ir31obJl+MzclSeWbuSxhH7gPW3mkGtOR9/iNqkaSNny0eoQxFiAY3Z/oGkQ76QKFYfyo0dKHwX34",
|
||||
"HpLPYtCN1iRWIM0yNJ4enRyd6HJAKp5Hf6Vdv1qUG55ULLbgbVlB7F+gE0OSxEEOeYWbDhWzSRBQ/kmn",
|
||||
"+NaTQ/bCiKecKrPAE5zOw/ChJxzRjr+LHyzC3+lRJ1qXHdX47/aR7WIgsyNYOtGO/cAsQ8UlfO3B9vLG",
|
||||
"iWJ4ukqmRu8v0eKrFXMcCzzbmClkU+FXX8MxQnHDtoky95ZvNuM/yaHn7pMCNRQzVRlXKFbSOiACO+l2",
|
||||
"tey5R+zJrDKlLWrKoylvsj+ea7yveSutYzVzzrTiOe5kWuWzrDnjD8djubHvqFhxa48sOSWXAr7kBcXs",
|
||||
"g8zU6vrKj5WEbJ92YC9oeVtR/Llzw3RWCAwkEmW7i4Oy5DU1KL/lNEPNxXWYrXCaFIN7rBKBNavB2uBe",
|
||||
"tJcRMk2SaKUAtgF6L5w5QhCrQjErxsd06zQse05ooHL9DIFiKwaHtbz10rylRqGtw1g2ap89dzXTA/eC",
|
||||
"wTavC+aRYRsrL3KS5rhs18qhlUQoqoetPDAqiOsxZ42aaFUuj25Svi5eyniP6UuH8aRsUB5vH/hZU6KC",
|
||||
"F5jYQP3g1asH6wGbxWESsbofGQhyo4ygsE4f4bJTmwZky0JizVpc8lGpLce1h9rESvW/GgkumZrI6Nwi",
|
||||
"s2o0TRa0Uo6gvZRcEw27HDnDe2bdxgmlDuh1GVf5gEBMUp5C2LmHxJ1Dz1QdKhP8e65ICTJYMfHQi6Ub",
|
||||
"UuBtlGeozS7UZhfaQnahRqJZyAZs8aqVO8mtxLLwrTkgE8yPIJe3LOWkw9R6qmAr7/ZKBcxIcVUVsOj4",
|
||||
"N4UghnHq+NfVugIyTzIuD5LY77zpdJ6/Pv+/AAAA//9+b1jkXTUDAA==",
|
||||
}
|
||||
|
||||
// GetSwagger returns the content of the embedded swagger specification file
|
||||
|
||||
@@ -277,7 +277,7 @@ func TestDetectComponentsMultiple(t *testing.T) {
|
||||
// Check component stats
|
||||
stats := g.GetComponentStats()
|
||||
assert.Equal(t, 3, stats.TotalComponents)
|
||||
assert.Equal(t, 2, stats.LargestComponent) // A-B or C-D
|
||||
assert.Equal(t, 2, stats.LargestComponent) // A-B or C-D
|
||||
assert.Equal(t, 1, stats.SmallestComponent) // E
|
||||
assert.Equal(t, 1, stats.IsolatedNodes) // E
|
||||
}
|
||||
|
||||
@@ -0,0 +1,9 @@
|
||||
-- +goose Up
|
||||
-- +goose StatementBegin
|
||||
ALTER TYPE v1_incoming_webhook_source_name ADD VALUE IF NOT EXISTS 'SVIX';
|
||||
-- +goose StatementEnd
|
||||
|
||||
-- +goose Down
|
||||
-- +goose StatementBegin
|
||||
-- intentionally blank
|
||||
-- +goose StatementEnd
|
||||
Generated
+4
-4
@@ -473,14 +473,14 @@ setuptools = "*"
|
||||
|
||||
[[package]]
|
||||
name = "hatchet-sdk"
|
||||
version = "1.23.0"
|
||||
version = "1.23.2"
|
||||
description = "This is the official Python SDK for Hatchet, a distributed, fault-tolerant task queue. The SDK allows you to easily integrate Hatchet's task scheduling and workflow orchestration capabilities into your Python applications."
|
||||
optional = false
|
||||
python-versions = "<4.0,>=3.10"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "hatchet_sdk-1.23.0-py3-none-any.whl", hash = "sha256:571b426991f0d84f80df8c86a52fb0bbd8ed8cad267403db1ab7128becea0ac5"},
|
||||
{file = "hatchet_sdk-1.23.0.tar.gz", hash = "sha256:6c888bd6a67fde7429aabce0c47fef05bca25ce3c25e5f24700f04031f7d3729"},
|
||||
{file = "hatchet_sdk-1.23.2-py3-none-any.whl", hash = "sha256:95aa0f330527fa0a64adb1d9c758ae2161beb159b0ad54665eb0f1018c2d880f"},
|
||||
{file = "hatchet_sdk-1.23.2.tar.gz", hash = "sha256:54e9120341ad464c1bb57db76fb29c06ff77df5afd935b22e4bd8c3586ed9f93"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -1125,4 +1125,4 @@ propcache = ">=0.2.0"
|
||||
[metadata]
|
||||
lock-version = "2.1"
|
||||
python-versions = "^3.10"
|
||||
content-hash = "814305cfef2a2b2a6dcd3c3cfd476a64777c6a9423fefdc251a06e7f640984a8"
|
||||
content-hash = "b1e5494e65f47bb499caeffe3e70a906f1b1da98fd8e15182fe863138eada31f"
|
||||
|
||||
@@ -8,7 +8,7 @@ package-mode = false
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = "^3.10"
|
||||
hatchet-sdk = "1.23.0"
|
||||
hatchet-sdk = "1.23.2"
|
||||
|
||||
|
||||
[build-system]
|
||||
|
||||
@@ -241,6 +241,7 @@ export enum V1WebhookSourceName {
|
||||
STRIPE = "STRIPE",
|
||||
SLACK = "SLACK",
|
||||
LINEAR = "LINEAR",
|
||||
SVIX = "SVIX",
|
||||
}
|
||||
|
||||
export enum TenantEnvironment {
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
import { Button } from '@/components/v1/ui/button';
|
||||
import { useCurrentTenantId } from '@/hooks/use-tenant';
|
||||
import { Tenant } from '@/lib/api';
|
||||
import { queries } from '@/lib/api/queries';
|
||||
import { BillingContext } from '@/lib/atoms';
|
||||
import { appRoutes } from '@/router';
|
||||
import {
|
||||
CalendarIcon,
|
||||
@@ -11,8 +13,8 @@ import { useQuery } from '@tanstack/react-query';
|
||||
import { Link } from '@tanstack/react-router';
|
||||
|
||||
interface BillingRequiredProps {
|
||||
tenant: any;
|
||||
billing: any;
|
||||
tenant?: Tenant | undefined;
|
||||
billing?: BillingContext | undefined;
|
||||
manageClicked: () => Promise<void>;
|
||||
portalLoading: boolean;
|
||||
}
|
||||
|
||||
@@ -15,7 +15,7 @@ import { useState } from 'react';
|
||||
|
||||
export default function CreateWorker() {
|
||||
const navigate = useNavigate();
|
||||
const { billing, can } = useTenantDetails();
|
||||
const { tenant, billing, can } = useTenantDetails();
|
||||
const { tenantId } = useCurrentTenantId();
|
||||
|
||||
const [portalLoading, setPortalLoading] = useState(false);
|
||||
@@ -69,7 +69,7 @@ export default function CreateWorker() {
|
||||
if (isBillingRequired) {
|
||||
return (
|
||||
<BillingRequired
|
||||
tenant={tenantId}
|
||||
tenant={tenant}
|
||||
billing={billing}
|
||||
manageClicked={manageClicked}
|
||||
portalLoading={portalLoading}
|
||||
|
||||
@@ -24,11 +24,11 @@ export default function ManagedWorkers() {
|
||||
const [showUpgradeModal, setShowUpgradeModal] = useState(false);
|
||||
|
||||
const computeCostQuery = useQuery({
|
||||
...queries.cloud.getComputeCost(tenant!.metadata.id),
|
||||
...queries.cloud.getComputeCost(tenantId),
|
||||
});
|
||||
|
||||
const listManagedWorkersQuery = useQuery({
|
||||
...queries.cloud.listManagedWorkers(tenant!.metadata.id),
|
||||
...queries.cloud.listManagedWorkers(tenantId),
|
||||
});
|
||||
|
||||
// Check if the user can create more worker pools
|
||||
@@ -55,7 +55,10 @@ export default function ManagedWorkers() {
|
||||
}
|
||||
setPortalLoading(true);
|
||||
billing?.setPollBilling(true);
|
||||
const link = await cloudApi.billingPortalLinkGet(tenant!.metadata.id);
|
||||
if (!tenantId) {
|
||||
return;
|
||||
}
|
||||
const link = await cloudApi.billingPortalLinkGet(tenantId);
|
||||
window.open(link.data.url, '_blank');
|
||||
} catch (e) {
|
||||
handleApiError(e as any);
|
||||
|
||||
@@ -271,6 +271,15 @@ export const AuthSetup = ({
|
||||
helpLink="https://docs.slack.dev/authentication/verifying-requests-from-slack/#validating-a-request"
|
||||
/>
|
||||
);
|
||||
case V1WebhookSourceName.SVIX:
|
||||
return (
|
||||
<PreconfiguredHMACAuth
|
||||
register={register}
|
||||
secretLabel="Svix Signing Secret"
|
||||
secretPlaceholder="whsec_..."
|
||||
helpText="You can find your signing secret in the Svix dashboard under the endpoint's settings."
|
||||
/>
|
||||
);
|
||||
default:
|
||||
const exhaustiveCheck: never = sourceName;
|
||||
throw new Error(`Unhandled source name: ${exhaustiveCheck}`);
|
||||
|
||||
@@ -4,6 +4,33 @@ import { Webhook } from 'lucide-react';
|
||||
import { CgLinear } from 'react-icons/cg';
|
||||
import { FaSlack, FaStripeS } from 'react-icons/fa';
|
||||
|
||||
const SvixLogo = ({ className }: { className?: string }) => (
|
||||
<svg
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
viewBox="0 0 230 230"
|
||||
fill="currentColor"
|
||||
className={className}
|
||||
aria-hidden="true"
|
||||
focusable="false"
|
||||
>
|
||||
<g transform="translate(10.7,10.6)">
|
||||
<path
|
||||
d="M208.8 104.4c0 57.6-46.8 104.4-104.4 104.4c-57.6 0-104.4-46.8-104.4-104.4c0-57.6 46.8-104.4 104.4-104.4c57.6 0 104.4 46.8 104.4 104.4Zm-125 42.4c-7.2-16.1-23.3-26.8-41-27.2c-11.7-0.3-22.8 3-31.8 9.1c-2.1-8.3-3.2-17-3.1-26c1-53.2 44.9-95.7 98.2-94.8c35 .7 65.3 19.8 81.7 48c-4.9 5.4-12.8 8.6-21.2 8.3c-8.1-0.2-15.5-5.1-18.8-12.5c-7.3-16.1-23.4-26.8-41-27.2c-8.2-0.2-16.2 1.9-23.3 5.9c-24.5 13.7-32 47-14.3 69.3c8 10.1 20.2 16.2 34.2 17.2c7.3 .5 12.8 3.1 16.5 7.7c6.6 8.4 5.6 21.6-2.1 28.9c-4.1 3.9-9.5 6-15.1 5.8c-8.1-0.2-15.5-5.1-18.9-12.5Zm82.2-57.7c-17.3-0.8-33.5-10.4-41-27.1c-3.4-7.4-10.8-12.3-18.9-12.5c-17.1-0.5-28 21.1-17.2 34.7c3.7 4.6 9.2 7.2 16.5 7.7c14 1 26.1 7.1 34.1 17.2c14.7 18.4 12.6 46.3-4.4 62.5c-20.6 19.5-54.8 15.6-70.4-7.9c-1.4-2.1-2.6-4.3-3.7-6.6c-3.3-7.4-10.7-12.3-18.8-12.5c-8.4-0.2-16.2 3-21.2 8.3c16.4 28.2 46.7 47.4 81.7 48c53.3 .9 97.2-41.5 98.2-94.8c.1-9-1-17.7-3.1-26c.4 1.8-13.4 6.6-15 7c-5.5 1.6-11.2 2.3-16.8 2Z"
|
||||
fillRule="evenodd"
|
||||
/>
|
||||
</g>
|
||||
<ellipse
|
||||
cx="115"
|
||||
cy="115"
|
||||
rx="104.5"
|
||||
ry="104.5"
|
||||
fill="none"
|
||||
stroke="currentColor"
|
||||
strokeWidth="20"
|
||||
/>
|
||||
</svg>
|
||||
);
|
||||
|
||||
export const SourceName = ({
|
||||
sourceName,
|
||||
}: {
|
||||
@@ -45,7 +72,13 @@ export const SourceName = ({
|
||||
Linear
|
||||
</span>
|
||||
);
|
||||
|
||||
case V1WebhookSourceName.SVIX:
|
||||
return (
|
||||
<span className="flex flex-row items-center gap-x-2">
|
||||
<SvixLogo className="size-4" />
|
||||
Svix
|
||||
</span>
|
||||
);
|
||||
default:
|
||||
const exhaustiveCheck: never = sourceName;
|
||||
throw new Error(`Unhandled source: ${exhaustiveCheck}`);
|
||||
|
||||
@@ -307,6 +307,28 @@ const buildWebhookPayload = (data: WebhookFormData): V1CreateWebhookRequest => {
|
||||
signingSecret: data.signingSecret,
|
||||
},
|
||||
};
|
||||
case V1WebhookSourceName.SVIX:
|
||||
if (!data.signingSecret) {
|
||||
throw new Error('signing secret is required for Svix webhooks');
|
||||
}
|
||||
|
||||
return {
|
||||
...basePayload,
|
||||
sourceName: data.sourceName,
|
||||
name: data.name,
|
||||
eventKeyExpression: data.eventKeyExpression,
|
||||
authType: V1WebhookAuthType.HMAC,
|
||||
auth: {
|
||||
// Svix uses its own SDK for verification; these HMAC fields are
|
||||
// stored but the server-side validation implements Svix's signature
|
||||
// verification protocol.
|
||||
// See: https://docs.svix.com/receiving/verifying-payloads/how-to-verify-a-payload
|
||||
algorithm: V1WebhookHMACAlgorithm.SHA256,
|
||||
encoding: V1WebhookHMACEncoding.BASE64,
|
||||
signatureHeaderName: 'svix-signature',
|
||||
signingSecret: data.signingSecret,
|
||||
},
|
||||
};
|
||||
default:
|
||||
const exhaustiveCheck: never = data.sourceName;
|
||||
throw new Error(`Unhandled source name: ${exhaustiveCheck}`);
|
||||
@@ -321,6 +343,7 @@ const createSourceInlineDescription = (sourceName: V1WebhookSourceName) => {
|
||||
case V1WebhookSourceName.LINEAR:
|
||||
case V1WebhookSourceName.STRIPE:
|
||||
case V1WebhookSourceName.SLACK:
|
||||
case V1WebhookSourceName.SVIX:
|
||||
return '';
|
||||
default:
|
||||
const exhaustiveCheck: never = sourceName;
|
||||
@@ -344,6 +367,7 @@ const SourceCaption = ({ sourceName }: { sourceName: V1WebhookSourceName }) => {
|
||||
case V1WebhookSourceName.LINEAR:
|
||||
case V1WebhookSourceName.STRIPE:
|
||||
case V1WebhookSourceName.SLACK:
|
||||
case V1WebhookSourceName.SVIX:
|
||||
return '';
|
||||
default:
|
||||
const exhaustiveCheck: never = sourceName;
|
||||
|
||||
@@ -1,8 +1,13 @@
|
||||
import { useIsTaskRunSkipped } from '../../hooks/use-is-task-run-skipped';
|
||||
import { useWorkflowDetails } from '../../hooks/use-workflow-details';
|
||||
import { TabOption } from './step-run-detail/step-run-detail';
|
||||
import StepRunNode from './step-run-node';
|
||||
import { useRefetchInterval } from '@/contexts/refetch-interval-context';
|
||||
import { queries, WorkflowRunShapeItemForWorkflowRunDetails } from '@/lib/api';
|
||||
import {
|
||||
queries,
|
||||
V1TaskEventType,
|
||||
WorkflowRunShapeItemForWorkflowRunDetails,
|
||||
} from '@/lib/api';
|
||||
import { useQuery } from '@tanstack/react-query';
|
||||
import { useMemo } from 'react';
|
||||
|
||||
@@ -17,7 +22,23 @@ type NodeRelationship = {
|
||||
};
|
||||
|
||||
export const JobMiniMap = ({ onClick }: JobMiniMapProps) => {
|
||||
const { shape, taskRuns: tasks, isLoading, isError } = useWorkflowDetails();
|
||||
const {
|
||||
shape,
|
||||
taskRuns: tasks,
|
||||
taskEvents,
|
||||
isLoading,
|
||||
isError,
|
||||
} = useWorkflowDetails();
|
||||
|
||||
const skippedTaskIds = useMemo(() => {
|
||||
const ids = new Set<string>();
|
||||
for (const event of taskEvents) {
|
||||
if (event.eventType === V1TaskEventType.SKIPPED) {
|
||||
ids.add(event.taskId);
|
||||
}
|
||||
}
|
||||
return ids;
|
||||
}, [taskEvents]);
|
||||
|
||||
const taskRunRelationships: NodeRelationship[] = useMemo(() => {
|
||||
if (!shape || !tasks) {
|
||||
@@ -127,6 +148,9 @@ export const JobMiniMap = ({ onClick }: JobMiniMapProps) => {
|
||||
onClick: () => onClick(taskRun?.metadata.id),
|
||||
childWorkflowsCount: taskRun?.numSpawnedChildren || 0,
|
||||
taskName: shapeItem.taskName,
|
||||
isSkipped: taskRun
|
||||
? skippedTaskIds.has(taskRun.metadata.id)
|
||||
: false,
|
||||
}}
|
||||
/>
|
||||
);
|
||||
@@ -160,6 +184,7 @@ export const TaskRunMiniMap = ({
|
||||
taskRunId,
|
||||
}: JobMiniMapProps & UseTaskRunProps) => {
|
||||
const { taskRun, isLoading, isError } = useTaskRun({ taskRunId });
|
||||
const { isSkipped } = useIsTaskRunSkipped({ taskRunId, limit: 1 });
|
||||
|
||||
if (isLoading || isError || !taskRun) {
|
||||
return null;
|
||||
@@ -175,6 +200,7 @@ export const TaskRunMiniMap = ({
|
||||
onClick: () => onClick(taskRun.metadata.id),
|
||||
childWorkflowsCount: taskRun.numSpawnedChildren,
|
||||
taskName: taskRun.displayName,
|
||||
isSkipped,
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
|
||||
+15
-12
@@ -1,6 +1,7 @@
|
||||
import { V1RunIndicator } from '../../../components/run-statuses';
|
||||
import { RunsTable } from '../../../components/runs-table';
|
||||
import { RunsProvider } from '../../../hooks/runs-provider';
|
||||
import { useIsTaskRunSkipped } from '../../../hooks/use-is-task-run-skipped';
|
||||
import { isTerminalState } from '../../../hooks/use-workflow-details';
|
||||
import { TaskRunMiniMap } from '../mini-map';
|
||||
import { StepRunEvents } from '../step-run-events-for-workflow-run';
|
||||
@@ -20,14 +21,13 @@ import {
|
||||
TabsTrigger,
|
||||
} from '@/components/v1/ui/tabs';
|
||||
import { useSidePanel } from '@/hooks/use-side-panel';
|
||||
import { useCurrentTenantId } from '@/hooks/use-tenant';
|
||||
import { V1TaskStatus, V1TaskSummary, queries } from '@/lib/api';
|
||||
import { emptyGolangUUID, formatDuration } from '@/lib/utils';
|
||||
import { TaskRunActionButton } from '@/pages/main/v1/task-runs-v1/actions';
|
||||
import { WorkflowDefinitionLink } from '@/pages/main/workflow-runs/$run/v2components/workflow-definition';
|
||||
import { appRoutes } from '@/router';
|
||||
import { useQuery } from '@tanstack/react-query';
|
||||
import { Link } from '@tanstack/react-router';
|
||||
import { Link, useParams } from '@tanstack/react-router';
|
||||
import { FullscreenIcon } from 'lucide-react';
|
||||
import { useCallback, useState } from 'react';
|
||||
|
||||
@@ -59,13 +59,13 @@ const TaskRunPermalinkOrBacklink = ({
|
||||
taskRun: V1TaskSummary;
|
||||
showViewTaskRunButton: boolean;
|
||||
}) => {
|
||||
const { tenantId } = useCurrentTenantId();
|
||||
const { tenant } = useParams({ from: appRoutes.tenantRoute.to });
|
||||
|
||||
if (showViewTaskRunButton) {
|
||||
return (
|
||||
<Link
|
||||
to={appRoutes.tenantRunRoute.to}
|
||||
params={{ tenant: tenantId, run: taskRun.metadata.id }}
|
||||
params={{ tenant: tenant, run: taskRun.metadata.id }}
|
||||
>
|
||||
<Button
|
||||
size={'sm'}
|
||||
@@ -84,7 +84,7 @@ const TaskRunPermalinkOrBacklink = ({
|
||||
return (
|
||||
<Link
|
||||
to={appRoutes.tenantRunRoute.to}
|
||||
params={{ tenant: tenantId, run: taskRun.workflowRunExternalId }}
|
||||
params={{ tenant: tenant, run: taskRun.workflowRunExternalId }}
|
||||
>
|
||||
<Button
|
||||
size={'sm'}
|
||||
@@ -133,6 +133,7 @@ export const TaskRunDetail = ({
|
||||
},
|
||||
});
|
||||
|
||||
const { isSkipped } = useIsTaskRunSkipped({ taskRunId });
|
||||
const taskRun = taskRunQuery.data;
|
||||
|
||||
if (taskRunQuery.isLoading) {
|
||||
@@ -152,7 +153,9 @@ export const TaskRunDetail = ({
|
||||
<div className="flex flex-row items-center justify-between">
|
||||
<div className="flex w-full flex-row items-center justify-between">
|
||||
<div className="flex flex-row items-center gap-4">
|
||||
{taskRun.status && <V1RunIndicator status={taskRun.status} />}
|
||||
{taskRun.status && (
|
||||
<V1RunIndicator status={taskRun.status} isSkipped={isSkipped} />
|
||||
)}
|
||||
<h3 className="flex flex-row items-center gap-4 font-mono text-lg font-semibold leading-tight tracking-tight text-foreground">
|
||||
{taskRun.displayName || 'Task Run Detail'}
|
||||
</h3>
|
||||
@@ -162,7 +165,7 @@ export const TaskRunDetail = ({
|
||||
|
||||
{taskRun.parentTaskExternalId && (
|
||||
<TriggeringParentWorkflowRunSection
|
||||
tenantId={taskRun.tenantId}
|
||||
tenant={taskRun.tenantId}
|
||||
parentTaskExternalId={taskRun.parentTaskExternalId}
|
||||
/>
|
||||
)}
|
||||
@@ -329,9 +332,9 @@ const V1StepRunSummary = ({ taskRunId }: { taskRunId: string }) => {
|
||||
const taskRunQuery = useQuery({
|
||||
...queries.v1Tasks.get(taskRunId),
|
||||
});
|
||||
const { isSkipped: hasSkippedEvent } = useIsTaskRunSkipped({ taskRunId });
|
||||
|
||||
const timings = [];
|
||||
|
||||
const data = taskRunQuery.data;
|
||||
|
||||
if (taskRunQuery.isLoading || !data) {
|
||||
@@ -359,7 +362,7 @@ const V1StepRunSummary = ({ taskRunId }: { taskRunId: string }) => {
|
||||
if (data.status === V1TaskStatus.COMPLETED && data.finishedAt) {
|
||||
timings.push(
|
||||
<div key="finished" className="text-sm text-muted-foreground">
|
||||
{'Succeeded '}
|
||||
{hasSkippedEvent ? 'Skipped ' : 'Succeeded '}
|
||||
<RelativeDate date={data.finishedAt} />
|
||||
</div>,
|
||||
);
|
||||
@@ -393,10 +396,10 @@ const V1StepRunSummary = ({ taskRunId }: { taskRunId: string }) => {
|
||||
};
|
||||
|
||||
function TriggeringParentWorkflowRunSection({
|
||||
tenantId,
|
||||
tenant,
|
||||
parentTaskExternalId,
|
||||
}: {
|
||||
tenantId: string;
|
||||
tenant: string;
|
||||
parentTaskExternalId: string;
|
||||
}) {
|
||||
// Get the parent task to find the parent workflow run
|
||||
@@ -428,7 +431,7 @@ function TriggeringParentWorkflowRunSection({
|
||||
<Link
|
||||
to={appRoutes.tenantRunRoute.to}
|
||||
params={{
|
||||
tenant: tenantId,
|
||||
tenant: tenant,
|
||||
run: parentWorkflowRun.workflowRunExternalId,
|
||||
}}
|
||||
className="font-semibold text-indigo-500 hover:underline dark:text-indigo-200"
|
||||
|
||||
@@ -17,6 +17,7 @@ export type NodeData = {
|
||||
onClick: (defaultOpenTab?: TabOption) => void;
|
||||
childWorkflowsCount: number;
|
||||
taskName: string;
|
||||
isSkipped?: boolean;
|
||||
};
|
||||
|
||||
// eslint-disable-next-line react/display-name
|
||||
@@ -61,7 +62,10 @@ export default memo(({ data }: { data: NodeData }) => {
|
||||
<span className="step-run-backdrop absolute inset-[1px] bg-background transition-colors duration-200" />
|
||||
<div className="z-10 flex w-full flex-row items-center justify-between gap-4">
|
||||
<div className="z-10 flex flex-row items-center justify-start gap-2">
|
||||
<V1RunIndicator status={data.taskRun?.status} />
|
||||
<V1RunIndicator
|
||||
status={data.taskRun?.status}
|
||||
isSkipped={data.isSkipped}
|
||||
/>
|
||||
<div className="max-w-[160px] flex-grow truncate">
|
||||
{data.taskName}
|
||||
</div>
|
||||
|
||||
+15
-3
@@ -1,7 +1,7 @@
|
||||
import { useWorkflowDetails } from '../../hooks/use-workflow-details';
|
||||
import stepRunNode, { NodeData } from './step-run-node';
|
||||
import { useTheme } from '@/components/hooks/use-theme';
|
||||
import { V1TaskStatus } from '@/lib/api';
|
||||
import { V1TaskEventType, V1TaskStatus } from '@/lib/api';
|
||||
import dagre from 'dagre';
|
||||
import { useMemo } from 'react';
|
||||
import ReactFlow, {
|
||||
@@ -30,7 +30,18 @@ const WorkflowRunVisualizer = ({
|
||||
setSelectedTaskRunId: (id: string) => void;
|
||||
}) => {
|
||||
const { theme } = useTheme();
|
||||
const { shape, taskRuns, isLoading, isError } = useWorkflowDetails();
|
||||
const { shape, taskRuns, taskEvents, isLoading, isError } =
|
||||
useWorkflowDetails();
|
||||
|
||||
const skippedTaskIds = useMemo(() => {
|
||||
const ids = new Set<string>();
|
||||
for (const event of taskEvents) {
|
||||
if (event.eventType === V1TaskEventType.SKIPPED) {
|
||||
ids.add(event.taskId);
|
||||
}
|
||||
}
|
||||
return ids;
|
||||
}, [taskEvents]);
|
||||
|
||||
const edges: Edge[] = useMemo(
|
||||
() =>
|
||||
@@ -85,6 +96,7 @@ const WorkflowRunVisualizer = ({
|
||||
onClick: () => task && setSelectedTaskRunId(task.metadata.id),
|
||||
childWorkflowsCount: task?.numSpawnedChildren || 0,
|
||||
taskName: shapeItem.taskName,
|
||||
isSkipped: task ? skippedTaskIds.has(task.metadata.id) : false,
|
||||
};
|
||||
|
||||
return {
|
||||
@@ -95,7 +107,7 @@ const WorkflowRunVisualizer = ({
|
||||
selectable: true,
|
||||
};
|
||||
}) || [],
|
||||
[shape, taskRuns, setSelectedTaskRunId],
|
||||
[shape, taskRuns, setSelectedTaskRunId, skippedTaskIds],
|
||||
);
|
||||
|
||||
const nodeWidth = 230;
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import { V1TaskStatus } from '@/lib/api';
|
||||
import { cn } from '@/lib/utils';
|
||||
import { CircleMinus } from 'lucide-react';
|
||||
|
||||
function createV2IndicatorVariant(eventType: V1TaskStatus | undefined) {
|
||||
switch (eventType) {
|
||||
@@ -20,9 +21,15 @@ function createV2IndicatorVariant(eventType: V1TaskStatus | undefined) {
|
||||
|
||||
export function V1RunIndicator({
|
||||
status,
|
||||
isSkipped,
|
||||
}: {
|
||||
status: V1TaskStatus | undefined;
|
||||
isSkipped?: boolean;
|
||||
}) {
|
||||
if (isSkipped) {
|
||||
return <CircleMinus className="h-3 w-3 text-muted-foreground" />;
|
||||
}
|
||||
|
||||
const indicator = createV2IndicatorVariant(status);
|
||||
|
||||
return <div className={cn(indicator, 'h-[6px] w-[6px] rounded-full')} />;
|
||||
|
||||
@@ -0,0 +1,29 @@
|
||||
import { queries, V1TaskEventType } from '@/lib/api';
|
||||
import { appRoutes } from '@/router';
|
||||
import { useQuery } from '@tanstack/react-query';
|
||||
import { useParams } from '@tanstack/react-router';
|
||||
|
||||
type UseIsTaskRunSkippedProps = {
|
||||
taskRunId: string;
|
||||
limit?: number;
|
||||
};
|
||||
|
||||
export const useIsTaskRunSkipped = ({
|
||||
taskRunId,
|
||||
limit = 50,
|
||||
}: UseIsTaskRunSkippedProps) => {
|
||||
const { tenant } = useParams({ from: appRoutes.tenantRoute.to });
|
||||
const eventsQuery = useQuery({
|
||||
...queries.v1TaskEvents.list(tenant, { limit, offset: 0 }, taskRunId),
|
||||
});
|
||||
|
||||
const isSkipped =
|
||||
eventsQuery.data?.rows?.some(
|
||||
(event) => event.eventType === V1TaskEventType.SKIPPED,
|
||||
) ?? false;
|
||||
|
||||
return {
|
||||
isSkipped,
|
||||
isLoading: eventsQuery.isLoading,
|
||||
};
|
||||
};
|
||||
@@ -60,11 +60,11 @@ require (
|
||||
go.opentelemetry.io/proto/otlp v1.9.0
|
||||
go.uber.org/goleak v1.3.0
|
||||
golang.org/x/time v0.14.0
|
||||
google.golang.org/api v0.265.0
|
||||
google.golang.org/api v0.266.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409
|
||||
k8s.io/api v0.35.0
|
||||
k8s.io/apimachinery v0.35.0
|
||||
k8s.io/client-go v0.35.0
|
||||
k8s.io/api v0.35.1
|
||||
k8s.io/apimachinery v0.35.1
|
||||
k8s.io/client-go v0.35.1
|
||||
sigs.k8s.io/yaml v1.6.0
|
||||
)
|
||||
|
||||
@@ -127,7 +127,7 @@ require (
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/s2a-go v0.1.9 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.11 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.16.0 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.17.0 // indirect
|
||||
github.com/gorilla/mux v1.8.1 // indirect
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 // indirect
|
||||
@@ -195,7 +195,7 @@ require (
|
||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect
|
||||
golang.org/x/term v0.39.0 // indirect
|
||||
golang.org/x/term v0.40.0 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
@@ -234,13 +234,13 @@ require (
|
||||
github.com/spf13/pflag v1.0.10 // indirect
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
golang.org/x/crypto v0.47.0
|
||||
golang.org/x/crypto v0.48.0
|
||||
golang.org/x/net v0.49.0 // indirect
|
||||
golang.org/x/oauth2 v0.34.0
|
||||
golang.org/x/oauth2 v0.35.0
|
||||
golang.org/x/sync v0.19.0
|
||||
golang.org/x/sys v0.40.0 // indirect
|
||||
golang.org/x/text v0.33.0
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 // indirect
|
||||
golang.org/x/sys v0.41.0 // indirect
|
||||
golang.org/x/text v0.34.0
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 // indirect
|
||||
google.golang.org/grpc v1.78.0
|
||||
google.golang.org/protobuf v1.36.11
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
|
||||
@@ -214,8 +214,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.11 h1:vAe81Msw+8tKUxi2Dqh/NZMz7475yUvmRIkXr4oN2ao=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.11/go.mod h1:RFV7MUdlb7AgEq2v7FmMCfeSMCllAzWxFgRdusoGks8=
|
||||
github.com/googleapis/gax-go/v2 v2.16.0 h1:iHbQmKLLZrexmb0OSsNGTeSTS0HO4YvFOG8g5E4Zd0Y=
|
||||
github.com/googleapis/gax-go/v2 v2.16.0/go.mod h1:o1vfQjjNZn4+dPnRdl/4ZD7S9414Y4xA+a/6Icj6l14=
|
||||
github.com/googleapis/gax-go/v2 v2.17.0 h1:RksgfBpxqff0EZkDWYuz9q/uWsTVz+kf43LsZ1J6SMc=
|
||||
github.com/googleapis/gax-go/v2 v2.17.0/go.mod h1:mzaqghpQp4JDh3HvADwrat+6M3MOIDp5YKHhb9PAgDY=
|
||||
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/gorilla/schema v1.4.1 h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E=
|
||||
@@ -518,22 +518,22 @@ go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
|
||||
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
|
||||
golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
|
||||
golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
|
||||
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o=
|
||||
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
|
||||
golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
|
||||
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
|
||||
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
|
||||
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
|
||||
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
|
||||
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||
golang.org/x/oauth2 v0.35.0 h1:Mv2mzuHuZuY2+bkyWXIHMfhNdJAdwW3FuWeCPYN5GVQ=
|
||||
golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -551,36 +551,36 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
|
||||
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
|
||||
golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
|
||||
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
|
||||
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg=
|
||||
golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
|
||||
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
|
||||
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
|
||||
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
|
||||
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
|
||||
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA=
|
||||
golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc=
|
||||
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
|
||||
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||
google.golang.org/api v0.265.0 h1:FZvfUdI8nfmuNrE34aOWFPmLC+qRBEiNm3JdivTvAAU=
|
||||
google.golang.org/api v0.265.0/go.mod h1:uAvfEl3SLUj/7n6k+lJutcswVojHPp2Sp08jWCu8hLY=
|
||||
google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 h1:GvESR9BIyHUahIb0NcTum6itIWtdoglGX+rnGxm2934=
|
||||
google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:yJ2HH4EHEDTd3JiLmhds6NkJ17ITVYOdV3m3VKOnws0=
|
||||
google.golang.org/api v0.266.0 h1:hco+oNCf9y7DmLeAtHJi/uBAY7n/7XC9mZPxu1ROiyk=
|
||||
google.golang.org/api v0.266.0/go.mod h1:Jzc0+ZfLnyvXma3UtaTl023TdhZu6OMBP9tJ+0EmFD0=
|
||||
google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 h1:VQZ/yAbAtjkHgH80teYd2em3xtIkkHd7ZhqfH2N9CsM=
|
||||
google.golang.org/genproto v0.0.0-20260128011058-8636f8732409/go.mod h1:rxKD3IEILWEu3P44seeNOAwZN4SaoKaQ/2eTg4mM6EM=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409/go.mod h1:fl8J1IvUjCilwZzQowmw2b7HQB2eAuYBabMXzWurF+I=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 h1:Jr5R2J6F6qWyzINc+4AM8t5pfUz6beZpHp678GNrMbE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
|
||||
google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
|
||||
google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
|
||||
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
|
||||
@@ -597,12 +597,12 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
|
||||
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
|
||||
k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY=
|
||||
k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA=
|
||||
k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8=
|
||||
k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
|
||||
k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE=
|
||||
k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o=
|
||||
k8s.io/api v0.35.1 h1:0PO/1FhlK/EQNVK5+txc4FuhQibV25VLSdLMmGpDE/Q=
|
||||
k8s.io/api v0.35.1/go.mod h1:28uR9xlXWml9eT0uaGo6y71xK86JBELShLy4wR1XtxM=
|
||||
k8s.io/apimachinery v0.35.1 h1:yxO6gV555P1YV0SANtnTjXYfiivaTPvCTKX6w6qdDsU=
|
||||
k8s.io/apimachinery v0.35.1/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
|
||||
k8s.io/client-go v0.35.1 h1:+eSfZHwuo/I19PaSxqumjqZ9l5XiTEKbIaJ+j1wLcLM=
|
||||
k8s.io/client-go v0.35.1/go.mod h1:1p1KxDt3a0ruRfc/pG4qT/3oHmUj1AhSHEcxNSGg+OA=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
|
||||
|
||||
@@ -5,6 +5,9 @@ const (
|
||||
MsgIDCancelTasks = "cancel-tasks"
|
||||
MsgIDCELEvaluationFailure = "cel-evaluation-failure"
|
||||
MsgIDCheckTenantQueue = "check-tenant-queue"
|
||||
MsgIDNewWorker = "new-worker"
|
||||
MsgIDNewQueue = "new-queue"
|
||||
MsgIDNewConcurrencyStrategy = "new-concurrency-strategy"
|
||||
MsgIDCreateMonitoringEvent = "create-monitoring-event"
|
||||
MsgIDCreatedDAG = "created-dag"
|
||||
MsgIDCreatedEventTrigger = "created-event-trigger"
|
||||
|
||||
@@ -228,7 +228,7 @@ func TestBufferedPubMessageQueueIntegration(t *testing.T) {
|
||||
}()
|
||||
|
||||
cleanupQueue, err := tq.Subscribe(staticQueue, func(receivedMessage *msgqueue.Message) error {
|
||||
for _ = range receivedMessage.Payloads {
|
||||
for range receivedMessage.Payloads {
|
||||
wg.Done()
|
||||
}
|
||||
|
||||
|
||||
@@ -44,11 +44,10 @@ type AdminServiceOpts struct {
|
||||
v validator.Validator
|
||||
localScheduler *scheduler.Scheduler
|
||||
localDispatcher *dispatcher.DispatcherImpl
|
||||
optimisticSchedulingEnabled bool
|
||||
l *zerolog.Logger
|
||||
|
||||
grpcTriggersEnabled bool
|
||||
grpcTriggerSlots int
|
||||
grpcTriggerSlots int
|
||||
optimisticSchedulingEnabled bool
|
||||
grpcTriggersEnabled bool
|
||||
}
|
||||
|
||||
func defaultAdminServiceOpts() *AdminServiceOpts {
|
||||
|
||||
@@ -11,6 +11,9 @@ import (
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
|
||||
tasktypes "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes/v1"
|
||||
|
||||
"github.com/hatchet-dev/hatchet/internal/msgqueue"
|
||||
"github.com/hatchet-dev/hatchet/internal/services/admin/contracts"
|
||||
"github.com/hatchet-dev/hatchet/pkg/client/types"
|
||||
v1 "github.com/hatchet-dev/hatchet/pkg/repository"
|
||||
@@ -55,6 +58,44 @@ func (a *AdminServiceImpl) PutWorkflow(ctx context.Context, req *contracts.PutWo
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// notify that a new set of queues have been created
|
||||
// important: this assumes that actions correspond 1:1 with queues, which they do at the moment
|
||||
// but might not in the future
|
||||
actions, err := getActionsForTasks(createOpts)
|
||||
|
||||
if tenant.SchedulerPartitionId.Valid && err == nil {
|
||||
go func() {
|
||||
notifyCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
for _, action := range actions {
|
||||
a.l.Debug().Msgf("notifying new queue for tenant %s and action %s", tenantId, action)
|
||||
|
||||
msg, err := tasktypes.NotifyNewQueue(tenantId, action)
|
||||
|
||||
if err != nil {
|
||||
a.l.Err(err).Msg("could not create message for notifying new queue")
|
||||
} else {
|
||||
err = a.mqv1.SendMessage(
|
||||
notifyCtx,
|
||||
msgqueue.QueueTypeFromPartitionIDAndController(tenant.SchedulerPartitionId.String, msgqueue.Scheduler),
|
||||
msg,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
a.l.Err(err).Msg("could not add message to scheduler partition queue")
|
||||
}
|
||||
|
||||
a.l.Debug().Msgf("notified new queue for tenant %s and action %s", tenantId, action)
|
||||
}
|
||||
}
|
||||
}()
|
||||
} else if err != nil {
|
||||
a.l.Warn().Err(err).Msgf("could not get actions for tasks for workflow version %s, skipping notifying new queues for tenant %s", currWorkflow.WorkflowVersion.ID.String(), tenantId)
|
||||
} else if !tenant.SchedulerPartitionId.Valid {
|
||||
a.l.Debug().Msgf("tenant %s does not have a valid scheduler partition id, skipping notifying new queues for workflow version %s", tenantId, currWorkflow.WorkflowVersion.ID.String())
|
||||
}
|
||||
|
||||
resp := toWorkflowVersion(currWorkflow)
|
||||
|
||||
return resp, nil
|
||||
@@ -453,3 +494,19 @@ func toWorkflowVersionLegacy(workflowVersion *sqlcv1.GetWorkflowVersionForEngine
|
||||
|
||||
return version
|
||||
}
|
||||
|
||||
func getActionsForTasks(createOpts *v1.CreateWorkflowVersionOpts) ([]string, error) {
|
||||
actions := make([]string, len(createOpts.Tasks))
|
||||
|
||||
for i, task := range createOpts.Tasks {
|
||||
parsedAction, err := types.ParseActionID(task.Action)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
actions[i] = parsedAction.String()
|
||||
}
|
||||
|
||||
return actions, nil
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
||||
"github.com/hatchet-dev/hatchet/internal/datautils"
|
||||
"github.com/hatchet-dev/hatchet/internal/msgqueue"
|
||||
"github.com/hatchet-dev/hatchet/internal/services/admin/contracts"
|
||||
|
||||
@@ -658,12 +658,60 @@ func (a *AdminServiceImpl) PutWorkflow(ctx context.Context, req *contracts.Creat
|
||||
},
|
||||
)
|
||||
|
||||
// notify that a new set of queues have been created
|
||||
// important: this assumes that actions correspond 1:1 with queues, which they do at the moment
|
||||
// but might not in the future
|
||||
actions, err := getActionsForTasks(req.Tasks)
|
||||
|
||||
if tenant.SchedulerPartitionId.Valid && err == nil {
|
||||
go func() {
|
||||
notifyCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
for _, action := range actions {
|
||||
msg, err := tasktypes.NotifyNewQueue(tenantId, action)
|
||||
|
||||
if err != nil {
|
||||
a.l.Err(err).Msg("could not create message for notifying new queue")
|
||||
} else {
|
||||
err = a.mq.SendMessage(
|
||||
notifyCtx,
|
||||
msgqueue.QueueTypeFromPartitionIDAndController(tenant.SchedulerPartitionId.String, msgqueue.Scheduler),
|
||||
msg,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
a.l.Err(err).Msg("could not add message to scheduler partition queue")
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
return &contracts.CreateWorkflowVersionResponse{
|
||||
Id: currWorkflow.WorkflowVersion.ID.String(),
|
||||
WorkflowId: currWorkflow.WorkflowVersion.WorkflowId.String(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getActionsForTasks(tasks []*contracts.CreateTaskOpts) ([]string, error) {
|
||||
actions := make([]string, len(tasks))
|
||||
|
||||
for i, task := range tasks {
|
||||
if task == nil {
|
||||
return nil, fmt.Errorf("task at index %d is nil", i)
|
||||
}
|
||||
|
||||
if task.Action == "" {
|
||||
return nil, fmt.Errorf("task at index %d is missing required field 'Action'", i)
|
||||
}
|
||||
|
||||
actions[i] = task.Action
|
||||
}
|
||||
|
||||
return actions, nil
|
||||
}
|
||||
|
||||
func getCreateWorkflowOpts(req *contracts.CreateWorkflowVersionRequest) (*v1.CreateWorkflowVersionOpts, error) {
|
||||
tasks, err := getCreateTaskOpts(req.Tasks, "DEFAULT")
|
||||
|
||||
|
||||
@@ -791,7 +791,26 @@ func (tc *TasksControllerImpl) handleReplayTasks(ctx context.Context, tenantId u
|
||||
taskIdRetryCounts := make([]tasktypes.TaskIdInsertedAtRetryCountWithExternalId, 0)
|
||||
|
||||
for _, msg := range msgs {
|
||||
opts := make([]v1.TaskIdInsertedAtRetryCount, len(msg.Tasks))
|
||||
|
||||
for i, task := range msg.Tasks {
|
||||
opts[i] = v1.TaskIdInsertedAtRetryCount{
|
||||
Id: task.Id,
|
||||
InsertedAt: task.InsertedAt,
|
||||
RetryCount: task.RetryCount,
|
||||
}
|
||||
}
|
||||
|
||||
validTasks, err := tc.repov1.Tasks().FilterValidTasks(ctx, tenantId, opts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to filter valid tasks for replay: %w", err)
|
||||
}
|
||||
|
||||
for _, task := range msg.Tasks {
|
||||
if _, ok := validTasks[task.Id]; !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
taskIdRetryCounts = append(taskIdRetryCounts, tasktypes.TaskIdInsertedAtRetryCountWithExternalId{
|
||||
TaskIdInsertedAtRetryCount: v1.TaskIdInsertedAtRetryCount{
|
||||
Id: task.Id,
|
||||
|
||||
@@ -15,10 +15,13 @@ import (
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
"github.com/hatchet-dev/hatchet/internal/msgqueue"
|
||||
"github.com/hatchet-dev/hatchet/internal/services/dispatcher/contracts"
|
||||
v1 "github.com/hatchet-dev/hatchet/pkg/repository"
|
||||
"github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
|
||||
"github.com/hatchet-dev/hatchet/pkg/telemetry"
|
||||
|
||||
tasktypes "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes/v1"
|
||||
)
|
||||
|
||||
func (s *DispatcherImpl) Register(ctx context.Context, request *contracts.WorkerRegisterRequest) (*contracts.WorkerRegisterResponse, error) {
|
||||
@@ -411,6 +414,33 @@ func (s *DispatcherImpl) Heartbeat(ctx context.Context, req *contracts.Heartbeat
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// if the worker doesn't have a previous heartbeat or hasn't heartbeat in 30 seconds, notify downstream components that a
|
||||
// new worker is available
|
||||
if !worker.LastHeartbeatAt.Valid || worker.LastHeartbeatAt.Time.Before(heartbeatAt.Add(-30*time.Second)) {
|
||||
if tenant.SchedulerPartitionId.Valid {
|
||||
go func() {
|
||||
notifyCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
msg, err := tasktypes.NotifyNewWorker(tenantId, worker.ID)
|
||||
|
||||
if err != nil {
|
||||
s.l.Err(err).Msg("could not create message for notifying new worker")
|
||||
} else {
|
||||
err = s.mqv1.SendMessage(
|
||||
notifyCtx,
|
||||
msgqueue.QueueTypeFromPartitionIDAndController(tenant.SchedulerPartitionId.String, msgqueue.Scheduler),
|
||||
msg,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
s.l.Err(err).Msg("could not add message to scheduler partition queue")
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
return &contracts.HeartbeatResponse{}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -312,8 +312,15 @@ func (s *Scheduler) handleTask(ctx context.Context, task *msgqueue.Message) (err
|
||||
}
|
||||
}()
|
||||
|
||||
if task.ID == msgqueue.MsgIDCheckTenantQueue {
|
||||
switch task.ID {
|
||||
case msgqueue.MsgIDCheckTenantQueue:
|
||||
return s.handleCheckQueue(ctx, task)
|
||||
case msgqueue.MsgIDNewWorker:
|
||||
return s.handleNewWorker(ctx, task)
|
||||
case msgqueue.MsgIDNewQueue:
|
||||
return s.handleNewQueue(ctx, task)
|
||||
case msgqueue.MsgIDNewConcurrencyStrategy:
|
||||
return s.handleNewConcurrencyStrategy(ctx, task)
|
||||
}
|
||||
|
||||
return fmt.Errorf("unknown task: %s", task.ID)
|
||||
@@ -342,6 +349,45 @@ func (s *Scheduler) handleCheckQueue(ctx context.Context, msg *msgqueue.Message)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Scheduler) handleNewWorker(ctx context.Context, msg *msgqueue.Message) error {
|
||||
ctx, span := telemetry.NewSpanWithCarrier(ctx, "handle-new-worker", msg.OtelCarrier)
|
||||
defer span.End()
|
||||
|
||||
payloads := msgqueue.JSONConvert[tasktypes.NewWorkerPayload](msg.Payloads)
|
||||
|
||||
for _, payload := range payloads {
|
||||
s.pool.NotifyNewWorker(ctx, msg.TenantID, payload.WorkerId)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Scheduler) handleNewQueue(ctx context.Context, msg *msgqueue.Message) error {
|
||||
ctx, span := telemetry.NewSpanWithCarrier(ctx, "handle-new-queue", msg.OtelCarrier)
|
||||
defer span.End()
|
||||
|
||||
payloads := msgqueue.JSONConvert[tasktypes.NewQueuePayload](msg.Payloads)
|
||||
|
||||
for _, payload := range payloads {
|
||||
s.pool.NotifyNewQueue(ctx, msg.TenantID, payload.QueueName)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Scheduler) handleNewConcurrencyStrategy(ctx context.Context, msg *msgqueue.Message) error {
|
||||
ctx, span := telemetry.NewSpanWithCarrier(ctx, "handle-new-concurrency-strategy", msg.OtelCarrier)
|
||||
defer span.End()
|
||||
|
||||
payloads := msgqueue.JSONConvert[tasktypes.NewConcurrencyStrategyPayload](msg.Payloads)
|
||||
|
||||
for _, payload := range payloads {
|
||||
s.pool.NotifyNewConcurrencyStrategy(ctx, msg.TenantID, payload.StrategyId)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Scheduler) runSetTenants(ctx context.Context) func() {
|
||||
return func() {
|
||||
s.l.Debug().Msgf("partition: checking step run requeue")
|
||||
|
||||
@@ -85,3 +85,57 @@ func NotifyTaskCreated(tenantId uuid.UUID, tasks []*v1.V1TaskWithPayload) (*msgq
|
||||
type TaskAssignedBulkTaskPayload struct {
|
||||
WorkerIdToTaskIds map[uuid.UUID][]int64 `json:"worker_id_to_task_id" validate:"required"`
|
||||
}
|
||||
|
||||
type NewWorkerPayload struct {
|
||||
WorkerId uuid.UUID `json:"worker_id"`
|
||||
}
|
||||
|
||||
func NotifyNewWorker(tenantId uuid.UUID, workerId uuid.UUID) (*msgqueue.Message, error) {
|
||||
payload := NewWorkerPayload{
|
||||
WorkerId: workerId,
|
||||
}
|
||||
|
||||
return msgqueue.NewTenantMessage(
|
||||
tenantId,
|
||||
msgqueue.MsgIDNewWorker,
|
||||
true,
|
||||
false,
|
||||
payload,
|
||||
)
|
||||
}
|
||||
|
||||
type NewQueuePayload struct {
|
||||
QueueName string `json:"queue_name"`
|
||||
}
|
||||
|
||||
func NotifyNewQueue(tenantId uuid.UUID, queueName string) (*msgqueue.Message, error) {
|
||||
payload := NewQueuePayload{
|
||||
QueueName: queueName,
|
||||
}
|
||||
|
||||
return msgqueue.NewTenantMessage(
|
||||
tenantId,
|
||||
msgqueue.MsgIDNewQueue,
|
||||
true,
|
||||
false,
|
||||
payload,
|
||||
)
|
||||
}
|
||||
|
||||
type NewConcurrencyStrategyPayload struct {
|
||||
StrategyId int64 `json:"strategy_id"`
|
||||
}
|
||||
|
||||
func NotifyNewConcurrencyStrategy(tenantId uuid.UUID, strategyId int64) (*msgqueue.Message, error) {
|
||||
payload := NewConcurrencyStrategyPayload{
|
||||
StrategyId: strategyId,
|
||||
}
|
||||
|
||||
return msgqueue.NewTenantMessage(
|
||||
tenantId,
|
||||
msgqueue.MsgIDNewConcurrencyStrategy,
|
||||
true,
|
||||
false,
|
||||
payload,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -39,7 +39,11 @@ type WorkflowRun struct {
|
||||
}
|
||||
|
||||
type AdminClient interface {
|
||||
// Deprecated: PutWorkflow is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
PutWorkflow(workflow *types.Workflow, opts ...PutOptFunc) error
|
||||
// Deprecated: PutWorkflowV1 is an internal method used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
PutWorkflowV1(workflow *v1contracts.CreateWorkflowVersionRequest, opts ...PutOptFunc) error
|
||||
|
||||
ScheduleWorkflow(workflowName string, opts ...ScheduleOptFunc) error
|
||||
@@ -105,6 +109,8 @@ func defaultPutOpts() *putOpts {
|
||||
return &putOpts{}
|
||||
}
|
||||
|
||||
// Deprecated: PutWorkflow is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (a *adminClientImpl) PutWorkflow(workflow *types.Workflow, fs ...PutOptFunc) error {
|
||||
opts := defaultPutOpts()
|
||||
|
||||
@@ -127,6 +133,8 @@ func (a *adminClientImpl) PutWorkflow(workflow *types.Workflow, fs ...PutOptFunc
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deprecated: PutWorkflowV1 is an internal method used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (a *adminClientImpl) PutWorkflowV1(workflow *v1contracts.CreateWorkflowVersionRequest, fs ...PutOptFunc) error {
|
||||
opts := defaultPutOpts()
|
||||
|
||||
|
||||
+26
-2
@@ -28,6 +28,8 @@ import (
|
||||
"github.com/hatchet-dev/hatchet/pkg/validator"
|
||||
)
|
||||
|
||||
// Deprecated: Client is an internal interface used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of using this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type Client interface {
|
||||
Admin() AdminClient
|
||||
Cron() CronClient
|
||||
@@ -69,10 +71,14 @@ type clientImpl struct {
|
||||
v validator.Validator
|
||||
}
|
||||
|
||||
// Deprecated: ClientOpt is an internal type used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of using this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type ClientOpt func(*ClientOpts)
|
||||
|
||||
type filesLoaderFunc func() []*types.Workflow
|
||||
|
||||
// Deprecated: ClientOpts is an internal type used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of using this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type ClientOpts struct {
|
||||
tenantId string
|
||||
l *zerolog.Logger
|
||||
@@ -142,7 +148,8 @@ func defaultClientOpts(token *string, cf *client.ClientConfigFile) *ClientOpts {
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: use WithLogger instead
|
||||
// Deprecated: WithLogLevel is an internal function used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func WithLogLevel(lvl string) ClientOpt {
|
||||
return func(opts *ClientOpts) {
|
||||
logger := logger.NewDefaultLogger("client")
|
||||
@@ -156,36 +163,48 @@ func WithLogLevel(lvl string) ClientOpt {
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: WithLogger is an internal function used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func WithLogger(l *zerolog.Logger) ClientOpt {
|
||||
return func(opts *ClientOpts) {
|
||||
opts.l = l
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: WithTenantId is an internal function used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func WithTenantId(tenantId string) ClientOpt {
|
||||
return func(opts *ClientOpts) {
|
||||
opts.tenantId = tenantId
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: WithHostPort is an internal function used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func WithHostPort(host string, port int) ClientOpt {
|
||||
return func(opts *ClientOpts) {
|
||||
opts.hostPort = fmt.Sprintf("%s:%d", host, port)
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: WithToken is an internal function used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func WithToken(token string) ClientOpt {
|
||||
return func(opts *ClientOpts) {
|
||||
opts.token = token
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: WithNamespace is an internal function used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func WithNamespace(namespace string) ClientOpt {
|
||||
return func(opts *ClientOpts) {
|
||||
opts.namespace = namespace + "_"
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: WithSharedMeta is an internal function used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func WithSharedMeta(meta map[string]string) ClientOpt {
|
||||
return func(opts *ClientOpts) {
|
||||
if opts.sharedMeta == nil {
|
||||
@@ -198,6 +217,8 @@ func WithSharedMeta(meta map[string]string) ClientOpt {
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: InitWorkflows is an internal function used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func InitWorkflows() ClientOpt {
|
||||
return func(opts *ClientOpts) {
|
||||
opts.initWorkflows = true
|
||||
@@ -213,7 +234,8 @@ type sharedClientOpts struct {
|
||||
sharedMeta map[string]string
|
||||
}
|
||||
|
||||
// New creates a new client instance.
|
||||
// Deprecated: New is an internal function used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func New(fs ...ClientOpt) (Client, error) {
|
||||
var token *string
|
||||
initOpts := &ClientOpts{}
|
||||
@@ -233,6 +255,8 @@ func New(fs ...ClientOpt) (Client, error) {
|
||||
return newFromOpts(opts)
|
||||
}
|
||||
|
||||
// Deprecated: NewFromConfigFile is an internal function used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func NewFromConfigFile(cf *client.ClientConfigFile, fs ...ClientOpt) (Client, error) {
|
||||
opts := defaultClientOpts(nil, cf)
|
||||
|
||||
|
||||
@@ -311,18 +311,20 @@ func (a *actionListenerImpl) Actions(ctx context.Context) (<-chan *Action, <-cha
|
||||
|
||||
// update the worker with a last heartbeat time every 4 seconds as long as the worker is connected
|
||||
go func() {
|
||||
heartbeatInterval := 4 * time.Second
|
||||
timer := time.NewTicker(100 * time.Millisecond)
|
||||
defer timer.Stop()
|
||||
|
||||
// set last heartbeat to 5 seconds ago so that the first heartbeat is sent immediately
|
||||
lastHeartbeat := time.Now().Add(-5 * time.Second)
|
||||
firstHeartbeat := true
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-timer.C:
|
||||
if now := time.Now().UTC(); lastHeartbeat.Add(4 * time.Second).Before(now) {
|
||||
if now := time.Now().UTC(); lastHeartbeat.Add(heartbeatInterval).Before(now) {
|
||||
a.l.Debug().Msgf("updating worker %s heartbeat", a.workerId)
|
||||
|
||||
_, err := a.client.Heartbeat(a.ctx.newContext(ctx), &dispatchercontracts.HeartbeatRequest{
|
||||
@@ -339,7 +341,21 @@ func (a *actionListenerImpl) Actions(ctx context.Context) (<-chan *Action, <-cha
|
||||
}
|
||||
}
|
||||
|
||||
lastHeartbeat = time.Now().UTC()
|
||||
// detect heartbeat delays caused by CPU contention or other scheduling issues,
|
||||
// but skip the first heartbeat since lastHeartbeat is artificially backdated
|
||||
if !firstHeartbeat {
|
||||
actualInterval := now.Sub(lastHeartbeat)
|
||||
// add 1 second to the heartbeat interval to account for the time it takes to send the heartbeat
|
||||
if actualInterval > heartbeatInterval+1*time.Second {
|
||||
a.l.Warn().Msgf(
|
||||
"worker %s heartbeat interval delay (%s >> %s), possible CPU resource contention",
|
||||
a.workerId, actualInterval.Round(time.Millisecond), heartbeatInterval+1*time.Second,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
firstHeartbeat = false
|
||||
lastHeartbeat = now
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -296,6 +296,7 @@ const (
|
||||
LINEAR V1WebhookSourceName = "LINEAR"
|
||||
SLACK V1WebhookSourceName = "SLACK"
|
||||
STRIPE V1WebhookSourceName = "STRIPE"
|
||||
SVIX V1WebhookSourceName = "SVIX"
|
||||
)
|
||||
|
||||
// Defines values for V1WorkflowType.
|
||||
|
||||
@@ -26,6 +26,8 @@ type Concurrency struct {
|
||||
LimitStrategy *WorkflowConcurrencyLimitStrategy `yaml:"limitStrategy,omitempty"`
|
||||
}
|
||||
|
||||
// Deprecated: Workflow is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type Workflow struct {
|
||||
Name string `yaml:"name,omitempty"`
|
||||
|
||||
@@ -66,6 +68,8 @@ type WorkflowConcurrency struct {
|
||||
ActionID *string `yaml:"action,omitempty"`
|
||||
}
|
||||
|
||||
// Deprecated: WorkflowTriggers is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type WorkflowTriggers struct {
|
||||
Events []string `yaml:"events,omitempty"`
|
||||
Cron []string `yaml:"crons,omitempty"`
|
||||
@@ -88,6 +92,8 @@ type WorkflowEvent struct {
|
||||
Name string `yaml:"name,omitempty"`
|
||||
}
|
||||
|
||||
// Deprecated: WorkflowJob is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type WorkflowJob struct {
|
||||
Description string `yaml:"description,omitempty"`
|
||||
|
||||
@@ -116,6 +122,8 @@ type DesiredWorkerLabel struct {
|
||||
Comparator *WorkerLabelComparator `yaml:"comparator,omitempty"`
|
||||
}
|
||||
|
||||
// Deprecated: WorkflowStep is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type WorkflowStep struct {
|
||||
Name string `yaml:"name,omitempty"`
|
||||
ID string `yaml:"id,omitempty"`
|
||||
@@ -149,6 +157,8 @@ type RateLimit struct {
|
||||
Duration *RateLimitDuration `yaml:"duration,omitempty"`
|
||||
}
|
||||
|
||||
// Deprecated: ParseYAML is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func ParseYAML(ctx context.Context, yamlBytes []byte) (Workflow, error) {
|
||||
var workflowFile Workflow
|
||||
|
||||
@@ -164,6 +174,8 @@ func ParseYAML(ctx context.Context, yamlBytes []byte) (Workflow, error) {
|
||||
return workflowFile, nil
|
||||
}
|
||||
|
||||
// Deprecated: ToYAML is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func ToYAML(ctx context.Context, workflow *Workflow) ([]byte, error) {
|
||||
var b bytes.Buffer
|
||||
yamlEncoder := yaml.NewEncoder(&b)
|
||||
|
||||
@@ -20,12 +20,12 @@ type LoggerConfigFile struct {
|
||||
}
|
||||
|
||||
type OpenTelemetryConfigFile struct {
|
||||
CollectorURL string `mapstructure:"collectorURL" json:"collectorURL,omitempty"`
|
||||
ServiceName string `mapstructure:"serviceName" json:"serviceName,omitempty" default:"server"`
|
||||
TraceIdRatio string `mapstructure:"traceIdRatio" json:"traceIdRatio,omitempty" default:"1"`
|
||||
Insecure bool `mapstructure:"insecure" json:"insecure,omitempty" default:"false"`
|
||||
CollectorAuth string `mapstructure:"collectorAuth" json:"collectorAuth,omitempty"`
|
||||
MetricsEnabled bool `mapstructure:"metricsEnabled" json:"metricsEnabled,omitempty" default:"false"`
|
||||
CollectorURL string `mapstructure:"collectorURL" json:"collectorURL,omitempty"`
|
||||
ServiceName string `mapstructure:"serviceName" json:"serviceName,omitempty" default:"server"`
|
||||
TraceIdRatio string `mapstructure:"traceIdRatio" json:"traceIdRatio,omitempty" default:"1"`
|
||||
Insecure bool `mapstructure:"insecure" json:"insecure,omitempty" default:"false"`
|
||||
CollectorAuth string `mapstructure:"collectorAuth" json:"collectorAuth,omitempty"`
|
||||
MetricsEnabled bool `mapstructure:"metricsEnabled" json:"metricsEnabled,omitempty" default:"false"`
|
||||
}
|
||||
|
||||
type PrometheusConfigFile struct {
|
||||
|
||||
@@ -19,7 +19,9 @@ type SchedulerRepository interface {
|
||||
type LeaseRepository interface {
|
||||
ListQueues(ctx context.Context, tenantId uuid.UUID) ([]*sqlcv1.V1Queue, error)
|
||||
ListActiveWorkers(ctx context.Context, tenantId uuid.UUID) ([]*ListActiveWorkersResult, error)
|
||||
GetActiveWorker(ctx context.Context, tenantId, workerId uuid.UUID) (*ListActiveWorkersResult, error)
|
||||
ListConcurrencyStrategies(ctx context.Context, tenantId uuid.UUID) ([]*sqlcv1.V1StepConcurrency, error)
|
||||
GetConcurrencyStrategy(ctx context.Context, tenantId uuid.UUID, id int64) (*sqlcv1.V1StepConcurrency, error)
|
||||
|
||||
AcquireOrExtendLeases(ctx context.Context, tenantId uuid.UUID, kind sqlcv1.LeaseKind, resourceIds []string, existingLeases []*sqlcv1.Lease) ([]*sqlcv1.Lease, error)
|
||||
ReleaseLeases(ctx context.Context, tenantId uuid.UUID, leases []*sqlcv1.Lease) error
|
||||
|
||||
@@ -135,26 +135,23 @@ func (d *leaseRepository) ListActiveWorkers(ctx context.Context, tenantId uuid.U
|
||||
return nil, err
|
||||
}
|
||||
|
||||
workerIdsToLabels := make(map[string][]*sqlcv1.ListManyWorkerLabelsRow, len(labels))
|
||||
workerIdsToLabels := make(map[uuid.UUID][]*sqlcv1.ListManyWorkerLabelsRow, len(labels))
|
||||
|
||||
for _, label := range labels {
|
||||
wId := label.WorkerId.String()
|
||||
|
||||
if _, ok := workerIdsToLabels[wId]; !ok {
|
||||
workerIdsToLabels[wId] = make([]*sqlcv1.ListManyWorkerLabelsRow, 0)
|
||||
if _, ok := workerIdsToLabels[label.WorkerId]; !ok {
|
||||
workerIdsToLabels[label.WorkerId] = make([]*sqlcv1.ListManyWorkerLabelsRow, 0)
|
||||
}
|
||||
|
||||
workerIdsToLabels[wId] = append(workerIdsToLabels[wId], label)
|
||||
workerIdsToLabels[label.WorkerId] = append(workerIdsToLabels[label.WorkerId], label)
|
||||
}
|
||||
|
||||
res := make([]*ListActiveWorkersResult, 0, len(activeWorkers))
|
||||
|
||||
for _, worker := range activeWorkers {
|
||||
wId := worker.ID.String()
|
||||
res = append(res, &ListActiveWorkersResult{
|
||||
ID: worker.ID,
|
||||
MaxRuns: int(worker.MaxRuns),
|
||||
Labels: workerIdsToLabels[wId],
|
||||
Labels: workerIdsToLabels[worker.ID],
|
||||
Name: worker.Name,
|
||||
})
|
||||
}
|
||||
@@ -162,9 +159,56 @@ func (d *leaseRepository) ListActiveWorkers(ctx context.Context, tenantId uuid.U
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (d *leaseRepository) GetActiveWorker(ctx context.Context, tenantId, workerId uuid.UUID) (*ListActiveWorkersResult, error) {
|
||||
ctx, span := telemetry.NewSpan(ctx, "get-active-worker")
|
||||
defer span.End()
|
||||
|
||||
worker, err := d.queries.GetActiveWorkerById(ctx, d.pool, sqlcv1.GetActiveWorkerByIdParams{
|
||||
Tenantid: tenantId,
|
||||
ID: workerId,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
labels, err := d.queries.ListManyWorkerLabels(ctx, d.pool, []uuid.UUID{workerId})
|
||||
|
||||
if err != nil && !errors.Is(err, pgx.ErrNoRows) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
workerIdsToLabels := make(map[uuid.UUID][]*sqlcv1.ListManyWorkerLabelsRow, len(labels))
|
||||
|
||||
for _, label := range labels {
|
||||
if _, ok := workerIdsToLabels[label.WorkerId]; !ok {
|
||||
workerIdsToLabels[label.WorkerId] = make([]*sqlcv1.ListManyWorkerLabelsRow, 0)
|
||||
}
|
||||
|
||||
workerIdsToLabels[label.WorkerId] = append(workerIdsToLabels[label.WorkerId], label)
|
||||
}
|
||||
|
||||
return &ListActiveWorkersResult{
|
||||
ID: worker.Worker.ID,
|
||||
MaxRuns: int(worker.Worker.MaxRuns),
|
||||
Labels: workerIdsToLabels[worker.Worker.ID],
|
||||
Name: worker.Worker.Name,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *leaseRepository) ListConcurrencyStrategies(ctx context.Context, tenantId uuid.UUID) ([]*sqlcv1.V1StepConcurrency, error) {
|
||||
ctx, span := telemetry.NewSpan(ctx, "list-queues")
|
||||
ctx, span := telemetry.NewSpan(ctx, "list-concurrency-strategies")
|
||||
defer span.End()
|
||||
|
||||
return d.queries.ListActiveConcurrencyStrategies(ctx, d.pool, tenantId)
|
||||
}
|
||||
|
||||
func (d *leaseRepository) GetConcurrencyStrategy(ctx context.Context, tenantId uuid.UUID, id int64) (*sqlcv1.V1StepConcurrency, error) {
|
||||
ctx, span := telemetry.NewSpan(ctx, "get-concurrency-strategy")
|
||||
defer span.End()
|
||||
|
||||
return d.queries.GetConcurrencyStrategyById(ctx, d.pool, sqlcv1.GetConcurrencyStrategyByIdParams{
|
||||
ID: id,
|
||||
Tenantid: tenantId,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -9,6 +9,15 @@ WHERE
|
||||
sc.tenant_id = @tenantId::uuid AND
|
||||
sc.is_active = TRUE;
|
||||
|
||||
-- name: GetConcurrencyStrategyById :one
|
||||
SELECT
|
||||
sc.*
|
||||
FROM
|
||||
v1_step_concurrency sc
|
||||
WHERE
|
||||
sc.tenant_id = @tenantId::uuid AND
|
||||
sc.id = @id::bigint;
|
||||
|
||||
-- name: ListConcurrencyStrategiesByWorkflowVersionId :many
|
||||
SELECT c.*, s."readableId" AS step_readable_id
|
||||
FROM v1_step_concurrency c
|
||||
|
||||
@@ -109,6 +109,39 @@ func (q *Queries) CheckStrategyActive(ctx context.Context, db DBTX, arg CheckStr
|
||||
return isActive, err
|
||||
}
|
||||
|
||||
const getConcurrencyStrategyById = `-- name: GetConcurrencyStrategyById :one
|
||||
SELECT
|
||||
sc.id, sc.parent_strategy_id, sc.workflow_id, sc.workflow_version_id, sc.step_id, sc.is_active, sc.strategy, sc.expression, sc.tenant_id, sc.max_concurrency
|
||||
FROM
|
||||
v1_step_concurrency sc
|
||||
WHERE
|
||||
sc.tenant_id = $1::uuid AND
|
||||
sc.id = $2::bigint
|
||||
`
|
||||
|
||||
type GetConcurrencyStrategyByIdParams struct {
|
||||
Tenantid uuid.UUID `json:"tenantid"`
|
||||
ID int64 `json:"id"`
|
||||
}
|
||||
|
||||
func (q *Queries) GetConcurrencyStrategyById(ctx context.Context, db DBTX, arg GetConcurrencyStrategyByIdParams) (*V1StepConcurrency, error) {
|
||||
row := db.QueryRow(ctx, getConcurrencyStrategyById, arg.Tenantid, arg.ID)
|
||||
var i V1StepConcurrency
|
||||
err := row.Scan(
|
||||
&i.ID,
|
||||
&i.ParentStrategyID,
|
||||
&i.WorkflowID,
|
||||
&i.WorkflowVersionID,
|
||||
&i.StepID,
|
||||
&i.IsActive,
|
||||
&i.Strategy,
|
||||
&i.Expression,
|
||||
&i.TenantID,
|
||||
&i.MaxConcurrency,
|
||||
)
|
||||
return &i, err
|
||||
}
|
||||
|
||||
const getWorkflowConcurrencyQueueCounts = `-- name: GetWorkflowConcurrencyQueueCounts :many
|
||||
SELECT
|
||||
w."name" AS "workflowName",
|
||||
|
||||
@@ -1314,6 +1314,7 @@ const (
|
||||
V1IncomingWebhookSourceNameSTRIPE V1IncomingWebhookSourceName = "STRIPE"
|
||||
V1IncomingWebhookSourceNameSLACK V1IncomingWebhookSourceName = "SLACK"
|
||||
V1IncomingWebhookSourceNameLINEAR V1IncomingWebhookSourceName = "LINEAR"
|
||||
V1IncomingWebhookSourceNameSVIX V1IncomingWebhookSourceName = "SVIX"
|
||||
)
|
||||
|
||||
func (e *V1IncomingWebhookSourceName) Scan(src interface{}) error {
|
||||
|
||||
@@ -1261,3 +1261,23 @@ FROM
|
||||
input
|
||||
RETURNING
|
||||
*;
|
||||
|
||||
-- name: FilterValidTasks :many
|
||||
WITH inputs AS (
|
||||
SELECT
|
||||
UNNEST(@taskIds::bigint[]) AS task_id,
|
||||
UNNEST(@taskInsertedAts::timestamptz[]) AS task_inserted_at,
|
||||
UNNEST(@taskRetryCounts::integer[]) AS task_retry_count
|
||||
)
|
||||
SELECT
|
||||
t.id
|
||||
FROM
|
||||
v1_task t
|
||||
JOIN "Step" s ON s."id" = t.step_id AND s."deletedAt" IS NULL
|
||||
WHERE
|
||||
(t.id, t.inserted_at, t.retry_count) IN (
|
||||
SELECT task_id, task_inserted_at, task_retry_count
|
||||
FROM inputs
|
||||
)
|
||||
AND t.tenant_id = @tenantId::uuid
|
||||
;
|
||||
|
||||
@@ -559,6 +559,58 @@ func (q *Queries) FailTaskInternalFailure(ctx context.Context, db DBTX, arg Fail
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const filterValidTasks = `-- name: FilterValidTasks :many
|
||||
WITH inputs AS (
|
||||
SELECT
|
||||
UNNEST($2::bigint[]) AS task_id,
|
||||
UNNEST($3::timestamptz[]) AS task_inserted_at,
|
||||
UNNEST($4::integer[]) AS task_retry_count
|
||||
)
|
||||
SELECT
|
||||
t.id
|
||||
FROM
|
||||
v1_task t
|
||||
JOIN "Step" s ON s."id" = t.step_id AND s."deletedAt" IS NULL
|
||||
WHERE
|
||||
(t.id, t.inserted_at, t.retry_count) IN (
|
||||
SELECT task_id, task_inserted_at, task_retry_count
|
||||
FROM inputs
|
||||
)
|
||||
AND t.tenant_id = $1::uuid
|
||||
`
|
||||
|
||||
type FilterValidTasksParams struct {
|
||||
Tenantid uuid.UUID `json:"tenantid"`
|
||||
Taskids []int64 `json:"taskids"`
|
||||
Taskinsertedats []pgtype.Timestamptz `json:"taskinsertedats"`
|
||||
Taskretrycounts []int32 `json:"taskretrycounts"`
|
||||
}
|
||||
|
||||
func (q *Queries) FilterValidTasks(ctx context.Context, db DBTX, arg FilterValidTasksParams) ([]int64, error) {
|
||||
rows, err := db.Query(ctx, filterValidTasks,
|
||||
arg.Tenantid,
|
||||
arg.Taskids,
|
||||
arg.Taskinsertedats,
|
||||
arg.Taskretrycounts,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []int64
|
||||
for rows.Next() {
|
||||
var id int64
|
||||
if err := rows.Scan(&id); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, id)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const findOldestRunningTask = `-- name: FindOldestRunningTask :one
|
||||
SELECT task_id, task_inserted_at, retry_count, worker_id, tenant_id, timeout_at
|
||||
FROM v1_task_runtime
|
||||
|
||||
@@ -71,6 +71,29 @@ LEFT JOIN
|
||||
WHERE
|
||||
w."id" = @id::uuid;
|
||||
|
||||
-- name: GetActiveWorkerById :one
|
||||
SELECT
|
||||
sqlc.embed(w),
|
||||
ww."url" AS "webhookUrl",
|
||||
w."maxRuns" - (
|
||||
SELECT COUNT(*)
|
||||
FROM v1_task_runtime runtime
|
||||
WHERE
|
||||
runtime.tenant_id = w."tenantId" AND
|
||||
runtime.worker_id = w."id"
|
||||
) AS "remainingSlots"
|
||||
FROM
|
||||
"Worker" w
|
||||
LEFT JOIN
|
||||
"WebhookWorker" ww ON w."webhookId" = ww."id"
|
||||
WHERE
|
||||
w."id" = @id::uuid
|
||||
AND w."tenantId" = @tenantId::uuid
|
||||
AND w."dispatcherId" IS NOT NULL
|
||||
AND w."lastHeartbeatAt" > NOW() - INTERVAL '5 seconds'
|
||||
AND w."isActive" = true
|
||||
AND w."isPaused" = false;
|
||||
|
||||
-- name: ListSemaphoreSlotsWithStateForWorker :many
|
||||
SELECT
|
||||
*
|
||||
@@ -158,6 +181,7 @@ SELECT
|
||||
w."id" AS "id",
|
||||
w."tenantId" AS "tenantId",
|
||||
w."dispatcherId" AS "dispatcherId",
|
||||
w."lastHeartbeatAt" AS "lastHeartbeatAt",
|
||||
d."lastHeartbeatAt" AS "dispatcherLastHeartbeatAt",
|
||||
w."isActive" AS "isActive",
|
||||
w."lastListenerEstablished" AS "lastListenerEstablished"
|
||||
|
||||
@@ -179,6 +179,70 @@ func (q *Queries) DeleteWorker(ctx context.Context, db DBTX, id uuid.UUID) (*Wor
|
||||
return &i, err
|
||||
}
|
||||
|
||||
const getActiveWorkerById = `-- name: GetActiveWorkerById :one
|
||||
SELECT
|
||||
w.id, w."createdAt", w."updatedAt", w."deletedAt", w."tenantId", w."lastHeartbeatAt", w.name, w."dispatcherId", w."maxRuns", w."isActive", w."lastListenerEstablished", w."isPaused", w.type, w."webhookId", w.language, w."languageVersion", w.os, w."runtimeExtra", w."sdkVersion",
|
||||
ww."url" AS "webhookUrl",
|
||||
w."maxRuns" - (
|
||||
SELECT COUNT(*)
|
||||
FROM v1_task_runtime runtime
|
||||
WHERE
|
||||
runtime.tenant_id = w."tenantId" AND
|
||||
runtime.worker_id = w."id"
|
||||
) AS "remainingSlots"
|
||||
FROM
|
||||
"Worker" w
|
||||
LEFT JOIN
|
||||
"WebhookWorker" ww ON w."webhookId" = ww."id"
|
||||
WHERE
|
||||
w."id" = $1::uuid
|
||||
AND w."tenantId" = $2::uuid
|
||||
AND w."dispatcherId" IS NOT NULL
|
||||
AND w."lastHeartbeatAt" > NOW() - INTERVAL '5 seconds'
|
||||
AND w."isActive" = true
|
||||
AND w."isPaused" = false
|
||||
`
|
||||
|
||||
type GetActiveWorkerByIdParams struct {
|
||||
ID uuid.UUID `json:"id"`
|
||||
Tenantid uuid.UUID `json:"tenantid"`
|
||||
}
|
||||
|
||||
type GetActiveWorkerByIdRow struct {
|
||||
Worker Worker `json:"worker"`
|
||||
WebhookUrl pgtype.Text `json:"webhookUrl"`
|
||||
RemainingSlots int32 `json:"remainingSlots"`
|
||||
}
|
||||
|
||||
func (q *Queries) GetActiveWorkerById(ctx context.Context, db DBTX, arg GetActiveWorkerByIdParams) (*GetActiveWorkerByIdRow, error) {
|
||||
row := db.QueryRow(ctx, getActiveWorkerById, arg.ID, arg.Tenantid)
|
||||
var i GetActiveWorkerByIdRow
|
||||
err := row.Scan(
|
||||
&i.Worker.ID,
|
||||
&i.Worker.CreatedAt,
|
||||
&i.Worker.UpdatedAt,
|
||||
&i.Worker.DeletedAt,
|
||||
&i.Worker.TenantId,
|
||||
&i.Worker.LastHeartbeatAt,
|
||||
&i.Worker.Name,
|
||||
&i.Worker.DispatcherId,
|
||||
&i.Worker.MaxRuns,
|
||||
&i.Worker.IsActive,
|
||||
&i.Worker.LastListenerEstablished,
|
||||
&i.Worker.IsPaused,
|
||||
&i.Worker.Type,
|
||||
&i.Worker.WebhookId,
|
||||
&i.Worker.Language,
|
||||
&i.Worker.LanguageVersion,
|
||||
&i.Worker.Os,
|
||||
&i.Worker.RuntimeExtra,
|
||||
&i.Worker.SdkVersion,
|
||||
&i.WebhookUrl,
|
||||
&i.RemainingSlots,
|
||||
)
|
||||
return &i, err
|
||||
}
|
||||
|
||||
const getWorkerActionsByWorkerId = `-- name: GetWorkerActionsByWorkerId :many
|
||||
WITH inputs AS (
|
||||
SELECT UNNEST($2::UUID[]) AS "workerId"
|
||||
@@ -284,6 +348,7 @@ SELECT
|
||||
w."id" AS "id",
|
||||
w."tenantId" AS "tenantId",
|
||||
w."dispatcherId" AS "dispatcherId",
|
||||
w."lastHeartbeatAt" AS "lastHeartbeatAt",
|
||||
d."lastHeartbeatAt" AS "dispatcherLastHeartbeatAt",
|
||||
w."isActive" AS "isActive",
|
||||
w."lastListenerEstablished" AS "lastListenerEstablished"
|
||||
@@ -305,6 +370,7 @@ type GetWorkerForEngineRow struct {
|
||||
ID uuid.UUID `json:"id"`
|
||||
TenantId uuid.UUID `json:"tenantId"`
|
||||
DispatcherId *uuid.UUID `json:"dispatcherId"`
|
||||
LastHeartbeatAt pgtype.Timestamp `json:"lastHeartbeatAt"`
|
||||
DispatcherLastHeartbeatAt pgtype.Timestamp `json:"dispatcherLastHeartbeatAt"`
|
||||
IsActive bool `json:"isActive"`
|
||||
LastListenerEstablished pgtype.Timestamp `json:"lastListenerEstablished"`
|
||||
@@ -317,6 +383,7 @@ func (q *Queries) GetWorkerForEngine(ctx context.Context, db DBTX, arg GetWorker
|
||||
&i.ID,
|
||||
&i.TenantId,
|
||||
&i.DispatcherId,
|
||||
&i.LastHeartbeatAt,
|
||||
&i.DispatcherLastHeartbeatAt,
|
||||
&i.IsActive,
|
||||
&i.LastListenerEstablished,
|
||||
|
||||
+33
-1
@@ -109,7 +109,7 @@ type ReplayTaskOpts struct {
|
||||
}
|
||||
|
||||
type TaskIdInsertedAtRetryCount struct {
|
||||
// (required) the external id
|
||||
// (required) the id
|
||||
Id int64 `validate:"required"`
|
||||
|
||||
// (required) the inserted at time
|
||||
@@ -272,6 +272,8 @@ type TaskRepository interface {
|
||||
|
||||
// run "details" getter, used for retrieving payloads and status of a run for external consumption without going through the REST API
|
||||
GetWorkflowRunResultDetails(ctx context.Context, tenantId uuid.UUID, externalId uuid.UUID) (*WorkflowRunDetails, error)
|
||||
|
||||
FilterValidTasks(ctx context.Context, tenantId uuid.UUID, opts []TaskIdInsertedAtRetryCount) (map[int64]struct{}, error)
|
||||
}
|
||||
|
||||
type TaskRepositoryImpl struct {
|
||||
@@ -4177,3 +4179,33 @@ func (r *TaskRepositoryImpl) GetWorkflowRunResultDetails(ctx context.Context, te
|
||||
AdditionalMetadata: additionalMeta,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (r *TaskRepositoryImpl) FilterValidTasks(ctx context.Context, tenantId uuid.UUID, opts []TaskIdInsertedAtRetryCount) (map[int64]struct{}, error) {
|
||||
res := make(map[int64]struct{})
|
||||
|
||||
taskIds := make([]int64, len(opts))
|
||||
taskInsertedAts := make([]pgtype.Timestamptz, len(opts))
|
||||
taskRetryCounts := make([]int32, len(opts))
|
||||
|
||||
for i, opt := range opts {
|
||||
taskIds[i] = opt.Id
|
||||
taskInsertedAts[i] = opt.InsertedAt
|
||||
taskRetryCounts[i] = opt.RetryCount
|
||||
}
|
||||
|
||||
taskIds, err := r.queries.FilterValidTasks(ctx, r.pool, sqlcv1.FilterValidTasksParams{
|
||||
Tenantid: tenantId,
|
||||
Taskids: taskIds,
|
||||
Taskinsertedats: taskInsertedAts,
|
||||
Taskretrycounts: taskRetryCounts,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, taskId := range taskIds {
|
||||
res[taskId] = struct{}{}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
@@ -757,6 +757,15 @@ func (r *workflowRepository) createJobTx(ctx context.Context, tx sqlcv1.DBTX, te
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// upsert the queue based on the action
|
||||
// note: we don't use the postCommit func, it just sets the queue in the cache which is not necessary for writing a
|
||||
// workflow version, only when we're inserting a bunch of tasks for that queue
|
||||
_, err = r.upsertQueues(ctx, tx, tenantId, []string{createStepParams.Actionid})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(stepOpts.DesiredWorkerLabels) > 0 {
|
||||
for i := range stepOpts.DesiredWorkerLabels {
|
||||
key := (stepOpts.DesiredWorkerLabels)[i].Key
|
||||
|
||||
@@ -2,6 +2,7 @@ package v1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -9,6 +10,8 @@ import (
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/jackc/pgx/v5"
|
||||
|
||||
v1 "github.com/hatchet-dev/hatchet/pkg/repository"
|
||||
"github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
|
||||
)
|
||||
@@ -22,23 +25,26 @@ type LeaseManager struct {
|
||||
|
||||
tenantId uuid.UUID
|
||||
|
||||
workerLeases []*sqlcv1.Lease
|
||||
workersCh chan<- []*v1.ListActiveWorkersResult
|
||||
workerLeasesMu sync.Mutex
|
||||
workerLeases []*sqlcv1.Lease
|
||||
workersCh notifierCh[*v1.ListActiveWorkersResult]
|
||||
|
||||
queueLeases []*sqlcv1.Lease
|
||||
queuesCh chan<- []string
|
||||
queueLeasesMu sync.Mutex
|
||||
queueLeases []*sqlcv1.Lease
|
||||
queuesCh notifierCh[string]
|
||||
|
||||
concurrencyLeasesMu sync.Mutex
|
||||
concurrencyLeases []*sqlcv1.Lease
|
||||
concurrencyLeasesCh chan<- []*sqlcv1.V1StepConcurrency
|
||||
concurrencyLeasesCh notifierCh[*sqlcv1.V1StepConcurrency]
|
||||
|
||||
cleanedUp bool
|
||||
processMu sync.Mutex
|
||||
processMu sync.RWMutex
|
||||
}
|
||||
|
||||
func newLeaseManager(conf *sharedConfig, tenantId uuid.UUID) (*LeaseManager, <-chan []*v1.ListActiveWorkersResult, <-chan []string, <-chan []*sqlcv1.V1StepConcurrency) {
|
||||
workersCh := make(chan []*v1.ListActiveWorkersResult)
|
||||
queuesCh := make(chan []string)
|
||||
concurrencyLeasesCh := make(chan []*sqlcv1.V1StepConcurrency)
|
||||
func newLeaseManager(conf *sharedConfig, tenantId uuid.UUID) (*LeaseManager, notifierCh[*v1.ListActiveWorkersResult], notifierCh[string], notifierCh[*sqlcv1.V1StepConcurrency]) {
|
||||
workersCh := make(notifierCh[*v1.ListActiveWorkersResult])
|
||||
queuesCh := make(notifierCh[string])
|
||||
concurrencyLeasesCh := make(notifierCh[*sqlcv1.V1StepConcurrency])
|
||||
|
||||
return &LeaseManager{
|
||||
lr: conf.repo.Lease(),
|
||||
@@ -50,61 +56,65 @@ func newLeaseManager(conf *sharedConfig, tenantId uuid.UUID) (*LeaseManager, <-c
|
||||
}, workersCh, queuesCh, concurrencyLeasesCh
|
||||
}
|
||||
|
||||
func (l *LeaseManager) sendWorkerIds(workerIds []*v1.ListActiveWorkersResult) {
|
||||
func (l *LeaseManager) sendWorkerIds(workerIds []*v1.ListActiveWorkersResult, isIncremental bool) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
l.conf.l.Error().Interface("recovered", r).Msg("recovered from panic")
|
||||
}
|
||||
}()
|
||||
|
||||
// at this point, we have a cleanupMu lock, so it's safe to read
|
||||
if l.cleanedUp {
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case l.workersCh <- workerIds:
|
||||
case l.workersCh <- notifierMsg[*v1.ListActiveWorkersResult]{
|
||||
items: workerIds,
|
||||
isIncremental: isIncremental,
|
||||
}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (l *LeaseManager) sendQueues(queues []string) {
|
||||
func (l *LeaseManager) sendQueues(queues []string, isIncremental bool) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
l.conf.l.Error().Interface("recovered", r).Msg("recovered from panic")
|
||||
}
|
||||
}()
|
||||
|
||||
// at this point, we have a cleanupMu lock, so it's safe to read
|
||||
if l.cleanedUp {
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case l.queuesCh <- queues:
|
||||
case l.queuesCh <- notifierMsg[string]{
|
||||
items: queues,
|
||||
isIncremental: isIncremental,
|
||||
}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (l *LeaseManager) sendConcurrencyLeases(concurrencyLeases []*sqlcv1.V1StepConcurrency) {
|
||||
func (l *LeaseManager) sendConcurrencyLeases(concurrencyLeases []*sqlcv1.V1StepConcurrency, isIncremental bool) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
l.conf.l.Error().Interface("recovered", r).Msg("recovered from panic")
|
||||
}
|
||||
}()
|
||||
|
||||
// at this point, we have a cleanupMu lock, so it's safe to read
|
||||
if l.cleanedUp {
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case l.concurrencyLeasesCh <- concurrencyLeases:
|
||||
case l.concurrencyLeasesCh <- notifierMsg[*sqlcv1.V1StepConcurrency]{
|
||||
items: concurrencyLeases,
|
||||
isIncremental: isIncremental,
|
||||
}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (l *LeaseManager) acquireWorkerLeases(ctx context.Context) error {
|
||||
l.processMu.RLock()
|
||||
defer l.processMu.RUnlock()
|
||||
|
||||
if l.cleanedUp {
|
||||
return nil
|
||||
}
|
||||
|
||||
l.workerLeasesMu.Lock()
|
||||
defer l.workerLeasesMu.Unlock()
|
||||
|
||||
activeWorkers, err := l.lr.ListActiveWorkers(ctx, l.tenantId)
|
||||
|
||||
if err != nil {
|
||||
@@ -154,7 +164,7 @@ func (l *LeaseManager) acquireWorkerLeases(ctx context.Context) error {
|
||||
}
|
||||
}
|
||||
|
||||
l.sendWorkerIds(successfullyAcquiredWorkerIds)
|
||||
l.sendWorkerIds(successfullyAcquiredWorkerIds, false)
|
||||
|
||||
if len(leasesToRelease) != 0 {
|
||||
if err := l.lr.ReleaseLeases(ctx, l.tenantId, leasesToRelease); err != nil {
|
||||
@@ -165,7 +175,69 @@ func (l *LeaseManager) acquireWorkerLeases(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *LeaseManager) notifyNewWorker(ctx context.Context, workerId uuid.UUID) error {
|
||||
l.processMu.RLock()
|
||||
defer l.processMu.RUnlock()
|
||||
|
||||
if l.cleanedUp {
|
||||
return nil
|
||||
}
|
||||
|
||||
if !l.workerLeasesMu.TryLock() {
|
||||
return nil
|
||||
}
|
||||
|
||||
defer l.workerLeasesMu.Unlock()
|
||||
|
||||
// check that we don't already have a lease for this worker
|
||||
for _, lease := range l.workerLeases {
|
||||
if lease.ResourceId == workerId.String() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
|
||||
defer cancel()
|
||||
|
||||
worker, err := l.lr.GetActiveWorker(ctx, l.tenantId, workerId)
|
||||
|
||||
if err != nil {
|
||||
// if the worker isn't active yet, just abort
|
||||
if errors.Is(err, pgx.ErrNoRows) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
lease, err := l.lr.AcquireOrExtendLeases(ctx, l.tenantId, sqlcv1.LeaseKindWORKER, []string{workerId.String()}, []*sqlcv1.Lease{})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(lease) == 0 || lease[0].ResourceId == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
l.workerLeases = append(l.workerLeases, lease...)
|
||||
|
||||
l.sendWorkerIds([]*v1.ListActiveWorkersResult{worker}, true)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *LeaseManager) acquireQueueLeases(ctx context.Context) error {
|
||||
l.processMu.RLock()
|
||||
defer l.processMu.RUnlock()
|
||||
|
||||
if l.cleanedUp {
|
||||
return nil
|
||||
}
|
||||
|
||||
l.queueLeasesMu.Lock()
|
||||
defer l.queueLeasesMu.Unlock()
|
||||
|
||||
queues, err := l.lr.ListQueues(ctx, l.tenantId)
|
||||
|
||||
if err != nil {
|
||||
@@ -212,7 +284,7 @@ func (l *LeaseManager) acquireQueueLeases(ctx context.Context) error {
|
||||
}
|
||||
}
|
||||
|
||||
l.sendQueues(successfullyAcquiredQueues)
|
||||
l.sendQueues(successfullyAcquiredQueues, false)
|
||||
|
||||
if len(leasesToRelease) != 0 {
|
||||
if err := l.lr.ReleaseLeases(ctx, l.tenantId, leasesToRelease); err != nil {
|
||||
@@ -223,7 +295,65 @@ func (l *LeaseManager) acquireQueueLeases(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *LeaseManager) notifyNewQueue(ctx context.Context, queueName string) error {
|
||||
l.conf.l.Debug().Msgf("[notifyNewQueue] notifying new queue %s for tenant %s", queueName, l.tenantId)
|
||||
|
||||
l.processMu.RLock()
|
||||
defer l.processMu.RUnlock()
|
||||
|
||||
if l.cleanedUp {
|
||||
l.conf.l.Debug().Msgf("[notifyNewQueue] lease manager already cleaned up, skipping notifying new queue %s for tenant %s", queueName, l.tenantId)
|
||||
return nil
|
||||
}
|
||||
|
||||
if !l.queueLeasesMu.TryLock() {
|
||||
l.conf.l.Debug().Msgf("[notifyNewQueue] could not acquire queueLeasesMu, skipping notifying new queue %s for tenant %s", queueName, l.tenantId)
|
||||
return nil
|
||||
}
|
||||
|
||||
defer l.queueLeasesMu.Unlock()
|
||||
|
||||
// check that we don't already have a lease for this queue
|
||||
for _, lease := range l.queueLeases {
|
||||
if lease.ResourceId == queueName {
|
||||
l.conf.l.Debug().Msgf("[notifyNewQueue] already have lease for queue %s for tenant %s, skipping", queueName, l.tenantId)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
|
||||
defer cancel()
|
||||
|
||||
lease, err := l.lr.AcquireOrExtendLeases(ctx, l.tenantId, sqlcv1.LeaseKindQUEUE, []string{queueName}, []*sqlcv1.Lease{})
|
||||
|
||||
if err != nil {
|
||||
l.conf.l.Debug().Err(err).Msgf("[notifyNewQueue] error acquiring lease for queue %s for tenant %s", queueName, l.tenantId)
|
||||
return err
|
||||
}
|
||||
|
||||
if len(lease) == 0 || lease[0].ResourceId == "" {
|
||||
l.conf.l.Debug().Msgf("[notifyNewQueue] did not acquire lease for queue %s for tenant %s, skipping", queueName, l.tenantId)
|
||||
return nil
|
||||
}
|
||||
|
||||
l.queueLeases = append(l.queueLeases, lease...)
|
||||
|
||||
l.sendQueues([]string{queueName}, true)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *LeaseManager) acquireConcurrencyLeases(ctx context.Context) error {
|
||||
l.processMu.RLock()
|
||||
defer l.processMu.RUnlock()
|
||||
|
||||
if l.cleanedUp {
|
||||
return nil
|
||||
}
|
||||
|
||||
l.concurrencyLeasesMu.Lock()
|
||||
defer l.concurrencyLeasesMu.Unlock()
|
||||
|
||||
strats, err := l.lr.ListConcurrencyStrategies(ctx, l.tenantId)
|
||||
|
||||
if err != nil {
|
||||
@@ -274,7 +404,7 @@ func (l *LeaseManager) acquireConcurrencyLeases(ctx context.Context) error {
|
||||
}
|
||||
}
|
||||
|
||||
l.sendConcurrencyLeases(successfullyAcquiredStrats)
|
||||
l.sendConcurrencyLeases(successfullyAcquiredStrats, false)
|
||||
|
||||
if len(leasesToRelease) != 0 {
|
||||
if err := l.lr.ReleaseLeases(ctx, l.tenantId, leasesToRelease); err != nil {
|
||||
@@ -285,18 +415,63 @@ func (l *LeaseManager) acquireConcurrencyLeases(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// loopForLeases acquires new leases every 1 second for workers and queues
|
||||
func (l *LeaseManager) notifyNewConcurrencyStrategy(ctx context.Context, strategyId int64) error {
|
||||
l.processMu.RLock()
|
||||
defer l.processMu.RUnlock()
|
||||
|
||||
if l.cleanedUp {
|
||||
return nil
|
||||
}
|
||||
|
||||
if !l.concurrencyLeasesMu.TryLock() {
|
||||
return nil
|
||||
}
|
||||
|
||||
defer l.concurrencyLeasesMu.Unlock()
|
||||
|
||||
// check that we don't already have a lease for this concurrency strategy
|
||||
for _, lease := range l.concurrencyLeases {
|
||||
if lease.ResourceId == fmt.Sprintf("%d", strategyId) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
|
||||
defer cancel()
|
||||
|
||||
strategy, err := l.lr.GetConcurrencyStrategy(ctx, l.tenantId, strategyId)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lease, err := l.lr.AcquireOrExtendLeases(ctx, l.tenantId, sqlcv1.LeaseKindCONCURRENCYSTRATEGY, []string{fmt.Sprintf("%d", strategyId)}, []*sqlcv1.Lease{})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(lease) == 0 || lease[0].ResourceId == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
l.concurrencyLeases = append(l.concurrencyLeases, lease...)
|
||||
|
||||
// send the new concurrency strategy to the channel
|
||||
l.sendConcurrencyLeases([]*sqlcv1.V1StepConcurrency{strategy}, true)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// loopForLeases acquires new leases every 5 seconds for workers, queues, and concurrency strategies
|
||||
func (l *LeaseManager) loopForLeases(ctx context.Context) {
|
||||
ticker := time.NewTicker(1 * time.Second)
|
||||
ticker := time.NewTicker(5 * time.Second)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
// we acquire a processMu lock here to prevent cleanup from occurring simultaneously
|
||||
l.processMu.Lock()
|
||||
|
||||
// we don't want to block the cleanup process, so we use a separate context with a timeout
|
||||
loopCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
|
||||
@@ -331,7 +506,6 @@ func (l *LeaseManager) loopForLeases(ctx context.Context) {
|
||||
wg.Wait()
|
||||
|
||||
cancel()
|
||||
l.processMu.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -50,6 +50,16 @@ func (m *mockLeaseRepo) ReleaseLeases(ctx context.Context, tenantId uuid.UUID, l
|
||||
return args.Error(0)
|
||||
}
|
||||
|
||||
func (m *mockLeaseRepo) GetActiveWorker(ctx context.Context, tenantId, workerId uuid.UUID) (*v1.ListActiveWorkersResult, error) {
|
||||
args := m.Called(ctx, tenantId, workerId)
|
||||
return args.Get(0).(*v1.ListActiveWorkersResult), args.Error(1)
|
||||
}
|
||||
|
||||
func (m *mockLeaseRepo) GetConcurrencyStrategy(ctx context.Context, tenantId uuid.UUID, id int64) (*sqlcv1.V1StepConcurrency, error) {
|
||||
args := m.Called(ctx, tenantId, id)
|
||||
return args.Get(0).(*sqlcv1.V1StepConcurrency), args.Error(1)
|
||||
}
|
||||
|
||||
func TestLeaseManager_AcquireWorkerLeases(t *testing.T) {
|
||||
l := zerolog.Nop()
|
||||
tenantId := uuid.UUID{}
|
||||
@@ -106,7 +116,7 @@ func TestLeaseManager_AcquireQueueLeases(t *testing.T) {
|
||||
|
||||
func TestLeaseManager_SendWorkerIds(t *testing.T) {
|
||||
tenantId := uuid.UUID{}
|
||||
workersCh := make(chan []*v1.ListActiveWorkersResult)
|
||||
workersCh := make(notifierCh[*v1.ListActiveWorkersResult])
|
||||
leaseManager := &LeaseManager{
|
||||
tenantId: tenantId,
|
||||
workersCh: workersCh,
|
||||
@@ -116,15 +126,15 @@ func TestLeaseManager_SendWorkerIds(t *testing.T) {
|
||||
{ID: uuid.New(), Labels: nil},
|
||||
}
|
||||
|
||||
go leaseManager.sendWorkerIds(mockWorkers)
|
||||
go leaseManager.sendWorkerIds(mockWorkers, false)
|
||||
|
||||
result := <-workersCh
|
||||
assert.Equal(t, mockWorkers, result)
|
||||
assert.Equal(t, mockWorkers, result.items)
|
||||
}
|
||||
|
||||
func TestLeaseManager_SendQueues(t *testing.T) {
|
||||
tenantId := uuid.UUID{}
|
||||
queuesCh := make(chan []string)
|
||||
queuesCh := make(notifierCh[string])
|
||||
leaseManager := &LeaseManager{
|
||||
tenantId: tenantId,
|
||||
queuesCh: queuesCh,
|
||||
@@ -132,15 +142,15 @@ func TestLeaseManager_SendQueues(t *testing.T) {
|
||||
|
||||
mockQueues := []string{"queue-1", "queue-2"}
|
||||
|
||||
go leaseManager.sendQueues(mockQueues)
|
||||
go leaseManager.sendQueues(mockQueues, false)
|
||||
|
||||
result := <-queuesCh
|
||||
assert.Equal(t, mockQueues, result)
|
||||
assert.Equal(t, mockQueues, result.items)
|
||||
}
|
||||
|
||||
func TestLeaseManager_AcquireWorkersBeforeListenerReady(t *testing.T) {
|
||||
tenantId := uuid.UUID{}
|
||||
workersCh := make(chan []*v1.ListActiveWorkersResult)
|
||||
workersCh := make(notifierCh[*v1.ListActiveWorkersResult])
|
||||
leaseManager := &LeaseManager{
|
||||
tenantId: tenantId,
|
||||
workersCh: workersCh,
|
||||
@@ -155,14 +165,15 @@ func TestLeaseManager_AcquireWorkersBeforeListenerReady(t *testing.T) {
|
||||
}
|
||||
|
||||
// Send workers before listener is ready
|
||||
go leaseManager.sendWorkerIds(mockWorkers1)
|
||||
go leaseManager.sendWorkerIds(mockWorkers1, false)
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
resultCh := make(chan []*v1.ListActiveWorkersResult)
|
||||
go func() {
|
||||
resultCh <- <-workersCh
|
||||
msg := <-workersCh
|
||||
resultCh <- msg.items
|
||||
}()
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
go leaseManager.sendWorkerIds(mockWorkers2)
|
||||
go leaseManager.sendWorkerIds(mockWorkers2, false)
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Ensure only the latest workers are sent over the channel
|
||||
|
||||
@@ -186,6 +186,24 @@ func (p *SchedulingPool) NotifyConcurrency(ctx context.Context, tenantId uuid.UU
|
||||
}
|
||||
}
|
||||
|
||||
func (p *SchedulingPool) NotifyNewWorker(ctx context.Context, tenantId uuid.UUID, workerId uuid.UUID) {
|
||||
if tm := p.getTenantManager(tenantId, false); tm != nil {
|
||||
tm.notifyNewWorker(ctx, workerId)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *SchedulingPool) NotifyNewQueue(ctx context.Context, tenantId uuid.UUID, queueName string) {
|
||||
if tm := p.getTenantManager(tenantId, false); tm != nil {
|
||||
tm.notifyNewQueue(ctx, queueName)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *SchedulingPool) NotifyNewConcurrencyStrategy(ctx context.Context, tenantId uuid.UUID, strategyId int64) {
|
||||
if tm := p.getTenantManager(tenantId, false); tm != nil {
|
||||
tm.notifyNewConcurrencyStrategy(ctx, strategyId)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *SchedulingPool) getTenantManager(tenantId uuid.UUID, storeIfNotFound bool) *tenantManager {
|
||||
tm, ok := p.tenants.Load(tenantId)
|
||||
|
||||
|
||||
@@ -121,6 +121,8 @@ func (q *Queuer) queue(ctx context.Context) {
|
||||
func (q *Queuer) loopQueue(ctx context.Context) {
|
||||
ticker := time.NewTicker(1 * time.Second)
|
||||
|
||||
q.l.Debug().Msgf("starting queue loop for tenant %s and queue %s with limit %d", q.tenantId, q.queueName, q.limit)
|
||||
|
||||
for {
|
||||
var carrier map[string]string
|
||||
|
||||
@@ -131,6 +133,8 @@ func (q *Queuer) loopQueue(ctx context.Context) {
|
||||
case carrier = <-q.notifyQueueCh:
|
||||
}
|
||||
|
||||
q.l.Debug().Msgf("queue loop tick for tenant %s and queue %s", q.tenantId, q.queueName)
|
||||
|
||||
prometheus.QueueInvocations.Inc()
|
||||
prometheus.TenantQueueInvocations.WithLabelValues(q.tenantId.String()).Inc()
|
||||
|
||||
@@ -162,6 +166,8 @@ func (q *Queuer) loopQueue(ctx context.Context) {
|
||||
continue
|
||||
}
|
||||
|
||||
q.l.Debug().Int("refilled_items", len(qis)).Msgf("refilled queue for tenant %s and queue %s", q.tenantId, q.queueName)
|
||||
|
||||
// NOTE: we don't terminate early out of this loop because calling `tryAssign` is necessary
|
||||
// for calling the scheduling extensions.
|
||||
|
||||
|
||||
@@ -57,6 +57,7 @@ func newScheduler(cf *sharedConfig, tenantId uuid.UUID, rl *rateLimiter, exts *E
|
||||
rl: rl,
|
||||
actionsMu: newRWMu(cf.l),
|
||||
replenishMu: newMu(cf.l),
|
||||
workers: map[uuid.UUID]*worker{},
|
||||
workersMu: newMu(cf.l),
|
||||
assignedCountMu: newMu(cf.l),
|
||||
unackedMu: newMu(cf.l),
|
||||
@@ -103,11 +104,26 @@ func (s *Scheduler) setWorkers(workers []*v1.ListActiveWorkersResult) {
|
||||
s.workers = newWorkers
|
||||
}
|
||||
|
||||
func (s *Scheduler) getWorkers() map[uuid.UUID]*worker {
|
||||
func (s *Scheduler) addWorker(newWorker *v1.ListActiveWorkersResult) {
|
||||
s.workersMu.Lock()
|
||||
defer s.workersMu.Unlock()
|
||||
|
||||
return s.workers
|
||||
s.workers[newWorker.ID] = &worker{
|
||||
ListActiveWorkersResult: newWorker,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Scheduler) copyWorkers() map[uuid.UUID]*worker {
|
||||
s.workersMu.Lock()
|
||||
defer s.workersMu.Unlock()
|
||||
|
||||
copied := make(map[uuid.UUID]*worker, len(s.workers))
|
||||
|
||||
for k, v := range s.workers {
|
||||
copied[k] = v
|
||||
}
|
||||
|
||||
return copied
|
||||
}
|
||||
|
||||
// replenish loads new slots from the database.
|
||||
@@ -135,7 +151,7 @@ func (s *Scheduler) replenish(ctx context.Context, mustReplenish bool) error {
|
||||
|
||||
s.l.Debug().Msg("replenishing slots")
|
||||
|
||||
workers := s.getWorkers()
|
||||
workers := s.copyWorkers()
|
||||
workerIds := make([]uuid.UUID, 0)
|
||||
|
||||
for workerId := range workers {
|
||||
@@ -863,7 +879,7 @@ func (s *Scheduler) getSnapshotInput(mustSnapshot bool) (*SnapshotInput, bool) {
|
||||
|
||||
defer s.actionsMu.RUnlock()
|
||||
|
||||
workers := s.getWorkers()
|
||||
workers := s.copyWorkers()
|
||||
|
||||
res := &SnapshotInput{
|
||||
Workers: make(map[uuid.UUID]*WorkerCp),
|
||||
|
||||
@@ -12,6 +12,16 @@ import (
|
||||
"github.com/hatchet-dev/hatchet/pkg/repository/sqlcv1"
|
||||
)
|
||||
|
||||
type notifierMsg[T any] struct {
|
||||
items []T
|
||||
|
||||
// isIncremental refers to whether the resource should be added to the current set, or
|
||||
// should replace the current set
|
||||
isIncremental bool
|
||||
}
|
||||
|
||||
type notifierCh[T any] chan notifierMsg[T]
|
||||
|
||||
// tenantManager manages the scheduler and queuers for a tenant and multiplexes
|
||||
// messages to the relevant queuer.
|
||||
type tenantManager struct {
|
||||
@@ -35,9 +45,9 @@ type tenantManager struct {
|
||||
|
||||
leaseManager *LeaseManager
|
||||
|
||||
workersCh <-chan []*v1.ListActiveWorkersResult
|
||||
queuesCh <-chan []string
|
||||
concurrencyCh <-chan []*sqlcv1.V1StepConcurrency
|
||||
workersCh notifierCh[*v1.ListActiveWorkersResult]
|
||||
queuesCh notifierCh[string]
|
||||
concurrencyCh notifierCh[*sqlcv1.V1StepConcurrency]
|
||||
|
||||
concurrencyResultsCh chan *ConcurrencyResults
|
||||
|
||||
@@ -110,8 +120,25 @@ func (t *tenantManager) listenForWorkerLeases(ctx context.Context) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case workerIds := <-t.workersCh:
|
||||
t.scheduler.setWorkers(workerIds)
|
||||
case msg := <-t.workersCh:
|
||||
if msg.isIncremental {
|
||||
for _, worker := range msg.items {
|
||||
t.scheduler.addWorker(worker)
|
||||
}
|
||||
|
||||
t.replenish(ctx)
|
||||
|
||||
// notify all queues to check if the new worker can take any tasks
|
||||
t.queuersMu.RLock()
|
||||
|
||||
for _, q := range t.queuers {
|
||||
q.queue(ctx)
|
||||
}
|
||||
|
||||
t.queuersMu.RUnlock()
|
||||
} else {
|
||||
t.scheduler.setWorkers(msg.items)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -121,8 +148,14 @@ func (t *tenantManager) listenForQueueLeases(ctx context.Context) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case queueNames := <-t.queuesCh:
|
||||
t.setQueuers(queueNames)
|
||||
case msg := <-t.queuesCh:
|
||||
if msg.isIncremental {
|
||||
for _, queueName := range msg.items {
|
||||
t.addQueuer(queueName)
|
||||
}
|
||||
} else {
|
||||
t.setQueuers(msg.items)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -132,8 +165,14 @@ func (t *tenantManager) listenForConcurrencyLeases(ctx context.Context) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case strategies := <-t.concurrencyCh:
|
||||
t.setConcurrencyStrategies(strategies)
|
||||
case msg := <-t.concurrencyCh:
|
||||
if msg.isIncremental {
|
||||
for _, strategy := range msg.items {
|
||||
t.addConcurrencyStrategy(strategy)
|
||||
}
|
||||
} else {
|
||||
t.setConcurrencyStrategies(msg.items)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -158,6 +197,8 @@ func (t *tenantManager) setQueuers(queueNames []string) {
|
||||
delete(queueNamesSet, q.queueName)
|
||||
} else {
|
||||
// if not in new set, cleanup
|
||||
t.cf.l.Debug().Msgf("cleaning up queuer for queue %s for tenant %s", q.queueName, t.tenantId)
|
||||
|
||||
go q.Cleanup()
|
||||
}
|
||||
}
|
||||
@@ -169,6 +210,25 @@ func (t *tenantManager) setQueuers(queueNames []string) {
|
||||
t.queuers = newQueueArr
|
||||
}
|
||||
|
||||
func (t *tenantManager) addQueuer(queueName string) {
|
||||
t.queuersMu.Lock()
|
||||
|
||||
for _, q := range t.queuers {
|
||||
if q.queueName == queueName {
|
||||
t.queuersMu.Unlock()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
q := newQueuer(t.cf, t.tenantId, queueName, t.scheduler, t.resultsCh)
|
||||
|
||||
t.queuers = append(t.queuers, q)
|
||||
|
||||
t.queuersMu.Unlock()
|
||||
|
||||
t.queue(context.Background(), []string{queueName})
|
||||
}
|
||||
|
||||
func (t *tenantManager) setConcurrencyStrategies(strategies []*sqlcv1.V1StepConcurrency) {
|
||||
t.concurrencyMu.Lock()
|
||||
defer t.concurrencyMu.Unlock()
|
||||
@@ -200,6 +260,19 @@ func (t *tenantManager) setConcurrencyStrategies(strategies []*sqlcv1.V1StepConc
|
||||
t.concurrencyStrategies = newArr
|
||||
}
|
||||
|
||||
func (t *tenantManager) addConcurrencyStrategy(strategy *sqlcv1.V1StepConcurrency) {
|
||||
t.concurrencyMu.Lock()
|
||||
defer t.concurrencyMu.Unlock()
|
||||
|
||||
for _, c := range t.concurrencyStrategies {
|
||||
if c.strategy.ID == strategy.ID {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
t.concurrencyStrategies = append(t.concurrencyStrategies, newConcurrencyManager(t.cf, t.tenantId, strategy, t.concurrencyResultsCh))
|
||||
}
|
||||
|
||||
func (t *tenantManager) replenish(ctx context.Context) {
|
||||
err := t.scheduler.replenish(ctx, false)
|
||||
|
||||
@@ -274,6 +347,35 @@ func (t *tenantManager) notifyConcurrency(ctx context.Context, strategyIds []int
|
||||
t.concurrencyMu.RUnlock()
|
||||
}
|
||||
|
||||
func (t *tenantManager) notifyNewWorker(ctx context.Context, workerId uuid.UUID) {
|
||||
err := t.leaseManager.notifyNewWorker(ctx, workerId)
|
||||
|
||||
if err != nil {
|
||||
t.cf.l.Error().Err(err).Msg("error notifying new worker")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (t *tenantManager) notifyNewQueue(ctx context.Context, queueName string) {
|
||||
t.cf.l.Debug().Msgf("notifying new queue %s for tenant %s", queueName, t.tenantId)
|
||||
|
||||
err := t.leaseManager.notifyNewQueue(ctx, queueName)
|
||||
|
||||
if err != nil {
|
||||
t.cf.l.Error().Err(err).Msg("error notifying new queue")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (t *tenantManager) notifyNewConcurrencyStrategy(ctx context.Context, strategyId int64) {
|
||||
err := t.leaseManager.notifyNewConcurrencyStrategy(ctx, strategyId)
|
||||
|
||||
if err != nil {
|
||||
t.cf.l.Error().Err(err).Msg("error notifying new concurrency strategy")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (t *tenantManager) queue(ctx context.Context, queueNames []string) {
|
||||
queueNamesMap := make(map[string]struct{}, len(queueNames))
|
||||
|
||||
|
||||
+6
-4
@@ -1,3 +1,5 @@
|
||||
// Deprecated: This package is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
package v1
|
||||
|
||||
import (
|
||||
@@ -9,8 +11,8 @@ import (
|
||||
"github.com/hatchet-dev/hatchet/pkg/v1/workflow"
|
||||
)
|
||||
|
||||
// HatchetClient is the main interface for interacting with the Hatchet task orchestrator.
|
||||
// It provides access to workflow creation, worker registration, and legacy V0 client functionality.
|
||||
// Deprecated: HatchetClient is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type HatchetClient interface {
|
||||
// V0 returns the underlying V0 client for backward compatibility.
|
||||
V0() v0Client.Client
|
||||
@@ -59,8 +61,8 @@ type v1HatchetClientImpl struct {
|
||||
webhooks features.WebhooksClient
|
||||
}
|
||||
|
||||
// NewHatchetClient creates a new V1 Hatchet client with the provided configuration.
|
||||
// If no configuration is provided, default settings will be used.
|
||||
// Deprecated: NewHatchetClient is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func NewHatchetClient(config ...Config) (HatchetClient, error) {
|
||||
cf := &v0Config.ClientConfigFile{}
|
||||
|
||||
|
||||
@@ -8,6 +8,8 @@ import (
|
||||
"github.com/hatchet-dev/hatchet/pkg/config/shared"
|
||||
)
|
||||
|
||||
// Deprecated: Config is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type Config struct {
|
||||
TenantId uuid.UUID
|
||||
Token string
|
||||
@@ -22,6 +24,8 @@ type Config struct {
|
||||
Logger *zerolog.Logger
|
||||
}
|
||||
|
||||
// Deprecated: TLSConfig is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type TLSConfig struct {
|
||||
Base *shared.TLSConfigFile
|
||||
TLSServerName string
|
||||
|
||||
@@ -8,6 +8,9 @@ import (
|
||||
"github.com/hatchet-dev/hatchet/pkg/worker"
|
||||
)
|
||||
|
||||
// Deprecated: NewDurableTask is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// NewDurableTask creates a durable task that is implemented as a simple workflow with a single task.
|
||||
// It provides proper type inference for the input and output types.
|
||||
//
|
||||
|
||||
@@ -8,6 +8,9 @@ import (
|
||||
"github.com/hatchet-dev/hatchet/pkg/worker"
|
||||
)
|
||||
|
||||
// Deprecated: NewTask is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// NewTask creates a standalone task that is implemented as a simple workflow with a single task.
|
||||
// It provides proper type inference for the input and output types.
|
||||
//
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
// Deprecated: This package is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
package factory
|
||||
|
||||
import (
|
||||
@@ -7,9 +9,8 @@ import (
|
||||
"github.com/hatchet-dev/hatchet/pkg/v1/workflow"
|
||||
)
|
||||
|
||||
// NewWorkflow creates a new workflow declaration with the specified input and output types before a client is initialized.
|
||||
// This function is used to create strongly typed workflow declarations with the given client.
|
||||
// NOTE: This is placed on the client due to circular dependency concerns.
|
||||
// Deprecated: NewWorkflow is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func NewWorkflow[I any, O any](opts create.WorkflowCreateOpts[I], client v1.HatchetClient) workflow.WorkflowDeclaration[I, O] {
|
||||
var v0 v0Client.Client
|
||||
if client != nil {
|
||||
|
||||
@@ -9,6 +9,9 @@ import (
|
||||
"github.com/hatchet-dev/hatchet/pkg/client/rest"
|
||||
)
|
||||
|
||||
// Deprecated: CELClient is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// The CEL client is a client for debugging CEL expressions within Hatchet
|
||||
type CELClient interface {
|
||||
Debug(ctx context.Context, expression string, input map[string]interface{}, additionalMetadata, filterPayload *map[string]interface{}) (*CELEvaluationResult, error)
|
||||
@@ -19,6 +22,8 @@ type celClientImpl struct {
|
||||
tenantId uuid.UUID
|
||||
}
|
||||
|
||||
// Deprecated: NewCELClient is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func NewCELClient(
|
||||
api *rest.ClientWithResponses,
|
||||
tenantId *string,
|
||||
@@ -31,6 +36,8 @@ func NewCELClient(
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: CELEvaluationResult is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type CELEvaluationResult struct {
|
||||
status gen.V1CELDebugResponseStatus
|
||||
output *bool
|
||||
|
||||
@@ -9,6 +9,9 @@ import (
|
||||
"github.com/hatchet-dev/hatchet/pkg/client/rest"
|
||||
)
|
||||
|
||||
// Deprecated: CronsClient is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// CronsClient provides methods for interacting with cron workflow triggers
|
||||
// in the Hatchet platform.
|
||||
type CronsClient interface {
|
||||
@@ -25,6 +28,9 @@ type CronsClient interface {
|
||||
Get(ctx context.Context, cronId string) (*rest.CronWorkflows, error)
|
||||
}
|
||||
|
||||
// Deprecated: CreateCronTrigger is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// CreateCronTrigger contains the configuration for creating a cron trigger.
|
||||
type CreateCronTrigger struct {
|
||||
// Name is the unique identifier for the cron trigger.
|
||||
@@ -48,6 +54,9 @@ type cronsClientImpl struct {
|
||||
tenantId uuid.UUID
|
||||
}
|
||||
|
||||
// Deprecated: NewCronsClient is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// NewCronsClient creates a new client for interacting with cron workflow triggers.
|
||||
func NewCronsClient(
|
||||
api *rest.ClientWithResponses,
|
||||
@@ -61,6 +70,9 @@ func NewCronsClient(
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: ValidateCronExpression is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// ValidateCronExpression validates that a string is a valid cron expression.
|
||||
func ValidateCronExpression(expression string) bool {
|
||||
parser := cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow)
|
||||
@@ -155,6 +167,9 @@ func (c *cronsClientImpl) Get(ctx context.Context, cronId string) (*rest.CronWor
|
||||
return resp.JSON200, nil
|
||||
}
|
||||
|
||||
// Deprecated: InvalidCronExpressionError is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// InvalidCronExpressionError represents an error when an invalid cron expression is provided.
|
||||
type InvalidCronExpressionError struct {
|
||||
Expression string
|
||||
|
||||
@@ -8,6 +8,8 @@ import (
|
||||
"github.com/hatchet-dev/hatchet/pkg/client/rest"
|
||||
)
|
||||
|
||||
// Deprecated: FiltersClient is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type FiltersClient interface {
|
||||
List(ctx context.Context, opts *rest.V1FilterListParams) (*rest.V1FilterList, error)
|
||||
|
||||
@@ -25,6 +27,8 @@ type filtersClientImpl struct {
|
||||
tenantID uuid.UUID
|
||||
}
|
||||
|
||||
// Deprecated: NewFiltersClient is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func NewFiltersClient(
|
||||
api *rest.ClientWithResponses,
|
||||
tenantID *string,
|
||||
|
||||
@@ -7,6 +7,9 @@ import (
|
||||
"github.com/hatchet-dev/hatchet/pkg/client/rest"
|
||||
)
|
||||
|
||||
// Deprecated: MetricsClient is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// MetricsClient provides methods for retrieving metrics data
|
||||
// in the Hatchet platform.
|
||||
type MetricsClient interface {
|
||||
@@ -27,6 +30,9 @@ type metricsClientImpl struct {
|
||||
workflows *WorkflowsClient
|
||||
}
|
||||
|
||||
// Deprecated: NewMetricsClient is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// NewMetricsClient creates a new client for interacting with metrics.
|
||||
func NewMetricsClient(
|
||||
api *rest.ClientWithResponses,
|
||||
|
||||
@@ -10,6 +10,9 @@ import (
|
||||
"github.com/hatchet-dev/hatchet/pkg/client/types"
|
||||
)
|
||||
|
||||
// Deprecated: CreateRatelimitOpts is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// createRatelimitOpts contains options for creating or updating a rate limit.
|
||||
type CreateRatelimitOpts struct {
|
||||
// key is the unique identifier for the rate limit
|
||||
@@ -20,6 +23,9 @@ type CreateRatelimitOpts struct {
|
||||
Duration types.RateLimitDuration
|
||||
}
|
||||
|
||||
// Deprecated: RateLimitsClient is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// rateLimitsClient provides an interface for managing rate limits.
|
||||
type RateLimitsClient interface {
|
||||
// upsert creates or updates a rate limit with the provided options.
|
||||
@@ -36,6 +42,9 @@ type rlClientImpl struct {
|
||||
tenantId uuid.UUID
|
||||
}
|
||||
|
||||
// Deprecated: NewRateLimitsClient is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// newRateLimitsClient creates a new rateLimitsClient with the provided api client, tenant id, and admin client.
|
||||
func NewRateLimitsClient(
|
||||
api *rest.ClientWithResponses,
|
||||
|
||||
@@ -12,6 +12,9 @@ import (
|
||||
"github.com/hatchet-dev/hatchet/pkg/client/rest"
|
||||
)
|
||||
|
||||
// Deprecated: RunsClient is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// RunsClient provides methods for interacting with workflow runs
|
||||
// in the Hatchet platform.
|
||||
type RunsClient interface {
|
||||
@@ -46,6 +49,9 @@ type runsClientImpl struct {
|
||||
l *zerolog.Logger
|
||||
}
|
||||
|
||||
// Deprecated: NewRunsClient is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// NewRunsClient creates a new client for interacting with workflow runs.
|
||||
func NewRunsClient(
|
||||
api *rest.ClientWithResponses,
|
||||
|
||||
@@ -9,6 +9,9 @@ import (
|
||||
"github.com/hatchet-dev/hatchet/pkg/config/client"
|
||||
)
|
||||
|
||||
// Deprecated: SchedulesClient is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// SchedulesClient provides methods for interacting with workflow schedules
|
||||
// in the Hatchet platform.
|
||||
type SchedulesClient interface {
|
||||
@@ -25,6 +28,9 @@ type SchedulesClient interface {
|
||||
Get(ctx context.Context, scheduledRunId string) (*rest.ScheduledWorkflows, error)
|
||||
}
|
||||
|
||||
// Deprecated: CreateScheduledRunTrigger is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// CreateScheduledRunTrigger contains the configuration for creating a scheduled run trigger.
|
||||
type CreateScheduledRunTrigger struct {
|
||||
// TriggerAt specifies when the workflow should be triggered.
|
||||
@@ -46,6 +52,9 @@ type schedulesClientImpl struct {
|
||||
namespace *string
|
||||
}
|
||||
|
||||
// Deprecated: NewSchedulesClient is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// NewSchedulesClient creates a new client for interacting with workflow schedules.
|
||||
func NewSchedulesClient(
|
||||
api *rest.ClientWithResponses,
|
||||
|
||||
@@ -8,6 +8,9 @@ import (
|
||||
"github.com/hatchet-dev/hatchet/pkg/client/rest"
|
||||
)
|
||||
|
||||
// Deprecated: TenantClient is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// TenantClient provides methods for interacting with your Tenant
|
||||
type TenantClient interface {
|
||||
// Get the details of the current tenant
|
||||
@@ -19,6 +22,8 @@ type tenantClientImpl struct {
|
||||
tenantId uuid.UUID
|
||||
}
|
||||
|
||||
// Deprecated: NewTenantCliet is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func NewTenantCliet(
|
||||
api *rest.ClientWithResponses,
|
||||
tenantId *string,
|
||||
|
||||
@@ -9,10 +9,14 @@ import (
|
||||
"github.com/hatchet-dev/hatchet/pkg/client/rest"
|
||||
)
|
||||
|
||||
// Deprecated: WebhookAuth is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type WebhookAuth interface {
|
||||
toCreateRequest(opts CreateWebhookOpts) (rest.V1CreateWebhookRequest, error)
|
||||
}
|
||||
|
||||
// Deprecated: BasicAuth is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type BasicAuth struct {
|
||||
Username string
|
||||
Password string
|
||||
@@ -33,6 +37,8 @@ func (a BasicAuth) toCreateRequest(opts CreateWebhookOpts) (rest.V1CreateWebhook
|
||||
return req, err
|
||||
}
|
||||
|
||||
// Deprecated: APIKeyAuth is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type APIKeyAuth struct {
|
||||
HeaderName string
|
||||
APIKey string
|
||||
@@ -53,6 +59,8 @@ func (a APIKeyAuth) toCreateRequest(opts CreateWebhookOpts) (rest.V1CreateWebhoo
|
||||
return req, err
|
||||
}
|
||||
|
||||
// Deprecated: HMACAuth is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type HMACAuth struct {
|
||||
SigningSecret string
|
||||
SignatureHeaderName string
|
||||
@@ -77,6 +85,8 @@ func (a HMACAuth) toCreateRequest(opts CreateWebhookOpts) (rest.V1CreateWebhookR
|
||||
return req, err
|
||||
}
|
||||
|
||||
// Deprecated: CreateWebhookOpts is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type CreateWebhookOpts struct {
|
||||
Name string
|
||||
SourceName rest.V1WebhookSourceName
|
||||
@@ -84,10 +94,15 @@ type CreateWebhookOpts struct {
|
||||
Auth WebhookAuth
|
||||
}
|
||||
|
||||
// Deprecated: UpdateWebhookOpts is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type UpdateWebhookOpts struct {
|
||||
EventKeyExpression string
|
||||
}
|
||||
|
||||
// Deprecated: WebhooksClient is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// WebhooksClient provides methods for managing webhook configurations.
|
||||
type WebhooksClient interface {
|
||||
// List retrieves a collection of webhooks based on the provided parameters.
|
||||
@@ -111,6 +126,9 @@ type webhooksClientImpl struct {
|
||||
tenantId uuid.UUID
|
||||
}
|
||||
|
||||
// Deprecated: NewWebhooksClient is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// NewWebhooksClient creates a new client for managing webhook configurations.
|
||||
func NewWebhooksClient(
|
||||
api *rest.ClientWithResponses,
|
||||
|
||||
@@ -7,6 +7,9 @@ import (
|
||||
"github.com/hatchet-dev/hatchet/pkg/client/rest"
|
||||
)
|
||||
|
||||
// Deprecated: WorkersClient is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// WorkersClient provides methods for interacting with workers
|
||||
// in the Hatchet platform.
|
||||
type WorkersClient interface {
|
||||
@@ -32,6 +35,9 @@ type workersClientImpl struct {
|
||||
tenantId uuid.UUID
|
||||
}
|
||||
|
||||
// Deprecated: NewWorkersClient is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// NewWorkersClient creates a new client for interacting with workers.
|
||||
func NewWorkersClient(
|
||||
api *rest.ClientWithResponses,
|
||||
|
||||
@@ -10,6 +10,9 @@ import (
|
||||
"github.com/hatchet-dev/hatchet/pkg/repository/cache"
|
||||
)
|
||||
|
||||
// Deprecated: WorkflowsClient is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// WorkflowsClient provides methods for interacting with workflows
|
||||
// in the Hatchet platform.
|
||||
type WorkflowsClient interface {
|
||||
@@ -42,6 +45,9 @@ type workflowsClientImpl struct {
|
||||
cache *cache.Cache
|
||||
}
|
||||
|
||||
// Deprecated: NewWorkflowsClient is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// NewWorkflowsClient creates a new client for interacting with workflows.
|
||||
func NewWorkflowsClient(
|
||||
api *rest.ClientWithResponses,
|
||||
|
||||
+12
-3
@@ -1,3 +1,5 @@
|
||||
// Deprecated: This package is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
package task
|
||||
|
||||
import (
|
||||
@@ -11,14 +13,20 @@ import (
|
||||
"github.com/hatchet-dev/hatchet/pkg/worker/condition"
|
||||
)
|
||||
|
||||
// Deprecated: NamedTaskImpl is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type NamedTaskImpl struct {
|
||||
Name string
|
||||
}
|
||||
|
||||
// Deprecated: TaskBase is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type TaskBase interface {
|
||||
Dump(workflowName string, taskDefaults *create.TaskDefaults) *contracts.CreateTaskOpts
|
||||
}
|
||||
|
||||
// Deprecated: TaskShared is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type TaskShared struct {
|
||||
// ExecutionTimeout specifies the maximum duration a task can run before being terminated
|
||||
ExecutionTimeout *time.Duration
|
||||
@@ -49,7 +57,8 @@ type TaskShared struct {
|
||||
Fn interface{}
|
||||
}
|
||||
|
||||
// TaskDeclaration represents a standard (non-durable) task configuration that can be added to a workflow.
|
||||
// Deprecated: TaskDeclaration is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type TaskDeclaration[I any] struct {
|
||||
TaskBase
|
||||
NamedTaskImpl
|
||||
@@ -75,8 +84,8 @@ type TaskDeclaration[I any] struct {
|
||||
Fn interface{}
|
||||
}
|
||||
|
||||
// DurableTaskDeclaration represents a durable task configuration that can be added to a workflow.
|
||||
// Durable tasks can use the DurableHatchetContext for operations that persist across worker restarts.
|
||||
// Deprecated: DurableTaskDeclaration is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type DurableTaskDeclaration[I any] struct {
|
||||
TaskBase
|
||||
NamedTaskImpl
|
||||
|
||||
+14
-11
@@ -1,5 +1,5 @@
|
||||
// package worker provides functionality for creating and managing hatchet workers.
|
||||
// workers are responsible for executing workflow tasks and communicating with the hatchet API.
|
||||
// Deprecated: This package is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
package worker
|
||||
|
||||
import (
|
||||
@@ -15,7 +15,8 @@ import (
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// Worker defines the interface for interacting with a hatchet worker.
|
||||
// Deprecated: Worker is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type Worker interface {
|
||||
// Start begins worker execution in a non-blocking manner and returns a cleanup function.
|
||||
// the cleanup function should be called when the worker needs to be stopped.
|
||||
@@ -37,11 +38,12 @@ type Worker interface {
|
||||
Unpause(ctx context.Context) error
|
||||
}
|
||||
|
||||
// WorkerLabels represents a map of labels that can be assigned to a worker
|
||||
// for filtering and identification purposes.
|
||||
// Deprecated: WorkerLabels is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type WorkerLabels map[string]interface{}
|
||||
|
||||
// CreateOpts defines the options for creating a new worker.
|
||||
// Deprecated: WorkerOpts is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type WorkerOpts struct {
|
||||
// (required) the friendly name of the worker
|
||||
Name string
|
||||
@@ -65,7 +67,8 @@ type WorkerOpts struct {
|
||||
DurableSlots int
|
||||
}
|
||||
|
||||
// WorkerImpl is the concrete implementation of the Worker interface.
|
||||
// Deprecated: WorkerImpl is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type WorkerImpl struct {
|
||||
// v0 is the client used to communicate with the hatchet API.
|
||||
v0 v0Client.Client
|
||||
@@ -101,9 +104,8 @@ type WorkerImpl struct {
|
||||
labels WorkerLabels
|
||||
}
|
||||
|
||||
// NewWorker creates and configures a new Worker with the provided client and options.
|
||||
// additional functional options can be provided to further customize the worker configuration.
|
||||
// returns the created Worker interface and any error encountered during creation.
|
||||
// Deprecated: NewWorker is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func NewWorker(workersClient features.WorkersClient, v0 v0Client.Client, opts WorkerOpts) (Worker, error) {
|
||||
w := &WorkerImpl{
|
||||
v0: v0,
|
||||
@@ -138,7 +140,8 @@ func NewWorker(workersClient features.WorkersClient, v0 v0Client.Client, opts Wo
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// NamedFunction represents a function with its associated action ID
|
||||
// Deprecated: NamedFunction is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type NamedFunction struct {
|
||||
ActionID string
|
||||
Fn workflow.WrappedTaskFn
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
// Package workflow provides functionality for defining, managing, and executing
|
||||
// workflows in Hatchet. A workflow is a collection of tasks with defined
|
||||
// dependencies and execution logic.
|
||||
// Deprecated: This package is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
package workflow
|
||||
|
||||
import (
|
||||
@@ -24,40 +23,46 @@ import (
|
||||
contracts "github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1"
|
||||
)
|
||||
|
||||
// WrappedTaskFn represents a task function that can be executed by the Hatchet worker.
|
||||
// It takes a HatchetContext and returns an interface{} result and an error.
|
||||
// Deprecated: WrappedTaskFn is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type WrappedTaskFn func(ctx worker.HatchetContext) (interface{}, error)
|
||||
|
||||
// DurableWrappedTaskFn represents a durable task function that can be executed by the Hatchet worker.
|
||||
// It takes a DurableHatchetContext and returns an interface{} result and an error.
|
||||
// Deprecated: DurableWrappedTaskFn is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type DurableWrappedTaskFn func(ctx worker.DurableHatchetContext) (interface{}, error)
|
||||
|
||||
// NamedFunction represents a function with its associated action ID
|
||||
// Deprecated: NamedFunction is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type NamedFunction struct {
|
||||
ActionID string
|
||||
Fn WrappedTaskFn
|
||||
}
|
||||
|
||||
// WorkflowBase defines the common interface for all workflow types.
|
||||
// Deprecated: WorkflowBase is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type WorkflowBase interface {
|
||||
// Dump converts the workflow declaration into a protobuf request and function mappings.
|
||||
// Returns the workflow definition, regular task functions, durable task functions, and the on failure task function.
|
||||
Dump() (*contracts.CreateWorkflowVersionRequest, []NamedFunction, []NamedFunction, WrappedTaskFn)
|
||||
}
|
||||
|
||||
// Deprecated: RunOpts is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type RunOpts struct {
|
||||
AdditionalMetadata *map[string]interface{}
|
||||
Priority *int32
|
||||
}
|
||||
|
||||
// Deprecated: RunAsChildOpts is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type RunAsChildOpts struct {
|
||||
RunOpts
|
||||
Sticky *bool
|
||||
Key *string
|
||||
}
|
||||
|
||||
// WorkflowDeclaration represents a workflow with input type I and output type O.
|
||||
// It provides methods to define tasks, specify dependencies, and execute the workflow.
|
||||
// Deprecated: WorkflowDeclaration is part of the old generics-based v1 Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type WorkflowDeclaration[I, O any] interface {
|
||||
WorkflowBase
|
||||
|
||||
|
||||
@@ -300,6 +300,14 @@ func (h *hatchetContext) WorkflowVersionId() *string {
|
||||
|
||||
func (h *hatchetContext) Log(message string) {
|
||||
infoLevel := "INFO"
|
||||
|
||||
runes := []rune(message)
|
||||
|
||||
if len(runes) > 10_000 {
|
||||
h.l.Warn().Msg("log message is too long, truncating to the first 10,000 characters")
|
||||
message = string(runes[:10_000])
|
||||
}
|
||||
|
||||
err := h.c.Event().PutLog(h, h.a.StepRunId, message, &infoLevel, &h.a.RetryCount)
|
||||
|
||||
if err != nil {
|
||||
|
||||
@@ -6,6 +6,8 @@ import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Deprecated: MiddlewareFunc is an internal type used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of using this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type MiddlewareFunc func(ctx HatchetContext, next func(HatchetContext) error) error
|
||||
|
||||
type middlewares struct {
|
||||
|
||||
+20
-1
@@ -7,6 +7,8 @@ import (
|
||||
"github.com/hatchet-dev/hatchet/pkg/client/types"
|
||||
)
|
||||
|
||||
// Deprecated: Service is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type Service struct {
|
||||
Name string
|
||||
|
||||
@@ -15,15 +17,20 @@ type Service struct {
|
||||
worker *Worker
|
||||
}
|
||||
|
||||
// Deprecated: Use is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (s *Service) Use(mws ...MiddlewareFunc) {
|
||||
s.mws.add(mws...)
|
||||
}
|
||||
|
||||
// Deprecated: RegisterWorkflow is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (s *Service) RegisterWorkflow(workflow workflowConverter) error {
|
||||
return s.On(workflow.ToWorkflowTrigger(), workflow)
|
||||
}
|
||||
|
||||
// Deprecated: Use RegisterWorkflow instead
|
||||
// Deprecated: On is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (s *Service) On(t triggerConverter, workflow workflowConverter) error {
|
||||
namespace := s.worker.client.Namespace()
|
||||
|
||||
@@ -67,25 +74,35 @@ func (s *Service) On(t triggerConverter, workflow workflowConverter) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deprecated: registerActionOpts is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type registerActionOpts struct {
|
||||
name string
|
||||
compute *compute.Compute
|
||||
}
|
||||
|
||||
// Deprecated: RegisterActionOpt is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type RegisterActionOpt func(*registerActionOpts)
|
||||
|
||||
// Deprecated: WithActionName is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func WithActionName(name string) RegisterActionOpt {
|
||||
return func(opts *registerActionOpts) {
|
||||
opts.name = name
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: WithCompute is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func WithCompute(compute *compute.Compute) RegisterActionOpt {
|
||||
return func(opts *registerActionOpts) {
|
||||
opts.compute = compute
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: RegisterAction is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (s *Service) RegisterAction(fn any, opts ...RegisterActionOpt) error {
|
||||
fnOpts := ®isterActionOpts{}
|
||||
|
||||
@@ -100,6 +117,8 @@ func (s *Service) RegisterAction(fn any, opts ...RegisterActionOpt) error {
|
||||
return s.worker.registerAction(s.Name, fnOpts.name, fn, fnOpts.compute)
|
||||
}
|
||||
|
||||
// Deprecated: Call is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (s *Service) Call(verb string) *WorkflowStep {
|
||||
actionId := fmt.Sprintf("%s:%s", s.Name, verb)
|
||||
|
||||
|
||||
+54
-10
@@ -25,7 +25,8 @@ import (
|
||||
|
||||
type actionFunc func(args ...any) []any
|
||||
|
||||
// Action is an individual action that can be run by the worker.
|
||||
// Deprecated: Action is an internal interface used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of using this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type Action interface {
|
||||
// Name returns the name of the action
|
||||
Name() string
|
||||
@@ -77,8 +78,12 @@ func (j *actionImpl) Compute() *compute.Compute {
|
||||
return j.compute
|
||||
}
|
||||
|
||||
// Deprecated: ActionRegistry is an internal type used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of using this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type ActionRegistry map[string]Action
|
||||
|
||||
// Deprecated: Worker is an internal type used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of using this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type Worker struct {
|
||||
client client.Client
|
||||
|
||||
@@ -111,8 +116,12 @@ type Worker struct {
|
||||
panicHandler func(ctx HatchetContext, recovered any)
|
||||
}
|
||||
|
||||
// Deprecated: WorkerOpt is an internal type used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of using this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type WorkerOpt func(*WorkerOpts)
|
||||
|
||||
// Deprecated: WorkerOpts is an internal type used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of using this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type WorkerOpts struct {
|
||||
client client.Client
|
||||
name string
|
||||
@@ -136,54 +145,70 @@ func defaultWorkerOpts() *WorkerOpts {
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: WithInternalData is an internal function used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func WithInternalData(actions []string) WorkerOpt {
|
||||
return func(opts *WorkerOpts) {
|
||||
opts.actions = actions
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: WithName is an internal function used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func WithName(name string) WorkerOpt {
|
||||
return func(opts *WorkerOpts) {
|
||||
opts.name = name
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: WithClient is an internal function used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func WithClient(client client.Client) WorkerOpt {
|
||||
return func(opts *WorkerOpts) {
|
||||
opts.client = client
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: WithIntegration is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func WithIntegration(integration integrations.Integration) WorkerOpt {
|
||||
return func(opts *WorkerOpts) {
|
||||
opts.integrations = append(opts.integrations, integration)
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: WithErrorAlerter is an internal function used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func WithErrorAlerter(alerter errors.Alerter) WorkerOpt {
|
||||
return func(opts *WorkerOpts) {
|
||||
opts.alerter = alerter
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: use WithSlots instead.
|
||||
// Deprecated: WithMaxRuns is an internal function used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func WithMaxRuns(maxRuns int) WorkerOpt {
|
||||
return WithSlots(maxRuns)
|
||||
}
|
||||
|
||||
// WithSlots sets the number of concurrent slots this worker can handle.
|
||||
// Deprecated: WithSlots is an internal function used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func WithSlots(slots int) WorkerOpt {
|
||||
return func(opts *WorkerOpts) {
|
||||
opts.slots = &slots
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: WithLabels is an internal function used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func WithLabels(labels map[string]interface{}) WorkerOpt {
|
||||
return func(opts *WorkerOpts) {
|
||||
opts.labels = labels
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: WithLogger is an internal function used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func WithLogger(l *zerolog.Logger) WorkerOpt {
|
||||
return func(opts *WorkerOpts) {
|
||||
if opts.l != nil {
|
||||
@@ -195,6 +220,8 @@ func WithLogger(l *zerolog.Logger) WorkerOpt {
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: WithLogLevel is an internal function used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func WithLogLevel(lvl string) WorkerOpt {
|
||||
return func(opts *WorkerOpts) {
|
||||
var l zerolog.Logger
|
||||
@@ -215,7 +242,8 @@ func WithLogLevel(lvl string) WorkerOpt {
|
||||
}
|
||||
}
|
||||
|
||||
// NewWorker creates a new worker instance
|
||||
// Deprecated: NewWorker is an internal function used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func NewWorker(fs ...WorkerOpt) (*Worker, error) {
|
||||
opts := defaultWorkerOpts()
|
||||
|
||||
@@ -273,14 +301,20 @@ func NewWorker(fs ...WorkerOpt) (*Worker, error) {
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Deprecated: Use is an internal method used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (w *Worker) Use(mws ...MiddlewareFunc) {
|
||||
w.middlewares.add(mws...)
|
||||
}
|
||||
|
||||
// Deprecated: SetPanicHandler is an internal method used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (w *Worker) SetPanicHandler(panicHandler func(ctx HatchetContext, recovered any)) {
|
||||
w.panicHandler = panicHandler
|
||||
}
|
||||
|
||||
// Deprecated: NewService is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (w *Worker) NewService(name string) *Service {
|
||||
ns := w.client.Namespace()
|
||||
svcName := strings.ToLower(clientconfig.ApplyNamespace(name, &ns))
|
||||
@@ -296,6 +330,8 @@ func (w *Worker) NewService(name string) *Service {
|
||||
return svc
|
||||
}
|
||||
|
||||
// Deprecated: RegisterWorkflow is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (w *Worker) RegisterWorkflow(workflow workflowConverter) error {
|
||||
wf, ok := workflow.(*WorkflowJob)
|
||||
if ok && wf.On == nil {
|
||||
@@ -307,6 +343,8 @@ func (w *Worker) RegisterWorkflow(workflow workflowConverter) error {
|
||||
return w.On(workflow.ToWorkflowTrigger(), workflow)
|
||||
}
|
||||
|
||||
// Deprecated: RegisterWorkflowV1 is an internal method used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (w *Worker) RegisterWorkflowV1(workflow *contracts.CreateWorkflowVersionRequest) error {
|
||||
namespace := w.client.Namespace()
|
||||
namespaced := namespace + workflow.Name
|
||||
@@ -316,7 +354,8 @@ func (w *Worker) RegisterWorkflowV1(workflow *contracts.CreateWorkflowVersionReq
|
||||
return w.client.Admin().PutWorkflowV1(workflow)
|
||||
}
|
||||
|
||||
// Deprecated: Use RegisterWorkflow instead
|
||||
// Deprecated: On is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (w *Worker) On(t triggerConverter, workflow workflowConverter) error {
|
||||
svcName := workflow.ToWorkflow("", "").Name
|
||||
svcName = strings.ToLower(svcName)
|
||||
@@ -337,7 +376,8 @@ func (w *Worker) On(t triggerConverter, workflow workflowConverter) error {
|
||||
return svc.(*Service).On(t, workflow)
|
||||
}
|
||||
|
||||
// RegisterAction can be used to register a single action which can be reused across multiple workflows.
|
||||
// Deprecated: RegisterAction is an internal method used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
//
|
||||
// An action should be of the format <service>:<verb>, for example slack:create-channel.
|
||||
//
|
||||
@@ -403,8 +443,8 @@ func (w *Worker) registerAction(service, verb string, method any, compute *compu
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start starts the worker in non-blocking fashion, returning a cleanup function and an error if the
|
||||
// worker could not be started.
|
||||
// Deprecated: Start is an internal method used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (w *Worker) Start() (func() error, error) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
@@ -428,16 +468,20 @@ func (w *Worker) Start() (func() error, error) {
|
||||
return cleanup, nil
|
||||
}
|
||||
|
||||
// Run starts the worker in blocking fashion, returning an error if the worker could not be started
|
||||
// or if the worker stopped due to a networking issue.
|
||||
// Deprecated: Run is an internal method used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (w *Worker) Run(ctx context.Context) error {
|
||||
return w.startBlocking(ctx)
|
||||
}
|
||||
|
||||
// Deprecated: Logger is an internal method used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (w *Worker) Logger() *zerolog.Logger {
|
||||
return w.l
|
||||
}
|
||||
|
||||
// Deprecated: ID is an internal method used by the new Go SDK.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead of calling this directly. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (w *Worker) ID() *string {
|
||||
return w.id
|
||||
}
|
||||
|
||||
@@ -18,6 +18,8 @@ type triggerConverter interface {
|
||||
|
||||
type cron string
|
||||
|
||||
// Deprecated: Cron is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func Cron(c string) cron {
|
||||
return cron(c)
|
||||
}
|
||||
@@ -32,6 +34,8 @@ func (c cron) ToWorkflowTriggers(wt *types.WorkflowTriggers, namespace string) {
|
||||
|
||||
type cronArr []string
|
||||
|
||||
// Deprecated: Crons is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func Crons(c ...string) cronArr {
|
||||
return cronArr(c)
|
||||
}
|
||||
@@ -46,6 +50,8 @@ func (c cronArr) ToWorkflowTriggers(wt *types.WorkflowTriggers, namespace string
|
||||
|
||||
type noTrigger struct{}
|
||||
|
||||
// Deprecated: NoTrigger is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func NoTrigger() noTrigger {
|
||||
return noTrigger{}
|
||||
}
|
||||
@@ -56,6 +62,8 @@ func (n noTrigger) ToWorkflowTriggers(wt *types.WorkflowTriggers, namespace stri
|
||||
|
||||
type scheduled []time.Time
|
||||
|
||||
// Deprecated: At is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func At(t ...time.Time) scheduled {
|
||||
return t
|
||||
}
|
||||
@@ -68,6 +76,8 @@ func (s scheduled) ToWorkflowTriggers(wt *types.WorkflowTriggers, namespace stri
|
||||
wt.Schedules = append(wt.Schedules, s...)
|
||||
}
|
||||
|
||||
// Deprecated: Call is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (w *Worker) Call(action string) *WorkflowStep {
|
||||
registeredAction, exists := w.actions[action]
|
||||
|
||||
@@ -89,6 +99,8 @@ func (w *Worker) Call(action string) *WorkflowStep {
|
||||
|
||||
type event string
|
||||
|
||||
// Deprecated: Event is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func Event(e string) event {
|
||||
return event(e)
|
||||
}
|
||||
@@ -108,6 +120,8 @@ func (e event) ToWorkflowTriggers(wt *types.WorkflowTriggers, namespace string)
|
||||
|
||||
type eventsArr []string
|
||||
|
||||
// Deprecated: Events is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func Events(events ...string) eventsArr {
|
||||
return events
|
||||
}
|
||||
@@ -131,12 +145,18 @@ type workflowConverter interface {
|
||||
ToWorkflowTrigger() triggerConverter
|
||||
}
|
||||
|
||||
// Deprecated: Workflow is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type Workflow struct {
|
||||
Jobs []WorkflowJob
|
||||
}
|
||||
|
||||
// Deprecated: GetWorkflowConcurrencyGroupFn is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type GetWorkflowConcurrencyGroupFn func(ctx HatchetContext) (string, error)
|
||||
|
||||
// Deprecated: WorkflowJob is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type WorkflowJob struct {
|
||||
// The name of the job
|
||||
Name string
|
||||
@@ -157,6 +177,8 @@ type WorkflowJob struct {
|
||||
StickyStrategy *types.StickyStrategy
|
||||
}
|
||||
|
||||
// Deprecated: WorkflowConcurrency is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type WorkflowConcurrency struct {
|
||||
fn GetWorkflowConcurrencyGroupFn
|
||||
expr *string
|
||||
@@ -164,23 +186,31 @@ type WorkflowConcurrency struct {
|
||||
limitStrategy *types.WorkflowConcurrencyLimitStrategy
|
||||
}
|
||||
|
||||
// Deprecated: Expression is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func Expression(expr string) *WorkflowConcurrency {
|
||||
return &WorkflowConcurrency{
|
||||
expr: &expr,
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: Concurrency is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func Concurrency(fn GetWorkflowConcurrencyGroupFn) *WorkflowConcurrency {
|
||||
return &WorkflowConcurrency{
|
||||
fn: fn,
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: MaxRuns is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (c *WorkflowConcurrency) MaxRuns(maxRuns int32) *WorkflowConcurrency {
|
||||
c.maxRuns = &maxRuns
|
||||
return c
|
||||
}
|
||||
|
||||
// Deprecated: LimitStrategy is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (c *WorkflowConcurrency) LimitStrategy(limitStrategy types.WorkflowConcurrencyLimitStrategy) *WorkflowConcurrency {
|
||||
c.limitStrategy = &limitStrategy
|
||||
return c
|
||||
@@ -303,6 +333,8 @@ func (j *WorkflowJob) ToActionMap(svcName string) ActionMap {
|
||||
return res
|
||||
}
|
||||
|
||||
// Deprecated: WorkflowStep is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type WorkflowStep struct {
|
||||
// The step timeout
|
||||
Timeout string
|
||||
@@ -329,6 +361,8 @@ type WorkflowStep struct {
|
||||
Compute *compute.Compute
|
||||
}
|
||||
|
||||
// Deprecated: RateLimit is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
type RateLimit struct {
|
||||
// Key is the rate limit key
|
||||
Key string `yaml:"key,omitempty"`
|
||||
@@ -343,6 +377,8 @@ type RateLimit struct {
|
||||
Duration *types.RateLimitDuration `yaml:"duration,omitempty"`
|
||||
}
|
||||
|
||||
// Deprecated: Fn is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func Fn(f any) *WorkflowStep {
|
||||
return &WorkflowStep{
|
||||
Function: f,
|
||||
@@ -351,46 +387,64 @@ func Fn(f any) *WorkflowStep {
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated: SetName is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (w *WorkflowStep) SetName(name string) *WorkflowStep {
|
||||
w.Name = name
|
||||
return w
|
||||
}
|
||||
|
||||
// Deprecated: SetCompute is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (w *WorkflowStep) SetCompute(compute *compute.Compute) *WorkflowStep {
|
||||
w.Compute = compute
|
||||
return w
|
||||
}
|
||||
|
||||
// Deprecated: SetDesiredLabels is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (w *WorkflowStep) SetDesiredLabels(labels map[string]*types.DesiredWorkerLabel) *WorkflowStep {
|
||||
w.DesiredLabels = labels
|
||||
return w
|
||||
}
|
||||
|
||||
// Deprecated: SetRateLimit is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (w *WorkflowStep) SetRateLimit(rateLimit RateLimit) *WorkflowStep {
|
||||
w.RateLimit = append(w.RateLimit, rateLimit)
|
||||
return w
|
||||
}
|
||||
|
||||
// Deprecated: SetTimeout is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (w *WorkflowStep) SetTimeout(timeout string) *WorkflowStep {
|
||||
w.Timeout = timeout
|
||||
return w
|
||||
}
|
||||
|
||||
// Deprecated: SetRetries is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (w *WorkflowStep) SetRetries(retries int) *WorkflowStep {
|
||||
w.Retries = retries
|
||||
return w
|
||||
}
|
||||
|
||||
// Deprecated: SetRetryBackoffFactor is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (w *WorkflowStep) SetRetryBackoffFactor(retryBackoffFactor float32) *WorkflowStep {
|
||||
w.RetryBackoffFactor = &retryBackoffFactor
|
||||
return w
|
||||
}
|
||||
|
||||
// Deprecated: SetRetryMaxBackoffSeconds is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (w *WorkflowStep) SetRetryMaxBackoffSeconds(retryMaxBackoffSeconds int32) *WorkflowStep {
|
||||
w.RetryMaxBackoffSeconds = &retryMaxBackoffSeconds
|
||||
return w
|
||||
}
|
||||
|
||||
// Deprecated: AddParents is part of the legacy v0 workflow definition system.
|
||||
// Use the new Go SDK at github.com/hatchet-dev/hatchet/sdks/go instead. Migration guide: https://docs.hatchet.run/home/migration-guide-go
|
||||
func (w *WorkflowStep) AddParents(parents ...string) *WorkflowStep {
|
||||
w.Parents = append(w.Parents, parents...)
|
||||
return w
|
||||
|
||||
@@ -83,6 +83,32 @@ func (a HMACAuth) toCreateRequest(opts CreateWebhookOpts) (rest.V1CreateWebhookR
|
||||
return req, err
|
||||
}
|
||||
|
||||
// SvixAuth implements Svix's signature verification protocol for webhooks.
|
||||
// Only the signing secret (whsec_...) is required; signature headers and
|
||||
// algorithms are configured explicitly to match Svix's expectations.
|
||||
type SvixAuth struct {
|
||||
SigningSecret string
|
||||
}
|
||||
|
||||
func (a SvixAuth) toCreateRequest(opts CreateWebhookOpts) (rest.V1CreateWebhookRequest, error) {
|
||||
var req rest.V1CreateWebhookRequest
|
||||
err := req.FromV1CreateWebhookRequestHMAC(rest.V1CreateWebhookRequestHMAC{
|
||||
Name: opts.Name,
|
||||
SourceName: rest.SVIX,
|
||||
EventKeyExpression: opts.EventKeyExpression,
|
||||
ScopeExpression: opts.ScopeExpression,
|
||||
StaticPayload: opts.StaticPayload,
|
||||
AuthType: rest.V1CreateWebhookRequestHMACAuthType("HMAC"),
|
||||
Auth: rest.V1WebhookHMACAuth{
|
||||
SigningSecret: a.SigningSecret,
|
||||
SignatureHeaderName: "svix-signature",
|
||||
Algorithm: rest.SHA256,
|
||||
Encoding: rest.BASE64,
|
||||
},
|
||||
})
|
||||
return req, err
|
||||
}
|
||||
|
||||
type CreateWebhookOpts struct {
|
||||
Name string
|
||||
SourceName rest.V1WebhookSourceName
|
||||
|
||||
@@ -5,6 +5,27 @@ All notable changes to Hatchet's Python SDK will be documented in this changelog
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## [1.23.3] - 2026-02-12
|
||||
|
||||
### Added
|
||||
|
||||
- Adds type-hinted `Standalone.output_validator` and `Standalone.output_validator_type` properties to support easier type-safety and match the `input_validator` property pattern on `BaseWorkflow`.
|
||||
- Adds type-hinted `Task.output_validator` and `Task.output_validator_type` properties to support easier type-safety and match the patterns on `BaseWorkflow/Standalone`.
|
||||
|
||||
## [1.23.2] - 2026-02-11
|
||||
|
||||
### Changed
|
||||
|
||||
- Improves error handling for REST transport-level failures by raising typed exceptions for timeouts, connection, TLS, and protocol errors while preserving existing diagnostics.
|
||||
|
||||
|
||||
## [1.23.1] - 2026-02-10
|
||||
|
||||
### Changed
|
||||
|
||||
- Fixes a bug introduced in v1.21.0 where the `BaseWorkflow.input_validator` class property became incorrectly typed. Now separate properties are available for the type adapter and the underlying type.
|
||||
|
||||
|
||||
## [1.23.0] - 2026-02-05
|
||||
|
||||
### Internal Only
|
||||
|
||||
@@ -87,21 +87,202 @@ def patch_grpc_init_signature(content: str) -> str:
|
||||
)
|
||||
|
||||
|
||||
def patch_rest_transport_exceptions(content: str) -> str:
|
||||
"""Insert typed REST transport exception classes into exceptions.py.
|
||||
|
||||
Adds exception classes above render_path function, idempotently.
|
||||
"""
|
||||
# Check if already patched
|
||||
if "class RestTransportError" in content:
|
||||
return content
|
||||
|
||||
new_exceptions = '''\
|
||||
|
||||
class RestTransportError(ApiException):
|
||||
"""Base exception for REST transport-level errors (network, timeout, TLS)."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class RestTimeoutError(RestTransportError):
|
||||
"""Raised when a REST request times out (connect or read timeout)."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class RestConnectionError(RestTransportError):
|
||||
"""Raised when a REST request fails to establish a connection."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class RestTLSError(RestTransportError):
|
||||
"""Raised when a REST request fails due to SSL/TLS errors."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class RestProtocolError(RestTransportError):
|
||||
"""Raised when a REST request fails due to protocol-level errors."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
'''
|
||||
|
||||
# Insert before render_path function (match any arguments)
|
||||
pattern = r"(\ndef render_path\([^)]*\):)"
|
||||
replacement = new_exceptions + r"\1"
|
||||
|
||||
return re.sub(pattern, replacement, content)
|
||||
|
||||
|
||||
def patch_rest_imports(content: str) -> str:
|
||||
"""Update rest.py imports to include typed transport exceptions.
|
||||
|
||||
Handles both single-line and parenthesized import formats. Idempotent.
|
||||
"""
|
||||
# The exceptions we need to ensure are imported
|
||||
required_exceptions = [
|
||||
"RestConnectionError",
|
||||
"RestProtocolError",
|
||||
"RestTimeoutError",
|
||||
"RestTLSError",
|
||||
]
|
||||
|
||||
# Idempotency check: if RestTLSError is already imported from this module, do nothing.
|
||||
if re.search(
|
||||
r"(?m)^from\s+hatchet_sdk\.clients\.rest\.exceptions\s+import[^\n]*\bRestTLSError\b",
|
||||
content,
|
||||
):
|
||||
return content
|
||||
|
||||
# Parenthesized import block includes RestTLSError
|
||||
if re.search(
|
||||
r"^from\s+hatchet_sdk\.clients\.rest\.exceptions\s+import\s*\(\s*.*?\bRestTLSError\b.*?\)\s*$",
|
||||
content,
|
||||
flags=re.MULTILINE | re.DOTALL,
|
||||
):
|
||||
return content
|
||||
|
||||
# The target import statement we want with trailing newline to preserve spacing
|
||||
new_import = (
|
||||
"from hatchet_sdk.clients.rest.exceptions import (\n"
|
||||
" ApiException,\n"
|
||||
" ApiValueError,\n"
|
||||
" RestConnectionError,\n"
|
||||
" RestProtocolError,\n"
|
||||
" RestTimeoutError,\n"
|
||||
" RestTLSError,\n"
|
||||
")\n"
|
||||
)
|
||||
|
||||
# Single line import
|
||||
# Matches: from hatchet_sdk.clients.rest.exceptions import ApiException, ApiValueError
|
||||
single_line_pattern = (
|
||||
r"^from\s+hatchet_sdk\.clients\.rest\.exceptions\s+import\s+"
|
||||
r"ApiException\s*,\s*ApiValueError\s*$"
|
||||
)
|
||||
|
||||
modified = re.sub(single_line_pattern, new_import, content, flags=re.MULTILINE)
|
||||
if modified != content:
|
||||
return modified
|
||||
|
||||
# More flexible parenthesized import which matches any order, with or without trailing comma
|
||||
# This handles cases where ApiException and ApiValueError might be in different orders
|
||||
flexible_paren_pattern = (
|
||||
r"^from\s+hatchet_sdk\.clients\.rest\.exceptions\s+import\s*\("
|
||||
r"[^)]*?" # Non-greedy match of contents (only ApiException/ApiValueError expected)
|
||||
r"\)"
|
||||
)
|
||||
|
||||
# Only apply if the block contains just ApiException and/or ApiValueError (no Rest* yet)
|
||||
match = re.search(flexible_paren_pattern, content, flags=re.MULTILINE | re.DOTALL)
|
||||
if match:
|
||||
block = match.group(0)
|
||||
# Verify it only has ApiException/ApiValueError, not our new exceptions
|
||||
if not any(exc in block for exc in required_exceptions):
|
||||
if "ApiException" in block or "ApiValueError" in block:
|
||||
modified = (
|
||||
content[: match.start()] + new_import + content[match.end() :]
|
||||
)
|
||||
return modified
|
||||
|
||||
return content
|
||||
|
||||
|
||||
def patch_rest_error_diagnostics(content: str) -> str:
|
||||
pattern = (
|
||||
"""Patch rest.py exception handlers to use typed exceptions.
|
||||
|
||||
Replaces the generic ApiException handler with typed exception handlers.
|
||||
Handler ordering is critical: NewConnectionError must be caught before
|
||||
ConnectTimeoutError because it inherits from ConnectTimeoutError in urllib3.
|
||||
"""
|
||||
# This pattern matches either the original SSLError only handler or
|
||||
# the previously patched multi-exception handler raising ApiException
|
||||
pattern_original = (
|
||||
r"(?ms)^([ \t]*)except urllib3\.exceptions\.SSLError as e:\s*\n"
|
||||
r"^\1[ \t]*msg = \"\\n\"\.join\(\[type\(e\)\.__name__, str\(e\)\]\)\s*\n"
|
||||
r"^\1[ \t]*raise ApiException\(status=0, reason=msg\)\s*\n"
|
||||
)
|
||||
|
||||
pattern_expanded = (
|
||||
r"(?ms)^([ \t]*)except \(\s*\n"
|
||||
r"^\1[ \t]*urllib3\.exceptions\.SSLError,\s*\n"
|
||||
r"^\1[ \t]*urllib3\.exceptions\.ConnectTimeoutError,\s*\n"
|
||||
r"^\1[ \t]*urllib3\.exceptions\.ReadTimeoutError,\s*\n"
|
||||
r"^\1[ \t]*urllib3\.exceptions\.MaxRetryError,\s*\n"
|
||||
r"^\1[ \t]*urllib3\.exceptions\.NewConnectionError,\s*\n"
|
||||
r"^\1[ \t]*urllib3\.exceptions\.ProtocolError,\s*\n"
|
||||
r"^\1\) as e:\s*\n"
|
||||
r'^\1[ \t]*msg = "\\n"\.join\(\s*\n'
|
||||
r"^\1[ \t]*\[\s*\n"
|
||||
r"^\1[ \t]*type\(e\)\.__name__,\s*\n"
|
||||
r"^\1[ \t]*str\(e\),\s*\n"
|
||||
r'^\1[ \t]*f"method=\{method\}",\s*\n'
|
||||
r'^\1[ \t]*f"url=\{url\}",\s*\n'
|
||||
r'^\1[ \t]*f"timeout=\{_request_timeout\}",\s*\n'
|
||||
r"^\1[ \t]*\]\s*\n"
|
||||
r"^\1[ \t]*\)\s*\n"
|
||||
r"^\1[ \t]*raise ApiException\(status=0, reason=msg\)\s*\n"
|
||||
)
|
||||
|
||||
# Check if already using typed exceptions
|
||||
if "raise RestTLSError" in content:
|
||||
return content
|
||||
|
||||
# Build typed replacement with proper handler ordering
|
||||
# NewConnectionError inherits from ConnectTimeoutError, so must be caught first
|
||||
replacement = (
|
||||
r"\1except urllib3.exceptions.SSLError as e:\n"
|
||||
r'\1 msg = "\\n".join(\n'
|
||||
r"\1 [\n"
|
||||
r"\1 type(e).__name__,\n"
|
||||
r"\1 str(e),\n"
|
||||
r'\1 f"method={method}",\n'
|
||||
r'\1 f"url={url}",\n'
|
||||
r'\1 f"timeout={_request_timeout}",\n'
|
||||
r"\1 ]\n"
|
||||
r"\1 )\n"
|
||||
r"\1 raise RestTLSError(status=0, reason=msg) from e\n"
|
||||
r"\1except (\n"
|
||||
r"\1 urllib3.exceptions.SSLError,\n"
|
||||
r"\1 urllib3.exceptions.ConnectTimeoutError,\n"
|
||||
r"\1 urllib3.exceptions.ReadTimeoutError,\n"
|
||||
r"\1 urllib3.exceptions.MaxRetryError,\n"
|
||||
r"\1 urllib3.exceptions.NewConnectionError,\n"
|
||||
r"\1 urllib3.exceptions.ProtocolError,\n"
|
||||
r"\1) as e:\n"
|
||||
r"\1 # NewConnectionError inherits from ConnectTimeoutError, so must be caught first\n"
|
||||
r'\1 msg = "\\n".join(\n'
|
||||
r"\1 [\n"
|
||||
r"\1 type(e).__name__,\n"
|
||||
r"\1 str(e),\n"
|
||||
r'\1 f"method={method}",\n'
|
||||
r'\1 f"url={url}",\n'
|
||||
r'\1 f"timeout={_request_timeout}",\n'
|
||||
r"\1 ]\n"
|
||||
r"\1 )\n"
|
||||
r"\1 raise RestConnectionError(status=0, reason=msg) from e\n"
|
||||
r"\1except (\n"
|
||||
r"\1 urllib3.exceptions.ConnectTimeoutError,\n"
|
||||
r"\1 urllib3.exceptions.ReadTimeoutError,\n"
|
||||
r"\1) as e:\n"
|
||||
r'\1 msg = "\\n".join(\n'
|
||||
r"\1 [\n"
|
||||
@@ -112,10 +293,27 @@ def patch_rest_error_diagnostics(content: str) -> str:
|
||||
r'\1 f"timeout={_request_timeout}",\n'
|
||||
r"\1 ]\n"
|
||||
r"\1 )\n"
|
||||
r"\1 raise ApiException(status=0, reason=msg)\n"
|
||||
r"\1 raise RestTimeoutError(status=0, reason=msg) from e\n"
|
||||
r"\1except urllib3.exceptions.ProtocolError as e:\n"
|
||||
r'\1 msg = "\\n".join(\n'
|
||||
r"\1 [\n"
|
||||
r"\1 type(e).__name__,\n"
|
||||
r"\1 str(e),\n"
|
||||
r'\1 f"method={method}",\n'
|
||||
r'\1 f"url={url}",\n'
|
||||
r'\1 f"timeout={_request_timeout}",\n'
|
||||
r"\1 ]\n"
|
||||
r"\1 )\n"
|
||||
r"\1 raise RestProtocolError(status=0, reason=msg) from e\n"
|
||||
)
|
||||
|
||||
return apply_patch(content, pattern, replacement)
|
||||
# Try expanded pattern first. Relevant if previously patched with ApiException
|
||||
modified = re.sub(pattern_expanded, replacement, content)
|
||||
if modified != content:
|
||||
return modified
|
||||
|
||||
# Otherwise try original pattern
|
||||
return re.sub(pattern_original, replacement, content)
|
||||
|
||||
|
||||
def apply_patches_to_matching_files(
|
||||
@@ -164,7 +362,12 @@ if __name__ == "__main__":
|
||||
)
|
||||
|
||||
atomically_patch_file(
|
||||
"hatchet_sdk/clients/rest/rest.py", [patch_rest_error_diagnostics]
|
||||
"hatchet_sdk/clients/rest/exceptions.py",
|
||||
[patch_rest_transport_exceptions],
|
||||
)
|
||||
atomically_patch_file(
|
||||
"hatchet_sdk/clients/rest/rest.py",
|
||||
[patch_rest_imports, patch_rest_error_diagnostics],
|
||||
)
|
||||
|
||||
grpc_patches: list[Callable[[str], str]] = [
|
||||
|
||||
@@ -209,6 +209,36 @@ class UnprocessableEntityException(ApiException):
|
||||
pass
|
||||
|
||||
|
||||
class RestTransportError(ApiException):
|
||||
"""Base exception for REST transport-level errors (network, timeout, TLS)."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class RestTimeoutError(RestTransportError):
|
||||
"""Raised when a REST request times out (connect or read timeout)."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class RestConnectionError(RestTransportError):
|
||||
"""Raised when a REST request fails to establish a connection."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class RestTLSError(RestTransportError):
|
||||
"""Raised when a REST request fails due to SSL/TLS errors."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class RestProtocolError(RestTransportError):
|
||||
"""Raised when a REST request fails due to protocol-level errors."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
def render_path(path_to_item):
|
||||
"""Returns a string representation of a path"""
|
||||
result = ""
|
||||
|
||||
@@ -18,7 +18,14 @@ import ssl
|
||||
|
||||
import urllib3
|
||||
|
||||
from hatchet_sdk.clients.rest.exceptions import ApiException, ApiValueError
|
||||
from hatchet_sdk.clients.rest.exceptions import (
|
||||
ApiException,
|
||||
ApiValueError,
|
||||
RestConnectionError,
|
||||
RestProtocolError,
|
||||
RestTimeoutError,
|
||||
RestTLSError,
|
||||
)
|
||||
|
||||
SUPPORTED_SOCKS_PROXIES = {"socks5", "socks5h", "socks4", "socks4a"}
|
||||
RESTResponseType = urllib3.HTTPResponse
|
||||
@@ -239,13 +246,35 @@ class RESTClientObject:
|
||||
headers=headers,
|
||||
preload_content=False,
|
||||
)
|
||||
except urllib3.exceptions.SSLError as e:
|
||||
msg = "\n".join(
|
||||
[
|
||||
type(e).__name__,
|
||||
str(e),
|
||||
f"method={method}",
|
||||
f"url={url}",
|
||||
f"timeout={_request_timeout}",
|
||||
]
|
||||
)
|
||||
raise RestTLSError(status=0, reason=msg) from e
|
||||
except (
|
||||
urllib3.exceptions.SSLError,
|
||||
urllib3.exceptions.ConnectTimeoutError,
|
||||
urllib3.exceptions.ReadTimeoutError,
|
||||
urllib3.exceptions.MaxRetryError,
|
||||
urllib3.exceptions.NewConnectionError,
|
||||
urllib3.exceptions.ProtocolError,
|
||||
) as e:
|
||||
# NewConnectionError inherits from ConnectTimeoutError, so must be caught first
|
||||
msg = "\n".join(
|
||||
[
|
||||
type(e).__name__,
|
||||
str(e),
|
||||
f"method={method}",
|
||||
f"url={url}",
|
||||
f"timeout={_request_timeout}",
|
||||
]
|
||||
)
|
||||
raise RestConnectionError(status=0, reason=msg) from e
|
||||
except (
|
||||
urllib3.exceptions.ConnectTimeoutError,
|
||||
urllib3.exceptions.ReadTimeoutError,
|
||||
) as e:
|
||||
msg = "\n".join(
|
||||
[
|
||||
@@ -256,5 +285,16 @@ class RESTClientObject:
|
||||
f"timeout={_request_timeout}",
|
||||
]
|
||||
)
|
||||
raise ApiException(status=0, reason=msg)
|
||||
raise RestTimeoutError(status=0, reason=msg) from e
|
||||
except urllib3.exceptions.ProtocolError as e:
|
||||
msg = "\n".join(
|
||||
[
|
||||
type(e).__name__,
|
||||
str(e),
|
||||
f"method={method}",
|
||||
f"url={url}",
|
||||
f"timeout={_request_timeout}",
|
||||
]
|
||||
)
|
||||
raise RestProtocolError(status=0, reason=msg) from e
|
||||
return RESTResponse(r)
|
||||
|
||||
@@ -574,3 +574,11 @@ class Task(Generic[TWorkflowInput, R]):
|
||||
)
|
||||
|
||||
return await self.aio_call(ctx, dependencies)
|
||||
|
||||
@property
|
||||
def output_validator(self) -> TypeAdapter[R]:
|
||||
return cast(TypeAdapter[R], self.validators.step_output)
|
||||
|
||||
@property
|
||||
def output_validator_type(self) -> type[R]:
|
||||
return cast(type[R], self.validators.step_output._type)
|
||||
|
||||
@@ -263,8 +263,12 @@ class BaseWorkflow(Generic[TWorkflowInput]):
|
||||
return options_copy
|
||||
|
||||
@property
|
||||
def input_validator(self) -> type[TWorkflowInput]:
|
||||
return cast(type[TWorkflowInput], self.config.input_validator)
|
||||
def input_validator(self) -> TypeAdapter[TWorkflowInput]:
|
||||
return cast(TypeAdapter[TWorkflowInput], self.config.input_validator)
|
||||
|
||||
@property
|
||||
def input_validator_type(self) -> type[TWorkflowInput]:
|
||||
return cast(type[TWorkflowInput], self.config.input_validator._type)
|
||||
|
||||
@property
|
||||
def tasks(self) -> list[Task[TWorkflowInput, Any]]:
|
||||
@@ -1552,3 +1556,11 @@ class Standalone(BaseWorkflow[TWorkflowInput], Generic[TWorkflowInput, R]):
|
||||
run_ref = self.get_run_ref(run_id)
|
||||
|
||||
return run_ref.result()
|
||||
|
||||
@property
|
||||
def output_validator(self) -> TypeAdapter[R]:
|
||||
return cast(TypeAdapter[R], self._output_validator)
|
||||
|
||||
@property
|
||||
def output_validator_type(self) -> type[R]:
|
||||
return cast(type[R], self._output_validator._type)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[tool.poetry]
|
||||
name = "hatchet-sdk"
|
||||
version = "1.23.0"
|
||||
version = "1.23.3"
|
||||
description = "This is the official Python SDK for Hatchet, a distributed, fault-tolerant task queue. The SDK allows you to easily integrate Hatchet's task scheduling and workflow orchestration capabilities into your Python applications."
|
||||
authors = [
|
||||
"Alexander Belanger <alexander@hatchet.run>",
|
||||
|
||||
@@ -0,0 +1,386 @@
|
||||
"""Unit tests for REST transport exception translation.
|
||||
|
||||
These tests verify that urllib3 transport exceptions are correctly translated
|
||||
to typed Hatchet REST exceptions while preserving:
|
||||
- status=0 (no HTTP status was received)
|
||||
- diagnostic information in reason (method, url, timeout)
|
||||
- exception chaining via __cause__
|
||||
- backward compatibility (all exceptions inherit from ApiException)
|
||||
"""
|
||||
|
||||
from typing import Any, NoReturn, cast
|
||||
|
||||
import pytest
|
||||
import urllib3.exceptions
|
||||
|
||||
from hatchet_sdk.clients.rest.configuration import Configuration
|
||||
from hatchet_sdk.clients.rest.exceptions import (
|
||||
ApiException,
|
||||
RestConnectionError,
|
||||
RestProtocolError,
|
||||
RestTimeoutError,
|
||||
RestTLSError,
|
||||
RestTransportError,
|
||||
)
|
||||
from hatchet_sdk.clients.rest.rest import RESTClientObject
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def rest_client() -> Any:
|
||||
config = Configuration(host="http://localhost:8080")
|
||||
return cast(Any, RESTClientObject(config))
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def request_params() -> dict[str, Any]:
|
||||
return {
|
||||
"method": "GET",
|
||||
"url": "http://localhost:8080/api/test",
|
||||
"headers": {"Content-Type": "application/json"},
|
||||
"_request_timeout": 30,
|
||||
}
|
||||
|
||||
|
||||
# --- Hierarchy tests ---
|
||||
|
||||
|
||||
def test_hierarchy__rest_transport_error_inherits_from_api_exception() -> None:
|
||||
assert issubclass(RestTransportError, ApiException)
|
||||
|
||||
|
||||
def test_hierarchy__rest_timeout_error_inherits_from_transport_error() -> None:
|
||||
assert issubclass(RestTimeoutError, RestTransportError)
|
||||
assert issubclass(RestTimeoutError, ApiException)
|
||||
|
||||
|
||||
def test_hierarchy__rest_connection_error_inherits_from_transport_error() -> None:
|
||||
assert issubclass(RestConnectionError, RestTransportError)
|
||||
assert issubclass(RestConnectionError, ApiException)
|
||||
|
||||
|
||||
def test_hierarchy__rest_tls_error_inherits_from_transport_error() -> None:
|
||||
assert issubclass(RestTLSError, RestTransportError)
|
||||
assert issubclass(RestTLSError, ApiException)
|
||||
|
||||
|
||||
def test_hierarchy__rest_protocol_error_inherits_from_transport_error() -> None:
|
||||
assert issubclass(RestProtocolError, RestTransportError)
|
||||
assert issubclass(RestProtocolError, ApiException)
|
||||
|
||||
|
||||
# --- Translation tests ---
|
||||
|
||||
|
||||
def test_translation__ssl_error_raises_rest_tls_error(
|
||||
rest_client: Any,
|
||||
request_params: dict[str, Any],
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
) -> None:
|
||||
original_exc = urllib3.exceptions.SSLError("SSL certificate verify failed")
|
||||
|
||||
def mock_request(*args: Any, **kwargs: Any) -> NoReturn:
|
||||
raise original_exc
|
||||
|
||||
monkeypatch.setattr(rest_client.pool_manager, "request", mock_request)
|
||||
|
||||
with pytest.raises(RestTLSError) as exc_info:
|
||||
rest_client.request(**request_params)
|
||||
|
||||
exc = exc_info.value
|
||||
assert exc.status == 0
|
||||
assert "SSLError" in exc.reason
|
||||
assert "method=GET" in exc.reason
|
||||
assert "url=http://localhost:8080/api/test" in exc.reason
|
||||
assert "timeout=30" in exc.reason
|
||||
assert exc.__cause__ is original_exc
|
||||
|
||||
|
||||
def test_translation__connect_timeout_error_raises_rest_timeout_error(
|
||||
rest_client: Any,
|
||||
request_params: dict[str, Any],
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
) -> None:
|
||||
original_exc = urllib3.exceptions.ConnectTimeoutError(
|
||||
None, "http://localhost:8080", "Connection timed out"
|
||||
)
|
||||
|
||||
def mock_request(*args: Any, **kwargs: Any) -> NoReturn:
|
||||
raise original_exc
|
||||
|
||||
monkeypatch.setattr(rest_client.pool_manager, "request", mock_request)
|
||||
|
||||
with pytest.raises(RestTimeoutError) as exc_info:
|
||||
rest_client.request(**request_params)
|
||||
|
||||
exc = exc_info.value
|
||||
assert exc.status == 0
|
||||
assert "ConnectTimeoutError" in exc.reason
|
||||
assert "method=GET" in exc.reason
|
||||
assert "url=http://localhost:8080/api/test" in exc.reason
|
||||
assert "timeout=30" in exc.reason
|
||||
assert exc.__cause__ is original_exc
|
||||
|
||||
|
||||
def test_translation__read_timeout_error_raises_rest_timeout_error(
|
||||
rest_client: Any,
|
||||
request_params: dict[str, Any],
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
) -> None:
|
||||
original_exc = urllib3.exceptions.ReadTimeoutError(
|
||||
cast(Any, None), "http://localhost:8080", "Read timed out"
|
||||
)
|
||||
|
||||
def mock_request(*args: Any, **kwargs: Any) -> NoReturn:
|
||||
raise original_exc
|
||||
|
||||
monkeypatch.setattr(rest_client.pool_manager, "request", mock_request)
|
||||
|
||||
with pytest.raises(RestTimeoutError) as exc_info:
|
||||
rest_client.request(**request_params)
|
||||
|
||||
exc = exc_info.value
|
||||
assert exc.status == 0
|
||||
assert "ReadTimeoutError" in exc.reason
|
||||
assert "method=GET" in exc.reason
|
||||
assert "url=http://localhost:8080/api/test" in exc.reason
|
||||
assert "timeout=30" in exc.reason
|
||||
assert exc.__cause__ is original_exc
|
||||
|
||||
|
||||
def test_translation__max_retry_error_raises_rest_connection_error(
|
||||
rest_client: Any,
|
||||
request_params: dict[str, Any],
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
) -> None:
|
||||
original_exc = urllib3.exceptions.MaxRetryError(
|
||||
cast(Any, None), "http://localhost:8080", Exception("Max retries exceeded")
|
||||
)
|
||||
|
||||
def mock_request(*args: Any, **kwargs: Any) -> NoReturn:
|
||||
raise original_exc
|
||||
|
||||
monkeypatch.setattr(rest_client.pool_manager, "request", mock_request)
|
||||
|
||||
with pytest.raises(RestConnectionError) as exc_info:
|
||||
rest_client.request(**request_params)
|
||||
|
||||
exc = exc_info.value
|
||||
assert exc.status == 0
|
||||
assert "MaxRetryError" in exc.reason
|
||||
assert "method=GET" in exc.reason
|
||||
assert "url=http://localhost:8080/api/test" in exc.reason
|
||||
assert "timeout=30" in exc.reason
|
||||
assert exc.__cause__ is original_exc
|
||||
|
||||
|
||||
def test_translation__new_connection_error_raises_rest_connection_error(
|
||||
rest_client: Any,
|
||||
request_params: dict[str, Any],
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
) -> None:
|
||||
original_exc = urllib3.exceptions.NewConnectionError(
|
||||
cast(Any, None), "Failed to establish a new connection"
|
||||
)
|
||||
|
||||
def mock_request(*args: Any, **kwargs: Any) -> NoReturn:
|
||||
raise original_exc
|
||||
|
||||
monkeypatch.setattr(rest_client.pool_manager, "request", mock_request)
|
||||
|
||||
with pytest.raises(RestConnectionError) as exc_info:
|
||||
rest_client.request(**request_params)
|
||||
|
||||
exc = exc_info.value
|
||||
assert exc.status == 0
|
||||
assert "NewConnectionError" in exc.reason
|
||||
assert "method=GET" in exc.reason
|
||||
assert "url=http://localhost:8080/api/test" in exc.reason
|
||||
assert "timeout=30" in exc.reason
|
||||
assert exc.__cause__ is original_exc
|
||||
|
||||
|
||||
def test_translation__protocol_error_raises_rest_protocol_error(
|
||||
rest_client: Any,
|
||||
request_params: dict[str, Any],
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
) -> None:
|
||||
original_exc = urllib3.exceptions.ProtocolError("Connection aborted")
|
||||
|
||||
def mock_request(*args: Any, **kwargs: Any) -> NoReturn:
|
||||
raise original_exc
|
||||
|
||||
monkeypatch.setattr(rest_client.pool_manager, "request", mock_request)
|
||||
|
||||
with pytest.raises(RestProtocolError) as exc_info:
|
||||
rest_client.request(**request_params)
|
||||
|
||||
exc = exc_info.value
|
||||
assert exc.status == 0
|
||||
assert "ProtocolError" in exc.reason
|
||||
assert "method=GET" in exc.reason
|
||||
assert "url=http://localhost:8080/api/test" in exc.reason
|
||||
assert "timeout=30" in exc.reason
|
||||
assert exc.__cause__ is original_exc
|
||||
|
||||
|
||||
# --- Backward compatibility tests ---
|
||||
|
||||
|
||||
def test_backward_compat__catching_api_exception_catches_tls_error(
|
||||
rest_client: Any,
|
||||
request_params: dict[str, Any],
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
) -> None:
|
||||
def mock_request(*args: Any, **kwargs: Any) -> NoReturn:
|
||||
raise urllib3.exceptions.SSLError("SSL failed")
|
||||
|
||||
monkeypatch.setattr(rest_client.pool_manager, "request", mock_request)
|
||||
|
||||
with pytest.raises(ApiException) as exc_info:
|
||||
rest_client.request(**request_params)
|
||||
|
||||
assert isinstance(exc_info.value, RestTLSError)
|
||||
|
||||
|
||||
def test_backward_compat__catching_api_exception_catches_timeout_error(
|
||||
rest_client: Any,
|
||||
request_params: dict[str, Any],
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
) -> None:
|
||||
def mock_request(*args: Any, **kwargs: Any) -> NoReturn:
|
||||
raise urllib3.exceptions.ConnectTimeoutError(None, "url", "timeout")
|
||||
|
||||
monkeypatch.setattr(rest_client.pool_manager, "request", mock_request)
|
||||
|
||||
with pytest.raises(ApiException) as exc_info:
|
||||
rest_client.request(**request_params)
|
||||
|
||||
assert isinstance(exc_info.value, RestTimeoutError)
|
||||
|
||||
|
||||
def test_backward_compat__catching_api_exception_catches_connection_error(
|
||||
rest_client: Any,
|
||||
request_params: dict[str, Any],
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
) -> None:
|
||||
def mock_request(*args: Any, **kwargs: Any) -> NoReturn:
|
||||
raise urllib3.exceptions.NewConnectionError(
|
||||
cast(Any, None), "connection failed"
|
||||
)
|
||||
|
||||
monkeypatch.setattr(rest_client.pool_manager, "request", mock_request)
|
||||
|
||||
with pytest.raises(ApiException) as exc_info:
|
||||
rest_client.request(**request_params)
|
||||
|
||||
assert isinstance(exc_info.value, RestConnectionError)
|
||||
|
||||
|
||||
def test_backward_compat__catching_api_exception_catches_protocol_error(
    rest_client: Any,
    request_params: dict[str, Any],
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """A protocol failure is raised as RestProtocolError, which legacy
    callers that only catch ApiException still intercept."""

    def _raise_protocol_failure(*args: Any, **kwargs: Any) -> NoReturn:
        raise urllib3.exceptions.ProtocolError("protocol error")

    monkeypatch.setattr(rest_client.pool_manager, "request", _raise_protocol_failure)

    # Catch via the legacy base class …
    with pytest.raises(ApiException) as exc_info:
        rest_client.request(**request_params)

    # … but the concrete exception is the new, more specific subtype.
    assert isinstance(exc_info.value, RestProtocolError)
|
||||
|
||||
|
||||
def test_backward_compat__catching_rest_transport_error_catches_all_subtypes(
    rest_client: Any,
    request_params: dict[str, Any],
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Every mapped urllib3 transport failure is catchable via the common
    RestTransportError base and is translated to the expected subtype."""
    # (raised urllib3 exception, expected translated exception type)
    cases = [
        (urllib3.exceptions.SSLError("ssl"), RestTLSError),
        (
            urllib3.exceptions.ConnectTimeoutError(None, "url", "msg"),
            RestTimeoutError,
        ),
        (
            urllib3.exceptions.ReadTimeoutError(cast(Any, None), "url", "msg"),
            RestTimeoutError,
        ),
        (
            urllib3.exceptions.MaxRetryError(cast(Any, None), "url", Exception("msg")),
            RestConnectionError,
        ),
        (
            urllib3.exceptions.NewConnectionError(cast(Any, None), "msg"),
            RestConnectionError,
        ),
        (urllib3.exceptions.ProtocolError("msg"), RestProtocolError),
    ]

    for urllib3_exc, expected_type in cases:
        # Bind the exception through a default argument so each iteration's
        # raiser carries its own value rather than closing over the loop var.
        def _raiser(
            *args: Any, _exc: Exception = urllib3_exc, **kwargs: Any
        ) -> NoReturn:
            raise _exc

        monkeypatch.setattr(rest_client.pool_manager, "request", _raiser)

        with pytest.raises(RestTransportError) as exc_info:
            rest_client.request(**request_params)

        assert isinstance(
            exc_info.value, expected_type
        ), f"Expected {expected_type.__name__} for {type(urllib3_exc).__name__}"
|
||||
|
||||
|
||||
# --- Diagnostics tests ---
|
||||
|
||||
|
||||
def test_diagnostics__reason_includes_all_diagnostic_fields(
    rest_client: Any,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """The exception's reason string embeds the error class and message plus
    the request method, URL, and timeout for debuggability."""

    def _raise_ssl(*args: Any, **kwargs: Any) -> NoReturn:
        raise urllib3.exceptions.SSLError("test error message")

    monkeypatch.setattr(rest_client.pool_manager, "request", _raise_ssl)

    with pytest.raises(RestTLSError) as exc_info:
        rest_client.request(
            method="POST",
            url="https://example.com/api/v1/resource",
            headers={},
            _request_timeout=(5, 30),
        )

    reason = exc_info.value.reason
    # Every diagnostic fragment must appear in the reason text.
    for fragment in (
        "SSLError",
        "test error message",
        "method=POST",
        "url=https://example.com/api/v1/resource",
        "timeout=(5, 30)",
    ):
        assert fragment in reason
|
||||
|
||||
|
||||
def test_diagnostics__reason_handles_none_timeout(
    rest_client: Any,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """When no request timeout is configured, the diagnostic reason string
    renders it as ``timeout=None`` instead of failing."""

    def _raise_connection_refused(*args: Any, **kwargs: Any) -> NoReturn:
        # NewConnectionError requires a pool argument; None is sufficient here.
        raise urllib3.exceptions.NewConnectionError(
            cast(Any, None), "connection refused"
        )

    monkeypatch.setattr(rest_client.pool_manager, "request", _raise_connection_refused)

    with pytest.raises(RestConnectionError) as exc_info:
        rest_client.request(
            method="GET",
            url="http://localhost/test",
            headers={},
            _request_timeout=None,
        )

    assert "timeout=None" in exc_info.value.reason
|
||||
@@ -563,7 +563,7 @@ CREATE TYPE v1_incoming_webhook_hmac_algorithm AS ENUM ('SHA1', 'SHA256', 'SHA51
|
||||
CREATE TYPE v1_incoming_webhook_hmac_encoding AS ENUM ('HEX', 'BASE64', 'BASE64URL');
|
||||
|
||||
-- Can add more sources in the future
|
||||
CREATE TYPE v1_incoming_webhook_source_name AS ENUM ('GENERIC', 'GITHUB', 'STRIPE', 'SLACK', 'LINEAR');
|
||||
CREATE TYPE v1_incoming_webhook_source_name AS ENUM ('GENERIC', 'GITHUB', 'STRIPE', 'SLACK', 'LINEAR', 'SVIX');
|
||||
|
||||
CREATE TABLE v1_incoming_webhook (
|
||||
tenant_id UUID NOT NULL,
|
||||
|
||||
Reference in New Issue
Block a user