feat: queue v4 (#842)

* wip: v4 of queue

* fix: correct query for updating counts

* tmp: save migration files

* feat: wrap up initial queue

* fix compilation

* fix: reassigns
This commit is contained in:
abelanger5
2024-09-06 16:12:22 -04:00
committed by GitHub
parent c720311aba
commit 891514b461
42 changed files with 13621 additions and 857 deletions
@@ -15,10 +15,6 @@ WorkerLabel:
SemaphoreSlots:
properties:
slot:
type: string
description: The slot name.
format: uuid
stepRunId:
type: string
description: The step run id.
@@ -41,7 +37,10 @@ SemaphoreSlots:
status:
$ref: "./_index.yaml#/StepRunStatus"
required:
- slot
- stepRunId
- actionId
- workflowRunId
- status
RecentStepRuns:
properties:
@@ -94,12 +94,6 @@ withWorker:
format: uuid
minLength: 36
maxLength: 36
- description: Filter recent by failed
in: query
name: recentFailed
required: false
schema:
type: boolean
responses:
"200":
content:
+2 -7
View File
@@ -12,16 +12,11 @@ import (
func (t *WorkerService) WorkerGet(ctx echo.Context, request gen.WorkerGetRequestObject) (gen.WorkerGetResponseObject, error) {
worker := ctx.Get("worker").(*dbsqlc.GetWorkerByIdRow)
recentFailFilter := false
if request.Params.RecentFailed != nil {
recentFailFilter = *request.Params.RecentFailed
}
slotState, recent, err := t.config.APIRepository.Worker().ListWorkerState(
sqlchelpers.UUIDToStr(worker.Worker.TenantId),
sqlchelpers.UUIDToStr(worker.Worker.ID),
recentFailFilter)
int(worker.Worker.MaxRuns),
)
if err != nil {
return nil, err
+174 -194
View File
@@ -548,23 +548,20 @@ type SNSIntegration struct {
// SemaphoreSlots defines model for SemaphoreSlots.
type SemaphoreSlots struct {
// ActionId The action id.
ActionId *string `json:"actionId,omitempty"`
// Slot The slot name.
Slot openapi_types.UUID `json:"slot"`
ActionId string `json:"actionId"`
// StartedAt The time this slot was started.
StartedAt *time.Time `json:"startedAt,omitempty"`
Status *StepRunStatus `json:"status,omitempty"`
StartedAt *time.Time `json:"startedAt,omitempty"`
Status StepRunStatus `json:"status"`
// StepRunId The step run id.
StepRunId *openapi_types.UUID `json:"stepRunId,omitempty"`
StepRunId openapi_types.UUID `json:"stepRunId"`
// TimeoutAt The time this slot will timeout.
TimeoutAt *time.Time `json:"timeoutAt,omitempty"`
// WorkflowRunId The workflow run id.
WorkflowRunId *openapi_types.UUID `json:"workflowRunId,omitempty"`
WorkflowRunId openapi_types.UUID `json:"workflowRunId"`
}
// SlackWebhook defines model for SlackWebhook.
@@ -1381,12 +1378,6 @@ type WorkflowRunGetMetricsParams struct {
CreatedBefore *time.Time `form:"createdBefore,omitempty" json:"createdBefore,omitempty"`
}
// WorkerGetParams defines parameters for WorkerGet.
type WorkerGetParams struct {
// RecentFailed Filter recent by failed
RecentFailed *bool `form:"recentFailed,omitempty" json:"recentFailed,omitempty"`
}
// WorkflowGetMetricsParams defines parameters for WorkflowGetMetrics.
type WorkflowGetMetricsParams struct {
// Status A status of workflow run statuses to filter by
@@ -1682,7 +1673,7 @@ type ServerInterface interface {
WebhookRequestsList(ctx echo.Context, webhook openapi_types.UUID) error
// Get worker
// (GET /api/v1/workers/{worker})
WorkerGet(ctx echo.Context, worker openapi_types.UUID, params WorkerGetParams) error
WorkerGet(ctx echo.Context, worker openapi_types.UUID) error
// Update worker
// (PATCH /api/v1/workers/{worker})
WorkerUpdate(ctx echo.Context, worker openapi_types.UUID) error
@@ -3244,17 +3235,8 @@ func (w *ServerInterfaceWrapper) WorkerGet(ctx echo.Context) error {
ctx.Set(CookieAuthScopes, []string{})
// Parameter object where we will unmarshal all parameters from the context
var params WorkerGetParams
// ------------- Optional query parameter "recentFailed" -------------
err = runtime.BindQueryParameter("form", true, false, "recentFailed", ctx.QueryParams(), &params.RecentFailed)
if err != nil {
return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter recentFailed: %s", err))
}
// Invoke the callback with all the unmarshaled arguments
err = w.Handler.WorkerGet(ctx, worker, params)
err = w.Handler.WorkerGet(ctx, worker)
return err
}
@@ -5995,7 +5977,6 @@ func (response WebhookRequestsList405JSONResponse) VisitWebhookRequestsListRespo
type WorkerGetRequestObject struct {
Worker openapi_types.UUID `json:"worker"`
Params WorkerGetParams
}
type WorkerGetResponseObject interface {
@@ -8308,11 +8289,10 @@ func (sh *strictHandler) WebhookRequestsList(ctx echo.Context, webhook openapi_t
}
// WorkerGet operation middleware
func (sh *strictHandler) WorkerGet(ctx echo.Context, worker openapi_types.UUID, params WorkerGetParams) error {
func (sh *strictHandler) WorkerGet(ctx echo.Context, worker openapi_types.UUID) error {
var request WorkerGetRequestObject
request.Worker = worker
request.Params = params
handler := func(ctx echo.Context, request interface{}) (interface{}, error) {
return sh.ssi.WorkerGet(ctx, request.(WorkerGetRequestObject))
@@ -8552,174 +8532,174 @@ func (sh *strictHandler) WorkflowVersionGetDefinition(ctx echo.Context, workflow
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
"H4sIAAAAAAAC/+x9e2/bOPboVxF0L3B3AefZdna2wO8PN3Fb76RJ1k6m2N+gCGiJsTmRJY1I5bFFvvsF",
"H4sIAAAAAAAC/+x9e2/bOPboVxF0L3B3AefZdna2wO8PN3Fb76RJ1k6m2N+gCBiJtjmRJY1I5bFFvvsF",
"n6IsUqL8itMKWOykFh+Hh+ccHp4Xv/tBMk+TGMYE+++/+ziYwTlgf/Yvh4MsSzL6d5olKcwIguxLkISQ",
"/jeEOMhQSlAS++994AU5Jsnc+wxIMIPEg7S3xxr3fPgI5mkE/fdHbw8Pe/5tks0B8d/7OYrJL2/9nk+e",
"Uui/91FM4BRm/nOvPHx1Nu3f3m2SeWSGMJ9Tn87vFw3voYBpDjEGU1jMikmG4imbNAnwTYTiO9OU9HeP",
"JB6ZQS9MgnwOYwIMAPQ8dOsh4sFHhAkugTNFZJZP9oNkfjDjeNoL4b382wTRLYJRWIWGwsA+eWQGiDa5",
"h7AHME4CBAgMvQdEZgwekKYRCsAkKm2HH4O5ARHPPT+Df+Uog6H//o/S1N9U42TyJwwIhVHSCq4SC1S/",
"IwLn7I//m8Fb/73/fw4K2jsQhHegqO5ZTQOyDDxVQBLjWqD5AgmowgKiKHk4mYF4Ci8Bxg9JZkDswwyS",
"Gcy8JPPihHg5hhn2AhB7AetINx9lXir7a7gkWQ4VOJMkiSCIKTx82gwCAq9gDGLSZlLWzYvhg0dYX+w8",
"4zC+R4Qv3HEyxHp4CfvKf2bUjrCHYkxAHEDn2cdoGudpi8kxmsZenhas1GrKnMwcSIuSRZ82fe75aYLJ",
"LJk69roUrWnHpyiJ+2k6tHDlJf1O2c0bnrLV5BiyPpTrKRURD+dpmmSkxIhHx2/evvvlH7/u0T8W/o/+",
"/s/Do2Mjo9rovy9wUuYBti4TVVDQBVww9Oig2EtuPYpZGBMUMEGnQ/yHPwEYBX7PnybJNIKUFxWPV8RY",
"hZltYA/pCZABKfYXpElMBVgN1wrKUUNQaSg6eUnMJLdGV1VCYuLQiBv6hSKED1HAWJXujeJUyFy5mBoZ",
"dlkQ6YIoS9HnBBMLBSaYfE6mXv9y6M1oKx3GGSEpfn9wIOh/X3yhxGk6fkCKfoNPzfPcwafSNOns7qYg",
"XTAJQnjrTL4jiJM8C6BZjHOZGPYtqydoDrVDMRNjeQ8AC3Faktr+8eHx8d7R8d7Rm6vjw/eHv7x/++v+",
"r7/++r++pqaEgMA9OrAJRcgiCFDI6UUDoueh2Lu+5oKBDq0DMpkcH7399fAfe8dvf4F7b9+Ad3vg+F24",
"9/boH78chUfB7e0/6fxz8HgG4yll7je/GMDJ03BZ9EQAE0/0XyeOFugf0cGLXdRBtvDCVXIHTeLgMUUZ",
"xKalfp1Bzu6UOAnt7onW+84bO4cEhICTYMMZUaJYqxy5WpAjCrb98r4ev3vXhEMFW0+JE4UMIxKDAKaE",
"6wQj+FcOufAo45MrAByzq1HlHMV2Iu35j3sJSNEevRxMYbwHH0kG9giYMijuQYTovvjv1Yp7eY5C/7lC",
"SBxe03q5viVJx7pigbRhbN6sMM8KBf9hhoIZ2zdOTwh7DNR9f/kFJnNEYhT15ERsn83E0+ekw/WjlWiH",
"je+CNJwmMYZVrBHJjlWMlcCqB4OPYodjcA9jYt05EIaIzguiLxqbLqBMtfEkvyj8QTq6BmQxu3ksRg5u",
"A9yZDkza/w4+WbtbkMTPVQbSN4WZ8flYU5OsKCJJioJ+ZtupOfhvEntScnnnlLr+1h+d/12Kp/H52GNj",
"rELhioXnKP6fo94cPP7P8btfqrysgLUTBL899SOYkcEcoOhTluSpnbVpE2ziowhhQtfIW0gdPaNXK0cF",
"donlh+ge9tiM1bULUJtW3iC9+eDGvWaf5LbStdKLHZeea9lbua6enyURbDov+Wq+wPkEZiPa3ogPXwzW",
"/jeEOMhQSlAS++994AU5Jsnc+wxIMIPEg7S3xxr3fPgI5mkE/fdHbw8Pe/4kyeaA+O/9HMXkl7d+zydP",
"KfTf+ygmcAoz/7lXHr46m/Zvb5JkHpkhzOfUp/P7RcN7KGCaQ4zBFBazYpKheMomTQJ8E6H4zjQl/d0j",
"iUdm0AuTIJ/DmAADAD0PTTxEPPiIMMElcKaIzPLb/SCZH8w4nvZCeC//NkE0QTAKq9BQGNgnj8wA0Sb3",
"EPYAxkmAAIGh94DIjMED0jRCAbiNStvhx2BuQMRzz8/gXznKYOi//6M09TfVOLn9EwaEwihpBVeJBarf",
"EYFz9sf/zeDEf+//n4OC9g4E4R0oqntW04AsA08VkMS4Fmi+QAKqsIAoSh5OZiCewkuA8UOSGRD7MINk",
"BjMvybw4IV6OYYa9AMRewDrSzUeZl8r+Gi5JlkMFzm2SRBDEFB4+bQYBgVcwBjFpMynr5sXwwSOsL3ae",
"cRjfI8IX7jgZYj28hH3lPzNqR9hDMSYgDqDz7GM0jfO0xeQYTWMvTwtWajVlTmYOpEXJok+bPvf8NMFk",
"lkwde12K1rTjU5TE/TQdWrjykn6n7OYNT9lqcgxZH8r1lIqIh/M0TTJSYsSj4zdv3/3yj1/36B8L/0d/",
"/+fh0bGRUW303xc4KfMAW5eJKijoAi4YenRQ7CUTj2IWxgQFTNDpEP/h3wKMAr/nT5NkGkHKi4rHK2Ks",
"wsw2sIf0BMiAFPsL0iSmAqyGawXlqCGoNBSdvCRmklujqyohMXFoxA39QhHChyhgrEr3RnEqZK5cTI0M",
"uyyIdEGUpehzgomFAhNMPidTr3859Ga0lQ7jjJAUvz84EPS/L75Q4jQdPyBFv8Gn5nnu4FNpmnR2d1OQ",
"LrgNQjhxJt8RxEmeBdAsxrlMDPuW1RM0h9qhmImxvAeAhTgtSW3/+PD4eO/oeO/ozdXx4fvDX96//XX/",
"119//V9fU1NCQOAeHdiEImQRBCjk9KIB0fNQ7F1fc8FAh9YBub09Pnr76+E/9o7f/gL33r4B7/bA8btw",
"7+3RP345Co+CyeSfdP45eDyD8ZQy95tfDODkabgseiKAiSf6rxNHC/SP6ODFLuogW3jhKrmDJnHwmKIM",
"YtNSv84gZ3dKnIR290TrfeeNnUMCQsBJsOGMKFGsVY5cLcgRBdt+eV+P371rwqGCrafEiUKGEYlBAFPC",
"dYIR/CuHXHiU8ckVAI7Z1ahyjmI7kfb8x70EpGiPXg6mMN6DjyQDewRMGRT3IEJ0X/z3asW9PEeh/1wh",
"JA6vab1c35KkY12xQNowNm9WmGeFgv8wQ8GM7RunJ4Q9Buq+v/wCkzkiMYp6ciK2z2bi6XPS4frRSrTD",
"xndBGk6TGMMq1ohkxyrGSmDVg8FHscMxuIcxse4cCENE5wXRF41NF1Cm2niSXxT+IB1dA7KY3TwWIwe3",
"Ae5MBybtfwefrN0tSOLnKgPpm8LM+HysqUlWFJEkRUE/s+3UHPw3iT0pubxzSl1/64/O/y7F0/h87LEx",
"VqFwxcJzFP/PUW8OHv/n+N0vVV5WwNoJgt+e+hHMyGAOUPQpS/LUztq0CTbxUYQwoWvkLaSOntGrlaMC",
"u8TyQ3QPe2zG6toFqE0rb5DefHDjXrNPclvpWunFjkvPteytXFfPz5IINp2XfDVf4PwWZiPa3ogPXwzW",
"hBUrPtzOYH6tXgcW2DJwlE/Nk9Iv65+0J0xHlHurhCVUAwaUCY9MxG5Ttq4kGlfSyIgyvTSTZtHedMmn",
"4A5Py1u5aHITBjnrQh6S7O42Sh5GeTzO53OQPTVBxrbqa7VbjWLIzw61kG9yw0+B6VrV5tjz/vav8cW5",
"N3kiEP+9+RBTxxeb/rfVaECOcYZMTJ+CKYqV9aAOoZeqpVJ0mPx6cLdWquVUDRwS0F2BsgbEiyyE2Yen",
"U5TBQIIE43xOdw7gwOemeE1+LOyF6P9RGqpl3+KGZ+06hiALZsbz1kbvFVzeAmQ0CTFBn9MzhrIqb+Vl",
"eVy+ANr9DymMQwpLw8CiWZuR/8ph3gwxb9Vm3CyPYweIRbM2I+M8CCAMm4FWDd1Hp3T4r2RiEEh1DiAm",
"lzQXkJDGfyaT/Q1d5StjYgJTdy4cE5hWmbB8zlSVTjSHSU7Myxcfm5Z+DzOMktg4g/3sUGDpAyhbA1+6",
"SaP4VzIZ5QZTTQDiAEaRtEu5GWBUJ+WJtDcZQYA5oRhcaDHCs3ZT/8kpsm5HKdHylpbdW4HoMojziGij",
"FhjGBGSk3WIwASTHDuuhcpa3FfQ9yuN2JE43vz2VB3cwq2eBNsvVlKsmkLUDZqHn8vxSHkQSiNoFO9eM",
"1TbJI/RycH46PP/k9/zR9fk5/2t8fXIyGJwOTv2e/7E/PGN/nPTPTwZn9G/TWUuVELN7xdUpu9jVsMVi",
"EmY6wnbb0VZVH2U6Nmo/FOKyKQW/MLxlaBqd4BpsYiITcbFlRiC4+wonsyS5e/FFarCsa4nJ9AzFsJWv",
"iB6h7DNVH6g8kQdplEy9CMWwjaOAB5QY56DDiQaNqomtN29huFEvYEt3qhRRLmqGbwWqzuA9jHRRczr4",
"cE3Fy/D844Xf87/2R+d+zx+MRhcjs0zRxlGqv9P+lyAwCRLx/eVvTpKszNKDf1zh9lQeoeX9SXSuuUEZ",
"EKB7dr77QZ5lMCY3KaPd454fw0f5rzc9P87n7B/Yf390SC9EZc4qdTZ5GkULL+VUqCY+drpyaLAY3fHw",
"sTryG7eRi3UZHaQJAZF+waNNmV0iQphwK10RznbocsMxSKx/09vdF0gyFBjkcZzPL92un4yO5SV037be",
"fzvdOPlYiPtN2fXTOuDI7arJRxQXzn0zakr2SgVqaZaejhCT/B/BAMZkrCmsCwZNxpo2Yx7/6pk8efoN",
"o40KusyVY4XrwsbuBAKlxaWgoiEXoZw58+vXa8xqI3q68ixgWRzdvNP0r5/Hjz2CaQSe6l2hzFY6DMvn",
"WuuVL25kG0w0RmtKCL+pJWk3L2xdWYkeXnZ9WvN3h4cN612A27Zq2x1J6+6urCxcZV3hk9BllMsZs9ew",
"VZqbbBIVaqbN6KgL1xnDgFOIyXVm8Vpej848kngYxiHzRQuNFnsk2Yx3yHZA5DH6K4ceCmFM0C2CmXJT",
"CO+PiKfiLnM9/HACoySeSogbZGVvkx57N9tFrRd+DOcgnSUZHEcJWfMpi6OE2PymCWEeWycMlk5Cs82U",
"++bYsA8Ae6KH+21vyZNTmNNs6KGfqaYkENRMKrpdrHmhKIqkwdh9pZXDvjqPbOIO+gIdso1nwkI3C1Qv",
"8jMQxzCygSE+eyg0e2IxHdx74KOb1Tw+wrk1aEBOwYIHlpxkJQkF5rbV028rLJ12t6+bDb7KondCtrpJ",
"P4kIhe4yXfQ0MjQKSAJTm1g0ey9mKAozWDbFNqhWG/I4pCCTWWDukGQQhGASQdvmyu8qFprLuUYyWckR",
"ZpnBTgHaKkrkIA33YgO5TaJm6zfg+OqTQZqU7DvaPXxN7jFGhF9tKmcjDSxHw9AK8jJX56JPDboW1deS",
"s8/BVyRcm6r9+nkwyYkNxCXZk1lx+rcEZu7IXLvvkXep2ZkVNCpXtztta5MtDoKnzYpVl5oVU7XJ4vJ0",
"OqkUBaqV1foXBer6WTBD9/BVCindkeMG306JmCQLYWbuVMP1G+M37SqyHZJf1PoVBBI1PaO/w0bCO+Ac",
"WuApo49ItLGE2QZJHhPzDskDpIIGFJo7aC5Ig0CXbOWwHmFqZD0oqcB7mCHy1Kb3WPZxIrWPKMNkDLkC",
"4U5uZ6BdL1MOWnn6hXF7JSIVONQQontY+U7WkO2uxIKWCLKRZAt5LH2So8G/rwfXg9Ob84ubrxej3wYj",
"v1f8OOpfDW7Ohl+GV37PH598Hpxenw3PP91cDb8MTm8urunP/fF4+OmcBa2Mr/qjKx7HMjwfjj+XQ1pG",
"g6vRf3jISxHd0vPpWBfXVzejwcfRQPQZDbRR9cnGZxe05dmgP1ZjDgenNx/+c3M9Hpgd20ZK1lCgOcgF",
"dKPh1fCkf1Y3Wl1oj/jrhq/hy+B8AU3OoT/qb9raBExRfmCxMALMRD7GwJI181UmWCceay1v53PWC+8b",
"s6lBDKInggJ8kZKLnNSMWlz3ZwB7SUpg6IkrnRrEPMfGkzRtuRorJ3s0p3Ra8zaMmVDbTYFaBfX2hddk",
"QhnXvAMi1bwXpoyxabLHSc4f0QmYuNV6o3g6hoT+B2+PRXkRgsFjiugus2A5Bkz9+LwXnwZ7DyzXmsX9",
"eSCDHkjTLAHBDMVTnnTNEFw3v8zk4kRyhuaILAkFX7LMaq/CE9Gxa3GhWUI+AhTlGXQAhfmodEB0uzhm",
"2QfmOSOA+VLtPguWlc/s+SAWO8v8FiJBxjG4ADxKIvvIzAJxYMnCmYNH71Y28QCRec+CqtZr17ZLAiPA",
"drkwVC7/zSRFPqsE+1p/iyyrIErpbLPkwHKZl03mecFQNueC/GzHGm9R515gI5Ry25c4MUspo8Ve6elv",
"DbSzM0eJIOV2Jwjf0yr8L0ZQ7pmWlPWaWl9jmPEel/kkQkEdKbDxapKHdZh3ZtPF/i2z6SOxT/JmcfH1",
"nF3L+qdfhud+z/8y+PLBctfhw9THIrJ4yKZFlIbQPMflbOLL0sBtxluMOFGwS6LVEaAupoPf+WWK/vDx",
"7OLrzej6nF3YLs6LC+qgBjMljcSklIFs/juIcotsY9+9e9rALD6ZNkKPnQeQsRy2iqrCe5vjMKliMIK3",
"KIqaFAcW1s6Go5pDxvq0CXlgfWsWyse2L9EM/2r5UWrbm5lLEclzz7+3r0IGLDdtmHk1DygOOcEbdCpI",
"YObxFuqU42N5f0P7cN878kLw1POOvAcI7+h/50lMZn9f0pGt0FPaOrl4u1CUiLpMIhQYcm259lx3oVT1",
"pHhTw5HeQiiW2a8pqE0AZ1xdhqZTmGmqfcu6MlU7bNtYuGtW4+pnLGGir7whcHct1UOsB78OiH3/X7Et",
"rLvMv+xlfoOX7I2UFHM2dT5buekrc2TbQ4bxJcixKfVEJ3fuDfcQ9lLW2gNx6AUgjhPiAVa4jlXAlSUF",
"FhFvhA6bbkON1gAQhhnEWLcKlLQkec2sGgfoh88Az0zSegbwTB/y/+GF6YT85ooGLyA75rVYvZMZINYJ",
"f4cZukVN6GW2DSpL7kVzUcS4BIOZomcA20slG+cAqjayhyHZos0+RDiNwFOJoOX+tTYjlLH7zUJg5VrS",
"9kJU8MGORMaD8KHAmtSYzLAvcWyrWtXPLI6oDhAFRC3+VoOhkuCrKmnreLKh/CyZonj5EmjL8fdKFdF2",
"DuNyjWkTrkdwijCpke67iG63k84iGHZwt2R1V9dN09VjPEMpfq0mrorJb4un+SZOGT6ZadtE0gNXpdZq",
"wnVjBhG8L9QwI1vkthwt2TfPomU83HTcRpTwao8r1nl0WCSGQQZtWUjsmypRJ3iY3oS84S17yiDNknsU",
"wrDnAS8DcZjMZSeWfDOB3hTGMAMi10jPNzzeGMbbozncTQJcbm+2TcoKzkZkU6m8I0VyyuLH6a2GUhcr",
"Y4qAzhtArHUlIbvqKRt1xofSa/W38p3OkrDVagXoX3hPFYl+YnzhiIL8+erq0uON2NNGkoIzgXyHGgca",
"VhTMpYm/OSK8noQEKrHNYM/th5LmZWtnA62RApamnS9q66QP59Pgyu/5lxdj9p/rK2ZDtZ2QPEEH1+Wd",
"Ym6/F5aGAMReCjNKV/utQp7APUARmERQ5sk0FFqsTgsfYZAT6AVJLPwN0ZPZoUBVDVbcNxs2vDDBnBVo",
"GsPQKzqt462JFXPmIzCBEa53trA2jKWK40AdA86J7zA7o+OYtiwCmHyGICMTCBzSZcVWMd8ZpgACbyZ7",
"76/34RA6B2ViqhYMMAGTiGUY7BCEc/BoJ/Q5eETzfL4+gt+8nmHXL7JKBRlTNiNtozK1C+dWS4JdqFZj",
"oFksE+xNyo5IwueJ3ZxxlgRkIaHfBIiKWzZlrLNjsIJbKcL7J1fD3wespJj687J/PbYUKeQ/FCfAeHD2",
"8fPFmAeIf+mf9z+xv74OPny+uPjNOIQ4zay56uKwE+9ylaFuTLgXva+b1Mfr0Zlh+LbaJGtv1AQ0aVc5",
"CGvLq8vydrTrurPTa7zq3JveMHl9EfMaPLy8ccOqNwsgZRjM6sWNpRtq31Kw1sKn9ItpCKfViSK3awv9",
"biOqrWuVljTDKQmmy69V7tYVMOp+og5y+4o8v/OOqpJDXRDDoiSwkT4d90SeqKZQjSkk2neVILDgmopl",
"WRbuf5xCghnugqKrN6V9lUjTPKr71lChMckAgdMn28HBv3ok4V4v+VaXPisPKWIvlIFgxu+B8mjgaS43",
"w/Oby9HFp9FgPPZ7/uno4vLmfPB1wK4MLCOp+Oen0cX15c3o4vr89GZ08WF4bjw/Wqo7hUZT9hAvlj1/",
"c9x8K5RTLyKwZ9zIOqoYnppckArA4alx22Tv31Bcuod9vD4/uRqyeLnT61H/wxk9xU/75vwifRApmVtx",
"CpvdwHryu1ncr1T1YssnBTsN3O7JorU1MJPx5W/w6USmkRrE4UItyypb38EnbNbG5fCULGumWND+qZgA",
"Hk5hgG5RUEzi/S0FGMPQu0fAu0URgdnfHUtlfi2X83YJ1zKHnJZej9Xe++Im/XNbnX8VSKFXnTs61IrA",
"aVhbb9K5ijJrtSBeHMCdLovCFms85nnBCq6Kb9vuwOce66nH2wZhYxU59SdRVCWM+hIWPPgRhh+eWgx+",
"pfXSArqFLtNS9TGMsHp9/d+1dylUBVF9sd/qhcmO3B3qajbWgV9XeLo/PqHH9GB8UntOF6PUFJ/Wabkk",
"xTTJ2DDJWh42EDrdqdtkS6kfZZ6y6CALvGHweiTxpSb8DKVMknhMVdo8Mh84UJaLcHrVSdRW3Uw9sJZc",
"qjo1sB4+YSVflqn7uskytYtlWxsWYdXMWDmINlQnhzrhHZtYf6F5ZX7BPcZCIZLzjB8Fhxm/SUY1fix4",
"11z7xboaetc24C/iStTqZpGVrQ1mhzqHsI5AhIw4yejxcGsWEzWVvm6QhfOaJhSFO24tZaJvhIlw3dNi",
"8wrbH4ULeDOVjruvVEJrMbDCz3rVXa6fmNFXqCw3wobUHs2a3rXIKyUjkAsmdLuRZjl8IXugqozVaDLp",
"+Vgcm1dFOUibutuWLrBm4rMYsvlHJ7Gj5z+6mgRkn5ZnroS5KKSlDfStmaROIdXpzAW6M/BQ/mxwioEH",
"7z/9L2deqBq2l6rleRyAZsSzTkNQGyr8CaiER1zlGSJPVFWdc/xOIMhg1s8Jy3Jg0LEbNfu5WOCMEFYA",
"JUiSOwRlc0QxxH+Sxu33vni9uOgLUsQeVH1m1o/bxBKGw7t5/cshK9tF2M2+/KvaJf9o/3D/kG1yCmOQ",
"Iv+9/2b/aP+QKaxkxpZ2AFJ0EIniiFNTpNknaRunrWKIsacunZQGgax97p+J75/YumRkEJvl+PCwOvBn",
"CCIyY2L0nen7eULUnKWd8d//8a3nY/kwKoWwaCi9JH+I8YMZDO78b7Q/W2sGQfjUvFjaDNWtdiQbrHO5",
"DDiWDcWzf0gGbm9F1n/d6hW0jcu/PzoAIlVrj0Xm7jFTJT74zn7Wf3vmMEaQGDTJU/Y79oB6CJplBPL4",
"Y9a9grGF7E8+Ar88AZYoTMGuKZVRmcFjFyHGX5SeC+6qLMXX5QO3HnIZs/LN6vlbZe/fVrE1zoMAYnyb",
"R9GTx1Eall7RriDvuee/5VQSJDER92KQphEKGEYP/hQV6op1NEh+VodUxJgveknmIKJYgKGXZN4EhDIu",
"joPxZu1gmKD4mGQTFIaQJ7YV9M3ppI7MJMWL0hrfev7jnkqepB9EZY6egTC+sSsACQz5azzRbxUS5yP8",
"GCTO6OFDwmXnWojBITPcQCa12CKJl0ucl7HxbBbRa1mIpRJaFfaSGOCAdmLAUQxwatmcGNAPyBTt8Uzw",
"g+/qb3Yapgk2KA0jeJ/csSpl/cshzyEX/kA144KYSBFLUudr4t1dpIQa3iITJKw7ddxlbHmCzuWjtD8w",
"UeM2VC1Ih27sldg5ScbFb3WUrLa8RMFBlOThgX4ttGu7spWKdJHXCTaIh2JMQMxKqpSJ+IR+ls5nuxK8",
"edwyQLw8VjHqO0NgDVo7R7Du7BNbr1z6bOflEHtJyl3h4kTT9pubBg++s/8+HzRtOttmtecg9li//coW",
"M5vhqdreRvnEhrGqLNy9s03RtD4SUJhoPNYzSDIE74XA4xhh+9FJvRLxa5gpCJ8792rkHaehkqybw6Ul",
"nFW2bU+siXKgbWhKCYxXIubWIeDoGAdo4cF6446fIUzVw8grtbZtMG09LDfc2G7TucSOlx67b7X5MoGt",
"tLpdIgS19WwjFjahuv/6JrMn1Q6+s/84GJ+8sf4EW2WL9Xf13G1NpTGtRxkDcSeNSmWc7NKZc7QdMK5j",
"kJNZkqH/wpBP/G47E/NcTpYSD6IoeYCh2ZC1SLWSJ9jvdWcfJ7oyx9ArKo6xE7eUnxGs8kuMW7DJwpuE",
"VkYRInXn2GQBGR2j7CCjVAhWscr5uJZR2OPOi2zCPz/LS5LdnEPnlXeVCou0NunaOENBuynm6NlvaHfw",
"adkrmgbD8bt3JSCOnK9kNQyaZgn9Bwy7M2yHWNOm3SMyyyceSFNJ7dVjjbdZ4EcC070sZ4eX+PP5APAH",
"xZo0e9FKJlyInOQqq/IgeKZzy4EdmFZ7jdpyoAl4t824It2EJB6+Q6mE7a8cZk8FcMntLWY3VgMoKCa/",
"vDVmntRPx/PRJk+WKdnnljNu0lBjeC5vCYsN/smtNXTWt9uZtcR1DwAz4XOb5HFouk+W2F9jfqUZ0J9G",
"ea3NXLFws0wqQi7tEom3aSGPBnzQThr9NNKoeAWxk0U/jizSGH/zkihKpvVyCHtRMvUiFFd0o6pf5yyZ",
"nqGYn46dGNoNMdSzP3YQwXsYYTovTyCumZi1LM1ca5EWdEB78Uw4y8oxpAevx2bT4LhNMgsgvENbQMa8",
"lwGIr+yFscRjQbz29Sd6Vl/LyUsZgRY88OlDlXpYC8Wp1mwZSIr+mz2kdGnQdD5RkuwOJ4tbk50KSgpr",
"Z8FZMm1/DPDP2G6n4nVLsQdYHXVLqBEPhuJN/c3E8fHBy8+L1AfukURUuXyBML1GEhcZwFpcXheFp0ic",
"73VBbE0xdyaKVqZYnnZeE3vLQlMeESYontYT+Osxy24hmNaNCYsUohcNm+34cW1RsS1iYGv50pwhUh9j",
"A5S2aovQxU3R8q7XkZ3g4G2Gki9hObBvQsc7JXWtjlrdmanXQkVrn0aitLef9XDTNcz1ZYo4q6BHL5wp",
"Uj0Bu0wRVx11pUwRt1PyAENC/4ubs0plF092qc8T0cgFxdOx6OMYjP2THJMaYlY4I/U96VipFL5rRdPa",
"+EilW9U72lT2E3bLrur0SRVzzPCBi3JtrfhEFkzobH2LyqNK0cLt8raaFMYlUgk7HZEhQNK6phZu0oSx",
"OGnHX+viL8EISyZG1h84DlEdmKWQlEI7ikeCDUlyr+Ws+ZndqHfwycmJStuVZnUqEMbIgJXwqRZbtMOk",
"lSd3gq2QFa0B1OqkLwdilseiGA50glW2dXZ/mouZvpBLmu3nyzik2dQ74I7W4dCd0TXEolIt7+CTeAEl",
"BSir0IsqS/0HZbej96zpEX/25Jj/65iKd9N6DKXPjczQVAN14/nCnUd9LVo2lPGSjlnCrubYuqT3Tp1m",
"CBBFQ2tNrDyl5GVc+qIYdAv7KZTlo3/uiMrjf25nVlluUqh68DGAMKwkfAllX2YfOfN5s5J/wJQ5R02f",
"a4gO2v5v8KkzLhUq71LnHEN2d9aZzjpP3EDWyQcZTCPwVFfDin7X/fi8o4UDZOUqNujPe0pyBLifkihk",
"mnAm8bblk9IxkIAC16mkO3xecrJbQjGuExQovkcEtg3zkb3Mrssh+9qdldJjqeFjKV+lxHbnoTQF8RS0",
"uKHIHT5BLa13F0ctVoejxC1Eh+P2ReNyOLjLhOMIwujY0hyDo/hmPQEDgs/lD3v83+3KlTuwcusC5bvl",
"1SnzVT1sewodr/1sbeReQ/X1HeNeUy0ctT+2HKLyPrapau7ACa+86M0OcsJmE0CWO3dfLAXEkXMNBdN3",
"mXNFakZrzq07+eZwPhFPPLW4o8leZhb/wr52dzRJjRo+lrqjSWx3yqDpjlbQ4np0QTHewXf+h0shRCCA",
"8G6zZN4UfM2p4cdQBcWybbDxz9sv17h23l1GB/w5uHaHaq2cW0qrKCYtbcza5MVfOczh3rx4wba2Rj5r",
"7YnWqnx4rcD4BMm/aS/5SO5rlBmvKj6tCznSJWCJ9paLQ1ZPOUou6WTiC8tEKo7U7syVYJESUT2TuaRM",
"lD320iRCgdPjiMJLxTu4JLFJx9Yl69GlsB2Y0LLchWNhN7qLx9YzQXll9trktVLVd1z7WEF3Fed5azpO",
"2pxlC6ju6kfvUGl3jRcsT4M0PIPgwIgHmICMWNlxTL/yc+yin5OZx47ORYa8xjDjFjwG0AVFKOv5Gjnz",
"zeFxQ9l1hjJxrJSwMoMgFBbHKOEEU6aVxbmfFwqGfy89h/3Ht+dSBXGG0vKMkhDoDixNB025xAtvC2BT",
"qf9ODgs5fD4uPZHUQhIvYrmTxTsni6uM4PTKRmMKs8NzM12sDENAmb9qM5fXR7PlSZ1jXrp3c3aYoa2c",
"58jRtSeqoSh1rfm0qD/tTZ445xor4r8SU0BvVwtjb6F8fUuzhCqG3tkNd61uPWXMtdaqd5ITBwGIAxjZ",
"k0/6hMB5SpjJnrd1eEqD385O+NCdBHndEiREmMWrCRHCiSDaPR3jhXPEmhhlWwydQdqxJpmMdnDmYda8",
"Y+FdTG/L8lhsVUM0IYrTnGWBc/+QabnPO6GpdMltNfKFbfhLCJRiTbXxGLyZ41tbnyAZ82E70fJy2oEY",
"L5n8CQOy5E1C7Ht3odjpC4XcpY1IDeHO23tIsru6COiipIPV19q5WYuYK46KrwypFCF1JQwpMlRcmHgg",
"X25HZwfcNcO+Rv7L574Wz/kaWeinN+CX+IdjY0uVRw0zh60yV7vntXfXgq8z3jLGei6V683z9ITkwrs+",
"fK84G376w7LARFfgd+WrpoxpLScDcRwv66SSiObXy/Ylj/RSp4bKR1p90q7+kVb/SMMLbjATlYrJvlw1",
"JBPczrW7NQtSiWC66+lOVkkq71E1ap5eUPF6JM53/Z9N7vESKzQewYJOX7O3fIH3zaDpGHzFeoLYrmUz",
"cDrvuT3/pWyYbs596ZVpyo2fsZPyzFq68W6nQBeM0anQ62eLdeaDqTEbAzdOpA96Akgwq6jQdSfZ6wnc",
"2JTtSNM+OTJcXazC819VQNetQhdPBNN/PRQAD0NcyntdCcHVZN+WTiMRLdLp5A3ZcJxsXPTh1SUHG9z1",
"DGWgOOvA3Vs7u/rWjl7omM45hURt7b5lYtZ+GPrbuny4Qya7bB64FGQUaZYL0gJYvPFXXRhvCT5DOIIR",
"NhF/sVm4dvqBIgNwdygO3Z6fog1bg/QbisNmaF5TrQxzJSM0hx64pYAunvUsTkF4uPQl+MeHx0d7h/R/",
"V4eH79n//teCe9G9TycwE28ICNyjUPiutZcoxBN4m2RwkyB/YDOsCWb5qJV3K96tWtvTVhoHuT9wNXna",
"3BtXVYD0l662ZCZa4ULcabkNtqLNXIyZudelihTwBGhU/JaZXy8r5Wj7fc1FpTrlsFMOt68cdhpPp/G8",
"iOsHr1iEjQmgrgLbJs/3HMMMHwR5loml1Fc9Ew092s1YL+YTJCdisA3SGKuL0o6oGMRdoN/LB/q5lsWh",
"RL5AbuWyOFUyniIyyycHAYiiSV01spOESlwCW1RA+sSGZiWQTuTwrSsMBWLeTdQYKuFOLrCuqpAVfest",
"HaUhTtaO2pm6TO3KMBkQliTTCG6G3tjQPzi9cfStmd4KxP1w9Nb0PF2R+FN+DUxVbW48vukI+oMU2N+l",
"9+C091N/qsfgXJRD12PV7bE4K+0dgCCAKampY8G+t3tbh/fxNxNjwAevPAdjiQuooT6+8u7Rs/rqDAxJ",
"jY+e2ekrgyzeoSZinX5vR1+8j7+p+G86+Broi6+8o6+G7HyKpCXoK0qmqKZcx1kyxR6KPcDOxv0aBeOM",
"DbShB6zoEUzH31IGndM9OkqmUxh6qKt0t1vX5/KxTqnG9Z4cJdMkJw3MkOTEjRvoUDtCoxSUjkhfj42H",
"U48r2Yr3iWYobXEF0jq5XYP0V5RYNxFrt1ECN0/a/j6ko6i7Ey1zJ9Ix2EySKcD4IclCuywVTyBySerJ",
"9nUi9VKOuTkd42QG4qmaaJeUjYBBFipEdeL8FYlzTlZlSndgogxOqSDL6i59vAWu1UhO9EfTN8E2Eoxd",
"YhiJvM7N9Sr0dElCrjoPf81jEx6G4k2P3XQwNIialh6HhdpZB9/FD44v5jcUAXJ/G1UWjbLm/qqJtpzd",
"6PhUaFcyZwdL5qj3QptK5vQUfbkxx4HAs8t9SzYVsZwNHCOOUOcHZnaWb9ZfyUqgZplSdGq7Ovbc0Vp0",
"xRa15VHFm+wPl8oaBuMGpzDHChp8jNoyFRt9K7sSWfmRB6tmMIAxYbkZAEXQFmvLm32UTSqaxyRJIgji",
"rVTMWiICcsf0+J0pldWiUlZP0iir8whIMKuxz9RyDG/1GphmQ5n6HAGlA6quvhW94EiUba+klSOvccg6",
"TjNzmmCIVZht4diyFYc6YNUcGgue85oPQqvUop73GxKFhmx0R5b9UYoyFXkYl1rhCg7jKmXIyzhimO0q",
"kW+pEvlXHfexy/voxSa1rvVWVyqqyP1TjOxgP9HZ1inPr4U1pUhaq+farXLs2zb2FAlgx01b4qZzCwMJ",
"YtUoZpn6akzVdKl46MQJLS5qu8cG608pW/LM6rQ78zGxPIk3nAlOWeF0h8rp34qM72GGTe/lamzRIgt8",
"F7jDkJ3LK6+soXjL8qVbzIBNsyRPWa5wAYLcKCsorNNv8MlkaNmqjFgx5VSQXpd1uivHckmvnSuu35Dg",
"IhmaTusiEa54Aw94MXxYrn6x+4shOym5rgzssu8Nb9nNBOeUOmDYY1wVAQIxUTyFsHcL2QOXtooSheDf",
"cXOYIANtV9u8DbhQx3e7BjLXssylh1K6oswvKxJ3rrC8lIMNJambXj5rIZqFbMCuJeWl1HESy7/zxq/o",
"vvUjyOUNSzmxqSuqgp282ykVsCDFDamAUs4chPAWxUgGrrUROUXPttLntJizk0M/mBzS9nbFy6lGmZ1w",
"2kHhpG/Q8nJqMRx2AkEGMxUO2zMGyMLsXsqLPIv8977//O35/wcAAP//kNGoJuuVAQA=",
"4A5Py1u5aHITBjnrQh6S7G4SJQ+jPB7n8znInpogY1v1tdqtRjHkZ4dayDe54afAdK1qc+x5f/vX+OLc",
"u30iEP+9+RBTxxeb/rfVaECOcYZMTJ+CKYqV9aAOoZeqpVJ0mPx6cLdWquVUDRwS0F2BsgbEiyyE2Yen",
"U5TBQIIE43xOdw7gwOemeE1+LOyF6P9RGqpl3+KGZ+06hiALZsbz1kbvFVxOADKahJigz+kZQ1mVt/Ky",
"PC5fAO3+hxTGIYWlYWDRrM3If+Uwb4aYt2ozbpbHsQPEolmbkXEeBBCGzUCrhu6jUzr8V3JrEEh1DiAm",
"lzQXkJDGfya3+xu6ylfGxASm7lw4JjCtMmH5nKkqnWgOk5yYly8+Ni39HmYYJbFxBvvZocDSB1C2Br50",
"k0bxr+R2lBtMNQGIAxhF0i7lZoBRnZQn0t5kBAHmhGJwocUIz9pN/SenyLodpUTLW1p2bwWiyyDOI6KN",
"WmAYE5CRdovBBJAcO6yHylneVtD3KI/bkTjd/PZUHtzBrJ4F2ixXU66aQNYOmIWey/NLeRBJIGoX7Fwz",
"Vtskj9DLwfnp8PyT3/NH1+fn/K/x9cnJYHA6OPV7/sf+8Iz9cdI/Pxmc0b9NZy1VQszuFVen7GJXwxaL",
"SZjpCNttR1tVfZTp2Kj9UIjLphT8wvCWoWl0gmuwiYlMxMWWGYHg7iu8nSXJ3YsvUoNlXUtMpmcohq18",
"RfQIZZ+p+kDliTxIo2TqRSiGbRwFPKDEOAcdTjRoVE1svXkLw416AVu6U6WIclEzfCtQdQbvYaSLmtPB",
"h2sqXobnHy/8nv+1Pzr3e/5gNLoYmWWKNo5S/Z32vwSBSZCI7y9/c5JkZZYe/OMKt6fyCC3vT6JzzQ3K",
"gADds/PdD/IsgzG5SRntHvf8GD7Kf73p+XE+Z//A/vujQ3ohKnNWqbPJ0yhaeCmnQjXxsdOVQ4PF6I6H",
"j9WR37iNXKzL6CBNCIj0Cx5tyuwSEcKEW+mKcLZDlxuOQWL9m97uvkCSocAgj+N8ful2/WR0LC+h+7b1",
"/tvpxsnHQtxvyq6f1gFHbldNPqK4cO6bUVOyVypQS7P0dISY5P8IBjAmY01hXTBoMta0GfP4V8/kydNv",
"GG1U0GWuHCtcFzZ2JxAoLS4FFQ25COXMmV+/XmNWG9HTlWcBy+Lo5p2mf/08fuwRTCPwVO8KZbbSYVg+",
"11qvfHEj22CiMVpTQvhNLUm7eWHrykr08LLr05q/OzxsWO8C3LZV2+5IWnd3ZWXhKusKn4Quo1zOmL2G",
"rdLcZJOoUDNtRkdduM4YBpxCTK4zi9fyenTmkcTDMA6ZL1potNgjyWa8Q7YDIo/RXzn0UAhjgiYIZspN",
"Ibw/Ip6Ku8z18MNbGCXxVELcICt7m/TYu9kuar3wYzgH6SzJ4DhKyJpP2dIJZrZ1cp8ajhLCgtVED/db",
"2pInnjCD2ZZFP1MNRyyseYt1e1bzQlEUSUOv+0orh3R1HtnEHfQF+inQ0tNP9UXjlzR6UfLR7/3Vm/oM",
"xDGMbPCKzx4Kza5WTAf3HvjoZj2Oj3BujQqQU7DogCUnWUkEgblt9fTbCkun3e3rZoOvsuidEJ5u4k0i",
"QqG7TBc9jQyNEpDA1Cb3zO6JGYrCDJZtrQ2604ZcCinIZJqXOyQZBCG4jaBtc+V3FezMBWIjmazk6bLM",
"YKcAbRUlcpCWebGB3OhQs/Ub8Gz1ySBNSgYc7aK9Jv8XI8KvNp2ykQaWo2FoBXmZu3HRpwZdi/ppyZvn",
"4AwSvkvVfv08mOTEBuKS7MnMNP0JgZk7MtfuXORdanZmBdXL1a9O29pki4PgabNi1aVmxVQPsvg0nU4q",
"RYFqZbUORIG6fhbM0D18lUJK99S4wbdTIibJQpiZO9Vw/cb4TbuzbIfka64HHDU9o0PDRsI74P1Z4Cmj",
"E0i0scTRBkkeE/MOyQOkggYUmjtoPkaDQJds5bAeYUtkPSipwHuYIfLUpvdY9nEitY8ow2QMuQLhTm5n",
"oF0vU5JZefqFcXslIhU41BCiu1D5TtaQ7a4Ee5YIspFkC3ksnY6jwb+vB9eD05vzi5uvF6PfBiO/V/w4",
"6l8Nbs6GX4ZXfs8fn3wenF6fDc8/3VwNvwxOby6u6c/98Xj46ZxFpYyv+qMrHqgyPB+OP5djVkaDq9F/",
"eExLEb7S8+lYF9dXN6PBx9FA9BkNtFH1ycZnF7Tl2aA/VmMOB6c3H/5zcz0emD3XRkrWUKB5wAV0o+HV",
"8KR/VjdaXeyO+OuGr+HL4HwBTc6xPepv2toETFFfYLHyAcxEwsXAkhbzVWZQJx5rLW/nc9YL7xvTpUEM",
"oieCAnyRkouc1IxaXPdnAHtJSmDoiSudGsQ8x8azMG3JGCtnczTnbFoTM4ypTtvNcVoF9faF16Q6Gde8",
"AyLVvBemlLBpssdJzh/RCZi41XqjeDqGhP4Hb49FeZWBwWOK6C6zaDgGTP34vBefBnsPLJmaBfZ5IIMe",
"SNMsAcEMxVOeVc0QXDe/TNXiRHKG5ogsCQVfskxbr8IT0bFrcaFZQj4CFOUZdACFOaF0QHQDOmbpBeY5",
"I4D5Uu3ODZZ2zwz/IBY7yxwcIgPGMXoAPEoi+8jMAnFgSbOZg0dvIpt4gMjEZkFV67Vr2yWBEWC7XBgq",
"n/5msh6fVQZ9rWNG1k0QtXK2WVNgudTKJvO8YCibc0F+tmONt6hzL7ARSsnrS5yYpZzQYq/0/LYG2tmZ",
"o0SQcrsThO9pFf4XIyj3VErKek2trzHMeI/L/DZCQR0psPFqsoN1mHdm08X+LbPpI7FP8mZx8fWcXcv6",
"p1+G537P/zL48sFy1+HD1AcbsoDHpkWUhtBczOV04cvSwG3GWwwpUbBLotURoC6mg9/5ZYr+8PHs4uvN",
"6PqcXdguzosL6qAGMyWNxKSUgWz+O4hyi2xj37172sAsPpk2Qo+dB5CxJLWKqsJ7mwMtqWIwghMURU2K",
"A4tbZ8NRzSFjfdrERrC+NQvlY9uXaIZ/tQQote3NzKWI5Lnn39tXISOSmzbMvJoHFIec4A06FSQw83gL",
"dcrxsby/oX247x15IXjqeUfeA4R39L/zJCazvy/pyFboKW2dXLxdKEpEXSYRCgzJtFx7rrtQqoJRvKnh",
"SG8hFMvs1xS1JoAzri5D0ynMNNW+ZeGYqh22bbDbNSti9TPWKNFX3hCZu5byINaDXwfEvv+v2BbWXeZf",
"9jK/wUv2RmqGOZs6n63c9JU5su0xwfgS5NiUW6KTO/eGewh7KWvtgTj0AhDHCfEAq0zHStzKmgGLiDdC",
"h023oUZrAAjDDGKsWwVKWpK8ZlaNA/TDZ4BnJmk9A3imD/n/8MJ0Qn5zRYNXiB3zYqveyQwQ64S/wwxN",
"UBN6mW2DypJ70VxUKS7BYKboGcD2WsjGOYAqfuxhSLZosw8RTiPwVCJouX+tzQhl7H6zEFi5WLS90hR8",
"sCOR8SB8KLAmNSYz7Esc26oY9TOLI6oDRAFRi7/VYKhk8KpS2TqebCg/S6YoXr7G2XL8vVLJs53DuFxj",
"2oTrEZwiTGqk+y6i2+2kswiGHdwtWb7VddN09RjPUIpfq4mrYvLb4mm+iVOGT2baNpH0wFWptZpw3ZhB",
"BO8LNczIFrktCUv2zbNoGQ83HbcRJbyc44qFHB0WiWGQQYsTjn9TNegED9ObkDecsLcK0iy5RyEMex7w",
"MhCHyVx2Ylk6t9CbwhhmQCQl6QmFxxvDeHs0h7tJgMvtzbZJWcHZiGwqlXekCk5Z/Dg9xlDqYmVMEdB5",
"A4i1cCRkVz1lo874UHox/la+01kStlqtAP0L76ki0U+MTxhRkD9fXV16vBF7u0hScCaQ71DEQMOKgrk0",
"8TdHhNeTkEAlthnsuf1Q0rxs7WygNVLA0rTzRW2d9OF8Glz5Pf/yYsz+c33FbKi2E5In6OC6xFLM7ffC",
"0hCA2EthRulqv1XIE7gHKAK3EZR5Mg2VFKvTwkcY5AR6QRILf0P0ZHYoUFWDVe/Nhg1PSDBnBZrGMPSK",
"Tut4TGLFpPgI3MII1ztbWBvGUsVxoI4B58x2mJ3RcUxbFgFMPkOQkVsIHPJqxVYx3xmmAAJvJnvvr/dl",
"EDoHZWKqFgwwAbcRyzDYIQjn4NFO6HPwiOb5fH0Ev3k9w65fZJUSMaZsRtpGpXQXzq2WBLtQjsZAs1hm",
"0JuUHZFlzzPAOeMsCchCxr4JEBW3bEptZ8dgBbdShPdProa/D1jNMPXnZf96bKlCyH8oToDx4Ozj54sx",
"DxD/0j/vf2J/fR18+Hxx8ZtxCHGaWZPaxWEnHt4qQ92YmS96Xzepj9ejM8PwbbVJ1t6oCWjSrnIQ1tZP",
"l/XraNd1Z6fXeNW5N71h8voq5TV4eHnjhlVvFkDKMJjVqxdLN9S+pSKthU/pF9MQTqsTVWzXFvrdRlRb",
"1yotaYZTEkyXX6vcrStg1P1EoeP2JXd+5x1VJYe6IIZFSWAjfTruiTxRTaEaU0i07ypBYME1Fcu6K9z/",
"OIUEM9wFRVdvSvsqkaZ5VPetoUJjkgECp0+2g4N/9UjCvV7yMS59Vh5SxJ4gA8GM3wPl0cDTXG6G5zeX",
"o4tPo8F47Pf809HF5c354OuAXRlYRlLxz0+ji+vLm9HF9fnpzejiw/DceH60VHcKjabsIV6sa/7muPlW",
"KKdeRGDPuJF1VDE8NbkgFYDDU+O2yd6/obh0D/t4fX5yNWTxcqfXo/6HM3qKn/bN+UX6IFIyt+IUNruB",
"9eR3s7hfqerFlk8Kdhq43ZNFa2tgJuPL3+DTiUwjNYjDhWKVVba+g0/YrI3L4SlZ1kyxoP1TMQE8nMIA",
"TVBQTOL9LQUYw9C7R8CboIjA7O+OtTC/lut1u4RrmUNOS8/Dag96cZP+ua2Qvwqk0MvKHR1qVd40rK03",
"6VxFmbVaEC8O4E6XRWGLNR7zvGAFV8W3bXfgc4/11ONtg7Cxkpv6myeqEkZ9CQse/AjDD08tBr/SemkB",
"3UKXaan6GEZYvYD+79rDE6pEqL7Yb/XCZEfuDnVFGevAr6ss3R+f0GN6MD6pPaeLUWqqS+u0XJJimmRs",
"mGQtLxcIne7UbbKl1I8yT1l0kAXeMHg9kvhSE36GUiZJPKYqbR6ZDxwoy0U4Pdskiqduph5YSy5VnRpY",
"D5+wki/LFHbdZB3axbqsDYuwamasHEQbqpNDnfCOTay/0Lwyv+AeY6EQyXnGj4LDjN8koxo/Frxrrv1i",
"XQ29axvwF3ElanWzyMrWBrNDnUNYRyBCRpxk9HiYmMVETaWvG2ThvKYJReGOiaUO9I0wEa57WmxeYfuj",
"cAFvptJx95VKaC0GVvhZr7rL9RMz+gqV5UbYkNqjWdO7FnmlZARywYRuN9Ishy9kD1SVsRpNJj0fi2Pz",
"qigHaVN329IF1kx8FkM2/+gkdvT8R1eTgOzT8syVMBeFtLSBvjWT1CmkOp25AncGHsqfDU4x8OD9p//l",
"zAtVw/ZStTyPA9CMeNZpCGpDhT8BlfCIqzxD5ImqqnOO31sIMpj1c8KyHBh07EbNfi4WOCOEFUAJkuQO",
"QdkcUQzxn6Rx+70vnicu+oIUsRdTn5n1Y5JYwnB4N69/OWRluwi72Zd/VbvkH+0f7h+yTU5hDFLkv/ff",
"7B/tHzKFlczY0g5Aig4iURxxaoo0+yRt47RVDDH21KWT0iCQxc39M/H9E1uXjAxisxwfHlYH/gxBRGZM",
"jL4zfT9PiJqztDP++z++9XwsXz6lEBYNpZfkDzF+MIPBnf+N9mdrzSAIn5oXS5uhutWOZIN1LpcBx7Kh",
"ePYPycBkIrL+61avoG1c/v3RARCpWnssMnePmSrxwXf2s/7bM4cxgsSgSZ6y37EH1EvPLCOQxx+z7hWM",
"LWR/8hH45QmwRGEKdk2pjMoMHrsIMf6i9FxwV2Upvi4fuPWQy5iVb1bP3yp7/7aKrXEeBBDjSR5FTx5H",
"aVh6JruCvOee/5ZTSZDERNyLQZpGKGAYPfhTVKgr1tEg+VkdUhFjvuglmYOIYgGGXpJ5tyCUcXEcjDdr",
"B8MExccku0VhCHliW0HfnE7qyExSvCit8a3nP+6p5En6QVTm6BkI4xu7ApDAkL/GE/1WIXE+wo9B4owe",
"PiRcdq6FGBwyww1kUostkni5xHkZG89mEb2WhVgqoVVhL4kBDmgnBhzFAKeWzYkB/YBM0R7PBD/4rv5m",
"p2GaYIPSMIL3yR2rUta/HPIccuEPVDMuiIkUsSR1vibe3UVKqOEtMkHCulPHXcaWJ+hcvjr7AxM1bkPV",
"gnToxl6JnZNkXPxWR8lqy0sUHERJHh7o10K7titbqUgXeZ1gg3goxgTErKRKmYhP6GfpfLYrwZvHLQPE",
"y2MVo74zBNagtXME684+sfXKpc92Xg6xl6TcFS5ONG2/uWnw4Dv77/NB06azbVZ7DmKP9duvbDGzGZ6q",
"7W2UT2wYq8rC3TvbFE3rIwGFicZjPYMkQ/BeCDyOEbYfndQrEb+GmYLwuXOvRt5xGirJujlcWsJZZdv2",
"xJooB9qGppTAeCVibh0Cjo5xgBZepDfu+BnCVD2MvFJr2wbT1sNyw43tNp1L7HjpNftWmy8T2Eqr2yVC",
"UFvPNmJhE6r7r28ye1Lt4Dv7j4PxyRvrT7BVtlh/V8/d1lQa03qUMRB30qhUxskunTlH2wHjOgY5mSUZ",
"+i8M+cTvtjMxz+VkKfEgipIHGJoNWYtUK3mC/V539nGiK3MMvaLiGDtxS/kZwSq/xLgFmyy8SWhlFCFS",
"d45NFpDRMcoOMkqFYBWrnI9rGYW93rzIJvzzs7wk2c05dF55V6mwSGuTro0zFLSbYo6e/YZ2B5+WvaJp",
"MBy/e1cC4sj5SlbDoGmW0H/AsDvDdog1bdo9IrP81gNpKqm9eqzxNgv8SGC6l+Xs8BJ/Ph8A/qBYk2Yv",
"WsmEC5GTXGVVHgTPdG45sAPTas9WWw40Ae+2GVekm5DEw3colbD9lcPsqQAumUwwu7EaQEEx+eWtMfOk",
"fjqej3b7ZJmSfW454yYNNYbn8paw2OCf3FpDZ327nVlLXPcAMBM+kySPQ9N9ssT+GvMrzYD+NMprbeaK",
"hZtlUhFyaZdIvE0LeTTgg3bS6KeRRsUriJ0s+nFkkcb4m5dEUTKtl0PYi5KpF6G4ohtV/TpnyfQMxfx0",
"7MTQboihnv2xgwjewwjTeXkCcc3ErGVp5lqLtKAD2otnwllWjiE9eD02mwbHJMksgPAObQEZ814GIL6y",
"F8YSjwXx2tef6Fl9LScvZQRa8MCnD1XqYS0Up1qzZSAp+m/2kNKlQdP5REmyO5wsbk12KigprJ0FZ8m0",
"/THAP2O7nYrXLcUeYHXULaFGPBiKN/U3E8fHBy8/L1IfuEcSUeXyBcL0GklcZABrcXldFJ4icb7XBbE1",
"xdyZKFqZYnnaeU3sLQtNeUSYoHhaT+Cvxyy7hWBaNyYsUoheNGy248e1RcW2iIGt5Utzhkh9jA1Q2qot",
"Qhc3Rcu7Xkd2goO3GUq+hOXAvgkd75TUtTpqdWemXgsVrX0aidLeftbDTdcw15cp4qyCHr1wpkj1BOwy",
"RVx11JUyRdxOyQMMCf0vbs4qlV082aU+T0QjFxRPx6KPYzD2T3JMaohZ4YzU96RjpVL4rhVNa+MjlW5V",
"72hT2U/YLbuq0ydVzDHDBy7KtbXiE1kwobP1LSqPKkULt8vbalIYl0gl7HREhgBJ65pauEkTxuKkHX+t",
"i78EIyyZGFl/4DhEdWCWQlIK7SgeCTYkyb2Ws+ZndqPewScnJyptV5rVqUAYIwNWwqdabNEOk1ae3Am2",
"Qla0BlCrk74ciFkei2I40AlW2dbZ/WkuZvpCLmm2ny/jkGZT74A7WodDd0bXEItKtbyDT+IFlBSgrEIv",
"qiz1H5Tdjt6zpkf82ZNj/q9jKt5N6zGUPjcyQ1MN1I3nC3ce9bVo2VDGSzpmCbuaY+uS3jt1miFAFA2t",
"NbHylJKXcemLYtAt7KdQlo/+uSMqj/+5nVlluUmh6sHHAMKwkvAllH2ZfeTM581K/gFT5hw1fa4hOmj7",
"v8GnzrhUqLxLnXMM2d1ZZzrrPHEDWScfZDCNwFNdDSv6Xffj844WDpCVq9igP+8pyRHgfkqikGnCmcTb",
"lk9Kx0ACClynku7wecnJbgnFuE5QoPgeEdg2zEf2Mrsuh+xrd1ZKj6WGj6V8lRLbnYfSFMRT0OKGInf4",
"BLW03l0ctVgdjhK3EB2O2xeNy+HgLhOOIwijY0tzDI7im/UEDAg+lz/s8X+3K1fuwMqtC5TvllenzFf1",
"sO0pdLz2s7WRew3V13eMe021cNT+2HKIyvvYpqq5Aye88qI3O8gJm00AWe7cfbEUEEfONRRM32XOFakZ",
"rTm37uSbw/mteOKpxR1N9jKz+Bf2tbujSWrU8LHUHU1iu1MGTXe0ghbXowuK8Q6+8z9cCiECAYQ3yZJ5",
"U/A1p4YfQxUUy7bBxj9vv1zj2nl3GR3w5+DaHaq1cm4praKYtLQxa5MXf+Uwh3vz4gXb2hr5rLUnWqvy",
"4bUC4xMk/6a95CO5r1FmvKr4tC7kSJeAJdpbLg5ZPeUouaSTiS8sE6k4UrszV4JFSkT1TOaSMlH22EuT",
"CAVOjyMKLxXv4JLEJh1bl6xHl8J2YELLcheOhd3oLh5bzwTlldlrk9dKVd9x7WMF3VWc563pOGlzli2g",
"uqsfvUOl3TVesDwN0vAMggMjHmACMmJlxzH9ys+xi35OZh47OhcZ8hrDjFvwGEAXFKGs52vkzDeHxw1l",
"1xnKxLFSwsoMglBYHKOEE0yZVhbnfl4oGP699Bz2H9+eSxXEGUrLM0pCoDuwNB005RIvvC2ATaX+Ozks",
"5PD5uPREUgtJvIjlThbvnCyuMoLTKxuNKcwOz810sTIMAWX+qs1cXh/Nlid1jnnp3s3ZYYa2cp4jR9ee",
"qIai1LXm06L+tHf7xDnXWBH/lZgCertaGHsL5etbmiVUMfTObrhrdespY661Vr2TnDgIQBzAyJ580icE",
"zlPCTPa8rcNTGvx2dsKH7iTI65YgIcIsXk2IEE4E0e7pGC+cI9bEKNti6AzSjjXJZLSDMw+z5h0L72J6",
"W5bHYqsaoglRnOYsC5z7h0zLfd4JTaVLbquRL2zDX0KgFGuqjcfgzRzf2voEyZgP24mWl9MOxHjJ7Z8w",
"IEveJMS+dxeKnb5QyF3aiNQQ7ry9hyS7q4uALko6WH2tnZu1iLniqPjKkEoRUlfCkCJDxYWJB/LldnR2",
"wF0z7Gvkv3zua/Gcr5GFfnoDfol/ODa2VHnUMHPYKnO1e157dy34OuMtY6znUrnePE9PSC6868P3irPh",
"pz8sC0x0BX5XvmrKmNZyMhDH8bJOKolofr1sX/JIL3VqqHyk1Sft6h9p9Y80vOAGM1GpmOzLVUMywe1c",
"u1uzIJUIprue7mSVpPIeVaPm6QUVr0fifNf/2eQeL7FC4xEs6PQ1e8sXeN8Mmo7BV6wniO1aNgOn857b",
"81/Khunm3Jdemabc+Bk7Kc+spRvvdgp0wRidCr1+tlhnPpgaszFw40T6oG8BCWYVFbruJHs9gRubsh1p",
"2idHhquLVXj+qwroulXo4olg+q+HAuBhiEt5ryshuJrs29JpJKJFOp28IRuOk42LPry65GCDu56hDBRn",
"Hbh7a2dX39rRCx3TOaeQqK3dt0zM2g9Df1uXD3fIZJfNA5eCjCLNckFaAIs3/qoL4y3BZwhHMMIm4i82",
"C9dOP1BkAO4OxaHb81O0YWuQfkNx2AzNa6qVYa5khObQAxMK6OJZz+IUhIdLX4J/fHh8tHdI/3d1ePie",
"/e9/LbgX3ft0AjPxhoDAPQqF71p7iUJ8CydJBjcJ8gc2w5pglo9aeRPxbtXanrbSOMj9gavbp829cVUF",
"SH/paktmohUuxJ2W22Ar2szFmJl7XapIAU+ARsVvmfn1slKOtt/XXFSqUw475XD7ymGn8XQaz4u4fvCK",
"RdiYAOoqsG3yfM8xzPBBkGeZWEp91TPR0KPdjPViPkFyIgbbII2xuijtiIpB3AX6vXygn2tZHErkC+RW",
"LotTJeMpIrP89iAAUXRbV43sJKESl8AWFZA+saFZCaQTOXzrCkOBmHcTNYZKuJMLrKsqZEXfektHaYiT",
"taN2pi5TuzJMBoQlyTSCm6E3NvQPTm8cfWumtwJxPxy9NT1PVyT+lF8DU1WbG49vOoL+IAX2d+k9OO39",
"1J/qMTgX5dD1WHV7LM5KewcgCGBKaupYsO/t3tbhffzNxBjwwSvPwVjiAmqoj6+8e/SsvjoDQ1Ljo2d2",
"+sogi3eoiVin39vRF+/jbyr+mw6+BvriK+/oqyE7nyJpCfqKkimqKddxlkyxh2IPsLNxv0bBOGMDbegB",
"K3oE0/G3lEHndI+OkukUhh7qKt3t1vW5fKxTqnG9J0fJNMlJAzMkOXHjBjrUjtAoBaUj0tdj4+HU40q2",
"4n2iGUpbXIG0Tm7XIP0VJdZNxNptlMDNk7a/D+ko6u5Ey9yJdAw2k2QKMH5IstAuS8UTiFySerJ9nUi9",
"lGNuTsc4mYF4qibaJWUjYJCFClGdOH9F4pyTVZnSHZgog1MqyLK6Sx9vgWs1khP90fRNsI0EY5cYRiKv",
"c3O9Cj1dkpCrzsNf89iEh6F402M3HQwNoqalx2GhdtbBd/GD44v5DUWA3N9GlUWjrLm/aqItZzc6PhXa",
"lczZwZI56r3QppI5PUVfbsxxIPDsct+STUUsZwPHiCPU+YGZneWb9VeyEqhZphSd2q6OPXe0Fl2xRW15",
"VPEm+8OlsobBuMEpzLGCBh+jtkzFlt/KXnchqyUCE3dMvd6ZClYtClj1JOmw8ouABLMas0ktIfNWr4aW",
"N3ArZQgonRt1ZafovUOibHuVphx5jUPWcZqZ0wRDrMJsC6eJrWbTASuy0FiHnJdiEMqeFoy835C/M2Sj",
"O7Lsj1IrqUiPuNTqSXAYV6kOXsYRw2xXIHxLBcK/6riPXZ4tLzapdQm2ugpORUqeYmQHs4bOtk7pdy2M",
"HEUuWT3XbpVj37Yxc0gAO2564Xf/BbFqFLNM2TOmaroUInTihBb3p91jg/Vnei15ZnXanfmYWJ7EG84E",
"p2RtukPlrGxFxvcww6ZnbDW2aJGcvQvcYUia5QVR1lBTZfmKKmbAplmSpyyFtwBBbpQVFNbpN/hUAuYl",
"ZMSKmaCC9Lpk0F05lkt67Vxx/YYEF8nQdFoXIHDFG3jAi+HDcmWF3R/y2EnJdWVgl31vOGE3E5xT6oBh",
"j3FVBAjERPEUwt4EsncnbYUeCsG/4+YwQQbarrZ5sm+hvO52DWSu1ZJL75d0tZJfViTuXL13KQcbKkU3",
"PUjWQjQL2YBdK71LqeMkln/njV/RfetHkMsblnJiU1dUBTt5t1MqYEGKG1IBpZw5COEExUjGk7UROUXP",
"ttLntJizk0M/mBzS9nbFy6lGmZ1w2kHhpG/Q8nJqMUr1FoIMZipKtWeMW4XZvZQXeRb5733/+dvz/w8A",
"AP//WVO/o2OVAQA=",
}
// GetSwagger returns the content of the embedded swagger specification file
+4 -7
View File
@@ -19,8 +19,6 @@ func ToSlotState(slots []*dbsqlc.ListSemaphoreSlotsWithStateForWorkerRow) *[]gen
for i := range slots {
slot := slots[i]
slotId := uuid.MustParse(sqlchelpers.UUIDToStr(slot.Slot))
var stepRunId uuid.UUID
if slot.StepRunId.Valid {
@@ -34,11 +32,10 @@ func ToSlotState(slots []*dbsqlc.ListSemaphoreSlotsWithStateForWorkerRow) *[]gen
}
resp[i] = gen.SemaphoreSlots{
Slot: slotId,
StepRunId: &stepRunId,
Status: (*gen.StepRunStatus)(&slot.Status.StepRunStatus),
ActionId: &slot.ActionId.String,
WorkflowRunId: &workflowRunId,
StepRunId: stepRunId,
Status: gen.StepRunStatus(slot.Status),
ActionId: slot.ActionId,
WorkflowRunId: workflowRunId,
TimeoutAt: &slot.TimeoutAt.Time,
StartedAt: &slot.StartedAt.Time,
}
@@ -238,16 +238,15 @@ func ToStepRun(stepRun *db.StepRunModel) (*gen.StepRun, error) {
return res, nil
}
func ToRecentStepRun(stepRun *dbsqlc.ListRecentStepRunsForWorkerRow) (*gen.RecentStepRuns, error) {
func ToRecentStepRun(stepRun *dbsqlc.GetStepRunForEngineRow) (*gen.RecentStepRuns, error) {
workflowRunId := uuid.MustParse(sqlchelpers.UUIDToStr(stepRun.WorkflowRunId))
res := &gen.RecentStepRuns{
Metadata: *toAPIMetadata(sqlchelpers.UUIDToStr(stepRun.ID), stepRun.CreatedAt.Time, stepRun.UpdatedAt.Time),
Status: gen.StepRunStatus(stepRun.Status),
StartedAt: &stepRun.StartedAt.Time,
FinishedAt: &stepRun.FinishedAt.Time,
CancelledAt: &stepRun.CancelledAt.Time,
Metadata: *toAPIMetadata(sqlchelpers.UUIDToStr(stepRun.SRID), stepRun.SRCreatedAt.Time, stepRun.SRUpdatedAt.Time),
Status: gen.StepRunStatus(stepRun.SRStatus),
StartedAt: &stepRun.SRStartedAt.Time,
FinishedAt: &stepRun.SRFinishedAt.Time,
CancelledAt: &stepRun.SRCancelledAt.Time,
ActionId: stepRun.ActionId,
WorkflowRunId: workflowRunId,
}
+1 -9
View File
@@ -1577,18 +1577,10 @@ export class Api<SecurityDataType = unknown> extends HttpClient<SecurityDataType
* @request GET:/api/v1/workers/{worker}
* @secure
*/
workerGet = (
worker: string,
query?: {
/** Filter recent by failed */
recentFailed?: boolean;
},
params: RequestParams = {},
) =>
workerGet = (worker: string, params: RequestParams = {}) =>
this.request<Worker, APIErrors>({
path: `/api/v1/workers/${worker}`,
method: 'GET',
query: query,
secure: true,
format: 'json',
...params,
@@ -860,18 +860,13 @@ export interface WorkerList {
}
export interface SemaphoreSlots {
/**
* The slot name.
* @format uuid
*/
slot: string;
/**
* The step run id.
* @format uuid
*/
stepRunId?: string;
stepRunId: string;
/** The action id. */
actionId?: string;
actionId: string;
/**
* The time this slot was started.
* @format date-time
@@ -886,8 +881,8 @@ export interface SemaphoreSlots {
* The workflow run id.
* @format uuid
*/
workflowRunId?: string;
status?: StepRunStatus;
workflowRunId: string;
status: StepRunStatus;
}
export interface RecentStepRuns {
+3 -8
View File
@@ -230,14 +230,9 @@ export const queries = createQueryKeyStore({
queryKey: ['worker:list', tenant],
queryFn: async () => (await api.workerList(tenant)).data,
}),
get: (
worker: string,
query: {
recentFailed: boolean;
},
) => ({
queryKey: ['worker:get', worker, query.recentFailed],
queryFn: async () => (await api.workerGet(worker, query)).data,
get: (worker: string) => ({
queryKey: ['worker:get', worker],
queryFn: async () => (await api.workerGet(worker)).data,
}),
},
github: {
@@ -28,8 +28,6 @@ import WorkerSlotGrid from './components/slot-grid';
import { useState } from 'react';
import { DataTable } from '@/components/molecules/data-table/data-table';
import { columns } from './components/step-runs-columns';
import { Switch } from '@/components/ui/switch';
import { Label } from '@/components/ui/label';
import { RecentWebhookRequests } from '../webhooks/components/recent-webhook-requests';
export const isHealthy = (worker?: Worker) => {
const reasons = [];
@@ -100,10 +98,8 @@ export default function ExpandedWorkflowRun() {
const params = useParams();
invariant(params.worker);
const [filterFailed, setFilterFailed] = useState(false);
const workerQuery = useQuery({
...queries.workers.get(params.worker, { recentFailed: filterFailed }),
...queries.workers.get(params.worker),
refetchInterval: 3000,
});
@@ -119,9 +115,7 @@ export default function ExpandedWorkflowRun() {
(await api.workerUpdate(worker!.metadata.id, data)).data,
onSuccess: async () => {
await queryClient.invalidateQueries({
queryKey: queries.workers.get(worker!.metadata.id, {
recentFailed: filterFailed,
}).queryKey,
queryKey: queries.workers.get(worker!.metadata.id).queryKey,
});
},
onError: handleApiError,
@@ -232,19 +226,6 @@ export default function ExpandedWorkflowRun() {
<h3 className="text-xl font-bold leading-tight text-foreground">
Recent Step Runs
</h3>
<div className="flex flex-row items-center gap-2">
<Label htmlFor="sa" className="text-sm">
Filter Failed{' '}
</Label>
<Switch
id="sa"
checked={filterFailed}
onClick={async () => {
setFilterFailed((x) => !x);
}}
/>
</div>
</div>
<DataTable
isLoading={workerQuery.isLoading}
@@ -519,15 +519,6 @@ func (ec *JobsControllerImpl) handleStepRunReplay(ctx context.Context, task *msg
return fmt.Errorf("could not archive step run result: %w", err)
}
// Unlink the step run from its existing worker. This is necessary because automatic retries increment the
// worker semaphore on failure/cancellation, but in this case we don't want to increment the semaphore.
// FIXME: this is very far decoupled from the actual worker logic, and should be refactored.
err = ec.repo.StepRun().UnlinkStepRunFromWorker(ctx, metadata.TenantId, payload.StepRunId)
if err != nil {
return fmt.Errorf("could not unlink step run from worker: %w", err)
}
stepRun, err := ec.repo.StepRun().GetStepRunForEngine(ctx, metadata.TenantId, payload.StepRunId)
if err != nil {
+75 -4
View File
@@ -33,7 +33,8 @@ type queue struct {
// a custom queue logger
ql *zerolog.Logger
tenantOperations sync.Map
tenantQueueOperations sync.Map
tenantWorkerSemOperations sync.Map
}
func newQueue(
@@ -162,6 +163,18 @@ func (q *queue) Start() (func() error, error) {
return nil, fmt.Errorf("could not schedule step run reassign: %w", err)
}
_, err = q.s.NewJob(
gocron.DurationJob(time.Second*1),
gocron.NewTask(
q.runTenantWorkerSemaphores(ctx),
),
)
if err != nil {
cancel()
return nil, fmt.Errorf("could not schedule worker semaphore update: %w", err)
}
q.s.Start()
f := func(task *msgqueue.Message) error {
@@ -234,13 +247,20 @@ func (q *queue) handleCheckQueue(ctx context.Context, task *msgqueue.Message) er
}
// if this tenant is registered, then we should check the queue
if opInt, ok := q.tenantOperations.Load(metadata.TenantId); ok {
if opInt, ok := q.tenantQueueOperations.Load(metadata.TenantId); ok {
op := opInt.(*operation)
op.setContinue(true)
op.run(q.l, q.ql, q.scheduleStepRuns)
}
if opInt, ok := q.tenantWorkerSemOperations.Load(metadata.TenantId); ok {
op := opInt.(*operation)
op.setContinue(true)
op.run(q.l, q.ql, q.processWorkerSemaphores)
}
return nil
}
@@ -261,7 +281,7 @@ func (q *queue) runTenantQueues(ctx context.Context) func() {
var op *operation
opInt, ok := q.tenantOperations.Load(tenantId)
opInt, ok := q.tenantQueueOperations.Load(tenantId)
if !ok {
op = &operation{
@@ -269,7 +289,7 @@ func (q *queue) runTenantQueues(ctx context.Context) func() {
lastRun: time.Now(),
}
q.tenantOperations.Store(tenantId, op)
q.tenantQueueOperations.Store(tenantId, op)
} else {
op = opInt.(*operation)
}
@@ -324,6 +344,57 @@ func (q *queue) scheduleStepRuns(ctx context.Context, tenantId string) (bool, er
return queueResults.Continue, err
}
// runTenantWorkerSemaphores returns the scheduled job body that refreshes
// worker semaphore counts for every tenant assigned to this controller
// partition. For each tenant it looks up (or lazily creates) the per-tenant
// operation and kicks it off; the operation itself is responsible for
// deduplicating overlapping runs.
func (q *queue) runTenantWorkerSemaphores(ctx context.Context) func() {
	return func() {
		q.l.Debug().Msgf("partition: updating worker semaphore counts")

		// list all tenants owned by this controller partition
		tenants, err := q.repo.Tenant().ListTenantsByControllerPartition(ctx, q.p.GetControllerPartitionId())

		if err != nil {
			q.l.Err(err).Msg("could not list tenants")
			return
		}

		for i := range tenants {
			tenantId := sqlchelpers.UUIDToStr(tenants[i].ID)

			// LoadOrStore closes the check-then-store race of a separate
			// Load + Store: if two ticks (or another goroutine touching
			// this map) overlap, both observe the same *operation instead
			// of clobbering each other's entry.
			opInt, _ := q.tenantWorkerSemOperations.LoadOrStore(tenantId, &operation{
				tenantId: tenantId,
				lastRun:  time.Now(),
			})

			op := opInt.(*operation)

			op.run(q.l, q.ql, q.processWorkerSemaphores)
		}
	}
}
// processWorkerSemaphores updates the cached worker semaphore counts for a
// single tenant. The boolean result is the repository's "should continue"
// signal, which the caller's operation loop uses to decide whether to run
// again immediately.
func (q *queue) processWorkerSemaphores(ctx context.Context, tenantId string) (bool, error) {
	ctx, span := telemetry.NewSpan(ctx, "process-worker-semaphores")
	defer span.End()

	// bound the database work so a stuck query cannot wedge the operation
	dbCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	shouldContinue, err := q.repo.StepRun().UpdateWorkerSemaphoreCounts(dbCtx, q.ql, tenantId)

	if err != nil {
		// fixed: the previous message said "could not queue step runs",
		// copy-pasted from the queueing path and misleading in logs
		return false, fmt.Errorf("could not update worker semaphore counts: %w", err)
	}

	return shouldContinue, nil
}
func getStepRunCancelTask(tenantId, stepRunId, reason string) *msgqueue.Message {
payload, _ := datautils.ToJSONMap(tasktypes.StepRunCancelTaskPayload{
StepRunId: stepRunId,
@@ -173,6 +173,18 @@ func (rc *RetentionControllerImpl) Start() (func() error, error) {
return nil, fmt.Errorf("could not set up runDeleteExpiredStepRuns: %w", err)
}
_, err = rc.s.NewJob(
gocron.DurationJob(interval),
gocron.NewTask(
rc.runDeleteOldWorkers(ctx),
),
)
if err != nil {
cancel()
return nil, fmt.Errorf("could not set up runDeleteOldWorkers: %w", err)
}
_, err = rc.s.NewJob(
gocron.DurationJob(interval),
gocron.NewTask(
@@ -185,6 +197,18 @@ func (rc *RetentionControllerImpl) Start() (func() error, error) {
return nil, fmt.Errorf("could not set up runDeleteQueueItems: %w", err)
}
_, err = rc.s.NewJob(
gocron.DurationJob(interval),
gocron.NewTask(
rc.runDeleteInternalQueueItems(ctx),
),
)
if err != nil {
cancel()
return nil, fmt.Errorf("could not set up runDeleteInternalQueueItems: %w", err)
}
_, err = rc.s.NewJob(
gocron.DurationJob(interval),
gocron.NewTask(
@@ -19,7 +19,7 @@ func (rc *RetentionControllerImpl) runDeleteQueueItems(ctx context.Context) func
err := rc.ForTenants(ctx, rc.runDeleteQueueItemsTenant)
if err != nil {
rc.l.Err(err).Msg("could not run delete expired job runs")
rc.l.Err(err).Msg("could not run delete queue items")
}
}
}
@@ -31,3 +31,26 @@ func (rc *RetentionControllerImpl) runDeleteQueueItemsTenant(ctx context.Context
tenantId := sqlchelpers.UUIDToStr(tenant.ID)
return rc.repo.StepRun().CleanupQueueItems(ctx, tenantId)
}
// runDeleteInternalQueueItems returns the scheduled job body that prunes
// processed internal queue items across every tenant.
func (rc *RetentionControllerImpl) runDeleteInternalQueueItems(ctx context.Context) func() {
	return func() {
		// cap each sweep at 60 seconds
		jobCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
		defer cancel()

		rc.l.Debug().Msgf("retention controller: deleting internal queue items")

		if err := rc.ForTenants(jobCtx, rc.runDeleteInternalQueueItemsTenant); err != nil {
			rc.l.Err(err).Msg("could not run delete internal queue items")
		}
	}
}
// runDeleteInternalQueueItemsTenant removes processed internal queue items
// for a single tenant, wrapped in a telemetry span.
func (rc *RetentionControllerImpl) runDeleteInternalQueueItemsTenant(ctx context.Context, tenant dbsqlc.Tenant) error {
	spanCtx, span := telemetry.NewSpan(ctx, "delete-internal-queue-items-tenant")
	defer span.End()

	return rc.repo.StepRun().CleanupInternalQueueItems(spanCtx, sqlchelpers.UUIDToStr(tenant.ID))
}
@@ -0,0 +1,56 @@
package retention
import (
"context"
"fmt"
"time"
"github.com/hatchet-dev/hatchet/internal/telemetry"
"github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc"
"github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers"
)
// runDeleteOldWorkers returns the scheduled job body that deletes stale
// workers across every tenant.
func (rc *RetentionControllerImpl) runDeleteOldWorkers(ctx context.Context) func() {
	return func() {
		// cap each sweep at 60 seconds
		jobCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
		defer cancel()

		rc.l.Debug().Msgf("retention controller: deleting old workers")

		if err := rc.ForTenants(jobCtx, rc.runDeleteOldWorkersTenant); err != nil {
			rc.l.Err(err).Msg("could not run delete old workers")
		}
	}
}
// runDeleteOldWorkersTenant deletes a single tenant's workers whose last
// heartbeat is older than 24 hours, looping in batches until the repository
// reports no more rows or the context is cancelled. Cancellation is treated
// as a clean stop (nil error) so the retention sweep doesn't log spurious
// failures.
//
// Receiver renamed wc -> rc for consistency with the type's other methods.
func (rc *RetentionControllerImpl) runDeleteOldWorkersTenant(ctx context.Context, tenant dbsqlc.Tenant) error {
	ctx, span := telemetry.NewSpan(ctx, "delete-old-workers-tenant")
	defer span.End()

	tenantId := sqlchelpers.UUIDToStr(tenant.ID)

	// hard-coded retention window: last heartbeat before 24 hours ago
	lastHeartbeatBefore := time.Now().UTC().Add(-24 * time.Hour)

	// keep deleting until the context is done
	for {
		select {
		case <-ctx.Done():
			return nil
		default:
		}

		// delete the next batch of old workers
		hasMore, err := rc.repo.Worker().DeleteOldWorkers(ctx, tenantId, lastHeartbeatBefore)

		if err != nil {
			// fixed: the previous message said "could not delete expired
			// events", copy-pasted from the events retention path
			return fmt.Errorf("could not delete old workers: %w", err)
		}

		if !hasMore {
			return nil
		}
	}
}
@@ -280,7 +280,7 @@ func (wc *WorkflowsControllerImpl) queueWorkflowRunJobs(ctx context.Context, wor
jobRunId := sqlchelpers.UUIDToStr(jobRuns[i].ID)
err := wc.mq.AddMessage(
context.Background(),
ctx,
msgqueue.JOB_PROCESSING_QUEUE,
tasktypes.JobRunQueuedToTask(tenantId, jobRunId),
)
+17
View File
@@ -13,6 +13,7 @@ import (
"time"
"github.com/google/uuid"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgtype"
"github.com/rs/zerolog"
"google.golang.org/grpc/codes"
@@ -277,6 +278,10 @@ func (s *DispatcherImpl) Listen(request *contracts.WorkerListenRequest, stream c
})
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
return nil
}
s.l.Error().Err(err).Msgf("could not update worker %s dispatcher", request.WorkerId)
return err
}
@@ -322,6 +327,10 @@ func (s *DispatcherImpl) Listen(request *contracts.WorkerListenRequest, stream c
})
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
return
}
s.l.Error().Err(err).Msgf("could not update worker %s heartbeat", request.WorkerId)
return
}
@@ -372,6 +381,10 @@ func (s *DispatcherImpl) ListenV2(request *contracts.WorkerListenRequest, stream
})
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
return nil
}
s.l.Error().Err(err).Msgf("could not update worker %s dispatcher", request.WorkerId)
return err
}
@@ -382,6 +395,10 @@ func (s *DispatcherImpl) ListenV2(request *contracts.WorkerListenRequest, stream
_, err = s.repo.Worker().UpdateWorkerActiveStatus(ctx, tenantId, request.WorkerId, true, sessionEstablished)
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
return nil
}
lastSessionEstablished := "NULL"
if worker.LastListenerEstablished.Valid {
+12 -43
View File
@@ -545,23 +545,20 @@ type SNSIntegration struct {
// SemaphoreSlots defines model for SemaphoreSlots.
type SemaphoreSlots struct {
// ActionId The action id.
ActionId *string `json:"actionId,omitempty"`
// Slot The slot name.
Slot openapi_types.UUID `json:"slot"`
ActionId string `json:"actionId"`
// StartedAt The time this slot was started.
StartedAt *time.Time `json:"startedAt,omitempty"`
Status *StepRunStatus `json:"status,omitempty"`
StartedAt *time.Time `json:"startedAt,omitempty"`
Status StepRunStatus `json:"status"`
// StepRunId The step run id.
StepRunId *openapi_types.UUID `json:"stepRunId,omitempty"`
StepRunId openapi_types.UUID `json:"stepRunId"`
// TimeoutAt The time this slot will timeout.
TimeoutAt *time.Time `json:"timeoutAt,omitempty"`
// WorkflowRunId The workflow run id.
WorkflowRunId *openapi_types.UUID `json:"workflowRunId,omitempty"`
WorkflowRunId openapi_types.UUID `json:"workflowRunId"`
}
// SlackWebhook defines model for SlackWebhook.
@@ -1378,12 +1375,6 @@ type WorkflowRunGetMetricsParams struct {
CreatedBefore *time.Time `form:"createdBefore,omitempty" json:"createdBefore,omitempty"`
}
// WorkerGetParams defines parameters for WorkerGet.
type WorkerGetParams struct {
// RecentFailed Filter recent by failed
RecentFailed *bool `form:"recentFailed,omitempty" json:"recentFailed,omitempty"`
}
// WorkflowGetMetricsParams defines parameters for WorkflowGetMetrics.
type WorkflowGetMetricsParams struct {
// Status A status of workflow run statuses to filter by
@@ -1787,7 +1778,7 @@ type ClientInterface interface {
WebhookRequestsList(ctx context.Context, webhook openapi_types.UUID, reqEditors ...RequestEditorFn) (*http.Response, error)
// WorkerGet request
WorkerGet(ctx context.Context, worker openapi_types.UUID, params *WorkerGetParams, reqEditors ...RequestEditorFn) (*http.Response, error)
WorkerGet(ctx context.Context, worker openapi_types.UUID, reqEditors ...RequestEditorFn) (*http.Response, error)
// WorkerUpdateWithBody request with any body
WorkerUpdateWithBody(ctx context.Context, worker openapi_types.UUID, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error)
@@ -2850,8 +2841,8 @@ func (c *Client) WebhookRequestsList(ctx context.Context, webhook openapi_types.
return c.Client.Do(req)
}
func (c *Client) WorkerGet(ctx context.Context, worker openapi_types.UUID, params *WorkerGetParams, reqEditors ...RequestEditorFn) (*http.Response, error) {
req, err := NewWorkerGetRequest(c.Server, worker, params)
func (c *Client) WorkerGet(ctx context.Context, worker openapi_types.UUID, reqEditors ...RequestEditorFn) (*http.Response, error) {
req, err := NewWorkerGetRequest(c.Server, worker)
if err != nil {
return nil, err
}
@@ -6129,7 +6120,7 @@ func NewWebhookRequestsListRequest(server string, webhook openapi_types.UUID) (*
}
// NewWorkerGetRequest generates requests for WorkerGet
func NewWorkerGetRequest(server string, worker openapi_types.UUID, params *WorkerGetParams) (*http.Request, error) {
func NewWorkerGetRequest(server string, worker openapi_types.UUID) (*http.Request, error) {
var err error
var pathParam0 string
@@ -6154,28 +6145,6 @@ func NewWorkerGetRequest(server string, worker openapi_types.UUID, params *Worke
return nil, err
}
if params != nil {
queryValues := queryURL.Query()
if params.RecentFailed != nil {
if queryFrag, err := runtime.StyleParamWithLocation("form", true, "recentFailed", runtime.ParamLocationQuery, *params.RecentFailed); err != nil {
return nil, err
} else if parsed, err := url.ParseQuery(queryFrag); err != nil {
return nil, err
} else {
for k, v := range parsed {
for _, v2 := range v {
queryValues.Add(k, v2)
}
}
}
}
queryURL.RawQuery = queryValues.Encode()
}
req, err := http.NewRequest("GET", queryURL.String(), nil)
if err != nil {
return nil, err
@@ -6869,7 +6838,7 @@ type ClientWithResponsesInterface interface {
WebhookRequestsListWithResponse(ctx context.Context, webhook openapi_types.UUID, reqEditors ...RequestEditorFn) (*WebhookRequestsListResponse, error)
// WorkerGetWithResponse request
WorkerGetWithResponse(ctx context.Context, worker openapi_types.UUID, params *WorkerGetParams, reqEditors ...RequestEditorFn) (*WorkerGetResponse, error)
WorkerGetWithResponse(ctx context.Context, worker openapi_types.UUID, reqEditors ...RequestEditorFn) (*WorkerGetResponse, error)
// WorkerUpdateWithBodyWithResponse request with any body
WorkerUpdateWithBodyWithResponse(ctx context.Context, worker openapi_types.UUID, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*WorkerUpdateResponse, error)
@@ -9475,8 +9444,8 @@ func (c *ClientWithResponses) WebhookRequestsListWithResponse(ctx context.Contex
}
// WorkerGetWithResponse request returning *WorkerGetResponse
func (c *ClientWithResponses) WorkerGetWithResponse(ctx context.Context, worker openapi_types.UUID, params *WorkerGetParams, reqEditors ...RequestEditorFn) (*WorkerGetResponse, error) {
rsp, err := c.WorkerGet(ctx, worker, params, reqEditors...)
func (c *ClientWithResponses) WorkerGetWithResponse(ctx context.Context, worker openapi_types.UUID, reqEditors ...RequestEditorFn) (*WorkerGetResponse, error) {
rsp, err := c.WorkerGet(ctx, worker, reqEditors...)
if err != nil {
return nil, err
}
File diff suppressed because it is too large Load Diff
+63
View File
@@ -55,6 +55,48 @@ func (ns NullConcurrencyLimitStrategy) Value() (driver.Value, error) {
return string(ns.ConcurrencyLimitStrategy), nil
}
// InternalQueue names the internal (system) work queues backed by the
// "InternalQueueItem" table.
// NOTE(review): this file appears sqlc-generated — prefer editing the
// schema/queries over hand-editing here; confirm before changing.
type InternalQueue string

const (
	// Items that trigger worker semaphore count recomputation.
	InternalQueueWORKERSEMAPHORECOUNT InternalQueue = "WORKER_SEMAPHORE_COUNT"
	// Items that trigger step run updates.
	InternalQueueSTEPRUNUPDATE InternalQueue = "STEP_RUN_UPDATE"
)
// Scan implements sql.Scanner for InternalQueue, accepting the []byte or
// string forms a database driver may produce; any other source type is
// rejected with an error.
func (e *InternalQueue) Scan(src interface{}) error {
	switch s := src.(type) {
	case []byte:
		*e = InternalQueue(s)
	case string:
		*e = InternalQueue(s)
	default:
		return fmt.Errorf("unsupported scan type for InternalQueue: %T", src)
	}
	return nil
}
// NullInternalQueue wraps InternalQueue with a validity flag so the value
// can round-trip SQL NULL.
type NullInternalQueue struct {
	InternalQueue InternalQueue `json:"InternalQueue"`
	Valid         bool          `json:"valid"` // Valid is true if InternalQueue is not NULL
}

// Scan implements the Scanner interface.
func (ns *NullInternalQueue) Scan(value interface{}) error {
	if value == nil {
		// SQL NULL: reset to the zero value and mark invalid
		ns.InternalQueue, ns.Valid = "", false
		return nil
	}
	ns.Valid = true
	return ns.InternalQueue.Scan(value)
}

// Value implements the driver Valuer interface.
func (ns NullInternalQueue) Value() (driver.Value, error) {
	if !ns.Valid {
		// emit SQL NULL
		return nil, nil
	}
	return string(ns.InternalQueue), nil
}
type InviteLinkStatus string
const (
@@ -884,6 +926,16 @@ type GetGroupKeyRun struct {
ScheduleTimeoutAt pgtype.Timestamp `json:"scheduleTimeoutAt"`
}
// InternalQueueItem is a row of the "InternalQueueItem" table: one unit of
// internal background work scoped to a tenant and a named internal queue.
type InternalQueueItem struct {
	ID    int64         `json:"id"`
	Queue InternalQueue `json:"queue"`
	// IsQueued is true while the item is pending; processed items are
	// flipped to false and later deleted by CleanupInternalQueueItems.
	IsQueued bool        `json:"isQueued"`
	Data     []byte      `json:"data"`
	TenantId pgtype.UUID `json:"tenantId"`
	Priority int32       `json:"priority"`
	// UniqueKey deduplicates inserts via the unique
	// (tenantId, queue, uniqueKey) index; nullable.
	UniqueKey pgtype.Text `json:"uniqueKey"`
}
type Job struct {
ID pgtype.UUID `json:"id"`
CreatedAt pgtype.Timestamp `json:"createdAt"`
@@ -1312,6 +1364,12 @@ type Worker struct {
WebhookId pgtype.UUID `json:"webhookId"`
}
// WorkerAssignEvent records step run assignments made to a worker.
type WorkerAssignEvent struct {
	ID       int64       `json:"id"`
	WorkerId pgtype.UUID `json:"workerId"`
	// AssignedStepRuns is a raw JSON payload describing the assigned step
	// runs — presumably a list; confirm against the writer.
	AssignedStepRuns []byte `json:"assignedStepRuns"`
}
type WorkerLabel struct {
ID int64 `json:"id"`
CreatedAt pgtype.Timestamp `json:"createdAt"`
@@ -1327,6 +1385,11 @@ type WorkerSemaphore struct {
Slots int32 `json:"slots"`
}
// WorkerSemaphoreCount holds one maintained semaphore count per worker
// (workerId is the table's primary key).
type WorkerSemaphoreCount struct {
	WorkerId pgtype.UUID `json:"workerId"`
	// Count is kept up to date by UpdateWorkerSemaphoreCounts; exact
	// semantics (e.g. free vs. used slots) are defined by that query.
	Count int32 `json:"count"`
}
type WorkerSemaphoreSlot struct {
ID pgtype.UUID `json:"id"`
WorkerId pgtype.UUID `json:"workerId"`
+72
View File
@@ -67,6 +67,24 @@ AND
AND "id" <= @maxId::bigint
AND "tenantId" = @tenantId::uuid;
-- name: GetMinMaxProcessedInternalQueueItems :one
-- Returns the smallest and largest ids among a tenant's already-processed
-- ("isQueued" = false) internal queue items, both defaulting to 0 when
-- there are none. The resulting range bounds CleanupInternalQueueItems.
SELECT
COALESCE(MIN("id"), 0)::bigint AS "minId",
COALESCE(MAX("id"), 0)::bigint AS "maxId"
FROM
"InternalQueueItem"
WHERE
"isQueued" = 'f'
AND "tenantId" = @tenantId::uuid;
-- name: CleanupInternalQueueItems :exec
-- Deletes a tenant's processed internal queue items whose ids fall inside
-- the [minId, maxId] window (see GetMinMaxProcessedInternalQueueItems);
-- the id bounds limit the scan.
DELETE FROM "InternalQueueItem"
WHERE "isQueued" = 'f'
AND
"id" >= @minId::bigint
AND "id" <= @maxId::bigint
AND "tenantId" = @tenantId::uuid;
-- name: ListQueueItems :batchmany
SELECT
*
@@ -96,3 +114,57 @@ SET
"isQueued" = false
WHERE
qi."id" = ANY(@ids::bigint[]);
-- name: ListInternalQueueItems :many
-- Lists up to `limit` (default 100) queued items for a tenant and internal
-- queue, highest priority first, then oldest id. Rows are locked with
-- FOR UPDATE SKIP LOCKED so concurrent consumers skip each other's batches.
-- NOTE(review): despite the "gt" name, gtId is an inclusive lower bound
-- (">=") — confirm callers expect that.
SELECT
*
FROM
"InternalQueueItem" qi
WHERE
qi."isQueued" = true
AND qi."tenantId" = @tenantId::uuid
AND qi."queue" = @queue::"InternalQueue"
AND (
sqlc.narg('gtId')::bigint IS NULL OR
qi."id" >= sqlc.narg('gtId')::bigint
)
-- Added to ensure that the index is used
AND qi."priority" >= 1 AND qi."priority" <= 4
ORDER BY
qi."priority" DESC,
qi."id" ASC
LIMIT
COALESCE(sqlc.narg('limit')::integer, 100)
FOR UPDATE SKIP LOCKED;
-- name: MarkInternalQueueItemsProcessed :exec
-- Flags the given internal queue items as processed; physical deletion is
-- deferred to CleanupInternalQueueItems.
UPDATE
"InternalQueueItem" qi
SET
"isQueued" = false
WHERE
qi."id" = ANY(@ids::bigint[]);
-- name: CreateInternalQueueItemsBulk :exec
-- Bulk-inserts queued items (priority 1) for a tenant by unnesting the
-- parallel datas/uniqueKeys arrays. ON CONFLICT DO NOTHING dedupes against
-- the unique (tenantId, queue, uniqueKey) index (NULL keys never conflict).
INSERT INTO
"InternalQueueItem" (
"queue",
"isQueued",
"data",
"tenantId",
"priority",
"uniqueKey"
)
SELECT
@queue::"InternalQueue",
true,
input."data",
@tenantId::uuid,
1,
input."uniqueKey"
FROM (
SELECT
unnest(@datas::json[]) AS "data",
unnest(@uniqueKeys::text[]) AS "uniqueKey"
) AS input
ON CONFLICT DO NOTHING;
+162
View File
@@ -25,6 +25,26 @@ func (q *Queries) BulkQueueItems(ctx context.Context, db DBTX, ids []int64) erro
return err
}
const cleanupInternalQueueItems = `-- name: CleanupInternalQueueItems :exec
DELETE FROM "InternalQueueItem"
WHERE "isQueued" = 'f'
AND
"id" >= $1::bigint
AND "id" <= $2::bigint
AND "tenantId" = $3::uuid
`

// CleanupInternalQueueItemsParams carries the inclusive id range and the
// tenant for CleanupInternalQueueItems.
type CleanupInternalQueueItemsParams struct {
	Minid    int64       `json:"minid"`
	Maxid    int64       `json:"maxid"`
	Tenantid pgtype.UUID `json:"tenantid"`
}

// CleanupInternalQueueItems deletes a tenant's processed (isQueued = false)
// internal queue items whose ids fall within [Minid, Maxid].
func (q *Queries) CleanupInternalQueueItems(ctx context.Context, db DBTX, arg CleanupInternalQueueItemsParams) error {
	_, err := db.Exec(ctx, cleanupInternalQueueItems, arg.Minid, arg.Maxid, arg.Tenantid)
	return err
}
const cleanupQueueItems = `-- name: CleanupQueueItems :exec
DELETE FROM "QueueItem"
WHERE "isQueued" = 'f'
@@ -45,6 +65,48 @@ func (q *Queries) CleanupQueueItems(ctx context.Context, db DBTX, arg CleanupQue
return err
}
const createInternalQueueItemsBulk = `-- name: CreateInternalQueueItemsBulk :exec
INSERT INTO
"InternalQueueItem" (
"queue",
"isQueued",
"data",
"tenantId",
"priority",
"uniqueKey"
)
SELECT
$1::"InternalQueue",
true,
input."data",
$2::uuid,
1,
input."uniqueKey"
FROM (
SELECT
unnest($3::json[]) AS "data",
unnest($4::text[]) AS "uniqueKey"
) AS input
ON CONFLICT DO NOTHING
`

// CreateInternalQueueItemsBulkParams holds the target queue/tenant and the
// parallel payload/unique-key arrays (Datas[i] pairs with Uniquekeys[i]).
type CreateInternalQueueItemsBulkParams struct {
	Queue      InternalQueue `json:"queue"`
	Tenantid   pgtype.UUID   `json:"tenantid"`
	Datas      [][]byte      `json:"datas"`
	Uniquekeys []string      `json:"uniquekeys"`
}

// CreateInternalQueueItemsBulk inserts queued items in bulk; conflicting
// unique keys are silently skipped (ON CONFLICT DO NOTHING).
func (q *Queries) CreateInternalQueueItemsBulk(ctx context.Context, db DBTX, arg CreateInternalQueueItemsBulkParams) error {
	_, err := db.Exec(ctx, createInternalQueueItemsBulk,
		arg.Queue,
		arg.Tenantid,
		arg.Datas,
		arg.Uniquekeys,
	)
	return err
}
const createQueueItem = `-- name: CreateQueueItem :exec
INSERT INTO
"QueueItem" (
@@ -105,6 +167,29 @@ func (q *Queries) CreateQueueItem(ctx context.Context, db DBTX, arg CreateQueueI
return err
}
const getMinMaxProcessedInternalQueueItems = `-- name: GetMinMaxProcessedInternalQueueItems :one
SELECT
COALESCE(MIN("id"), 0)::bigint AS "minId",
COALESCE(MAX("id"), 0)::bigint AS "maxId"
FROM
"InternalQueueItem"
WHERE
"isQueued" = 'f'
AND "tenantId" = $1::uuid
`

// GetMinMaxProcessedInternalQueueItemsRow is the id range of a tenant's
// processed internal queue items (both 0 when there are none).
type GetMinMaxProcessedInternalQueueItemsRow struct {
	MinId int64 `json:"minId"`
	MaxId int64 `json:"maxId"`
}

// GetMinMaxProcessedInternalQueueItems returns the bounds used by
// CleanupInternalQueueItems for the given tenant.
func (q *Queries) GetMinMaxProcessedInternalQueueItems(ctx context.Context, db DBTX, tenantid pgtype.UUID) (*GetMinMaxProcessedInternalQueueItemsRow, error) {
	row := db.QueryRow(ctx, getMinMaxProcessedInternalQueueItems, tenantid)
	var i GetMinMaxProcessedInternalQueueItemsRow
	err := row.Scan(&i.MinId, &i.MaxId)
	return &i, err
}
const getMinMaxProcessedQueueItems = `-- name: GetMinMaxProcessedQueueItems :one
SELECT
COALESCE(MIN("id"), 0)::bigint AS "minId",
@@ -128,6 +213,69 @@ func (q *Queries) GetMinMaxProcessedQueueItems(ctx context.Context, db DBTX, ten
return &i, err
}
const listInternalQueueItems = `-- name: ListInternalQueueItems :many
SELECT
id, queue, "isQueued", data, "tenantId", priority, "uniqueKey"
FROM
"InternalQueueItem" qi
WHERE
qi."isQueued" = true
AND qi."tenantId" = $1::uuid
AND qi."queue" = $2::"InternalQueue"
AND (
$3::bigint IS NULL OR
qi."id" >= $3::bigint
)
-- Added to ensure that the index is used
AND qi."priority" >= 1 AND qi."priority" <= 4
ORDER BY
qi."priority" DESC,
qi."id" ASC
LIMIT
COALESCE($4::integer, 100)
FOR UPDATE SKIP LOCKED
`

// ListInternalQueueItemsParams filters the listing by tenant and queue.
// GtId is an optional inclusive lower id bound (NOTE(review): ">=" despite
// the "gt" name); Limit defaults to 100 when null.
type ListInternalQueueItemsParams struct {
	Tenantid pgtype.UUID   `json:"tenantid"`
	Queue    InternalQueue `json:"queue"`
	GtId     pgtype.Int8   `json:"gtId"`
	Limit    pgtype.Int4   `json:"limit"`
}

// ListInternalQueueItems returns queued items ordered by priority (desc)
// then id (asc), locking the returned rows with FOR UPDATE SKIP LOCKED so
// concurrent consumers do not pick up the same batch.
func (q *Queries) ListInternalQueueItems(ctx context.Context, db DBTX, arg ListInternalQueueItemsParams) ([]*InternalQueueItem, error) {
	rows, err := db.Query(ctx, listInternalQueueItems,
		arg.Tenantid,
		arg.Queue,
		arg.GtId,
		arg.Limit,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []*InternalQueueItem
	for rows.Next() {
		var i InternalQueueItem
		if err := rows.Scan(
			&i.ID,
			&i.Queue,
			&i.IsQueued,
			&i.Data,
			&i.TenantId,
			&i.Priority,
			&i.UniqueKey,
		); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const listQueues = `-- name: ListQueues :many
SELECT
id, "tenantId", name
@@ -157,6 +305,20 @@ func (q *Queries) ListQueues(ctx context.Context, db DBTX, tenantid pgtype.UUID)
return items, nil
}
const markInternalQueueItemsProcessed = `-- name: MarkInternalQueueItemsProcessed :exec
UPDATE
"InternalQueueItem" qi
SET
"isQueued" = false
WHERE
qi."id" = ANY($1::bigint[])
`

// MarkInternalQueueItemsProcessed flags the given internal queue items as
// processed; physical deletion is deferred to CleanupInternalQueueItems.
func (q *Queries) MarkInternalQueueItemsProcessed(ctx context.Context, db DBTX, ids []int64) error {
	_, err := db.Exec(ctx, markInternalQueueItemsProcessed, ids)
	return err
}
const upsertQueue = `-- name: UpsertQueue :exec
INSERT INTO
"Queue" (
+54
View File
@@ -1,6 +1,9 @@
-- CreateEnum
CREATE TYPE "ConcurrencyLimitStrategy" AS ENUM ('CANCEL_IN_PROGRESS', 'DROP_NEWEST', 'QUEUE_NEWEST', 'GROUP_ROUND_ROBIN');
-- CreateEnum
CREATE TYPE "InternalQueue" AS ENUM ('WORKER_SEMAPHORE_COUNT', 'STEP_RUN_UPDATE');
-- CreateEnum
CREATE TYPE "InviteLinkStatus" AS ENUM ('PENDING', 'ACCEPTED', 'REJECTED');
@@ -141,6 +144,19 @@ CREATE TABLE "GetGroupKeyRun" (
CONSTRAINT "GetGroupKeyRun_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "InternalQueueItem" (
"id" BIGSERIAL NOT NULL,
"queue" "InternalQueue" NOT NULL,
"isQueued" BOOLEAN NOT NULL,
"data" JSONB,
"tenantId" UUID NOT NULL,
"priority" INTEGER NOT NULL DEFAULT 1,
"uniqueKey" TEXT,
CONSTRAINT "InternalQueueItem_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "Job" (
"id" UUID NOT NULL,
@@ -654,6 +670,15 @@ CREATE TABLE "Worker" (
CONSTRAINT "Worker_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "WorkerAssignEvent" (
"id" BIGSERIAL NOT NULL,
"workerId" UUID NOT NULL,
"assignedStepRuns" JSONB,
CONSTRAINT "WorkerAssignEvent_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "WorkerLabel" (
"id" BIGSERIAL NOT NULL,
@@ -673,6 +698,14 @@ CREATE TABLE "WorkerSemaphore" (
"slots" INTEGER NOT NULL
);
-- CreateTable
CREATE TABLE "WorkerSemaphoreCount" (
"workerId" UUID NOT NULL,
"count" INTEGER NOT NULL,
CONSTRAINT "WorkerSemaphoreCount_pkey" PRIMARY KEY ("workerId")
);
-- CreateTable
CREATE TABLE "WorkerSemaphoreSlot" (
"id" UUID NOT NULL,
@@ -928,6 +961,12 @@ CREATE INDEX "GetGroupKeyRun_workerId_idx" ON "GetGroupKeyRun"("workerId" ASC);
-- CreateIndex
CREATE UNIQUE INDEX "GetGroupKeyRun_workflowRunId_key" ON "GetGroupKeyRun"("workflowRunId" ASC);
-- CreateIndex
CREATE INDEX "InternalQueueItem_isQueued_tenantId_queue_priority_id_idx" ON "InternalQueueItem"("isQueued" ASC, "tenantId" ASC, "queue" ASC, "priority" DESC, "id" ASC);
-- CreateIndex
CREATE UNIQUE INDEX "InternalQueueItem_tenantId_queue_uniqueKey_key" ON "InternalQueueItem"("tenantId" ASC, "queue" ASC, "uniqueKey" ASC);
-- CreateIndex
CREATE UNIQUE INDEX "Job_id_key" ON "Job"("id" ASC);
@@ -1132,6 +1171,9 @@ CREATE UNIQUE INDEX "Worker_id_key" ON "Worker"("id" ASC);
-- CreateIndex
CREATE UNIQUE INDEX "Worker_webhookId_key" ON "Worker"("webhookId" ASC);
-- CreateIndex
CREATE INDEX "WorkerAssignEvent_workerId_id_idx" ON "WorkerAssignEvent"("workerId" ASC, "id" ASC);
-- CreateIndex
CREATE INDEX "WorkerLabel_workerId_idx" ON "WorkerLabel"("workerId" ASC);
@@ -1141,6 +1183,12 @@ CREATE UNIQUE INDEX "WorkerLabel_workerId_key_key" ON "WorkerLabel"("workerId" A
-- CreateIndex
CREATE UNIQUE INDEX "WorkerSemaphore_workerId_key" ON "WorkerSemaphore"("workerId" ASC);
-- CreateIndex
CREATE INDEX "WorkerSemaphoreCount_workerId_idx" ON "WorkerSemaphoreCount"("workerId" ASC);
-- CreateIndex
CREATE UNIQUE INDEX "WorkerSemaphoreCount_workerId_key" ON "WorkerSemaphoreCount"("workerId" ASC);
-- CreateIndex
CREATE UNIQUE INDEX "WorkerSemaphoreSlot_id_key" ON "WorkerSemaphoreSlot"("id" ASC);
@@ -1468,12 +1516,18 @@ ALTER TABLE "Worker" ADD CONSTRAINT "Worker_tenantId_fkey" FOREIGN KEY ("tenantI
-- AddForeignKey
ALTER TABLE "Worker" ADD CONSTRAINT "Worker_webhookId_fkey" FOREIGN KEY ("webhookId") REFERENCES "WebhookWorker"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkerAssignEvent" ADD CONSTRAINT "WorkerAssignEvent_workerId_fkey" FOREIGN KEY ("workerId") REFERENCES "Worker"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkerLabel" ADD CONSTRAINT "WorkerLabel_workerId_fkey" FOREIGN KEY ("workerId") REFERENCES "Worker"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkerSemaphore" ADD CONSTRAINT "WorkerSemaphore_workerId_fkey" FOREIGN KEY ("workerId") REFERENCES "Worker"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkerSemaphoreCount" ADD CONSTRAINT "WorkerSemaphoreCount_workerId_fkey" FOREIGN KEY ("workerId") REFERENCES "Worker"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkerSemaphoreSlot" ADD CONSTRAINT "WorkerSemaphoreSlot_stepRunId_fkey" FOREIGN KEY ("stepRunId") REFERENCES "StepRun"("id") ON DELETE CASCADE ON UPDATE CASCADE;
+123 -96
View File
@@ -226,16 +226,6 @@ WHERE
"tenantId" = @tenantId::uuid
RETURNING "StepRun".*;
-- name: UnlinkStepRunFromWorker :one
UPDATE
"StepRun"
SET
"workerId" = NULL
WHERE
"id" = @stepRunId::uuid AND
"tenantId" = @tenantId::uuid
RETURNING *;
-- name: ResolveLaterStepRuns :many
WITH RECURSIVE currStepRun AS (
SELECT *
@@ -404,17 +394,10 @@ WITH inactive_workers AS (
AND w."lastHeartbeatAt" < NOW() - INTERVAL '30 seconds'
),
step_runs_to_reassign AS (
SELECT "stepRunId"
FROM "WorkerSemaphoreSlot"
SELECT "id", "workerId", "retryCount"
FROM "StepRun"
WHERE
"workerId" = ANY(SELECT "id" FROM inactive_workers)
AND "stepRunId" IS NOT NULL
),
update_semaphore_steps AS (
UPDATE "WorkerSemaphoreSlot" wss
SET "stepRunId" = NULL
FROM step_runs_to_reassign
WHERE wss."stepRunId" = step_runs_to_reassign."stepRunId"
),
step_runs_with_data AS (
SELECT
@@ -430,7 +413,7 @@ step_runs_with_data AS (
JOIN
"Step" s ON sr."stepId" = s."id"
WHERE
sr."id" = ANY(SELECT "stepRunId" FROM step_runs_to_reassign)
sr."id" = ANY(SELECT "id" FROM step_runs_to_reassign)
FOR UPDATE SKIP LOCKED
),
inserted_queue_items AS (
@@ -464,15 +447,18 @@ updated_step_runs AS (
SET
"status" = 'PENDING_ASSIGNMENT',
"scheduleTimeoutAt" = CURRENT_TIMESTAMP + COALESCE(convert_duration_to_interval(srs."scheduleTimeout"), INTERVAL '5 minutes'),
"updatedAt" = CURRENT_TIMESTAMP
"updatedAt" = CURRENT_TIMESTAMP,
"workerId" = NULL
FROM step_runs_with_data srs
WHERE sr."id" = srs."id"
RETURNING sr."id"
)
SELECT
srs."id"
srtr."id",
srtr."workerId",
srtr."retryCount"
FROM
step_runs_with_data srs;
step_runs_to_reassign srtr;
-- name: ListStepRunsToTimeout :many
SELECT "id"
@@ -499,17 +485,25 @@ WHERE
"tenantId" = @tenantId::uuid
RETURNING *;
-- name: ReleaseWorkerSemaphoreSlot :one
WITH step_run as (
SELECT "workerId"
FROM "StepRun"
WHERE "id" = @stepRunId::uuid AND "tenantId" = @tenantId::uuid
)
UPDATE "WorkerSemaphoreSlot"
SET "stepRunId" = NULL
WHERE "stepRunId" = @stepRunId::uuid
AND "workerId" = (SELECT "workerId" FROM step_run)
RETURNING *;
-- name: UpdateStepRunUnsetWorkerId :one
UPDATE "StepRun" newsr
SET
"workerId" = NULL
FROM
(
SELECT
"id",
"workerId"
FROM
"StepRun"
WHERE
"id" = @stepRunId::uuid AND
"tenantId" = @tenantId::uuid
) AS oldsr
WHERE
newsr."id" = oldsr."id"
-- return whether old worker id was set
RETURNING oldsr."workerId";
-- name: CheckWorker :one
SELECT
@@ -565,69 +559,102 @@ WHERE
wss."stepRunId" IS NULL
FOR UPDATE SKIP LOCKED;
-- name: BulkAssignStepRunsToWorkers :many
WITH already_assigned_step_runs AS (
SELECT
input."id",
wss."id" AS "slotId"
FROM
(
SELECT
unnest(@stepRunIds::uuid[]) AS "id"
) AS input
JOIN
"WorkerSemaphoreSlot" wss ON input."id" = wss."stepRunId"
), already_assigned_slots AS (
SELECT
wss."id"
FROM
(
SELECT
unnest(@slotIds::uuid[]) AS "id"
) AS input
JOIN
"WorkerSemaphoreSlot" wss ON input."id" = wss."id"
WHERE
wss."stepRunId" IS NOT NULL
), updated_step_runs AS (
UPDATE
"StepRun" sr
SET
"status" = 'ASSIGNED',
"workerId" = input."workerId",
"tickerId" = NULL,
"updatedAt" = CURRENT_TIMESTAMP,
"timeoutAt" = CURRENT_TIMESTAMP + convert_duration_to_interval(input."stepTimeout")
FROM (
SELECT
"id",
"stepTimeout",
"workerId",
"slotId"
FROM
(
SELECT
unnest(@stepRunIds::uuid[]) AS "id",
unnest(@stepRunTimeouts::text[]) AS "stepTimeout",
unnest(@workerIds::uuid[]) AS "workerId",
unnest(@slotIds::uuid[]) AS "slotId"
) AS subquery
WHERE
"id" NOT IN (SELECT "id" FROM already_assigned_step_runs)
AND "slotId" NOT IN (SELECT "id" FROM already_assigned_slots)
) AS input
WHERE
sr."id" = input."id"
RETURNING input."id", input."slotId", input."workerId"
)
UPDATE
"WorkerSemaphoreSlot" wss
SET
"stepRunId" = updated_step_runs."id"
FROM updated_step_runs
-- name: GetWorkerSemaphoreCounts :many
SELECT
"workerId",
"count"
FROM
"WorkerSemaphoreCount"
WHERE
wss."id" = updated_step_runs."slotId"
RETURNING updated_step_runs."id"::uuid, updated_step_runs."workerId"::uuid;
"workerId" = ANY(@workers::uuid[]);
-- name: GetWorkerDispatcherActions :many
WITH actions AS (
SELECT
"id",
"actionId"
FROM
"Action"
WHERE
"tenantId" = @tenantId::uuid AND
"actionId" = ANY(@actionIds::text[])
)
SELECT
w."id",
a."actionId",
w."dispatcherId"
FROM
"Worker" w
JOIN
"_ActionToWorker" atw ON w."id" = atw."B"
JOIN
actions a ON atw."A" = a."id"
WHERE
w."tenantId" = @tenantId::uuid
AND w."dispatcherId" IS NOT NULL
AND w."lastHeartbeatAt" > NOW() - INTERVAL '5 seconds'
AND w."isActive" = true
AND w."isPaused" = false;
-- name: UpdateWorkerSemaphoreCounts :exec
UPDATE
"WorkerSemaphoreCount" wsc
SET
"count" = input."count"
FROM (
SELECT
"workerId",
"count"
FROM
(
SELECT
unnest(@workerIds::uuid[]) AS "workerId",
unnest(@counts::int[]) AS "count"
) AS subquery
) AS input
WHERE
wsc."workerId" = input."workerId";
-- name: CreateWorkerAssignEvents :exec
INSERT INTO "WorkerAssignEvent" (
"workerId",
"assignedStepRuns"
)
SELECT
input."workerId",
input."assignedStepRuns"
FROM (
SELECT
unnest(@workerIds::uuid[]) AS "workerId",
unnest(@assignedStepRuns::jsonb[]) AS "assignedStepRuns"
) AS input
RETURNING *;
-- name: UpdateStepRunsToAssigned :many
UPDATE
"StepRun" sr
SET
"status" = 'ASSIGNED',
"workerId" = input."workerId",
"tickerId" = NULL,
"updatedAt" = CURRENT_TIMESTAMP,
"timeoutAt" = CURRENT_TIMESTAMP + convert_duration_to_interval(input."stepTimeout")
FROM (
SELECT
"id",
"stepTimeout",
"workerId"
FROM
(
SELECT
unnest(@stepRunIds::uuid[]) AS "id",
unnest(@stepRunTimeouts::text[]) AS "stepTimeout",
unnest(@workerIds::uuid[]) AS "workerId"
) AS subquery
) AS input
WHERE
sr."id" = input."id"
RETURNING input."id", input."workerId";
-- name: GetCancelledStepRuns :many
SELECT
+257 -198
View File
@@ -98,108 +98,6 @@ func (q *Queries) ArchiveStepRunResultFromStepRun(ctx context.Context, db DBTX,
return &i, err
}
const bulkAssignStepRunsToWorkers = `-- name: BulkAssignStepRunsToWorkers :many
WITH already_assigned_step_runs AS (
SELECT
input."id",
wss."id" AS "slotId"
FROM
(
SELECT
unnest($1::uuid[]) AS "id"
) AS input
JOIN
"WorkerSemaphoreSlot" wss ON input."id" = wss."stepRunId"
), already_assigned_slots AS (
SELECT
wss."id"
FROM
(
SELECT
unnest($2::uuid[]) AS "id"
) AS input
JOIN
"WorkerSemaphoreSlot" wss ON input."id" = wss."id"
WHERE
wss."stepRunId" IS NOT NULL
), updated_step_runs AS (
UPDATE
"StepRun" sr
SET
"status" = 'ASSIGNED',
"workerId" = input."workerId",
"tickerId" = NULL,
"updatedAt" = CURRENT_TIMESTAMP,
"timeoutAt" = CURRENT_TIMESTAMP + convert_duration_to_interval(input."stepTimeout")
FROM (
SELECT
"id",
"stepTimeout",
"workerId",
"slotId"
FROM
(
SELECT
unnest($1::uuid[]) AS "id",
unnest($3::text[]) AS "stepTimeout",
unnest($4::uuid[]) AS "workerId",
unnest($2::uuid[]) AS "slotId"
) AS subquery
WHERE
"id" NOT IN (SELECT "id" FROM already_assigned_step_runs)
AND "slotId" NOT IN (SELECT "id" FROM already_assigned_slots)
) AS input
WHERE
sr."id" = input."id"
RETURNING input."id", input."slotId", input."workerId"
)
UPDATE
"WorkerSemaphoreSlot" wss
SET
"stepRunId" = updated_step_runs."id"
FROM updated_step_runs
WHERE
wss."id" = updated_step_runs."slotId"
RETURNING updated_step_runs."id"::uuid, updated_step_runs."workerId"::uuid
`
type BulkAssignStepRunsToWorkersParams struct {
Steprunids []pgtype.UUID `json:"steprunids"`
Slotids []pgtype.UUID `json:"slotids"`
Stepruntimeouts []string `json:"stepruntimeouts"`
Workerids []pgtype.UUID `json:"workerids"`
}
type BulkAssignStepRunsToWorkersRow struct {
UpdatedStepRunsID pgtype.UUID `json:"updated_step_runs_id"`
UpdatedStepRunsWorkerId pgtype.UUID `json:"updated_step_runs_workerId"`
}
func (q *Queries) BulkAssignStepRunsToWorkers(ctx context.Context, db DBTX, arg BulkAssignStepRunsToWorkersParams) ([]*BulkAssignStepRunsToWorkersRow, error) {
rows, err := db.Query(ctx, bulkAssignStepRunsToWorkers,
arg.Steprunids,
arg.Slotids,
arg.Stepruntimeouts,
arg.Workerids,
)
if err != nil {
return nil, err
}
defer rows.Close()
var items []*BulkAssignStepRunsToWorkersRow
for rows.Next() {
var i BulkAssignStepRunsToWorkersRow
if err := rows.Scan(&i.UpdatedStepRunsID, &i.UpdatedStepRunsWorkerId); err != nil {
return nil, err
}
items = append(items, &i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const bulkCreateStepRunEvent = `-- name: BulkCreateStepRunEvent :exec
WITH input_values AS (
SELECT
@@ -514,6 +412,32 @@ func (q *Queries) CreateStepRunEvent(ctx context.Context, db DBTX, arg CreateSte
return err
}
const createWorkerAssignEvents = `-- name: CreateWorkerAssignEvents :exec
INSERT INTO "WorkerAssignEvent" (
"workerId",
"assignedStepRuns"
)
SELECT
input."workerId",
input."assignedStepRuns"
FROM (
SELECT
unnest($1::uuid[]) AS "workerId",
unnest($2::jsonb[]) AS "assignedStepRuns"
) AS input
RETURNING id, "workerId", "assignedStepRuns"
`
type CreateWorkerAssignEventsParams struct {
Workerids []pgtype.UUID `json:"workerids"`
Assignedstepruns [][]byte `json:"assignedstepruns"`
}
func (q *Queries) CreateWorkerAssignEvents(ctx context.Context, db DBTX, arg CreateWorkerAssignEventsParams) error {
_, err := db.Exec(ctx, createWorkerAssignEvents, arg.Workerids, arg.Assignedstepruns)
return err
}
const getCancelledStepRuns = `-- name: GetCancelledStepRuns :many
SELECT
"id"
@@ -1056,6 +980,66 @@ func (q *Queries) GetStepRunMeta(ctx context.Context, db DBTX, arg GetStepRunMet
return &i, err
}
const getWorkerDispatcherActions = `-- name: GetWorkerDispatcherActions :many
WITH actions AS (
SELECT
"id",
"actionId"
FROM
"Action"
WHERE
"tenantId" = $1::uuid AND
"actionId" = ANY($2::text[])
)
SELECT
w."id",
a."actionId",
w."dispatcherId"
FROM
"Worker" w
JOIN
"_ActionToWorker" atw ON w."id" = atw."B"
JOIN
actions a ON atw."A" = a."id"
WHERE
w."tenantId" = $1::uuid
AND w."dispatcherId" IS NOT NULL
AND w."lastHeartbeatAt" > NOW() - INTERVAL '5 seconds'
AND w."isActive" = true
AND w."isPaused" = false
`
type GetWorkerDispatcherActionsParams struct {
Tenantid pgtype.UUID `json:"tenantid"`
Actionids []string `json:"actionids"`
}
type GetWorkerDispatcherActionsRow struct {
ID pgtype.UUID `json:"id"`
ActionId string `json:"actionId"`
DispatcherId pgtype.UUID `json:"dispatcherId"`
}
func (q *Queries) GetWorkerDispatcherActions(ctx context.Context, db DBTX, arg GetWorkerDispatcherActionsParams) ([]*GetWorkerDispatcherActionsRow, error) {
rows, err := db.Query(ctx, getWorkerDispatcherActions, arg.Tenantid, arg.Actionids)
if err != nil {
return nil, err
}
defer rows.Close()
var items []*GetWorkerDispatcherActionsRow
for rows.Next() {
var i GetWorkerDispatcherActionsRow
if err := rows.Scan(&i.ID, &i.ActionId, &i.DispatcherId); err != nil {
return nil, err
}
items = append(items, &i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getWorkerLabels = `-- name: GetWorkerLabels :many
SELECT
"key",
@@ -1093,6 +1077,36 @@ func (q *Queries) GetWorkerLabels(ctx context.Context, db DBTX, workerid pgtype.
return items, nil
}
const getWorkerSemaphoreCounts = `-- name: GetWorkerSemaphoreCounts :many
SELECT
"workerId",
"count"
FROM
"WorkerSemaphoreCount"
WHERE
"workerId" = ANY($1::uuid[])
`
func (q *Queries) GetWorkerSemaphoreCounts(ctx context.Context, db DBTX, workers []pgtype.UUID) ([]*WorkerSemaphoreCount, error) {
rows, err := db.Query(ctx, getWorkerSemaphoreCounts, workers)
if err != nil {
return nil, err
}
defer rows.Close()
var items []*WorkerSemaphoreCount
for rows.Next() {
var i WorkerSemaphoreCount
if err := rows.Scan(&i.WorkerId, &i.Count); err != nil {
return nil, err
}
items = append(items, &i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const listNonFinalChildStepRuns = `-- name: ListNonFinalChildStepRuns :many
WITH RECURSIVE currStepRun AS (
SELECT id, "createdAt", "updatedAt", "deletedAt", "tenantId", "jobRunId", "stepId", "order", "workerId", "tickerId", status, input, output, "requeueAfter", "scheduleTimeoutAt", error, "startedAt", "finishedAt", "timeoutAt", "cancelledAt", "cancelledReason", "cancelledError", "inputSchema", "callerFiles", "gitRepoBranch", "retryCount", "semaphoreReleased", queue, priority
@@ -1520,17 +1534,10 @@ WITH inactive_workers AS (
AND w."lastHeartbeatAt" < NOW() - INTERVAL '30 seconds'
),
step_runs_to_reassign AS (
SELECT "stepRunId"
FROM "WorkerSemaphoreSlot"
SELECT "id", "workerId", "retryCount"
FROM "StepRun"
WHERE
"workerId" = ANY(SELECT "id" FROM inactive_workers)
AND "stepRunId" IS NOT NULL
),
update_semaphore_steps AS (
UPDATE "WorkerSemaphoreSlot" wss
SET "stepRunId" = NULL
FROM step_runs_to_reassign
WHERE wss."stepRunId" = step_runs_to_reassign."stepRunId"
),
step_runs_with_data AS (
SELECT
@@ -1546,7 +1553,7 @@ step_runs_with_data AS (
JOIN
"Step" s ON sr."stepId" = s."id"
WHERE
sr."id" = ANY(SELECT "stepRunId" FROM step_runs_to_reassign)
sr."id" = ANY(SELECT "id" FROM step_runs_to_reassign)
FOR UPDATE SKIP LOCKED
),
inserted_queue_items AS (
@@ -1580,30 +1587,39 @@ updated_step_runs AS (
SET
"status" = 'PENDING_ASSIGNMENT',
"scheduleTimeoutAt" = CURRENT_TIMESTAMP + COALESCE(convert_duration_to_interval(srs."scheduleTimeout"), INTERVAL '5 minutes'),
"updatedAt" = CURRENT_TIMESTAMP
"updatedAt" = CURRENT_TIMESTAMP,
"workerId" = NULL
FROM step_runs_with_data srs
WHERE sr."id" = srs."id"
RETURNING sr."id"
)
SELECT
srs."id"
srtr."id",
srtr."workerId",
srtr."retryCount"
FROM
step_runs_with_data srs
step_runs_to_reassign srtr
`
func (q *Queries) ListStepRunsToReassign(ctx context.Context, db DBTX, tenantid pgtype.UUID) ([]pgtype.UUID, error) {
type ListStepRunsToReassignRow struct {
ID pgtype.UUID `json:"id"`
WorkerId pgtype.UUID `json:"workerId"`
RetryCount int32 `json:"retryCount"`
}
func (q *Queries) ListStepRunsToReassign(ctx context.Context, db DBTX, tenantid pgtype.UUID) ([]*ListStepRunsToReassignRow, error) {
rows, err := db.Query(ctx, listStepRunsToReassign, tenantid)
if err != nil {
return nil, err
}
defer rows.Close()
var items []pgtype.UUID
var items []*ListStepRunsToReassignRow
for rows.Next() {
var id pgtype.UUID
if err := rows.Scan(&id); err != nil {
var i ListStepRunsToReassignRow
if err := rows.Scan(&i.ID, &i.WorkerId, &i.RetryCount); err != nil {
return nil, err
}
items = append(items, id)
items = append(items, &i)
}
if err := rows.Err(); err != nil {
return nil, err
@@ -1701,31 +1717,6 @@ func (q *Queries) RefreshTimeoutBy(ctx context.Context, db DBTX, arg RefreshTime
return &i, err
}
const releaseWorkerSemaphoreSlot = `-- name: ReleaseWorkerSemaphoreSlot :one
WITH step_run as (
SELECT "workerId"
FROM "StepRun"
WHERE "id" = $1::uuid AND "tenantId" = $2::uuid
)
UPDATE "WorkerSemaphoreSlot"
SET "stepRunId" = NULL
WHERE "stepRunId" = $1::uuid
AND "workerId" = (SELECT "workerId" FROM step_run)
RETURNING id, "workerId", "stepRunId"
`
type ReleaseWorkerSemaphoreSlotParams struct {
Steprunid pgtype.UUID `json:"steprunid"`
Tenantid pgtype.UUID `json:"tenantid"`
}
func (q *Queries) ReleaseWorkerSemaphoreSlot(ctx context.Context, db DBTX, arg ReleaseWorkerSemaphoreSlotParams) (*WorkerSemaphoreSlot, error) {
row := db.QueryRow(ctx, releaseWorkerSemaphoreSlot, arg.Steprunid, arg.Tenantid)
var i WorkerSemaphoreSlot
err := row.Scan(&i.ID, &i.WorkerId, &i.StepRunId)
return &i, err
}
const replayStepRunResetJobRun = `-- name: ReplayStepRunResetJobRun :one
UPDATE
"JobRun"
@@ -2078,59 +2069,6 @@ func (q *Queries) ResolveLaterStepRuns(ctx context.Context, db DBTX, arg Resolve
return items, nil
}
const unlinkStepRunFromWorker = `-- name: UnlinkStepRunFromWorker :one
UPDATE
"StepRun"
SET
"workerId" = NULL
WHERE
"id" = $1::uuid AND
"tenantId" = $2::uuid
RETURNING id, "createdAt", "updatedAt", "deletedAt", "tenantId", "jobRunId", "stepId", "order", "workerId", "tickerId", status, input, output, "requeueAfter", "scheduleTimeoutAt", error, "startedAt", "finishedAt", "timeoutAt", "cancelledAt", "cancelledReason", "cancelledError", "inputSchema", "callerFiles", "gitRepoBranch", "retryCount", "semaphoreReleased", queue, priority
`
type UnlinkStepRunFromWorkerParams struct {
Steprunid pgtype.UUID `json:"steprunid"`
Tenantid pgtype.UUID `json:"tenantid"`
}
func (q *Queries) UnlinkStepRunFromWorker(ctx context.Context, db DBTX, arg UnlinkStepRunFromWorkerParams) (*StepRun, error) {
row := db.QueryRow(ctx, unlinkStepRunFromWorker, arg.Steprunid, arg.Tenantid)
var i StepRun
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.TenantId,
&i.JobRunId,
&i.StepId,
&i.Order,
&i.WorkerId,
&i.TickerId,
&i.Status,
&i.Input,
&i.Output,
&i.RequeueAfter,
&i.ScheduleTimeoutAt,
&i.Error,
&i.StartedAt,
&i.FinishedAt,
&i.TimeoutAt,
&i.CancelledAt,
&i.CancelledReason,
&i.CancelledError,
&i.InputSchema,
&i.CallerFiles,
&i.GitRepoBranch,
&i.RetryCount,
&i.SemaphoreReleased,
&i.Queue,
&i.Priority,
)
return &i, err
}
const updateStepRun = `-- name: UpdateStepRun :one
UPDATE
"StepRun"
@@ -2315,6 +2253,127 @@ func (q *Queries) UpdateStepRunOverridesData(ctx context.Context, db DBTX, arg U
return input, err
}
const updateStepRunUnsetWorkerId = `-- name: UpdateStepRunUnsetWorkerId :one
UPDATE "StepRun" newsr
SET
"workerId" = NULL
FROM
(
SELECT
"id",
"workerId"
FROM
"StepRun"
WHERE
"id" = $1::uuid AND
"tenantId" = $2::uuid
) AS oldsr
WHERE
newsr."id" = oldsr."id"
RETURNING oldsr."workerId"
`
type UpdateStepRunUnsetWorkerIdParams struct {
Steprunid pgtype.UUID `json:"steprunid"`
Tenantid pgtype.UUID `json:"tenantid"`
}
// return whether old worker id was set
func (q *Queries) UpdateStepRunUnsetWorkerId(ctx context.Context, db DBTX, arg UpdateStepRunUnsetWorkerIdParams) (pgtype.UUID, error) {
row := db.QueryRow(ctx, updateStepRunUnsetWorkerId, arg.Steprunid, arg.Tenantid)
var workerId pgtype.UUID
err := row.Scan(&workerId)
return workerId, err
}
const updateStepRunsToAssigned = `-- name: UpdateStepRunsToAssigned :many
UPDATE
"StepRun" sr
SET
"status" = 'ASSIGNED',
"workerId" = input."workerId",
"tickerId" = NULL,
"updatedAt" = CURRENT_TIMESTAMP,
"timeoutAt" = CURRENT_TIMESTAMP + convert_duration_to_interval(input."stepTimeout")
FROM (
SELECT
"id",
"stepTimeout",
"workerId"
FROM
(
SELECT
unnest($1::uuid[]) AS "id",
unnest($2::text[]) AS "stepTimeout",
unnest($3::uuid[]) AS "workerId"
) AS subquery
) AS input
WHERE
sr."id" = input."id"
RETURNING input."id", input."workerId"
`
type UpdateStepRunsToAssignedParams struct {
Steprunids []pgtype.UUID `json:"steprunids"`
Stepruntimeouts []string `json:"stepruntimeouts"`
Workerids []pgtype.UUID `json:"workerids"`
}
type UpdateStepRunsToAssignedRow struct {
ID interface{} `json:"id"`
WorkerId interface{} `json:"workerId"`
}
func (q *Queries) UpdateStepRunsToAssigned(ctx context.Context, db DBTX, arg UpdateStepRunsToAssignedParams) ([]*UpdateStepRunsToAssignedRow, error) {
rows, err := db.Query(ctx, updateStepRunsToAssigned, arg.Steprunids, arg.Stepruntimeouts, arg.Workerids)
if err != nil {
return nil, err
}
defer rows.Close()
var items []*UpdateStepRunsToAssignedRow
for rows.Next() {
var i UpdateStepRunsToAssignedRow
if err := rows.Scan(&i.ID, &i.WorkerId); err != nil {
return nil, err
}
items = append(items, &i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const updateWorkerSemaphoreCounts = `-- name: UpdateWorkerSemaphoreCounts :exec
UPDATE
"WorkerSemaphoreCount" wsc
SET
"count" = input."count"
FROM (
SELECT
"workerId",
"count"
FROM
(
SELECT
unnest($1::uuid[]) AS "workerId",
unnest($2::int[]) AS "count"
) AS subquery
) AS input
WHERE
wsc."workerId" = input."workerId"
`
type UpdateWorkerSemaphoreCountsParams struct {
Workerids []pgtype.UUID `json:"workerids"`
Counts []int32 `json:"counts"`
}
func (q *Queries) UpdateWorkerSemaphoreCounts(ctx context.Context, db DBTX, arg UpdateWorkerSemaphoreCountsParams) error {
_, err := db.Exec(ctx, updateWorkerSemaphoreCounts, arg.Workerids, arg.Counts)
return err
}
const upsertDesiredWorkerLabel = `-- name: UpsertDesiredWorkerLabel :one
INSERT INTO "StepDesiredWorkerLabel" (
"createdAt",
+56 -36
View File
@@ -3,7 +3,7 @@ SELECT
sqlc.embed(workers),
ww."url" AS "webhookUrl",
ww."id" AS "webhookId",
(SELECT COUNT(*) FROM "WorkerSemaphoreSlot" wss WHERE wss."workerId" = workers."id" AND wss."stepRunId" IS NOT NULL) AS "slots"
(SELECT COUNT(*) FROM "StepRun" sr WHERE sr."workerId" = workers."id" AND sr."tenantId" = @tenantId) AS "slots"
FROM
"Worker" workers
LEFT JOIN
@@ -70,54 +70,33 @@ FROM generate_series(1, sqlc.narg('maxRuns')::int);
-- name: ListSemaphoreSlotsWithStateForWorker :many
SELECT
wss."id" as "slot",
sr."id" AS "stepRunId",
sr."status" AS "status",
s."actionId",
sr."timeoutAt" AS "timeoutAt",
sr."startedAt" AS "startedAt",
jr."workflowRunId" AS "workflowRunId"
FROM
"WorkerSemaphoreSlot" wss
JOIN
"Worker" w ON wss."workerId" = w."id"
LEFT JOIN
"StepRun" sr ON wss."stepRunId" = sr."id"
LEFT JOIN
"JobRun" jr ON sr."jobRunId" = jr."id"
LEFT JOIN
"Step" s ON sr."stepId" = s."id"
WHERE
wss."workerId" = @workerId::uuid AND
w."tenantId" = @tenantId::uuid
ORDER BY
wss."id" ASC;
-- name: ListRecentStepRunsForWorker :many
SELECT
sr."id" AS "id",
s."actionId",
sr."status" AS "status",
sr."createdAt" AS "createdAt",
sr."updatedAt" AS "updatedAt",
sr."finishedAt" AS "finishedAt",
sr."cancelledAt" AS "cancelledAt",
sr."timeoutAt" AS "timeoutAt",
sr."startedAt" AS "startedAt",
jr."workflowRunId" AS "workflowRunId"
FROM
"StepRun" sr
JOIN
"JobRun" jr ON sr."jobRunId" = jr."id"
JOIN
"Step" s ON sr."stepId" = s."id"
"Step" s ON sr."stepId" = s."id"
WHERE
sr."workerId" = @workerId::uuid
and sr."status" = ANY(cast(sqlc.narg('statuses')::text[] as "StepRunStatus"[]))
AND sr."tenantId" = @tenantId::uuid
ORDER BY
sr."startedAt" DESC
LIMIT 15;
AND sr."tenantId" = @tenantId::uuid;
-- name: ListRecentAssignedEventsForWorker :many
SELECT
"workerId",
"assignedStepRuns"
FROM
"WorkerAssignEvent"
WHERE
"workerId" = @workerId::uuid
ORDER BY "id" DESC
LIMIT
COALESCE(sqlc.narg('limit')::int, 100);
-- name: GetWorkerForEngine :one
SELECT
@@ -158,6 +137,12 @@ INSERT INTO "Worker" (
sqlc.narg('type')::"WorkerType"
) RETURNING *;
-- name: CreateWorkerCount :exec
INSERT INTO
"WorkerSemaphoreCount" ("workerId", "count")
VALUES
(@workerId::uuid, sqlc.narg('maxRuns')::int);
-- name: GetWorkerByWebhookId :one
SELECT
*
@@ -330,3 +315,38 @@ SET
"intValue" = sqlc.narg('intValue')::int,
"strValue" = sqlc.narg('strValue')::text
RETURNING *;
-- name: DeleteOldWorkers :one
WITH for_delete AS (
SELECT
"id"
FROM "Worker" w
WHERE
w."tenantId" = @tenantId::uuid AND
w."lastHeartbeatAt" < @lastHeartbeatBefore::timestamp
LIMIT sqlc.arg('limit') + 1
), expired_with_limit AS (
SELECT
for_delete."id" as "id"
FROM for_delete
LIMIT sqlc.arg('limit')
), has_more AS (
SELECT
CASE
WHEN COUNT(*) > sqlc.arg('limit') THEN TRUE
ELSE FALSE
END as has_more
FROM for_delete
), delete_slots AS (
DELETE FROM "WorkerSemaphoreSlot" wss
WHERE wss."workerId" IN (SELECT "id" FROM expired_with_limit)
RETURNING wss."id"
), delete_events AS (
DELETE FROM "WorkerAssignEvent" wae
WHERE wae."workerId" IN (SELECT "id" FROM expired_with_limit)
RETURNING wae."id"
)
DELETE FROM "Worker" w
WHERE w."id" IN (SELECT "id" FROM expired_with_limit)
RETURNING
(SELECT has_more FROM has_more) as has_more;
+96 -73
View File
@@ -73,6 +73,72 @@ func (q *Queries) CreateWorker(ctx context.Context, db DBTX, arg CreateWorkerPar
return &i, err
}
const createWorkerCount = `-- name: CreateWorkerCount :exec
INSERT INTO
"WorkerSemaphoreCount" ("workerId", "count")
VALUES
($1::uuid, $2::int)
`
type CreateWorkerCountParams struct {
Workerid pgtype.UUID `json:"workerid"`
MaxRuns pgtype.Int4 `json:"maxRuns"`
}
func (q *Queries) CreateWorkerCount(ctx context.Context, db DBTX, arg CreateWorkerCountParams) error {
_, err := db.Exec(ctx, createWorkerCount, arg.Workerid, arg.MaxRuns)
return err
}
const deleteOldWorkers = `-- name: DeleteOldWorkers :one
WITH for_delete AS (
SELECT
"id"
FROM "Worker" w
WHERE
w."tenantId" = $1::uuid AND
w."lastHeartbeatAt" < $2::timestamp
LIMIT $3 + 1
), expired_with_limit AS (
SELECT
for_delete."id" as "id"
FROM for_delete
LIMIT $3
), has_more AS (
SELECT
CASE
WHEN COUNT(*) > $3 THEN TRUE
ELSE FALSE
END as has_more
FROM for_delete
), delete_slots AS (
DELETE FROM "WorkerSemaphoreSlot" wss
WHERE wss."workerId" IN (SELECT "id" FROM expired_with_limit)
RETURNING wss."id"
), delete_events AS (
DELETE FROM "WorkerAssignEvent" wae
WHERE wae."workerId" IN (SELECT "id" FROM expired_with_limit)
RETURNING wae."id"
)
DELETE FROM "Worker" w
WHERE w."id" IN (SELECT "id" FROM expired_with_limit)
RETURNING
(SELECT has_more FROM has_more) as has_more
`
type DeleteOldWorkersParams struct {
Tenantid pgtype.UUID `json:"tenantid"`
Lastheartbeatbefore pgtype.Timestamp `json:"lastheartbeatbefore"`
Limit interface{} `json:"limit"`
}
func (q *Queries) DeleteOldWorkers(ctx context.Context, db DBTX, arg DeleteOldWorkersParams) (bool, error) {
row := db.QueryRow(ctx, deleteOldWorkers, arg.Tenantid, arg.Lastheartbeatbefore, arg.Limit)
var has_more bool
err := row.Scan(&has_more)
return has_more, err
}
const deleteWorker = `-- name: DeleteWorker :one
DELETE FROM
"Worker"
@@ -310,73 +376,39 @@ func (q *Queries) LinkServicesToWorker(ctx context.Context, db DBTX, arg LinkSer
return err
}
const listRecentStepRunsForWorker = `-- name: ListRecentStepRunsForWorker :many
const listRecentAssignedEventsForWorker = `-- name: ListRecentAssignedEventsForWorker :many
SELECT
sr."id" AS "id",
s."actionId",
sr."status" AS "status",
sr."createdAt" AS "createdAt",
sr."updatedAt" AS "updatedAt",
sr."finishedAt" AS "finishedAt",
sr."cancelledAt" AS "cancelledAt",
sr."timeoutAt" AS "timeoutAt",
sr."startedAt" AS "startedAt",
jr."workflowRunId" AS "workflowRunId"
"workerId",
"assignedStepRuns"
FROM
"StepRun" sr
JOIN
"JobRun" jr ON sr."jobRunId" = jr."id"
JOIN
"Step" s ON sr."stepId" = s."id"
"WorkerAssignEvent"
WHERE
sr."workerId" = $1::uuid
and sr."status" = ANY(cast($2::text[] as "StepRunStatus"[]))
AND sr."tenantId" = $3::uuid
ORDER BY
sr."startedAt" DESC
LIMIT 15
"workerId" = $1::uuid
ORDER BY "id" DESC
LIMIT
COALESCE($2::int, 100)
`
type ListRecentStepRunsForWorkerParams struct {
type ListRecentAssignedEventsForWorkerParams struct {
Workerid pgtype.UUID `json:"workerid"`
Statuses []string `json:"statuses"`
Tenantid pgtype.UUID `json:"tenantid"`
Limit pgtype.Int4 `json:"limit"`
}
type ListRecentStepRunsForWorkerRow struct {
ID pgtype.UUID `json:"id"`
ActionId string `json:"actionId"`
Status StepRunStatus `json:"status"`
CreatedAt pgtype.Timestamp `json:"createdAt"`
UpdatedAt pgtype.Timestamp `json:"updatedAt"`
FinishedAt pgtype.Timestamp `json:"finishedAt"`
CancelledAt pgtype.Timestamp `json:"cancelledAt"`
TimeoutAt pgtype.Timestamp `json:"timeoutAt"`
StartedAt pgtype.Timestamp `json:"startedAt"`
WorkflowRunId pgtype.UUID `json:"workflowRunId"`
type ListRecentAssignedEventsForWorkerRow struct {
WorkerId pgtype.UUID `json:"workerId"`
AssignedStepRuns []byte `json:"assignedStepRuns"`
}
func (q *Queries) ListRecentStepRunsForWorker(ctx context.Context, db DBTX, arg ListRecentStepRunsForWorkerParams) ([]*ListRecentStepRunsForWorkerRow, error) {
rows, err := db.Query(ctx, listRecentStepRunsForWorker, arg.Workerid, arg.Statuses, arg.Tenantid)
func (q *Queries) ListRecentAssignedEventsForWorker(ctx context.Context, db DBTX, arg ListRecentAssignedEventsForWorkerParams) ([]*ListRecentAssignedEventsForWorkerRow, error) {
rows, err := db.Query(ctx, listRecentAssignedEventsForWorker, arg.Workerid, arg.Limit)
if err != nil {
return nil, err
}
defer rows.Close()
var items []*ListRecentStepRunsForWorkerRow
var items []*ListRecentAssignedEventsForWorkerRow
for rows.Next() {
var i ListRecentStepRunsForWorkerRow
if err := rows.Scan(
&i.ID,
&i.ActionId,
&i.Status,
&i.CreatedAt,
&i.UpdatedAt,
&i.FinishedAt,
&i.CancelledAt,
&i.TimeoutAt,
&i.StartedAt,
&i.WorkflowRunId,
); err != nil {
var i ListRecentAssignedEventsForWorkerRow
if err := rows.Scan(&i.WorkerId, &i.AssignedStepRuns); err != nil {
return nil, err
}
items = append(items, &i)
@@ -389,7 +421,6 @@ func (q *Queries) ListRecentStepRunsForWorker(ctx context.Context, db DBTX, arg
const listSemaphoreSlotsWithStateForWorker = `-- name: ListSemaphoreSlotsWithStateForWorker :many
SELECT
wss."id" as "slot",
sr."id" AS "stepRunId",
sr."status" AS "status",
s."actionId",
@@ -397,20 +428,14 @@ SELECT
sr."startedAt" AS "startedAt",
jr."workflowRunId" AS "workflowRunId"
FROM
"WorkerSemaphoreSlot" wss
"StepRun" sr
JOIN
"Worker" w ON wss."workerId" = w."id"
LEFT JOIN
"StepRun" sr ON wss."stepRunId" = sr."id"
LEFT JOIN
"JobRun" jr ON sr."jobRunId" = jr."id"
LEFT JOIN
"Step" s ON sr."stepId" = s."id"
JOIN
"Step" s ON sr."stepId" = s."id"
WHERE
wss."workerId" = $1::uuid AND
w."tenantId" = $2::uuid
ORDER BY
wss."id" ASC
sr."workerId" = $1::uuid
AND sr."tenantId" = $2::uuid
`
type ListSemaphoreSlotsWithStateForWorkerParams struct {
@@ -419,13 +444,12 @@ type ListSemaphoreSlotsWithStateForWorkerParams struct {
}
type ListSemaphoreSlotsWithStateForWorkerRow struct {
Slot pgtype.UUID `json:"slot"`
StepRunId pgtype.UUID `json:"stepRunId"`
Status NullStepRunStatus `json:"status"`
ActionId pgtype.Text `json:"actionId"`
TimeoutAt pgtype.Timestamp `json:"timeoutAt"`
StartedAt pgtype.Timestamp `json:"startedAt"`
WorkflowRunId pgtype.UUID `json:"workflowRunId"`
StepRunId pgtype.UUID `json:"stepRunId"`
Status StepRunStatus `json:"status"`
ActionId string `json:"actionId"`
TimeoutAt pgtype.Timestamp `json:"timeoutAt"`
StartedAt pgtype.Timestamp `json:"startedAt"`
WorkflowRunId pgtype.UUID `json:"workflowRunId"`
}
func (q *Queries) ListSemaphoreSlotsWithStateForWorker(ctx context.Context, db DBTX, arg ListSemaphoreSlotsWithStateForWorkerParams) ([]*ListSemaphoreSlotsWithStateForWorkerRow, error) {
@@ -438,7 +462,6 @@ func (q *Queries) ListSemaphoreSlotsWithStateForWorker(ctx context.Context, db D
for rows.Next() {
var i ListSemaphoreSlotsWithStateForWorkerRow
if err := rows.Scan(
&i.Slot,
&i.StepRunId,
&i.Status,
&i.ActionId,
@@ -509,7 +532,7 @@ SELECT
workers.id, workers."createdAt", workers."updatedAt", workers."deletedAt", workers."tenantId", workers."lastHeartbeatAt", workers.name, workers."dispatcherId", workers."maxRuns", workers."isActive", workers."lastListenerEstablished", workers."isPaused", workers.type, workers."webhookId",
ww."url" AS "webhookUrl",
ww."id" AS "webhookId",
(SELECT COUNT(*) FROM "WorkerSemaphoreSlot" wss WHERE wss."workerId" = workers."id" AND wss."stepRunId" IS NOT NULL) AS "slots"
(SELECT COUNT(*) FROM "StepRun" sr WHERE sr."workerId" = workers."id" AND sr."tenantId" = $1) AS "slots"
FROM
"Worker" workers
LEFT JOIN
+499 -76
View File
@@ -323,7 +323,7 @@ func (s *stepRunEngineRepository) ListStepRuns(ctx context.Context, tenantId str
return res, err
}
func (s *stepRunEngineRepository) ListStepRunsToReassign(ctx context.Context, tenantId string) ([]*dbsqlc.GetStepRunForEngineRow, error) {
func (s *stepRunEngineRepository) ListStepRunsToReassign(ctx context.Context, tenantId string) ([]string, error) {
pgTenantId := sqlchelpers.UUIDFromStr(tenantId)
tx, err := s.pool.Begin(ctx)
@@ -335,16 +335,26 @@ func (s *stepRunEngineRepository) ListStepRunsToReassign(ctx context.Context, te
defer deferRollback(ctx, s.l, tx.Rollback)
// get the step run and make sure it's still in pending
stepRunIds, err := s.queries.ListStepRunsToReassign(ctx, tx, pgTenantId)
stepRunReassign, err := s.queries.ListStepRunsToReassign(ctx, tx, pgTenantId)
if err != nil {
return nil, err
}
stepRuns, err := s.queries.GetStepRunForEngine(ctx, tx, dbsqlc.GetStepRunForEngineParams{
Ids: stepRunIds,
TenantId: pgTenantId,
})
stepRunIds := make([]pgtype.UUID, len(stepRunReassign))
stepRunIdsStr := make([]string, len(stepRunReassign))
workerIds := make([]pgtype.UUID, len(stepRunReassign))
retryCounts := make([]int32, len(stepRunReassign))
for i, sr := range stepRunReassign {
stepRunIds[i] = sr.ID
stepRunIdsStr[i] = sqlchelpers.UUIDToStr(sr.ID)
workerIds[i] = sr.WorkerId
retryCounts[i] = sr.RetryCount
}
// release the semaphore slot
err = s.bulkReleaseWorkerSemaphoreQueueItems(ctx, tx, tenantId, workerIds, stepRunIds, retryCounts)
if err != nil {
return nil, err
@@ -356,13 +366,13 @@ func (s *stepRunEngineRepository) ListStepRunsToReassign(ctx context.Context, te
return nil, err
}
messages := make([]string, len(stepRuns))
reasons := make([]dbsqlc.StepRunEventReason, len(stepRuns))
severities := make([]dbsqlc.StepRunEventSeverity, len(stepRuns))
data := make([]map[string]interface{}, len(stepRuns))
messages := make([]string, len(stepRunIds))
reasons := make([]dbsqlc.StepRunEventReason, len(stepRunIds))
severities := make([]dbsqlc.StepRunEventSeverity, len(stepRunIds))
data := make([]map[string]interface{}, len(stepRunIds))
for i := range stepRuns {
workerId := sqlchelpers.UUIDToStr(stepRuns[i].SRWorkerId)
for i := range stepRunIds {
workerId := sqlchelpers.UUIDToStr(workerIds[i])
messages[i] = "Worker has become inactive"
reasons[i] = dbsqlc.StepRunEventReasonREASSIGNED
severities[i] = dbsqlc.StepRunEventSeverityCRITICAL
@@ -381,7 +391,7 @@ func (s *stepRunEngineRepository) ListStepRunsToReassign(ctx context.Context, te
data,
)
return stepRuns, nil
return stepRunIdsStr, nil
}
func (s *stepRunEngineRepository) ListStepRunsToTimeout(ctx context.Context, tenantId string) ([]*dbsqlc.GetStepRunForEngineRow, error) {
@@ -506,24 +516,12 @@ func (s *stepRunEngineRepository) ReleaseStepRunSemaphore(ctx context.Context, t
return fmt.Errorf("could not create step run event: %w", err)
}
_, err = s.queries.ReleaseWorkerSemaphoreSlot(ctx, tx, dbsqlc.ReleaseWorkerSemaphoreSlotParams{
Steprunid: stepRun.SRID,
Tenantid: stepRun.SRTenantId,
})
err = s.releaseWorkerSemaphoreSlot(ctx, tx, tenantId, stepRunId, int(stepRun.SRRetryCount))
if err != nil {
return fmt.Errorf("could not release worker semaphore slot: %w", err)
}
_, err = s.queries.UnlinkStepRunFromWorker(ctx, tx, dbsqlc.UnlinkStepRunFromWorkerParams{
Steprunid: stepRun.SRID,
Tenantid: stepRun.SRTenantId,
})
if err != nil {
return fmt.Errorf("could not unlink step run from worker: %w", err)
}
// Update the Step Run to release the semaphore
_, err = s.queries.UpdateStepRun(ctx, tx, dbsqlc.UpdateStepRunParams{
ID: stepRun.SRID,
@@ -603,6 +601,7 @@ func (s *stepRunEngineRepository) bulkStepRunsAssigned(
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
workerIdToStepRunIds := make(map[string][]string)
messages := make([]string, len(stepRunIds))
reasons := make([]dbsqlc.StepRunEventReason, len(stepRunIds))
severities := make([]dbsqlc.StepRunEventSeverity, len(stepRunIds))
@@ -610,12 +609,51 @@ func (s *stepRunEngineRepository) bulkStepRunsAssigned(
for i := range stepRunIds {
workerId := sqlchelpers.UUIDToStr(workerIds[i])
if _, ok := workerIdToStepRunIds[workerId]; !ok {
workerIdToStepRunIds[workerId] = make([]string, 0)
}
workerIdToStepRunIds[workerId] = append(workerIdToStepRunIds[workerId], sqlchelpers.UUIDToStr(stepRunIds[i]))
messages[i] = fmt.Sprintf("Assigned to worker %s", workerId)
reasons[i] = dbsqlc.StepRunEventReasonASSIGNED
severities[i] = dbsqlc.StepRunEventSeverityINFO
data[i] = map[string]interface{}{"worker_id": workerId}
}
orderedWorkerIds := make([]pgtype.UUID, 0)
assignedStepRuns := make([][]byte, 0)
for workerId, stepRunIds := range workerIdToStepRunIds {
orderedWorkerIds = append(orderedWorkerIds, sqlchelpers.UUIDFromStr(workerId))
assignedStepRunsBytes, _ := json.Marshal(stepRunIds) // nolint: errcheck
assignedStepRuns = append(assignedStepRuns, assignedStepRunsBytes)
}
tx, err := s.pool.Begin(ctx)
if err != nil {
s.l.Err(err).Msg("could not begin transaction")
return
}
defer deferRollback(ctx, s.l, tx.Rollback)
err = s.queries.CreateWorkerAssignEvents(ctx, tx, dbsqlc.CreateWorkerAssignEventsParams{
Workerids: orderedWorkerIds,
Assignedstepruns: assignedStepRuns,
})
if err != nil {
s.l.Err(err).Msg("could not create worker assign events")
return
}
if err := tx.Commit(ctx); err != nil {
s.l.Err(err).Msg("could not commit transaction")
return
}
deferredBulkStepRunEvents(
ctx,
s.l,
@@ -753,24 +791,7 @@ func (s *stepRunEngineRepository) UnassignStepRunFromWorker(ctx context.Context,
defer deferRollback(ctx, s.l, tx.Rollback)
_, err = s.queries.ReleaseWorkerSemaphoreSlot(ctx, tx, dbsqlc.ReleaseWorkerSemaphoreSlotParams{
Steprunid: pgStepRunId,
Tenantid: pgTenantId,
})
if err != nil && !errors.Is(err, pgx.ErrNoRows) {
return fmt.Errorf("could not release previous worker semaphore: %w", err)
}
_, err = s.queries.UnlinkStepRunFromWorker(ctx, tx, dbsqlc.UnlinkStepRunFromWorkerParams{
Steprunid: pgStepRunId,
Tenantid: pgTenantId,
})
if err != nil {
return fmt.Errorf("could not unlink step run from worker: %w", err)
}
_, err = s.queries.UpdateStepRun(ctx, tx, dbsqlc.UpdateStepRunParams{
updateStepRun, err := s.queries.UpdateStepRun(ctx, tx, dbsqlc.UpdateStepRunParams{
ID: pgStepRunId,
Tenantid: pgTenantId,
Status: dbsqlc.NullStepRunStatus{
@@ -783,6 +804,12 @@ func (s *stepRunEngineRepository) UnassignStepRunFromWorker(ctx context.Context,
return fmt.Errorf("could not update step run status: %w", err)
}
err = s.releaseWorkerSemaphoreSlot(ctx, tx, tenantId, sqlchelpers.UUIDToStr(updateStepRun.ID), int(updateStepRun.RetryCount))
if err != nil {
return fmt.Errorf("could not release previous worker semaphore: %w", err)
}
return tx.Commit(ctx)
})
}
@@ -844,6 +871,8 @@ func (s *stepRunEngineRepository) QueueStepRuns(ctx context.Context, qlp *zerolo
return emptyRes, fmt.Errorf("could not list queues: %w", err)
}
listQueuesFinishedAt := time.Now().UTC()
if len(queues) == 0 {
ql.Debug().Msg("no queues found")
return emptyRes, nil
@@ -924,6 +953,8 @@ func (s *stepRunEngineRepository) QueueStepRuns(ctx context.Context, qlp *zerolo
return emptyRes, nil
}
durationListQueueItems := time.Since(startedAt)
var duplicates []*scheduling.QueueItemWithOrder
var cancelled []*scheduling.QueueItemWithOrder
@@ -1011,13 +1042,56 @@ func (s *stepRunEngineRepository) QueueStepRuns(ctx context.Context, qlp *zerolo
}
}
slots, err := s.queries.ListSemaphoreSlotsToAssign(ctx, tx, dbsqlc.ListSemaphoreSlotsToAssignParams{
Tenantid: sqlchelpers.UUIDFromStr(tenantId),
// list workers to assign
workers, err := s.queries.GetWorkerDispatcherActions(ctx, tx, dbsqlc.GetWorkerDispatcherActionsParams{
Tenantid: pgTenantId,
Actionids: uniqueActionsArr,
})
if err != nil {
return emptyRes, fmt.Errorf("could not list semaphore slots to assign: %w", err)
return emptyRes, fmt.Errorf("could not get worker dispatcher actions: %w", err)
}
workerIds := make([]pgtype.UUID, 0, len(workers))
for _, worker := range workers {
workerIds = append(workerIds, worker.ID)
}
// join workers with counts
workerCounts, err := s.queries.GetWorkerSemaphoreCounts(ctx, tx, workerIds)
if err != nil {
return emptyRes, fmt.Errorf("could not get worker semaphore counts: %w", err)
}
workerToCounts := make(map[string]int)
for _, worker := range workerCounts {
workerToCounts[sqlchelpers.UUIDToStr(worker.WorkerId)] = int(worker.Count)
}
slots := make([]*scheduling.Slot, 0)
for _, worker := range workers {
workerId := sqlchelpers.UUIDToStr(worker.ID)
dispatcherId := sqlchelpers.UUIDToStr(worker.DispatcherId)
actionId := worker.ActionId
count, ok := workerToCounts[workerId]
if !ok {
continue
}
for i := 0; i < count; i++ {
slots = append(slots, &scheduling.Slot{
ID: fmt.Sprintf("%s-%d", workerId, i),
WorkerId: workerId,
DispatcherId: dispatcherId,
ActionId: actionId,
})
}
}
// GET UNIQUE STEP IDS
@@ -1043,8 +1117,8 @@ func (s *stepRunEngineRepository) QueueStepRuns(ctx context.Context, qlp *zerolo
if hasDesired {
// GET UNIQUE WORKER LABELS
workerIdSet := UniqueSet(slots, func(x *dbsqlc.ListSemaphoreSlotsToAssignRow) string {
return sqlchelpers.UUIDToStr(x.WorkerId)
workerIdSet := UniqueSet(slots, func(x *scheduling.Slot) string {
return x.WorkerId
})
for workerId := range workerIdSet {
@@ -1122,17 +1196,45 @@ func (s *stepRunEngineRepository) QueueStepRuns(ctx context.Context, qlp *zerolo
}
}()
_, err = s.queries.BulkAssignStepRunsToWorkers(ctx, tx, dbsqlc.BulkAssignStepRunsToWorkersParams{
startAssignTime := time.Now()
_, err = s.queries.UpdateStepRunsToAssigned(ctx, tx, dbsqlc.UpdateStepRunsToAssignedParams{
Steprunids: plan.StepRunIds,
Stepruntimeouts: plan.StepRunTimeouts,
Slotids: plan.SlotIds,
Workerids: plan.WorkerIds,
Stepruntimeouts: plan.StepRunTimeouts,
})
if err != nil {
return emptyRes, fmt.Errorf("could not bulk assign step runs to workers: %w", err)
}
finishedAssignTime := time.Since(startAssignTime)
// track the counts
for _, workerId := range plan.WorkerIds {
workerToCounts[sqlchelpers.UUIDToStr(workerId)]--
}
startUpdateCountTime := time.Now()
updateCountParams := dbsqlc.UpdateWorkerSemaphoreCountsParams{
Workerids: make([]pgtype.UUID, 0, len(workerToCounts)),
Counts: make([]int32, 0, len(workerToCounts)),
}
for workerId, count := range workerToCounts {
updateCountParams.Workerids = append(updateCountParams.Workerids, sqlchelpers.UUIDFromStr(workerId))
updateCountParams.Counts = append(updateCountParams.Counts, int32(count))
}
err = s.queries.UpdateWorkerSemaphoreCounts(ctx, tx, updateCountParams)
if err != nil {
return emptyRes, fmt.Errorf("could not update worker semaphore counts: %w", err)
}
finishUpdateCountTime := time.Since(startUpdateCountTime)
popItems := plan.QueuedItems
// we'd like to remove duplicates from the queue items as well
@@ -1148,6 +1250,8 @@ func (s *stepRunEngineRepository) QueueStepRuns(ctx context.Context, qlp *zerolo
popItems = append(popItems, item.QueueItem.ID)
}
startQueueTime := time.Now()
err = s.queries.BulkQueueItems(ctx, tx, popItems)
if err != nil {
@@ -1163,16 +1267,18 @@ func (s *stepRunEngineRepository) QueueStepRuns(ctx context.Context, qlp *zerolo
}
}
err = tx.Commit(ctx)
finishQueueTime := time.Since(startQueueTime)
defer s.bulkStepRunsAssigned(plan.StepRunIds, plan.WorkerIds)
defer s.bulkStepRunsUnassigned(plan.UnassignedStepRunIds)
defer s.bulkStepRunsRateLimited(plan.RateLimitedStepRuns)
err = tx.Commit(ctx)
if err != nil {
return emptyRes, fmt.Errorf("could not commit transaction: %w", err)
}
defer s.bulkStepRunsAssigned(plan.StepRunIds, plan.WorkerIds)
defer s.bulkStepRunsUnassigned(plan.UnassignedStepRunIds)
defer s.bulkStepRunsRateLimited(plan.RateLimitedStepRuns)
// update the cache with the min queued id
for name, qiId := range plan.MinQueuedIds {
s.cachedMinQueuedIds.Store(getCacheName(tenantId, name), qiId)
@@ -1189,7 +1295,7 @@ func (s *stepRunEngineRepository) QueueStepRuns(ctx context.Context, qlp *zerolo
timedOutStepRunsStr[i] = sqlchelpers.UUIDToStr(id)
}
defer printQueueDebugInfo(ql, tenantId, queues, queueItems, duplicates, cancelled, plan, slots, startedAt)
defer printQueueDebugInfo(ql, tenantId, queues, queueItems, duplicates, cancelled, plan, slots, startedAt, listQueuesFinishedAt.Sub(startedAt), durationListQueueItems, finishedAssignTime, finishUpdateCountTime, finishQueueTime)
return repository.QueueStepRunsResult{
Queued: plan.QueuedStepRuns,
@@ -1198,6 +1304,109 @@ func (s *stepRunEngineRepository) QueueStepRuns(ctx context.Context, qlp *zerolo
}, nil
}
// UpdateWorkerSemaphoreCounts drains up to 100 items from the
// WORKER_SEMAPHORE_COUNT internal queue for the tenant, applies each item's
// increment to the current worker semaphore counts, marks the items processed,
// and persists the adjusted counts — all within a single transaction.
// It returns true when a full batch (100 items) was read, signaling the caller
// that more items may remain to process. The qlp logger parameter is currently
// unused in this body.
func (s *stepRunEngineRepository) UpdateWorkerSemaphoreCounts(ctx context.Context, qlp *zerolog.Logger, tenantId string) (bool, error) {
	ctx, span := telemetry.NewSpan(ctx, "update-worker-semaphore-counts-database")
	defer span.End()

	pgTenantId := sqlchelpers.UUIDFromStr(tenantId)

	// batch size: also used below to report whether more items may remain
	limit := 100

	tx, err := s.pool.Begin(ctx)

	if err != nil {
		return false, err
	}

	defer deferRollback(ctx, s.l, tx.Rollback)

	// pull a batch of unprocessed semaphore-count items from the internal queue
	queueItems, err := s.queries.ListInternalQueueItems(ctx, tx, dbsqlc.ListInternalQueueItemsParams{
		Tenantid: pgTenantId,
		Queue:    dbsqlc.InternalQueueWORKERSEMAPHORECOUNT,
		Limit: pgtype.Int4{
			Int32: int32(limit),
			Valid: true,
		},
	})

	if err != nil {
		return false, fmt.Errorf("could not list queues: %w", err)
	}

	// decode each queue item's JSON payload into (workerId, stepRunId, inc)
	data, err := internalQueueItemDataToWorkerSemaphoreQueueData(queueItems)

	if err != nil {
		return false, fmt.Errorf("could not convert internal queue item data to worker semaphore queue data: %w", err)
	}

	// collect the distinct worker ids referenced by this batch
	uniqueWorkerIds := make(map[string]bool)

	for _, item := range data {
		uniqueWorkerIds[item.WorkerId] = true
	}

	workerIds := make([]pgtype.UUID, 0, len(uniqueWorkerIds))

	for workerId := range uniqueWorkerIds {
		workerIds = append(workerIds, sqlchelpers.UUIDFromStr(workerId))
	}

	// read the current semaphore count for each affected worker
	workerCounts, err := s.queries.GetWorkerSemaphoreCounts(ctx, tx, workerIds)

	if err != nil {
		return false, fmt.Errorf("could not get worker semaphore counts: %w", err)
	}

	workerToCounts := make(map[string]int)

	for _, worker := range workerCounts {
		workerToCounts[sqlchelpers.UUIDToStr(worker.WorkerId)] = int(worker.Count)
	}

	// append the semaphore queue items to the worker counts
	for _, item := range data {
		workerToCounts[item.WorkerId] += item.Inc
	}

	qiIds := make([]int64, 0, len(data))

	for _, item := range queueItems {
		qiIds = append(qiIds, item.ID)
	}

	// update the processed semaphore queue items
	err = s.queries.MarkInternalQueueItemsProcessed(ctx, tx, qiIds)

	if err != nil {
		return false, fmt.Errorf("could not mark worker semaphore queue items processed: %w", err)
	}

	// write the adjusted counts back in bulk
	updateCountParams := dbsqlc.UpdateWorkerSemaphoreCountsParams{
		Workerids: make([]pgtype.UUID, 0, len(workerToCounts)),
		Counts:    make([]int32, 0, len(workerToCounts)),
	}

	for workerId, count := range workerToCounts {
		updateCountParams.Workerids = append(updateCountParams.Workerids, sqlchelpers.UUIDFromStr(workerId))
		updateCountParams.Counts = append(updateCountParams.Counts, int32(count))
	}

	err = s.queries.UpdateWorkerSemaphoreCounts(ctx, tx, updateCountParams)

	if err != nil {
		return false, fmt.Errorf("could not update worker semaphore counts: %w", err)
	}

	err = tx.Commit(ctx)

	if err != nil {
		return false, fmt.Errorf("could not commit transaction: %w", err)
	}

	// a full batch implies there may be more items waiting on the queue
	return len(queueItems) == limit, nil
}
func (s *stepRunEngineRepository) CleanupQueueItems(ctx context.Context, tenantId string) error {
// setup telemetry
ctx, span := telemetry.NewSpan(ctx, "cleanup-queue-items-database")
@@ -1267,6 +1476,75 @@ func (s *stepRunEngineRepository) CleanupQueueItems(ctx context.Context, tenantI
return nil
}
// CleanupInternalQueueItems deletes processed internal queue items for a
// tenant in batches of 1000, walking id ranges from the minimum to the
// maximum processed id so a single large DELETE does not run unbounded.
// It returns nil when there is nothing to clean up, and stops early if the
// context is cancelled.
func (s *stepRunEngineRepository) CleanupInternalQueueItems(ctx context.Context, tenantId string) error {
	// setup telemetry
	ctx, span := telemetry.NewSpan(ctx, "cleanup-internal-queue-items-database")
	defer span.End()

	pgTenantId := sqlchelpers.UUIDFromStr(tenantId)

	// get the min and max processed queue item ids; no rows means nothing to do
	minMax, err := s.queries.GetMinMaxProcessedInternalQueueItems(ctx, s.pool, pgTenantId)

	if err != nil {
		if errors.Is(err, pgx.ErrNoRows) {
			return nil
		}

		return fmt.Errorf("could not get min max processed queue items: %w", err)
	}

	if minMax == nil {
		return nil
	}

	minId := minMax.MinId
	maxId := minMax.MaxId

	if maxId == 0 {
		return nil
	}

	// iterate until we have no more queue items to process
	var batchSize int64 = 1000
	var currBatch int64

	for {
		if ctx.Err() != nil {
			return ctx.Err()
		}

		currBatch++

		currMax := minId + batchSize*currBatch

		if currMax > maxId {
			currMax = maxId
		}

		// delete the next batch of queue items, bounded by the clamped currMax
		// (the original passed the unclamped value, ignoring the clamp above)
		err := s.queries.CleanupInternalQueueItems(ctx, s.pool, dbsqlc.CleanupInternalQueueItemsParams{
			Minid:    minId,
			Maxid:    currMax,
			Tenantid: pgTenantId,
		})

		if err != nil {
			if errors.Is(err, pgx.ErrNoRows) {
				return nil
			}

			return fmt.Errorf("could not cleanup queue items: %w", err)
		}

		if currMax == maxId {
			break
		}
	}

	return nil
}
func (s *stepRunEngineRepository) UpdateStepRun(ctx context.Context, tenantId, stepRunId string, opts *repository.UpdateStepRunOpts) (*dbsqlc.GetStepRunForEngineRow, *repository.StepRunUpdateInfo, error) {
ctx, span := telemetry.NewSpan(ctx, "update-step-run")
defer span.End()
@@ -1472,19 +1750,6 @@ func (s *stepRunEngineRepository) PreflightCheckReplayStepRun(ctx context.Contex
return nil
}
func (s *stepRunEngineRepository) UnlinkStepRunFromWorker(ctx context.Context, tenantId, stepRunId string) error {
_, err := s.queries.UnlinkStepRunFromWorker(ctx, s.pool, dbsqlc.UnlinkStepRunFromWorkerParams{
Steprunid: sqlchelpers.UUIDFromStr(stepRunId),
Tenantid: sqlchelpers.UUIDFromStr(tenantId),
})
if err != nil {
return fmt.Errorf("could not unlink step run from worker: %w", err)
}
return nil
}
func (s *stepRunEngineRepository) UpdateStepRunOverridesData(ctx context.Context, tenantId, stepRunId string, opts *repository.UpdateStepRunOverridesDataOpts) ([]byte, error) {
if err := s.v.Validate(opts); err != nil {
return nil, err
@@ -1897,12 +2162,9 @@ func (s *stepRunEngineRepository) updateStepRunCore(
// we must have actually updated the status to a different state
string(innerStepRun.SRStatus) != string(updateStepRun.Status) {
_, err = s.queries.ReleaseWorkerSemaphoreSlot(ctx, tx, dbsqlc.ReleaseWorkerSemaphoreSlotParams{
Steprunid: updateStepRun.ID,
Tenantid: sqlchelpers.UUIDFromStr(tenantId),
})
err = s.releaseWorkerSemaphoreSlot(ctx, tx, tenantId, sqlchelpers.UUIDToStr(updateStepRun.ID), int(innerStepRun.SRRetryCount))
if err != nil && !errors.Is(err, pgx.ErrNoRows) {
if err != nil {
return nil, fmt.Errorf("could not release worker semaphore: %w", err)
}
}
@@ -2156,6 +2418,136 @@ func (s *stepRunEngineRepository) removeCancelledStepRuns(ctx context.Context, t
return remaining, cancelled, nil
}
// releaseWorkerSemaphoreSlot unsets the worker id on a step run and, when a
// worker was previously assigned, enqueues an internal queue item that
// increments that worker's semaphore count by one.
func (s *stepRunEngineRepository) releaseWorkerSemaphoreSlot(ctx context.Context, tx pgx.Tx, tenantId, stepRunId string, retryCount int) error {
	stepRunUUID := sqlchelpers.UUIDFromStr(stepRunId)

	previousWorker, err := s.queries.UpdateStepRunUnsetWorkerId(ctx, tx, dbsqlc.UpdateStepRunUnsetWorkerIdParams{
		Steprunid: stepRunUUID,
		Tenantid:  sqlchelpers.UUIDFromStr(tenantId),
	})

	if err != nil {
		return err
	}

	// no previously-assigned worker means there is no slot to release
	if !previousWorker.Valid {
		return nil
	}

	// add a release event to the internal queue for the old worker
	return s.bulkReleaseWorkerSemaphoreQueueItems(
		ctx,
		tx,
		tenantId,
		[]pgtype.UUID{previousWorker},
		[]pgtype.UUID{stepRunUUID},
		[]int32{int32(retryCount)},
	)
}
// workerSemaphoreQueueData is the JSON payload stored on WORKER_SEMAPHORE_COUNT
// internal queue items, describing a single semaphore count adjustment for a
// worker triggered by a step run.
type workerSemaphoreQueueData struct {
	WorkerId  string `json:"worker_id"`
	StepRunId string `json:"step_run_id"`

	// Inc is what to increment the semaphore count by (-1 for assignment, 1 for release)
	Inc int `json:"inc"`
}
// bulkReleaseWorkerSemaphoreQueueItems enqueues one "release" item per
// (workerId, stepRunId) pair on the WORKER_SEMAPHORE_COUNT internal queue.
// Each item increments the worker's semaphore count by 1 and carries a unique
// key of the form "<stepRunId>:<retryCount>:release" so a slot is released at
// most once per retry attempt.
func (s *stepRunEngineRepository) bulkReleaseWorkerSemaphoreQueueItems(
	ctx context.Context,
	tx pgx.Tx,
	tenantId string,
	workerIds []pgtype.UUID,
	stepRunIds []pgtype.UUID,
	retryCounts []int32,
) error {
	// all three slices are indexed together below, so they must be the same
	// length (the original only checked workerIds vs stepRunIds, leaving an
	// index-out-of-range panic possible on retryCounts)
	if len(workerIds) != len(stepRunIds) || len(workerIds) != len(retryCounts) {
		return fmt.Errorf("workerIds, stepRunIds and retryCounts must be the same length")
	}

	insertData := make([]any, len(workerIds))
	uniqueKeys := make([]string, len(workerIds))

	for i, workerId := range workerIds {
		insertData[i] = workerSemaphoreQueueData{
			WorkerId:  sqlchelpers.UUIDToStr(workerId),
			StepRunId: sqlchelpers.UUIDToStr(stepRunIds[i]),
			Inc:       1,
		}

		uniqueKeys[i] = fmt.Sprintf(
			"%s:%d:release",
			sqlchelpers.UUIDToStr(stepRunIds[i]),
			retryCounts[i],
		)
	}

	return s.bulkInsertInternalQueueItem(
		ctx,
		tx,
		tenantId,
		dbsqlc.InternalQueueWORKERSEMAPHORECOUNT,
		insertData,
		uniqueKeys,
	)
}
// internalQueueItemDataToWorkerSemaphoreQueueData decodes the JSON Data
// payload of each internal queue item into a workerSemaphoreQueueData value,
// returning the first unmarshal error encountered.
func internalQueueItemDataToWorkerSemaphoreQueueData(items []*dbsqlc.InternalQueueItem) ([]workerSemaphoreQueueData, error) {
	decoded := make([]workerSemaphoreQueueData, len(items))

	for i := range items {
		if err := json.Unmarshal(items[i].Data, &decoded[i]); err != nil {
			return nil, err
		}
	}

	return decoded, nil
}
// bulkInsertInternalQueueItem JSON-encodes each data payload and inserts the
// resulting items onto the given internal queue for the tenant, with
// uniqueKeys used for deduplication.
func (s *stepRunEngineRepository) bulkInsertInternalQueueItem(
	ctx context.Context,
	tx pgx.Tx,
	tenantId string,
	queue dbsqlc.InternalQueue,
	data []any,
	uniqueKeys []string,
) error {
	// serialize every payload up front so a marshal failure aborts the insert
	payloads := make([][]byte, len(data))

	for i := range data {
		encoded, err := json.Marshal(data[i])

		if err != nil {
			return err
		}

		payloads[i] = encoded
	}

	return s.queries.CreateInternalQueueItemsBulk(ctx, tx, dbsqlc.CreateInternalQueueItemsBulkParams{
		Tenantid:   sqlchelpers.UUIDFromStr(tenantId),
		Queue:      queue,
		Datas:      payloads,
		Uniquekeys: uniqueKeys,
	})
}
// removes duplicates from a slice of queue items by step run id
func removeDuplicates(qis []*scheduling.QueueItemWithOrder) ([]*scheduling.QueueItemWithOrder, []*scheduling.QueueItemWithOrder) {
encountered := map[string]bool{}
@@ -2195,6 +2587,22 @@ func (r *stepRunEngineRepository) prepareTx(ctx context.Context, timeoutMs int)
}, nil
}
// debugInfo mirrors the fields logged by printQueueDebugInfo: per-pass queue
// scheduling statistics and timing breakdowns for a tenant's queue run.
// NOTE(review): this struct is not referenced by printQueueDebugInfo itself,
// which logs via zerolog fields — confirm whether it is used elsewhere.
type debugInfo struct {
	NumQueues             int    `json:"num_queues"`
	TotalStepRuns         int    `json:"total_step_runs"`
	TotalStepRunsAssigned int    `json:"total_step_runs_assigned"`
	TotalSlots            int    `json:"total_slots"`
	NumDuplicates         int    `json:"num_duplicates"`
	NumCancelled          int    `json:"num_cancelled"`
	TotalDuration         string `json:"total_duration"`

	DurationListQueues                 string `json:"duration_list_queues"`
	DurationListQueueItems             string `json:"duration_list_queue_items"`
	DurationProcessSemaphoreQueueItems string `json:"duration_process_semaphore_queue_items"`
	DurationAssignQueueItems           string `json:"duration_assign_queue_items"`
	DurationPopQueueItems              string `json:"duration_pop_queue_items"`

	TenantId string `json:"tenant_id"`
}
func printQueueDebugInfo(
l zerolog.Logger,
tenantId string,
@@ -2203,8 +2611,13 @@ func printQueueDebugInfo(
duplicates []*scheduling.QueueItemWithOrder,
cancelled []*scheduling.QueueItemWithOrder,
plan scheduling.SchedulePlan,
slots []*dbsqlc.ListSemaphoreSlotsToAssignRow,
slots []*scheduling.Slot,
startedAt time.Time,
durationListQueues time.Duration,
durationListQueueItems time.Duration,
durationProcessSemaphoreQueueItems time.Duration,
durationAssignQueueItems time.Duration,
durationPopQueueItems time.Duration,
) {
duration := time.Since(startedAt)
@@ -2232,5 +2645,15 @@ func printQueueDebugInfo(
"num_cancelled", len(cancelled),
).Dur(
"total_duration", duration,
).Dur(
"duration_list_queues", durationListQueues,
).Dur(
"duration_list_queue_items", durationListQueueItems,
).Dur(
"duration_process_semaphore_queue_items", durationProcessSemaphoreQueueItems,
).Dur(
"duration_assign_queue_items", durationAssignQueueItems,
).Dur(
"duration_pop_queue_items", durationPopQueueItems,
).Msg(msg)
}
+73 -8
View File
@@ -2,6 +2,7 @@ package prisma
import (
"context"
"encoding/json"
"errors"
"fmt"
"time"
@@ -52,7 +53,7 @@ func (w *workerAPIRepository) GetWorkerActionsByWorkerId(tenantid, workerId stri
})
}
func (w *workerAPIRepository) ListWorkerState(tenantId, workerId string, failed bool) ([]*dbsqlc.ListSemaphoreSlotsWithStateForWorkerRow, []*dbsqlc.ListRecentStepRunsForWorkerRow, error) {
func (w *workerAPIRepository) ListWorkerState(tenantId, workerId string, maxRuns int) ([]*dbsqlc.ListSemaphoreSlotsWithStateForWorkerRow, []*dbsqlc.GetStepRunForEngineRow, error) {
slots, err := w.queries.ListSemaphoreSlotsWithStateForWorker(context.Background(), w.pool, dbsqlc.ListSemaphoreSlotsWithStateForWorkerParams{
Workerid: sqlchelpers.UUIDFromStr(workerId),
Tenantid: sqlchelpers.UUIDFromStr(tenantId),
@@ -62,16 +63,53 @@ func (w *workerAPIRepository) ListWorkerState(tenantId, workerId string, failed
return nil, nil, fmt.Errorf("could not list worker slot state: %w", err)
}
var statuses = []string{"SUCCEEDED", "FAILED", "CANCELLED"}
// get recent assignment events
assignedEvents, err := w.queries.ListRecentAssignedEventsForWorker(context.Background(), w.pool, dbsqlc.ListRecentAssignedEventsForWorkerParams{
Workerid: sqlchelpers.UUIDFromStr(workerId),
Limit: pgtype.Int4{
Int32: int32(maxRuns),
Valid: true,
},
})
if failed {
statuses = []string{"FAILED", "CANCELLED"}
if err != nil {
return nil, nil, fmt.Errorf("could not list worker recent assigned events: %w", err)
}
recent, err := w.queries.ListRecentStepRunsForWorker(context.Background(), w.pool, dbsqlc.ListRecentStepRunsForWorkerParams{
Workerid: sqlchelpers.UUIDFromStr(workerId),
Tenantid: sqlchelpers.UUIDFromStr(tenantId),
Statuses: statuses,
// construct unique array of recent step run ids
uniqueStepRunIds := make(map[string]bool)
for _, event := range assignedEvents {
// unmarshal to string array
var stepRunIds []string
if err := json.Unmarshal(event.AssignedStepRuns, &stepRunIds); err != nil {
return nil, nil, fmt.Errorf("could not unmarshal assigned step runs: %w", err)
}
for _, stepRunId := range stepRunIds {
if _, ok := uniqueStepRunIds[stepRunId]; ok {
continue
}
// just do 20 for now
if len(uniqueStepRunIds) >= 20 {
break
}
uniqueStepRunIds[stepRunId] = true
}
}
stepRunIds := make([]pgtype.UUID, 0, len(uniqueStepRunIds))
for stepRunId := range uniqueStepRunIds {
stepRunIds = append(stepRunIds, sqlchelpers.UUIDFromStr(stepRunId))
}
recent, err := w.queries.GetStepRunForEngine(context.Background(), w.pool, dbsqlc.GetStepRunForEngineParams{
Ids: stepRunIds,
TenantId: sqlchelpers.UUIDFromStr(tenantId),
})
if err != nil {
@@ -263,6 +301,15 @@ func (w *workerEngineRepository) CreateNewWorker(ctx context.Context, tenantId s
return nil, nil, fmt.Errorf("could not create worker: %w", err)
}
err = w.queries.CreateWorkerCount(ctx, tx, dbsqlc.CreateWorkerCountParams{
Workerid: worker.ID,
MaxRuns: createParams.MaxRuns,
})
if err != nil {
return nil, nil, fmt.Errorf("could not create worker count: %w", err)
}
err = w.queries.StubWorkerSemaphoreSlots(ctx, tx, dbsqlc.StubWorkerSemaphoreSlotsParams{
Workerid: worker.ID,
MaxRuns: pgtype.Int4{
@@ -498,3 +545,21 @@ func (w *workerEngineRepository) UpsertWorkerLabels(ctx context.Context, workerI
return affinities, nil
}
// DeleteOldWorkers deletes up to 20 workers for the tenant whose last
// heartbeat is older than lastHeartbeatBefore. It returns true when the query
// reports more matching workers remain, so callers can loop until done; a
// pgx.ErrNoRows result is treated as "nothing left to delete".
func (w *workerEngineRepository) DeleteOldWorkers(ctx context.Context, tenantId string, lastHeartbeatBefore time.Time) (bool, error) {
	// receiver renamed r -> w for consistency with the other
	// workerEngineRepository methods (e.g. UpsertWorkerLabels)
	hasMore, err := w.queries.DeleteOldWorkers(ctx, w.pool, dbsqlc.DeleteOldWorkersParams{
		Tenantid:            sqlchelpers.UUIDFromStr(tenantId),
		Lastheartbeatbefore: sqlchelpers.TimestampFromTime(lastHeartbeatBefore),
		Limit:               20,
	})

	if err != nil {
		if errors.Is(err, pgx.ErrNoRows) {
			return false, nil
		}

		return false, err
	}

	return hasMore, nil
}
+5 -3
View File
@@ -167,7 +167,7 @@ type StepRunEngineRepository interface {
ListStepRuns(ctx context.Context, tenantId string, opts *ListStepRunsOpts) ([]*dbsqlc.GetStepRunForEngineRow, error)
// ListStepRunsToReassign returns a list of step runs which are in a reassignable state.
ListStepRunsToReassign(ctx context.Context, tenantId string) ([]*dbsqlc.GetStepRunForEngineRow, error)
ListStepRunsToReassign(ctx context.Context, tenantId string) ([]string, error)
ListStepRunsToTimeout(ctx context.Context, tenantId string) ([]*dbsqlc.GetStepRunForEngineRow, error)
@@ -180,8 +180,6 @@ type StepRunEngineRepository interface {
CreateStepRunEvent(ctx context.Context, tenantId, stepRunId string, opts CreateStepRunEventOpts) error
UnlinkStepRunFromWorker(ctx context.Context, tenantId, stepRunId string) error
ReleaseStepRunSemaphore(ctx context.Context, tenantId, stepRunId string) error
// UpdateStepRunOverridesData updates the overrides data field in the input for a step run. This returns the input
@@ -202,10 +200,14 @@ type StepRunEngineRepository interface {
// a pending state.
QueueStepRun(ctx context.Context, tenantId, stepRunId string, opts *UpdateStepRunOpts) (*dbsqlc.GetStepRunForEngineRow, error)
UpdateWorkerSemaphoreCounts(ctx context.Context, qlp *zerolog.Logger, tenantId string) (bool, error)
QueueStepRuns(ctx context.Context, ql *zerolog.Logger, tenantId string) (QueueStepRunsResult, error)
CleanupQueueItems(ctx context.Context, tenantId string) error
CleanupInternalQueueItems(ctx context.Context, tenantId string) error
ListStartableStepRuns(ctx context.Context, tenantId, jobRunId string, parentStepRunId *string) ([]*dbsqlc.GetStepRunForEngineRow, error)
ArchiveStepRunResult(ctx context.Context, tenantId, stepRunId string) error
+3 -1
View File
@@ -72,7 +72,7 @@ type WorkerAPIRepository interface {
ListWorkers(tenantId string, opts *ListWorkersOpts) ([]*dbsqlc.ListWorkersWithStepCountRow, error)
// ListRecentWorkerStepRuns lists recent step runs for a given worker
ListWorkerState(tenantId, workerId string, failed bool) ([]*dbsqlc.ListSemaphoreSlotsWithStateForWorkerRow, []*dbsqlc.ListRecentStepRunsForWorkerRow, error)
ListWorkerState(tenantId, workerId string, maxRuns int) ([]*dbsqlc.ListSemaphoreSlotsWithStateForWorkerRow, []*dbsqlc.GetStepRunForEngineRow, error)
// GetWorkerActionsByWorkerId returns a list of actions for a worker
GetWorkerActionsByWorkerId(tenantid, workerId string) ([]pgtype.Text, error)
@@ -111,4 +111,6 @@ type WorkerEngineRepository interface {
UpdateWorkerActiveStatus(ctx context.Context, tenantId, workerId string, isActive bool, timestamp time.Time) (*dbsqlc.Worker, error)
UpsertWorkerLabels(ctx context.Context, workerId pgtype.UUID, opts []UpsertWorkerLabelOpts) ([]*dbsqlc.WorkerLabel, error)
DeleteOldWorkers(ctx context.Context, tenantId string, lastHeartbeatBefore time.Time) (bool, error)
}
+5 -6
View File
@@ -6,14 +6,13 @@ import (
"github.com/jackc/pgx/v5/pgtype"
"github.com/hatchet-dev/hatchet/pkg/repository"
"github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc"
"github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers"
)
type SchedulePlan struct {
StepRunIds []pgtype.UUID
StepRunTimeouts []string
SlotIds []pgtype.UUID
SlotIds []string
WorkerIds []pgtype.UUID
UnassignedStepRunIds []pgtype.UUID
QueuedStepRuns []repository.QueuedStepRun
@@ -61,16 +60,16 @@ func (plan *SchedulePlan) HandleRateLimited(qi *QueueItemWithOrder) {
plan.RateLimitedStepRuns = append(plan.RateLimitedStepRuns, qi.StepRunId)
}
func (plan *SchedulePlan) AssignQiToSlot(qi *QueueItemWithOrder, slot *dbsqlc.ListSemaphoreSlotsToAssignRow) {
func (plan *SchedulePlan) AssignQiToSlot(qi *QueueItemWithOrder, slot *Slot) {
plan.StepRunIds = append(plan.StepRunIds, qi.StepRunId)
plan.StepRunTimeouts = append(plan.StepRunTimeouts, qi.StepTimeout.String)
plan.SlotIds = append(plan.SlotIds, slot.ID)
plan.WorkerIds = append(plan.WorkerIds, slot.WorkerId)
plan.WorkerIds = append(plan.WorkerIds, sqlchelpers.UUIDFromStr(slot.WorkerId))
plan.QueuedItems = append(plan.QueuedItems, qi.ID)
plan.QueuedStepRuns = append(plan.QueuedStepRuns, repository.QueuedStepRun{
StepRunId: sqlchelpers.UUIDToStr(qi.StepRunId),
WorkerId: sqlchelpers.UUIDToStr(slot.WorkerId),
DispatcherId: sqlchelpers.UUIDToStr(slot.DispatcherId),
WorkerId: slot.WorkerId,
DispatcherId: slot.DispatcherId,
})
}
+9 -2
View File
@@ -18,9 +18,16 @@ type QueueItemWithOrder struct {
Order int
}
// Slot represents a single assignable capacity unit on a worker. Workers with
// N free semaphore slots contribute N Slot values to the scheduler, each with
// a synthetic ID of the form "<workerId>-<i>" (see slot construction in
// QueueStepRuns).
type Slot struct {
	// ID is a synthetic identifier unique among the slots generated for a pass.
	ID string
	// WorkerId is the worker this slot belongs to (string-form UUID).
	WorkerId string
	// DispatcherId is the dispatcher serving the worker (string-form UUID).
	DispatcherId string
	// ActionId is the action the worker can run in this slot.
	ActionId string
}
func GeneratePlan(
ctx context.Context,
slots []*dbsqlc.ListSemaphoreSlotsToAssignRow,
slots []*Slot,
uniqueActionsArr []string,
queueItems []*QueueItemWithOrder,
stepRateUnits map[string]map[string]int32,
@@ -34,7 +41,7 @@ func GeneratePlan(
plan := SchedulePlan{
StepRunIds: make([]pgtype.UUID, 0),
StepRunTimeouts: make([]string, 0),
SlotIds: make([]pgtype.UUID, 0),
SlotIds: make([]string, 0),
WorkerIds: make([]pgtype.UUID, 0),
UnassignedStepRunIds: make([]pgtype.UUID, 0),
RateLimitedStepRuns: make([]pgtype.UUID, 0),
+1 -1
View File
@@ -15,7 +15,7 @@ import (
)
type args struct {
Slots []*dbsqlc.ListSemaphoreSlotsToAssignRow
Slots []*Slot
UniqueActionsArr []string
QueueItems []*QueueItemWithOrder
WorkerLabels map[string][]*dbsqlc.GetWorkerLabelsRow
+6 -6
View File
@@ -7,7 +7,7 @@ import (
type WorkerState struct {
workerId string
slots map[string]*dbsqlc.ListSemaphoreSlotsToAssignRow
slots map[string]*Slot
actionIds map[string]struct{}
labels []*dbsqlc.GetWorkerLabelsRow
stepWeights map[string]int
@@ -16,7 +16,7 @@ type WorkerState struct {
func NewWorkerState(workerId string, labels []*dbsqlc.GetWorkerLabelsRow) *WorkerState {
return &WorkerState{
workerId: workerId,
slots: make(map[string]*dbsqlc.ListSemaphoreSlotsToAssignRow),
slots: make(map[string]*Slot),
actionIds: make(map[string]struct{}),
labels: labels,
stepWeights: make(map[string]int),
@@ -27,8 +27,8 @@ func (w *WorkerState) AddStepWeight(stepId string, weight int) {
w.stepWeights[stepId] = weight
}
func (w *WorkerState) AddSlot(slot *dbsqlc.ListSemaphoreSlotsToAssignRow) {
w.slots[sqlchelpers.UUIDToStr(slot.ID)] = slot
func (w *WorkerState) AddSlot(slot *Slot) {
w.slots[slot.ID] = slot
w.actionIds[slot.ActionId] = struct{}{}
}
@@ -48,7 +48,7 @@ func (w *WorkerState) CanAssign(action string, stepId *string) bool {
return true
}
func (w *WorkerState) AssignSlot(qi *QueueItemWithOrder) (*dbsqlc.ListSemaphoreSlotsToAssignRow, bool) {
func (w *WorkerState) AssignSlot(qi *QueueItemWithOrder) (*Slot, bool) {
// if the actionId is not in the worker's actionIds, then we can't assign this slot
stepId := sqlchelpers.UUIDToStr(qi.StepId)
@@ -63,7 +63,7 @@ func (w *WorkerState) AssignSlot(qi *QueueItemWithOrder) (*dbsqlc.ListSemaphoreS
return slot, isEmpty
}
func (w *WorkerState) popRandomSlot(slots map[string]*dbsqlc.ListSemaphoreSlotsToAssignRow) *dbsqlc.ListSemaphoreSlotsToAssignRow {
func (w *WorkerState) popRandomSlot(slots map[string]*Slot) *Slot {
for id, slot := range slots {
delete(slots, id)
return slot
+34 -9
View File
@@ -1,6 +1,9 @@
package scheduling
import (
"fmt"
"sort"
"github.com/hatchet-dev/hatchet/pkg/repository/prisma/dbsqlc"
"github.com/hatchet-dev/hatchet/pkg/repository/prisma/sqlchelpers"
)
@@ -12,7 +15,7 @@ type WorkerStateManager struct {
}
func NewWorkerStateManager(
slots []*dbsqlc.ListSemaphoreSlotsToAssignRow,
slots []*Slot,
workerLabels map[string][]*dbsqlc.GetWorkerLabelsRow,
stepDesiredLabels map[string][]*dbsqlc.GetDesiredLabelsRow,
) *WorkerStateManager {
@@ -22,7 +25,7 @@ func NewWorkerStateManager(
// initialize worker states
for _, slot := range slots {
workerId := sqlchelpers.UUIDToStr(slot.WorkerId)
workerId := slot.WorkerId
if _, ok := workers[workerId]; !ok {
workers[workerId] = NewWorkerState(
@@ -30,11 +33,15 @@ func NewWorkerStateManager(
workerLabels[workerId],
)
}
workers[sqlchelpers.UUIDToStr(slot.WorkerId)].AddSlot(slot)
workers[workerId].AddSlot(slot)
}
// compute affinity weights
for stepId, desired := range stepDesiredLabels {
if len(desired) == 0 {
continue
}
for workerId, worker := range workers {
weight := ComputeWeight(desired, worker.labels)
@@ -69,10 +76,11 @@ func (wm *WorkerStateManager) HasEligibleWorkers(stepId string) bool {
return len(wm.workers) > 0
}
func (wm *WorkerStateManager) AttemptAssignSlot(qi *QueueItemWithOrder) *dbsqlc.ListSemaphoreSlotsToAssignRow {
func (wm *WorkerStateManager) AttemptAssignSlot(qi *QueueItemWithOrder) *Slot {
// STICKY WORKERS
if qi.Sticky.Valid {
fmt.Println("STICKY WORKER")
if worker, ok := wm.workers[sqlchelpers.UUIDToStr(qi.DesiredWorkerId)]; ok {
slot := wm.attemptAssignToWorker(worker, qi)
@@ -90,7 +98,8 @@ func (wm *WorkerStateManager) AttemptAssignSlot(qi *QueueItemWithOrder) *dbsqlc.
} // if we reached this with sticky we'll try to find an alternative worker
// AFFINITY WORKERS
if workers, ok := wm.workerStepWeights[sqlchelpers.UUIDToStr(qi.StepId)]; ok {
if workers, ok := wm.workerStepWeights[sqlchelpers.UUIDToStr(qi.StepId)]; ok && len(workers) > 0 {
fmt.Println("AFFINITY WORKER", workers)
for _, workerWW := range workers {
worker := wm.workers[workerWW.WorkerId]
@@ -113,10 +122,11 @@ func (wm *WorkerStateManager) AttemptAssignSlot(qi *QueueItemWithOrder) *dbsqlc.
}
// DEFAULT STRATEGY
workers := wm.workers
for _, worker := range workers {
workers := wm.getWorkersSortedBySlots()
slot := wm.attemptAssignToWorker(worker, qi)
for _, worker := range workers {
workerCp := worker
slot := wm.attemptAssignToWorker(workerCp, qi)
if slot == nil {
continue
@@ -128,7 +138,7 @@ func (wm *WorkerStateManager) AttemptAssignSlot(qi *QueueItemWithOrder) *dbsqlc.
return nil
}
func (wm *WorkerStateManager) attemptAssignToWorker(worker *WorkerState, qi *QueueItemWithOrder) *dbsqlc.ListSemaphoreSlotsToAssignRow {
func (wm *WorkerStateManager) attemptAssignToWorker(worker *WorkerState, qi *QueueItemWithOrder) *Slot {
slot, isEmpty := worker.AssignSlot(qi)
if slot == nil {
@@ -152,3 +162,18 @@ func (wm *WorkerStateManager) DropWorker(workerId string) {
// cleanup the step weights
// TODO
}
// getWorkersSortedBySlots returns every tracked worker ordered by the
// number of open slots, most slots first, so the default assignment
// strategy fills the least-loaded workers before the busiest ones.
// The sort is stable so workers with equal slot counts keep their
// relative order between calls.
func (wm *WorkerStateManager) getWorkersSortedBySlots() []*WorkerState {
	sorted := make([]*WorkerState, 0, len(wm.workers))

	for _, ws := range wm.workers {
		sorted = append(sorted, ws)
	}

	sort.SliceStable(sorted, func(a, b int) bool {
		return len(sorted[a].slots) > len(sorted[b].slots)
	})

	return sorted
}
@@ -0,0 +1,53 @@
-- CreateEnum
-- InternalQueue names the internal engine queues; the two values suggest
-- worker semaphore-count reconciliation and deferred step-run updates
-- (semantics live in the Go consumers — confirm there).
CREATE TYPE "InternalQueue" AS ENUM ('WORKER_SEMAPHORE_COUNT', 'STEP_RUN_UPDATE');
-- CreateTable
-- InternalQueueItem: a durable, per-tenant, priority-ordered queue entry.
-- "uniqueKey" plus the unique index below provides idempotent enqueueing
-- within a (tenant, queue) pair.
CREATE TABLE "InternalQueueItem" (
"id" BIGSERIAL NOT NULL,
"queue" "InternalQueue" NOT NULL,
"isQueued" BOOLEAN NOT NULL,
"data" JSONB,
"tenantId" UUID NOT NULL,
-- priority defaults to 1; a separate migration adds a CHECK constraining it to 1-4.
"priority" INTEGER NOT NULL DEFAULT 1,
"uniqueKey" TEXT,
CONSTRAINT "InternalQueueItem_pkey" PRIMARY KEY ("id")
);
-- CreateTable
-- WorkerSemaphoreCount: one row per worker holding its current semaphore count.
CREATE TABLE "WorkerSemaphoreCount" (
"workerId" UUID NOT NULL,
"count" INTEGER NOT NULL,
CONSTRAINT "WorkerSemaphoreCount_pkey" PRIMARY KEY ("workerId")
);
-- CreateTable
-- WorkerAssignEvent: append-only log of step-run assignments per worker.
CREATE TABLE "WorkerAssignEvent" (
"id" BIGSERIAL NOT NULL,
"workerId" UUID NOT NULL,
"assignedStepRuns" JSONB,
CONSTRAINT "WorkerAssignEvent_pkey" PRIMARY KEY ("id")
);
-- CreateIndex
-- Covering index for the dequeue path: filter on isQueued/tenant/queue,
-- order by priority DESC then insertion order (id).
CREATE INDEX "InternalQueueItem_isQueued_tenantId_queue_priority_id_idx" ON "InternalQueueItem"("isQueued", "tenantId", "queue", "priority" DESC, "id");
-- CreateIndex
CREATE UNIQUE INDEX "InternalQueueItem_tenantId_queue_uniqueKey_key" ON "InternalQueueItem"("tenantId", "queue", "uniqueKey");
-- CreateIndex
-- NOTE(review): this unique index (and the plain index below) duplicate the
-- primary key on "workerId" — likely redundant; Prisma emits them from the
-- @unique/@@index attributes. Harmless but worth confirming/removing upstream.
CREATE UNIQUE INDEX "WorkerSemaphoreCount_workerId_key" ON "WorkerSemaphoreCount"("workerId");
-- CreateIndex
CREATE INDEX "WorkerSemaphoreCount_workerId_idx" ON "WorkerSemaphoreCount"("workerId");
-- CreateIndex
CREATE INDEX "WorkerAssignEvent_workerId_id_idx" ON "WorkerAssignEvent"("workerId", "id");
-- AddForeignKey
-- Cascade deletes so semaphore counts and assign events die with their worker.
ALTER TABLE "WorkerSemaphoreCount" ADD CONSTRAINT "WorkerSemaphoreCount_workerId_fkey" FOREIGN KEY ("workerId") REFERENCES "Worker"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkerAssignEvent" ADD CONSTRAINT "WorkerAssignEvent_workerId_fkey" FOREIGN KEY ("workerId") REFERENCES "Worker"("id") ON DELETE CASCADE ON UPDATE CASCADE;
+49
View File
@@ -1353,6 +1353,29 @@ model QueueItem {
@@index([isQueued, tenantId, queue, priority(sort: Desc), id], name: "QueueItem_isQueued_priority_tenantId_queue_id_idx_2")
}
enum InternalQueue {
WORKER_SEMAPHORE_COUNT
STEP_RUN_UPDATE
}
model InternalQueueItem {
id BigInt @id @default(autoincrement()) @db.BigInt
queue InternalQueue
isQueued Boolean
data Json?
tenantId String @db.Uuid
// ALTER TABLE "InternalQueueItem" ADD CONSTRAINT "InternalQueueItem_priority_check" CHECK ("priority" >= 1 AND "priority" <= 4);
priority Int @default(1) // custom migration to set this between 1 and 4
uniqueKey String?
@@unique([tenantId, queue, uniqueKey])
@@index([isQueued, tenantId, queue, priority(sort: Desc), id])
}
enum StepRunEventReason {
REQUEUED_NO_WORKER
REQUEUED_RATE_LIMIT
@@ -1559,6 +1582,9 @@ model Worker {
slots WorkerSemaphoreSlot[]
semaphoreCount WorkerSemaphoreCount?
assignedEvents WorkerAssignEvent[]
webhook WebhookWorker? @relation(fields: [webhookId], references: [id], onDelete: SetNull, onUpdate: Cascade)
webhookId String? @unique @db.Uuid
@@ -1590,6 +1616,29 @@ model WorkerSemaphoreSlot {
@@index([workerId])
}
model WorkerSemaphoreCount {
// the parent semaphore
worker Worker @relation(fields: [workerId], references: [id], onDelete: Cascade, onUpdate: Cascade)
workerId String @id @unique @db.Uuid
// the count of the semaphore
count Int
@@index([workerId])
}
model WorkerAssignEvent {
id BigInt @id @default(autoincrement()) @db.BigInt
// the parent worker
worker Worker @relation(fields: [workerId], references: [id], onDelete: Cascade, onUpdate: Cascade)
workerId String @db.Uuid
assignedStepRuns Json?
@@index([workerId, id(order: Desc)])
}
model Service {
// base fields
id String @id @unique @default(uuid()) @db.Uuid
+3
View File
@@ -3,3 +3,6 @@
-- Modify "QueueItem" table
-- Enforce at the database level that queue priorities stay in the supported
-- 1-4 band (mirrors the "custom migration" comment on the Prisma schema's
-- priority field, which defaults to 1).
ALTER TABLE "QueueItem" ADD CONSTRAINT "QueueItem_priority_check" CHECK ("priority" >= 1 AND "priority" <= 4);
-- Modify "InternalQueueItem" table
ALTER TABLE "InternalQueueItem" ADD CONSTRAINT "InternalQueueItem_priority_check" CHECK ("priority" >= 1 AND "priority" <= 4);
+18
View File
@@ -0,0 +1,18 @@
-- Create enum type "InternalQueue"
-- Internal engine queue names; consumers are the Go queue workers — see the
-- scheduling package for the semantics of each value.
CREATE TYPE "InternalQueue" AS ENUM ('WORKER_SEMAPHORE_COUNT', 'STEP_RUN_UPDATE');
-- Create "InternalQueueItem" table
-- Durable per-tenant priority queue; the inline CHECK bounds priority to 1-4
-- and "uniqueKey" enables idempotent enqueues via the unique index below.
CREATE TABLE "InternalQueueItem" ("id" bigserial NOT NULL, "queue" "InternalQueue" NOT NULL, "isQueued" boolean NOT NULL, "data" jsonb NULL, "tenantId" uuid NOT NULL, "priority" integer NOT NULL DEFAULT 1, "uniqueKey" text NULL, PRIMARY KEY ("id"), CONSTRAINT "InternalQueueItem_priority_check" CHECK ((priority >= 1) AND (priority <= 4)));
-- Create index "InternalQueueItem_isQueued_tenantId_queue_priority_id_idx" to table: "InternalQueueItem"
-- Dequeue-path index: filter on isQueued/tenant/queue, order by priority DESC, id.
CREATE INDEX "InternalQueueItem_isQueued_tenantId_queue_priority_id_idx" ON "InternalQueueItem" ("isQueued", "tenantId", "queue", "priority" DESC, "id");
-- Create index "InternalQueueItem_tenantId_queue_uniqueKey_key" to table: "InternalQueueItem"
CREATE UNIQUE INDEX "InternalQueueItem_tenantId_queue_uniqueKey_key" ON "InternalQueueItem" ("tenantId", "queue", "uniqueKey");
-- Create "WorkerAssignEvent" table
-- Append-only per-worker log of assigned step runs; cascades on worker delete.
CREATE TABLE "WorkerAssignEvent" ("id" bigserial NOT NULL, "workerId" uuid NOT NULL, "assignedStepRuns" jsonb NULL, PRIMARY KEY ("id"), CONSTRAINT "WorkerAssignEvent_workerId_fkey" FOREIGN KEY ("workerId") REFERENCES "Worker" ("id") ON UPDATE CASCADE ON DELETE CASCADE);
-- Create index "WorkerAssignEvent_workerId_id_idx" to table: "WorkerAssignEvent"
CREATE INDEX "WorkerAssignEvent_workerId_id_idx" ON "WorkerAssignEvent" ("workerId", "id");
-- Create "WorkerSemaphoreCount" table
-- One row per worker tracking its semaphore count; cascades on worker delete.
CREATE TABLE "WorkerSemaphoreCount" ("workerId" uuid NOT NULL, "count" integer NOT NULL, PRIMARY KEY ("workerId"), CONSTRAINT "WorkerSemaphoreCount_workerId_fkey" FOREIGN KEY ("workerId") REFERENCES "Worker" ("id") ON UPDATE CASCADE ON DELETE CASCADE);
-- Create index "WorkerSemaphoreCount_workerId_idx" to table: "WorkerSemaphoreCount"
-- NOTE(review): both indexes below duplicate the primary key on "workerId";
-- they appear generator-emitted and are likely redundant — confirm upstream.
CREATE INDEX "WorkerSemaphoreCount_workerId_idx" ON "WorkerSemaphoreCount" ("workerId");
-- Create index "WorkerSemaphoreCount_workerId_key" to table: "WorkerSemaphoreCount"
CREATE UNIQUE INDEX "WorkerSemaphoreCount_workerId_key" ON "WorkerSemaphoreCount" ("workerId");
+2 -1
View File
@@ -1,4 +1,4 @@
h1:k4+bjV9IywiD14BrY6CJDucCajnWKXzahw2csnd2SCY=
h1:qP+uWZm8/CRqvpIvFo8zJWwXd9gLZNlytynZcz0uanY=
20240115180414_init.sql h1:Ef3ZyjAHkmJPdGF/dEWCahbwgcg6uGJKnDxW2JCRi2k=
20240122014727_v0_6_0.sql h1:o/LdlteAeFgoHJ3e/M4Xnghqt9826IE/Y/h0q95Acuo=
20240126235456_v0_7_0.sql h1:KiVzt/hXgQ6esbdC6OMJOOWuYEXmy1yeCpmsVAHTFKs=
@@ -50,3 +50,4 @@ h1:k4+bjV9IywiD14BrY6CJDucCajnWKXzahw2csnd2SCY=
20240823120430_0.42.4.sql h1:kdfT+J0j21YBvohnF5k+qtt+4YU6egi4fLIaReDucmc=
20240823204123_0.42.5.sql h1:6IPQwF82lVs2AXsb6zYb6QWIHEoOt8puEENtJvDCiZ8=
20240829142550_v0.43.2.sql h1:MDsYtU4RKp7p0GYHbLJQ2U10B2SgbZ9cFNzS2F1rTPw=
20240904120327_v0.44.0.sql h1:ieiJyKEglb5seYl5HGVEv8W10cNL2d5jBXc7U8/h9Nw=
+57
View File
@@ -1,6 +1,9 @@
-- CreateEnum
CREATE TYPE "ConcurrencyLimitStrategy" AS ENUM ('CANCEL_IN_PROGRESS', 'DROP_NEWEST', 'QUEUE_NEWEST', 'GROUP_ROUND_ROBIN');
-- CreateEnum
CREATE TYPE "InternalQueue" AS ENUM ('WORKER_SEMAPHORE_COUNT', 'STEP_RUN_UPDATE');
-- CreateEnum
CREATE TYPE "InviteLinkStatus" AS ENUM ('PENDING', 'ACCEPTED', 'REJECTED');
@@ -141,6 +144,19 @@ CREATE TABLE "GetGroupKeyRun" (
CONSTRAINT "GetGroupKeyRun_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "InternalQueueItem" (
"id" BIGSERIAL NOT NULL,
"queue" "InternalQueue" NOT NULL,
"isQueued" BOOLEAN NOT NULL,
"data" JSONB,
"tenantId" UUID NOT NULL,
"priority" INTEGER NOT NULL DEFAULT 1,
"uniqueKey" TEXT,
CONSTRAINT "InternalQueueItem_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "Job" (
"id" UUID NOT NULL,
@@ -654,6 +670,15 @@ CREATE TABLE "Worker" (
CONSTRAINT "Worker_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "WorkerAssignEvent" (
"id" BIGSERIAL NOT NULL,
"workerId" UUID NOT NULL,
"assignedStepRuns" JSONB,
CONSTRAINT "WorkerAssignEvent_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "WorkerLabel" (
"id" BIGSERIAL NOT NULL,
@@ -673,6 +698,14 @@ CREATE TABLE "WorkerSemaphore" (
"slots" INTEGER NOT NULL
);
-- CreateTable
CREATE TABLE "WorkerSemaphoreCount" (
"workerId" UUID NOT NULL,
"count" INTEGER NOT NULL,
CONSTRAINT "WorkerSemaphoreCount_pkey" PRIMARY KEY ("workerId")
);
-- CreateTable
CREATE TABLE "WorkerSemaphoreSlot" (
"id" UUID NOT NULL,
@@ -928,6 +961,12 @@ CREATE INDEX "GetGroupKeyRun_workerId_idx" ON "GetGroupKeyRun"("workerId" ASC);
-- CreateIndex
CREATE UNIQUE INDEX "GetGroupKeyRun_workflowRunId_key" ON "GetGroupKeyRun"("workflowRunId" ASC);
-- CreateIndex
CREATE INDEX "InternalQueueItem_isQueued_tenantId_queue_priority_id_idx" ON "InternalQueueItem"("isQueued" ASC, "tenantId" ASC, "queue" ASC, "priority" DESC, "id" ASC);
-- CreateIndex
CREATE UNIQUE INDEX "InternalQueueItem_tenantId_queue_uniqueKey_key" ON "InternalQueueItem"("tenantId" ASC, "queue" ASC, "uniqueKey" ASC);
-- CreateIndex
CREATE UNIQUE INDEX "Job_id_key" ON "Job"("id" ASC);
@@ -1132,6 +1171,9 @@ CREATE UNIQUE INDEX "Worker_id_key" ON "Worker"("id" ASC);
-- CreateIndex
CREATE UNIQUE INDEX "Worker_webhookId_key" ON "Worker"("webhookId" ASC);
-- CreateIndex
CREATE INDEX "WorkerAssignEvent_workerId_id_idx" ON "WorkerAssignEvent"("workerId" ASC, "id" ASC);
-- CreateIndex
CREATE INDEX "WorkerLabel_workerId_idx" ON "WorkerLabel"("workerId" ASC);
@@ -1141,6 +1183,12 @@ CREATE UNIQUE INDEX "WorkerLabel_workerId_key_key" ON "WorkerLabel"("workerId" A
-- CreateIndex
CREATE UNIQUE INDEX "WorkerSemaphore_workerId_key" ON "WorkerSemaphore"("workerId" ASC);
-- CreateIndex
CREATE INDEX "WorkerSemaphoreCount_workerId_idx" ON "WorkerSemaphoreCount"("workerId" ASC);
-- CreateIndex
CREATE UNIQUE INDEX "WorkerSemaphoreCount_workerId_key" ON "WorkerSemaphoreCount"("workerId" ASC);
-- CreateIndex
CREATE UNIQUE INDEX "WorkerSemaphoreSlot_id_key" ON "WorkerSemaphoreSlot"("id" ASC);
@@ -1468,12 +1516,18 @@ ALTER TABLE "Worker" ADD CONSTRAINT "Worker_tenantId_fkey" FOREIGN KEY ("tenantI
-- AddForeignKey
ALTER TABLE "Worker" ADD CONSTRAINT "Worker_webhookId_fkey" FOREIGN KEY ("webhookId") REFERENCES "WebhookWorker"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkerAssignEvent" ADD CONSTRAINT "WorkerAssignEvent_workerId_fkey" FOREIGN KEY ("workerId") REFERENCES "Worker"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkerLabel" ADD CONSTRAINT "WorkerLabel_workerId_fkey" FOREIGN KEY ("workerId") REFERENCES "Worker"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkerSemaphore" ADD CONSTRAINT "WorkerSemaphore_workerId_fkey" FOREIGN KEY ("workerId") REFERENCES "Worker"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkerSemaphoreCount" ADD CONSTRAINT "WorkerSemaphoreCount_workerId_fkey" FOREIGN KEY ("workerId") REFERENCES "Worker"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "WorkerSemaphoreSlot" ADD CONSTRAINT "WorkerSemaphoreSlot_stepRunId_fkey" FOREIGN KEY ("stepRunId") REFERENCES "StepRun"("id") ON DELETE CASCADE ON UPDATE CASCADE;
@@ -1593,3 +1647,6 @@ ALTER TABLE "_WorkflowToWorkflowTag" ADD CONSTRAINT "_WorkflowToWorkflowTag_B_fk
-- Modify "QueueItem" table
ALTER TABLE "QueueItem" ADD CONSTRAINT "QueueItem_priority_check" CHECK ("priority" >= 1 AND "priority" <= 4);
-- Modify "InternalQueueItem" table
ALTER TABLE "InternalQueueItem" ADD CONSTRAINT "InternalQueueItem_priority_check" CHECK ("priority" >= 1 AND "priority" <= 4);