feat: queued metrics, along with bug fixes for round robin queueing (#340)
@@ -178,3 +178,5 @@ ListSNSIntegrations:
  $ref: "./sns.yaml#/ListSNSIntegrations"
CreateSNSIntegrationRequest:
  $ref: "./sns.yaml#/CreateSNSIntegrationRequest"
WorkflowMetrics:
  $ref: "./workflow.yaml#/WorkflowMetrics"
@@ -50,6 +50,10 @@ EventWorkflowRunSummary:
      type: integer
      format: int64
      description: The number of running runs.
    queued:
      type: integer
      format: int64
      description: The number of queued runs.
    succeeded:
      type: integer
      format: int64
@@ -283,3 +283,13 @@ LinkGithubRepositoryRequest:
    - gitRepoName
    - gitRepoOwner
    - gitRepoBranch

WorkflowMetrics:
  type: object
  properties:
    groupKeyRunsCount:
      type: integer
      description: The number of runs for a specific group key (passed via filter)
    groupKeyCount:
      type: integer
      description: The total number of concurrency group keys.
@@ -87,6 +87,7 @@ WorkflowRunStatus:
    - SUCCEEDED
    - FAILED
    - CANCELLED
    - QUEUED

WorkflowRunStatusList:
  type: array
@@ -96,6 +96,8 @@ paths:
    $ref: "./paths/workflow/workflow.yaml#/workflowVersionDefinition"
  /api/v1/workflows/{workflow}/link-github:
    $ref: "./paths/workflow/workflow.yaml#/linkGithub"
  /api/v1/workflows/{workflow}/metrics:
    $ref: "./paths/workflow/workflow.yaml#/getMetrics"
  /api/v1/step-runs/{step-run}/create-pr:
    $ref: "./paths/workflow/workflow.yaml#/createPullRequest"
  /api/v1/step-runs/{step-run}/logs:
@@ -601,3 +601,58 @@ getDiff:
  summary: Get diff
  tags:
    - Workflow

getMetrics:
  get:
    x-resources: ["tenant", "workflow"]
    description: Get the metrics for a workflow version
    operationId: workflow:get:metrics
    parameters:
      - description: The workflow id
        in: path
        name: workflow
        required: true
        schema:
          type: string
          format: uuid
          minLength: 36
          maxLength: 36
      - description: A status of workflow runs to filter by
        in: query
        name: status
        required: false
        schema:
          $ref: "../../components/schemas/_index.yaml#/WorkflowRunStatus"
      - description: A group key to filter metrics by
        in: query
        name: groupKey
        required: false
        schema:
          type: string
    responses:
      "200":
        content:
          application/json:
            schema:
              $ref: "../../components/schemas/_index.yaml#/WorkflowMetrics"
        description: Successfully retrieved the workflow version metrics
      "400":
        content:
          application/json:
            schema:
              $ref: "../../components/schemas/_index.yaml#/APIErrors"
        description: A malformed or bad request
      "403":
        content:
          application/json:
            schema:
              $ref: "../../components/schemas/_index.yaml#/APIErrors"
        description: Forbidden
      "404":
        content:
          application/json:
            schema:
              $ref: "../../components/schemas/_index.yaml#/APIErrors"
        description: Not found
    summary: Get workflow metrics
    tags:
      - Workflow
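To sanity-check the new route end to end, a plain HTTP client is enough. The sketch below is illustrative only: the host, bearer token, workflow id, and group key are placeholders, and the response struct simply mirrors the WorkflowMetrics schema above.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// Mirrors the WorkflowMetrics schema; both fields are optional in the response.
type workflowMetrics struct {
	GroupKeyRunsCount *int `json:"groupKeyRunsCount,omitempty"`
	GroupKeyCount     *int `json:"groupKeyCount,omitempty"`
}

func main() {
	base := "http://localhost:8080"                    // placeholder host
	workflow := "00000000-0000-0000-0000-000000000000" // placeholder workflow id

	q := url.Values{}
	q.Set("status", "QUEUED")       // optional WorkflowRunStatus filter
	q.Set("groupKey", "customer-1") // optional concurrency group key filter

	req, err := http.NewRequest(http.MethodGet, base+"/api/v1/workflows/"+workflow+"/metrics?"+q.Encode(), nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer <token>") // placeholder credentials

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()

	var m workflowMetrics
	if err := json.NewDecoder(res.Body).Decode(&m); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", m)
}

Both query parameters may also be omitted, since status and groupKey are declared optional.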
44  api/v1/server/handlers/workflows/get_metrics.go  Normal file
@@ -0,0 +1,44 @@
package workflows

import (
	"errors"

	"github.com/labstack/echo/v4"

	"github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors"
	"github.com/hatchet-dev/hatchet/api/v1/server/oas/gen"
	"github.com/hatchet-dev/hatchet/internal/repository"
	"github.com/hatchet-dev/hatchet/internal/repository/prisma/db"
)

func (t *WorkflowService) WorkflowGetMetrics(ctx echo.Context, request gen.WorkflowGetMetricsRequestObject) (gen.WorkflowGetMetricsResponseObject, error) {
	tenant := ctx.Get("tenant").(*db.TenantModel)
	workflow := ctx.Get("workflow").(*db.WorkflowModel)

	opts := &repository.GetWorkflowMetricsOpts{}

	if request.Params.Status != nil {
		opts.Status = (*string)(request.Params.Status)
	}

	if request.Params.GroupKey != nil {
		opts.GroupKey = request.Params.GroupKey
	}

	metrics, err := t.config.APIRepository.Workflow().GetWorkflowMetrics(tenant.ID, workflow.ID, opts)

	if err != nil {
		if errors.Is(err, db.ErrNotFound) {
			return gen.WorkflowGetMetrics404JSONResponse(
				apierrors.NewAPIErrors("workflow not found"),
			), nil
		}

		return nil, err
	}

	return gen.WorkflowGetMetrics200JSONResponse(gen.WorkflowMetrics{
		GroupKeyCount:     &metrics.GroupKeyCount,
		GroupKeyRunsCount: &metrics.GroupKeyRunsCount,
	}), nil
}
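One line in the handler worth a note is opts.Status = (*string)(request.Params.Status): it relies on WorkflowRunStatus being a defined string type, so the pointer converts directly without a dereference. A standalone illustration (the type is redeclared locally just for this sketch):

package main

import "fmt"

type WorkflowRunStatus string

func main() {
	s := WorkflowRunStatus("QUEUED")
	p := &s

	// Legal because WorkflowRunStatus and string share an underlying type;
	// no copy or nil check is needed, and a nil pointer converts to nil.
	var sp *string = (*string)(p)
	fmt.Println(*sp) // QUEUED
}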
@@ -102,6 +102,7 @@ const (
	CANCELLED WorkflowRunStatus = "CANCELLED"
	FAILED    WorkflowRunStatus = "FAILED"
	PENDING   WorkflowRunStatus = "PENDING"
	QUEUED    WorkflowRunStatus = "QUEUED"
	RUNNING   WorkflowRunStatus = "RUNNING"
	SUCCEEDED WorkflowRunStatus = "SUCCEEDED"
)
@@ -262,6 +263,9 @@ type EventWorkflowRunSummary struct {
	// Pending The number of pending runs.
	Pending *int64 `json:"pending,omitempty"`

	// Queued The number of queued runs.
	Queued *int64 `json:"queued,omitempty"`

	// Running The number of running runs.
	Running *int64 `json:"running,omitempty"`
@@ -728,6 +732,15 @@ type WorkflowList struct {
	Rows *[]Workflow `json:"rows,omitempty"`
}

// WorkflowMetrics defines model for WorkflowMetrics.
type WorkflowMetrics struct {
	// GroupKeyCount The total number of concurrency group keys.
	GroupKeyCount *int `json:"groupKeyCount,omitempty"`

	// GroupKeyRunsCount The number of runs for a specific group key (passed via filter)
	GroupKeyRunsCount *int `json:"groupKeyRunsCount,omitempty"`
}

// WorkflowRun defines model for WorkflowRun.
type WorkflowRun struct {
	DisplayName *string `json:"displayName,omitempty"`
@@ -904,6 +917,15 @@ type WorkflowRunListParams struct {
	ParentStepRunId *openapi_types.UUID `form:"parentStepRunId,omitempty" json:"parentStepRunId,omitempty"`
}

// WorkflowGetMetricsParams defines parameters for WorkflowGetMetrics.
type WorkflowGetMetricsParams struct {
	// Status A status of workflow runs to filter by
	Status *WorkflowRunStatus `form:"status,omitempty" json:"status,omitempty"`

	// GroupKey A group key to filter metrics by
	GroupKey *string `form:"groupKey,omitempty" json:"groupKey,omitempty"`
}

// WorkflowRunCreateParams defines parameters for WorkflowRunCreate.
type WorkflowRunCreateParams struct {
	// Version The workflow version. If not supplied, the latest version is fetched.
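Roughly how a request like GET .../metrics?status=QUEUED&groupKey=customer-123 lands in this struct — a hand-rolled sketch, not the oapi-codegen runtime's actual binding code; it assumes net/url is imported and reuses the two types defined just above:

func bindMetricsParams(q url.Values) WorkflowGetMetricsParams {
	var params WorkflowGetMetricsParams

	// Both parameters are optional; absent keys leave the pointers nil.
	if v := q.Get("status"); v != "" {
		s := WorkflowRunStatus(v)
		params.Status = &s
	}
	if v := q.Get("groupKey"); v != "" {
		params.GroupKey = &v
	}

	return params
}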
@@ -1134,6 +1156,9 @@ type ServerInterface interface {
	// Link github repository
	// (POST /api/v1/workflows/{workflow}/link-github)
	WorkflowUpdateLinkGithub(ctx echo.Context, workflow openapi_types.UUID) error
	// Get workflow metrics
	// (GET /api/v1/workflows/{workflow}/metrics)
	WorkflowGetMetrics(ctx echo.Context, workflow openapi_types.UUID, params WorkflowGetMetricsParams) error
	// Trigger workflow run
	// (POST /api/v1/workflows/{workflow}/trigger)
	WorkflowRunCreate(ctx echo.Context, workflow openapi_types.UUID, params WorkflowRunCreateParams) error
@@ -2293,6 +2318,42 @@ func (w *ServerInterfaceWrapper) WorkflowUpdateLinkGithub(ctx echo.Context) erro
	return err
}

// WorkflowGetMetrics converts echo context to params.
func (w *ServerInterfaceWrapper) WorkflowGetMetrics(ctx echo.Context) error {
	var err error
	// ------------- Path parameter "workflow" -------------
	var workflow openapi_types.UUID

	err = runtime.BindStyledParameterWithLocation("simple", false, "workflow", runtime.ParamLocationPath, ctx.Param("workflow"), &workflow)
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter workflow: %s", err))
	}

	ctx.Set(BearerAuthScopes, []string{})

	ctx.Set(CookieAuthScopes, []string{})

	// Parameter object where we will unmarshal all parameters from the context
	var params WorkflowGetMetricsParams
	// ------------- Optional query parameter "status" -------------

	err = runtime.BindQueryParameter("form", true, false, "status", ctx.QueryParams(), &params.Status)
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter status: %s", err))
	}

	// ------------- Optional query parameter "groupKey" -------------

	err = runtime.BindQueryParameter("form", true, false, "groupKey", ctx.QueryParams(), &params.GroupKey)
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter groupKey: %s", err))
	}

	// Invoke the callback with all the unmarshaled arguments
	err = w.Handler.WorkflowGetMetrics(ctx, workflow, params)
	return err
}

// WorkflowRunCreate converts echo context to params.
func (w *ServerInterfaceWrapper) WorkflowRunCreate(ctx echo.Context) error {
	var err error
@@ -2463,6 +2524,7 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL
	router.DELETE(baseURL+"/api/v1/workflows/:workflow", wrapper.WorkflowDelete)
	router.GET(baseURL+"/api/v1/workflows/:workflow", wrapper.WorkflowGet)
	router.POST(baseURL+"/api/v1/workflows/:workflow/link-github", wrapper.WorkflowUpdateLinkGithub)
	router.GET(baseURL+"/api/v1/workflows/:workflow/metrics", wrapper.WorkflowGetMetrics)
	router.POST(baseURL+"/api/v1/workflows/:workflow/trigger", wrapper.WorkflowRunCreate)
	router.GET(baseURL+"/api/v1/workflows/:workflow/versions", wrapper.WorkflowVersionGet)
	router.GET(baseURL+"/api/v1/workflows/:workflow/versions/definition", wrapper.WorkflowVersionGetDefinition)
@@ -4465,6 +4527,51 @@ func (response WorkflowUpdateLinkGithub404JSONResponse) VisitWorkflowUpdateLinkG
	return json.NewEncoder(w).Encode(response)
}

type WorkflowGetMetricsRequestObject struct {
	Workflow openapi_types.UUID `json:"workflow"`
	Params   WorkflowGetMetricsParams
}

type WorkflowGetMetricsResponseObject interface {
	VisitWorkflowGetMetricsResponse(w http.ResponseWriter) error
}

type WorkflowGetMetrics200JSONResponse WorkflowMetrics

func (response WorkflowGetMetrics200JSONResponse) VisitWorkflowGetMetricsResponse(w http.ResponseWriter) error {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(200)

	return json.NewEncoder(w).Encode(response)
}

type WorkflowGetMetrics400JSONResponse APIErrors

func (response WorkflowGetMetrics400JSONResponse) VisitWorkflowGetMetricsResponse(w http.ResponseWriter) error {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(400)

	return json.NewEncoder(w).Encode(response)
}

type WorkflowGetMetrics403JSONResponse APIErrors

func (response WorkflowGetMetrics403JSONResponse) VisitWorkflowGetMetricsResponse(w http.ResponseWriter) error {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(403)

	return json.NewEncoder(w).Encode(response)
}

type WorkflowGetMetrics404JSONResponse APIErrors

func (response WorkflowGetMetrics404JSONResponse) VisitWorkflowGetMetricsResponse(w http.ResponseWriter) error {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(404)

	return json.NewEncoder(w).Encode(response)
}

type WorkflowRunCreateRequestObject struct {
	Workflow openapi_types.UUID `json:"workflow"`
	Params   WorkflowRunCreateParams
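Producing one of these responses from a handler is just constructing the value; the generated plumbing then invokes its visitor, which writes the header, status code, and JSON body. A minimal sketch with placeholder counts:

// Placeholder values for illustration only.
groupKeys, groupKeyRuns := 3, 12

resp := WorkflowGetMetrics200JSONResponse(WorkflowMetrics{
	GroupKeyCount:     &groupKeys,
	GroupKeyRunsCount: &groupKeyRuns,
})

// Inside the generated strict handler this becomes:
//   resp.VisitWorkflowGetMetricsResponse(ctx.Response())
// which sets Content-Type: application/json, writes 200, and encodes the body.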
@@ -4712,6 +4819,8 @@ type StrictServerInterface interface {

	WorkflowUpdateLinkGithub(ctx echo.Context, request WorkflowUpdateLinkGithubRequestObject) (WorkflowUpdateLinkGithubResponseObject, error)

	WorkflowGetMetrics(ctx echo.Context, request WorkflowGetMetricsRequestObject) (WorkflowGetMetricsResponseObject, error)

	WorkflowRunCreate(ctx echo.Context, request WorkflowRunCreateRequestObject) (WorkflowRunCreateResponseObject, error)

	WorkflowVersionGet(ctx echo.Context, request WorkflowVersionGetRequestObject) (WorkflowVersionGetResponseObject, error)
@@ -6164,6 +6273,32 @@ func (sh *strictHandler) WorkflowUpdateLinkGithub(ctx echo.Context, workflow ope
	return nil
}

// WorkflowGetMetrics operation middleware
func (sh *strictHandler) WorkflowGetMetrics(ctx echo.Context, workflow openapi_types.UUID, params WorkflowGetMetricsParams) error {
	var request WorkflowGetMetricsRequestObject

	request.Workflow = workflow
	request.Params = params

	handler := func(ctx echo.Context, request interface{}) (interface{}, error) {
		return sh.ssi.WorkflowGetMetrics(ctx, request.(WorkflowGetMetricsRequestObject))
	}
	for _, middleware := range sh.middlewares {
		handler = middleware(handler, "WorkflowGetMetrics")
	}

	response, err := handler(ctx, request)

	if err != nil {
		return err
	} else if validResponse, ok := response.(WorkflowGetMetricsResponseObject); ok {
		return validResponse.VisitWorkflowGetMetricsResponse(ctx.Response())
	} else if response != nil {
		return fmt.Errorf("Unexpected response type: %T", response)
	}
	return nil
}

// WorkflowRunCreate operation middleware
func (sh *strictHandler) WorkflowRunCreate(ctx echo.Context, workflow openapi_types.UUID, params WorkflowRunCreateParams) error {
	var request WorkflowRunCreateRequestObject
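The middleware loop above wraps the handler repeatedly, so the last middleware in the slice ends up outermost. A self-contained sketch of that wrapping order (simplified signatures, not the generated ones):

package main

import "fmt"

type handlerFunc func(req string) string
type middleware func(next handlerFunc, operationID string) handlerFunc

func main() {
	var h handlerFunc = func(req string) string { return "core(" + req + ")" }

	middlewares := []middleware{
		func(next handlerFunc, op string) handlerFunc {
			return func(req string) string { return "A(" + next(req) + ")" }
		},
		func(next handlerFunc, op string) handlerFunc {
			return func(req string) string { return "B(" + next(req) + ")" }
		},
	}

	// Same loop shape as the generated code: each middleware wraps the
	// current handler, so "B" (appended last) runs outermost.
	for _, mw := range middlewares {
		h = mw(h, "WorkflowGetMetrics")
	}

	fmt.Println(h("req")) // B(A(core(req)))
}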
@@ -6251,128 +6386,131 @@ func (sh *strictHandler) WorkflowVersionGetDefinition(ctx echo.Context, workflow

// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
	// (regenerated base64-encoded, gzipped swagger payload omitted)
}

// GetSwagger returns the content of the embedded swagger specification file
@@ -34,6 +34,7 @@ func ToEventFromSQLC(eventRow *dbsqlc.ListEventsRow) *gen.Event {
		Running:   &eventRow.Runningruns,
		Succeeded: &eventRow.Succeededruns,
		Pending:   &eventRow.Pendingruns,
		Queued:    &eventRow.Queuedruns,
	}

	return res
@@ -63,8 +63,10 @@ import {
  Workflow,
  WorkflowID,
  WorkflowList,
  WorkflowMetrics,
  WorkflowRun,
  WorkflowRunList,
  WorkflowRunStatus,
  WorkflowRunStatusList,
  WorkflowVersion,
  WorkflowVersionDefinition,
@@ -866,6 +868,33 @@ export class Api<SecurityDataType = unknown> extends HttpClient<SecurityDataType
      format: "json",
      ...params,
    });
  /**
   * @description Get the metrics for a workflow version
   *
   * @tags Workflow
   * @name WorkflowGetMetrics
   * @summary Get workflow metrics
   * @request GET:/api/v1/workflows/{workflow}/metrics
   * @secure
   */
  workflowGetMetrics = (
    workflow: string,
    query?: {
      /** A status of workflow runs to filter by */
      status?: WorkflowRunStatus;
      /** A group key to filter metrics by */
      groupKey?: string;
    },
    params: RequestParams = {},
  ) =>
    this.request<WorkflowMetrics, APIErrors>({
      path: `/api/v1/workflows/${workflow}/metrics`,
      method: "GET",
      query: query,
      secure: true,
      format: "json",
      ...params,
    });
  /**
   * @description Create a pull request for a workflow
   *
@@ -286,6 +286,11 @@ export interface EventWorkflowRunSummary {
   * @format int64
   */
  running?: number;
  /**
   * The number of queued runs.
   * @format int64
   */
  queued?: number;
  /**
   * The number of succeeded runs.
   * @format int64

@@ -499,6 +504,7 @@ export enum WorkflowRunStatus {
  SUCCEEDED = "SUCCEEDED",
  FAILED = "FAILED",
  CANCELLED = "CANCELLED",
  QUEUED = "QUEUED",
}

export type WorkflowRunStatusList = WorkflowRunStatus[];
@@ -782,3 +788,10 @@ export interface CreateSNSIntegrationRequest {
  /** The Amazon Resource Name (ARN) of the SNS topic. */
  topicArn: string;
}

export interface WorkflowMetrics {
  /** The number of runs for a specific group key (passed via filter) */
  groupKeyRunsCount?: number;
  /** The total number of concurrency group keys. */
  groupKeyCount?: number;
}
@@ -56,6 +56,10 @@ export const queries = createQueryKeyStore({
      queryKey: ['workflow:get', workflow],
      queryFn: async () => (await api.workflowGet(workflow)).data,
    }),
    getMetrics: (workflow: string) => ({
      queryKey: ['workflow:get:metrics', workflow],
      queryFn: async () => (await api.workflowGetMetrics(workflow)).data,
    }),
    getVersion: (workflow: string, version?: string) => ({
      queryKey: ['workflow-version:get', workflow, version],
      queryFn: async () =>
@@ -119,14 +119,14 @@ function WorkflowRunSummary({ event }: { event: Event }) {
   invariant(tenant);

   const [hoverCardOpen, setPopoverOpen] = useState<
-    'failed' | 'succeeded' | 'running'
+    'failed' | 'succeeded' | 'running' | 'queued' | 'pending'
   >();

   const numFailed = event.workflowRunSummary?.failed || 0;
   const numSucceeded = event.workflowRunSummary?.succeeded || 0;
-  const numRunning =
-    (event.workflowRunSummary?.pending || 0) +
-    (event.workflowRunSummary?.running || 0);
+  const numRunning = event.workflowRunSummary?.running || 0;
+  const numPending = event.workflowRunSummary?.pending || 0;
+  const numQueued = event.workflowRunSummary?.queued || 0;

   const listWorkflowRunsQuery = useQuery({
     ...queries.workflowRuns.list(tenant.metadata.id, {
@@ -148,7 +148,13 @@ function WorkflowRunSummary({ event }: { event: Event }) {
       return run.status == 'SUCCEEDED';
     }
     if (hoverCardOpen == 'running') {
-      return run.status == 'RUNNING' || run.status == 'PENDING';
+      return run.status == 'RUNNING';
     }
+    if (hoverCardOpen == 'pending') {
+      return run.status == 'PENDING';
+    }
+    if (hoverCardOpen == 'queued') {
+      return run.status == 'QUEUED';
+    }
   }
@@ -255,6 +261,58 @@ function WorkflowRunSummary({ event }: { event: Event }) {
          </PopoverContent>
        </Popover>
      )}
      {numPending > 0 && (
        <Popover
          open={hoverCardOpen == 'pending'}
          onOpenChange={(open) => {
            if (!open) {
              setPopoverOpen(undefined);
            }
          }}
        >
          <PopoverTrigger>
            <Badge
              variant="inProgress"
              className="cursor-pointer"
              onClick={() => setPopoverOpen('pending')}
            >
              {numPending} Pending
            </Badge>
          </PopoverTrigger>
          <PopoverContent
            className="min-w-fit p-0 bg-background border-none z-40"
            align="end"
          >
            {hoverCardContent}
          </PopoverContent>
        </Popover>
      )}
      {numQueued > 0 && (
        <Popover
          open={hoverCardOpen == 'queued'}
          onOpenChange={(open) => {
            if (!open) {
              setPopoverOpen(undefined);
            }
          }}
        >
          <PopoverTrigger>
            <Badge
              variant="inProgress"
              className="cursor-pointer"
              onClick={() => setPopoverOpen('queued')}
            >
              {numQueued} Queued
            </Badge>
          </PopoverTrigger>
          <PopoverContent
            className="min-w-fit p-0 bg-background border-none z-40"
            align="end"
          >
            {hoverCardContent}
          </PopoverContent>
        </Popover>
      )}
    </div>
  );
}
@@ -230,6 +230,10 @@ function EventsTable() {
      value: WorkflowRunStatus.RUNNING,
      label: 'Running',
    },
    {
      value: WorkflowRunStatus.QUEUED,
      label: 'Queued',
    },
    {
      value: WorkflowRunStatus.PENDING,
      label: 'Pending',
@@ -62,7 +62,8 @@ INSERT INTO "Event" (
 -- name: ListEvents :many
 SELECT
     sqlc.embed(events),
-    sum(case when runs."status" = 'PENDING' OR runs."status" = 'QUEUED' then 1 else 0 end) AS pendingRuns,
+    sum(case when runs."status" = 'PENDING' then 1 else 0 end) AS pendingRuns,
+    sum(case when runs."status" = 'QUEUED' then 1 else 0 end) AS queuedRuns,
     sum(case when runs."status" = 'RUNNING' then 1 else 0 end) AS runningRuns,
     sum(case when runs."status" = 'SUCCEEDED' then 1 else 0 end) AS succeededRuns,
     sum(case when runs."status" = 'FAILED' then 1 else 0 end) AS failedRuns
@@ -190,7 +190,8 @@ func (q *Queries) GetEventsForRange(ctx context.Context, db DBTX) ([]*GetEventsF
 const listEvents = `-- name: ListEvents :many
 SELECT
     events.id, events."createdAt", events."updatedAt", events."deletedAt", events.key, events."tenantId", events."replayedFromId", events.data,
-    sum(case when runs."status" = 'PENDING' OR runs."status" = 'QUEUED' then 1 else 0 end) AS pendingRuns,
+    sum(case when runs."status" = 'PENDING' then 1 else 0 end) AS pendingRuns,
+    sum(case when runs."status" = 'QUEUED' then 1 else 0 end) AS queuedRuns,
     sum(case when runs."status" = 'RUNNING' then 1 else 0 end) AS runningRuns,
     sum(case when runs."status" = 'SUCCEEDED' then 1 else 0 end) AS succeededRuns,
     sum(case when runs."status" = 'FAILED' then 1 else 0 end) AS failedRuns
@@ -248,6 +249,7 @@ type ListEventsParams struct {
 type ListEventsRow struct {
 	Event         Event `json:"event"`
 	Pendingruns   int64 `json:"pendingruns"`
+	Queuedruns    int64 `json:"queuedruns"`
 	Runningruns   int64 `json:"runningruns"`
 	Succeededruns int64 `json:"succeededruns"`
 	Failedruns    int64 `json:"failedruns"`
@@ -281,6 +283,7 @@ func (q *Queries) ListEvents(ctx context.Context, db DBTX, arg ListEventsParams)
 		&i.Event.ReplayedFromId,
 		&i.Event.Data,
 		&i.Pendingruns,
+		&i.Queuedruns,
 		&i.Runningruns,
 		&i.Succeededruns,
 		&i.Failedruns,
@@ -99,20 +99,10 @@ LIMIT
     COALESCE(sqlc.narg('limit'), 50);

 -- name: PopWorkflowRunsRoundRobin :many
-WITH running_count AS (
-    SELECT
-        COUNT(*) AS "count"
-    FROM
-        "WorkflowRun" r1
-    JOIN
-        "WorkflowVersion" workflowVersion ON r1."workflowVersionId" = workflowVersion."id"
-    WHERE
-        r1."tenantId" = $1 AND
-        r1."status" = 'RUNNING' AND
-        workflowVersion."id" = $2
-), queued_row_numbers AS (
+WITH workflow_runs AS (
     SELECT
         r2.id,
         r2."status",
         row_number() OVER (PARTITION BY r2."concurrencyGroupId" ORDER BY r2."createdAt") AS rn,
         row_number() over (order by r2."createdAt" ASC) as seqnum
     FROM
@@ -120,32 +110,42 @@ WITH running_count AS (
     LEFT JOIN
         "WorkflowVersion" workflowVersion ON r2."workflowVersionId" = workflowVersion."id"
     WHERE
-        r2."tenantId" = $1 AND
-        r2."status" = 'QUEUED' AND
-        workflowVersion."id" = $2
+        r2."tenantId" = @tenantId::uuid AND
+        (r2."status" = 'QUEUED' OR r2."status" = 'RUNNING') AND
+        workflowVersion."workflowId" = @workflowId::uuid
     ORDER BY
         rn, seqnum ASC
 ), min_rn AS (
     SELECT
         MIN(rn) as min_rn
     FROM
-        queued_row_numbers
-), first_partition_count AS (
+        workflow_runs
+), total_group_count AS ( -- counts the number of groups
     SELECT
         COUNT(*) as count
     FROM
-        queued_row_numbers
+        workflow_runs
     WHERE
         rn = (SELECT min_rn FROM min_rn)
 ), eligible_runs AS (
     SELECT
         id
     FROM
-        queued_row_numbers
+        "WorkflowRun" wr
     WHERE
-        -- We can run up to maxRuns per group, so we multiple max runs by the number of groups, then subtract the
-        -- total number of running workflows.
-        queued_row_numbers."seqnum" <= (@maxRuns::int) * (SELECT count FROM first_partition_count) - (SELECT "count" FROM running_count)
+        wr."id" IN (
+            SELECT
+                id
+            FROM
+                workflow_runs
+            ORDER BY
+                rn, seqnum ASC
+            LIMIT
+                -- We can run up to maxRuns per group, so we multiply max runs by the number of groups, then subtract the
+                -- total number of running workflows.
+                (@maxRuns::int) * (SELECT count FROM total_group_count)
+        ) AND
+        wr."status" = 'QUEUED'
     FOR UPDATE SKIP LOCKED
 )
 UPDATE "WorkflowRun"
@@ -154,7 +154,8 @@ SET
 FROM
     eligible_runs
 WHERE
-    "WorkflowRun".id = eligible_runs.id
+    "WorkflowRun".id = eligible_runs.id AND
+    "WorkflowRun"."status" = 'QUEUED'
 RETURNING
     "WorkflowRun".*;
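The fairness fix is easiest to see with concrete numbers. The rewritten query ranks every QUEUED or RUNNING run per concurrency group (rn) and globally (seqnum), caps the candidate window at maxRuns times the group count, and then pops only the QUEUED rows inside that window — so a group's RUNNING runs consume its own slots instead of a global running count being subtracted as before. A small Go simulation of that selection (illustrative data, not repo code):

package main

import (
	"fmt"
	"sort"
)

type run struct {
	group  string
	status string // "QUEUED" or "RUNNING"
	rn     int    // row_number() per concurrency group, by creation time
	seq    int    // global row_number(), by creation time
}

// pop mirrors the query's selection: order by (rn, seq), cap the window at
// maxRuns * number-of-groups, then pop only QUEUED rows inside the window.
func pop(runs []run, maxRuns int) []run {
	groups := map[string]bool{}
	for _, r := range runs {
		groups[r.group] = true
	}

	sort.Slice(runs, func(i, j int) bool {
		if runs[i].rn != runs[j].rn {
			return runs[i].rn < runs[j].rn
		}
		return runs[i].seq < runs[j].seq
	})

	limit := maxRuns * len(groups) // LIMIT (@maxRuns::int) * total_group_count
	var popped []run
	for i, r := range runs {
		if i >= limit {
			break
		}
		// RUNNING rows occupy their group's slots in the window but are not updated.
		if r.status == "QUEUED" {
			popped = append(popped, r)
		}
	}
	return popped
}

func main() {
	runs := []run{
		{"a", "RUNNING", 1, 1}, {"a", "QUEUED", 2, 4},
		{"b", "QUEUED", 1, 2}, {"b", "QUEUED", 2, 5},
		{"c", "QUEUED", 1, 3},
	}
	// maxRuns = 1 with 3 groups gives a window of 3 rows: a(RUNNING), b, c.
	// Group "a" already has a RUNNING run, so only b's and c's first runs pop.
	fmt.Println(pop(runs, 1)) // [{b QUEUED 1 2} {c QUEUED 1 3}]
}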
@@ -844,20 +844,10 @@ func (q *Queries) ListWorkflowRuns(ctx context.Context, db DBTX, arg ListWorkflo
 }

 const popWorkflowRunsRoundRobin = `-- name: PopWorkflowRunsRoundRobin :many
-WITH running_count AS (
-    SELECT
-        COUNT(*) AS "count"
-    FROM
-        "WorkflowRun" r1
-    JOIN
-        "WorkflowVersion" workflowVersion ON r1."workflowVersionId" = workflowVersion."id"
-    WHERE
-        r1."tenantId" = $1 AND
-        r1."status" = 'RUNNING' AND
-        workflowVersion."id" = $2
-), queued_row_numbers AS (
+WITH workflow_runs AS (
     SELECT
         r2.id,
         r2."status",
         row_number() OVER (PARTITION BY r2."concurrencyGroupId" ORDER BY r2."createdAt") AS rn,
         row_number() over (order by r2."createdAt" ASC) as seqnum
     FROM
@@ -865,32 +855,42 @@ WITH running_count AS (
     LEFT JOIN
         "WorkflowVersion" workflowVersion ON r2."workflowVersionId" = workflowVersion."id"
     WHERE
-        r2."tenantId" = $1 AND
-        r2."status" = 'QUEUED' AND
-        workflowVersion."id" = $2
+        r2."tenantId" = $1::uuid AND
+        (r2."status" = 'QUEUED' OR r2."status" = 'RUNNING') AND
+        workflowVersion."workflowId" = $2::uuid
     ORDER BY
         rn, seqnum ASC
 ), min_rn AS (
     SELECT
         MIN(rn) as min_rn
     FROM
-        queued_row_numbers
-), first_partition_count AS (
+        workflow_runs
+), total_group_count AS ( -- counts the number of groups
     SELECT
         COUNT(*) as count
     FROM
-        queued_row_numbers
+        workflow_runs
     WHERE
         rn = (SELECT min_rn FROM min_rn)
 ), eligible_runs AS (
     SELECT
         id
     FROM
-        queued_row_numbers
+        "WorkflowRun" wr
     WHERE
-        -- We can run up to maxRuns per group, so we multiple max runs by the number of groups, then subtract the
-        -- total number of running workflows.
-        queued_row_numbers."seqnum" <= ($3::int) * (SELECT count FROM first_partition_count) - (SELECT "count" FROM running_count)
+        wr."id" IN (
+            SELECT
+                id
+            FROM
+                workflow_runs
+            ORDER BY
+                rn, seqnum ASC
+            LIMIT
+                -- We can run up to maxRuns per group, so we multiply max runs by the number of groups, then subtract the
+                -- total number of running workflows.
+                ($3::int) * (SELECT count FROM total_group_count)
+        ) AND
+        wr."status" = 'QUEUED'
     FOR UPDATE SKIP LOCKED
 )
 UPDATE "WorkflowRun"
@@ -899,19 +899,20 @@ SET
 FROM
     eligible_runs
 WHERE
-    "WorkflowRun".id = eligible_runs.id
+    "WorkflowRun".id = eligible_runs.id AND
+    "WorkflowRun"."status" = 'QUEUED'
 RETURNING
     "WorkflowRun"."createdAt", "WorkflowRun"."updatedAt", "WorkflowRun"."deletedAt", "WorkflowRun"."tenantId", "WorkflowRun"."workflowVersionId", "WorkflowRun".status, "WorkflowRun".error, "WorkflowRun"."startedAt", "WorkflowRun"."finishedAt", "WorkflowRun"."concurrencyGroupId", "WorkflowRun"."displayName", "WorkflowRun".id, "WorkflowRun"."gitRepoBranch", "WorkflowRun"."childIndex", "WorkflowRun"."childKey", "WorkflowRun"."parentId", "WorkflowRun"."parentStepRunId"
 `

 type PopWorkflowRunsRoundRobinParams struct {
-	TenantId pgtype.UUID `json:"tenantId"`
-	ID       pgtype.UUID `json:"id"`
-	Maxruns  int32       `json:"maxruns"`
+	Tenantid   pgtype.UUID `json:"tenantid"`
+	Workflowid pgtype.UUID `json:"workflowid"`
+	Maxruns    int32       `json:"maxruns"`
 }

 func (q *Queries) PopWorkflowRunsRoundRobin(ctx context.Context, db DBTX, arg PopWorkflowRunsRoundRobinParams) ([]*WorkflowRun, error) {
-	rows, err := db.Query(ctx, popWorkflowRunsRoundRobin, arg.TenantId, arg.ID, arg.Maxruns)
+	rows, err := db.Query(ctx, popWorkflowRunsRoundRobin, arg.Tenantid, arg.Workflowid, arg.Maxruns)
 	if err != nil {
 		return nil, err
 	}
@@ -417,4 +417,38 @@ WHERE
    workflowVersions."workflowId" = @workflowId::uuid
ORDER BY
    workflowVersions."order" DESC
LIMIT 1;

-- name: CountWorkflowRunsRoundRobin :one
SELECT COUNT(*) AS total
FROM
    "WorkflowRun" r1
JOIN
    "WorkflowVersion" workflowVersion ON r1."workflowVersionId" = workflowVersion."id"
WHERE
    r1."tenantId" = @tenantId::uuid AND
    (
        sqlc.narg('status')::"WorkflowRunStatus" IS NULL OR
        r1."status" = sqlc.narg('status')::"WorkflowRunStatus"
    ) AND
    workflowVersion."workflowId" = @workflowId::uuid AND
    r1."concurrencyGroupId" IS NOT NULL AND
    (
        sqlc.narg('groupKey')::text IS NULL OR
        r1."concurrencyGroupId" = sqlc.narg('groupKey')::text
    );

-- name: CountRoundRobinGroupKeys :one
SELECT
    COUNT(DISTINCT "concurrencyGroupId") AS total
FROM
    "WorkflowRun" r1
JOIN
    "WorkflowVersion" workflowVersion ON r1."workflowVersionId" = workflowVersion."id"
WHERE
    r1."tenantId" = @tenantId::uuid AND
    (
        sqlc.narg('status')::"WorkflowRunStatus" IS NULL OR
        r1."status" = sqlc.narg('status')::"WorkflowRunStatus"
    ) AND
    workflowVersion."workflowId" = @workflowId::uuid;
@@ -49,6 +49,74 @@ func (q *Queries) AddWorkflowTag(ctx context.Context, db DBTX, arg AddWorkflowTa
	return err
}

const countRoundRobinGroupKeys = `-- name: CountRoundRobinGroupKeys :one
SELECT
    COUNT(DISTINCT "concurrencyGroupId") AS total
FROM
    "WorkflowRun" r1
JOIN
    "WorkflowVersion" workflowVersion ON r1."workflowVersionId" = workflowVersion."id"
WHERE
    r1."tenantId" = $1::uuid AND
    (
        $2::"WorkflowRunStatus" IS NULL OR
        r1."status" = $2::"WorkflowRunStatus"
    ) AND
    workflowVersion."workflowId" = $3::uuid
`

type CountRoundRobinGroupKeysParams struct {
	Tenantid   pgtype.UUID           `json:"tenantid"`
	Status     NullWorkflowRunStatus `json:"status"`
	Workflowid pgtype.UUID           `json:"workflowid"`
}

func (q *Queries) CountRoundRobinGroupKeys(ctx context.Context, db DBTX, arg CountRoundRobinGroupKeysParams) (int64, error) {
	row := db.QueryRow(ctx, countRoundRobinGroupKeys, arg.Tenantid, arg.Status, arg.Workflowid)
	var total int64
	err := row.Scan(&total)
	return total, err
}

const countWorkflowRunsRoundRobin = `-- name: CountWorkflowRunsRoundRobin :one
SELECT COUNT(*) AS total
FROM
    "WorkflowRun" r1
JOIN
    "WorkflowVersion" workflowVersion ON r1."workflowVersionId" = workflowVersion."id"
WHERE
    r1."tenantId" = $1::uuid AND
    (
        $2::"WorkflowRunStatus" IS NULL OR
        r1."status" = $2::"WorkflowRunStatus"
    ) AND
    workflowVersion."workflowId" = $3::uuid AND
    r1."concurrencyGroupId" IS NOT NULL AND
    (
        $4::text IS NULL OR
        r1."concurrencyGroupId" = $4::text
    )
`

type CountWorkflowRunsRoundRobinParams struct {
	Tenantid   pgtype.UUID           `json:"tenantid"`
	Status     NullWorkflowRunStatus `json:"status"`
	Workflowid pgtype.UUID           `json:"workflowid"`
	GroupKey   pgtype.Text           `json:"groupKey"`
}

func (q *Queries) CountWorkflowRunsRoundRobin(ctx context.Context, db DBTX, arg CountWorkflowRunsRoundRobinParams) (int64, error) {
	row := db.QueryRow(ctx, countWorkflowRunsRoundRobin,
		arg.Tenantid,
		arg.Status,
		arg.Workflowid,
		arg.GroupKey,
	)
	var total int64
	err := row.Scan(&total)
	return total, err
}

const countWorkflows = `-- name: CountWorkflows :one
SELECT
    count(workflows) OVER() AS total
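When the caller supplies no status filter, the zero value of NullWorkflowRunStatus has Valid == false, which pgx binds as SQL NULL — so the ($2::"WorkflowRunStatus" IS NULL OR ...) guard turns that predicate off entirely, and the same applies to the pgtype.Text group key. A sketch of an unfiltered call (ctx, pool, and the UUIDs are placeholders from surrounding code):

params := CountWorkflowRunsRoundRobinParams{
	Tenantid:   tenantUUID,
	Workflowid: workflowUUID,
	// Status left at its zero value: Valid == false, bound as NULL, no status filter.
	// GroupKey left at its zero value: pgtype.Text{Valid: false}, bound as NULL, no key filter.
}

total, err := q.CountWorkflowRunsRoundRobin(ctx, pool, params)
if err != nil {
	return err
}
fmt.Println("matching runs:", total)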
@@ -230,6 +230,56 @@ func (r *workflowAPIRepository) UpsertWorkflowDeploymentConfig(workflowId string
|
||||
return deploymentConfig, nil
|
||||
}
|
||||
|
||||
func (r *workflowAPIRepository) GetWorkflowMetrics(tenantId, workflowId string, opts *repository.GetWorkflowMetricsOpts) (*repository.WorkflowMetrics, error) {
|
||||
if err := r.v.Validate(opts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pgTenantId := sqlchelpers.UUIDFromStr(tenantId)
|
||||
pgWorkflowId := sqlchelpers.UUIDFromStr(workflowId)
|
||||
|
||||
countRunsParams := dbsqlc.CountWorkflowRunsRoundRobinParams{
|
||||
Tenantid: pgTenantId,
|
||||
Workflowid: pgWorkflowId,
|
||||
}
|
||||
|
||||
countGroupKeysParams := dbsqlc.CountRoundRobinGroupKeysParams{
|
||||
Tenantid: pgTenantId,
|
||||
Workflowid: pgWorkflowId,
|
||||
}
|
||||
|
||||
if opts.Status != nil {
|
||||
status := dbsqlc.NullWorkflowRunStatus{
|
||||
Valid: true,
|
||||
WorkflowRunStatus: dbsqlc.WorkflowRunStatus(*opts.Status),
|
||||
}
|
||||
|
||||
countRunsParams.Status = status
|
||||
countGroupKeysParams.Status = status
|
||||
}
|
||||
|
||||
if opts.GroupKey != nil {
|
||||
countRunsParams.GroupKey = sqlchelpers.TextFromStr(*opts.GroupKey)
|
||||
}
|
||||
|
||||
runsCount, err := r.queries.CountWorkflowRunsRoundRobin(context.Background(), r.pool, countRunsParams)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch workflow run counts: %w", err)
|
||||
}
|
||||
|
||||
groupKeysCount, err := r.queries.CountRoundRobinGroupKeys(context.Background(), r.pool, countGroupKeysParams)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch group key counts: %w", err)
|
||||
}
|
||||
|
||||
return &repository.WorkflowMetrics{
|
||||
GroupKeyRunsCount: int(runsCount),
|
||||
GroupKeyCount: int(groupKeysCount),
|
||||
}, nil
|
||||
}
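
A hedged usage sketch for this new repository method; `repo` stands for any repository.WorkflowAPIRepository implementation (the interface gains GetWorkflowMetrics later in this diff), and the filter values are invented.

// Sketch only: fetching queued-run metrics through the repository layer.
func queuedMetricsForGroup(repo repository.WorkflowAPIRepository, tenantId, workflowId string) (*repository.WorkflowMetrics, error) {
	status := "QUEUED"        // validated against PENDING QUEUED RUNNING SUCCEEDED FAILED
	groupKey := "customer-123" // hypothetical concurrency group key

	return repo.GetWorkflowMetrics(tenantId, workflowId, &repository.GetWorkflowMetricsOpts{
		Status:   &status,
		GroupKey: &groupKey, // narrows GroupKeyRunsCount to one group
	})
}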

type workflowEngineRepository struct {
	pool *pgxpool.Pool
	v    validator.Validator

@@ -199,38 +199,12 @@ func (w *workflowRunEngineRepository) GetScheduledChildWorkflowRun(parentId, par
	return w.queries.GetScheduledChildWorkflowRun(context.Background(), w.pool, params)
}

func (w *workflowRunEngineRepository) PopWorkflowRunsRoundRobin(tenantId, workflowVersionId string, maxRuns int) ([]*dbsqlc.WorkflowRun, error) {
	pgTenantId := &pgtype.UUID{}

	if err := pgTenantId.Scan(tenantId); err != nil {
		return nil, err
	}

	tx, err := w.pool.Begin(context.Background())

	if err != nil {
		return nil, err
	}

	defer deferRollback(context.Background(), w.l, tx.Rollback)

	res, err := w.queries.PopWorkflowRunsRoundRobin(context.Background(), tx, dbsqlc.PopWorkflowRunsRoundRobinParams{
		Maxruns: int32(maxRuns),
		TenantId: *pgTenantId,
		ID: sqlchelpers.UUIDFromStr(workflowVersionId),
func (w *workflowRunEngineRepository) PopWorkflowRunsRoundRobin(tenantId, workflowId string, maxRuns int) ([]*dbsqlc.WorkflowRun, error) {
	return w.queries.PopWorkflowRunsRoundRobin(context.Background(), w.pool, dbsqlc.PopWorkflowRunsRoundRobinParams{
		Maxruns:    int32(maxRuns),
		Tenantid:   sqlchelpers.UUIDFromStr(tenantId),
		Workflowid: sqlchelpers.UUIDFromStr(workflowId),
	})

	if err != nil {
		return nil, err
	}

	err = tx.Commit(context.Background())

	if err != nil {
		return nil, err
	}

	return res, nil
}

func (w *workflowRunEngineRepository) CreateNewWorkflowRun(ctx context.Context, tenantId string, opts *repository.CreateWorkflowRunOpts) (string, error) {

@@ -176,6 +176,22 @@ type UpsertWorkflowDeploymentConfigOpts struct {
	GitRepoBranch string `validate:"required"`
}

type WorkflowMetrics struct {
	// the number of runs for a specific group key
	GroupKeyRunsCount int `json:"groupKeyRunsCount,omitempty"`

	// the total number of concurrency group keys
	GroupKeyCount int `json:"groupKeyCount,omitempty"`
}

type GetWorkflowMetricsOpts struct {
	// (optional) the group key to filter by
	GroupKey *string

	// (optional) the workflow run status to filter by
	Status *string `validate:"omitnil,oneof=PENDING QUEUED RUNNING SUCCEEDED FAILED"`
}

type WorkflowAPIRepository interface {
	// ListWorkflows returns all workflows for a given tenant.
	ListWorkflows(tenantId string, opts *ListWorkflowsOpts) (*ListWorkflowsResult, error)

@@ -193,6 +209,9 @@ type WorkflowAPIRepository interface {
	// DeleteWorkflow deletes a workflow for a given tenant.
	DeleteWorkflow(tenantId, workflowId string) (*db.WorkflowModel, error)

	// GetWorkflowMetrics returns the metrics for a given workflow version.
	GetWorkflowMetrics(tenantId, workflowId string, opts *GetWorkflowMetricsOpts) (*WorkflowMetrics, error)

	UpsertWorkflowDeploymentConfig(workflowId string, opts *UpsertWorkflowDeploymentConfigOpts) (*db.WorkflowDeploymentConfigModel, error)
}

@@ -290,7 +290,7 @@ type WorkflowRunEngineRepository interface {

	GetScheduledChildWorkflowRun(parentId, parentStepRunId string, childIndex int, childkey *string) (*dbsqlc.WorkflowTriggerScheduledRef, error)

	PopWorkflowRunsRoundRobin(tenantId, workflowVersionId string, maxRuns int) ([]*dbsqlc.WorkflowRun, error)
	PopWorkflowRunsRoundRobin(tenantId, workflowId string, maxRuns int) ([]*dbsqlc.WorkflowRun, error)

	// CreateNewWorkflowRun creates a new workflow run for a workflow version.
	CreateNewWorkflowRun(ctx context.Context, tenantId string, opts *CreateWorkflowRunOpts) (string, error)

@@ -503,12 +503,13 @@ func (wc *WorkflowsControllerImpl) queueByGroupRoundRobin(ctx context.Context, t
	defer span.End()

	workflowVersionId := sqlchelpers.UUIDToStr(workflowVersion.WorkflowVersion.ID)
	workflowId := sqlchelpers.UUIDToStr(workflowVersion.WorkflowVersion.WorkflowId)
	maxRuns := int(workflowVersion.ConcurrencyMaxRuns.Int32)

	wc.l.Info().Msgf("handling queue with strategy GROUP_ROUND_ROBIN for workflow version %s", workflowVersionId)

	// get workflow runs which are queued for this group key
	poppedWorkflowRuns, err := wc.repo.WorkflowRun().PopWorkflowRunsRoundRobin(tenantId, workflowVersionId, maxRuns)
	poppedWorkflowRuns, err := wc.repo.WorkflowRun().PopWorkflowRunsRoundRobin(tenantId, workflowId, maxRuns)

	if err != nil {
		return fmt.Errorf("could not list queued workflow runs: %w", err)

@@ -99,6 +99,7 @@ const (
	CANCELLED WorkflowRunStatus = "CANCELLED"
	FAILED    WorkflowRunStatus = "FAILED"
	PENDING   WorkflowRunStatus = "PENDING"
	QUEUED    WorkflowRunStatus = "QUEUED"
	RUNNING   WorkflowRunStatus = "RUNNING"
	SUCCEEDED WorkflowRunStatus = "SUCCEEDED"
)

@@ -259,6 +260,9 @@ type EventWorkflowRunSummary struct {
	// Pending The number of pending runs.
	Pending *int64 `json:"pending,omitempty"`

	// Queued The number of queued runs.
	Queued *int64 `json:"queued,omitempty"`

	// Running The number of running runs.
	Running *int64 `json:"running,omitempty"`

@@ -725,6 +729,15 @@ type WorkflowList struct {
	Rows *[]Workflow `json:"rows,omitempty"`
}

// WorkflowMetrics defines model for WorkflowMetrics.
type WorkflowMetrics struct {
	// GroupKeyCount The total number of concurrency group keys.
	GroupKeyCount *int `json:"groupKeyCount,omitempty"`

	// GroupKeyRunsCount The number of runs for a specific group key (passed via filter)
	GroupKeyRunsCount *int `json:"groupKeyRunsCount,omitempty"`
}
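
A small sketch of what a decoded payload looks like with this model; both fields are optional pointers, so absent keys stay nil. It assumes encoding/json is imported, and the counts are invented.

// Sketch only: unmarshaling a hypothetical metrics payload.
func decodeMetrics() (*WorkflowMetrics, error) {
	payload := []byte(`{"groupKeyRunsCount": 12, "groupKeyCount": 3}`) // invented values

	var m WorkflowMetrics
	if err := json.Unmarshal(payload, &m); err != nil {
		return nil, err
	}
	return &m, nil
}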

// WorkflowRun defines model for WorkflowRun.
type WorkflowRun struct {
	DisplayName *string `json:"displayName,omitempty"`

@@ -901,6 +914,15 @@ type WorkflowRunListParams struct {
	ParentStepRunId *openapi_types.UUID `form:"parentStepRunId,omitempty" json:"parentStepRunId,omitempty"`
}

// WorkflowGetMetricsParams defines parameters for WorkflowGetMetrics.
type WorkflowGetMetricsParams struct {
	// Status A status of workflow runs to filter by
	Status *WorkflowRunStatus `form:"status,omitempty" json:"status,omitempty"`

	// GroupKey A group key to filter metrics by
	GroupKey *string `form:"groupKey,omitempty" json:"groupKey,omitempty"`
}

// WorkflowRunCreateParams defines parameters for WorkflowRunCreate.
type WorkflowRunCreateParams struct {
	// Version The workflow version. If not supplied, the latest version is fetched.

@@ -1230,6 +1252,9 @@ type ClientInterface interface {

	WorkflowUpdateLinkGithub(ctx context.Context, workflow openapi_types.UUID, body WorkflowUpdateLinkGithubJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error)

	// WorkflowGetMetrics request
	WorkflowGetMetrics(ctx context.Context, workflow openapi_types.UUID, params *WorkflowGetMetricsParams, reqEditors ...RequestEditorFn) (*http.Response, error)

	// WorkflowRunCreateWithBody request with any body
	WorkflowRunCreateWithBody(ctx context.Context, workflow openapi_types.UUID, params *WorkflowRunCreateParams, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error)

@@ -2070,6 +2095,18 @@ func (c *Client) WorkflowUpdateLinkGithub(ctx context.Context, workflow openapi_
	return c.Client.Do(req)
}

func (c *Client) WorkflowGetMetrics(ctx context.Context, workflow openapi_types.UUID, params *WorkflowGetMetricsParams, reqEditors ...RequestEditorFn) (*http.Response, error) {
	req, err := NewWorkflowGetMetricsRequest(c.Server, workflow, params)
	if err != nil {
		return nil, err
	}
	req = req.WithContext(ctx)
	if err := c.applyEditors(ctx, req, reqEditors); err != nil {
		return nil, err
	}
	return c.Client.Do(req)
}

func (c *Client) WorkflowRunCreateWithBody(ctx context.Context, workflow openapi_types.UUID, params *WorkflowRunCreateParams, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) {
	req, err := NewWorkflowRunCreateRequestWithBody(c.Server, workflow, params, contentType, body)
	if err != nil {

@@ -4467,6 +4504,78 @@ func NewWorkflowUpdateLinkGithubRequestWithBody(server string, workflow openapi_
	return req, nil
}

// NewWorkflowGetMetricsRequest generates requests for WorkflowGetMetrics
func NewWorkflowGetMetricsRequest(server string, workflow openapi_types.UUID, params *WorkflowGetMetricsParams) (*http.Request, error) {
	var err error

	var pathParam0 string

	pathParam0, err = runtime.StyleParamWithLocation("simple", false, "workflow", runtime.ParamLocationPath, workflow)
	if err != nil {
		return nil, err
	}

	serverURL, err := url.Parse(server)
	if err != nil {
		return nil, err
	}

	operationPath := fmt.Sprintf("/api/v1/workflows/%s/metrics", pathParam0)
	if operationPath[0] == '/' {
		operationPath = "." + operationPath
	}

	queryURL, err := serverURL.Parse(operationPath)
	if err != nil {
		return nil, err
	}

	if params != nil {
		queryValues := queryURL.Query()

		if params.Status != nil {

			if queryFrag, err := runtime.StyleParamWithLocation("form", true, "status", runtime.ParamLocationQuery, *params.Status); err != nil {
				return nil, err
			} else if parsed, err := url.ParseQuery(queryFrag); err != nil {
				return nil, err
			} else {
				for k, v := range parsed {
					for _, v2 := range v {
						queryValues.Add(k, v2)
					}
				}
			}

		}

		if params.GroupKey != nil {

			if queryFrag, err := runtime.StyleParamWithLocation("form", true, "groupKey", runtime.ParamLocationQuery, *params.GroupKey); err != nil {
				return nil, err
			} else if parsed, err := url.ParseQuery(queryFrag); err != nil {
				return nil, err
			} else {
				for k, v := range parsed {
					for _, v2 := range v {
						queryValues.Add(k, v2)
					}
				}
			}

		}

		queryURL.RawQuery = queryValues.Encode()
	}

	req, err := http.NewRequest("GET", queryURL.String(), nil)
	if err != nil {
		return nil, err
	}

	return req, nil
}

// NewWorkflowRunCreateRequest calls the generic WorkflowRunCreate builder with application/json body
func NewWorkflowRunCreateRequest(server string, workflow openapi_types.UUID, params *WorkflowRunCreateParams, body WorkflowRunCreateJSONRequestBody) (*http.Request, error) {
	var bodyReader io.Reader

@@ -4884,6 +4993,9 @@ type ClientWithResponsesInterface interface {

	WorkflowUpdateLinkGithubWithResponse(ctx context.Context, workflow openapi_types.UUID, body WorkflowUpdateLinkGithubJSONRequestBody, reqEditors ...RequestEditorFn) (*WorkflowUpdateLinkGithubResponse, error)

	// WorkflowGetMetricsWithResponse request
	WorkflowGetMetricsWithResponse(ctx context.Context, workflow openapi_types.UUID, params *WorkflowGetMetricsParams, reqEditors ...RequestEditorFn) (*WorkflowGetMetricsResponse, error)

	// WorkflowRunCreateWithBodyWithResponse request with any body
	WorkflowRunCreateWithBodyWithResponse(ctx context.Context, workflow openapi_types.UUID, params *WorkflowRunCreateParams, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*WorkflowRunCreateResponse, error)

@@ -6206,6 +6318,31 @@ func (r WorkflowUpdateLinkGithubResponse) StatusCode() int {
	return 0
}

type WorkflowGetMetricsResponse struct {
	Body         []byte
	HTTPResponse *http.Response
	JSON200      *WorkflowMetrics
	JSON400      *APIErrors
	JSON403      *APIErrors
	JSON404      *APIErrors
}

// Status returns HTTPResponse.Status
func (r WorkflowGetMetricsResponse) Status() string {
	if r.HTTPResponse != nil {
		return r.HTTPResponse.Status
	}
	return http.StatusText(0)
}

// StatusCode returns HTTPResponse.StatusCode
func (r WorkflowGetMetricsResponse) StatusCode() int {
	if r.HTTPResponse != nil {
		return r.HTTPResponse.StatusCode
	}
	return 0
}

type WorkflowRunCreateResponse struct {
	Body         []byte
	HTTPResponse *http.Response

@@ -6888,6 +7025,15 @@ func (c *ClientWithResponses) WorkflowUpdateLinkGithubWithResponse(ctx context.C
	return ParseWorkflowUpdateLinkGithubResponse(rsp)
}

// WorkflowGetMetricsWithResponse request returning *WorkflowGetMetricsResponse
func (c *ClientWithResponses) WorkflowGetMetricsWithResponse(ctx context.Context, workflow openapi_types.UUID, params *WorkflowGetMetricsParams, reqEditors ...RequestEditorFn) (*WorkflowGetMetricsResponse, error) {
	rsp, err := c.WorkflowGetMetrics(ctx, workflow, params, reqEditors...)
	if err != nil {
		return nil, err
	}
	return ParseWorkflowGetMetricsResponse(rsp)
}
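
A hedged end-to-end sketch using the typed wrapper above: it issues the GET, then dispatches on status code into the JSON fields. It assumes a *ClientWithResponses built elsewhere and invented filter values.

// Sketch only: querying queued-run metrics through the generated typed client.
func queuedRunsInGroup(ctx context.Context, client *ClientWithResponses, workflow openapi_types.UUID) (int, error) {
	status := QUEUED           // WorkflowRunStatus constant added in this commit
	groupKey := "customer-123" // hypothetical group key

	resp, err := client.WorkflowGetMetricsWithResponse(ctx, workflow, &WorkflowGetMetricsParams{
		Status:   &status,
		GroupKey: &groupKey,
	})
	if err != nil {
		return 0, err
	}
	if resp.JSON200 == nil || resp.JSON200.GroupKeyRunsCount == nil {
		return 0, fmt.Errorf("unexpected response: %s", resp.Status())
	}
	return *resp.JSON200.GroupKeyRunsCount, nil
}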

// WorkflowRunCreateWithBodyWithResponse request with arbitrary body returning *WorkflowRunCreateResponse
func (c *ClientWithResponses) WorkflowRunCreateWithBodyWithResponse(ctx context.Context, workflow openapi_types.UUID, params *WorkflowRunCreateParams, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*WorkflowRunCreateResponse, error) {
	rsp, err := c.WorkflowRunCreateWithBody(ctx, workflow, params, contentType, body, reqEditors...)

@@ -9035,6 +9181,53 @@ func ParseWorkflowUpdateLinkGithubResponse(rsp *http.Response) (*WorkflowUpdateL
	return response, nil
}

// ParseWorkflowGetMetricsResponse parses an HTTP response from a WorkflowGetMetricsWithResponse call
func ParseWorkflowGetMetricsResponse(rsp *http.Response) (*WorkflowGetMetricsResponse, error) {
	bodyBytes, err := io.ReadAll(rsp.Body)
	defer func() { _ = rsp.Body.Close() }()
	if err != nil {
		return nil, err
	}

	response := &WorkflowGetMetricsResponse{
		Body:         bodyBytes,
		HTTPResponse: rsp,
	}

	switch {
	case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200:
		var dest WorkflowMetrics
		if err := json.Unmarshal(bodyBytes, &dest); err != nil {
			return nil, err
		}
		response.JSON200 = &dest

	case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400:
		var dest APIErrors
		if err := json.Unmarshal(bodyBytes, &dest); err != nil {
			return nil, err
		}
		response.JSON400 = &dest

	case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 403:
		var dest APIErrors
		if err := json.Unmarshal(bodyBytes, &dest); err != nil {
			return nil, err
		}
		response.JSON403 = &dest

	case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 404:
		var dest APIErrors
		if err := json.Unmarshal(bodyBytes, &dest); err != nil {
			return nil, err
		}
		response.JSON404 = &dest

	}

	return response, nil
}

// ParseWorkflowRunCreateResponse parses an HTTP response from a WorkflowRunCreateWithResponse call
func ParseWorkflowRunCreateResponse(rsp *http.Response) (*WorkflowRunCreateResponse, error) {
	bodyBytes, err := io.ReadAll(rsp.Body)

@@ -114,6 +114,7 @@ from hatchet_sdk.clients.rest.models.workflow import Workflow
from hatchet_sdk.clients.rest.models.workflow_concurrency import WorkflowConcurrency
from hatchet_sdk.clients.rest.models.workflow_deployment_config import WorkflowDeploymentConfig
from hatchet_sdk.clients.rest.models.workflow_list import WorkflowList
from hatchet_sdk.clients.rest.models.workflow_metrics import WorkflowMetrics
from hatchet_sdk.clients.rest.models.workflow_run import WorkflowRun
from hatchet_sdk.clients.rest.models.workflow_run_list import WorkflowRunList
from hatchet_sdk.clients.rest.models.workflow_run_status import WorkflowRunStatus

@@ -16,7 +16,7 @@ from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt
from typing import Any, Dict, List, Optional, Tuple, Union
from typing_extensions import Annotated

from pydantic import Field, StrictInt
from pydantic import Field, StrictInt, StrictStr
from typing import Optional
from typing_extensions import Annotated
from hatchet_sdk.clients.rest.models.create_pull_request_from_step_run import CreatePullRequestFromStepRun

@@ -26,8 +26,10 @@ from hatchet_sdk.clients.rest.models.list_pull_requests_response import ListPull
from hatchet_sdk.clients.rest.models.pull_request_state import PullRequestState
from hatchet_sdk.clients.rest.models.workflow import Workflow
from hatchet_sdk.clients.rest.models.workflow_list import WorkflowList
from hatchet_sdk.clients.rest.models.workflow_metrics import WorkflowMetrics
from hatchet_sdk.clients.rest.models.workflow_run import WorkflowRun
from hatchet_sdk.clients.rest.models.workflow_run_list import WorkflowRunList
from hatchet_sdk.clients.rest.models.workflow_run_status import WorkflowRunStatus
from hatchet_sdk.clients.rest.models.workflow_version import WorkflowVersion
from hatchet_sdk.clients.rest.models.workflow_version_definition import WorkflowVersionDefinition

@@ -1146,6 +1148,308 @@ class WorkflowApi:


    @validate_call
    def workflow_get_metrics(
        self,
        workflow: Annotated[str, Field(min_length=36, strict=True, max_length=36, description="The workflow id")],
        status: Annotated[Optional[WorkflowRunStatus], Field(description="A status of workflow runs to filter by")] = None,
        group_key: Annotated[Optional[StrictStr], Field(description="A group key to filter metrics by")] = None,
        _request_timeout: Union[
            None,
            Annotated[StrictFloat, Field(gt=0)],
            Tuple[
                Annotated[StrictFloat, Field(gt=0)],
                Annotated[StrictFloat, Field(gt=0)]
            ]
        ] = None,
        _request_auth: Optional[Dict[StrictStr, Any]] = None,
        _content_type: Optional[StrictStr] = None,
        _headers: Optional[Dict[StrictStr, Any]] = None,
        _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0,
    ) -> WorkflowMetrics:
        """Get workflow metrics

        Get the metrics for a workflow version

        :param workflow: The workflow id (required)
        :type workflow: str
        :param status: A status of workflow runs to filter by
        :type status: WorkflowRunStatus
        :param group_key: A group key to filter metrics by
        :type group_key: str
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :type _request_timeout: int, tuple(int, int), optional
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the
                              authentication in the spec for a single request.
        :type _request_auth: dict, optional
        :param _content_type: force content-type for the request.
        :type _content_type: str, Optional
        :param _headers: set to override the headers for a single
                         request; this effectively ignores the headers
                         in the spec for a single request.
        :type _headers: dict, optional
        :param _host_index: set to override the host_index for a single
                            request; this effectively ignores the host_index
                            in the spec for a single request.
        :type _host_index: int, optional
        :return: Returns the result object.
        """ # noqa: E501

        _param = self._workflow_get_metrics_serialize(
            workflow=workflow,
            status=status,
            group_key=group_key,
            _request_auth=_request_auth,
            _content_type=_content_type,
            _headers=_headers,
            _host_index=_host_index
        )

        _response_types_map: Dict[str, Optional[str]] = {
            '200': "WorkflowMetrics",
            '400': "APIErrors",
            '403': "APIErrors",
            '404': "APIErrors",
        }
        response_data = self.api_client.call_api(
            *_param,
            _request_timeout=_request_timeout
        )
        response_data.read()
        return self.api_client.response_deserialize(
            response_data=response_data,
            response_types_map=_response_types_map,
        ).data


    @validate_call
    def workflow_get_metrics_with_http_info(
        self,
        workflow: Annotated[str, Field(min_length=36, strict=True, max_length=36, description="The workflow id")],
        status: Annotated[Optional[WorkflowRunStatus], Field(description="A status of workflow runs to filter by")] = None,
        group_key: Annotated[Optional[StrictStr], Field(description="A group key to filter metrics by")] = None,
        _request_timeout: Union[
            None,
            Annotated[StrictFloat, Field(gt=0)],
            Tuple[
                Annotated[StrictFloat, Field(gt=0)],
                Annotated[StrictFloat, Field(gt=0)]
            ]
        ] = None,
        _request_auth: Optional[Dict[StrictStr, Any]] = None,
        _content_type: Optional[StrictStr] = None,
        _headers: Optional[Dict[StrictStr, Any]] = None,
        _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0,
    ) -> ApiResponse[WorkflowMetrics]:
        """Get workflow metrics

        Get the metrics for a workflow version

        :param workflow: The workflow id (required)
        :type workflow: str
        :param status: A status of workflow runs to filter by
        :type status: WorkflowRunStatus
        :param group_key: A group key to filter metrics by
        :type group_key: str
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :type _request_timeout: int, tuple(int, int), optional
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the
                              authentication in the spec for a single request.
        :type _request_auth: dict, optional
        :param _content_type: force content-type for the request.
        :type _content_type: str, Optional
        :param _headers: set to override the headers for a single
                         request; this effectively ignores the headers
                         in the spec for a single request.
        :type _headers: dict, optional
        :param _host_index: set to override the host_index for a single
                            request; this effectively ignores the host_index
                            in the spec for a single request.
        :type _host_index: int, optional
        :return: Returns the result object.
        """ # noqa: E501

        _param = self._workflow_get_metrics_serialize(
            workflow=workflow,
            status=status,
            group_key=group_key,
            _request_auth=_request_auth,
            _content_type=_content_type,
            _headers=_headers,
            _host_index=_host_index
        )

        _response_types_map: Dict[str, Optional[str]] = {
            '200': "WorkflowMetrics",
            '400': "APIErrors",
            '403': "APIErrors",
            '404': "APIErrors",
        }
        response_data = self.api_client.call_api(
            *_param,
            _request_timeout=_request_timeout
        )
        response_data.read()
        return self.api_client.response_deserialize(
            response_data=response_data,
            response_types_map=_response_types_map,
        )


    @validate_call
    def workflow_get_metrics_without_preload_content(
        self,
        workflow: Annotated[str, Field(min_length=36, strict=True, max_length=36, description="The workflow id")],
        status: Annotated[Optional[WorkflowRunStatus], Field(description="A status of workflow runs to filter by")] = None,
        group_key: Annotated[Optional[StrictStr], Field(description="A group key to filter metrics by")] = None,
        _request_timeout: Union[
            None,
            Annotated[StrictFloat, Field(gt=0)],
            Tuple[
                Annotated[StrictFloat, Field(gt=0)],
                Annotated[StrictFloat, Field(gt=0)]
            ]
        ] = None,
        _request_auth: Optional[Dict[StrictStr, Any]] = None,
        _content_type: Optional[StrictStr] = None,
        _headers: Optional[Dict[StrictStr, Any]] = None,
        _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0,
    ) -> RESTResponseType:
        """Get workflow metrics

        Get the metrics for a workflow version

        :param workflow: The workflow id (required)
        :type workflow: str
        :param status: A status of workflow runs to filter by
        :type status: WorkflowRunStatus
        :param group_key: A group key to filter metrics by
        :type group_key: str
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :type _request_timeout: int, tuple(int, int), optional
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the
                              authentication in the spec for a single request.
        :type _request_auth: dict, optional
        :param _content_type: force content-type for the request.
        :type _content_type: str, Optional
        :param _headers: set to override the headers for a single
                         request; this effectively ignores the headers
                         in the spec for a single request.
        :type _headers: dict, optional
        :param _host_index: set to override the host_index for a single
                            request; this effectively ignores the host_index
                            in the spec for a single request.
        :type _host_index: int, optional
        :return: Returns the result object.
        """ # noqa: E501

        _param = self._workflow_get_metrics_serialize(
            workflow=workflow,
            status=status,
            group_key=group_key,
            _request_auth=_request_auth,
            _content_type=_content_type,
            _headers=_headers,
            _host_index=_host_index
        )

        _response_types_map: Dict[str, Optional[str]] = {
            '200': "WorkflowMetrics",
            '400': "APIErrors",
            '403': "APIErrors",
            '404': "APIErrors",
        }
        response_data = self.api_client.call_api(
            *_param,
            _request_timeout=_request_timeout
        )
        return response_data.response


    def _workflow_get_metrics_serialize(
        self,
        workflow,
        status,
        group_key,
        _request_auth,
        _content_type,
        _headers,
        _host_index,
    ) -> RequestSerialized:

        _host = None

        _collection_formats: Dict[str, str] = {
        }

        _path_params: Dict[str, str] = {}
        _query_params: List[Tuple[str, str]] = []
        _header_params: Dict[str, Optional[str]] = _headers or {}
        _form_params: List[Tuple[str, str]] = []
        _files: Dict[str, str] = {}
        _body_params: Optional[bytes] = None

        # process the path parameters
        if workflow is not None:
            _path_params['workflow'] = workflow
        # process the query parameters
        if status is not None:

            _query_params.append(('status', status.value))

        if group_key is not None:

            _query_params.append(('groupKey', group_key))

        # process the header parameters
        # process the form parameters
        # process the body parameter


        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            [
                'application/json'
            ]
        )


        # authentication setting
        _auth_settings: List[str] = [
            'cookieAuth',
            'bearerAuth'
        ]

        return self.api_client.param_serialize(
            method='GET',
            resource_path='/api/v1/workflows/{workflow}/metrics',
            path_params=_path_params,
            query_params=_query_params,
            header_params=_header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            auth_settings=_auth_settings,
            collection_formats=_collection_formats,
            _host=_host,
            _request_auth=_request_auth
        )


    @validate_call
    def workflow_list(
        self,

@@ -84,6 +84,7 @@ from hatchet_sdk.clients.rest.models.workflow import Workflow
from hatchet_sdk.clients.rest.models.workflow_concurrency import WorkflowConcurrency
from hatchet_sdk.clients.rest.models.workflow_deployment_config import WorkflowDeploymentConfig
from hatchet_sdk.clients.rest.models.workflow_list import WorkflowList
from hatchet_sdk.clients.rest.models.workflow_metrics import WorkflowMetrics
from hatchet_sdk.clients.rest.models.workflow_run import WorkflowRun
from hatchet_sdk.clients.rest.models.workflow_run_list import WorkflowRunList
from hatchet_sdk.clients.rest.models.workflow_run_status import WorkflowRunStatus

@@ -28,9 +28,10 @@ class EventWorkflowRunSummary(BaseModel):
    """ # noqa: E501
    pending: Optional[StrictInt] = Field(default=None, description="The number of pending runs.")
    running: Optional[StrictInt] = Field(default=None, description="The number of running runs.")
    queued: Optional[StrictInt] = Field(default=None, description="The number of queued runs.")
    succeeded: Optional[StrictInt] = Field(default=None, description="The number of succeeded runs.")
    failed: Optional[StrictInt] = Field(default=None, description="The number of failed runs.")
    __properties: ClassVar[List[str]] = ["pending", "running", "succeeded", "failed"]
    __properties: ClassVar[List[str]] = ["pending", "running", "queued", "succeeded", "failed"]

    model_config = {
        "populate_by_name": True,

@@ -85,6 +86,7 @@ class EventWorkflowRunSummary(BaseModel):
        _obj = cls.model_validate({
            "pending": obj.get("pending"),
            "running": obj.get("running"),
            "queued": obj.get("queued"),
            "succeeded": obj.get("succeeded"),
            "failed": obj.get("failed")
        })

@@ -0,0 +1,89 @@
# coding: utf-8

"""
    Hatchet API

    The Hatchet API

    The version of the OpenAPI document: 1.0.0
    Generated by OpenAPI Generator (https://openapi-generator.tech)

    Do not edit the class manually.
""" # noqa: E501


from __future__ import annotations
import pprint
import re  # noqa: F401
import json

from pydantic import BaseModel, Field, StrictInt
from typing import Any, ClassVar, Dict, List, Optional
from typing import Optional, Set
from typing_extensions import Self

class WorkflowMetrics(BaseModel):
    """
    WorkflowMetrics
    """ # noqa: E501
    group_key_runs_count: Optional[StrictInt] = Field(default=None, description="The number of runs for a specific group key (passed via filter)", alias="groupKeyRunsCount")
    group_key_count: Optional[StrictInt] = Field(default=None, description="The total number of concurrency group keys.", alias="groupKeyCount")
    __properties: ClassVar[List[str]] = ["groupKeyRunsCount", "groupKeyCount"]

    model_config = {
        "populate_by_name": True,
        "validate_assignment": True,
        "protected_namespaces": (),
    }


    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of WorkflowMetrics from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dictionary representation of the model using alias.

        This has the following differences from calling pydantic's
        `self.model_dump(by_alias=True)`:

        * `None` is only added to the output dict for nullable fields that
          were set at model initialization. Other fields with value `None`
          are ignored.
        """
        excluded_fields: Set[str] = set([
        ])

        _dict = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        return _dict

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of WorkflowMetrics from a dict"""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        _obj = cls.model_validate({
            "groupKeyRunsCount": obj.get("groupKeyRunsCount"),
            "groupKeyCount": obj.get("groupKeyCount")
        })
        return _obj


@@ -31,6 +31,7 @@ class WorkflowRunStatus(str, Enum):
    SUCCEEDED = 'SUCCEEDED'
    FAILED = 'FAILED'
    CANCELLED = 'CANCELLED'
    QUEUED = 'QUEUED'

    @classmethod
    def from_json(cls, json_str: str) -> Self: