diff --git a/api-contracts/openapi/components/schemas/_index.yaml b/api-contracts/openapi/components/schemas/_index.yaml index 54cee030e..55fe68e66 100644 --- a/api-contracts/openapi/components/schemas/_index.yaml +++ b/api-contracts/openapi/components/schemas/_index.yaml @@ -36,6 +36,12 @@ TenantMemberList: $ref: "./tenant.yaml#/TenantMemberList" TenantMemberRole: $ref: "./tenant.yaml#/TenantMemberRole" +TenantResource: + $ref: "./tenant.yaml#/TenantResource" +TenantResourceLimit: + $ref: "./tenant.yaml#/TenantResourceLimit" +TenantResourcePolicy: + $ref: "./tenant.yaml#/TenantResourcePolicy" CreateTenantInviteRequest: $ref: "./tenant.yaml#/CreateTenantInviteRequest" UpdateTenantInviteRequest: diff --git a/api-contracts/openapi/components/schemas/tenant.yaml b/api-contracts/openapi/components/schemas/tenant.yaml index 3954810ec..259afa064 100644 --- a/api-contracts/openapi/components/schemas/tenant.yaml +++ b/api-contracts/openapi/components/schemas/tenant.yaml @@ -33,6 +33,9 @@ TenantAlertingSettings: enableExpiringTokenAlerts: type: boolean description: Whether to enable alerts when tokens are approaching expiration. + enableTenantResourceLimitAlerts: + type: boolean + description: Whether to enable alerts when tenant resources are approaching limits. maxAlertingFrequency: type: string description: The max frequency at which to alert. @@ -79,6 +82,9 @@ UpdateTenantRequest: enableExpiringTokenAlerts: type: boolean description: Whether to enable alerts when tokens are approaching expiration. + enableTenantResourceLimitAlerts: + type: boolean + description: Whether to enable alerts when tenant resources are approaching limits. maxAlertingFrequency: type: string description: The max frequency at which to alert. @@ -86,6 +92,57 @@ UpdateTenantRequest: validate: "omitnil,duration" type: object +TenantResource: + enum: + - "WORKER" + - "EVENT" + - "WORKFLOW_RUN" + - "CRON" + - "SCHEDULE" + type: string + +TenantResourceLimit: + properties: + metadata: + $ref: "./metadata.yaml#/APIResourceMeta" + resource: + $ref: "#/TenantResource" + description: The resource associated with this limit. + limitValue: + type: integer + description: The limit associated with this limit. + alarmValue: + type: integer + description: The alarm value associated with this limit to warn of approaching limit value. + value: + type: integer + description: The current value associated with this limit. + window: + type: string + description: The meter window for the limit. (i.e. 1 day, 1 week, 1 month) + lastRefill: + type: string + description: The last time the limit was refilled. + format: date-time + required: + - metadata + - tenantId + - resource + - limitValue + - value + type: object + +TenantResourcePolicy: + properties: + limits: + type: array + items: + $ref: "#/TenantResourceLimit" + description: A list of resource limits for the tenant. 
+ required: + - limits + type: object + TenantMember: properties: metadata: diff --git a/api-contracts/openapi/openapi.yaml b/api-contracts/openapi/openapi.yaml index 5c691567a..a02a50ba2 100644 --- a/api-contracts/openapi/openapi.yaml +++ b/api-contracts/openapi/openapi.yaml @@ -56,6 +56,8 @@ paths: $ref: "./paths/ingestors/ingestors.yaml#/snsIntegration" /api/v1/tenants/{tenant}/alerting-email-groups: $ref: "./paths/tenant/tenant.yaml#/tenantAlertEmailGroups" + /api/v1/tenants/{tenant}/resource-policy: + $ref: "./paths/tenant/tenant.yaml#/tenantResourcePolicy" /api/v1/alerting-email-groups/{alert-email-group}: $ref: "./paths/tenant/tenant.yaml#/alertEmailGroup" /api/v1/sns/{sns}: diff --git a/api-contracts/openapi/paths/event/event.yaml b/api-contracts/openapi/paths/event/event.yaml index 2d979b05e..9d58f0d88 100644 --- a/api-contracts/openapi/paths/event/event.yaml +++ b/api-contracts/openapi/paths/event/event.yaml @@ -216,6 +216,12 @@ replayEvents: schema: $ref: "../../components/schemas/_index.yaml#/APIErrors" description: Forbidden + "429": + content: + application/json: + schema: + $ref: "../../components/schemas/_index.yaml#/APIErrors" + description: Resource limit exceeded summary: Replay events tags: - Event diff --git a/api-contracts/openapi/paths/tenant/tenant.yaml b/api-contracts/openapi/paths/tenant/tenant.yaml index ef024a7a2..357e6a846 100644 --- a/api-contracts/openapi/paths/tenant/tenant.yaml +++ b/api-contracts/openapi/paths/tenant/tenant.yaml @@ -269,6 +269,45 @@ tenantAlertEmailGroups: summary: List tenant alert email groups tags: - Tenant + +tenantResourcePolicy: + get: + x-resources: ["tenant"] + description: Gets the resource policy for a tenant + operationId: tenant-resource-policy:get + parameters: + - description: The tenant id + in: path + name: tenant + required: true + schema: + type: string + format: uuid + minLength: 36 + maxLength: 36 + responses: + "200": + content: + application/json: + schema: + $ref: "../../components/schemas/_index.yaml#/TenantResourcePolicy" + description: Successfully retrieved the tenant resource policy + "400": + content: + application/json: + schema: + $ref: "../../components/schemas/_index.yaml#/APIErrors" + description: A malformed or bad request + "403": + content: + application/json: + schema: + $ref: "../../components/schemas/_index.yaml#/APIError" + description: Forbidden + summary: Create tenant alert email group + tags: + - Tenant + invites: post: x-resources: ["tenant"] diff --git a/api-contracts/openapi/paths/workflow/workflow.yaml b/api-contracts/openapi/paths/workflow/workflow.yaml index 30e81f508..73ad6ed7b 100644 --- a/api-contracts/openapi/paths/workflow/workflow.yaml +++ b/api-contracts/openapi/paths/workflow/workflow.yaml @@ -258,6 +258,12 @@ triggerWorkflow: schema: $ref: "../../components/schemas/_index.yaml#/APIErrors" description: A malformed or bad request + "429": + content: + application/json: + schema: + $ref: "../../components/schemas/_index.yaml#/APIErrors" + description: Resource limit exceeded "403": content: application/json: diff --git a/api/v1/server/handlers/events/replay.go b/api/v1/server/handlers/events/replay.go index 72547f48a..1202dd26c 100644 --- a/api/v1/server/handlers/events/replay.go +++ b/api/v1/server/handlers/events/replay.go @@ -4,8 +4,10 @@ import ( "github.com/hashicorp/go-multierror" "github.com/labstack/echo/v4" + "github.com/hatchet-dev/hatchet/api/v1/server/oas/apierrors" "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" 
"github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" + "github.com/hatchet-dev/hatchet/internal/repository/metered" "github.com/hatchet-dev/hatchet/internal/repository/prisma/db" "github.com/hatchet-dev/hatchet/internal/repository/prisma/sqlchelpers" ) @@ -34,6 +36,12 @@ func (t *EventService) EventUpdateReplay(ctx echo.Context, request gen.EventUpda newEvent, err := t.config.Ingestor.IngestReplayedEvent(ctx.Request().Context(), tenant.ID, event) + if err == metered.ErrResourceExhausted { + return gen.EventUpdateReplay429JSONResponse( + apierrors.NewAPIErrors("Event limit exceeded"), + ), nil + } + if err != nil { allErrs = multierror.Append(allErrs, err) } diff --git a/api/v1/server/handlers/tenants/create.go b/api/v1/server/handlers/tenants/create.go index 0ac86233e..43ac7a94a 100644 --- a/api/v1/server/handlers/tenants/create.go +++ b/api/v1/server/handlers/tenants/create.go @@ -1,6 +1,7 @@ package tenants import ( + "context" "errors" "github.com/labstack/echo/v4" @@ -48,6 +49,12 @@ func (t *TenantService) TenantCreate(ctx echo.Context, request gen.TenantCreateR return nil, err } + err = t.config.EntitlementRepository.TenantLimit().CreateTenantDefaultLimits(context.Background(), tenant.ID) + + if err != nil { + return nil, err + } + // add the user as an owner of the tenant _, err = t.config.APIRepository.Tenant().CreateTenantMember(tenant.ID, &repository.CreateTenantMemberOpts{ UserId: user.ID, diff --git a/api/v1/server/handlers/tenants/get_resource_policy.go b/api/v1/server/handlers/tenants/get_resource_policy.go new file mode 100644 index 000000000..868daa2a1 --- /dev/null +++ b/api/v1/server/handlers/tenants/get_resource_policy.go @@ -0,0 +1,25 @@ +package tenants + +import ( + "context" + + "github.com/labstack/echo/v4" + + "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" + "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" + "github.com/hatchet-dev/hatchet/internal/repository/prisma/db" +) + +func (t *TenantService) TenantResourcePolicyGet(ctx echo.Context, request gen.TenantResourcePolicyGetRequestObject) (gen.TenantResourcePolicyGetResponseObject, error) { + tenant := ctx.Get("tenant").(*db.TenantModel) + + limits, err := t.config.EntitlementRepository.TenantLimit().GetLimits(context.Background(), tenant.ID) + + if err != nil { + return nil, err + } + + return gen.TenantResourcePolicyGet200JSONResponse( + *transformers.ToTenantResourcePolicy(limits), + ), nil +} diff --git a/api/v1/server/handlers/tenants/update.go b/api/v1/server/handlers/tenants/update.go index 9c5617daf..be565a1bd 100644 --- a/api/v1/server/handlers/tenants/update.go +++ b/api/v1/server/handlers/tenants/update.go @@ -43,14 +43,16 @@ func (t *TenantService) TenantUpdate(ctx echo.Context, request gen.TenantUpdateR if request.Body.MaxAlertingFrequency != nil || request.Body.EnableExpiringTokenAlerts != nil || + request.Body.EnableTenantResourceLimitAlerts != nil || request.Body.EnableWorkflowRunFailureAlerts != nil { _, err = t.config.APIRepository.TenantAlertingSettings().UpsertTenantAlertingSettings( tenant.ID, &repository.UpsertTenantAlertingSettingsOpts{ - MaxFrequency: request.Body.MaxAlertingFrequency, - EnableExpiringTokenAlerts: request.Body.EnableExpiringTokenAlerts, - EnableWorkflowRunFailureAlerts: request.Body.EnableWorkflowRunFailureAlerts, + MaxFrequency: request.Body.MaxAlertingFrequency, + EnableExpiringTokenAlerts: request.Body.EnableExpiringTokenAlerts, + EnableWorkflowRunFailureAlerts: request.Body.EnableWorkflowRunFailureAlerts, + 
EnableTenantResourceLimitAlerts: request.Body.EnableTenantResourceLimitAlerts, }, ) diff --git a/api/v1/server/handlers/workflows/trigger.go b/api/v1/server/handlers/workflows/trigger.go index 62555357a..8898d538e 100644 --- a/api/v1/server/handlers/workflows/trigger.go +++ b/api/v1/server/handlers/workflows/trigger.go @@ -12,6 +12,7 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/transformers" "github.com/hatchet-dev/hatchet/internal/msgqueue" "github.com/hatchet-dev/hatchet/internal/repository" + "github.com/hatchet-dev/hatchet/internal/repository/metered" "github.com/hatchet-dev/hatchet/internal/repository/prisma/db" "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes" ) @@ -83,6 +84,12 @@ func (t *WorkflowService) WorkflowRunCreate(ctx echo.Context, request gen.Workfl workflowRun, err := t.config.APIRepository.WorkflowRun().CreateNewWorkflowRun(ctx.Request().Context(), tenant.ID, createOpts) + if err == metered.ErrResourceExhausted { + return gen.WorkflowRunCreate429JSONResponse( + apierrors.NewAPIErrors("Workflow Run limit exceeded"), + ), nil + } + if err != nil { return nil, fmt.Errorf("could not create workflow run: %w", err) } diff --git a/api/v1/server/oas/gen/openapi.gen.go b/api/v1/server/oas/gen/openapi.gen.go index 98afd4a8a..81376b7a8 100644 --- a/api/v1/server/oas/gen/openapi.gen.go +++ b/api/v1/server/oas/gen/openapi.gen.go @@ -114,6 +114,15 @@ const ( OWNER TenantMemberRole = "OWNER" ) +// Defines values for TenantResource. +const ( + CRON TenantResource = "CRON" + EVENT TenantResource = "EVENT" + SCHEDULE TenantResource = "SCHEDULE" + WORKER TenantResource = "WORKER" + WORKFLOWRUN TenantResource = "WORKFLOW_RUN" +) + // Defines values for WorkerStatus. const ( ACTIVE WorkerStatus = "ACTIVE" @@ -666,6 +675,9 @@ type TenantAlertingSettings struct { // EnableExpiringTokenAlerts Whether to enable alerts when tokens are approaching expiration. EnableExpiringTokenAlerts *bool `json:"enableExpiringTokenAlerts,omitempty"` + // EnableTenantResourceLimitAlerts Whether to enable alerts when tenant resources are approaching limits. + EnableTenantResourceLimitAlerts *bool `json:"enableTenantResourceLimitAlerts,omitempty"` + // EnableWorkflowRunFailureAlerts Whether to send alerts when workflow runs fail. EnableWorkflowRunFailureAlerts *bool `json:"enableWorkflowRunFailureAlerts,omitempty"` @@ -717,6 +729,35 @@ type TenantMemberList struct { // TenantMemberRole defines model for TenantMemberRole. type TenantMemberRole string +// TenantResource defines model for TenantResource. +type TenantResource string + +// TenantResourceLimit defines model for TenantResourceLimit. +type TenantResourceLimit struct { + // AlarmValue The alarm value associated with this limit to warn of approaching limit value. + AlarmValue *int `json:"alarmValue,omitempty"` + + // LastRefill The last time the limit was refilled. + LastRefill *time.Time `json:"lastRefill,omitempty"` + + // LimitValue The limit associated with this limit. + LimitValue int `json:"limitValue"` + Metadata APIResourceMeta `json:"metadata"` + Resource TenantResource `json:"resource"` + + // Value The current value associated with this limit. + Value int `json:"value"` + + // Window The meter window for the limit. (i.e. 1 day, 1 week, 1 month) + Window *string `json:"window,omitempty"` +} + +// TenantResourcePolicy defines model for TenantResourcePolicy. +type TenantResourcePolicy struct { + // Limits A list of resource limits for the tenant. 
+ Limits []TenantResourceLimit `json:"limits"` +} + // TriggerWorkflowRunRequest defines model for TriggerWorkflowRunRequest. type TriggerWorkflowRunRequest struct { AdditionalMetadata *map[string]interface{} `json:"additionalMetadata,omitempty"` @@ -745,6 +786,9 @@ type UpdateTenantRequest struct { // EnableExpiringTokenAlerts Whether to enable alerts when tokens are approaching expiration. EnableExpiringTokenAlerts *bool `json:"enableExpiringTokenAlerts,omitempty"` + // EnableTenantResourceLimitAlerts Whether to enable alerts when tenant resources are approaching limits. + EnableTenantResourceLimitAlerts *bool `json:"enableTenantResourceLimitAlerts,omitempty"` + // EnableWorkflowRunFailureAlerts Whether to send alerts when workflow runs fail. EnableWorkflowRunFailureAlerts *bool `json:"enableWorkflowRunFailureAlerts,omitempty"` @@ -1357,6 +1401,9 @@ type ServerInterface interface { // Delete a tenant member // (DELETE /api/v1/tenants/{tenant}/members/{member}) TenantMemberDelete(ctx echo.Context, tenant openapi_types.UUID, member openapi_types.UUID) error + // Create tenant alert email group + // (GET /api/v1/tenants/{tenant}/resource-policy) + TenantResourcePolicyGet(ctx echo.Context, tenant openapi_types.UUID) error // List Slack integrations // (GET /api/v1/tenants/{tenant}/slack) SlackWebhookList(ctx echo.Context, tenant openapi_types.UUID) error @@ -2295,6 +2342,26 @@ func (w *ServerInterfaceWrapper) TenantMemberDelete(ctx echo.Context) error { return err } +// TenantResourcePolicyGet converts echo context to params. +func (w *ServerInterfaceWrapper) TenantResourcePolicyGet(ctx echo.Context) error { + var err error + // ------------- Path parameter "tenant" ------------- + var tenant openapi_types.UUID + + err = runtime.BindStyledParameterWithLocation("simple", false, "tenant", runtime.ParamLocationPath, ctx.Param("tenant"), &tenant) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter tenant: %s", err)) + } + + ctx.Set(BearerAuthScopes, []string{}) + + ctx.Set(CookieAuthScopes, []string{}) + + // Invoke the callback with all the unmarshaled arguments + err = w.Handler.TenantResourcePolicyGet(ctx, tenant) + return err +} + // SlackWebhookList converts echo context to params. 
func (w *ServerInterfaceWrapper) SlackWebhookList(ctx echo.Context) error { var err error @@ -3180,6 +3247,7 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL router.PATCH(baseURL+"/api/v1/tenants/:tenant/invites/:tenant-invite", wrapper.TenantInviteUpdate) router.GET(baseURL+"/api/v1/tenants/:tenant/members", wrapper.TenantMemberList) router.DELETE(baseURL+"/api/v1/tenants/:tenant/members/:member", wrapper.TenantMemberDelete) + router.GET(baseURL+"/api/v1/tenants/:tenant/resource-policy", wrapper.TenantResourcePolicyGet) router.GET(baseURL+"/api/v1/tenants/:tenant/slack", wrapper.SlackWebhookList) router.GET(baseURL+"/api/v1/tenants/:tenant/slack/start", wrapper.UserUpdateSlackOauthStart) router.GET(baseURL+"/api/v1/tenants/:tenant/sns", wrapper.SnsList) @@ -4330,6 +4398,15 @@ func (response EventUpdateReplay403JSONResponse) VisitEventUpdateReplayResponse( return json.NewEncoder(w).Encode(response) } +type EventUpdateReplay429JSONResponse APIErrors + +func (response EventUpdateReplay429JSONResponse) VisitEventUpdateReplayResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(429) + + return json.NewEncoder(w).Encode(response) +} + type TenantInviteListRequestObject struct { Tenant openapi_types.UUID `json:"tenant"` } @@ -4536,6 +4613,41 @@ func (response TenantMemberDelete404JSONResponse) VisitTenantMemberDeleteRespons return json.NewEncoder(w).Encode(response) } +type TenantResourcePolicyGetRequestObject struct { + Tenant openapi_types.UUID `json:"tenant"` +} + +type TenantResourcePolicyGetResponseObject interface { + VisitTenantResourcePolicyGetResponse(w http.ResponseWriter) error +} + +type TenantResourcePolicyGet200JSONResponse TenantResourcePolicy + +func (response TenantResourcePolicyGet200JSONResponse) VisitTenantResourcePolicyGetResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type TenantResourcePolicyGet400JSONResponse APIErrors + +func (response TenantResourcePolicyGet400JSONResponse) VisitTenantResourcePolicyGetResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type TenantResourcePolicyGet403JSONResponse APIError + +func (response TenantResourcePolicyGet403JSONResponse) VisitTenantResourcePolicyGetResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(403) + + return json.NewEncoder(w).Encode(response) +} + type SlackWebhookListRequestObject struct { Tenant openapi_types.UUID `json:"tenant"` } @@ -5857,6 +5969,15 @@ func (response WorkflowRunCreate404JSONResponse) VisitWorkflowRunCreateResponse( return json.NewEncoder(w).Encode(response) } +type WorkflowRunCreate429JSONResponse APIErrors + +func (response WorkflowRunCreate429JSONResponse) VisitWorkflowRunCreateResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(429) + + return json.NewEncoder(w).Encode(response) +} + type WorkflowVersionGetRequestObject struct { Workflow openapi_types.UUID `json:"workflow"` Params WorkflowVersionGetParams @@ -6020,6 +6141,8 @@ type StrictServerInterface interface { TenantMemberDelete(ctx echo.Context, request TenantMemberDeleteRequestObject) (TenantMemberDeleteResponseObject, error) + TenantResourcePolicyGet(ctx echo.Context, request 
TenantResourcePolicyGetRequestObject) (TenantResourcePolicyGetResponseObject, error) + SlackWebhookList(ctx echo.Context, request SlackWebhookListRequestObject) (SlackWebhookListResponseObject, error) UserUpdateSlackOauthStart(ctx echo.Context, request UserUpdateSlackOauthStartRequestObject) (UserUpdateSlackOauthStartResponseObject, error) @@ -7059,6 +7182,31 @@ func (sh *strictHandler) TenantMemberDelete(ctx echo.Context, tenant openapi_typ return nil } +// TenantResourcePolicyGet operation middleware +func (sh *strictHandler) TenantResourcePolicyGet(ctx echo.Context, tenant openapi_types.UUID) error { + var request TenantResourcePolicyGetRequestObject + + request.Tenant = tenant + + handler := func(ctx echo.Context, request interface{}) (interface{}, error) { + return sh.ssi.TenantResourcePolicyGet(ctx, request.(TenantResourcePolicyGetRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "TenantResourcePolicyGet") + } + + response, err := handler(ctx, request) + + if err != nil { + return err + } else if validResponse, ok := response.(TenantResourcePolicyGetResponseObject); ok { + return validResponse.VisitTenantResourcePolicyGetResponse(ctx.Response()) + } else if response != nil { + return fmt.Errorf("Unexpected response type: %T", response) + } + return nil +} + // SlackWebhookList operation middleware func (sh *strictHandler) SlackWebhookList(ctx echo.Context, tenant openapi_types.UUID) error { var request SlackWebhookListRequestObject @@ -8078,160 +8226,166 @@ func (sh *strictHandler) WorkflowVersionGetDefinition(ctx echo.Context, workflow // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9bVPjuNLoX3H53qp7TlUgAzOzZx+qng8MZGbzLAPcBHbq3C2KEo6SaHFsryQDOVP8", - "91t6s2VbsuWQhDDjL7tMrJdWq7vV6jd994N4kcQRjCjxj777JJjDBeB/Hl8OBxjHmP2d4DiBmCLIvwTx", - "BLL/TyAJMEooiiP/yAdekBIaL7zfAA3mkHqQ9fZ4454Pn8AiCaF/dPDh3bueP43xAlD/yE9RRH/54Pd8", - "ukygf+SjiMIZxP5zrzh8dTbt3940xh6dIyLm1Kfzj/OGD1DCtICEgBnMZyUUo2jGJ40Dchui6N40Jfvd", - "o7FH59CbxEG6gBEFBgB6Hpp6iHrwCRFKCuDMEJ2nd/tBvOjPBZ72JvBB/W2CaIpgOKlCw2Dgnzw6B1Sb", - "3EPEA4TEAQIUTrxHROccHpAkIQrAXVjYDj8CCwMinns+hn+nCMOJf/RnYeqbrHF89xcMKINR0QqpEgvM", - "fkcULvgf/xvDqX/k/69+Tnt9SXj9jOqes2kAxmBZAUmOa4HmK6SgCgtI6dwBANb5mDV97vlJTOg8njn2", - "upStWcdlGEfHSTK07N0l+842xRuecvJJCeR9GG14AdtTkiZJjGlhuw4O33/4+Mu/ft1jf5T+w37/r3cH", - "h8bttGHpWOKkiCm+LvFnFXQJF5x4bFDixVOPYRZGFAWcHXSI//TvAEGB3/NncTwLIduxjBIqxF7ZchvY", - "QyYnMFDCoURzESNzA+If55DOoWRVlA/BeEZ28uKI8zeKCAVRoPHGXRyHEEQMCM40RtywLwwhYogcxqoM", - "aGQ6yZlqMTWUfpkTaYngE/RbTKiFAmNCf4tn3vHl0JuzVjqMc0oTctTvS/rfl18YcZqEFEjQ73DZPM89", - "XBamSeb3tznpgrtgAqfO5DuCJE5xAM3MHmDIBOCxZfUULaAmOrEcy3sExJNdC5Aevjs83Ds43Dt4f3X4", - "7ujdL0cfft3/9ddf/5+vHWYTQOEeG9iEImQRBGgi6EUDouehyLu+FoKBDa0Dcnd3ePDh13f/2jv88Avc", - "+/AefNwDhx8nex8O/vXLweQgmE7/C+pApSliK1mApzMYzRizv/+l5y9QpP+zAm2aTFbFXggI9WT/daKw", - "xB58Vfkm6yBbWOUqvocmafGUIAyJaanf5lBIA0a7lHX3ZOt9531fQAomQFBowxFSIGirmLkqiZkMtv3i", - "Nh9+/NiEwwy2XiZtMmQYkRgEMKHD6AFROIJ/p1DIliI+Ef8sMNuSaNsQac9/2otBgvaYhjmD0R58ohjs", - "UTDjUDyAELF98Y+yFfc4KzxXCEnAa1rvCScvRTrWFZv36VjsklANX7RNfHwX+EgSRwRWAaSK8quUVACr", - "Hgwxih2OyzQMJY4+43gxpjAZpQaGu8MgCubnEmn1c2ptb7KJxudj7fy3bguNExQcY9vCF+A/ceQpnvPY", - "HN4/jkfn/1SMNT4fe3yMKmpWIL4Fiv77oLcAT/99+PGXKhVmwNrxewUjENHjEGI6WAAUfsFxmlhXD1kT", - "YiLLEBHK1ihaKOUTs4uKo2a2wvIn6AH2+IzVtUtQm1beIHfE4Ma95p/UtrK1sluc4Pu17K1aV8/HcQib", - "JL1YzVe4uIN4xNob8eHLwZqw0lIqlU8PygdZBxb4MkiYzsyTsi/rn7Qnb85cQjxbVGgOlAmPgwcYGTAH", - 
"JhPEIAfhV+3wLrFR1sZTp2gm6iEbVltdPuG9SUlm2LmHS2v3tegSAuVupJm3N91eGbjD0+JWli0O0h5h", - "XchjjO+nYfw4SqNxulgAvGyCjG/Vt2q3GpVGXDayhdyoDT8FpvuCeZvZYqt76/3jf8YX597dkkLyz+aD", - "kw+dTf/7y2hAjXGGTEyfgBmKsmtxHUIvs5aZ3sDl16O7sSZbTvXmrgDdFShrQLzAE4g/LU8RhoECCUbp", - "gu0cIIEvLJGa/Cjthez/WdnpVN/8bmLtOoYAB3PjeWuj9woupwAZbR1c0KfsjGGsKlp5OI2KVxe7+TWB", - "0YTB0jCwbNZm5L9TmDZDLFq1GRenUeQAsWzWZmSSBgGEk2ags4buozM6/AKp1JZP0XRq1+MnaDp1J3xt", - "yEZzqhiZyagv3Dp1nCTDiFAQhhYbGwiCOI3oLXgAFODbFIdGMlbNIrO23/ORNsstgZSiaEasw618ANpP", - "CTsAJeh7pjWbtAqBwU/85mK7/dQghNxO4BSkIdU+Z7ZH4/VIwad1tcM1gklchQrDJLbDxL/GjxHEzTc2", - "rW1PG9YE0P/EdwYar3P78ONYc/xIJeSv+G5/Q7aXypiEwqQdD1aZr6heVe9aaAHjlJqXLz82Lf0BYoLi", - "yDiDnRkysPQBMuOQWLplJ41X/QBEAQxDZUh0s5hlnTL/o73JCAIiCMXgOIsQmbeb+i9BkXU7yohWtLTs", - "3guIDkNS5Pscw4QCTNsthlBAU+KwHqZeiLaSvkdp1PqYWYHKg3uI61mgzXK1O0UTyJpeVeq5Or8UB1EE", - "ku2CnWvG2TYpzfFycH46PP/i9/zR9fm5+Gt8fXIyGJwOTv2e//l4eMb/ODk+Pxmcsb9NKuYZiu5zmU8Q", - "jfHSai2YIcpa5adWVfLgbBRPnDtGwSMHOrdaH7RhmFypG+RCHTm1o/DDxjiMfrbbrrIGcFr5SirW5MKU", - "RXyUFtYrYd1EI+wCZfZ5uvrTy10NfCon4VZkYlc/t3ptyxw2xpsbg9ioqe4K+GY1ukkN10CU89loQlcy", - "YWHRLcCTdGehCE12rDg+1zUto2vegro901o5T64N3YxxfYIbCVvRwUBemZSK0KyRhsYhCO6/wbt5HN+/", - "+iI1WNa1xHh2hiLYKjSAnQj8M7tdMHVD6dlhPPNCFME2jl8RZWacgw0nGzTeXGy9RQuDnbmELd1Jnoe+", - "ZTPc5Kg6gw8w1DWR08Gna6Z9DM8/X/g9/9vx6Nzv+YPR6GJkVjm0cTKDmNP+FyAwiQz5/fXtiYqszOeS", - "+PgCm2JxhJZWRdm5xq5oQIDuqf/uBynGMKK3Cafdw54fwSf1r/c9P0oX/B/EPzp499wrc1ahsylyRLbw", - "EkGF2cSHToY4DRZj9BV8qo783m3kfF3GgJeYglA3e7Km3FofIkKF7yqPcX3nYvczSCz94Ko7Cj8BAnNN", - "vbLHWsvfIJi4tRyeai10O3De5Jwvv7EZu9DAFme0aF8c4wrR0G6LEvr6eZ25SjS5cLdZ6R0qs5QxZYDV", - "hCnbVvQsm2lA402RLDLcKnkQJ5AdgkEYk0KQYI6NEWTk9fME7YxgEoIl95/YYwXY1+GkKPS3HWNXH9+s", - "ILzhS8JpJO0sNVuYpCbbUQVzrBkbtaRXGgacQUKvsSWo4np05tHYIzCa8FAZqVoQj8abcV7bbvBphP5O", - "oYcmMKJoiiDOvKjSOS0DFUVEjx72ewfDOJopiMvbWd2wzQUUudmYaoOEChp0VeedgyiCoQ2H8rOHJmZX", - "PmGDe49idOP2yhHsdh81BY8+WXGSF9EQWNhWz769YOmsu33dfPCXLHonqN+NPhUiMnQX6aKnkaGRhClM", - "TK5GJZ8MRIfCCYZFe1iDoN2Q7T4BWGVRuUOCIZiAuxDajZPiexYlDj1CYdJIJi9yKVlmsFOAtooCOSgT", - "uNxAob7XbP0GXEjHdJDEBeVX01nX5GjiRKi5FFrSwGo0DK0gr+L3yvvUoKusYBTcZg5eF+kkzNqvnwfj", - "lNpAXJE9eRTK8ZSKW4QbMtfuxRNdanbGzdMnWazo6nN1YLO2NtniIHjarDjrUrPixxjbnIdOJ1VGgdnK", - "aj11eihNRULJoM4qIccTdiCb8RJjNEMRCJsXIMIYs/bauDc5ZJZIVh6vYsagYrHKWtHE3EGzZxpIXklH", - "BwKUVzPeg1EWfIAY0WWb3mPVJ3dY19DfZ4QJHUMhYt1p8Ay062VKUCpOXxpXhz3DoYYQ3VwrdrKGNncl", - "3LJAkEYbqYEMNIPGaPB/rwfXg9Pb84vbbxej3wcjv5f/ODq+GtyeDb8Or/yePz75bXB6fTY8/3J7Nfw6", - "OL29uGY/H4/Hwy/n3EE+vjoeXQmf+fB8OP6t6D4fDa5G/xbu9dyT3vPZWBfXV7ejwefRQPYZDbRR9cnG", - "Zxes5dngeJyNORyc3n769+31eGC2khspWUOBZm2X0I2GV8OT47O60erCCORft2INXwfnJTS9KMzgKoss", - "L6ntIcQyy2FgyUX5pvJxY4+3VleWBe9F9o3JtyAC4ZKigFwk9CKlNaPmd6A5IF6csIuX1HOzQcxzbDxp", - "z5YB8eIUiuYUP2s2hDG/aLuJRZsJ8azJLzKueQekqHkvTHlYs3hPkJw/YhNwCav1RtFsLINat8eiImd9", - "8JQgtss8jIMDUz++6CWmId4jz73lESkewNADSYJjEMxRNBNJuBzBdfNrV7LPAIUphg5AcHOmDoKK6+IR", - "3TyO3jxnCIjAuN2xzDOjeeo0iCQ2HwFhU9IWbmXwpDb2M7+fRIEln2QBnrypauIB6j3OUTDPdnK9BjY7", - "9xkBtvPiMPNEbCa97zlLcrYbQ7TUdjHMVtO+V8shbLITSr61WTnVZzvWRIs6OycfoZD0vMIpVUh+zPdK", - "T+RqoJ2dEd+SlNtJbbGnVfhfjaDccwYZ6zW1viYQix6X6V2IgjpS4OPVpMHqMO/Mpsv9W2XTR3KflAJ/", - "8e2c336OT78Oz/2e/3Xw9ZPlSnGF0WwGsXbkWf2E5uzWqhmgrTvxmtff+BmT1PWVN/jZ15IfbmUIHRD7", - "/r/he9nPqFhuUOFzZ5F4gWiEwt4kVVGoL7/qmiKgronp7GvU/cBkgiEhug5YUNWUUlFVBdmH3wCZm2TQ", - "HJC5PuT/IaXppFQSapSo6DYWxdG8kzmg1gn/gDizDNt5hGuyjEMeZHP2K8JFGMw0MwfkEhDyGGPXOYCX", - "yA4egXSLVpEJIkkIlgWSUfvXWmksYvfGQmAncxDNoEKQvYAGfLQjkVM5fMyxplReM+wrHEZqZL7upBaQ", - 
"DIha/L0MhkoItvzSK+DJhvKzeIai1Uu3rMbfL6rksnMYV2tMmnA9gjNEKPv/G0K321liEQw7uFuqnprr", - "pulKH5mjhLzVC03lgrfF03wTp4yYzLRt37gP2havZDEvyY/C6CK82F4AIi+BmK1vv5WtHDwAFDKNVEWd", - "NNS9qE4Ln2CQUugFcSQj88PlvrkcMyIJr7WEhw2VLHmgG5pFcOLlnXagpmUIeBA2pncQ0GNaa/7LscSN", - "tgRG1APeXPXeX29tUDYH43cYQTwgFNyFPBpnhyBcgCc7jS3AE1qki/XR2sbdbwKqfXP0TgAjVY6F2MLy", - "WBseKCdvb6r+dz7wS3Pn85gegyOQf6suRhmOjk+uhn8MeN6W/POmrZ/QLvB24GCSktcYXaAu56bKIkkY", - "Lxew2aSpxjjNepzE0RTNGsvSW+qTqKv+vqXmhGWb2RfTEE44knUqymTFhEX7CglbYUgrhpQGZpCFYLY6", - "htQar4DxcJUFUNpRJRvvD9ExCzyvS/NwZTs27omSm4GhDNgMUu175rovmTQiqX9Iy9AMUsJxF+RdvRnr", - "m11nNUIw7k2IFoiOKQYUzpY2aSW+ejQW1hJVVVmflY/j8VrSIJjDiS7ORPzJ7fD89nJ08WU0GI/9nn86", - "uri8PR98G4yv/J7Pw4Pyf34ZXVxf3o4urs9Pb0cXn4bnRqN5y0MtP7eKtrtyma/3h+Z0v8K+y6nLCOwZ", - "N7KOKioy6uco7TGzlSlbqSiDcTSbJzXXc8V43nGSeHrdD6dkog142luUGrEv+UajLZETWjaLZsQ/PDVu", - "jeptVhRelGKxZR2D6xFOz1Ko1l8hxSgwhNpwqfo7XJ6oiFzDYVbKMa4K5Xu4JGaNWQ3PhErNFCUNnQl5", - "4JEEBmiKgnwS7x8JIAROvAcEvCkKKcT/dExh/laswuTiesx/vdRaU5xCw/jyIm/N/V1vXkTmBG0Fowgu", - "dye1PDFibcFwKuFByLBt37TF3GM9MHvbIGysXppekzXLpKhPgRC+eTj5tGwx+JXWS6tRJpXLlrqoYYSX", - "Vzr7Q6sQKHFXXOxNvXzYkYukds1xl/PrqtsmNVdzZHVlMoWxtmvTCLUkYCwEZyiZE0eXmkSpJsHhOBoz", - "xT211G2ADw4X7yxrQKa/byZJsyXpZ50a6Jmc8GRBq9ulUCGwSKObrCRQmrZpEVYNhmegtKE6NdSJ6NjE", - "T6Xmlfkl9xhzkxTnGT9KDjN+U4xq/JjzruFz3WquwMyEvzDG6zEZvdimYnZWCQjrCETKiBPMZO7ULCZq", - "0i9vkYXzmiaUuUJTSyWPW1sK3gunJeYVtj9fSngz5fM+VNJTWwyc4We9OqQ49M3oy/WAW2kpa49mTZkp", - "80rB1OWCCd06pllVX2IrfUkmMp6UUoZthqEef2yRHZtXeY6+TYdsSxdEM2SaBYb86CR2HjXTuuvVWfVp", - "eeYqmBUmCwPdNJPUKWTXPXNdGwwei58NJijw6P37+OuZN8katpeqxXkcgDY/ZLglKvwJqIQnGwcpRnQ5", - "zh84vYMAQ6zeQRUvn7JrKv85X+CcUp6AFcTxPYKqOWIYEj8pE/6RX3nNVz6Pyf1TKJrGZiSrh5OPL4c8", - "U5hXQPOLv2a75B/sv9t/xzc5gRFIkH/kv98/2H/HFVY650vrgwT1Q/QApYegOu8X5QFgrSJIiJfd5BgN", - "ZnZQ/0x+/8LXheWFi89y+O5ddeDfIAjpnIvRj6bv5zHN5izsjH/0503PJ+q5EwZh3lD5gv6U4wdzGNz7", - "N6w/XyuGYLJsXixrhupWO1IN1rlcDhyPxuXPKHoUg+lUZkDUrT6DtnH5Dwd9IEOF93jcyh436ZH+d/6z", - "/tuzgDGE1KBJnvLfiQey5514FLiIzuHdKxgrRfyLEcTlCSwg5UfXnzVpQ5UZPH4R4vzF6DnnrspSfF0+", - "CJOckDEvvlk931T2/kMVW+M0CCAh0zQMl55A6aTwNlYFec89/4OgkiCOqLwXywe62aD9v2RSfL4Ol0ez", - "ZQRW2V6/ACHDApx4MfbuwMTDeW3kD+/erx0MExSfY3yHJhMoAsdz+hZ0UkdmiuJlmtFNz3/aU+/N8g8y", - "S6lnIIwbfgWgJq+XSJV4CYmLEX4MEheFKGMhO9dCDA7ZQAYyqcUWjeWzwhVsPJtF9FoWYsnErsJeEAPy", - "MeJODLiJAUEtmxMD+gGZoD2R/dP/nv3NT8PE+FT6CD7E9zxjO38EWvjNshlLYiJBPDFJrEl0d5ES2fAW", - "maBg3anjDvPlSTpXzzX8wERN2lC1JB22sVdy5xQZ57/VUXK25QUKFqai/nf+/+e+uhvaVF6+N9kbliDK", - "35Ys0m32NqbQeRvpVbyEaTvChLl/m6S6PprLXwltEvMYUozgg2QAgRG+Hx0XFK4wGmZyHhDOnhr6FzRU", - "oH0RtbIHkqSvR9wQKwOcIUJtcTrVe18WIMS6DUtNN0ZvDo/ZtCPE4iJ3iRYPtgPGdQRSOo8x+g+ciIk/", - "bmfir5DO44kXxUyLCeNH9Sx2fr3/XrAg/XnzXLjvN5Gr4h3RxI03+t9n8z39l+c+D7Fz5pksIA/BBpbh", - "jwW5HB46ONYzpAT2Gz1NbE8ptWPpwh50HP12ObrETGWGrpyGZSZ4Ecvz39lfezyy9jn/N2O55/6dfE/M", - "WTRkHWrFwqe81VuTDD2XCGUrkDmqa0FsO6l679c+p2zhPuV2JGDlvbp2QjCjtk4Avl0BqImMdQi//qP2", - "5IXRgqPNPQvjOxCq1xYsQksYbr7wpt+yls0+oALhJjhm/4CTbLKOZneJZoteNkEhwEQhzRq3osD+d/nH", - "sxMtSmOnCy0Ko2dOi42HqBzUen4+amS9VY2645gfjmMqdFzHMQtYb6wk2buWWQaiCoDgB0EUwAqnqFwS", - "u69+XeiTCVhtVJYsWmVXiLkh2EAP+Zf7mOXq8PNZdd6LE5EQI11wpU3uo9IDuvbrBAhDr9DatsHCKFdo", - "uFGd1fR4dqvNV0UMC6vbJUIoKmmlTajuv77J/N2q/nf+P4dgEm+sv3NV2WL98TL32JHCmNbDjoO4k0Ei", - "RZx0x93rH3flwJQy1Sqe4L/X+S4E0RU5JiL97yQiTtxSfKutyi8RacEmpYffrIwiRerOsUkJGR2j7CCj", - "VAg2Y5XzcS2jRMTAJuLzs3Jy2y9UbF7la66wSOsQLRtnZNBu1cwovKbi/aeVXOwaDIcfPxaAOOiubN2V", - "zenKRihM9nDKDy/553NfPEO/l2A7Z57wJh7wkjQM1c7I0KksR6DCtCJnXDCuGOESuzCwqsVlP9wk7G8v", - 
"lFKiIX8E/DOOF1nlMHMQJS9c4NHYC0y7sNUAyrbgFySMAF/cpgor+LkDbNisH7Yz63nMmDaNyue+ZO8S", - "WSlBkiX31J38iiObxc1EPrxYH+OGplNV0kRJgztIH6EsN7WICVWl+9g3EAm6miJM+C/7NnH0BVL+9ONb", - "kkMb4uYvkGqPYa7ox+Pb2XHwK3Mw45uJIOsNsW2e1Gy3/ok2Jb61MSLrMRCDvhFe7NWUYaKxR+5RomD7", - "O4V4mQMXT6eEW7QNoKCI/vLBWJGpfjpRZe9uaZmSf2454yaFTeVp0xXicUknaLYlaAo89wgIv5qYJE+Z", - "8TW2z6wG7CemIa5HEoXxrF4OES+MZ16IIliWRVWt4CyenaFIvDPWiaHdEEM9+xNSIXyAIWHzilJ2NRPz", - "loWZa31Ekg5Yr88IhhPbygkEOJh7fDYNjmmMLYCIDm0BGYteBiC+8fcMY4+nydvXzz9/Woq1tJz8Qu9r", - "wYOYfoIwDKSRsAaKU63ZKpDk/Tcc3aZJg6bziZFkdzhZEkX4qZBJYe0sOItn7Y8B8Zk02aeIB/g7PpZk", - "PhF5I5r6mzTvFB9tq0+Nzcw6r5AI28piQ7OnIbs8V91okhFbU1ariaIz54iolliT3c6T/Z4QoSia1RP4", - "23GUbCFd3Y0J8yI9r5qY3vHj2vLOW2SZ1/KluQZLfdQbyLRVWw48aapH4Xod2QkO3maxhhUsB/ZN6Hin", - "oK7VUas7M/VaqGjtC7Vk2tvPerjpGub6arE4q6AHr1yLpXoCdrVYXHXUF9VicTsl+wRS9n/SXLdNdfFU", - "l/pKLBq5oGg2ln0cy1v8JMekhpgXnJH6nnSsVPCwWdG0Nj7KChrVO9qy+kLErX5Rp09mWQAcH6smrapY", - "p87WV1YesyJIpF1lpCaFcYViXZ2OyBGgaF1TCzcfGpZP2vHXuvhLMsKKpcfqDxyHqA7Ck7oKoR2it6Xs", - "2Fs5a35mN+o9XDo5UVm7wqxOJfg5GfAi2dXnTOwwaU/ROcGWy4rWAGpv4q0GIk4jWW4aOsGq2jq7P83P", - "Bb2SS5rv5+s4pPnUO+CO1uHQndE1xJIlP9/DpfcAwhR6CUC4Qi/Za2p/MnY7OOJND/we+9eh+NchE++m", - "9Rge4TMyQ9MrQxuvwNh51NeiZUMVL+lYd9Hl8O9zIe+oAYiTw0EL+B0uu0tnfhSuRP8c2R0PmHjAk5rJ", - "OvkAwyQEy7rq0ey77t8THS0coGpG80F/3suoQIB81KrWQaGKEPMTEiu8bc8J3yY0mQHXHVXWWtkMPWs+", - "rFD0gChs63pXvczuhCH/2p1Tyoug4WMl/4HCduc1MDnWc1rckDddTFBL651tVPOfC5S4uc0Fbl/VVy7A", - "XcVFLgmjY0uzXzzjm/U48SSfqx/2xL/bPdLlwMqtn+XaLUtrka/qYdvL0PHWz9ZG7jW8ObZj3GuqGJPt", - "jy2uv7iPbd7ycuCEN14aZgc5YbNB2audu68Wlu3IuYZnwnaZc2W4dGvOrTv5FnBxJx82bnFHU73MLP6V", - "f+3uaIoaNXysdEdT2O6UQdMdLafF9eiCcrz+d/GHS7lAIIHwpjheNAVECmr4MVRBuWwbbOLz9osarp13", - "V9EBfw6ufQOFVjImLWzM2uSFKDVaG/tZKGNKaqvvdqemCPvUcdImNLqE6q4g4g7VKtV4wVLruqGurwMj", - "9gkFmFrZccy+imyGi+OUzj1jGcRrArFQtjlAFwyhvOdb5Mz37w4b6ohylMnDq4CVOQQTeTkIY0EwRVop", - "z/3c6mkajtLijIoQ2A6sTAdNofilYrnEVLu2k8NSDp+PCzX/W0jiMpY7WbxzsrjKCE5loxszABzqp3du", - "LY6AIn/VBv6vj2aLkzq7p7pC8DvM0FbOc+To2hPVUNOtth6sXgJ2KTjXVtn1zZpAfvRSs641oosmy6yW", - "YGf22LWyj4wx11rq0UlO9AMQBTC0x2geUwoXCeUhhKKtQyVaWaFeDN1JkLctQSaIcNeyFCGCCMLd0zFe", - "OUaziVG2xdAYso41MdesgzMP8+YdC+9iFDhOI7lVDY7/7LkL8VS+abnPO6GpdDHgtTHgIkdy6wIlX1Pt", - "AxOimWOp+i+QjsWwnWh5Pe1Ajhff/QUDuuJNQu57d6HY6QuF2qWNSI3HGN9DXG9xCENPNGuor/ONN+rM", - "+yJhfaWwpK7kh6XYlCTA0osuq3uyFKLlian/s8n+Vqh70MgQsm7BWzbHFRZsfXhfw+Ab5lq5XSuybWee", - "M3Nuhpt2DzIVaGp1fu4n2OGhdv29N1J6zdGoDmvkwgbR3gEkHatvCsDCk5uECj+nrcgMdK5oom3emHfc", - "vPtfp5cVC4Sp0PEC6Xbyp+SKL2Jn4xKIOCnTvKWb9tAp1PnR3KnU6z+Y2/GEIxM0+qZOlJn9DtBgXq4n", - "RupO27fjm9qQ/VjDBRHIcLUiS+dGWWclazcq54+IsH895gAPJ6RQjOtFCK6Wq2tpF5MOMYMa34kLPQJG", - "kE2ZZMpSo94m5So5+OCuZygHxfkW3lXj3NVqnHrJIzbnDNJsa/ctE/P2w4m/rTuRO2Sqy+aBSwBmSLPc", - "20pgicbfdGG8JfgMHhcjbNLFtFm4drqEaVcyswZ1L1D/uzO9wTa3mWsAN8ct2I4EpCmuVYDGSL/ImLJ7", - "G1v7VznjWz3ru6OwOwq3exR2p031bq2kyAtOHCW8upNnIydPSiAm/SDFWC6l/pkt2dBj3YypqV8gPZGD", - "bZDGeApmO6LiEHdZQa+fFeSagcuIvERuxQzcKhnPEJ2nd3sgSfoBCMO7uuIHJzGTwhS2SLj+woc/ThKe", - "dH2iZmid0xzIqbef1awWXZfYXIvW9WawFxGqsth/lAxxGyI3SJs7TJhro8ON0OBuEuCL6S2OZyHcDL3x", - "oX9wehPoWzO95Yj74eitqah6/vpHsYY1N0w4qZdsBL2MIvF3qYq59uLGT1XC3OXy4nqsupU4t9JeHwQB", - "TGhNSif/3q4irOjjb8YXLQavFDG1+I9rqE+svCvVXZ+oyJHUWKrbTl8Ycr94TYoh+96OvkQff1O5cmzw", - "NdCXWHlHXw2JagxJK9BXGM9QTebqWTwjHoo8wM/G/RoF44wPtKGyy+wIZuNv6RVYJztPGM9mcOKhrujL", - "bpl3isc6oxpXO04Yz+KUNjBDnFI3bmBD7QiNMlA6In07NkhBPa5kK6vqzlHS4gqkdXK7Bum1f3k3GZO1", - "UQI3T9r+PqSjqLsTrXIn0jHYTJIJIOQxxhO7LJWF+4Uk9VT7OpF6qcbcnI5xMgfRLJtol5SNgEM2yRDV", - 
"ifM3JM4FWRUp3YGJMJwxQYbrLn2iBanVSE70p742wTYKjF1iGIW8zg37JvR0RUKuOo8obL0JD0Ne3vpn", - "8LTK9HyReCtf9mhIoTfohiKl3zFVXoxRm6S65Qcy1l0/YoW4ox2TTjtTOKJF3YieIp0KgYuI1iy33O35", - "mizQySl61f3tGi0Usz5Le/tvxDg+5pIB2JUf2o1nXDSKWSVHmtcud6mb4sQJLU6B3WOD9YejrhiD2p0G", - "5rDT1Um84Uzohyi63xOhPjUWYBTde8ATzTwMk5ggGuOlR2ONUay8IW3DKLoX4T9vilHWf3fMETHKMOma", - "EB1admKrVTadmZxBKzm8CnF3jL7yMcq52kRJGxI1LmlVjFSK+VOZIHiAmJheENFO4BZpVLsgXwzpLSJR", - "s1Jewj3Hc/UMTzNAMxynCU+yyUFQG2QFhXf6HS4LwLyGGvLCjBhJcl1SzK6IrEIqziLj9g0JLIrRbFZn", - "iL4SDTzgRfBxtUKL7o8f7aTEujKwy743nHILKEkZdcBJT1T/AhQSmvEUIt4U8lL/tlTMXODvuEInyUDb", - "1TZV0ktF+7avxrnUj1QPP3XVI3dNJCoZ1FC3sqn+cguxKPmSuNadVRzvJBL/EI3fkDnlR5CJG5YwclNf", - "qIZ1sman1K+cFDekfik505/AKYqQ8hm2ETl5z7bS5zSfs5NDP5gc0vb2hRdDjTI74bSDwknfoNXlVDkS", - "4Q4CDHEWidAzxiZA/KDkRYpD/8j3n2+e/38AAAD//+t4p2PndQEA", + "H4sIAAAAAAAC/+x9bVPjuNLoX3H53qp7TlUgAzOzZw9VzwcGMrN5lgFuAjt17hZFCVtJtDi2V1KAPFP8", + "91t6s2VbsuWQhDDjL7tMrJdWq9/U6m5994NkniYxjCnxj777JJjBOeB/Hl8OBxgnmP2d4iSFmCLIvwRJ", + "CNn/Q0gCjFKKktg/8oEXLAhN5t5vgAYzSD3Ienu8cc+HT2CeRtA/Ovjw7l3PnyR4Dqh/5C9QTH/54Pd8", + "ukyhf+SjmMIpxP5zrzh8dTbt394kwR6dISLm1Kfzj/OGD1DCNIeEgCnMZyUUo3jKJ00Cchuh+N40Jfvd", + "o4lHZ9ALk2AxhzEFBgB6Hpp4iHrwCRFKCuBMEZ0t7vaDZN6fCTzthfBB/W2CaIJgFFahYTDwTx6dAapN", + "7iHiAUKSAAEKQ+8R0RmHB6RphAJwFxW2w4/B3ICI556P4d8LhGHoH/1ZmPoma5zc/QUDymBUtEKqxAKz", + "3xGFc/7H/8Zw4h/5/6uf015fEl4/o7rnbBqAMVhWQJLjWqD5CimowgIWdOYAAOt8zJo+9/w0IXSWTB17", + "XcrWrOMySuLjNB1a9u6SfWeb4g1POfksCOR9GG14AdtTskjTBNPCdh0cvv/w8Zd//brH/ij9h/3+73cH", + "h8bttGHpWOKkiCm+LvFnFXQJFww9NijxkonHMAtjigLODjrEf/p3gKDA7/nTJJlGkO1YRgkVYq9suQ3s", + "IZMTGCjhUKK5mJG5AfGPM0hnULIqyodgPCM7eUnM+RvFhII40HjjLkkiCGIGBGcaI27YF4YQMUQOY1UG", + "NDKd5Ey1mBpKv8yJtETwKfotIdRCgQmhvyVT7/hy6M1YKx3GGaUpOer3Jf3vyy+MOE1CCqTod7hsnuce", + "LgvTpLP725x0wV0Qwokz+Y4gSRY4gGZmDzBkAvDYsnqK5lATnViO5T0C4smuBUgP3x0e7h0c7h28vzp8", + "d/Tul6MPv+7/+uuv/8/XlFkIKNxjA5tQhCyCAIWCXjQgeh6KvetrIRjY0Dogd3eHBx9+ffevvcMPv8C9", + "D+/Bxz1w+DHc+3Dwr18OwoNgMvk31IFaLBBbyRw8ncF4ypj9/S89f45i/Z8VaBdpuCr2IkCoJ/uvE4Ul", + "9uCryjdZB9nCKlfJPTRJi6cUYUhMS/02g0IaMNqlrLsnW+877/scUhACQaENKqRA0FYxc1USMxls+8Vt", + "Pvz4sQmHGWy9TNpkyDAiMQhgSofxA6JwBP9eQCFbivhE/LPAbEuibUOkPf9pLwEp2mMW5hTGe/CJYrBH", + "wZRD8QAixPbFP8pW3OOs8FwhJAGvab0nnLwU6VhXbN6nY7FLwjR80Tbx8V3gI2kSE1gFkCrKr1JSAax6", + "MMQodjguF1EkcfQZJ/MxheloYWC4OwziYHYukVY/p9b2JptofD7W9L91W2iSouAY2xY+B/+TxJ7iOY/N", + "4f3jeHT+T8VY4/Oxx8eoomYF4puj+L8OenPw9F+HH3+pUmEGrB2/VzAGMT2OIKaDOUDRF5wsUuvqIWtC", + "TGQZIULZGkULZXxidlBxtMxWWH6IHmCPz1hduwS1aeUNckcMbtxr/kltK1srO8UJvl/L3qp19XycRLBJ", + "0ovVfIXzO4hHrL0RH74crAkrLaVSWXtQPsg6sMCXQaLF1Dwp+7L+SXvy5MwlxLPFhOZAmfA4eICxAXMg", + "DBGDHERfNeVdYqOsjae0aCbqIRtWW10+4b3JSGbYuYdLa/e12BIC5W6kmbc3nV4ZuMPT4laWPQ7SH2Fd", + "yGOC7ydR8jhaxOPFfA7wsgkyvlXfqt1qTBpx2MgWcqM2/BSYzgvmbWaLre6t94//Hl+ce3dLCsk/mxUn", + "Hzqb/veX0YAa4wyZmD4FUxRnx+I6hF5mLTO7gcuvR3dnTbac6sldAborUNaAeIFDiD8tTxGGgQIJxos5", + "2zlAAl94IjX5UdoL2f+z8tOpvvnZxNp1DAEOZkZ9a6P3Ci4nABl9HVzQL5iOYawqWnl4ERePLnb3awrj", + "kMHSMLBs1mbkvxdw0QyxaNVmXLyIYweIZbM2I5NFEEAYNgOdNXQfndHhF0iltXyKJhO7HR+iycSd8LUh", + "G92pYmQmo75w79Rxmg5jQkEUWXxsIAiSRUxvwQOgAN8ucGQkY9UsNlv7PR9ps9wSSCmKp8Q63MoK0K4l", + "7ACUoO+Z1myyKgQGP/GTi+30U4MQchvCCVhEVPuc+R6NxyMFn9bVDtcIpkkVKgzTxA4T/5o8xhA3n9i0", + "tj1tWBNA/53cGWi87tqHq2Pt4kcaIX8ld/sb8r1UxiQUpu14sMp8RfOqetZCc5gsqHn58mPT0h8gJiiJ", + "jTPYmSEDSx8gcw6JpVt20njUD0AcwChSjkQ3j1nWKbt/tDcZQUAEoRguzmJEZu2m/ktQZN2OMqIVLS27", + "9wKiw5AU+T7HMKEA03aLIRTQBXFYDzMvRFtJ36NF3FrNrEDlwT3E9SzQZrnamaIJZM2uKvVcnV+KgygC", + 
"yXbBzjXjbJuU5Xg5OD8dnn/xe/7o+vxc/DW+PjkZDE4Hp37P/3w8PON/nByfnwzO2N8mE/MMxfe5zCeI", + "Jnhp9RZMEWWtcq1VlTw4G8UTescoeORA51bvgzYMkyt1g1wolVM7Clc2xmF03W47yhrAaXVXUvEmF6Ys", + "4qO0sF4J6yYaYQco852n6316uauBT+Uk3ItM7ObnVo9t2YWN8eTGIDZaqrsCvtmMbjLDNRDlfDaa0I1M", + "WFh0C/Ak3VkoQpMdK47PbU3L6NptQd2eaa2cJ9eGbsa4PsGNhK14wUBemZSK0KyRhsYRCO6/wbtZkty/", + "+iI1WNa1xGR6hmLYKjSAaQT+mZ0umLmh7OwomXoRimGbi18RZWacgw0nGzSeXGy9RQuDn7mELf2SPA99", + "y2a4yVF1Bh9gpFsip4NP18z6GJ5/vvB7/rfj0bnf8wej0cXIbHJo42QOMaf9L0BgEhny++v7ExVZmfWS", + "+PgCn2JxhJZeRdm5xq9oQIB+U//dDxYYw5jeppx2D3t+DJ/Uv973/Hgx5/8g/tHBu+dembMKnU2RI7KF", + "lwoqzCY+dHLEabAYo6/gU3Xk924j5+syBrwkFES625M15d76CBEq7q7yGNd3Ln4/g8TSFVedKvwECMwt", + "9coeay1/gyB0azk81VrofuC8yTlffmMzdqCBLXS0aF8c4wrRyO6LEvb6eZ27SjS5cPdZ6R0qs5QxZYDV", + "hCnbVvQsm2lA402RLDLcKnmQpJApwSBKSCFIMMfGCDLy+nmCdkYwjcCS35/YYwXY12FYFPrbjrGrj29W", + "EN7wJeFFLP0sNVuYLky+owrmWDM2asmuNAw4hYReY0tQxfXozKOJR2Ac8lAZaVoQjyabuby2neAXMfp7", + "AT0UwpiiCYI4u0WVl9MyUFFE9Ohhv3cwSuKpgri8ndUN21xAkZuPqTZIqGBBV23eGYhjGNlwKD97KDRf", + "5RM2uPcoRjdurxzB7vdRU/DokxUneRENgblt9ezbC5bOutvXzQd/yaJ3gvrd6FMhIkN3kS56GhkaSZjC", + "1HTVqOSTgehQFGJY9Ic1CNoN+e5TgFUWlTskGIIQ3EXQ7pwU37MocegRCtNGMnnRlZJlBjsFaKsokINy", + "gcsNFOZ7zdZv4ArpmA7SpGD8ajbrmi6aOBFqVwotaWA1GoZWkFe598r71KCrbGAUrs0cbl3kJWHWfv08", + "mCyoDcQV2ZNHoRxPqDhFuCFz7bd4okvNzrjd9EkWK171uV5gs7Y22eIgeNqsOOtSs+LHBNsuD500VUaB", + "2cpqb+r0UJqKhJJBnVVCTkKmkM14STCaohhEzQsQYYxZe23cmxwySyQrj1cxY1CxWGWtKDR30PyZBpJX", + "0tGBAOXRjPdglAUfIEZ02ab3WPXJL6xr6O8zwoSOoRCx7jR4Btr1MiUoFacvjavDnuFQQ4jurhU7WUOb", + "uxJuWSBIo4/UQAaaQ2M0+L/Xg+vB6e35xe23i9Hvg5Hfy38cHV8Nbs+GX4dXfs8fn/w2OL0+G55/ub0a", + "fh2c3l5cs5+Px+Phl3N+QT6+Oh5diTvz4flw/Fvx+nw0uBr9R1yv5zfpPZ+NdXF9dTsafB4NZJ/RQBtV", + "n2x8dsFang2Ox9mYw8Hp7af/3F6PB2YvuZGSNRRo3nYJ3Wh4NTw5PqsbrS6MQP51K9bwdXBeQtOLwgyu", + "ssjyktkeQSyzHAaWXJRvKh838XhrdWSZ815k35h8C2IQLSkKyEVKLxa0ZtT8DDQDxEtSdvCSdm42iHmO", + "jSft2TIgXpxC0ZziZ82GMOYXbTexaDMhnjX5RcY174AUNe+FKQ9rmuwJkvNHbAIuYbXeKJ6OZVDr9lhU", + "5KwPnlLEdpmHcXBg6scXvcQ0xHvkubc8IsUDGHogTXECghmKpyIJlyO4bn6VHyWI5AzNEV0RCrFkleVc", + "hSdiY9fiQjsefgYoWmDoAAp3reqAqBgzHl3OY/rNc0aAiKXaL7l5ljZP4wax3NlHQNiUtMUVN3hSRPaZ", + "n5XiwJLbMgdP3kQ18QD1HmcomGVUtV5nn10SGAG2y4VhdiuymVTD5yzh2u6Y0dLsxTBbTUFfLZ+xyWcp", + "GcrmcVWf7VgTLep8rnyEQgL2ChqzkIiZ75WeVNZAOzujSiQpt9MgYk+r8L8aQbnnLzLWa2p9TSAWPS4X", + "dxEK6kiBj1eTkqvDvDObLvdvlU0fyX1Sh4mLb+f8JHZ8+nV47vf8r4OvnyzHm6Le1QfJznODP8QZhP3w", + "+ezi2+3o+pyfcy7O83PdwGF0rtVNhg3A8z9AtLDIB/7de2ANzCKIa3Qmuh8B5mkmFXUveu8bg0mYch3B", + "CYqiJuXLQ8v4cEz7Yt4Hhu6infetWagY275EM/wvS2HItr2ZQDMiee75D/ZVqKChpg0zr+YRxWHyaLFL", + "IIXYEy0yTSHG8v6B9uG+d+CFYNnzDrxHCO/Z/+dJTGf/XPGGLENPYevU4u2CRSHqMolQYMgCFRZo3aEs", + "q9EjmhrUYgvBUmS/plgGCZxxdRhNpxBr5rE1vsGclV91X7YNg7jmdYN+xuIa+sob4oPWUtfCqjx1QOz7", + "/4b9Sd2B+HUPxBs8qLqzazJHNEZRL1yoSP6XuwtNUaTXxGSzN55ZQRhiSIh+di3YIeowVD3Csg+/ATIz", + "ycMZIDN9yP9DStNJCSlUuaiKORYFJr2TGaDWCf+AOLtds/MrP4Ezbn2QzdmvCBdhMNPMDJBLQMhjgl3n", + "AF4qO3gE0i16lkNE0ggsCySj9q/1YbeI3RsLgZ3MQDyFCkH2IkTw0Y5ETuXwMceasknMsK+gGNXIfN1p", + "LSAZELX4exkMlTQW+aVXwJMN5WfJFMWrl79ajb9fVA1r5zCu1pg24XoEp4hQ9v83hG43XWIRDDu4W6om", + "peum6QYomaGUvFVHTMUxtUVtvgktIyYzbds3Hsdji/m0uMXlR3HwF5FAXgBiL4WYrW+/1X0jeAAoYhap", + "itxrqB1UnRY+wWBBoRcksXRUREuzJ4JhkNerw8OGasDcy4GmMQy9vNMO1AWOAE9kwfQOAnpMa68tcixx", + "fxeBMfWAN1O999dbX5nNwfgdxhAPCAV3EY9o3CEI5+DJTmNz8ITmi/n6aG3jIQwCqn1zBGQAY1XSithC", + "m1kbHmwsT2/qDYV84JfWH8njIg3BFPxbdTHKV318cjX8Y8BzX+WfN21jLewCbwcUk5S8xggtdTg3VWdK", + "o2Q5h81XMWqM06zHSRJP0LTxaQ9LjSd11N+31O2xbDP7YhrCCUey1k+ZrLh7v3WVma0wpBVDygIzyEIw", + 
"XR1Dao1XwKhcZRGpdlTJxvtDdMySd+rcy65sx8Y9UXLT5ESfQqp9z8KfSi6NWNof0jM0hZRw3AV5V2/K", + "+mbHWY0Q9q2XOGOKAYXTpU1aia8eTYS3RFWm12cVlz28Hj8IZuIaSYkzEcN3Ozy/vRxdfBkNxmO/55+O", + "Li5vzwffBuMrv+fzEMv8n19GF9eXt6OL6/PT29HFp+G58TqupVLL9VbRd1culfj+0JwyXdh3OXUZgT3j", + "RtZRRUVG/Rzlkaa2Uo8rFbYxjmaLAMntXDGed5ymnl47ySkhcwMRQi3KNdmXfKPRlsirL7tFM+Ifnhq3", + "RvU2GwovSlPbso3B7Qinp31U66+QYhQYwhW5VP0dLk9UVoNBmZXqNFSF8j1cErPFrIZnQqVmipKFzoQ8", + "8EgKAzRBQT6J948UEAJD7wEBb4IiCvE/HctAfCtWsnO5Bs1/vdRaU7yAhvHlQd5aP2G9uWXZhWwrGEWC", + "jjup5cllawsoVkljQoZt+6Qt5h7ryS3bBmFjNSf1utZZNlp9GpmIE4Dhp2WLwa+0XlqdR2lctrRFDSO8", + "vFrkH1qVVYm74mJv6uXDjhwktWOOu5xfV+1Labmas1MqkymMtV2bRqglAWMhOEPZsSS+1CRKNZEYJ/GY", + "Ge4LS+0b+OBw8M4yr2QJkc0kurck/axTAz2TE55wbb12KVRZLdLoJquxlKZtWoTVguFZfG2oTg11Ijo2", + "8VOpeWV+yT3G/E7FecaPksOM3xSjGj/mvGv4XLeaKzA14S9K8HpcRi/2qZgvqwSEdQQiZcQJZjJ3YhYT", + "NSnst8jCeU0TynzLiaUa0q0tjfmF0xLzCtvrlxLeTDURHiop/i0GzvCzXhtSKH0z+nI74FZ6ytqjWTNm", + "yrxScHW5YEL3jmle1Zf4Sl9SzQGHpbILNsdQjz9Yy9TmVV7nxGZDtqULojkyzQJDfnQSO4+aa9316Kz6", + "tNS5CmaFycJAN80kdQrZcc9cGwyDx+JngwsKPHr/Of565oVZw/ZStTiPA9Dmx2C3RIU/AZXwgg3BAiO6", + "HOePRN9BgCFWb0mL16PZMZX/nC9wRilPYg2S5B5B1RwxDImflAv/yK+8iC6fGOb3UyieJGYkq8fnjy+H", + "vNoCryLpF3/Ndsk/2H+3/45vcgpjkCL/yH+/f7D/jhusdMaX1gcp6kfoAcobguq8X9QNAGsVQ0K87CTH", + "aDDzg/pn8vsXvi4sD1x8lsN376oD/wZBRGdcjH40fT9PaDZnYWf8oz9vej5RT0YxCPOG6i7oTzl+MIPB", + "vX/D+vO1YgjCZfNiWTNUt9qRarDO5XLgeDQuf4rWoxhMJjJzq271GbSNy3846AMZKrzH41b2uEuP9L/z", + "n/XfngWMEaQGS/KU/048kD2RxyPSRXQO717BWCn7QIwgDk+AJ6owsGvSHSszePwgxPmL0XPOXZWl+Lp8", + "EC45IWNefLJ6vqns/YcqtsaLIICETBZRtPQESsPC+4IV5D33/A+CSoIkpvJcDNI0ko/Q9/+ShUXydTRI", + "fl4FTEZglf31cxAxLMDQS7B3B0IP5/XlP7x7v3YwTFB8TvAdCkMoAsdz+hZ0UkdmiuJleuRNz3/ay4L3", + "+euz4kPPQBg3/AhATbdeIm3jJSQuRvgxSFwU802E7FwLMThkJhnIpBZbNJFPs1ew8WwW0WtZiKWaRRX2", + "ghiQD7p3YsBNDAhq2ZwY0BVkivZEJlL/e/Y314ZpQgxGwwg+JPe80kT+kL64N8tmLImJFPEkKbEm0d1F", + "SmTDW2SCgnWn1B3my5N0rp68+YGJmrShakk6bGOv5M4pMs5/q6PkbMsLFCxcRf3v/P/PfXU2tJm8fG+y", + "d4BBnL/PW6Tb7H1hYfM20qt4TdimwoS7f5ukuj6ay19abhLzGFKM4INkAIERvh8dFxSOMBpmch4Qlz01", + "9C9oqED7ImplD6RpX4+4IVYGOEOE2uJ0que+LECIdRuWmm6M3hweBGtHiMVF7hItHmwHjOsYLOgsweh/", + "YCgm/ridib9COktCL06YFRMljzAsH++/FzxIf948F877TeSqeEc0ceON/vfpbE//5bnPQ+yceSYLyEOw", + "gWX4g2suykMHx6pDSmC/UW1ie46uHUsX9qDj6LfL0SVmKjN0RRuWmeBFLM9/Z3/t8cja5/zfjOWe+3fy", + "TUZn0ZB1qBULn/JWb00y9FwilK1A5qiuBbHtpOrNdPucsoX7lNuRgJU3P9sJwYzaOgH4dgWgJjLWIfz6", + "j9qzQUYPjjb3NEruQKRerLEILeG4+cKbfstaNt8BFQg3xQn7BwyzyTqa3SWaLd6yCQoBJgpptrgVBfa/", + "yz+enWhROjtdaFE4PXNabFSiclCr/nzUyHqrFnXHMT8cx1TouI5j5rDeWUmyt4GzDEQVAMEVQczLDxY5", + "ReWS2O/q14U+mYDVxmTJolV2hZgbgg30kH+5j1muDtfPqvNekoqEGHkFV9rkPio9Qm4/ToAo8gqtbRss", + "nHKFhhu1WeWOFx4wb7X5qqBiYXW7RAhFI620CdX91zeZv/3X/87/5xBM4o31twIrW6w/AOkeO1IY06rs", + "OIg7GSRSxEmn7l5f3ZUDU8pUq3iC/153dyGIrsgxMel/JzFx4pbie5dVfolJCzYpPZ5pZRQpUneOTUrI", + "6BhlBxmlQrAZq5yPaxklJgY2EZ+f1SW3/UDF5lV3zRUWaR2iZeOMDNqtuhnFral4Q2+lK3YNhsOPHwtA", + "HHRHtu7I5nRkIxSme3jBlZf887kfYAgo3EuxnTNPeBMPeOkiitTOyNCpLEegwrQiZ1wwrhjhErswsKrF", + "ZVduEva3F0op0bCIVGrpZ5zMs8ph5iBKXrjAo4kXmHZhqwGUbcEvSBgBvjhNFVbwcwfYsFk/bGfW84Qx", + "7SIu633J3iWyUoIkS+6p0/yKI5vFTSgfr62PcUOTiSppoqTBHaSPUJabmieEqtJ97BuIBV1NECb8l32b", + "OPoCKX8+9y3JoQ1x8xdItQeFV7zH49vZcfArczDjm1CQ9YbYNk9qtnv/RJsS39oYkfUYiEHfCC/2asow", + "0cQj9yhVsP29gHiZA5dMJoR7tA2goJj+8sFYkal+OlFl725pmZJ/bjnjJoVN5XnoFeJxSSdotiVoCjz3", + "CAg/mpgkT5nxNbbPvAbsJ2YhrkcSRcm0Xg4RL0qmXoRiWJZFVavgLJmeoVi8j9iJod0QQz37c1YRfIAR", + "YfOKUnY1E/OWhZlr74gkHbBenxGMQtvKCQQ4mHl8Ng2OSYItgIgObQEZi14GIL7xd1gTj6fJ29fPP39a", + 
"irW0nPxC72vBg5g+RBgG0klYA8Wp1mwVSPL+G45u06RBk35iJNkpJ0uiCNcKmRTWdMFZMm2vBsRn0uSf", + "Ih7g7/hYkvlE5I1o6m/SvVN8QK4+NTZz67xCImwrjw3NnrTt8lx1p0lGbE1ZrSaKzi5HRLXEmux2nuz3", + "hAhF8bSewN/ORckW0tXdmDAv0vOqiekdP64t77xFlnktX5prsNRHvYHMWrXlwJOmehSux5Gd4OBtFmtY", + "wXNg34SOdwrmWh21ujNTr4WJ1r5QS2a9/azKTbcw11eLxdkEPXjlWixVDdjVYnG1UV9Ui8VNS/YJpOz/", + "pLlum+riqS71lVg0ckHxdCz7OJa3+EnUpIaYF+hIfU86VircsFnRtDY+ygoa1V+0ZfWFiFv9os6ezLIA", + "OD5WTVpVsU6dr69sPGZFkEi7ykhNBuMKxbo6G5EjQNG6ZhZuPjQsn7Tjr3Xxl2SEFUuP1Ssch6gOwpO6", + "CqEdorel7Nhb0TU/8zXqPVw6XaKydoVZnUrwczLgRbKrz5nYYdKeonOCLZcVrQHU3sRbDUS8iGW5aegE", + "q2rrfP1pfi7ola6k+X6+zoU0n3oHrqN1OPTL6BpiyZKf7+HSewDRAnopQLhCL9lran8ydjs44k0P/B77", + "16H41yET76b1GB7hMzJD0ytDG6/A2N2or8XKhipe0rHuoovy73Mh72gBCM3hYAX8DpfdoTNXhSvRP0d2", + "xwMmHvCkZbJOPsAwjcCyrno0+67f74mOFg5QNaP5oD/vYVQgQD5qVXtBoYoQcw2JFd62dwnfJjSZAdep", + "qkJk8uG/tzOrehhJHpngUwBhWEllFmS3ZoWJ4gdEYdvrf9XLfKUx5F87XaluMjR8rHSHobDd3VyYLvdz", + "WtzQjb6YoJbWO/+sdocvUOJ2dS9w+6r39QLcVa7pJWF0bGm+m8/4Zj0XiZLP1Q974t/tHgpzYOXWT4Pt", + "lre3yFf1sO1l6HjrurWRew3vnu0Y95qq1mT7Y8stKO5jm/fEHDjhjZen2UFO2Gxg+Gp699VCwx051/BU", + "2S5zrgzZbs25dZpvDud38nHlFmc01cvM4l/51+6MpqhRw8dKZzSF7c4YNJ3Rclpcjy0ox+t/F3+4lCwE", + "EghvgpN5U1CmoIYfwxSUy7bBJj5vv7Di2nl3FRvw5+DaN1DsJWPSwsasTV6oHntpEqHA6Qlu6ZEVHVwC", + "uZUT95L36MK4+ya0rKZcS7vRKdmtZ0OIesG1AdyFWsSktoR2Z3aK2G0dJ23yG0qo7qqa7lDBYY0XLAXr", + "G4pzOzBin1CAqZUdx+yr0GMXxws684y1TK8JxOK0ygG6YAjlPd8iZ75/d9hQDJijTKqVAlZmEITydB0l", + "gmCKtFKe+7nV+1IcpcUZFSGwHViZDpryaUoVr4mpAHUnh6UcPh8XHu5oIYnLWO5k8c7J4iojONV+b0zj", + "cXgEobsX5ggo8ldt9s76aLY4qfP9bveaww4ztJXzHDm6VqMaCjPWFnXW6zgvBefayjO/WR/ij14v2rXQ", + "e9EtkRUE7fyGu1a7lTHmWuu1OsmJfgDiAEb2QOtjSuE8pTwOWLR1KCctn5kQQ3cS5G1LkBARHpshRYgg", + "gmj3bIxXzodoYpRtMTSGrGNN4gTr4MzDvHnHwruYyoEXsdyqhsiZ7M0acT9kWu7zTlgqXSJHjXzhG/4a", + "AiVfU+0rMaKZ43sTXyAdi2E70fJ61oEcL7n7CwZ0xZOE3PfuQLHTBwq1SxuRGo8Jvoe43uMQRZ5o1lAk", + "6xtv1Ln3RdWJleL6uro9lopxkgBLzzKtfpOlEC01pv7PJv9boXhJI0PI4iNv2R1XWLANNB2Db5hr5Xat", + "yLade87MuRlu2r2qVqCp1fm5n+KGa2um4vRHG0npSVajOayRCxtEe8yTdKy+KQAL7+YSKu45bZWioHNZ", + "Im3zxrzj5q//dXpZscqfyr0okG4nf0pX8UXsbFwCESdjmrd0sx46gzpXzZ1JvX7F3I4nHJmg8W7qRLnZ", + "7wANZuWigKRO276du6kN+Y81XBCBDFcvsrzcKNusZO1O5fwlIPavxxzgYUgKFfVehOBqzcmWfjF5IWYw", + "4ztxoUfACLIpk0xZatT7pFwlBx/cVYdyUJxP4V1J3V0tqavXLWNzTiHNtnbfMjFvPwz9bZ2J3CFTXTYP", + "XAowQ5rl3FYCSzT+pgvjLcFnuHExwiavmDYL107XIe7q3tag7gXmf6fTG3xzmzkGcHfcnO1IQJriWgVo", + "jPSLjCm7t/G1f5UzvlVd36nCThVuVxV22qZ6tlZS5AUaRwmvTvNsRPMsCMSkHywwlkupL7AgG3qsmzE1", + "9QukJ3KwDdIYT8FsR1Qc4i4r6PWzglwzcBmRl8itmIFbJeMporPF3R5I034AouiurvjBScKkMIUtEq6/", + "8OGP05QnXZ+oGVrnNAdy6u1nNatF1yU216J1vRnsRYSqLPYfJUPchsgN0uYOE+ba6HAjNLibBPhiekuS", + "aQQ3Q2986B+c3gT61kxvOeJ+OHprepUgf8KnWASeOyaczEs2gl6HlPi79AyA9mzOT/UGgMvhxVWtur0R", + "YKW9PggCmNKalE7+vV1JZdHH38xdtBi8UgXYcn9cQ31i5V2t+/pERY6kxlr3dvrCkN+L16QYsu/t6Ev0", + "8TeVK8cGXwN9iZV39NWQqMaQtAJ9RckU1WSuniVT4qHYA1w37tcYGGd8oA3VLWcqmI2/paecnfw8UTKd", + "wtBDXdGX3XLvFNU6oxpXP06UTJMFbWCGZEHduIENtSM0ykDpiPTt+CAF9biSrSxLPUNpiyOQ1sntGKQX", + "z+bdZEzWRgncPGn785COou5MtMqZSMdgM0mmgJDHBId2WSpfvhCS1FPt60TqpRpzczbGyQzE02yiXTI2", + "Ag5ZmCGqE+dvSJwLsipSugMTYThlggzXHfpEC1JrkZzob+Vtgm0UGLvEMAp53TXsm7DTFQm52jyisPUm", + "bhjy8tY/w02rTM8XibfyaZyGFHqDbShS+h1T5cUYtUmqW35hZt31I1aIO9ox6bQzhSNa1I3oKdKpELiI", + "aM1yy93ef8oCnZyiV90ff9JCMeuztLf/yJLja0gZgF35od14B0mjmFVypHntcpe6KU6c0EIL7B4brD8c", + "dcUY1E4bmMNOVyfxBp3Qj1B8vydCfWo8wCi+94AnmnkYpglBNMFLjyYao1h5Q/qGUXwvwn/eFKOs/+yY", + "I2KUYdI1ITqy7MRWq2w6MzmDVnJ4FeJOjb6yGuVcbaKkDYkal7QqRirF/KlMEDxATEwviGgauEUa1S7I", + 
"F0N6i0jUrJSXcM/xXD3D0wwQfyGPJ9nkIKgNsoLCO/0OlwVgXsMMeWFGjCS5LilmV0RWIRVnnnH7hgQW", + "xWg6rXNEX4kGHvBi+LhaoUX3x492UmJdGdhl3xtOuAeULBh1wLAnqn8BCgnNeAoRbwJ5qX9bKmYu8Hfc", + "oJNkoO1qmyrppaJ92zfjXOpHqoefuuqROyASe/6Hw39vZ1b1MLEsqgKfAgjDyotTSg421M5sqgHdQjRL", + "2UBca98qqeMklv8Qjd+QS+dHkMsblnJyU19oCnbybqdMwJwUN2QCKjnTD+EExUjdW7YROXnPttLnNJ+z", + "k0M/mBzS9vaFh1ONMjvhtIPCSd+g1eVUORriDgIMcRYN0TPGR0D8oOTFAkf+ke8/3zz//wAAAP//ndNk", + "W3R/AQA=", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/api/v1/server/oas/transformers/tenant.go b/api/v1/server/oas/transformers/tenant.go index 5c440f2e4..d3b3461ba 100644 --- a/api/v1/server/oas/transformers/tenant.go +++ b/api/v1/server/oas/transformers/tenant.go @@ -5,6 +5,8 @@ import ( "github.com/hatchet-dev/hatchet/api/v1/server/oas/gen" "github.com/hatchet-dev/hatchet/internal/repository/prisma/db" + "github.com/hatchet-dev/hatchet/internal/repository/prisma/dbsqlc" + "github.com/hatchet-dev/hatchet/internal/repository/prisma/sqlchelpers" ) func ToTenant(tenant *db.TenantModel) *gen.Tenant { @@ -19,10 +21,11 @@ func ToTenant(tenant *db.TenantModel) *gen.Tenant { func ToTenantAlertingSettings(alerting *db.TenantAlertingSettingsModel) *gen.TenantAlertingSettings { res := &gen.TenantAlertingSettings{ - Metadata: *toAPIMetadata(alerting.ID, alerting.CreatedAt, alerting.UpdatedAt), - MaxAlertingFrequency: alerting.MaxFrequency, - EnableExpiringTokenAlerts: &alerting.EnableExpiringTokenAlerts, - EnableWorkflowRunFailureAlerts: &alerting.EnableWorkflowRunFailureAlerts, + Metadata: *toAPIMetadata(alerting.ID, alerting.CreatedAt, alerting.UpdatedAt), + MaxAlertingFrequency: alerting.MaxFrequency, + EnableExpiringTokenAlerts: &alerting.EnableExpiringTokenAlerts, + EnableWorkflowRunFailureAlerts: &alerting.EnableWorkflowRunFailureAlerts, + EnableTenantResourceLimitAlerts: &alerting.EnableTenantResourceLimitAlerts, } if lastAlertedAt, ok := alerting.LastAlertedAt(); ok { @@ -40,3 +43,35 @@ func ToTenantAlertEmailGroup(group *db.TenantAlertEmailGroupModel) *gen.TenantAl Emails: emails, } } + +func ToTenantResourcePolicy(_limits []*dbsqlc.TenantResourceLimit) *gen.TenantResourcePolicy { + + limits := make([]gen.TenantResourceLimit, len(_limits)) + + for i, limit := range _limits { + + var alarmValue int + if limit.AlarmValue.Valid { + alarmValue = int(limit.AlarmValue.Int32) + } + + var window string + if limit.Window.Valid { + window = limit.Window.String + } + + limits[i] = gen.TenantResourceLimit{ + Metadata: *toAPIMetadata(sqlchelpers.UUIDToStr(limit.ID), limit.CreatedAt.Time, limit.UpdatedAt.Time), + Resource: gen.TenantResource(limit.Resource), + LimitValue: int(limit.LimitValue), + AlarmValue: &alarmValue, + Value: int(limit.Value), + Window: &window, + LastRefill: &limit.LastRefill.Time, + } + } + + return &gen.TenantResourcePolicy{ + Limits: limits, + } +} diff --git a/cmd/hatchet-engine/engine/run.go b/cmd/hatchet-engine/engine/run.go index 6793ac3c0..f21d08cbd 100644 --- a/cmd/hatchet-engine/engine/run.go +++ b/cmd/hatchet-engine/engine/run.go @@ -81,6 +81,7 @@ func Run(ctx context.Context, cf *loader.ConfigLoader) error { ticker.WithRepository(sc.EngineRepository), ticker.WithLogger(sc.Logger), ticker.WithTenantAlerter(sc.TenantAlerter), + ticker.WithEntitlementsRepository(sc.EntitlementRepository), ) if err != nil { @@ -102,6 +103,7 @@ func Run(ctx context.Context, cf *loader.ConfigLoader) error { events.WithMessageQueue(sc.MessageQueue), events.WithRepository(sc.EngineRepository), events.WithLogger(sc.Logger), + 
events.WithEntitlementsRepository(sc.EntitlementRepository), ) if err != nil { return fmt.Errorf("could not create events controller: %w", err) @@ -189,6 +191,7 @@ func Run(ctx context.Context, cf *loader.ConfigLoader) error { dispatcher.WithMessageQueue(sc.MessageQueue), dispatcher.WithRepository(sc.EngineRepository), dispatcher.WithLogger(sc.Logger), + dispatcher.WithEntitlementsRepository(sc.EntitlementRepository), ) if err != nil { return fmt.Errorf("could not create dispatcher: %w", err) @@ -211,6 +214,7 @@ func Run(ctx context.Context, cf *loader.ConfigLoader) error { sc.EngineRepository.Log(), ), ingestor.WithMessageQueue(sc.MessageQueue), + ingestor.WithEntitlementsRepository(sc.EntitlementRepository), ) if err != nil { return fmt.Errorf("could not create ingestor: %w", err) @@ -219,6 +223,7 @@ func Run(ctx context.Context, cf *loader.ConfigLoader) error { adminSvc, err := admin.NewAdminService( admin.WithRepository(sc.EngineRepository), admin.WithMessageQueue(sc.MessageQueue), + admin.WithEntitlementsRepository(sc.EntitlementRepository), ) if err != nil { return fmt.Errorf("could not create admin service: %w", err) diff --git a/frontend/app/src/lib/api/generated/Api.ts b/frontend/app/src/lib/api/generated/Api.ts index f445a2f25..d63ea2f2e 100644 --- a/frontend/app/src/lib/api/generated/Api.ts +++ b/frontend/app/src/lib/api/generated/Api.ts @@ -58,6 +58,7 @@ import { TenantInviteList, TenantMember, TenantMemberList, + TenantResourcePolicy, TriggerWorkflowRunRequest, UpdateTenantAlertEmailGroupRequest, UpdateTenantInviteRequest, @@ -396,6 +397,23 @@ export class Api extends HttpClient + this.request({ + path: `/api/v1/tenants/${tenant}/resource-policy`, + method: "GET", + secure: true, + format: "json", + ...params, + }); /** * @description Updates a tenant alert email group * diff --git a/frontend/app/src/lib/api/generated/data-contracts.ts b/frontend/app/src/lib/api/generated/data-contracts.ts index 2a7a8878a..392f255de 100644 --- a/frontend/app/src/lib/api/generated/data-contracts.ts +++ b/frontend/app/src/lib/api/generated/data-contracts.ts @@ -219,6 +219,38 @@ export enum TenantMemberRole { MEMBER = "MEMBER", } +export enum TenantResource { + WORKER = "WORKER", + EVENT = "EVENT", + WORKFLOW_RUN = "WORKFLOW_RUN", + CRON = "CRON", + SCHEDULE = "SCHEDULE", +} + +export interface TenantResourceLimit { + metadata: APIResourceMeta; + /** The resource associated with this limit. */ + resource: TenantResource; + /** The limit associated with this limit. */ + limitValue: number; + /** The alarm value associated with this limit to warn of approaching limit value. */ + alarmValue?: number; + /** The current value associated with this limit. */ + value: number; + /** The meter window for the limit. (i.e. 1 day, 1 week, 1 month) */ + window?: string; + /** + * The last time the limit was refilled. + * @format date-time + */ + lastRefill?: string; +} + +export interface TenantResourcePolicy { + /** A list of resource limits for the tenant. */ + limits: TenantResourceLimit[]; +} + export interface CreateTenantInviteRequest { /** The email of the user to invite. */ email: string; @@ -239,6 +271,8 @@ export interface TenantAlertingSettings { enableWorkflowRunFailureAlerts?: boolean; /** Whether to enable alerts when tokens are approaching expiration. */ enableExpiringTokenAlerts?: boolean; + /** Whether to enable alerts when tenant resources are approaching limits. */ + enableTenantResourceLimitAlerts?: boolean; /** The max frequency at which to alert. 
*/ maxAlertingFrequency: string; /** @@ -332,6 +366,8 @@ export interface UpdateTenantRequest { enableWorkflowRunFailureAlerts?: boolean; /** Whether to enable alerts when tokens are approaching expiration. */ enableExpiringTokenAlerts?: boolean; + /** Whether to enable alerts when tenant resources are approaching limits. */ + enableTenantResourceLimitAlerts?: boolean; /** The max frequency at which to alert. */ maxAlertingFrequency?: string; } diff --git a/frontend/app/src/lib/api/queries.ts b/frontend/app/src/lib/api/queries.ts index d738d5cdb..fba940208 100644 --- a/frontend/app/src/lib/api/queries.ts +++ b/frontend/app/src/lib/api/queries.ts @@ -30,6 +30,12 @@ export const queries = createQueryKeyStore({ queryFn: async () => (await api.tenantAlertingSettingsGet(tenant)).data, }), }, + tenantResourcePolicy: { + get: (tenant: string) => ({ + queryKey: ['tenant-resource-policy:get', tenant], + queryFn: async () => (await api.tenantResourcePolicyGet(tenant)).data, + }), + }, members: { list: (tenant: string) => ({ queryKey: ['tenant-member:list', tenant], diff --git a/frontend/app/src/pages/main/index.tsx b/frontend/app/src/pages/main/index.tsx index 6e4f1b2e0..187daff01 100644 --- a/frontend/app/src/pages/main/index.tsx +++ b/frontend/app/src/pages/main/index.tsx @@ -78,7 +78,7 @@ function Sidebar({ className, memberships, currTenant }: SidebarProps) { className, )} > -
+

@@ -148,6 +148,12 @@ function Sidebar({ className, memberships, currTenant }: SidebarProps) { to="/tenant-settings/members" name="Members" />, + ,

+
+ { + setEnableTenantResourceLimitAlerts( + (checkedState) => !checkedState, + ); + }} + /> + +
= { + [TenantResource.WORKER]: 'Concurrent Workers', + [TenantResource.EVENT]: 'Events', + [TenantResource.WORKFLOW_RUN]: 'Workflow Runs', + [TenantResource.CRON]: 'Cron Triggers', + [TenantResource.SCHEDULE]: 'Schedule Triggers', +}; + +const indicatorVariants = { + ok: 'border-transparent rounded-full bg-green-500', + alarm: 'border-transparent rounded-full bg-yellow-500', + exhausted: 'border-transparent rounded-full bg-red-500', +}; + +export function LimitIndicator({ limit }: { limit: TenantResourceLimit }) { + let variant = indicatorVariants.ok; + + if (limit.alarmValue && limit.value >= limit.alarmValue) { + variant = indicatorVariants.alarm; + } + + if (limit.value >= limit.limitValue) { + variant = indicatorVariants.exhausted; + } + + return
; +} + +export const columns = (): ColumnDef[] => { + return [ + { + accessorKey: 'name', + header: ({ column }) => ( + + ), + cell: ({ row }) => ( +
+ + {resources[row.original.resource]} +
+ ), + enableSorting: false, + enableHiding: false, + }, + { + accessorKey: 'current', + header: ({ column }) => ( + + ), + cell: ({ row }) =>
{row.original.value.toLocaleString()}
, + enableSorting: false, + enableHiding: false, + }, + { + accessorKey: 'limit_value', + header: ({ column }) => ( + + ), + cell: ({ row }) =>
{row.original.limitValue.toLocaleString()}
, + enableSorting: false, + enableHiding: false, + }, + { + accessorKey: 'alarm_value', + header: ({ column }) => ( + + ), + cell: ({ row }) => ( +
+ {row.original.alarmValue + ? row.original.alarmValue.toLocaleString() + : 'N/A'} +
+ ), + enableSorting: false, + enableHiding: false, + }, + { + accessorKey: 'window', + header: ({ column }) => ( + + ), + cell: ({ row }) =>
{row.original.window || 'N/A'}
, + enableSorting: false, + enableHiding: false, + }, + { + accessorKey: 'alarm_value', + header: ({ column }) => ( + + ), + cell: ({ row }) => ( +
+ {!row.original.window + ? 'N/A' + : row.original.lastRefill && ( + + )} +
+ ), + enableSorting: false, + enableHiding: false, + }, + ]; +}; diff --git a/frontend/app/src/pages/main/tenant-settings/resource-limits/index.tsx b/frontend/app/src/pages/main/tenant-settings/resource-limits/index.tsx new file mode 100644 index 000000000..1d810d580 --- /dev/null +++ b/frontend/app/src/pages/main/tenant-settings/resource-limits/index.tsx @@ -0,0 +1,46 @@ +import { Separator } from '@/components/ui/separator'; +import { TenantContextType } from '@/lib/outlet'; +import { useOutletContext } from 'react-router-dom'; +import { useQuery } from '@tanstack/react-query'; +import { queries } from '@/lib/api'; +import { DataTable } from '@/components/molecules/data-table/data-table'; +import { columns } from './components/resource-limit-columns'; + +export default function ResourceLimits() { + const { tenant } = useOutletContext(); + + const resourcePolicyQuery = useQuery({ + ...queries.tenantResourcePolicy.get(tenant.metadata.id), + }); + + const cols = columns(); + + return ( +
+
+
+

+ Resource Limits +

+
+

+ Resource limits are used to control the usage of resources within a + tenant. When a limit is reached, the system will take action based on + the limit type. Please{' '} + + contact us + {' '} + if you need to adjust your limits. +

+ + row.metadata.id} + /> +
+
+ ); +} diff --git a/frontend/app/src/router.tsx b/frontend/app/src/router.tsx index 95cfdbcae..4682759aa 100644 --- a/frontend/app/src/router.tsx +++ b/frontend/app/src/router.tsx @@ -229,6 +229,17 @@ const routes: RouteObject[] = [ }, ), }, + { + path: '/tenant-settings/resource-limits', + lazy: async () => + import('./pages/main/tenant-settings/resource-limits').then( + (res) => { + return { + Component: res.default, + }; + }, + ), + }, { path: '/tenant-settings/ingestors', lazy: async () => diff --git a/hack/db/atlas-apply.sh b/hack/db/atlas-apply.sh index abe56c6e8..ff9b44812 100644 --- a/hack/db/atlas-apply.sh +++ b/hack/db/atlas-apply.sh @@ -40,10 +40,13 @@ if [[ ! "$DATABASE_URL" =~ sslmode ]]; then fi fi +echo "DATABASE_URL: $DATABASE_URL" # Check for prisma migrations MIGRATION_NAME=$(psql "$DATABASE_URL" -t -c "SELECT migration_name FROM _prisma_migrations ORDER BY started_at DESC LIMIT 1;" 2>/dev/null | xargs) MIGRATION_NAME=$(echo $MIGRATION_NAME | cut -d'_' -f1) +echo "Migration name: $MIGRATION_NAME" + if [ $? -eq 0 ] && [ -n "$MIGRATION_NAME" ]; then echo "Using existing prisma migration: $MIGRATION_NAME" diff --git a/hack/dev/atlas-migrate.sh b/hack/dev/atlas-migrate.sh old mode 100644 new mode 100755 diff --git a/internal/config/database/config.go b/internal/config/database/config.go index db1db6d1b..ad276066f 100644 --- a/internal/config/database/config.go +++ b/internal/config/database/config.go @@ -47,6 +47,8 @@ type Config struct { EngineRepository repository.EngineRepository + EntitlementRepository repository.EntitlementsRepository + Seed SeedConfigFile } diff --git a/internal/config/loader/loader.go b/internal/config/loader/loader.go index beb0e30e3..cf1136219 100644 --- a/internal/config/loader/loader.go +++ b/internal/config/loader/loader.go @@ -32,6 +32,7 @@ import ( "github.com/hatchet-dev/hatchet/internal/logger" "github.com/hatchet-dev/hatchet/internal/msgqueue/rabbitmq" "github.com/hatchet-dev/hatchet/internal/repository/cache" + "github.com/hatchet-dev/hatchet/internal/repository/metered" "github.com/hatchet-dev/hatchet/internal/repository/prisma" "github.com/hatchet-dev/hatchet/internal/repository/prisma/db" "github.com/hatchet-dev/hatchet/internal/services/ingestor" @@ -86,7 +87,13 @@ func (c *ConfigLoader) LoadDatabaseConfig() (res *database.Config, err error) { return nil, err } - return GetDatabaseConfigFromConfigFile(cf) + scf, err := LoadServerConfigFile(configFileBytes...) 
+ + if err != nil { + return nil, err + } + + return GetDatabaseConfigFromConfigFile(cf, &scf.Runtime) } // LoadServerConfig loads the server configuration @@ -113,7 +120,7 @@ func (c *ConfigLoader) LoadServerConfig() (cleanup func() error, res *server.Ser return GetServerConfigFromConfigfile(dc, cf) } -func GetDatabaseConfigFromConfigFile(cf *database.ConfigFile) (res *database.Config, err error) { +func GetDatabaseConfigFromConfigFile(cf *database.ConfigFile, runtime *server.ConfigFileRuntime) (res *database.Config, err error) { l := logger.NewStdErr(&cf.Logger, "database") databaseUrl := fmt.Sprintf( @@ -159,14 +166,20 @@ func GetDatabaseConfigFromConfigFile(cf *database.ConfigFile) (res *database.Con ch := cache.New(cf.CacheDuration) + entitlementRepo := prisma.NewEntitlementRepository(pool, runtime, prisma.WithLogger(&l), prisma.WithCache(ch)) + + meter := metered.NewMetered(entitlementRepo, &l) + return &database.Config{ Disconnect: func() error { ch.Stop() + meter.Stop() return c.Prisma.Disconnect() }, - APIRepository: prisma.NewAPIRepository(c, pool, prisma.WithLogger(&l), prisma.WithCache(ch)), - EngineRepository: prisma.NewEngineRepository(pool, prisma.WithLogger(&l), prisma.WithCache(ch)), - Seed: cf.Seed, + APIRepository: prisma.NewAPIRepository(c, pool, prisma.WithLogger(&l), prisma.WithCache(ch), prisma.WithMetered(meter)), + EngineRepository: prisma.NewEngineRepository(pool, prisma.WithLogger(&l), prisma.WithCache(ch), prisma.WithMetered(meter)), + EntitlementRepository: entitlementRepo, + Seed: cf.Seed, }, nil } @@ -201,6 +214,7 @@ func GetServerConfigFromConfigfile(dc *database.Config, cf *server.ServerConfigF ingestor.WithStreamEventsRepository(dc.EngineRepository.StreamEvent()), ingestor.WithLogRepository(dc.EngineRepository.Log()), ingestor.WithMessageQueue(mq), + ingestor.WithEntitlementsRepository(dc.EntitlementRepository), ) if err != nil { diff --git a/internal/config/server/server.go b/internal/config/server/server.go index 1d88b4a56..c4cf28fb6 100644 --- a/internal/config/server/server.go +++ b/internal/config/server/server.go @@ -79,6 +79,30 @@ type ConfigFileRuntime struct { // ShutdownWait is the time between the readiness probe being offline when a shutdown is triggered and the actual start of cleaning up resources. 
ShutdownWait time.Duration `mapstructure:"shutdownWait" json:"shutdownWait,omitempty" default:"20s"` + + // EnforceLimits controls whether the server enforces tenant resource limits + EnforceLimits bool `mapstructure:"enforceLimits" json:"enforceLimits,omitempty" default:"false"` + + Limits LimitConfigFile `mapstructure:"limits" json:"limits,omitempty"` +} + +type LimitConfigFile struct { + DefaultWorkflowRunLimit int `mapstructure:"defaultWorkflowRunLimit" json:"defaultWorkflowRunLimit,omitempty" default:"10000"` + DefaultWorkflowRunAlarmLimit int `mapstructure:"defaultWorkflowRunAlarmLimit" json:"defaultWorkflowRunAlarmLimit,omitempty" default:"7500"` + DefaultWorkflowRunWindow time.Duration `mapstructure:"defaultWorkflowRunWindow" json:"defaultWorkflowRunWindow,omitempty" default:"24h"` + + DefaultWorkerLimit int `mapstructure:"defaultWorkerLimit" json:"defaultWorkerLimit,omitempty" default:"4"` + DefaultWorkerAlarmLimit int `mapstructure:"defaultWorkerAlarmLimit" json:"defaultWorkerAlarmLimit,omitempty" default:"2"` + + DefaultEventLimit int `mapstructure:"defaultEventLimit" json:"defaultEventLimit,omitempty" default:"10000"` + DefaultEventAlarmLimit int `mapstructure:"defaultEventAlarmLimit" json:"defaultEventAlarmLimit,omitempty" default:"7500"` + DefaultEventWindow time.Duration `mapstructure:"defaultEventWindow" json:"defaultEventWindow,omitempty" default:"24h"` + + DefaultCronLimit int `mapstructure:"defaultCronLimit" json:"defaultCronLimit,omitempty" default:"20"` + DefaultCronAlarmLimit int `mapstructure:"defaultCronAlarmLimit" json:"defaultCronAlarmLimit,omitempty" default:"15"` + + DefaultScheduleLimit int `mapstructure:"defaultScheduleLimit" json:"defaultScheduleLimit,omitempty" default:"10000"` + DefaultScheduleAlarmLimit int `mapstructure:"defaultScheduleAlarmLimit" json:"defaultScheduleAlarmLimit,omitempty" default:"7500"` } // Alerting options @@ -341,6 +365,25 @@ func BindAllEnv(v *viper.Viper) { _ = v.BindEnv("runtime.workerEnabled", "SERVER_WORKER_ENABLED") _ = v.BindEnv("runtime.shutdownWait", "SERVER_SHUTDOWN_WAIT") _ = v.BindEnv("services", "SERVER_SERVICES") + _ = v.BindEnv("runtime.enforceLimits", "SERVER_ENFORCE_LIMITS") + + // limit options (viper keys match the LimitConfigFile mapstructure tags above) + _ = v.BindEnv("limits.defaultWorkflowRunLimit", "SERVER_LIMITS_DEFAULT_WORKFLOW_RUN_LIMIT") + _ = v.BindEnv("limits.defaultWorkflowRunAlarmLimit", "SERVER_LIMITS_DEFAULT_WORKFLOW_RUN_ALERT_LIMIT") + _ = v.BindEnv("limits.defaultWorkflowRunWindow", "SERVER_LIMITS_DEFAULT_WORKFLOW_RUN_WINDOW") + + _ = v.BindEnv("limits.defaultWorkerLimit", "SERVER_LIMITS_DEFAULT_WORKER_LIMIT") + _ = v.BindEnv("limits.defaultWorkerAlarmLimit", "SERVER_LIMITS_DEFAULT_WORKER_ALERT_LIMIT") + + _ = v.BindEnv("limits.defaultEventLimit", "SERVER_LIMITS_DEFAULT_EVENT_LIMIT") + _ = v.BindEnv("limits.defaultEventAlarmLimit", "SERVER_LIMITS_DEFAULT_EVENT_ALERT_LIMIT") + _ = v.BindEnv("limits.defaultEventWindow", "SERVER_LIMITS_DEFAULT_EVENT_WINDOW") + + _ = v.BindEnv("limits.defaultCronLimit", "SERVER_LIMITS_DEFAULT_CRON_LIMIT") + _ = v.BindEnv("limits.defaultCronAlarmLimit", "SERVER_LIMITS_DEFAULT_CRON_ALERT_LIMIT") + + _ = v.BindEnv("limits.defaultScheduleLimit", "SERVER_LIMITS_DEFAULT_SCHEDULE_LIMIT") + _ = v.BindEnv("limits.defaultScheduleAlarmLimit", "SERVER_LIMITS_DEFAULT_SCHEDULE_ALERT_LIMIT") // alerting options _ = v.BindEnv("alerting.sentry.enabled", "SERVER_ALERTING_SENTRY_ENABLED") diff --git a/internal/integrations/alerting/alerter.go b/internal/integrations/alerting/alerter.go index 33d68d64d..391d94a2b 100644 ---
a/internal/integrations/alerting/alerter.go +++ b/internal/integrations/alerting/alerter.go @@ -208,3 +208,52 @@ func (t *TenantAlertManager) sendExpiringTokenAlert(ctx context.Context, tenantA return nil } + +func (t *TenantAlertManager) SendTenantResourceLimitAlert(tenantId string, alert *dbsqlc.TenantResourceLimitAlert) error { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // read in the tenant alerting settings and determine if we should alert + tenantAlerting, err := t.repo.TenantAlertingSettings().GetTenantAlertingSettings(ctx, tenantId) + + if err != nil { + return err + } + + percentage := int(float64(alert.Value) / float64(alert.Limit) * 100) + + payload := &alerttypes.ResourceLimitAlert{ + Link: fmt.Sprintf("%s/tenant-settings/resource-limits?tenant=%s", t.serverURL, tenantId), + Resource: string(alert.Resource), + AlertType: string(alert.AlertType), + CurrentValue: int(alert.Value), + LimitValue: int(alert.Limit), + Percentage: percentage, + } + + return t.sendTenantResourceLimitAlert(ctx, tenantAlerting, payload) +} + +func (t *TenantAlertManager) sendTenantResourceLimitAlert(ctx context.Context, tenantAlerting *repository.GetTenantAlertingSettingsResponse, payload *alerttypes.ResourceLimitAlert) error { + + if !tenantAlerting.Settings.EnableTenantResourceLimitAlerts { + return nil + } + + var err error + + // iterate through possible alerters + for _, slackWebhook := range tenantAlerting.SlackWebhooks { + if innerErr := t.sendSlackTenantResourceLimitAlert(slackWebhook, payload); innerErr != nil { + err = multierror.Append(err, innerErr) + } + } + + for _, emailGroup := range tenantAlerting.EmailGroups { + if innerErr := t.sendEmailTenantResourceLimitAlert(tenantAlerting.Tenant, emailGroup, payload); innerErr != nil { + err = multierror.Append(err, innerErr) + } + } + + return err +} diff --git a/internal/integrations/alerting/alerttypes/resource_limit_alert.go b/internal/integrations/alerting/alerttypes/resource_limit_alert.go new file mode 100644 index 000000000..91ac0c7a8 --- /dev/null +++ b/internal/integrations/alerting/alerttypes/resource_limit_alert.go @@ -0,0 +1,10 @@ +package alerttypes + +type ResourceLimitAlert struct { + Link string `json:"link"` + Resource string `json:"resource"` + AlertType string `json:"alert_type"` + CurrentValue int `json:"current_value"` + LimitValue int `json:"limit_value"` + Percentage int `json:"percentage"` +} diff --git a/internal/integrations/alerting/email.go b/internal/integrations/alerting/email.go index a7d91f8ed..f1d9f1c0d 100644 --- a/internal/integrations/alerting/email.go +++ b/internal/integrations/alerting/email.go @@ -3,8 +3,12 @@ package alerting import ( "context" "fmt" + "strings" "time" + + "golang.org/x/text/cases" + "golang.org/x/text/language" + "github.com/hatchet-dev/hatchet/internal/integrations/alerting/alerttypes" "github.com/hatchet-dev/hatchet/internal/integrations/email" "github.com/hatchet-dev/hatchet/internal/repository" @@ -59,3 +63,45 @@ func (t *TenantAlertManager) sendEmailExpiringTokenAlert(tenant *dbsqlc.Tenant, }, ) } + +func (t *TenantAlertManager) sendEmailTenantResourceLimitAlert(tenant *dbsqlc.Tenant, emailGroup *repository.TenantAlertEmailGroupForSend, payload *alerttypes.ResourceLimitAlert) error { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + var subject string + var summary string + var summary2 string + + resource := strings.ReplaceAll(strings.ToLower(payload.Resource), "_", " ") + resource =
cases.Title(language.English).String(resource) + + if payload.AlertType == string(dbsqlc.TenantResourceLimitAlertTypeAlarm) { + subject = fmt.Sprintf("%s has used %d%% of its limit (%d/%d)", resource, payload.Percentage, payload.CurrentValue, payload.LimitValue) + summary = "We're sending you this alert because a resource on your Hatchet tenant is approaching its usage limit." + summary2 = "Once the limit is reached, any further resource usage will be denied." + } + + if payload.AlertType == string(dbsqlc.TenantResourceLimitAlertTypeExhausted) { + subject = fmt.Sprintf("%s has exhausted 100%% of its limit (%d/%d)", resource, payload.CurrentValue, payload.LimitValue) + summary = "We're sending you this alert because a resource on your Hatchet tenant has exhausted its usage limit." + summary2 = "Any further resource usage will be denied until the limit is increased." + } + + return t.email.SendTenantResourceLimitAlert( + ctx, + emailGroup.Emails, + email.ResourceLimitAlertData{ + TenantName: tenant.Name, + Subject: subject, + Summary: summary, + Summary2: summary2, + Resource: payload.Resource, + AlertType: payload.AlertType, + CurrentValue: payload.CurrentValue, + LimitValue: payload.LimitValue, + Percentage: payload.Percentage, + Link: payload.Link, + SettingsLink: fmt.Sprintf("%s/tenant-settings/alerting?tenant=%s", t.serverURL, sqlchelpers.UUIDToStr(tenant.ID)), + }, + ) +} diff --git a/internal/integrations/alerting/slack.go b/internal/integrations/alerting/slack.go index 494e3c535..8ad096154 100644 --- a/internal/integrations/alerting/slack.go +++ b/internal/integrations/alerting/slack.go @@ -139,3 +139,85 @@ func (t *TenantAlertManager) getSlackExpiringTokenTextAndBlocks(payload *alertty BlockSet: res, } } + +func (t *TenantAlertManager) sendSlackTenantResourceLimitAlert(slackWebhook *dbsqlc.SlackAppWebhook, payload *alerttypes.ResourceLimitAlert) error { + headerText, blocks := t.getSlackTenantResourceLimitTextAndBlocks(payload) + + // decrypt the webhook url + whDecrypted, err := t.enc.Decrypt(slackWebhook.WebhookURL, "incoming_webhook_url") + + if err != nil { + return err + } + + err = slack.PostWebhook(string(whDecrypted), &slack.WebhookMessage{ + Text: headerText, + Blocks: blocks, + }) + + if err != nil { + return err + } + + return nil +} + +func (t *TenantAlertManager) getSlackTenantResourceLimitTextAndBlocks(payload *alerttypes.ResourceLimitAlert) (string, *slack.Blocks) { + res := make([]slack.Block, 0) + + var headerText string + + if payload.AlertType == string(dbsqlc.TenantResourceLimitAlertTypeAlarm) { + headerText = fmt.Sprintf(":warning: Limit Alarm! `%s` resource is at %d%% of its limit (%d/%d)", payload.Resource, payload.Percentage, payload.CurrentValue, payload.LimitValue) + } + + if payload.AlertType == string(dbsqlc.TenantResourceLimitAlertTypeExhausted) { + headerText = fmt.Sprintf(":no_entry: Limit Exhausted!
`%s` resource is at 100%% of its limit (%d/%d)", payload.Resource, payload.CurrentValue, payload.LimitValue) + } + + res = append(res, slack.NewSectionBlock( + slack.NewTextBlockObject(slack.MarkdownType, headerText, false, false), + nil, + nil, + )) + + buttonAccessory := slack.NewAccessory( + slack.NewButtonBlockElement( + "View Limits", + payload.Resource, + slack.NewTextBlockObject(slack.PlainTextType, "View Limits", true, false), + ), + ) + + buttonAccessory.ButtonElement.URL = payload.Link + buttonAccessory.ButtonElement.ActionID = "button-action" + + if payload.AlertType == string(dbsqlc.TenantResourceLimitAlertTypeExhausted) { + res = append(res, slack.NewSectionBlock( + slack.NewTextBlockObject( + slack.MarkdownType, + "Any further resource usage will be denied until the limit is increased.", + false, + false, + ), + nil, + nil, + )) + + } + + res = append(res, slack.NewSectionBlock( + slack.NewTextBlockObject( + slack.MarkdownType, + "Please review your resource usage and consider upgrading your plan.", + false, + false, + ), + nil, + buttonAccessory, + )) + + return headerText, &slack.Blocks{ + BlockSet: res, + } +} diff --git a/internal/integrations/email/email.go b/internal/integrations/email/email.go index 1fa3c62e6..9903a6128 100644 --- a/internal/integrations/email/email.go +++ b/internal/integrations/email/email.go @@ -30,6 +30,20 @@ type ExpiringTokenEmailData struct { SettingsLink string `json:"settings_link"` } +type ResourceLimitAlertData struct { + Subject string `json:"subject"` + Summary string `json:"summary"` + Summary2 string `json:"summary2"` + TenantName string `json:"tenant_name"` + Link string `json:"link"` + Resource string `json:"resource"` + AlertType string `json:"alert_type"` + CurrentValue int `json:"current_value"` + LimitValue int `json:"limit_value"` + Percentage int `json:"percentage"` + SettingsLink string `json:"settings_link"` +} + type EmailService interface { // for clients to show email settings IsValid() bool @@ -37,6 +51,7 @@ type EmailService interface { SendTenantInviteEmail(ctx context.Context, email string, data TenantInviteEmailData) error SendWorkflowRunFailedAlerts(ctx context.Context, emails []string, data WorkflowRunsFailedEmailData) error SendExpiringTokenEmail(ctx context.Context, emails []string, data ExpiringTokenEmailData) error + SendTenantResourceLimitAlert(ctx context.Context, emails []string, data ResourceLimitAlertData) error } type NoOpService struct{} @@ -56,3 +71,7 @@ func (s *NoOpService) SendWorkflowRunFailedAlerts(ctx context.Context, emails [] func (s *NoOpService) SendExpiringTokenEmail(ctx context.Context, emails []string, data ExpiringTokenEmailData) error { return nil } + +func (s *NoOpService) SendTenantResourceLimitAlert(ctx context.Context, emails []string, data ResourceLimitAlertData) error { + return nil +} diff --git a/internal/integrations/email/postmark/postmark.go b/internal/integrations/email/postmark/postmark.go index d7c436c1a..b15c6799d 100644 --- a/internal/integrations/email/postmark/postmark.go +++ b/internal/integrations/email/postmark/postmark.go @@ -36,6 +36,7 @@ const ( userInviteTemplate = "user-invitation" workflowRunsFailedTemplate = "workflow-runs-failed" tokenAlertExpiringTemplate = "token-expiring" // nolint: gosec + resourceLimitAlertTemplate = "resource-limit-alert" ) type sendEmailFromTemplateRequest struct { @@ -55,27 +56,43 @@ func (s *PostmarkClient) IsValid() bool { } func (c *PostmarkClient) SendTenantInviteEmail(ctx context.Context, to string, data email.TenantInviteEmailData) 
error { - return c.sendTemplateEmail(ctx, to, userInviteTemplate, data) + return c.sendTemplateEmail(ctx, to, userInviteTemplate, data, false) } func (c *PostmarkClient) SendWorkflowRunFailedAlerts(ctx context.Context, emails []string, data email.WorkflowRunsFailedEmailData) error { - return c.sendTemplateEmailBCC(ctx, strings.Join(emails, ","), workflowRunsFailedTemplate, data) + return c.sendTemplateEmailBCC(ctx, strings.Join(emails, ","), workflowRunsFailedTemplate, data, false) } func (c *PostmarkClient) SendExpiringTokenEmail(ctx context.Context, emails []string, data email.ExpiringTokenEmailData) error { - return c.sendTemplateEmail(ctx, strings.Join(emails, ","), tokenAlertExpiringTemplate, data) + return c.sendTemplateEmailBCC(ctx, strings.Join(emails, ","), tokenAlertExpiringTemplate, data, false) } -func (c *PostmarkClient) sendTemplateEmail(ctx context.Context, to, templateAlias string, templateModelData interface{}) error { +func (c *PostmarkClient) SendTenantResourceLimitAlert(ctx context.Context, emails []string, data email.ResourceLimitAlertData) error { + return c.sendTemplateEmailBCC(ctx, strings.Join(emails, ","), resourceLimitAlertTemplate, data, true) +} + +func (c *PostmarkClient) sendTemplateEmail(ctx context.Context, to, templateAlias string, templateModelData interface{}, bccSupport bool) error { + var bcc string + + if bccSupport { + bcc = c.supportEmail + } + return c.sendRequest(ctx, "/email/withTemplate", "POST", &sendEmailFromTemplateRequest{ From: fmt.Sprintf("%s <%s>", c.fromName, c.fromEmail), To: to, + Bcc: bcc, TemplateAlias: templateAlias, TemplateModel: templateModelData, }) } -func (c *PostmarkClient) sendTemplateEmailBCC(ctx context.Context, bcc, templateAlias string, templateModelData interface{}) error { +func (c *PostmarkClient) sendTemplateEmailBCC(ctx context.Context, bcc, templateAlias string, templateModelData interface{}, bccSupport bool) error { + + if bccSupport { + bcc = fmt.Sprintf("%s,%s", bcc, c.supportEmail) + } + return c.sendRequest(ctx, "/email/withTemplate", "POST", &sendEmailFromTemplateRequest{ From: fmt.Sprintf("%s <%s>", c.fromName, c.fromEmail), Bcc: bcc, diff --git a/internal/repository/metered/metered.go b/internal/repository/metered/metered.go new file mode 100644 index 000000000..bb09595fe --- /dev/null +++ b/internal/repository/metered/metered.go @@ -0,0 +1,89 @@ +package metered + +import ( + "context" + "fmt" + "time" + + "github.com/rs/zerolog" + + "github.com/hatchet-dev/hatchet/internal/repository" + "github.com/hatchet-dev/hatchet/internal/repository/cache" + "github.com/hatchet-dev/hatchet/internal/repository/prisma/dbsqlc" +) + +type Metered struct { + entitlements repository.EntitlementsRepository + l *zerolog.Logger + c cache.Cacheable +} + +func (m *Metered) Stop() { + m.c.Stop() +} + +func NewMetered(entitlements repository.EntitlementsRepository, l *zerolog.Logger) *Metered { + return &Metered{ + entitlements: entitlements, + l: l, + c: cache.New(time.Second * 30), + } +} + +var ErrResourceExhausted = fmt.Errorf("resource exhausted") + +func MakeMetered[T any](ctx context.Context, m *Metered, resource dbsqlc.LimitResource, tenantId string, f func() (*T, error)) (*T, error) { + + var key = fmt.Sprintf("%s:%s", resource, tenantId) + + var canCreate *bool + var percent int + + if hit, ok := m.c.Get(key); ok { + c := hit.(bool) + canCreate = &c + } + + if canCreate == nil { + c, percent, err := m.entitlements.TenantLimit().CanCreate(ctx, resource, tenantId) + + if err != nil { + return nil, fmt.Errorf("could not 
check tenant limit: %w", err) + } + + canCreate = &c + + if percent <= 50 || percent >= 100 { + m.c.Set(key, c) + } + + } + + if !*canCreate { + return nil, ErrResourceExhausted + } + + res, err := f() + + if err != nil { + return nil, err + } + + deferredMeter := func() { + limit, err := m.entitlements.TenantLimit().Meter(ctx, resource, tenantId) + + if limit != nil && (percent <= 50 || percent >= 100) { + m.c.Set(key, limit.Value < limit.LimitValue) + } + + // TODO: we should probably publish an event here if limits are exhausted to notify immediately + + if err != nil { + m.l.Error().Err(err).Msg("could not meter resource") + } + } + + defer deferredMeter() + + return res, nil +} diff --git a/internal/repository/prisma/dbsqlc/models.go b/internal/repository/prisma/dbsqlc/models.go index da2cb35f5..89361cb75 100644 --- a/internal/repository/prisma/dbsqlc/models.go +++ b/internal/repository/prisma/dbsqlc/models.go @@ -185,6 +185,51 @@ func (ns NullJobRunStatus) Value() (driver.Value, error) { return string(ns.JobRunStatus), nil } +type LimitResource string + +const ( + LimitResourceWORKFLOWRUN LimitResource = "WORKFLOW_RUN" + LimitResourceEVENT LimitResource = "EVENT" + LimitResourceWORKER LimitResource = "WORKER" + LimitResourceCRON LimitResource = "CRON" + LimitResourceSCHEDULE LimitResource = "SCHEDULE" +) + +func (e *LimitResource) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = LimitResource(s) + case string: + *e = LimitResource(s) + default: + return fmt.Errorf("unsupported scan type for LimitResource: %T", src) + } + return nil +} + +type NullLimitResource struct { + LimitResource LimitResource `json:"LimitResource"` + Valid bool `json:"valid"` // Valid is true if LimitResource is not NULL +} + +// Scan implements the Scanner interface. +func (ns *NullLimitResource) Scan(value interface{}) error { + if value == nil { + ns.LimitResource, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.LimitResource.Scan(value) +} + +// Value implements the driver Valuer interface. +func (ns NullLimitResource) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.LimitResource), nil +} + type LogLineLevel string const ( @@ -416,6 +461,48 @@ func (ns NullTenantMemberRole) Value() (driver.Value, error) { return string(ns.TenantMemberRole), nil } +type TenantResourceLimitAlertType string + +const ( + TenantResourceLimitAlertTypeAlarm TenantResourceLimitAlertType = "Alarm" + TenantResourceLimitAlertTypeExhausted TenantResourceLimitAlertType = "Exhausted" +) + +func (e *TenantResourceLimitAlertType) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = TenantResourceLimitAlertType(s) + case string: + *e = TenantResourceLimitAlertType(s) + default: + return fmt.Errorf("unsupported scan type for TenantResourceLimitAlertType: %T", src) + } + return nil +} + +type NullTenantResourceLimitAlertType struct { + TenantResourceLimitAlertType TenantResourceLimitAlertType `json:"TenantResourceLimitAlertType"` + Valid bool `json:"valid"` // Valid is true if TenantResourceLimitAlertType is not NULL +} + +// Scan implements the Scanner interface. +func (ns *NullTenantResourceLimitAlertType) Scan(value interface{}) error { + if value == nil { + ns.TenantResourceLimitAlertType, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.TenantResourceLimitAlertType.Scan(value) +} + +// Value implements the driver Valuer interface. 
+func (ns NullTenantResourceLimitAlertType) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.TenantResourceLimitAlertType), nil +} + type VcsProvider string const ( @@ -868,16 +955,17 @@ type TenantAlertEmailGroup struct { } type TenantAlertingSettings struct { - ID pgtype.UUID `json:"id"` - CreatedAt pgtype.Timestamp `json:"createdAt"` - UpdatedAt pgtype.Timestamp `json:"updatedAt"` - DeletedAt pgtype.Timestamp `json:"deletedAt"` - TenantId pgtype.UUID `json:"tenantId"` - MaxFrequency string `json:"maxFrequency"` - LastAlertedAt pgtype.Timestamp `json:"lastAlertedAt"` - TickerId pgtype.UUID `json:"tickerId"` - EnableExpiringTokenAlerts bool `json:"enableExpiringTokenAlerts"` - EnableWorkflowRunFailureAlerts bool `json:"enableWorkflowRunFailureAlerts"` + ID pgtype.UUID `json:"id"` + CreatedAt pgtype.Timestamp `json:"createdAt"` + UpdatedAt pgtype.Timestamp `json:"updatedAt"` + DeletedAt pgtype.Timestamp `json:"deletedAt"` + TenantId pgtype.UUID `json:"tenantId"` + MaxFrequency string `json:"maxFrequency"` + LastAlertedAt pgtype.Timestamp `json:"lastAlertedAt"` + TickerId pgtype.UUID `json:"tickerId"` + EnableExpiringTokenAlerts bool `json:"enableExpiringTokenAlerts"` + EnableWorkflowRunFailureAlerts bool `json:"enableWorkflowRunFailureAlerts"` + EnableTenantResourceLimitAlerts bool `json:"enableTenantResourceLimitAlerts"` } type TenantInviteLink struct { @@ -901,6 +989,32 @@ type TenantMember struct { Role TenantMemberRole `json:"role"` } +type TenantResourceLimit struct { + ID pgtype.UUID `json:"id"` + CreatedAt pgtype.Timestamp `json:"createdAt"` + UpdatedAt pgtype.Timestamp `json:"updatedAt"` + Resource LimitResource `json:"resource"` + TenantId pgtype.UUID `json:"tenantId"` + LimitValue int32 `json:"limitValue"` + AlarmValue pgtype.Int4 `json:"alarmValue"` + Value int32 `json:"value"` + Window pgtype.Text `json:"window"` + LastRefill pgtype.Timestamp `json:"lastRefill"` + CustomValueMeter bool `json:"customValueMeter"` +} + +type TenantResourceLimitAlert struct { + ID pgtype.UUID `json:"id"` + CreatedAt pgtype.Timestamp `json:"createdAt"` + UpdatedAt pgtype.Timestamp `json:"updatedAt"` + ResourceLimitId pgtype.UUID `json:"resourceLimitId"` + TenantId pgtype.UUID `json:"tenantId"` + Resource LimitResource `json:"resource"` + AlertType TenantResourceLimitAlertType `json:"alertType"` + Value int32 `json:"value"` + Limit int32 `json:"limit"` +} + type TenantVcsProvider struct { ID pgtype.UUID `json:"id"` CreatedAt pgtype.Timestamp `json:"createdAt"` diff --git a/internal/repository/prisma/dbsqlc/schema.sql b/internal/repository/prisma/dbsqlc/schema.sql index 59d525cff..4b5973ffd 100644 --- a/internal/repository/prisma/dbsqlc/schema.sql +++ b/internal/repository/prisma/dbsqlc/schema.sql @@ -10,6 +10,9 @@ CREATE TYPE "JobKind" AS ENUM ('DEFAULT', 'ON_FAILURE'); -- CreateEnum CREATE TYPE "JobRunStatus" AS ENUM ('PENDING', 'RUNNING', 'SUCCEEDED', 'FAILED', 'CANCELLED'); +-- CreateEnum +CREATE TYPE "LimitResource" AS ENUM ('WORKFLOW_RUN', 'EVENT', 'WORKER', 'CRON', 'SCHEDULE'); + -- CreateEnum CREATE TYPE "LogLineLevel" AS ENUM ('DEBUG', 'INFO', 'WARN', 'ERROR'); @@ -25,6 +28,9 @@ CREATE TYPE "StepRunStatus" AS ENUM ('PENDING', 'PENDING_ASSIGNMENT', 'ASSIGNED' -- CreateEnum CREATE TYPE "TenantMemberRole" AS ENUM ('OWNER', 'ADMIN', 'MEMBER'); +-- CreateEnum +CREATE TYPE "TenantResourceLimitAlertType" AS ENUM ('Alarm', 'Exhausted'); + -- CreateEnum CREATE TYPE "VcsProvider" AS ENUM ('GITHUB'); @@ -445,6 +451,7 @@ CREATE TABLE "TenantAlertingSettings" ( 
"tickerId" UUID, "enableExpiringTokenAlerts" BOOLEAN NOT NULL DEFAULT true, "enableWorkflowRunFailureAlerts" BOOLEAN NOT NULL DEFAULT false, + "enableTenantResourceLimitAlerts" BOOLEAN NOT NULL DEFAULT true, CONSTRAINT "TenantAlertingSettings_pkey" PRIMARY KEY ("id") ); @@ -476,6 +483,38 @@ CREATE TABLE "TenantMember" ( CONSTRAINT "TenantMember_pkey" PRIMARY KEY ("id") ); +-- CreateTable +CREATE TABLE "TenantResourceLimit" ( + "id" UUID NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "resource" "LimitResource" NOT NULL, + "tenantId" UUID NOT NULL, + "limitValue" INTEGER NOT NULL, + "alarmValue" INTEGER, + "value" INTEGER NOT NULL DEFAULT 0, + "window" TEXT, + "lastRefill" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "customValueMeter" BOOLEAN NOT NULL DEFAULT false, + + CONSTRAINT "TenantResourceLimit_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "TenantResourceLimitAlert" ( + "id" UUID NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "resourceLimitId" UUID NOT NULL, + "tenantId" UUID NOT NULL, + "resource" "LimitResource" NOT NULL, + "alertType" "TenantResourceLimitAlertType" NOT NULL, + "value" INTEGER NOT NULL, + "limit" INTEGER NOT NULL, + + CONSTRAINT "TenantResourceLimitAlert_pkey" PRIMARY KEY ("id") +); + -- CreateTable CREATE TABLE "TenantVcsProvider" ( "id" UUID NOT NULL, @@ -928,6 +967,15 @@ CREATE UNIQUE INDEX "TenantMember_id_key" ON "TenantMember"("id" ASC); -- CreateIndex CREATE UNIQUE INDEX "TenantMember_tenantId_userId_key" ON "TenantMember"("tenantId" ASC, "userId" ASC); +-- CreateIndex +CREATE UNIQUE INDEX "TenantResourceLimit_id_key" ON "TenantResourceLimit"("id" ASC); + +-- CreateIndex +CREATE UNIQUE INDEX "TenantResourceLimit_tenantId_resource_key" ON "TenantResourceLimit"("tenantId" ASC, "resource" ASC); + +-- CreateIndex +CREATE UNIQUE INDEX "TenantResourceLimitAlert_id_key" ON "TenantResourceLimitAlert"("id" ASC); + -- CreateIndex CREATE UNIQUE INDEX "TenantVcsProvider_id_key" ON "TenantVcsProvider"("id" ASC); @@ -1234,6 +1282,15 @@ ALTER TABLE "TenantMember" ADD CONSTRAINT "TenantMember_tenantId_fkey" FOREIGN K -- AddForeignKey ALTER TABLE "TenantMember" ADD CONSTRAINT "TenantMember_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE; +-- AddForeignKey +ALTER TABLE "TenantResourceLimit" ADD CONSTRAINT "TenantResourceLimit_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "TenantResourceLimitAlert" ADD CONSTRAINT "TenantResourceLimitAlert_resourceLimitId_fkey" FOREIGN KEY ("resourceLimitId") REFERENCES "TenantResourceLimit"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "TenantResourceLimitAlert" ADD CONSTRAINT "TenantResourceLimitAlert_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE; + -- AddForeignKey ALTER TABLE "TenantVcsProvider" ADD CONSTRAINT "TenantVcsProvider_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE; diff --git a/internal/repository/prisma/dbsqlc/sqlc.yaml b/internal/repository/prisma/dbsqlc/sqlc.yaml index a9b74c0f7..3d9e54cda 100644 --- a/internal/repository/prisma/dbsqlc/sqlc.yaml +++ b/internal/repository/prisma/dbsqlc/sqlc.yaml @@ -19,6 +19,7 @@ sql: - logs.sql - tenants.sql - 
rate_limits.sql + - tenant_limits.sql schema: - schema.sql strict_order_by: false diff --git a/internal/repository/prisma/dbsqlc/tenant_limits.sql b/internal/repository/prisma/dbsqlc/tenant_limits.sql new file mode 100644 index 000000000..917dc4f2d --- /dev/null +++ b/internal/repository/prisma/dbsqlc/tenant_limits.sql @@ -0,0 +1,78 @@ +-- name: ListTenantResourceLimits :many +SELECT * FROM "TenantResourceLimit" +WHERE "tenantId" = @tenantId::uuid; + +-- name: ResolveAllLimitsIfWindowPassed :many +WITH resolved_limits AS ( + UPDATE "TenantResourceLimit" + SET + "value" = 0, -- Reset value to 0 + "lastRefill" = CURRENT_TIMESTAMP -- Update lastRefill timestamp + WHERE + ("window" IS NOT NULL AND "window" != '' AND NOW() - "lastRefill" >= "window"::INTERVAL) + RETURNING * +) +SELECT * +FROM resolved_limits; + +-- name: GetTenantResourceLimit :one +WITH updated AS ( + UPDATE "TenantResourceLimit" + SET + "value" = 0, -- Reset to 0 if the window has passed + "lastRefill" = CURRENT_TIMESTAMP -- Update lastRefill if the window has passed + WHERE "tenantId" = @tenantId::uuid + AND (("window" IS NOT NULL AND "window" != '' AND NOW() - "lastRefill" >= "window"::INTERVAL)) + AND "resource" = sqlc.narg('resource')::"LimitResource" + AND "customValueMeter" = false + RETURNING * +) +SELECT * FROM updated +UNION ALL +SELECT * FROM "TenantResourceLimit" +WHERE "tenantId" = @tenantId::uuid + AND "resource" = sqlc.narg('resource')::"LimitResource" + AND NOT EXISTS (SELECT 1 FROM updated); + +-- name: SelectOrInsertTenantResourceLimit :one +WITH existing AS ( + SELECT * + FROM "TenantResourceLimit" + WHERE "tenantId" = @tenantId::uuid AND "resource" = sqlc.narg('resource')::"LimitResource" +) +, insert_row AS ( + INSERT INTO "TenantResourceLimit" ("id", "tenantId", "resource", "value", "limitValue", "alarmValue", "window", "lastRefill", "customValueMeter") + SELECT gen_random_uuid(), @tenantId::uuid, sqlc.narg('resource')::"LimitResource", 0, sqlc.narg('limitValue')::int, sqlc.narg('alarmValue')::int, sqlc.narg('window')::text, CURRENT_TIMESTAMP, COALESCE(sqlc.narg('customValueMeter')::boolean, false) + WHERE NOT EXISTS (SELECT 1 FROM existing) + RETURNING * +) +SELECT * FROM insert_row +UNION ALL +SELECT * FROM existing +LIMIT 1; + +-- name: MeterTenantResource :one +UPDATE "TenantResourceLimit" +SET + "value" = CASE + WHEN ("customValueMeter" = true OR ("window" IS NOT NULL AND "window" != '' AND NOW() - "lastRefill" >= "window"::INTERVAL)) THEN + 0 -- Refill to 0 since the window has passed + ELSE + "value" + 1 -- Increment the current value within the window + END, + "lastRefill" = CASE + WHEN ("window" IS NOT NULL AND "window" != '' AND NOW() - "lastRefill" >= "window"::INTERVAL) THEN + CURRENT_TIMESTAMP -- Update lastRefill if the window has passed + ELSE + "lastRefill" -- Keep the lastRefill unchanged if within the window + END +WHERE "tenantId" = @tenantId::uuid + AND "resource" = sqlc.narg('resource')::"LimitResource" +RETURNING *; + +-- name: CountTenantWorkers :one +SELECT COUNT(distinct id) AS "count" +FROM "Worker" +WHERE "tenantId" = @tenantId::uuid +AND "lastHeartbeatAt" >= NOW() - '30 seconds'::INTERVAL +AND "isActive" = true; diff --git a/internal/repository/prisma/dbsqlc/tenant_limits.sql.go b/internal/repository/prisma/dbsqlc/tenant_limits.sql.go new file mode 100644 index 000000000..91b0e5199 --- /dev/null +++ b/internal/repository/prisma/dbsqlc/tenant_limits.sql.go @@ -0,0 +1,293 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.24.0 +// source: tenant_limits.sql + +package dbsqlc + +import ( + "context" + + "github.com/jackc/pgx/v5/pgtype" +) + +const countTenantWorkers = `-- name: CountTenantWorkers :one +SELECT COUNT(distinct id) AS "count" +FROM "Worker" +WHERE "tenantId" = $1::uuid +AND "lastHeartbeatAt" >= NOW() - '30 seconds'::INTERVAL +AND "isActive" = true +` + +func (q *Queries) CountTenantWorkers(ctx context.Context, db DBTX, tenantid pgtype.UUID) (int64, error) { + row := db.QueryRow(ctx, countTenantWorkers, tenantid) + var count int64 + err := row.Scan(&count) + return count, err +} + +const getTenantResourceLimit = `-- name: GetTenantResourceLimit :one +WITH updated AS ( + UPDATE "TenantResourceLimit" + SET + "value" = 0, -- Reset to 0 if the window has passed + "lastRefill" = CURRENT_TIMESTAMP -- Update lastRefill if the window has passed + WHERE "tenantId" = $1::uuid + AND (("window" IS NOT NULL AND "window" != '' AND NOW() - "lastRefill" >= "window"::INTERVAL)) + AND "resource" = $2::"LimitResource" + AND "customValueMeter" = false + RETURNING id, "createdAt", "updatedAt", resource, "tenantId", "limitValue", "alarmValue", value, "window", "lastRefill", "customValueMeter" +) +SELECT id, "createdAt", "updatedAt", resource, "tenantId", "limitValue", "alarmValue", value, "window", "lastRefill", "customValueMeter" FROM updated +UNION ALL +SELECT id, "createdAt", "updatedAt", resource, "tenantId", "limitValue", "alarmValue", value, "window", "lastRefill", "customValueMeter" FROM "TenantResourceLimit" +WHERE "tenantId" = $1::uuid + AND "resource" = $2::"LimitResource" + AND NOT EXISTS (SELECT 1 FROM updated) +` + +type GetTenantResourceLimitParams struct { + Tenantid pgtype.UUID `json:"tenantid"` + Resource NullLimitResource `json:"resource"` +} + +type GetTenantResourceLimitRow struct { + ID pgtype.UUID `json:"id"` + CreatedAt pgtype.Timestamp `json:"createdAt"` + UpdatedAt pgtype.Timestamp `json:"updatedAt"` + Resource LimitResource `json:"resource"` + TenantId pgtype.UUID `json:"tenantId"` + LimitValue int32 `json:"limitValue"` + AlarmValue pgtype.Int4 `json:"alarmValue"` + Value int32 `json:"value"` + Window pgtype.Text `json:"window"` + LastRefill pgtype.Timestamp `json:"lastRefill"` + CustomValueMeter bool `json:"customValueMeter"` +} + +func (q *Queries) GetTenantResourceLimit(ctx context.Context, db DBTX, arg GetTenantResourceLimitParams) (*GetTenantResourceLimitRow, error) { + row := db.QueryRow(ctx, getTenantResourceLimit, arg.Tenantid, arg.Resource) + var i GetTenantResourceLimitRow + err := row.Scan( + &i.ID, + &i.CreatedAt, + &i.UpdatedAt, + &i.Resource, + &i.TenantId, + &i.LimitValue, + &i.AlarmValue, + &i.Value, + &i.Window, + &i.LastRefill, + &i.CustomValueMeter, + ) + return &i, err +} + +const listTenantResourceLimits = `-- name: ListTenantResourceLimits :many +SELECT id, "createdAt", "updatedAt", resource, "tenantId", "limitValue", "alarmValue", value, "window", "lastRefill", "customValueMeter" FROM "TenantResourceLimit" +WHERE "tenantId" = $1::uuid +` + +func (q *Queries) ListTenantResourceLimits(ctx context.Context, db DBTX, tenantid pgtype.UUID) ([]*TenantResourceLimit, error) { + rows, err := db.Query(ctx, listTenantResourceLimits, tenantid) + if err != nil { + return nil, err + } + defer rows.Close() + var items []*TenantResourceLimit + for rows.Next() { + var i TenantResourceLimit + if err := rows.Scan( + &i.ID, + &i.CreatedAt, + &i.UpdatedAt, + &i.Resource, + &i.TenantId, + &i.LimitValue, + &i.AlarmValue, + &i.Value, + &i.Window, + &i.LastRefill, + 
&i.CustomValueMeter, + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const meterTenantResource = `-- name: MeterTenantResource :one +UPDATE "TenantResourceLimit" +SET + "value" = CASE + WHEN ("customValueMeter" = true OR ("window" IS NOT NULL AND "window" != '' AND NOW() - "lastRefill" >= "window"::INTERVAL)) THEN + 0 -- Refill to 0 since the window has passed + ELSE + "value" + 1 -- Increment the current value within the window + END, + "lastRefill" = CASE + WHEN ("window" IS NOT NULL AND "window" != '' AND NOW() - "lastRefill" >= "window"::INTERVAL) THEN + CURRENT_TIMESTAMP -- Update lastRefill if the window has passed + ELSE + "lastRefill" -- Keep the lastRefill unchanged if within the window + END +WHERE "tenantId" = $1::uuid + AND "resource" = $2::"LimitResource" +RETURNING id, "createdAt", "updatedAt", resource, "tenantId", "limitValue", "alarmValue", value, "window", "lastRefill", "customValueMeter" +` + +type MeterTenantResourceParams struct { + Tenantid pgtype.UUID `json:"tenantid"` + Resource NullLimitResource `json:"resource"` +} + +func (q *Queries) MeterTenantResource(ctx context.Context, db DBTX, arg MeterTenantResourceParams) (*TenantResourceLimit, error) { + row := db.QueryRow(ctx, meterTenantResource, arg.Tenantid, arg.Resource) + var i TenantResourceLimit + err := row.Scan( + &i.ID, + &i.CreatedAt, + &i.UpdatedAt, + &i.Resource, + &i.TenantId, + &i.LimitValue, + &i.AlarmValue, + &i.Value, + &i.Window, + &i.LastRefill, + &i.CustomValueMeter, + ) + return &i, err +} + +const resolveAllLimitsIfWindowPassed = `-- name: ResolveAllLimitsIfWindowPassed :many +WITH resolved_limits AS ( + UPDATE "TenantResourceLimit" + SET + "value" = 0, -- Reset value to 0 + "lastRefill" = CURRENT_TIMESTAMP -- Update lastRefill timestamp + WHERE + ("window" IS NOT NULL AND "window" != '' AND NOW() - "lastRefill" >= "window"::INTERVAL) + RETURNING id, "createdAt", "updatedAt", resource, "tenantId", "limitValue", "alarmValue", value, "window", "lastRefill", "customValueMeter" +) +SELECT id, "createdAt", "updatedAt", resource, "tenantId", "limitValue", "alarmValue", value, "window", "lastRefill", "customValueMeter" +FROM resolved_limits +` + +type ResolveAllLimitsIfWindowPassedRow struct { + ID pgtype.UUID `json:"id"` + CreatedAt pgtype.Timestamp `json:"createdAt"` + UpdatedAt pgtype.Timestamp `json:"updatedAt"` + Resource LimitResource `json:"resource"` + TenantId pgtype.UUID `json:"tenantId"` + LimitValue int32 `json:"limitValue"` + AlarmValue pgtype.Int4 `json:"alarmValue"` + Value int32 `json:"value"` + Window pgtype.Text `json:"window"` + LastRefill pgtype.Timestamp `json:"lastRefill"` + CustomValueMeter bool `json:"customValueMeter"` +} + +func (q *Queries) ResolveAllLimitsIfWindowPassed(ctx context.Context, db DBTX) ([]*ResolveAllLimitsIfWindowPassedRow, error) { + rows, err := db.Query(ctx, resolveAllLimitsIfWindowPassed) + if err != nil { + return nil, err + } + defer rows.Close() + var items []*ResolveAllLimitsIfWindowPassedRow + for rows.Next() { + var i ResolveAllLimitsIfWindowPassedRow + if err := rows.Scan( + &i.ID, + &i.CreatedAt, + &i.UpdatedAt, + &i.Resource, + &i.TenantId, + &i.LimitValue, + &i.AlarmValue, + &i.Value, + &i.Window, + &i.LastRefill, + &i.CustomValueMeter, + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const selectOrInsertTenantResourceLimit = `-- name: 
SelectOrInsertTenantResourceLimit :one +WITH existing AS ( + SELECT id, "createdAt", "updatedAt", resource, "tenantId", "limitValue", "alarmValue", value, "window", "lastRefill", "customValueMeter" + FROM "TenantResourceLimit" + WHERE "tenantId" = $1::uuid AND "resource" = $2::"LimitResource" +) +, insert_row AS ( + INSERT INTO "TenantResourceLimit" ("id", "tenantId", "resource", "value", "limitValue", "alarmValue", "window", "lastRefill", "customValueMeter") + SELECT gen_random_uuid(), $1::uuid, $2::"LimitResource", 0, $3::int, $4::int, $5::text, CURRENT_TIMESTAMP, COALESCE($6::boolean, false) + WHERE NOT EXISTS (SELECT 1 FROM existing) + RETURNING id, "createdAt", "updatedAt", resource, "tenantId", "limitValue", "alarmValue", value, "window", "lastRefill", "customValueMeter" +) +SELECT id, "createdAt", "updatedAt", resource, "tenantId", "limitValue", "alarmValue", value, "window", "lastRefill", "customValueMeter" FROM insert_row +UNION ALL +SELECT id, "createdAt", "updatedAt", resource, "tenantId", "limitValue", "alarmValue", value, "window", "lastRefill", "customValueMeter" FROM existing +LIMIT 1 +` + +type SelectOrInsertTenantResourceLimitParams struct { + Tenantid pgtype.UUID `json:"tenantid"` + Resource NullLimitResource `json:"resource"` + LimitValue pgtype.Int4 `json:"limitValue"` + AlarmValue pgtype.Int4 `json:"alarmValue"` + Window pgtype.Text `json:"window"` + CustomValueMeter pgtype.Bool `json:"customValueMeter"` +} + +type SelectOrInsertTenantResourceLimitRow struct { + ID pgtype.UUID `json:"id"` + CreatedAt pgtype.Timestamp `json:"createdAt"` + UpdatedAt pgtype.Timestamp `json:"updatedAt"` + Resource LimitResource `json:"resource"` + TenantId pgtype.UUID `json:"tenantId"` + LimitValue int32 `json:"limitValue"` + AlarmValue pgtype.Int4 `json:"alarmValue"` + Value int32 `json:"value"` + Window pgtype.Text `json:"window"` + LastRefill pgtype.Timestamp `json:"lastRefill"` + CustomValueMeter bool `json:"customValueMeter"` +} + +func (q *Queries) SelectOrInsertTenantResourceLimit(ctx context.Context, db DBTX, arg SelectOrInsertTenantResourceLimitParams) (*SelectOrInsertTenantResourceLimitRow, error) { + row := db.QueryRow(ctx, selectOrInsertTenantResourceLimit, + arg.Tenantid, + arg.Resource, + arg.LimitValue, + arg.AlarmValue, + arg.Window, + arg.CustomValueMeter, + ) + var i SelectOrInsertTenantResourceLimitRow + err := row.Scan( + &i.ID, + &i.CreatedAt, + &i.UpdatedAt, + &i.Resource, + &i.TenantId, + &i.LimitValue, + &i.AlarmValue, + &i.Value, + &i.Window, + &i.LastRefill, + &i.CustomValueMeter, + ) + return &i, err +} diff --git a/internal/repository/prisma/dbsqlc/tenants.sql.go b/internal/repository/prisma/dbsqlc/tenants.sql.go index b59392686..4d3a2e508 100644 --- a/internal/repository/prisma/dbsqlc/tenants.sql.go +++ b/internal/repository/prisma/dbsqlc/tenants.sql.go @@ -117,7 +117,7 @@ func (q *Queries) GetSlackWebhooks(ctx context.Context, db DBTX, tenantid pgtype const getTenantAlertingSettings = `-- name: GetTenantAlertingSettings :one SELECT - id, "createdAt", "updatedAt", "deletedAt", "tenantId", "maxFrequency", "lastAlertedAt", "tickerId", "enableExpiringTokenAlerts", "enableWorkflowRunFailureAlerts" + id, "createdAt", "updatedAt", "deletedAt", "tenantId", "maxFrequency", "lastAlertedAt", "tickerId", "enableExpiringTokenAlerts", "enableWorkflowRunFailureAlerts", "enableTenantResourceLimitAlerts" FROM "TenantAlertingSettings" as tenantAlertingSettings WHERE @@ -138,6 +138,7 @@ func (q *Queries) GetTenantAlertingSettings(ctx context.Context, db DBTX, tenant &i.TickerId, 
&i.EnableExpiringTokenAlerts, &i.EnableWorkflowRunFailureAlerts, + &i.EnableTenantResourceLimitAlerts, ) return &i, err } @@ -210,7 +211,7 @@ SET "lastAlertedAt" = COALESCE($1::timestamp, "lastAlertedAt") WHERE "tenantId" = $2::uuid -RETURNING id, "createdAt", "updatedAt", "deletedAt", "tenantId", "maxFrequency", "lastAlertedAt", "tickerId", "enableExpiringTokenAlerts", "enableWorkflowRunFailureAlerts" +RETURNING id, "createdAt", "updatedAt", "deletedAt", "tenantId", "maxFrequency", "lastAlertedAt", "tickerId", "enableExpiringTokenAlerts", "enableWorkflowRunFailureAlerts", "enableTenantResourceLimitAlerts" ` type UpdateTenantAlertingSettingsParams struct { @@ -232,6 +233,7 @@ func (q *Queries) UpdateTenantAlertingSettings(ctx context.Context, db DBTX, arg &i.TickerId, &i.EnableExpiringTokenAlerts, &i.EnableWorkflowRunFailureAlerts, + &i.EnableTenantResourceLimitAlerts, ) return &i, err } diff --git a/internal/repository/prisma/dbsqlc/tickers.sql b/internal/repository/prisma/dbsqlc/tickers.sql index 017cc89db..86a9c5c59 100644 --- a/internal/repository/prisma/dbsqlc/tickers.sql +++ b/internal/repository/prisma/dbsqlc/tickers.sql @@ -290,3 +290,77 @@ RETURNING t1."name", t1."tenantId", t1."expiresAt"; + +-- name: PollTenantResourceLimitAlerts :many +WITH alerting_resource_limits AS ( + SELECT + rl."id" AS "resourceLimitId", + rl."tenantId", + rl."resource", + rl."limitValue", + rl."alarmValue", + rl."value", + rl."window", + rl."lastRefill", + CASE + WHEN rl."value" >= rl."limitValue" THEN 'Exhausted' + WHEN rl."alarmValue" IS NOT NULL AND rl."value" >= rl."alarmValue" THEN 'Alarm' + END AS "alertType" + FROM + "TenantResourceLimit" AS rl + JOIN + "TenantAlertingSettings" AS ta + ON + ta."tenantId" = rl."tenantId"::uuid + WHERE + ta."enableTenantResourceLimitAlerts" = true + AND ( + (rl."alarmValue" IS NOT NULL AND rl."value" >= rl."alarmValue") + OR rl."value" >= rl."limitValue" + ) + FOR UPDATE SKIP LOCKED +), +new_alerts AS ( + SELECT + arl."resourceLimitId", + arl."tenantId", + arl."resource", + arl."alertType", + arl."value", + arl."limitValue" AS "limit", + EXISTS ( + SELECT 1 + FROM "TenantResourceLimitAlert" AS trla + WHERE trla."resourceLimitId" = arl."resourceLimitId" + AND trla."alertType" = arl."alertType"::"TenantResourceLimitAlertType" + AND trla."createdAt" >= NOW() - arl."window"::INTERVAL + ) AS "existingAlert" + FROM + alerting_resource_limits AS arl +) +INSERT INTO "TenantResourceLimitAlert" ( + "id", + "createdAt", + "updatedAt", + "resourceLimitId", + "resource", + "alertType", + "value", + "limit", + "tenantId" +) +SELECT + gen_random_uuid(), + NOW(), + NOW(), + na."resourceLimitId", + na."resource", + na."alertType"::"TenantResourceLimitAlertType", + na."value", + na."limit", + na."tenantId" +FROM + new_alerts AS na +WHERE + na."existingAlert" = false +RETURNING *; diff --git a/internal/repository/prisma/dbsqlc/tickers.sql.go b/internal/repository/prisma/dbsqlc/tickers.sql.go index 6936fa5e3..150d5360d 100644 --- a/internal/repository/prisma/dbsqlc/tickers.sql.go +++ b/internal/repository/prisma/dbsqlc/tickers.sql.go @@ -576,7 +576,7 @@ func (q *Queries) PollStepRuns(ctx context.Context, db DBTX, tickerid pgtype.UUI const pollTenantAlerts = `-- name: PollTenantAlerts :many WITH active_tenant_alerts AS ( SELECT - alerts.id, alerts."createdAt", alerts."updatedAt", alerts."deletedAt", alerts."tenantId", alerts."maxFrequency", alerts."lastAlertedAt", alerts."tickerId", alerts."enableExpiringTokenAlerts", alerts."enableWorkflowRunFailureAlerts" + alerts.id, 
alerts."createdAt", alerts."updatedAt", alerts."deletedAt", alerts."tenantId", alerts."maxFrequency", alerts."lastAlertedAt", alerts."tickerId", alerts."enableExpiringTokenAlerts", alerts."enableWorkflowRunFailureAlerts", alerts."enableTenantResourceLimitAlerts" FROM "TenantAlertingSettings" as alerts WHERE @@ -613,21 +613,22 @@ FROM WHERE alerts."id" = active_tenant_alerts."id" AND alerts."tenantId" IN (SELECT "tenantId" FROM failed_run_count_by_tenant WHERE "failedWorkflowRunCount" > 0) -RETURNING alerts.id, alerts."createdAt", alerts."updatedAt", alerts."deletedAt", alerts."tenantId", alerts."maxFrequency", alerts."lastAlertedAt", alerts."tickerId", alerts."enableExpiringTokenAlerts", alerts."enableWorkflowRunFailureAlerts", active_tenant_alerts."lastAlertedAt" AS "prevLastAlertedAt" +RETURNING alerts.id, alerts."createdAt", alerts."updatedAt", alerts."deletedAt", alerts."tenantId", alerts."maxFrequency", alerts."lastAlertedAt", alerts."tickerId", alerts."enableExpiringTokenAlerts", alerts."enableWorkflowRunFailureAlerts", alerts."enableTenantResourceLimitAlerts", active_tenant_alerts."lastAlertedAt" AS "prevLastAlertedAt" ` type PollTenantAlertsRow struct { - ID pgtype.UUID `json:"id"` - CreatedAt pgtype.Timestamp `json:"createdAt"` - UpdatedAt pgtype.Timestamp `json:"updatedAt"` - DeletedAt pgtype.Timestamp `json:"deletedAt"` - TenantId pgtype.UUID `json:"tenantId"` - MaxFrequency string `json:"maxFrequency"` - LastAlertedAt pgtype.Timestamp `json:"lastAlertedAt"` - TickerId pgtype.UUID `json:"tickerId"` - EnableExpiringTokenAlerts bool `json:"enableExpiringTokenAlerts"` - EnableWorkflowRunFailureAlerts bool `json:"enableWorkflowRunFailureAlerts"` - PrevLastAlertedAt pgtype.Timestamp `json:"prevLastAlertedAt"` + ID pgtype.UUID `json:"id"` + CreatedAt pgtype.Timestamp `json:"createdAt"` + UpdatedAt pgtype.Timestamp `json:"updatedAt"` + DeletedAt pgtype.Timestamp `json:"deletedAt"` + TenantId pgtype.UUID `json:"tenantId"` + MaxFrequency string `json:"maxFrequency"` + LastAlertedAt pgtype.Timestamp `json:"lastAlertedAt"` + TickerId pgtype.UUID `json:"tickerId"` + EnableExpiringTokenAlerts bool `json:"enableExpiringTokenAlerts"` + EnableWorkflowRunFailureAlerts bool `json:"enableWorkflowRunFailureAlerts"` + EnableTenantResourceLimitAlerts bool `json:"enableTenantResourceLimitAlerts"` + PrevLastAlertedAt pgtype.Timestamp `json:"prevLastAlertedAt"` } // Finds tenant alerts which haven't alerted since their frequency and assigns them to a ticker @@ -651,6 +652,7 @@ func (q *Queries) PollTenantAlerts(ctx context.Context, db DBTX, tickerid pgtype &i.TickerId, &i.EnableExpiringTokenAlerts, &i.EnableWorkflowRunFailureAlerts, + &i.EnableTenantResourceLimitAlerts, &i.PrevLastAlertedAt, ); err != nil { return nil, err @@ -663,6 +665,111 @@ func (q *Queries) PollTenantAlerts(ctx context.Context, db DBTX, tickerid pgtype return items, nil } +const pollTenantResourceLimitAlerts = `-- name: PollTenantResourceLimitAlerts :many +WITH alerting_resource_limits AS ( + SELECT + rl."id" AS "resourceLimitId", + rl."tenantId", + rl."resource", + rl."limitValue", + rl."alarmValue", + rl."value", + rl."window", + rl."lastRefill", + CASE + WHEN rl."value" >= rl."limitValue" THEN 'Exhausted' + WHEN rl."alarmValue" IS NOT NULL AND rl."value" >= rl."alarmValue" THEN 'Alarm' + END AS "alertType" + FROM + "TenantResourceLimit" AS rl + JOIN + "TenantAlertingSettings" AS ta + ON + ta."tenantId" = rl."tenantId"::uuid + WHERE + ta."enableTenantResourceLimitAlerts" = true + AND ( + (rl."alarmValue" IS NOT NULL AND 
rl."value" >= rl."alarmValue") + OR rl."value" >= rl."limitValue" + ) + FOR UPDATE SKIP LOCKED +), +new_alerts AS ( + SELECT + arl."resourceLimitId", + arl."tenantId", + arl."resource", + arl."alertType", + arl."value", + arl."limitValue" AS "limit", + EXISTS ( + SELECT 1 + FROM "TenantResourceLimitAlert" AS trla + WHERE trla."resourceLimitId" = arl."resourceLimitId" + AND trla."alertType" = arl."alertType"::"TenantResourceLimitAlertType" + AND trla."createdAt" >= NOW() - arl."window"::INTERVAL + ) AS "existingAlert" + FROM + alerting_resource_limits AS arl +) +INSERT INTO "TenantResourceLimitAlert" ( + "id", + "createdAt", + "updatedAt", + "resourceLimitId", + "resource", + "alertType", + "value", + "limit", + "tenantId" +) +SELECT + gen_random_uuid(), + NOW(), + NOW(), + na."resourceLimitId", + na."resource", + na."alertType"::"TenantResourceLimitAlertType", + na."value", + na."limit", + na."tenantId" +FROM + new_alerts AS na +WHERE + na."existingAlert" = false +RETURNING id, "createdAt", "updatedAt", "resourceLimitId", "tenantId", resource, "alertType", value, "limit" +` + +func (q *Queries) PollTenantResourceLimitAlerts(ctx context.Context, db DBTX) ([]*TenantResourceLimitAlert, error) { + rows, err := db.Query(ctx, pollTenantResourceLimitAlerts) + if err != nil { + return nil, err + } + defer rows.Close() + var items []*TenantResourceLimitAlert + for rows.Next() { + var i TenantResourceLimitAlert + if err := rows.Scan( + &i.ID, + &i.CreatedAt, + &i.UpdatedAt, + &i.ResourceLimitId, + &i.TenantId, + &i.Resource, + &i.AlertType, + &i.Value, + &i.Limit, + ); err != nil { + return nil, err + } + items = append(items, &i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const setTickersInactive = `-- name: SetTickersInactive :many UPDATE "Ticker" as tickers diff --git a/internal/repository/prisma/event.go b/internal/repository/prisma/event.go index 26b198f7c..ae2183349 100644 --- a/internal/repository/prisma/event.go +++ b/internal/repository/prisma/event.go @@ -12,6 +12,7 @@ import ( "github.com/rs/zerolog" "github.com/hatchet-dev/hatchet/internal/repository" + "github.com/hatchet-dev/hatchet/internal/repository/metered" "github.com/hatchet-dev/hatchet/internal/repository/prisma/db" "github.com/hatchet-dev/hatchet/internal/repository/prisma/dbsqlc" "github.com/hatchet-dev/hatchet/internal/repository/prisma/sqlchelpers" @@ -197,9 +198,10 @@ type eventEngineRepository struct { v validator.Validator queries *dbsqlc.Queries l *zerolog.Logger + m *metered.Metered } -func NewEventEngineRepository(pool *pgxpool.Pool, v validator.Validator, l *zerolog.Logger) repository.EventEngineRepository { +func NewEventEngineRepository(pool *pgxpool.Pool, v validator.Validator, l *zerolog.Logger, m *metered.Metered) repository.EventEngineRepository { queries := dbsqlc.New() return &eventEngineRepository{ @@ -207,6 +209,7 @@ func NewEventEngineRepository(pool *pgxpool.Pool, v validator.Validator, l *zero v: v, queries: queries, l: l, + m: m, } } @@ -215,36 +218,39 @@ func (r *eventEngineRepository) GetEventForEngine(ctx context.Context, tenantId, } func (r *eventEngineRepository) CreateEvent(ctx context.Context, opts *repository.CreateEventOpts) (*dbsqlc.Event, error) { - ctx, span := telemetry.NewSpan(ctx, "db-create-event") - defer span.End() + return metered.MakeMetered(ctx, r.m, dbsqlc.LimitResourceEVENT, opts.TenantId, func() (*dbsqlc.Event, error) { - if err := r.v.Validate(opts); err != nil { - return nil, err - } + ctx, span := telemetry.NewSpan(ctx, 
"db-create-event") + defer span.End() - createParams := dbsqlc.CreateEventParams{ - ID: sqlchelpers.UUIDFromStr(uuid.New().String()), - Key: opts.Key, - Tenantid: sqlchelpers.UUIDFromStr(opts.TenantId), - Data: opts.Data, - Additionalmetadata: opts.AdditionalMetadata, - } + if err := r.v.Validate(opts); err != nil { + return nil, err + } - if opts.ReplayedEvent != nil { - createParams.ReplayedFromId = sqlchelpers.UUIDFromStr(*opts.ReplayedEvent) - } + createParams := dbsqlc.CreateEventParams{ + ID: sqlchelpers.UUIDFromStr(uuid.New().String()), + Key: opts.Key, + Tenantid: sqlchelpers.UUIDFromStr(opts.TenantId), + Data: opts.Data, + Additionalmetadata: opts.AdditionalMetadata, + } - e, err := r.queries.CreateEvent( - ctx, - r.pool, - createParams, - ) + if opts.ReplayedEvent != nil { + createParams.ReplayedFromId = sqlchelpers.UUIDFromStr(*opts.ReplayedEvent) + } - if err != nil { - return nil, fmt.Errorf("could not create event: %w", err) - } + e, err := r.queries.CreateEvent( + ctx, + r.pool, + createParams, + ) - return e, nil + if err != nil { + return nil, fmt.Errorf("could not create event: %w", err) + } + + return e, nil + }) } func (r *eventEngineRepository) ListEventsByIds(ctx context.Context, tenantId string, ids []string) ([]*dbsqlc.Event, error) { diff --git a/internal/repository/prisma/repository.go b/internal/repository/prisma/repository.go index 2fcd35865..7d840be6c 100644 --- a/internal/repository/prisma/repository.go +++ b/internal/repository/prisma/repository.go @@ -6,8 +6,10 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/rs/zerolog" + "github.com/hatchet-dev/hatchet/internal/config/server" "github.com/hatchet-dev/hatchet/internal/repository" "github.com/hatchet-dev/hatchet/internal/repository/cache" + "github.com/hatchet-dev/hatchet/internal/repository/metered" "github.com/hatchet-dev/hatchet/internal/repository/prisma/db" "github.com/hatchet-dev/hatchet/internal/validator" ) @@ -36,9 +38,10 @@ type apiRepository struct { type PrismaRepositoryOpt func(*PrismaRepositoryOpts) type PrismaRepositoryOpts struct { - v validator.Validator - l *zerolog.Logger - cache cache.Cacheable + v validator.Validator + l *zerolog.Logger + cache cache.Cacheable + metered *metered.Metered } func defaultPrismaRepositoryOpts() *PrismaRepositoryOpts { @@ -65,6 +68,12 @@ func WithCache(cache cache.Cacheable) PrismaRepositoryOpt { } } +func WithMetered(metered *metered.Metered) PrismaRepositoryOpt { + return func(opts *PrismaRepositoryOpts) { + opts.metered = metered + } +} + func NewAPIRepository(client *db.PrismaClient, pool *pgxpool.Pool, fs ...PrismaRepositoryOpt) repository.APIRepository { opts := defaultPrismaRepositoryOpts() @@ -87,14 +96,14 @@ func NewAPIRepository(client *db.PrismaClient, pool *pgxpool.Pool, fs ...PrismaR tenantAlerting: NewTenantAlertingAPIRepository(client, opts.v, opts.cache), tenantInvite: NewTenantInviteRepository(client, opts.v), workflow: NewWorkflowRepository(client, pool, opts.v, opts.l), - workflowRun: NewWorkflowRunRepository(client, pool, opts.v, opts.l), + workflowRun: NewWorkflowRunRepository(client, pool, opts.v, opts.l, opts.metered), jobRun: NewJobRunAPIRepository(client, pool, opts.v, opts.l), stepRun: NewStepRunAPIRepository(client, pool, opts.v, opts.l), github: NewGithubRepository(client, opts.v), step: NewStepRepository(client, opts.v), slack: NewSlackRepository(client, opts.v), sns: NewSNSRepository(client, opts.v), - worker: NewWorkerAPIRepository(client, pool, opts.v, opts.l), + worker: NewWorkerAPIRepository(client, pool, opts.v, 
opts.l, opts.metered), userSession: NewUserSessionRepository(client, opts.v), user: NewUserRepository(client, opts.v), health: NewHealthAPIRepository(client, pool), @@ -274,18 +283,44 @@ func NewEngineRepository(pool *pgxpool.Pool, fs ...PrismaRepositoryOpt) reposito health: NewHealthEngineRepository(pool), apiToken: NewEngineTokenRepository(pool, opts.v, opts.l, opts.cache), dispatcher: NewDispatcherRepository(pool, opts.v, opts.l), - event: NewEventEngineRepository(pool, opts.v, opts.l), + event: NewEventEngineRepository(pool, opts.v, opts.l, opts.metered), getGroupKeyRun: NewGetGroupKeyRunRepository(pool, opts.v, opts.l), jobRun: NewJobRunEngineRepository(pool, opts.v, opts.l), stepRun: NewStepRunEngineRepository(pool, opts.v, opts.l), tenant: NewTenantEngineRepository(pool, opts.v, opts.l, opts.cache), tenantAlerting: NewTenantAlertingEngineRepository(pool, opts.v, opts.l, opts.cache), ticker: NewTickerRepository(pool, opts.v, opts.l), - worker: NewWorkerEngineRepository(pool, opts.v, opts.l), - workflow: NewWorkflowEngineRepository(pool, opts.v, opts.l), - workflowRun: NewWorkflowRunEngineRepository(pool, opts.v, opts.l), + worker: NewWorkerEngineRepository(pool, opts.v, opts.l, opts.metered), + workflow: NewWorkflowEngineRepository(pool, opts.v, opts.l, opts.metered), + workflowRun: NewWorkflowRunEngineRepository(pool, opts.v, opts.l, opts.metered), streamEvent: NewStreamEventsEngineRepository(pool, opts.v, opts.l), log: NewLogEngineRepository(pool, opts.v, opts.l), rateLimit: NewRateLimitEngineRepository(pool, opts.v, opts.l), } } + +type entitlementRepository struct { + tenantLimit repository.TenantLimitRepository +} + +func (r *entitlementRepository) TenantLimit() repository.TenantLimitRepository { + return r.tenantLimit +} +func NewEntitlementRepository(pool *pgxpool.Pool, s *server.ConfigFileRuntime, fs ...PrismaRepositoryOpt) repository.EntitlementsRepository { + opts := defaultPrismaRepositoryOpts() + + for _, f := range fs { + f(opts) + } + + newLogger := opts.l.With().Str("service", "database").Logger() + opts.l = &newLogger + + if opts.cache == nil { + opts.cache = cache.New(1 * time.Millisecond) + } + + return &entitlementRepository{ + tenantLimit: NewTenantLimitRepository(pool, opts.v, opts.l, s), + } +} diff --git a/internal/repository/prisma/tenant_alerting.go b/internal/repository/prisma/tenant_alerting.go index 76539bb86..0e537a3b6 100644 --- a/internal/repository/prisma/tenant_alerting.go +++ b/internal/repository/prisma/tenant_alerting.go @@ -44,10 +44,12 @@ func (r *tenantAlertingAPIRepository) UpsertTenantAlertingSettings(tenantId stri db.TenantAlertingSettings.MaxFrequency.SetIfPresent(opts.MaxFrequency), db.TenantAlertingSettings.EnableExpiringTokenAlerts.SetIfPresent(opts.EnableExpiringTokenAlerts), db.TenantAlertingSettings.EnableWorkflowRunFailureAlerts.SetIfPresent(opts.EnableWorkflowRunFailureAlerts), + db.TenantAlertingSettings.EnableTenantResourceLimitAlerts.SetIfPresent(opts.EnableTenantResourceLimitAlerts), ).Update( db.TenantAlertingSettings.MaxFrequency.SetIfPresent(opts.MaxFrequency), db.TenantAlertingSettings.EnableExpiringTokenAlerts.SetIfPresent(opts.EnableExpiringTokenAlerts), db.TenantAlertingSettings.EnableWorkflowRunFailureAlerts.SetIfPresent(opts.EnableWorkflowRunFailureAlerts), + db.TenantAlertingSettings.EnableTenantResourceLimitAlerts.SetIfPresent(opts.EnableTenantResourceLimitAlerts), ).Exec(context.Background()) } diff --git a/internal/repository/prisma/tenant_limit.go b/internal/repository/prisma/tenant_limit.go new file mode 100644 
index 000000000..78396cb2e --- /dev/null +++ b/internal/repository/prisma/tenant_limit.go @@ -0,0 +1,254 @@ +package prisma + +import ( + "context" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgtype" + "github.com/jackc/pgx/v5/pgxpool" + "github.com/rs/zerolog" + + "github.com/hatchet-dev/hatchet/internal/config/server" + "github.com/hatchet-dev/hatchet/internal/repository" + "github.com/hatchet-dev/hatchet/internal/repository/prisma/dbsqlc" + "github.com/hatchet-dev/hatchet/internal/repository/prisma/sqlchelpers" + "github.com/hatchet-dev/hatchet/internal/validator" +) + +type tenantLimitRepository struct { + pool *pgxpool.Pool + v validator.Validator + queries *dbsqlc.Queries + l *zerolog.Logger + config *server.ConfigFileRuntime +} + +func NewTenantLimitRepository(pool *pgxpool.Pool, v validator.Validator, l *zerolog.Logger, s *server.ConfigFileRuntime) repository.TenantLimitRepository { + queries := dbsqlc.New() + + return &tenantLimitRepository{ + v: v, + queries: queries, + pool: pool, + l: l, + config: s, + } +} + +func (t *tenantLimitRepository) ResolveAllTenantResourceLimits(ctx context.Context) error { + _, err := t.queries.ResolveAllLimitsIfWindowPassed(ctx, t.pool) + return err +} + +func (t *tenantLimitRepository) CreateTenantDefaultLimits(ctx context.Context, tenantId string) error { + err := t.createDefaultWorkflowRunLimit(ctx, tenantId) + + if err != nil { + return err + } + + err = t.createDefaultEventLimit(ctx, tenantId) + + if err != nil { + return err + } + + // TODO: implement cron limits + // err = t.createDefaultCronLimit(ctx, tenantId) + + // if err != nil { + // return err + // } + + // TODO: implement schedule limits + // err = t.createDefaultScheduleLimit(ctx, tenantId) + + // if err != nil { + // return err + // } + + err = t.createDefaultWorkerLimit(ctx, tenantId) + + return err +} + +func (t *tenantLimitRepository) createDefaultWorkflowRunLimit(ctx context.Context, tenantId string) error { + _, err := t.queries.SelectOrInsertTenantResourceLimit(ctx, t.pool, dbsqlc.SelectOrInsertTenantResourceLimitParams{ + Tenantid: sqlchelpers.UUIDFromStr(tenantId), + Resource: dbsqlc.NullLimitResource{ + LimitResource: dbsqlc.LimitResourceWORKFLOWRUN, + Valid: true, + }, + LimitValue: sqlchelpers.ToInt(int32(t.config.Limits.DefaultWorkflowRunLimit)), + AlarmValue: sqlchelpers.ToInt(int32(t.config.Limits.DefaultWorkflowRunAlarmLimit)), + Window: sqlchelpers.TextFromStr(t.config.Limits.DefaultWorkflowRunWindow.String()), + }) + + return err +} + +func (t *tenantLimitRepository) createDefaultEventLimit(ctx context.Context, tenantId string) error { + + _, err := t.queries.SelectOrInsertTenantResourceLimit(ctx, t.pool, dbsqlc.SelectOrInsertTenantResourceLimitParams{ + Tenantid: sqlchelpers.UUIDFromStr(tenantId), + Resource: dbsqlc.NullLimitResource{ + LimitResource: dbsqlc.LimitResourceEVENT, + Valid: true, + }, + LimitValue: sqlchelpers.ToInt(int32(t.config.Limits.DefaultEventLimit)), + AlarmValue: sqlchelpers.ToInt(int32(t.config.Limits.DefaultEventAlarmLimit)), + Window: sqlchelpers.TextFromStr(t.config.Limits.DefaultEventWindow.String()), + }) + + return err +} + +// func (t *tenantLimitRepository) createDefaultCronLimit(ctx context.Context, tenantId string) error { + +// _, err := t.queries.SelectOrInsertTenantResourceLimit(ctx, t.pool, dbsqlc.SelectOrInsertTenantResourceLimitParams{ +// Tenantid: sqlchelpers.UUIDFromStr(tenantId), +// Resource: dbsqlc.NullLimitResource{ +// LimitResource: dbsqlc.LimitResourceCRON, +// Valid: true, +// }, +// LimitValue: 
sqlchelpers.ToInt(int32(t.config.Limits.DefaultCronLimit)),
+// 		AlarmValue: sqlchelpers.ToInt(int32(t.config.Limits.DefaultCronAlarmLimit)),
+// 	})
+
+// 	return err
+// }
+
+// func (t *tenantLimitRepository) createDefaultScheduleLimit(ctx context.Context, tenantId string) error {
+
+// 	_, err := t.queries.SelectOrInsertTenantResourceLimit(ctx, t.pool, dbsqlc.SelectOrInsertTenantResourceLimitParams{
+// 		Tenantid: sqlchelpers.UUIDFromStr(tenantId),
+// 		Resource: dbsqlc.NullLimitResource{
+// 			LimitResource: dbsqlc.LimitResourceSCHEDULE,
+// 			Valid:         true,
+// 		},
+// 		LimitValue: sqlchelpers.ToInt(int32(t.config.Limits.DefaultScheduleLimit)),
+// 		AlarmValue: sqlchelpers.ToInt(int32(t.config.Limits.DefaultScheduleAlarmLimit)),
+// 	})
+
+// 	return err
+// }
+
+func (t *tenantLimitRepository) createDefaultWorkerLimit(ctx context.Context, tenantId string) error {
+
+	_, err := t.queries.SelectOrInsertTenantResourceLimit(ctx, t.pool, dbsqlc.SelectOrInsertTenantResourceLimitParams{
+		Tenantid: sqlchelpers.UUIDFromStr(tenantId),
+		Resource: dbsqlc.NullLimitResource{
+			LimitResource: dbsqlc.LimitResourceWORKER,
+			Valid:         true,
+		},
+		LimitValue: sqlchelpers.ToInt(int32(t.config.Limits.DefaultWorkerLimit)),
+		AlarmValue: sqlchelpers.ToInt(int32(t.config.Limits.DefaultWorkerAlarmLimit)),
+		CustomValueMeter: pgtype.Bool{
+			Bool:  true,
+			Valid: true,
+		},
+	})
+
+	return err
+}
+
+func (t *tenantLimitRepository) GetLimits(ctx context.Context, tenantId string) ([]*dbsqlc.TenantResourceLimit, error) {
+	if !t.config.EnforceLimits {
+		return []*dbsqlc.TenantResourceLimit{}, nil
+	}
+
+	limits, err := t.queries.ListTenantResourceLimits(ctx, t.pool, sqlchelpers.UUIDFromStr(tenantId))
+
+	if err != nil {
+		return nil, err
+	}
+
+	// workers are metered by a live count, so patch the worker limit value with the current number of workers
+	for _, limit := range limits {
+
+		if limit.Resource == dbsqlc.LimitResourceWORKER {
+			workerCount, err := t.queries.CountTenantWorkers(ctx, t.pool, sqlchelpers.UUIDFromStr(tenantId))
+			if err != nil {
+				return nil, err
+			}
+			limit.Value = int32(workerCount)
+		}
+
+	}
+
+	return limits, nil
+}
+
+func (t *tenantLimitRepository) CanCreate(ctx context.Context, resource dbsqlc.LimitResource, tenantId string) (bool, int, error) {
+
+	if !t.config.EnforceLimits {
+		return true, 0, nil
+	}
+
+	limit, err := t.queries.GetTenantResourceLimit(ctx, t.pool, dbsqlc.GetTenantResourceLimitParams{
+		Tenantid: sqlchelpers.UUIDFromStr(tenantId),
+		Resource: dbsqlc.NullLimitResource{
+			LimitResource: resource,
+			Valid:         true,
+		},
+	})
+
+	if err == pgx.ErrNoRows {
+		t.l.Warn().Msgf("no %s tenant limit found, creating default limit", string(resource))
+
+		err = t.CreateTenantDefaultLimits(ctx, tenantId)
+
+		if err != nil {
+			return false, 0, err
+		}
+
+		return true, 0, nil
+	}
+
+	if err != nil {
+		return false, 0, err
+	}
+
+	var value = limit.Value
+
+	// workers are metered by a live count, so compare against the current number of workers
+	if resource == dbsqlc.LimitResourceWORKER {
+		count, err := t.queries.CountTenantWorkers(ctx, t.pool, sqlchelpers.UUIDFromStr(tenantId))
+
+		if err != nil {
+			return false, 0, err
+		}
+
+		value = int32(count)
+	}
+
+	if value >= limit.LimitValue {
+		return false, 100, nil
+	}
+
+	return true, calcPercent(value, limit.LimitValue), nil
+}
+
+func calcPercent(value int32, limit int32) int {
+	return int((float64(value) / float64(limit)) * 100)
+}
+
+func (t *tenantLimitRepository) Meter(ctx context.Context, resource dbsqlc.LimitResource, tenantId string) (*dbsqlc.TenantResourceLimit, error) {
+	if !t.config.EnforceLimits {
+		return nil, nil
+	}
+
+	r, err := 
t.queries.MeterTenantResource(ctx, t.pool, dbsqlc.MeterTenantResourceParams{ + Tenantid: sqlchelpers.UUIDFromStr(tenantId), + Resource: dbsqlc.NullLimitResource{ + LimitResource: resource, + Valid: true, + }, + }) + + if err != nil { + return nil, err + } + + return r, nil +} diff --git a/internal/repository/prisma/ticker.go b/internal/repository/prisma/ticker.go index af5a54114..dc7729b6c 100644 --- a/internal/repository/prisma/ticker.go +++ b/internal/repository/prisma/ticker.go @@ -108,3 +108,7 @@ func (t *tickerRepository) PollTenantAlerts(ctx context.Context, tickerId string func (t *tickerRepository) PollExpiringTokens(ctx context.Context) ([]*dbsqlc.PollExpiringTokensRow, error) { return t.queries.PollExpiringTokens(ctx, t.pool) } + +func (t *tickerRepository) PollTenantResourceLimitAlerts(ctx context.Context) ([]*dbsqlc.TenantResourceLimitAlert, error) { + return t.queries.PollTenantResourceLimitAlerts(ctx, t.pool) +} diff --git a/internal/repository/prisma/worker.go b/internal/repository/prisma/worker.go index d037145a8..4a3e367b5 100644 --- a/internal/repository/prisma/worker.go +++ b/internal/repository/prisma/worker.go @@ -12,6 +12,7 @@ import ( "github.com/rs/zerolog" "github.com/hatchet-dev/hatchet/internal/repository" + "github.com/hatchet-dev/hatchet/internal/repository/metered" "github.com/hatchet-dev/hatchet/internal/repository/prisma/db" "github.com/hatchet-dev/hatchet/internal/repository/prisma/dbsqlc" "github.com/hatchet-dev/hatchet/internal/repository/prisma/sqlchelpers" @@ -24,9 +25,10 @@ type workerAPIRepository struct { v validator.Validator queries *dbsqlc.Queries l *zerolog.Logger + m *metered.Metered } -func NewWorkerAPIRepository(client *db.PrismaClient, pool *pgxpool.Pool, v validator.Validator, l *zerolog.Logger) repository.WorkerAPIRepository { +func NewWorkerAPIRepository(client *db.PrismaClient, pool *pgxpool.Pool, v validator.Validator, l *zerolog.Logger, m *metered.Metered) repository.WorkerAPIRepository { queries := dbsqlc.New() return &workerAPIRepository{ @@ -35,6 +37,7 @@ func NewWorkerAPIRepository(client *db.PrismaClient, pool *pgxpool.Pool, v valid v: v, queries: queries, l: l, + m: m, } } @@ -127,9 +130,10 @@ type workerEngineRepository struct { v validator.Validator queries *dbsqlc.Queries l *zerolog.Logger + m *metered.Metered } -func NewWorkerEngineRepository(pool *pgxpool.Pool, v validator.Validator, l *zerolog.Logger) repository.WorkerEngineRepository { +func NewWorkerEngineRepository(pool *pgxpool.Pool, v validator.Validator, l *zerolog.Logger, m *metered.Metered) repository.WorkerEngineRepository { queries := dbsqlc.New() return &workerEngineRepository{ @@ -137,6 +141,7 @@ func NewWorkerEngineRepository(pool *pgxpool.Pool, v validator.Validator, l *zer v: v, queries: queries, l: l, + m: m, } } @@ -148,111 +153,113 @@ func (w *workerEngineRepository) GetWorkerForEngine(ctx context.Context, tenantI } func (w *workerEngineRepository) CreateNewWorker(ctx context.Context, tenantId string, opts *repository.CreateWorkerOpts) (*dbsqlc.Worker, error) { - if err := w.v.Validate(opts); err != nil { - return nil, err - } - - tx, err := w.pool.Begin(ctx) - - if err != nil { - return nil, err - } - - defer deferRollback(ctx, w.l, tx.Rollback) - - pgTenantId := sqlchelpers.UUIDFromStr(tenantId) - - createParams := dbsqlc.CreateWorkerParams{ - Tenantid: pgTenantId, - Dispatcherid: sqlchelpers.UUIDFromStr(opts.DispatcherId), - Name: opts.Name, - } - - if opts.MaxRuns != nil { - createParams.MaxRuns = pgtype.Int4{ - Int32: int32(*opts.MaxRuns), - Valid: 
true, + return metered.MakeMetered(ctx, w.m, dbsqlc.LimitResourceWORKER, tenantId, func() (*dbsqlc.Worker, error) { + if err := w.v.Validate(opts); err != nil { + return nil, err } - } else { - createParams.MaxRuns = pgtype.Int4{ - Int32: 100, - Valid: true, + + tx, err := w.pool.Begin(ctx) + + if err != nil { + return nil, err } - } - worker, err := w.queries.CreateWorker(ctx, tx, createParams) + defer deferRollback(ctx, w.l, tx.Rollback) - if err != nil { - return nil, fmt.Errorf("could not create worker: %w", err) - } + pgTenantId := sqlchelpers.UUIDFromStr(tenantId) - err = w.queries.StubWorkerSemaphoreSlots(ctx, tx, dbsqlc.StubWorkerSemaphoreSlotsParams{ - Workerid: worker.ID, - MaxRuns: pgtype.Int4{ - Int32: worker.MaxRuns, - Valid: true, - }, - }) + createParams := dbsqlc.CreateWorkerParams{ + Tenantid: pgTenantId, + Dispatcherid: sqlchelpers.UUIDFromStr(opts.DispatcherId), + Name: opts.Name, + } - if err != nil { - return nil, fmt.Errorf("could not stub worker semaphore slots: %w", err) - } + if opts.MaxRuns != nil { + createParams.MaxRuns = pgtype.Int4{ + Int32: int32(*opts.MaxRuns), + Valid: true, + } + } else { + createParams.MaxRuns = pgtype.Int4{ + Int32: 100, + Valid: true, + } + } - svcUUIDs := make([]pgtype.UUID, len(opts.Services)) + worker, err := w.queries.CreateWorker(ctx, tx, createParams) - for i, svc := range opts.Services { - dbSvc, err := w.queries.UpsertService(ctx, tx, dbsqlc.UpsertServiceParams{ - Name: svc, - Tenantid: pgTenantId, + if err != nil { + return nil, fmt.Errorf("could not create worker: %w", err) + } + + err = w.queries.StubWorkerSemaphoreSlots(ctx, tx, dbsqlc.StubWorkerSemaphoreSlotsParams{ + Workerid: worker.ID, + MaxRuns: pgtype.Int4{ + Int32: worker.MaxRuns, + Valid: true, + }, }) if err != nil { - return nil, fmt.Errorf("could not upsert service: %w", err) + return nil, fmt.Errorf("could not stub worker semaphore slots: %w", err) } - svcUUIDs[i] = dbSvc.ID - } + svcUUIDs := make([]pgtype.UUID, len(opts.Services)) - err = w.queries.LinkServicesToWorker(ctx, tx, dbsqlc.LinkServicesToWorkerParams{ - Services: svcUUIDs, - Workerid: worker.ID, - }) + for i, svc := range opts.Services { + dbSvc, err := w.queries.UpsertService(ctx, tx, dbsqlc.UpsertServiceParams{ + Name: svc, + Tenantid: pgTenantId, + }) - if err != nil { - return nil, fmt.Errorf("could not link services to worker: %w", err) - } + if err != nil { + return nil, fmt.Errorf("could not upsert service: %w", err) + } - actionUUIDs := make([]pgtype.UUID, len(opts.Actions)) + svcUUIDs[i] = dbSvc.ID + } - for i, action := range opts.Actions { - dbAction, err := w.queries.UpsertAction(ctx, tx, dbsqlc.UpsertActionParams{ - Action: action, - Tenantid: pgTenantId, + err = w.queries.LinkServicesToWorker(ctx, tx, dbsqlc.LinkServicesToWorkerParams{ + Services: svcUUIDs, + Workerid: worker.ID, }) if err != nil { - return nil, fmt.Errorf("could not upsert action: %w", err) + return nil, fmt.Errorf("could not link services to worker: %w", err) } - actionUUIDs[i] = dbAction.ID - } + actionUUIDs := make([]pgtype.UUID, len(opts.Actions)) - err = w.queries.LinkActionsToWorker(ctx, tx, dbsqlc.LinkActionsToWorkerParams{ - Actionids: actionUUIDs, - Workerid: worker.ID, + for i, action := range opts.Actions { + dbAction, err := w.queries.UpsertAction(ctx, tx, dbsqlc.UpsertActionParams{ + Action: action, + Tenantid: pgTenantId, + }) + + if err != nil { + return nil, fmt.Errorf("could not upsert action: %w", err) + } + + actionUUIDs[i] = dbAction.ID + } + + err = w.queries.LinkActionsToWorker(ctx, tx, 
dbsqlc.LinkActionsToWorkerParams{ + Actionids: actionUUIDs, + Workerid: worker.ID, + }) + + if err != nil { + return nil, fmt.Errorf("could not link actions to worker: %w", err) + } + + err = tx.Commit(ctx) + + if err != nil { + return nil, fmt.Errorf("could not commit transaction: %w", err) + } + + return worker, nil }) - - if err != nil { - return nil, fmt.Errorf("could not link actions to worker: %w", err) - } - - err = tx.Commit(ctx) - - if err != nil { - return nil, fmt.Errorf("could not commit transaction: %w", err) - } - - return worker, nil } func (w *workerEngineRepository) UpdateWorker(ctx context.Context, tenantId, workerId string, opts *repository.UpdateWorkerOpts) (*dbsqlc.Worker, error) { diff --git a/internal/repository/prisma/workflow.go b/internal/repository/prisma/workflow.go index b76330a45..3a1fb3ee7 100644 --- a/internal/repository/prisma/workflow.go +++ b/internal/repository/prisma/workflow.go @@ -13,6 +13,7 @@ import ( "github.com/hatchet-dev/hatchet/internal/dagutils" "github.com/hatchet-dev/hatchet/internal/repository" + "github.com/hatchet-dev/hatchet/internal/repository/metered" "github.com/hatchet-dev/hatchet/internal/repository/prisma/db" "github.com/hatchet-dev/hatchet/internal/repository/prisma/dbsqlc" "github.com/hatchet-dev/hatchet/internal/repository/prisma/sqlchelpers" @@ -285,9 +286,10 @@ type workflowEngineRepository struct { v validator.Validator queries *dbsqlc.Queries l *zerolog.Logger + m *metered.Metered } -func NewWorkflowEngineRepository(pool *pgxpool.Pool, v validator.Validator, l *zerolog.Logger) repository.WorkflowEngineRepository { +func NewWorkflowEngineRepository(pool *pgxpool.Pool, v validator.Validator, l *zerolog.Logger, m *metered.Metered) repository.WorkflowEngineRepository { queries := dbsqlc.New() return &workflowEngineRepository{ @@ -295,6 +297,7 @@ func NewWorkflowEngineRepository(pool *pgxpool.Pool, v validator.Validator, l *z queries: queries, pool: pool, l: l, + m: m, } } @@ -734,6 +737,7 @@ func (r *workflowEngineRepository) createWorkflowVersionTxs(ctx context.Context, } for _, cronTrigger := range opts.CronTriggers { + _, err := r.queries.CreateWorkflowTriggerCronRef( ctx, tx, @@ -747,6 +751,7 @@ func (r *workflowEngineRepository) createWorkflowVersionTxs(ctx context.Context, if err != nil { return "", err } + } for _, scheduledTrigger := range opts.ScheduledTriggers { diff --git a/internal/repository/prisma/workflow_run.go b/internal/repository/prisma/workflow_run.go index 70c8e19f8..910ea52bf 100644 --- a/internal/repository/prisma/workflow_run.go +++ b/internal/repository/prisma/workflow_run.go @@ -13,6 +13,7 @@ import ( "github.com/rs/zerolog" "github.com/hatchet-dev/hatchet/internal/repository" + "github.com/hatchet-dev/hatchet/internal/repository/metered" "github.com/hatchet-dev/hatchet/internal/repository/prisma/db" "github.com/hatchet-dev/hatchet/internal/repository/prisma/dbsqlc" "github.com/hatchet-dev/hatchet/internal/repository/prisma/sqlchelpers" @@ -27,9 +28,10 @@ type workflowRunAPIRepository struct { v validator.Validator queries *dbsqlc.Queries l *zerolog.Logger + m *metered.Metered } -func NewWorkflowRunRepository(client *db.PrismaClient, pool *pgxpool.Pool, v validator.Validator, l *zerolog.Logger) repository.WorkflowRunAPIRepository { +func NewWorkflowRunRepository(client *db.PrismaClient, pool *pgxpool.Pool, v validator.Validator, l *zerolog.Logger, m *metered.Metered) repository.WorkflowRunAPIRepository { queries := dbsqlc.New() return &workflowRunAPIRepository{ @@ -38,6 +40,7 @@ func 
NewWorkflowRunRepository(client *db.PrismaClient, pool *pgxpool.Pool, v val pool: pool, queries: queries, l: l, + m: m, } } @@ -58,27 +61,29 @@ func (w *workflowRunAPIRepository) WorkflowRunMetricsCount(tenantId string, opts } func (w *workflowRunAPIRepository) CreateNewWorkflowRun(ctx context.Context, tenantId string, opts *repository.CreateWorkflowRunOpts) (*db.WorkflowRunModel, error) { - if err := w.v.Validate(opts); err != nil { - return nil, err - } + return metered.MakeMetered(ctx, w.m, dbsqlc.LimitResourceWORKFLOWRUN, tenantId, func() (*db.WorkflowRunModel, error) { + if err := w.v.Validate(opts); err != nil { + return nil, err + } - workflowRunId, err := createNewWorkflowRun(ctx, w.pool, w.queries, w.l, tenantId, opts) + workflowRunId, err := createNewWorkflowRun(ctx, w.pool, w.queries, w.l, tenantId, opts) - if err != nil { - return nil, err - } + if err != nil { + return nil, err + } - res, err := w.client.WorkflowRun.FindUnique( - db.WorkflowRun.ID.Equals(workflowRunId), - ).With( - defaultWorkflowRunPopulator()..., - ).Exec(context.Background()) + res, err := w.client.WorkflowRun.FindUnique( + db.WorkflowRun.ID.Equals(workflowRunId), + ).With( + defaultWorkflowRunPopulator()..., + ).Exec(context.Background()) - if err != nil { - return nil, err - } + if err != nil { + return nil, err + } - return res, nil + return res, nil + }) } func (w *workflowRunAPIRepository) GetWorkflowRunById(tenantId, id string) (*db.WorkflowRunModel, error) { @@ -134,9 +139,10 @@ type workflowRunEngineRepository struct { v validator.Validator queries *dbsqlc.Queries l *zerolog.Logger + m *metered.Metered } -func NewWorkflowRunEngineRepository(pool *pgxpool.Pool, v validator.Validator, l *zerolog.Logger) repository.WorkflowRunEngineRepository { +func NewWorkflowRunEngineRepository(pool *pgxpool.Pool, v validator.Validator, l *zerolog.Logger, m *metered.Metered) repository.WorkflowRunEngineRepository { queries := dbsqlc.New() return &workflowRunEngineRepository{ @@ -144,6 +150,7 @@ func NewWorkflowRunEngineRepository(pool *pgxpool.Pool, v validator.Validator, l pool: pool, queries: queries, l: l, + m: m, } } @@ -217,11 +224,26 @@ func (w *workflowRunEngineRepository) PopWorkflowRunsRoundRobin(ctx context.Cont } func (w *workflowRunEngineRepository) CreateNewWorkflowRun(ctx context.Context, tenantId string, opts *repository.CreateWorkflowRunOpts) (string, error) { - if err := w.v.Validate(opts); err != nil { + id, err := metered.MakeMetered(ctx, w.m, dbsqlc.LimitResourceWORKFLOWRUN, tenantId, func() (*string, error) { + + if err := w.v.Validate(opts); err != nil { + return nil, err + } + + id, err := createNewWorkflowRun(ctx, w.pool, w.queries, w.l, tenantId, opts) + + if err != nil { + return nil, err + } + + return &id, nil + }) + + if err != nil { return "", err } - return createNewWorkflowRun(ctx, w.pool, w.queries, w.l, tenantId, opts) + return *id, nil } func listWorkflowRuns(ctx context.Context, pool *pgxpool.Pool, queries *dbsqlc.Queries, l *zerolog.Logger, tenantId string, opts *repository.ListWorkflowRunsOpts) (*repository.ListWorkflowRunsResult, error) { diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 8db7d35fa..e67566800 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -40,6 +40,10 @@ type EngineRepository interface { RateLimit() RateLimitEngineRepository } +type EntitlementsRepository interface { + TenantLimit() TenantLimitRepository +} + func BoolPtr(b bool) *bool { return &b } diff --git 
a/internal/repository/tenant_alerting.go b/internal/repository/tenant_alerting.go
index fe7e8ebe6..338a8b832 100644
--- a/internal/repository/tenant_alerting.go
+++ b/internal/repository/tenant_alerting.go
@@ -11,9 +11,10 @@ import (
 )
 
 type UpsertTenantAlertingSettingsOpts struct {
-	MaxFrequency                   *string `validate:"omitnil,duration"`
-	EnableExpiringTokenAlerts      *bool   `validate:"omitnil"`
-	EnableWorkflowRunFailureAlerts *bool   `validate:"omitnil"`
+	MaxFrequency                    *string `validate:"omitnil,duration"`
+	EnableExpiringTokenAlerts       *bool   `validate:"omitnil"`
+	EnableWorkflowRunFailureAlerts  *bool   `validate:"omitnil"`
+	EnableTenantResourceLimitAlerts *bool   `validate:"omitnil"`
 }
 
 type UpdateTenantAlertingSettingsOpts struct {
diff --git a/internal/repository/tenant_limit.go b/internal/repository/tenant_limit.go
new file mode 100644
index 000000000..13dca85a8
--- /dev/null
+++ b/internal/repository/tenant_limit.go
@@ -0,0 +1,27 @@
+package repository
+
+import (
+	"context"
+
+	"github.com/hatchet-dev/hatchet/internal/repository/prisma/dbsqlc"
+)
+
+type TenantLimitConfig struct {
+	EnforceLimits bool
+}
+
+type TenantLimitRepository interface {
+	GetLimits(ctx context.Context, tenantId string) ([]*dbsqlc.TenantResourceLimit, error)
+
+	// CanCreate checks whether the tenant can create another instance of the given resource
+	CanCreate(ctx context.Context, resource dbsqlc.LimitResource, tenantId string) (bool, int, error)
+
+	// Meter increments the tenant's usage count for the given resource
+	Meter(ctx context.Context, resource dbsqlc.LimitResource, tenantId string) (*dbsqlc.TenantResourceLimit, error)
+
+	// CreateTenantDefaultLimits creates the default resource limits for a tenant
+	CreateTenantDefaultLimits(ctx context.Context, tenantId string) error
+
+	// ResolveAllTenantResourceLimits resolves (refills) all tenant resource limits whose window has passed
+	ResolveAllTenantResourceLimits(ctx context.Context) error
+}
diff --git a/internal/repository/ticker.go b/internal/repository/ticker.go
index 0631421d4..21d9f4c1b 100644
--- a/internal/repository/ticker.go
+++ b/internal/repository/ticker.go
@@ -49,6 +49,8 @@ type TickerEngineRepository interface {
 	PollTenantAlerts(ctx context.Context, tickerId string) ([]*dbsqlc.PollTenantAlertsRow, error)
 
 	PollExpiringTokens(ctx context.Context) ([]*dbsqlc.PollExpiringTokensRow, error)
+
+	PollTenantResourceLimitAlerts(ctx context.Context) ([]*dbsqlc.TenantResourceLimitAlert, error)
 	// // AddJobRun assigns a job run to a ticker.
// AddJobRun(tickerId string, jobRun *db.JobRunModel) (*db.TickerModel, error) diff --git a/internal/services/admin/admin.go b/internal/services/admin/admin.go index 3c406d58a..a9a412f79 100644 --- a/internal/services/admin/admin.go +++ b/internal/services/admin/admin.go @@ -16,17 +16,19 @@ type AdminService interface { type AdminServiceImpl struct { contracts.UnimplementedWorkflowServiceServer - repo repository.EngineRepository - mq msgqueue.MessageQueue - v validator.Validator + entitlements repository.EntitlementsRepository + repo repository.EngineRepository + mq msgqueue.MessageQueue + v validator.Validator } type AdminServiceOpt func(*AdminServiceOpts) type AdminServiceOpts struct { - repo repository.EngineRepository - mq msgqueue.MessageQueue - v validator.Validator + entitlements repository.EntitlementsRepository + repo repository.EngineRepository + mq msgqueue.MessageQueue + v validator.Validator } func defaultAdminServiceOpts() *AdminServiceOpts { @@ -43,6 +45,12 @@ func WithRepository(r repository.EngineRepository) AdminServiceOpt { } } +func WithEntitlementsRepository(r repository.EntitlementsRepository) AdminServiceOpt { + return func(opts *AdminServiceOpts) { + opts.entitlements = r + } +} + func WithMessageQueue(mq msgqueue.MessageQueue) AdminServiceOpt { return func(opts *AdminServiceOpts) { opts.mq = mq @@ -71,8 +79,9 @@ func NewAdminService(fs ...AdminServiceOpt) (AdminService, error) { } return &AdminServiceImpl{ - repo: opts.repo, - mq: opts.mq, - v: opts.v, + repo: opts.repo, + entitlements: opts.entitlements, + mq: opts.mq, + v: opts.v, }, nil } diff --git a/internal/services/admin/server.go b/internal/services/admin/server.go index 874065c30..da91befdb 100644 --- a/internal/services/admin/server.go +++ b/internal/services/admin/server.go @@ -14,6 +14,7 @@ import ( "github.com/hatchet-dev/hatchet/internal/msgqueue" "github.com/hatchet-dev/hatchet/internal/repository" + "github.com/hatchet-dev/hatchet/internal/repository/metered" "github.com/hatchet-dev/hatchet/internal/repository/prisma/dbsqlc" "github.com/hatchet-dev/hatchet/internal/repository/prisma/sqlchelpers" "github.com/hatchet-dev/hatchet/internal/services/admin/contracts" @@ -70,6 +71,13 @@ func (a *AdminServiceImpl) TriggerWorkflow(ctx context.Context, req *contracts.T req.Name, ) + if err == metered.ErrResourceExhausted { + return nil, status.Error( + codes.ResourceExhausted, + "workflow run limit exceeded", + ) + } + if err != nil { if errors.Is(err, pgx.ErrNoRows) { return nil, status.Error( @@ -123,6 +131,10 @@ func (a *AdminServiceImpl) TriggerWorkflow(ctx context.Context, req *contracts.T workflowRunId, err := a.repo.WorkflowRun().CreateNewWorkflowRun(ctx, tenantId, createOpts) + if err == metered.ErrResourceExhausted { + return nil, status.Errorf(codes.ResourceExhausted, "resource exhausted: workflow run limit exceeded for tenant") + } + if err != nil { return nil, fmt.Errorf("could not create workflow run: %w", err) } diff --git a/internal/services/controllers/events/controller.go b/internal/services/controllers/events/controller.go index 4db3290af..86759d0eb 100644 --- a/internal/services/controllers/events/controller.go +++ b/internal/services/controllers/events/controller.go @@ -22,8 +22,11 @@ type EventsController interface { } type EventsControllerImpl struct { - mq msgqueue.MessageQueue - l *zerolog.Logger + mq msgqueue.MessageQueue + l *zerolog.Logger + + entitlements repository.EntitlementsRepository + repo repository.EngineRepository dv datautils.DataDecoderValidator } @@ -31,10 +34,11 @@ 
type EventsControllerImpl struct { type EventsControllerOpt func(*EventsControllerOpts) type EventsControllerOpts struct { - mq msgqueue.MessageQueue - l *zerolog.Logger - repo repository.EngineRepository - dv datautils.DataDecoderValidator + mq msgqueue.MessageQueue + l *zerolog.Logger + entitlements repository.EntitlementsRepository + repo repository.EngineRepository + dv datautils.DataDecoderValidator } func defaultEventsControllerOpts() *EventsControllerOpts { @@ -63,6 +67,12 @@ func WithRepository(r repository.EngineRepository) EventsControllerOpt { } } +func WithEntitlementsRepository(r repository.EntitlementsRepository) EventsControllerOpt { + return func(opts *EventsControllerOpts) { + opts.entitlements = r + } +} + func WithDataDecoderValidator(dv datautils.DataDecoderValidator) EventsControllerOpt { return func(opts *EventsControllerOpts) { opts.dv = dv @@ -84,14 +94,19 @@ func New(fs ...EventsControllerOpt) (*EventsControllerImpl, error) { return nil, fmt.Errorf("repository is required. use WithRepository") } + if opts.entitlements == nil { + return nil, fmt.Errorf("entitlements repository is required. use WithEntitlementsRepository") + } + newLogger := opts.l.With().Str("service", "events-controller").Logger() opts.l = &newLogger return &EventsControllerImpl{ - mq: opts.mq, - l: opts.l, - repo: opts.repo, - dv: opts.dv, + mq: opts.mq, + l: opts.l, + repo: opts.repo, + entitlements: opts.entitlements, + dv: opts.dv, }, nil } @@ -178,6 +193,7 @@ func (ec *EventsControllerImpl) processEvent(ctx context.Context, tenantId, even workflowCp := workflowVersion g.Go(func() error { + // create a new workflow run in the database createOpts, err := repository.GetCreateWorkflowRunOptsFromEvent(eventId, workflowCp, data, additionalMetadata) @@ -192,7 +208,7 @@ func (ec *EventsControllerImpl) processEvent(ctx context.Context, tenantId, even } // send to workflow processing queue - return ec.mq.AddMessage( + err = ec.mq.AddMessage( context.Background(), msgqueue.WORKFLOW_PROCESSING_QUEUE, tasktypes.WorkflowRunQueuedToTask( @@ -200,6 +216,12 @@ func (ec *EventsControllerImpl) processEvent(ctx context.Context, tenantId, even workflowRunId, ), ) + + if err != nil { + return fmt.Errorf("could not add workflow run queued task: %w", err) + } + + return nil }) } diff --git a/internal/services/dispatcher/dispatcher.go b/internal/services/dispatcher/dispatcher.go index 57ea54606..2b17f4d8b 100644 --- a/internal/services/dispatcher/dispatcher.go +++ b/internal/services/dispatcher/dispatcher.go @@ -35,12 +35,15 @@ type Dispatcher interface { type DispatcherImpl struct { contracts.UnimplementedDispatcherServer - s gocron.Scheduler - mq msgqueue.MessageQueue - l *zerolog.Logger - dv datautils.DataDecoderValidator - v validator.Validator - repo repository.EngineRepository + s gocron.Scheduler + mq msgqueue.MessageQueue + l *zerolog.Logger + dv datautils.DataDecoderValidator + v validator.Validator + repo repository.EngineRepository + + entitlements repository.EntitlementsRepository + dispatcherId string workers *workers a *hatcheterrors.Wrapped @@ -114,6 +117,7 @@ type DispatcherOpts struct { l *zerolog.Logger dv datautils.DataDecoderValidator repo repository.EngineRepository + entitlements repository.EntitlementsRepository dispatcherId string alerter hatcheterrors.Alerter } @@ -148,6 +152,12 @@ func WithRepository(r repository.EngineRepository) DispatcherOpt { } } +func WithEntitlementsRepository(r repository.EntitlementsRepository) DispatcherOpt { + return func(opts *DispatcherOpts) { + opts.entitlements 
= r + } +} + func WithLogger(l *zerolog.Logger) DispatcherOpt { return func(opts *DispatcherOpts) { opts.l = l @@ -181,6 +191,10 @@ func New(fs ...DispatcherOpt) (*DispatcherImpl, error) { return nil, fmt.Errorf("repository is required. use WithRepository") } + if opts.entitlements == nil { + return nil, fmt.Errorf("entitlements repository is required. use WithEntitlementsRepository") + } + newLogger := opts.l.With().Str("service", "dispatcher").Logger() opts.l = &newLogger @@ -200,6 +214,7 @@ func New(fs ...DispatcherOpt) (*DispatcherImpl, error) { dv: opts.dv, v: validator.NewDefaultValidator(), repo: opts.repo, + entitlements: opts.entitlements, dispatcherId: opts.dispatcherId, workers: &workers{}, s: s, diff --git a/internal/services/dispatcher/server.go b/internal/services/dispatcher/server.go index 7d4688dbe..631183801 100644 --- a/internal/services/dispatcher/server.go +++ b/internal/services/dispatcher/server.go @@ -19,6 +19,7 @@ import ( "github.com/hatchet-dev/hatchet/internal/datautils" "github.com/hatchet-dev/hatchet/internal/msgqueue" "github.com/hatchet-dev/hatchet/internal/repository" + "github.com/hatchet-dev/hatchet/internal/repository/metered" "github.com/hatchet-dev/hatchet/internal/repository/prisma/dbsqlc" "github.com/hatchet-dev/hatchet/internal/repository/prisma/sqlchelpers" "github.com/hatchet-dev/hatchet/internal/services/dispatcher/contracts" @@ -143,6 +144,10 @@ func (s *DispatcherImpl) Register(ctx context.Context, request *contracts.Worker // create a worker in the database worker, err := s.repo.Worker().CreateNewWorker(ctx, tenantId, opts) + if err == metered.ErrResourceExhausted { + return nil, status.Errorf(codes.ResourceExhausted, "resource exhausted: tenant worker limit exceeded") + } + if err != nil { s.l.Error().Err(err).Msgf("could not create worker for tenant %s", tenantId) return nil, err diff --git a/internal/services/ingestor/ingestor.go b/internal/services/ingestor/ingestor.go index 0600f9afb..bb2275737 100644 --- a/internal/services/ingestor/ingestor.go +++ b/internal/services/ingestor/ingestor.go @@ -7,6 +7,7 @@ import ( "github.com/hatchet-dev/hatchet/internal/datautils" "github.com/hatchet-dev/hatchet/internal/msgqueue" "github.com/hatchet-dev/hatchet/internal/repository" + "github.com/hatchet-dev/hatchet/internal/repository/metered" "github.com/hatchet-dev/hatchet/internal/repository/prisma/dbsqlc" "github.com/hatchet-dev/hatchet/internal/repository/prisma/sqlchelpers" "github.com/hatchet-dev/hatchet/internal/services/ingestor/contracts" @@ -24,10 +25,11 @@ type Ingestor interface { type IngestorOptFunc func(*IngestorOpts) type IngestorOpts struct { - eventRepository repository.EventEngineRepository - streamEventRepository repository.StreamEventsEngineRepository - logRepository repository.LogsEngineRepository - mq msgqueue.MessageQueue + eventRepository repository.EventEngineRepository + streamEventRepository repository.StreamEventsEngineRepository + logRepository repository.LogsEngineRepository + entitlementsRepository repository.EntitlementsRepository + mq msgqueue.MessageQueue } func WithEventRepository(r repository.EventEngineRepository) IngestorOptFunc { @@ -48,6 +50,12 @@ func WithLogRepository(r repository.LogsEngineRepository) IngestorOptFunc { } } +func WithEntitlementsRepository(r repository.EntitlementsRepository) IngestorOptFunc { + return func(opts *IngestorOpts) { + opts.entitlementsRepository = r + } +} + func WithMessageQueue(mq msgqueue.MessageQueue) IngestorOptFunc { return func(opts *IngestorOpts) { opts.mq = mq @@ -61,11 
+69,13 @@ func defaultIngestorOpts() *IngestorOpts { type IngestorImpl struct { contracts.UnimplementedEventsServiceServer - eventRepository repository.EventEngineRepository - logRepository repository.LogsEngineRepository - streamEventRepository repository.StreamEventsEngineRepository - mq msgqueue.MessageQueue - v validator.Validator + eventRepository repository.EventEngineRepository + logRepository repository.LogsEngineRepository + streamEventRepository repository.StreamEventsEngineRepository + entitlementsRepository repository.EntitlementsRepository + + mq msgqueue.MessageQueue + v validator.Validator } func NewIngestor(fs ...IngestorOptFunc) (Ingestor, error) { @@ -92,11 +102,13 @@ func NewIngestor(fs ...IngestorOptFunc) (Ingestor, error) { } return &IngestorImpl{ - eventRepository: opts.eventRepository, - streamEventRepository: opts.streamEventRepository, - logRepository: opts.logRepository, - mq: opts.mq, - v: validator.NewDefaultValidator(), + eventRepository: opts.eventRepository, + streamEventRepository: opts.streamEventRepository, + entitlementsRepository: opts.entitlementsRepository, + + logRepository: opts.logRepository, + mq: opts.mq, + v: validator.NewDefaultValidator(), }, nil } @@ -111,6 +123,10 @@ func (i *IngestorImpl) IngestEvent(ctx context.Context, tenantId, key string, da AdditionalMetadata: *metadata, }) + if err == metered.ErrResourceExhausted { + return nil, metered.ErrResourceExhausted + } + if err != nil { return nil, fmt.Errorf("could not create event: %w", err) } @@ -143,6 +159,10 @@ func (i *IngestorImpl) IngestReplayedEvent(ctx context.Context, tenantId string, ReplayedEvent: &replayedId, }) + if err == metered.ErrResourceExhausted { + return nil, metered.ErrResourceExhausted + } + if err != nil { return nil, fmt.Errorf("could not create event: %w", err) } diff --git a/internal/services/ingestor/server.go b/internal/services/ingestor/server.go index 8ee1f4556..2ba3168d6 100644 --- a/internal/services/ingestor/server.go +++ b/internal/services/ingestor/server.go @@ -12,6 +12,7 @@ import ( "github.com/hatchet-dev/hatchet/internal/datautils" "github.com/hatchet-dev/hatchet/internal/msgqueue" "github.com/hatchet-dev/hatchet/internal/repository" + "github.com/hatchet-dev/hatchet/internal/repository/metered" "github.com/hatchet-dev/hatchet/internal/repository/prisma/dbsqlc" "github.com/hatchet-dev/hatchet/internal/repository/prisma/sqlchelpers" "github.com/hatchet-dev/hatchet/internal/services/ingestor/contracts" @@ -30,6 +31,10 @@ func (i *IngestorImpl) Push(ctx context.Context, req *contracts.PushEventRequest } event, err := i.IngestEvent(ctx, tenantId, req.Key, []byte(req.Payload), &additionalMeta) + if err == metered.ErrResourceExhausted { + return nil, status.Errorf(codes.ResourceExhausted, "resource exhausted: event limit exceeded for tenant") + } + if err != nil { return nil, err } diff --git a/internal/services/ticker/cron.go b/internal/services/ticker/cron.go index 5c633b0e3..a4d084fdf 100644 --- a/internal/services/ticker/cron.go +++ b/internal/services/ticker/cron.go @@ -138,6 +138,7 @@ func (t *TickerImpl) runCronWorkflow(tenantId, workflowVersionId, cron, cronPare t.l.Err(err).Msg("could not add workflow run queued task") return } + } } diff --git a/internal/services/ticker/tenant_alerting.go b/internal/services/ticker/tenant_alerting.go index 771d27bcc..2e3660d7a 100644 --- a/internal/services/ticker/tenant_alerting.go +++ b/internal/services/ticker/tenant_alerting.go @@ -74,3 +74,46 @@ func (t *TickerImpl) runExpiringTokenAlerts(ctx context.Context) 
func() { } } } + +func (t *TickerImpl) runTenantResourceLimitAlerts(ctx context.Context) func() { + return func() { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + t.l.Debug().Msg("ticker: resolving tenant resource limits") + + err := t.entitlements.TenantLimit().ResolveAllTenantResourceLimits(ctx) + + if err != nil { + t.l.Err(err).Msg("could not resolve tenant resource limits") + return + } + + t.l.Debug().Msg("ticker: polling tenant resource limit alerts") + + alerts, err := t.repo.Ticker().PollTenantResourceLimitAlerts(ctx) + + if err != nil { + t.l.Err(err).Msg("could not poll tenant resource limit alerts") + return + } + + t.l.Debug().Msgf("ticker: alerting %d tenant resource limit alerts", len(alerts)) + + for _, alert := range alerts { + tenantId := sqlchelpers.UUIDToStr(alert.TenantId) + + t.l.Debug().Msgf("ticker: handling tenant resource limit alert for tenant %s", tenantId) + + innerErr := t.ta.SendTenantResourceLimitAlert(tenantId, alert) + + if innerErr != nil { + err = multierror.Append(err, innerErr) + } + } + + if err != nil { + t.l.Err(err).Msg("could not handle tenant resource limit alerts") + } + } +} diff --git a/internal/services/ticker/ticker.go b/internal/services/ticker/ticker.go index 4633a23cf..62f645dcc 100644 --- a/internal/services/ticker/ticker.go +++ b/internal/services/ticker/ticker.go @@ -22,8 +22,11 @@ type Ticker interface { } type TickerImpl struct { - mq msgqueue.MessageQueue - l *zerolog.Logger + mq msgqueue.MessageQueue + l *zerolog.Logger + + entitlements repository.EntitlementsRepository + repo repository.EngineRepository s gocron.Scheduler ta *alerting.TenantAlertManager @@ -39,11 +42,13 @@ type TickerImpl struct { type TickerOpt func(*TickerOpts) type TickerOpts struct { - mq msgqueue.MessageQueue - l *zerolog.Logger - repo repository.EngineRepository - tickerId string - ta *alerting.TenantAlertManager + mq msgqueue.MessageQueue + l *zerolog.Logger + + entitlements repository.EntitlementsRepository + repo repository.EngineRepository + tickerId string + ta *alerting.TenantAlertManager dv datautils.DataDecoderValidator } @@ -69,6 +74,12 @@ func WithRepository(r repository.EngineRepository) TickerOpt { } } +func WithEntitlementsRepository(r repository.EntitlementsRepository) TickerOpt { + return func(opts *TickerOpts) { + opts.entitlements = r + } +} + func WithLogger(l *zerolog.Logger) TickerOpt { return func(opts *TickerOpts) { opts.l = l @@ -96,6 +107,10 @@ func New(fs ...TickerOpt) (*TickerImpl, error) { return nil, fmt.Errorf("repository is required. use WithRepository") } + if opts.entitlements == nil { + return nil, fmt.Errorf("entitlements repository is required. use WithEntitlementsRepository") + } + if opts.ta == nil { return nil, fmt.Errorf("tenant alerter is required. 
use WithTenantAlerter") } @@ -110,13 +125,14 @@ func New(fs ...TickerOpt) (*TickerImpl, error) { } return &TickerImpl{ - mq: opts.mq, - l: opts.l, - repo: opts.repo, - s: s, - dv: opts.dv, - tickerId: opts.tickerId, - ta: opts.ta, + mq: opts.mq, + l: opts.l, + repo: opts.repo, + entitlements: opts.entitlements, + s: s, + dv: opts.dv, + tickerId: opts.tickerId, + ta: opts.ta, }, nil } @@ -235,6 +251,19 @@ func (t *TickerImpl) Start() (func() error, error) { return nil, fmt.Errorf("could not schedule tenant alert polling: %w", err) } + // poll for tenant resource limit alerts every 15 minutes + _, err = t.s.NewJob( + gocron.DurationJob(time.Minute*15), + gocron.NewTask( + t.runTenantResourceLimitAlerts(ctx), + ), + ) + + if err != nil { + cancel() + return nil, fmt.Errorf("could not schedule tenant resource limit alert polling: %w", err) + } + // poll to resolve worker semaphore slots every 1 minute _, err = t.s.NewJob( gocron.DurationJob(time.Minute*1), diff --git a/pkg/client/rest/gen.go b/pkg/client/rest/gen.go index 4a137cf31..5413e66f9 100644 --- a/pkg/client/rest/gen.go +++ b/pkg/client/rest/gen.go @@ -111,6 +111,15 @@ const ( OWNER TenantMemberRole = "OWNER" ) +// Defines values for TenantResource. +const ( + CRON TenantResource = "CRON" + EVENT TenantResource = "EVENT" + SCHEDULE TenantResource = "SCHEDULE" + WORKER TenantResource = "WORKER" + WORKFLOWRUN TenantResource = "WORKFLOW_RUN" +) + // Defines values for WorkerStatus. const ( ACTIVE WorkerStatus = "ACTIVE" @@ -663,6 +672,9 @@ type TenantAlertingSettings struct { // EnableExpiringTokenAlerts Whether to enable alerts when tokens are approaching expiration. EnableExpiringTokenAlerts *bool `json:"enableExpiringTokenAlerts,omitempty"` + // EnableTenantResourceLimitAlerts Whether to enable alerts when tenant resources are approaching limits. + EnableTenantResourceLimitAlerts *bool `json:"enableTenantResourceLimitAlerts,omitempty"` + // EnableWorkflowRunFailureAlerts Whether to send alerts when workflow runs fail. EnableWorkflowRunFailureAlerts *bool `json:"enableWorkflowRunFailureAlerts,omitempty"` @@ -714,6 +726,35 @@ type TenantMemberList struct { // TenantMemberRole defines model for TenantMemberRole. type TenantMemberRole string +// TenantResource defines model for TenantResource. +type TenantResource string + +// TenantResourceLimit defines model for TenantResourceLimit. +type TenantResourceLimit struct { + // AlarmValue The alarm value associated with this limit to warn of approaching limit value. + AlarmValue *int `json:"alarmValue,omitempty"` + + // LastRefill The last time the limit was refilled. + LastRefill *time.Time `json:"lastRefill,omitempty"` + + // LimitValue The limit associated with this limit. + LimitValue int `json:"limitValue"` + Metadata APIResourceMeta `json:"metadata"` + Resource TenantResource `json:"resource"` + + // Value The current value associated with this limit. + Value int `json:"value"` + + // Window The meter window for the limit. (i.e. 1 day, 1 week, 1 month) + Window *string `json:"window,omitempty"` +} + +// TenantResourcePolicy defines model for TenantResourcePolicy. +type TenantResourcePolicy struct { + // Limits A list of resource limits for the tenant. + Limits []TenantResourceLimit `json:"limits"` +} + // TriggerWorkflowRunRequest defines model for TriggerWorkflowRunRequest. 
type TriggerWorkflowRunRequest struct { AdditionalMetadata *map[string]interface{} `json:"additionalMetadata,omitempty"` @@ -742,6 +783,9 @@ type UpdateTenantRequest struct { // EnableExpiringTokenAlerts Whether to enable alerts when tokens are approaching expiration. EnableExpiringTokenAlerts *bool `json:"enableExpiringTokenAlerts,omitempty"` + // EnableTenantResourceLimitAlerts Whether to enable alerts when tenant resources are approaching limits. + EnableTenantResourceLimitAlerts *bool `json:"enableTenantResourceLimitAlerts,omitempty"` + // EnableWorkflowRunFailureAlerts Whether to send alerts when workflow runs fail. EnableWorkflowRunFailureAlerts *bool `json:"enableWorkflowRunFailureAlerts,omitempty"` @@ -1443,6 +1487,9 @@ type ClientInterface interface { // TenantMemberDelete request TenantMemberDelete(ctx context.Context, tenant openapi_types.UUID, member openapi_types.UUID, reqEditors ...RequestEditorFn) (*http.Response, error) + // TenantResourcePolicyGet request + TenantResourcePolicyGet(ctx context.Context, tenant openapi_types.UUID, reqEditors ...RequestEditorFn) (*http.Response, error) + // SlackWebhookList request SlackWebhookList(ctx context.Context, tenant openapi_types.UUID, reqEditors ...RequestEditorFn) (*http.Response, error) @@ -2121,6 +2168,18 @@ func (c *Client) TenantMemberDelete(ctx context.Context, tenant openapi_types.UU return c.Client.Do(req) } +func (c *Client) TenantResourcePolicyGet(ctx context.Context, tenant openapi_types.UUID, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewTenantResourcePolicyGetRequest(c.Server, tenant) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + func (c *Client) SlackWebhookList(ctx context.Context, tenant openapi_types.UUID, reqEditors ...RequestEditorFn) (*http.Response, error) { req, err := NewSlackWebhookListRequest(c.Server, tenant) if err != nil { @@ -4333,6 +4392,40 @@ func NewTenantMemberDeleteRequest(server string, tenant openapi_types.UUID, memb return req, nil } +// NewTenantResourcePolicyGetRequest generates requests for TenantResourcePolicyGet +func NewTenantResourcePolicyGetRequest(server string, tenant openapi_types.UUID) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "tenant", runtime.ParamLocationPath, tenant) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/tenants/%s/resource-policy", pathParam0) + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + // NewSlackWebhookListRequest generates requests for SlackWebhookList func NewSlackWebhookListRequest(server string, tenant openapi_types.UUID) (*http.Request, error) { var err error @@ -6234,6 +6327,9 @@ type ClientWithResponsesInterface interface { // TenantMemberDeleteWithResponse request TenantMemberDeleteWithResponse(ctx context.Context, tenant openapi_types.UUID, member openapi_types.UUID, reqEditors ...RequestEditorFn) (*TenantMemberDeleteResponse, error) + // TenantResourcePolicyGetWithResponse request + TenantResourcePolicyGetWithResponse(ctx context.Context, tenant openapi_types.UUID, reqEditors ...RequestEditorFn) (*TenantResourcePolicyGetResponse, error) + // SlackWebhookListWithResponse request SlackWebhookListWithResponse(ctx context.Context, tenant openapi_types.UUID, reqEditors ...RequestEditorFn) (*SlackWebhookListResponse, error) @@ -7070,6 +7166,7 @@ type EventUpdateReplayResponse struct { JSON200 *EventList JSON400 *APIErrors JSON403 *APIErrors + JSON429 *APIErrors } // Status returns HTTPResponse.Status @@ -7231,6 +7328,30 @@ func (r TenantMemberDeleteResponse) StatusCode() int { return 0 } +type TenantResourcePolicyGetResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *TenantResourcePolicy + JSON400 *APIErrors + JSON403 *APIError +} + +// Status returns HTTPResponse.Status +func (r TenantResourcePolicyGetResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r TenantResourcePolicyGetResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + type SlackWebhookListResponse struct { Body []byte HTTPResponse *http.Response @@ -8090,6 +8211,7 @@ type WorkflowRunCreateResponse struct { JSON400 *APIErrors JSON403 *APIErrors JSON404 *APIErrors + JSON429 *APIErrors } // Status returns HTTPResponse.Status @@ -8554,6 +8676,15 @@ func (c *ClientWithResponses) TenantMemberDeleteWithResponse(ctx context.Context return ParseTenantMemberDeleteResponse(rsp) } +// TenantResourcePolicyGetWithResponse request returning *TenantResourcePolicyGetResponse +func (c *ClientWithResponses) TenantResourcePolicyGetWithResponse(ctx context.Context, tenant openapi_types.UUID, reqEditors ...RequestEditorFn) (*TenantResourcePolicyGetResponse, error) { + rsp, err := c.TenantResourcePolicyGet(ctx, tenant, reqEditors...) + if err != nil { + return nil, err + } + return ParseTenantResourcePolicyGetResponse(rsp) +} + // SlackWebhookListWithResponse request returning *SlackWebhookListResponse func (c *ClientWithResponses) SlackWebhookListWithResponse(ctx context.Context, tenant openapi_types.UUID, reqEditors ...RequestEditorFn) (*SlackWebhookListResponse, error) { rsp, err := c.SlackWebhookList(ctx, tenant, reqEditors...) 
@@ -10146,6 +10277,13 @@ func ParseEventUpdateReplayResponse(rsp *http.Response) (*EventUpdateReplayRespo } response.JSON403 = &dest + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 429: + var dest APIErrors + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON429 = &dest + } return response, nil @@ -10384,6 +10522,46 @@ func ParseTenantMemberDeleteResponse(rsp *http.Response) (*TenantMemberDeleteRes return response, nil } +// ParseTenantResourcePolicyGetResponse parses an HTTP response from a TenantResourcePolicyGetWithResponse call +func ParseTenantResourcePolicyGetResponse(rsp *http.Response) (*TenantResourcePolicyGetResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &TenantResourcePolicyGetResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest TenantResourcePolicy + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest APIErrors + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 403: + var dest APIError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON403 = &dest + + } + + return response, nil +} + // ParseSlackWebhookListResponse parses an HTTP response from a SlackWebhookListWithResponse call func ParseSlackWebhookListResponse(rsp *http.Response) (*SlackWebhookListResponse, error) { bodyBytes, err := io.ReadAll(rsp.Body) @@ -11746,6 +11924,13 @@ func ParseWorkflowRunCreateResponse(rsp *http.Response) (*WorkflowRunCreateRespo } response.JSON404 = &dest + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 429: + var dest APIErrors + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON429 = &dest + } return response, nil diff --git a/prisma/migrations/20240606145243_v0_31_0/migration.sql b/prisma/migrations/20240606145243_v0_31_0/migration.sql new file mode 100644 index 000000000..64b8fc991 --- /dev/null +++ b/prisma/migrations/20240606145243_v0_31_0/migration.sql @@ -0,0 +1,58 @@ +-- CreateEnum +CREATE TYPE "LimitResource" AS ENUM ('WORKFLOW_RUN', 'EVENT', 'WORKER', 'CRON', 'SCHEDULE'); + +-- CreateEnum +CREATE TYPE "TenantResourceLimitAlertType" AS ENUM ('Alarm', 'Exhausted'); + +-- AlterTable +ALTER TABLE "TenantAlertingSettings" ADD COLUMN "enableTenantResourceLimitAlerts" BOOLEAN NOT NULL DEFAULT true; + +-- CreateTable +CREATE TABLE "TenantResourceLimit" ( + "id" UUID NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "resource" "LimitResource" NOT NULL, + "tenantId" UUID NOT NULL, + "limitValue" INTEGER NOT NULL, + "alarmValue" INTEGER, + "value" INTEGER NOT NULL DEFAULT 0, + "window" TEXT, + "lastRefill" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "customValueMeter" BOOLEAN NOT NULL DEFAULT false, + + CONSTRAINT "TenantResourceLimit_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "TenantResourceLimitAlert" ( + "id" UUID NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL 
DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "resourceLimitId" UUID NOT NULL, + "tenantId" UUID NOT NULL, + "resource" "LimitResource" NOT NULL, + "alertType" "TenantResourceLimitAlertType" NOT NULL, + "value" INTEGER NOT NULL, + "limit" INTEGER NOT NULL, + + CONSTRAINT "TenantResourceLimitAlert_pkey" PRIMARY KEY ("id") +); + +-- CreateIndex +CREATE UNIQUE INDEX "TenantResourceLimit_id_key" ON "TenantResourceLimit"("id"); + +-- CreateIndex +CREATE UNIQUE INDEX "TenantResourceLimit_tenantId_resource_key" ON "TenantResourceLimit"("tenantId", "resource"); + +-- CreateIndex +CREATE UNIQUE INDEX "TenantResourceLimitAlert_id_key" ON "TenantResourceLimitAlert"("id"); + +-- AddForeignKey +ALTER TABLE "TenantResourceLimit" ADD CONSTRAINT "TenantResourceLimit_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "TenantResourceLimitAlert" ADD CONSTRAINT "TenantResourceLimitAlert_resourceLimitId_fkey" FOREIGN KEY ("resourceLimitId") REFERENCES "TenantResourceLimit"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "TenantResourceLimitAlert" ADD CONSTRAINT "TenantResourceLimitAlert_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE; diff --git a/prisma/schema.prisma b/prisma/schema.prisma index b81563336..d58186712 100644 --- a/prisma/schema.prisma +++ b/prisma/schema.prisma @@ -138,6 +138,77 @@ model Tenant { alertMemberEmails Boolean @default(true) slackWebhooks SlackAppWebhook[] alertingSettings TenantAlertingSettings? + + limits TenantResourceLimit[] + limitAlerts TenantResourceLimitAlert[] +} + + +enum LimitResource { + WORKFLOW_RUN + EVENT + WORKER + CRON + SCHEDULE +} + +model TenantResourceLimit { + id String @id @unique @default(uuid()) @db.Uuid + createdAt DateTime @default(now()) + updatedAt DateTime @default(now()) @updatedAt + + resource LimitResource + + tenant Tenant @relation(fields: [tenantId], references: [id], onDelete: Cascade, onUpdate: Cascade) + tenantId String @db.Uuid + + // The max number of requests allowed in the window + limitValue Int + + // The max number before an alert is triggered + alarmValue Int? + + // the current rate limit bucket value + value Int @default(0) + + // the meter window + window String? 
+ + // the time the rate limit was last refilled + lastRefill DateTime @default(now()) + + customValueMeter Boolean @default(false) + + alerts TenantResourceLimitAlert[] + + @@unique([tenantId, resource]) +} + +enum TenantResourceLimitAlertType { + Alarm + Exhausted +} + +model TenantResourceLimitAlert { + id String @id @unique @default(uuid()) @db.Uuid + createdAt DateTime @default(now()) + updatedAt DateTime @default(now()) @updatedAt + + resourceLimit TenantResourceLimit @relation(fields: [resourceLimitId], references: [id], onDelete: Cascade, onUpdate: Cascade) + resourceLimitId String @db.Uuid + + tenant Tenant @relation(fields: [tenantId], references: [id], onDelete: Cascade, onUpdate: Cascade) + tenantId String @db.Uuid + + resource LimitResource + + alertType TenantResourceLimitAlertType + + // the current value of the resource limit at the time of the alert + value Int + + // the limit at the time of the alert + limit Int } model TenantAlertingSettings { @@ -152,8 +223,9 @@ model TenantAlertingSettings { tenantId String @unique @db.Uuid // workflow run failure alerts - enableWorkflowRunFailureAlerts Boolean @default(false) - enableExpiringTokenAlerts Boolean @default(true) + enableWorkflowRunFailureAlerts Boolean @default(false) + enableExpiringTokenAlerts Boolean @default(true) + enableTenantResourceLimitAlerts Boolean @default(true) // the maximum alerting frequency maxFrequency String @default("1h") @@ -165,6 +237,7 @@ model TenantAlertingSettings { tickerId String? @db.Uuid } + enum TenantMemberRole { OWNER ADMIN diff --git a/sql/migrations/20240606145243_v0_31_0.sql b/sql/migrations/20240606145243_v0_31_0.sql new file mode 100644 index 000000000..96a4ec446 --- /dev/null +++ b/sql/migrations/20240606145243_v0_31_0.sql @@ -0,0 +1,16 @@ +-- Create enum type "LimitResource" +CREATE TYPE "LimitResource" AS ENUM ('WORKFLOW_RUN', 'EVENT', 'WORKER', 'CRON', 'SCHEDULE'); +-- Create enum type "TenantResourceLimitAlertType" +CREATE TYPE "TenantResourceLimitAlertType" AS ENUM ('Alarm', 'Exhausted'); +-- Modify "TenantAlertingSettings" table +ALTER TABLE "TenantAlertingSettings" ADD COLUMN "enableTenantResourceLimitAlerts" boolean NOT NULL DEFAULT true; +-- Create "TenantResourceLimit" table +CREATE TABLE "TenantResourceLimit" ("id" uuid NOT NULL, "createdAt" timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, "updatedAt" timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, "resource" "LimitResource" NOT NULL, "tenantId" uuid NOT NULL, "limitValue" integer NOT NULL, "alarmValue" integer NULL, "value" integer NOT NULL DEFAULT 0, "window" text NULL, "lastRefill" timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, "customValueMeter" boolean NOT NULL DEFAULT false, PRIMARY KEY ("id"), CONSTRAINT "TenantResourceLimit_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant" ("id") ON UPDATE CASCADE ON DELETE CASCADE); +-- Create index "TenantResourceLimit_id_key" to table: "TenantResourceLimit" +CREATE UNIQUE INDEX "TenantResourceLimit_id_key" ON "TenantResourceLimit" ("id"); +-- Create index "TenantResourceLimit_tenantId_resource_key" to table: "TenantResourceLimit" +CREATE UNIQUE INDEX "TenantResourceLimit_tenantId_resource_key" ON "TenantResourceLimit" ("tenantId", "resource"); +-- Create "TenantResourceLimitAlert" table +CREATE TABLE "TenantResourceLimitAlert" ("id" uuid NOT NULL, "createdAt" timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, "updatedAt" timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, "resourceLimitId" uuid NOT NULL, "tenantId" uuid NOT NULL, "resource" "LimitResource" NOT NULL, 
"alertType" "TenantResourceLimitAlertType" NOT NULL, "value" integer NOT NULL, "limit" integer NOT NULL, PRIMARY KEY ("id"), CONSTRAINT "TenantResourceLimitAlert_resourceLimitId_fkey" FOREIGN KEY ("resourceLimitId") REFERENCES "TenantResourceLimit" ("id") ON UPDATE CASCADE ON DELETE CASCADE, CONSTRAINT "TenantResourceLimitAlert_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant" ("id") ON UPDATE CASCADE ON DELETE CASCADE); +-- Create index "TenantResourceLimitAlert_id_key" to table: "TenantResourceLimitAlert" +CREATE UNIQUE INDEX "TenantResourceLimitAlert_id_key" ON "TenantResourceLimitAlert" ("id"); diff --git a/sql/migrations/atlas.sum b/sql/migrations/atlas.sum index 31cd09a34..5805980fb 100644 --- a/sql/migrations/atlas.sum +++ b/sql/migrations/atlas.sum @@ -1,4 +1,4 @@ -h1:JJ+ixgxQ7UzKYCWn0uFrl3w3AjPFlq3FXcztV+/sg5k= +h1:mms5Qn9rS16mH8+t39cnvxoR7VXn+p0ay65WHdWypcA= 20240115180414_init.sql h1:Ef3ZyjAHkmJPdGF/dEWCahbwgcg6uGJKnDxW2JCRi2k= 20240122014727_v0_6_0.sql h1:o/LdlteAeFgoHJ3e/M4Xnghqt9826IE/Y/h0q95Acuo= 20240126235456_v0_7_0.sql h1:KiVzt/hXgQ6esbdC6OMJOOWuYEXmy1yeCpmsVAHTFKs= @@ -31,3 +31,4 @@ h1:JJ+ixgxQ7UzKYCWn0uFrl3w3AjPFlq3FXcztV+/sg5k= 20240531142907_v0_29_0.sql h1:lhXwSIKk4deWOq/YKcSYCF/4kbZ0lh0vcbun/vqWOgc= 20240531200417_v_0_30_0.sql h1:Fcse6apNWV+sXg+HzQzi2/0WMFu0GmnNSv4fn8OCa64= 20240531200418_v0_30_1.sql h1:jPAKmGkP0Ecq1mUk9o2qr5S0fEV46oXicdlGh1TmBQg= +20240606145243_v0_31_0.sql h1:ALisDQv8IPGe6MiBSfE/Esdl5x4pzNHIVMavlsBXIPE= diff --git a/sql/schema/schema.sql b/sql/schema/schema.sql index 59d525cff..4b5973ffd 100644 --- a/sql/schema/schema.sql +++ b/sql/schema/schema.sql @@ -10,6 +10,9 @@ CREATE TYPE "JobKind" AS ENUM ('DEFAULT', 'ON_FAILURE'); -- CreateEnum CREATE TYPE "JobRunStatus" AS ENUM ('PENDING', 'RUNNING', 'SUCCEEDED', 'FAILED', 'CANCELLED'); +-- CreateEnum +CREATE TYPE "LimitResource" AS ENUM ('WORKFLOW_RUN', 'EVENT', 'WORKER', 'CRON', 'SCHEDULE'); + -- CreateEnum CREATE TYPE "LogLineLevel" AS ENUM ('DEBUG', 'INFO', 'WARN', 'ERROR'); @@ -25,6 +28,9 @@ CREATE TYPE "StepRunStatus" AS ENUM ('PENDING', 'PENDING_ASSIGNMENT', 'ASSIGNED' -- CreateEnum CREATE TYPE "TenantMemberRole" AS ENUM ('OWNER', 'ADMIN', 'MEMBER'); +-- CreateEnum +CREATE TYPE "TenantResourceLimitAlertType" AS ENUM ('Alarm', 'Exhausted'); + -- CreateEnum CREATE TYPE "VcsProvider" AS ENUM ('GITHUB'); @@ -445,6 +451,7 @@ CREATE TABLE "TenantAlertingSettings" ( "tickerId" UUID, "enableExpiringTokenAlerts" BOOLEAN NOT NULL DEFAULT true, "enableWorkflowRunFailureAlerts" BOOLEAN NOT NULL DEFAULT false, + "enableTenantResourceLimitAlerts" BOOLEAN NOT NULL DEFAULT true, CONSTRAINT "TenantAlertingSettings_pkey" PRIMARY KEY ("id") ); @@ -476,6 +483,38 @@ CREATE TABLE "TenantMember" ( CONSTRAINT "TenantMember_pkey" PRIMARY KEY ("id") ); +-- CreateTable +CREATE TABLE "TenantResourceLimit" ( + "id" UUID NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "resource" "LimitResource" NOT NULL, + "tenantId" UUID NOT NULL, + "limitValue" INTEGER NOT NULL, + "alarmValue" INTEGER, + "value" INTEGER NOT NULL DEFAULT 0, + "window" TEXT, + "lastRefill" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "customValueMeter" BOOLEAN NOT NULL DEFAULT false, + + CONSTRAINT "TenantResourceLimit_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "TenantResourceLimitAlert" ( + "id" UUID NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + 
"resourceLimitId" UUID NOT NULL, + "tenantId" UUID NOT NULL, + "resource" "LimitResource" NOT NULL, + "alertType" "TenantResourceLimitAlertType" NOT NULL, + "value" INTEGER NOT NULL, + "limit" INTEGER NOT NULL, + + CONSTRAINT "TenantResourceLimitAlert_pkey" PRIMARY KEY ("id") +); + -- CreateTable CREATE TABLE "TenantVcsProvider" ( "id" UUID NOT NULL, @@ -928,6 +967,15 @@ CREATE UNIQUE INDEX "TenantMember_id_key" ON "TenantMember"("id" ASC); -- CreateIndex CREATE UNIQUE INDEX "TenantMember_tenantId_userId_key" ON "TenantMember"("tenantId" ASC, "userId" ASC); +-- CreateIndex +CREATE UNIQUE INDEX "TenantResourceLimit_id_key" ON "TenantResourceLimit"("id" ASC); + +-- CreateIndex +CREATE UNIQUE INDEX "TenantResourceLimit_tenantId_resource_key" ON "TenantResourceLimit"("tenantId" ASC, "resource" ASC); + +-- CreateIndex +CREATE UNIQUE INDEX "TenantResourceLimitAlert_id_key" ON "TenantResourceLimitAlert"("id" ASC); + -- CreateIndex CREATE UNIQUE INDEX "TenantVcsProvider_id_key" ON "TenantVcsProvider"("id" ASC); @@ -1234,6 +1282,15 @@ ALTER TABLE "TenantMember" ADD CONSTRAINT "TenantMember_tenantId_fkey" FOREIGN K -- AddForeignKey ALTER TABLE "TenantMember" ADD CONSTRAINT "TenantMember_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE; +-- AddForeignKey +ALTER TABLE "TenantResourceLimit" ADD CONSTRAINT "TenantResourceLimit_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "TenantResourceLimitAlert" ADD CONSTRAINT "TenantResourceLimitAlert_resourceLimitId_fkey" FOREIGN KEY ("resourceLimitId") REFERENCES "TenantResourceLimit"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "TenantResourceLimitAlert" ADD CONSTRAINT "TenantResourceLimitAlert_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE; + -- AddForeignKey ALTER TABLE "TenantVcsProvider" ADD CONSTRAINT "TenantVcsProvider_tenantId_fkey" FOREIGN KEY ("tenantId") REFERENCES "Tenant"("id") ON DELETE CASCADE ON UPDATE CASCADE;