Mirror of https://github.com/hatchet-dev/hatchet.git, synced 2025-12-21 08:40:10 -06:00

fix: loop

The metrics loop in notifyDAGsUpdated now ranges over the rows returned by GetDagDurationsByDagIds, which was extended to also select workflow_id and readable_status, instead of re-walking the input rows and looking each duration up by external ID.
@@ -76,6 +76,7 @@ func (o *OLAPControllerImpl) notifyDAGsUpdated(ctx context.Context, rows []v1.Up
 		tenantId := sqlchelpers.UUIDToStr(row.TenantId)

 		tenantIdToWorkflowIds[tenantId] = append(tenantIdToWorkflowIds[tenantId], row.WorkflowId)
+
 		dagIds = append(dagIds, row.DagId)
 		dagInsertedAts = append(dagInsertedAts, row.DagInsertedAt)
 		readableStatuses = append(readableStatuses, row.ReadableStatus)
@@ -96,21 +97,15 @@ func (o *OLAPControllerImpl) notifyDAGsUpdated(ctx context.Context, rows []v1.Up
 		return err
 	}

-	for _, row := range rows {
-		if row.ReadableStatus == sqlcv1.V1ReadableStatusOlapCOMPLETED || row.ReadableStatus == sqlcv1.V1ReadableStatusOlapFAILED || row.ReadableStatus == sqlcv1.V1ReadableStatusOlapCANCELLED {
-			workflowName := workflowNames[row.WorkflowId]
+	for _, duration := range dagDurations {
+		if duration.ReadableStatus == sqlcv1.V1ReadableStatusOlapCOMPLETED || duration.ReadableStatus == sqlcv1.V1ReadableStatusOlapFAILED || duration.ReadableStatus == sqlcv1.V1ReadableStatusOlapCANCELLED {
+			workflowName := workflowNames[duration.WorkflowID]

 			if workflowName == "" {
 				continue
 			}

-			dagDuration := dagDurations[row.ExternalId]
-
-			if dagDuration == nil {
-				continue
-			}
-
-			prometheus.TenantWorkflowDurationBuckets.WithLabelValues(tenantId, workflowName, string(row.ReadableStatus)).Observe(float64(dagDuration.FinishedAt.Time.Sub(dagDuration.StartedAt.Time).Milliseconds()))
+			prometheus.TenantWorkflowDurationBuckets.WithLabelValues(tenantId, workflowName, string(duration.ReadableStatus)).Observe(float64(duration.FinishedAt.Time.Sub(duration.StartedAt.Time).Milliseconds()))
 		}
 	}

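Why the nil check could go: the old loop walked every updated row and looked its duration up by external ID, so misses had to be skipped; ranging over the query results directly leaves no map miss to guard against. Below is a minimal sketch of the observation pattern the new loop relies on, using the Prometheus Go client; the histogram definition, metric name, labels, and buckets are illustrative stand-ins for the repo's TenantWorkflowDurationBuckets, not its actual declaration.

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// Illustrative stand-in for the TenantWorkflowDurationBuckets histogram used
// in the diff above; name, labels, and buckets are assumptions.
var tenantWorkflowDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name:    "tenant_workflow_duration_ms",
		Help:    "Duration of finished workflow runs in milliseconds.",
		Buckets: prometheus.ExponentialBuckets(10, 2, 12),
	},
	[]string{"tenant_id", "workflow_name", "status"},
)

func main() {
	prometheus.MustRegister(tenantWorkflowDuration)

	started := time.Now().Add(-1500 * time.Millisecond)
	finished := time.Now()

	// Same shape as the loop body: one Observe per finished DAG, labelled by
	// tenant, workflow name, and readable status.
	tenantWorkflowDuration.
		WithLabelValues("tenant-1", "my-workflow", "COMPLETED").
		Observe(float64(finished.Sub(started).Milliseconds()))

	fmt.Println("observed one duration sample")
}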
@@ -1557,7 +1557,9 @@ WITH input AS (
     i.inserted_at,
     d.external_id,
     d.display_name,
-    d.tenant_id
+    d.tenant_id,
+    d.workflow_id,
+    d.readable_status
 FROM
     input i
 JOIN
@@ -1614,6 +1616,8 @@ WITH input AS (
 )
 SELECT
     dd.external_id,
+    dd.workflow_id,
+    dd.readable_status,
     dt.started_at::timestamptz AS started_at,
     dt.finished_at::timestamptz AS finished_at
 FROM
@@ -366,7 +366,9 @@ WITH input AS (
     i.inserted_at,
     d.external_id,
     d.display_name,
-    d.tenant_id
+    d.tenant_id,
+    d.workflow_id,
+    d.readable_status
 FROM
     input i
 JOIN
@@ -423,6 +425,8 @@ WITH input AS (
 )
 SELECT
     dd.external_id,
+    dd.workflow_id,
+    dd.readable_status,
     dt.started_at::timestamptz AS started_at,
     dt.finished_at::timestamptz AS finished_at
 FROM
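The same SELECT-list change appears twice because one of these files is presumably the SQL query definition and the other the Go code sqlc generates from it, so the two must move together. A sketch of that pairing under that assumption follows; the `-- name:` annotation is the standard sqlc convention rather than text from this repo, and the FROM clause is elided because the diff does not show it.

package sqlcsketch

// Sketch of the sqlc-generated query constant assumed above. The SELECT list
// is taken from the diff; everything after FROM is elided, not real.
const getDagDurationsByDagIds = `-- name: GetDagDurationsByDagIds :many
SELECT
    dd.external_id,
    dd.workflow_id,
    dd.readable_status,
    dt.started_at::timestamptz AS started_at,
    dt.finished_at::timestamptz AS finished_at
FROM
    -- joins elided; see the hunks above
`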
@@ -440,9 +444,11 @@ type GetDagDurationsByDagIdsParams struct {
 }

 type GetDagDurationsByDagIdsRow struct {
 	ExternalID pgtype.UUID `json:"external_id"`
-	StartedAt pgtype.Timestamptz `json:"started_at"`
-	FinishedAt pgtype.Timestamptz `json:"finished_at"`
+	WorkflowID pgtype.UUID `json:"workflow_id"`
+	ReadableStatus V1ReadableStatusOlap `json:"readable_status"`
+	StartedAt pgtype.Timestamptz `json:"started_at"`
+	FinishedAt pgtype.Timestamptz `json:"finished_at"`
 }

 func (q *Queries) GetDagDurationsByDagIds(ctx context.Context, db DBTX, arg GetDagDurationsByDagIdsParams) ([]*GetDagDurationsByDagIdsRow, error) {
@@ -459,7 +465,13 @@ func (q *Queries) GetDagDurationsByDagIds(ctx context.Context, db DBTX, arg GetD
 	var items []*GetDagDurationsByDagIdsRow
 	for rows.Next() {
 		var i GetDagDurationsByDagIdsRow
-		if err := rows.Scan(&i.ExternalID, &i.StartedAt, &i.FinishedAt); err != nil {
+		if err := rows.Scan(
+			&i.ExternalID,
+			&i.WorkflowID,
+			&i.ReadableStatus,
+			&i.StartedAt,
+			&i.FinishedAt,
+		); err != nil {
 			return nil, err
 		}
 		items = append(items, &i)
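The generated Scan call grows in lockstep with the Row struct because destinations bind positionally: their order must mirror the SELECT list (external_id, workflow_id, readable_status, started_at, finished_at). A minimal sketch of that invariant using pgx directly; the struct and function names here are stand-ins, and ReadableStatus is a plain string rather than the repo's V1ReadableStatusOlap enum.

package olapsketch

import (
	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgtype"
)

// Stand-in for the generated GetDagDurationsByDagIdsRow.
type dagDurationRow struct {
	ExternalID     pgtype.UUID
	WorkflowID     pgtype.UUID
	ReadableStatus string
	StartedAt      pgtype.Timestamptz
	FinishedAt     pgtype.Timestamptz
}

// scanDurations mirrors the generated loop: one positional Scan per row, in
// exactly the SELECT-list order, collecting row pointers into a slice.
func scanDurations(rows pgx.Rows) ([]*dagDurationRow, error) {
	defer rows.Close()

	var items []*dagDurationRow
	for rows.Next() {
		var i dagDurationRow
		if err := rows.Scan(&i.ExternalID, &i.WorkflowID, &i.ReadableStatus, &i.StartedAt, &i.FinishedAt); err != nil {
			return nil, err
		}
		items = append(items, &i)
	}
	return items, rows.Err()
}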