chore(pre-commit): lint whitespace (#494)

Adds a whitespace linter to the pre-commit hook to ensure consistent formatting.
It also enables linting of other SQL files, such as the sqlc queries, by narrowing the previous blanket `\.sql` exclude to `prisma/migrations/.*\.sql`.
Luca Steeb authored 2024-05-16 14:17:01 +01:00, committed by GitHub
parent f29bd42547 · commit d1a4d35830
54 changed files with 390 additions and 389 deletions

View File

@@ -6,7 +6,9 @@ repos:
- id: mixed-line-ending
args: ["--fix=lf"]
- id: end-of-file-fixer
exclude: \.sql
exclude: prisma/migrations/.*\.sql
- id: trailing-whitespace
exclude: prisma/migrations/.*\.sql
- id: check-yaml
- repo: https://github.com/golangci/golangci-lint
rev: v1.57.1
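
Pieced together, the hooks configuration after this change looks roughly like the following. This is a sketch reconstructed from the hunk above: the hook ids, args, excludes, and the golangci-lint repo and rev appear in the diff, while the parent `pre-commit-hooks` repo URL, its `rev`, and the golangci-lint hook id are assumptions.

```yaml
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks  # assumed parent entry for these hook ids
    rev: v4.5.0  # assumed revision, not shown in the hunk
    hooks:
      - id: mixed-line-ending
        args: ["--fix=lf"]
      - id: end-of-file-fixer
        exclude: prisma/migrations/.*\.sql
      - id: trailing-whitespace
        exclude: prisma/migrations/.*\.sql
      - id: check-yaml
  - repo: https://github.com/golangci/golangci-lint
    rev: v1.57.1
    hooks:
      - id: golangci-lint  # assumed hook id for this repo
```

The whitespace-only changes in the remaining files below are presumably the result of running the new hooks once across the repository (e.g. `pre-commit run --all-files`).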

View File

@@ -259,13 +259,13 @@ message WorkflowRunEvent {
message StepRunResult {
string stepRunId = 1;
string stepReadableId = 2;
string stepReadableId = 2;
string jobRunId = 3;
optional string error = 4;
optional string output = 5;
optional string output = 5;
}
message OverridesData {

View File

@@ -9,7 +9,7 @@ echo "generating certs in dir: $1"
openssl genrsa -out $CERTS_DIR/ca.key 4096
openssl req -new -x509 -key $CERTS_DIR/ca.key -sha256 -subj "/C=US/ST=NY/O=Hatchet" -days 365 -out $CERTS_DIR/ca.cert
# Generate a private key and a certificate for cluster
# Generate a private key and a certificate for cluster
openssl genrsa -out $CERTS_DIR/cluster.key 4096
openssl req -new -key $CERTS_DIR/cluster.key -out $CERTS_DIR/cluster.csr -config $CERTS_DIR/cluster-cert.conf
openssl x509 -req -in $CERTS_DIR/cluster.csr -CA $CERTS_DIR/ca.cert -CAkey $CERTS_DIR/ca.key -CAcreateserial -out $CERTS_DIR/cluster.pem -days 365 -sha256 -extfile $CERTS_DIR/cluster-cert.conf -extensions req_ext

View File

@@ -1,7 +1,7 @@
@tailwind base;
@tailwind components;
@tailwind utilities;
@layer base {
:root {
--background: 0 0% 100%;
@@ -9,63 +9,63 @@
--card: 0 0% 100%;
--card-foreground: 222.2 84% 4.9%;
--popover: 0 0% 100%;
--popover-foreground: 222.2 84% 4.9%;
--primary: 222.2 47.4% 11.2%;
--primary-foreground: 210 40% 98%;
--secondary: 210 40% 96.1%;
--secondary-foreground: 222.2 47.4% 11.2%;
--muted: 210 40% 96.1%;
--muted-foreground: 215.4 16.3% 46.9%;
--accent: 210 40% 96.1%;
--accent-foreground: 222.2 47.4% 11.2%;
--destructive: 0 84.2% 60.2%;
--destructive-foreground: 210 40% 98%;
--border: 214.3 31.8% 91.4%;
--input: 214.3 31.8% 91.4%;
--ring: 222.2 84% 4.9%;
--radius: 0.5rem;
}
.dark {
--background: 222.2 84% 4.9%;
--foreground: 210 40% 98%;
--card: 222.2 84% 4.9%;
--card-foreground: 210 40% 98%;
--popover: 222.2 84% 4.9%;
--popover-foreground: 210 40% 98%;
--primary: 210 40% 98%;
--primary-foreground: 222.2 47.4% 11.2%;
--secondary: 217.2 32.6% 17.5%;
--secondary-foreground: 210 40% 98%;
--muted: 217.2 32.6% 17.5%;
--muted-foreground: 215 20.2% 65.1%;
--accent: 217.2 32.6% 17.5%;
--accent-foreground: 210 40% 98%;
--destructive: 0 62.8% 30.6%;
--destructive-foreground: 210 40% 98%;
--border: 217.2 32.6% 17.5%;
--input: 217.2 32.6% 17.5%;
--ring: 212.7 26.8% 83.9%;
}
}
@layer base {
* {
@apply border-border;

View File

@@ -1,4 +1,4 @@
# Nextra Docs Template
# Nextra Docs Template
This is a template for creating documentation with [Nextra](https://nextra.site).

View File

@@ -16,7 +16,7 @@ You can attach additional metadata when pushing events or triggering workflow ru
hatchet.event.push("user:create", {'userId': '1234'}, options={
"additional_metadata": {
"source": "api" # Arbitrary key-value pair
}
}
})
```
</Tabs.Tab>
@@ -54,7 +54,7 @@ err := c.Event().Push(
<Tabs.Tab>
```python
workflowRunId = hatchet.admin.run_workflow(
"user-workflow",
"user-workflow",
{'userId': '1234'},
options={
'additional_metadata': {
@@ -63,7 +63,7 @@ workflowRunId = hatchet.admin.run_workflow(
}
)
```
</Tabs.Tab>
</Tabs.Tab>
<Tabs.Tab>
```typescript
const workflowRunId = await hatchet.admin.run_workflow(

View File

@@ -98,17 +98,17 @@ To stream a file from a step context, encode the file data as base64 and stream
def step1(self, context: Context):
# Get the directory of the current script
script_dir = os.path.dirname(os.path.abspath(__file__))
# Construct the path to the image file relative to the script's directory
image_path = os.path.join(script_dir, "image.jpeg")
# Load the image file
with open(image_path, "rb") as image_file:
image_data = image_file.read()
# Encode the image data as base64
base64_image = base64.b64encode(image_data).decode('utf-8')
# Stream the base64-encoded image data
context.put_stream(base64_image)
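
On the receiving end, the streamed payload is just a base64 string, so the original bytes can be recovered with the standard library. A minimal sketch, assuming the string has already been obtained from whatever stream listener the SDK provides; the function name and output path below are hypothetical:

```python
import base64

def decode_streamed_image(base64_payload: str, output_path: str = "image_copy.jpeg") -> None:
    # Reverse the encoding done in step1: base64 text -> raw image bytes
    image_data = base64.b64decode(base64_payload)

    # Write the bytes back out so the round trip can be verified
    with open(output_path, "wb") as image_file:
        image_file.write(image_data)
```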

View File

@@ -5,10 +5,10 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**code** | **int** | a custom Hatchet error code | [optional]
**field** | **str** | the field that this error is associated with, if applicable | [optional]
**description** | **str** | a description for this error |
**docs_link** | **str** | a link to the documentation for this error, if it exists | [optional]
**code** | **int** | a custom Hatchet error code | [optional]
**field** | **str** | the field that this error is associated with, if applicable | [optional]
**description** | **str** | a description for this error |
**docs_link** | **str** | a link to the documentation for this error, if it exists | [optional]
## Example

View File

@@ -5,7 +5,7 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**errors** | [**List[APIError]**](APIError.md) | |
**errors** | [**List[APIError]**](APIError.md) | |
## Example

View File

@@ -5,7 +5,7 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**auth** | [**APIMetaAuth**](APIMetaAuth.md) | | [optional]
**auth** | [**APIMetaAuth**](APIMetaAuth.md) | | [optional]
## Example

View File

@@ -5,7 +5,7 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**schemes** | **List[str]** | the supported types of authentication | [optional]
**schemes** | **List[str]** | the supported types of authentication | [optional]
## Example

View File

@@ -5,8 +5,8 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**name** | **str** | the name of the integration |
**enabled** | **bool** | whether this integration is enabled on the instance |
**name** | **str** | the name of the integration |
**enabled** | **bool** | whether this integration is enabled on the instance |
## Example

View File

@@ -5,9 +5,9 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**id** | **str** | the id of this resource, in UUID format |
**created_at** | **datetime** | the time that this resource was created |
**updated_at** | **datetime** | the time that this resource was last updated |
**id** | **str** | the id of this resource, in UUID format |
**created_at** | **datetime** | the time that this resource was created |
**updated_at** | **datetime** | the time that this resource was last updated |
## Example

View File

@@ -5,9 +5,9 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**current_page** | **int** | the current page | [optional]
**next_page** | **int** | the next page | [optional]
**num_pages** | **int** | the total number of pages for listing | [optional]
**current_page** | **int** | the current page | [optional]
**next_page** | **int** | the next page | [optional]
**num_pages** | **int** | the total number of pages for listing | [optional]
## Example

View File

@@ -5,14 +5,14 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**metadata** | [**APIResourceMeta**](APIResourceMeta.md) | |
**name** | **str** | The name of the workflow. |
**description** | **str** | The description of the workflow. | [optional]
**versions** | [**List[WorkflowVersionMeta]**](WorkflowVersionMeta.md) | | [optional]
**tags** | [**List[WorkflowTag]**](WorkflowTag.md) | The tags of the workflow. | [optional]
**last_run** | [**WorkflowRun**](WorkflowRun.md) | | [optional]
**jobs** | [**List[Job]**](Job.md) | The jobs of the workflow. | [optional]
**deployment** | [**WorkflowDeploymentConfig**](WorkflowDeploymentConfig.md) | | [optional]
**metadata** | [**APIResourceMeta**](APIResourceMeta.md) | |
**name** | **str** | The name of the workflow. |
**description** | **str** | The description of the workflow. | [optional]
**versions** | [**List[WorkflowVersionMeta]**](WorkflowVersionMeta.md) | | [optional]
**tags** | [**List[WorkflowTag]**](WorkflowTag.md) | The tags of the workflow. | [optional]
**last_run** | [**WorkflowRun**](WorkflowRun.md) | | [optional]
**jobs** | [**List[Job]**](Job.md) | The jobs of the workflow. | [optional]
**deployment** | [**WorkflowDeploymentConfig**](WorkflowDeploymentConfig.md) | | [optional]
## Example

View File

@@ -78,7 +78,7 @@ with hatchet_sdk.clients.rest.ApiClient(configuration) as api_client:
Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**step_run** | **str**| The step run id |
**step_run** | **str**| The step run id |
### Return type
@@ -167,8 +167,8 @@ with hatchet_sdk.clients.rest.ApiClient(configuration) as api_client:
Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**step_run** | **str**| The step run id |
**create_pull_request_from_step_run** | [**CreatePullRequestFromStepRun**](CreatePullRequestFromStepRun.md)| The input to create a pull request |
**step_run** | **str**| The step run id |
**create_pull_request_from_step_run** | [**CreatePullRequestFromStepRun**](CreatePullRequestFromStepRun.md)| The input to create a pull request |
### Return type
@@ -256,7 +256,7 @@ with hatchet_sdk.clients.rest.ApiClient(configuration) as api_client:
Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**workflow** | **str**| The workflow id |
**workflow** | **str**| The workflow id |
### Return type
@@ -343,7 +343,7 @@ with hatchet_sdk.clients.rest.ApiClient(configuration) as api_client:
Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**tenant** | **str**| The tenant id |
**tenant** | **str**| The tenant id |
### Return type
@@ -431,8 +431,8 @@ with hatchet_sdk.clients.rest.ApiClient(configuration) as api_client:
Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**tenant** | **str**| The tenant id |
**workflow_run** | **str**| The workflow run id |
**tenant** | **str**| The tenant id |
**workflow_run** | **str**| The workflow run id |
### Return type
@@ -523,11 +523,11 @@ with hatchet_sdk.clients.rest.ApiClient(configuration) as api_client:
Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**tenant** | **str**| The tenant id |
**offset** | **int**| The number to skip | [optional]
**limit** | **int**| The number to limit by | [optional]
**event_id** | **str**| The event id to get runs for. | [optional]
**workflow_id** | **str**| The workflow id to get runs for. | [optional]
**tenant** | **str**| The tenant id |
**offset** | **int**| The number to skip | [optional]
**limit** | **int**| The number to limit by | [optional]
**event_id** | **str**| The event id to get runs for. | [optional]
**workflow_id** | **str**| The workflow id to get runs for. | [optional]
### Return type
@@ -617,9 +617,9 @@ with hatchet_sdk.clients.rest.ApiClient(configuration) as api_client:
Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**tenant** | **str**| The tenant id |
**workflow_run** | **str**| The workflow run id |
**state** | [**PullRequestState**](.md)| The pull request state | [optional]
**tenant** | **str**| The tenant id |
**workflow_run** | **str**| The workflow run id |
**state** | [**PullRequestState**](.md)| The pull request state | [optional]
### Return type
@@ -708,8 +708,8 @@ with hatchet_sdk.clients.rest.ApiClient(configuration) as api_client:
Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**workflow** | **str**| The workflow id |
**link_github_repository_request** | [**LinkGithubRepositoryRequest**](LinkGithubRepositoryRequest.md)| The input to link a github repository |
**workflow** | **str**| The workflow id |
**link_github_repository_request** | [**LinkGithubRepositoryRequest**](LinkGithubRepositoryRequest.md)| The input to link a github repository |
### Return type
@@ -798,8 +798,8 @@ with hatchet_sdk.clients.rest.ApiClient(configuration) as api_client:
Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**workflow** | **str**| The workflow id |
**version** | **str**| The workflow version. If not supplied, the latest version is fetched. | [optional]
**workflow** | **str**| The workflow id |
**version** | **str**| The workflow version. If not supplied, the latest version is fetched. | [optional]
### Return type
@@ -888,8 +888,8 @@ with hatchet_sdk.clients.rest.ApiClient(configuration) as api_client:
Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**workflow** | **str**| The workflow id |
**version** | **str**| The workflow version. If not supplied, the latest version is fetched. | [optional]
**workflow** | **str**| The workflow id |
**version** | **str**| The workflow version. If not supplied, the latest version is fetched. | [optional]
### Return type

View File

@@ -5,12 +5,12 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**metadata** | [**APIResourceMeta**](APIResourceMeta.md) | |
**git_repo_name** | **str** | The repository name. |
**git_repo_owner** | **str** | The repository owner. |
**git_repo_branch** | **str** | The repository branch. |
**github_app_installation** | [**GithubAppInstallation**](GithubAppInstallation.md) | The Github App installation. | [optional]
**github_app_installation_id** | **str** | The id of the Github App installation. |
**metadata** | [**APIResourceMeta**](APIResourceMeta.md) | |
**git_repo_name** | **str** | The repository name. |
**git_repo_owner** | **str** | The repository owner. |
**git_repo_branch** | **str** | The repository branch. |
**github_app_installation** | [**GithubAppInstallation**](GithubAppInstallation.md) | The Github App installation. | [optional]
**github_app_installation_id** | **str** | The id of the Github App installation. |
## Example

View File

@@ -5,9 +5,9 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**metadata** | [**APIResourceMeta**](APIResourceMeta.md) | | [optional]
**rows** | [**List[Workflow]**](Workflow.md) | | [optional]
**pagination** | [**PaginationResponse**](PaginationResponse.md) | | [optional]
**metadata** | [**APIResourceMeta**](APIResourceMeta.md) | | [optional]
**rows** | [**List[Workflow]**](Workflow.md) | | [optional]
**pagination** | [**PaginationResponse**](PaginationResponse.md) | | [optional]
## Example

View File

@@ -5,18 +5,18 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**metadata** | [**APIResourceMeta**](APIResourceMeta.md) | |
**tenant_id** | **str** | |
**workflow_version_id** | **str** | |
**workflow_version** | [**WorkflowVersion**](WorkflowVersion.md) | | [optional]
**status** | [**WorkflowRunStatus**](WorkflowRunStatus.md) | |
**display_name** | **str** | | [optional]
**job_runs** | [**List[JobRun]**](JobRun.md) | | [optional]
**triggered_by** | [**WorkflowRunTriggeredBy**](WorkflowRunTriggeredBy.md) | |
**input** | **Dict[str, object]** | | [optional]
**error** | **str** | | [optional]
**started_at** | **datetime** | | [optional]
**finished_at** | **datetime** | | [optional]
**metadata** | [**APIResourceMeta**](APIResourceMeta.md) | |
**tenant_id** | **str** | |
**workflow_version_id** | **str** | |
**workflow_version** | [**WorkflowVersion**](WorkflowVersion.md) | | [optional]
**status** | [**WorkflowRunStatus**](WorkflowRunStatus.md) | |
**display_name** | **str** | | [optional]
**job_runs** | [**List[JobRun]**](JobRun.md) | | [optional]
**triggered_by** | [**WorkflowRunTriggeredBy**](WorkflowRunTriggeredBy.md) | |
**input** | **Dict[str, object]** | | [optional]
**error** | **str** | | [optional]
**started_at** | **datetime** | | [optional]
**finished_at** | **datetime** | | [optional]
## Example

View File

@@ -72,9 +72,9 @@ with hatchet_sdk.clients.rest.ApiClient(configuration) as api_client:
Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**workflow** | **str**| The workflow id |
**trigger_workflow_run_request** | [**TriggerWorkflowRunRequest**](TriggerWorkflowRunRequest.md)| The input to the workflow run |
**version** | **str**| The workflow version. If not supplied, the latest version is fetched. | [optional]
**workflow** | **str**| The workflow id |
**trigger_workflow_run_request** | [**TriggerWorkflowRunRequest**](TriggerWorkflowRunRequest.md)| The input to the workflow run |
**version** | **str**| The workflow version. If not supplied, the latest version is fetched. | [optional]
### Return type

View File

@@ -5,8 +5,8 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**rows** | [**List[WorkflowRun]**](WorkflowRun.md) | | [optional]
**pagination** | [**PaginationResponse**](PaginationResponse.md) | | [optional]
**rows** | [**List[WorkflowRun]**](WorkflowRun.md) | | [optional]
**pagination** | [**PaginationResponse**](PaginationResponse.md) | | [optional]
## Example

View File

@@ -5,12 +5,12 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**metadata** | [**APIResourceMeta**](APIResourceMeta.md) | |
**parent_id** | **str** | |
**event_id** | **str** | | [optional]
**event** | [**Event**](Event.md) | | [optional]
**cron_parent_id** | **str** | | [optional]
**cron_schedule** | **str** | | [optional]
**metadata** | [**APIResourceMeta**](APIResourceMeta.md) | |
**parent_id** | **str** | |
**event_id** | **str** | | [optional]
**event** | [**Event**](Event.md) | | [optional]
**cron_parent_id** | **str** | | [optional]
**cron_schedule** | **str** | | [optional]
## Example

View File

@@ -5,8 +5,8 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**name** | **str** | The name of the workflow. |
**color** | **str** | The description of the workflow. |
**name** | **str** | The name of the workflow. |
**color** | **str** | The description of the workflow. |
## Example

View File

@@ -5,8 +5,8 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**parent_id** | **str** | | [optional]
**cron** | **str** | | [optional]
**parent_id** | **str** | | [optional]
**cron** | **str** | | [optional]
## Example

View File

@@ -5,8 +5,8 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**parent_id** | **str** | | [optional]
**event_key** | **str** | | [optional]
**parent_id** | **str** | | [optional]
**event_key** | **str** | | [optional]
## Example

View File

@@ -5,11 +5,11 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**metadata** | [**APIResourceMeta**](APIResourceMeta.md) | | [optional]
**workflow_version_id** | **str** | | [optional]
**tenant_id** | **str** | | [optional]
**events** | [**List[WorkflowTriggerEventRef]**](WorkflowTriggerEventRef.md) | | [optional]
**crons** | [**List[WorkflowTriggerCronRef]**](WorkflowTriggerCronRef.md) | | [optional]
**metadata** | [**APIResourceMeta**](APIResourceMeta.md) | | [optional]
**workflow_version_id** | **str** | | [optional]
**tenant_id** | **str** | | [optional]
**events** | [**List[WorkflowTriggerEventRef]**](WorkflowTriggerEventRef.md) | | [optional]
**crons** | [**List[WorkflowTriggerCronRef]**](WorkflowTriggerCronRef.md) | | [optional]
## Example

View File

@@ -5,13 +5,13 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**metadata** | [**APIResourceMeta**](APIResourceMeta.md) | |
**version** | **str** | The version of the workflow. |
**order** | **int** | |
**workflow_id** | **str** | |
**workflow** | [**Workflow**](Workflow.md) | | [optional]
**triggers** | [**WorkflowTriggers**](WorkflowTriggers.md) | | [optional]
**jobs** | [**List[Job]**](Job.md) | | [optional]
**metadata** | [**APIResourceMeta**](APIResourceMeta.md) | |
**version** | **str** | The version of the workflow. |
**order** | **int** | |
**workflow_id** | **str** | |
**workflow** | [**Workflow**](Workflow.md) | | [optional]
**triggers** | [**WorkflowTriggers**](WorkflowTriggers.md) | | [optional]
**jobs** | [**List[Job]**](Job.md) | | [optional]
## Example

View File

@@ -5,7 +5,7 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**raw_definition** | **str** | The raw YAML definition of the workflow. |
**raw_definition** | **str** | The raw YAML definition of the workflow. |
## Example

View File

@@ -5,11 +5,11 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**metadata** | [**APIResourceMeta**](APIResourceMeta.md) | |
**version** | **str** | The version of the workflow. |
**order** | **int** | |
**workflow_id** | **str** | |
**workflow** | [**Workflow**](Workflow.md) | | [optional]
**metadata** | [**APIResourceMeta**](APIResourceMeta.md) | |
**version** | **str** | The version of the workflow. |
**order** | **int** | |
**workflow_id** | **str** | |
**workflow** | [**Workflow**](Workflow.md) | | [optional]
## Example

View File

@@ -1,5 +1,5 @@
#!/bin/bash
# This scripts generates local encryption keys for development.
# This scripts generates local encryption keys for development.
set -eux

View File

@@ -21,4 +21,4 @@ INSERT INTO "APIToken" (
sqlc.narg('tenantId')::uuid,
sqlc.narg('name')::text,
@expiresAt::timestamp
) RETURNING *;
) RETURNING *;

View File

@@ -55,4 +55,4 @@ SET
"lastHeartbeatAt" = sqlc.arg('lastHeartbeatAt')::timestamp
WHERE
"id" = sqlc.arg('id')::uuid
RETURNING *;
RETURNING *;

View File

@@ -127,9 +127,9 @@ ORDER BY
-- name: ListEventsByIDs :many
SELECT
*
*
FROM
"Event" as events
WHERE
"tenantId" = @tenantId::uuid AND
"id" = ANY (sqlc.arg('ids')::uuid[]);
"id" = ANY (sqlc.arg('ids')::uuid[]);

View File

@@ -313,7 +313,7 @@ func (q *Queries) ListEvents(ctx context.Context, db DBTX, arg ListEventsParams)
const listEventsByIDs = `-- name: ListEventsByIDs :many
SELECT
id, "createdAt", "updatedAt", "deletedAt", key, "tenantId", "replayedFromId", data, "additionalMetadata"
id, "createdAt", "updatedAt", "deletedAt", key, "tenantId", "replayedFromId", data, "additionalMetadata"
FROM
"Event" as events
WHERE

View File

@@ -6,7 +6,7 @@ SET
"startedAt" = COALESCE(sqlc.narg('startedAt')::timestamp, "startedAt"),
"finishedAt" = COALESCE(sqlc.narg('finishedAt')::timestamp, "finishedAt"),
"scheduleTimeoutAt" = COALESCE(sqlc.narg('scheduleTimeoutAt')::timestamp, "scheduleTimeoutAt"),
"status" = CASE
"status" = CASE
-- Final states are final, cannot be updated
WHEN "status" IN ('SUCCEEDED', 'FAILED', 'CANCELLED') THEN "status"
ELSE COALESCE(sqlc.narg('status'), "status")
@@ -16,7 +16,7 @@ SET
"error" = COALESCE(sqlc.narg('error')::text, "error"),
"cancelledAt" = COALESCE(sqlc.narg('cancelledAt')::timestamp, "cancelledAt"),
"cancelledReason" = COALESCE(sqlc.narg('cancelledReason')::text, "cancelledReason")
WHERE
WHERE
"id" = @id::uuid AND
"tenantId" = @tenantId::uuid
RETURNING "GetGroupKeyRun".*;
@@ -156,4 +156,4 @@ WHERE
"id" = @getGroupKeyRunId::uuid AND
"tenantId" = @tenantId::uuid AND
EXISTS (SELECT 1 FROM selected_ticker)
RETURNING "GetGroupKeyRun"."id", "GetGroupKeyRun"."tickerId";
RETURNING "GetGroupKeyRun"."id", "GetGroupKeyRun"."tickerId";

View File

@@ -336,7 +336,7 @@ SET
"startedAt" = COALESCE($2::timestamp, "startedAt"),
"finishedAt" = COALESCE($3::timestamp, "finishedAt"),
"scheduleTimeoutAt" = COALESCE($4::timestamp, "scheduleTimeoutAt"),
"status" = CASE
"status" = CASE
-- Final states are final, cannot be updated
WHEN "status" IN ('SUCCEEDED', 'FAILED', 'CANCELLED') THEN "status"
ELSE COALESCE($5, "status")
@@ -346,7 +346,7 @@ SET
"error" = COALESCE($8::text, "error"),
"cancelledAt" = COALESCE($9::timestamp, "cancelledAt"),
"cancelledReason" = COALESCE($10::text, "cancelledReason")
WHERE
WHERE
"id" = $11::uuid AND
"tenantId" = $12::uuid
RETURNING "GetGroupKeyRun".id, "GetGroupKeyRun"."createdAt", "GetGroupKeyRun"."updatedAt", "GetGroupKeyRun"."deletedAt", "GetGroupKeyRun"."tenantId", "GetGroupKeyRun"."workerId", "GetGroupKeyRun"."tickerId", "GetGroupKeyRun".status, "GetGroupKeyRun".input, "GetGroupKeyRun".output, "GetGroupKeyRun"."requeueAfter", "GetGroupKeyRun".error, "GetGroupKeyRun"."startedAt", "GetGroupKeyRun"."finishedAt", "GetGroupKeyRun"."timeoutAt", "GetGroupKeyRun"."cancelledAt", "GetGroupKeyRun"."cancelledReason", "GetGroupKeyRun"."cancelledError", "GetGroupKeyRun"."workflowRunId", "GetGroupKeyRun"."scheduleTimeoutAt"

View File

@@ -18,10 +18,10 @@ WITH stepRuns AS (
FROM "StepRun"
WHERE "id" = @stepRunId::uuid
) AND
"tenantId" = @tenantId::uuid
"tenantId" = @tenantId::uuid
)
UPDATE "JobRun"
SET "status" = CASE
SET "status" = CASE
-- Final states are final, cannot be updated
WHEN "status" IN ('SUCCEEDED', 'FAILED', 'CANCELLED') THEN "status"
-- NOTE: Order of the following conditions is important
@@ -34,7 +34,7 @@ SET "status" = CASE
-- When no step runs exist that are not succeeded, then the job is succeeded
WHEN s.succeededRuns > 0 AND s.pendingRuns = 0 AND s.runningRuns = 0 AND s.failedRuns = 0 AND s.cancelledRuns = 0 THEN 'SUCCEEDED'
ELSE "status"
END, "finishedAt" = CASE
END, "finishedAt" = CASE
-- Final states are final, cannot be updated
WHEN "finishedAt" IS NOT NULL THEN "finishedAt"
WHEN s.runningRuns > 0 THEN NULL
@@ -43,7 +43,7 @@ END, "finishedAt" = CASE
-- When no step runs exist that are not succeeded, then the job is finished
WHEN s.succeededRuns > 0 AND s.pendingRuns = 0 AND s.runningRuns = 0 AND s.failedRuns = 0 AND s.cancelledRuns = 0 THEN NOW()
ELSE "finishedAt"
END, "startedAt" = CASE
END, "startedAt" = CASE
-- Started at is final, cannot be changed
WHEN "startedAt" IS NOT NULL THEN "startedAt"
-- If steps are running (or have finished), then set the started at time
@@ -76,7 +76,7 @@ INSERT INTO "JobRunLookupData" (
@tenantId::uuid,
jsonb_set('{}', @fieldPath::text[], @jsonData::jsonb, true)
) ON CONFLICT ("jobRunId", "tenantId") DO UPDATE
SET
SET
"data" = jsonb_set("JobRunLookupData"."data", @fieldPath::text[], @jsonData::jsonb, true),
"updatedAt" = CURRENT_TIMESTAMP;
@@ -91,11 +91,11 @@ WITH readable_id AS (
)
)
UPDATE "JobRunLookupData"
SET
SET
"data" = jsonb_set(
"JobRunLookupData"."data",
ARRAY['steps', (SELECT "readableId" FROM readable_id)],
@jsonData::jsonb,
"JobRunLookupData"."data",
ARRAY['steps', (SELECT "readableId" FROM readable_id)],
@jsonData::jsonb,
true
),
"updatedAt" = CURRENT_TIMESTAMP
@@ -126,4 +126,4 @@ FROM
WHERE
jr."tenantId" = @tenantId::uuid
AND jr."workflowRunId" = @workflowRunId::uuid
AND jr."jobId" = @jobId::uuid;
AND jr."jobId" = @jobId::uuid;

View File

@@ -92,10 +92,10 @@ WITH stepRuns AS (
FROM "StepRun"
WHERE "id" = $1::uuid
) AND
"tenantId" = $2::uuid
"tenantId" = $2::uuid
)
UPDATE "JobRun"
SET "status" = CASE
SET "status" = CASE
-- Final states are final, cannot be updated
WHEN "status" IN ('SUCCEEDED', 'FAILED', 'CANCELLED') THEN "status"
-- NOTE: Order of the following conditions is important
@@ -108,7 +108,7 @@ SET "status" = CASE
-- When no step runs exist that are not succeeded, then the job is succeeded
WHEN s.succeededRuns > 0 AND s.pendingRuns = 0 AND s.runningRuns = 0 AND s.failedRuns = 0 AND s.cancelledRuns = 0 THEN 'SUCCEEDED'
ELSE "status"
END, "finishedAt" = CASE
END, "finishedAt" = CASE
-- Final states are final, cannot be updated
WHEN "finishedAt" IS NOT NULL THEN "finishedAt"
WHEN s.runningRuns > 0 THEN NULL
@@ -117,7 +117,7 @@ END, "finishedAt" = CASE
-- When no step runs exist that are not succeeded, then the job is finished
WHEN s.succeededRuns > 0 AND s.pendingRuns = 0 AND s.runningRuns = 0 AND s.failedRuns = 0 AND s.cancelledRuns = 0 THEN NOW()
ELSE "finishedAt"
END, "startedAt" = CASE
END, "startedAt" = CASE
-- Started at is final, cannot be changed
WHEN "startedAt" IS NOT NULL THEN "startedAt"
-- If steps are running (or have finished), then set the started at time
@@ -173,11 +173,11 @@ WITH readable_id AS (
)
)
UPDATE "JobRunLookupData"
SET
SET
"data" = jsonb_set(
"JobRunLookupData"."data",
ARRAY['steps', (SELECT "readableId" FROM readable_id)],
$1::jsonb,
"JobRunLookupData"."data",
ARRAY['steps', (SELECT "readableId" FROM readable_id)],
$1::jsonb,
true
),
"updatedAt" = CURRENT_TIMESTAMP
@@ -256,7 +256,7 @@ INSERT INTO "JobRunLookupData" (
$2::uuid,
jsonb_set('{}', $3::text[], $4::jsonb, true)
) ON CONFLICT ("jobRunId", "tenantId") DO UPDATE
SET
SET
"data" = jsonb_set("JobRunLookupData"."data", $3::text[], $4::jsonb, true),
"updatedAt" = CURRENT_TIMESTAMP
`

View File

@@ -1,9 +1,9 @@
-- name: UpsertRateLimit :one
INSERT INTO "RateLimit" (
"tenantId",
"key",
"limitValue",
"value",
"tenantId",
"key",
"limitValue",
"value",
"window"
) VALUES (
@tenantId::uuid,
@@ -11,7 +11,7 @@ INSERT INTO "RateLimit" (
sqlc.arg('limit')::int,
sqlc.arg('limit')::int,
COALESCE(sqlc.narg('window')::text, '1 minute')
) ON CONFLICT ("tenantId", "key") DO UPDATE SET
) ON CONFLICT ("tenantId", "key") DO UPDATE SET
"limitValue" = sqlc.arg('limit')::int,
"window" = COALESCE(sqlc.narg('window')::text, '1 minute')
RETURNING *;
RETURNING *;

View File

@@ -13,10 +13,10 @@ import (
const upsertRateLimit = `-- name: UpsertRateLimit :one
INSERT INTO "RateLimit" (
"tenantId",
"key",
"limitValue",
"value",
"tenantId",
"key",
"limitValue",
"value",
"window"
) VALUES (
$1::uuid,
@@ -24,7 +24,7 @@ INSERT INTO "RateLimit" (
$3::int,
$3::int,
COALESCE($4::text, '1 minute')
) ON CONFLICT ("tenantId", "key") DO UPDATE SET
) ON CONFLICT ("tenantId", "key") DO UPDATE SET
"limitValue" = $3::int,
"window" = COALESCE($4::text, '1 minute')
RETURNING "tenantId", key, "limitValue", value, "window", "lastRefill"

View File

@@ -1356,4 +1356,3 @@ ALTER TABLE "_WorkflowToWorkflowTag" ADD CONSTRAINT "_WorkflowToWorkflowTag_A_fk
-- AddForeignKey
ALTER TABLE "_WorkflowToWorkflowTag" ADD CONSTRAINT "_WorkflowToWorkflowTag_B_fkey" FOREIGN KEY ("B") REFERENCES "WorkflowTag"("id") ON DELETE CASCADE ON UPDATE CASCADE;

View File

@@ -40,7 +40,7 @@ JOIN
"JobRunLookupData" jrld ON jr."id" = jrld."jobRunId"
JOIN
"Job" j ON jr."jobId" = j."id"
JOIN
JOIN
"WorkflowRun" wr ON jr."workflowRunId" = wr."id"
JOIN
"WorkflowVersion" wv ON wr."workflowVersionId" = wv."id"
@@ -59,29 +59,29 @@ WITH job_run AS (
FROM "JobRun"
WHERE "id" = @jobRunId::uuid
)
SELECT
SELECT
DISTINCT ON (child_run."id")
child_run."id" AS "id"
FROM
FROM
"StepRun" AS child_run
LEFT JOIN
LEFT JOIN
"_StepRunOrder" AS step_run_order ON step_run_order."B" = child_run."id"
JOIN
job_run ON true
WHERE
WHERE
child_run."jobRunId" = @jobRunId::uuid
AND child_run."status" = 'PENDING'
AND job_run."status" = 'RUNNING'
-- case on whether parentStepRunId is null
AND (
(sqlc.narg('parentStepRunId')::uuid IS NULL AND step_run_order."A" IS NULL) OR
(sqlc.narg('parentStepRunId')::uuid IS NULL AND step_run_order."A" IS NULL) OR
(
step_run_order."A" = sqlc.narg('parentStepRunId')::uuid
AND NOT EXISTS (
SELECT 1
FROM "_StepRunOrder" AS parent_order
JOIN "StepRun" AS parent_run ON parent_order."A" = parent_run."id"
WHERE
WHERE
parent_order."B" = child_run."id"
AND parent_run."status" != 'SUCCEEDED'
)
@@ -130,7 +130,7 @@ SET
WHEN sqlc.narg('rerun')::boolean THEN NULL
ELSE COALESCE(sqlc.narg('finishedAt')::timestamp, "finishedAt")
END,
"status" = CASE
"status" = CASE
-- if this is a rerun, we permit status updates
WHEN sqlc.narg('rerun')::boolean THEN COALESCE(sqlc.narg('status'), "status")
-- Final states are final, cannot be updated
@@ -160,7 +160,7 @@ SET
END,
"retryCount" = COALESCE(sqlc.narg('retryCount')::int, "retryCount"),
"semaphoreReleased" = COALESCE(sqlc.narg('semaphoreReleased')::boolean, "semaphoreReleased")
WHERE
WHERE
"id" = @id::uuid AND
"tenantId" = @tenantId::uuid
RETURNING "StepRun".*;
@@ -187,9 +187,9 @@ WITH RECURSIVE currStepRun AS (
FROM "StepRun" sr
JOIN "_StepRunOrder" sro ON sr."id" = sro."B"
WHERE sro."A" = (SELECT "id" FROM currStepRun)
UNION ALL
SELECT sr."id", sr."status"
FROM "StepRun" sr
JOIN "_StepRunOrder" sro ON sr."id" = sro."B"
@@ -223,7 +223,7 @@ RETURNING sr.*;
-- name: UpdateStepRunOverridesData :one
UPDATE
"StepRun" AS sr
SET
SET
"updatedAt" = CURRENT_TIMESTAMP,
"input" = jsonb_set("input", @fieldPath::text[], @jsonData::jsonb, true),
"callerFiles" = jsonb_set("callerFiles", @overridesKey::text[], to_jsonb(@callerFile::text), true)
@@ -353,7 +353,7 @@ step_runs AS (
SELECT 1
FROM "_StepRunOrder" AS order_table
JOIN "StepRun" AS prev_sr ON order_table."A" = prev_sr."id"
WHERE
WHERE
order_table."B" = sr."id"
AND prev_sr."status" != 'SUCCEEDED'
)
@@ -376,7 +376,7 @@ SET
-- requeue after now plus 4 seconds
"requeueAfter" = CURRENT_TIMESTAMP + INTERVAL '4 seconds',
"updatedAt" = CURRENT_TIMESTAMP
FROM
FROM
locked_step_runs
WHERE
"StepRun"."id" = locked_step_runs."id"
@@ -425,7 +425,7 @@ step_runs AS (
SELECT 1
FROM "_StepRunOrder" AS order_table
JOIN "StepRun" AS prev_sr ON order_table."A" = prev_sr."id"
WHERE
WHERE
order_table."B" = sr."id"
AND prev_sr."status" != 'SUCCEEDED'
)
@@ -448,7 +448,7 @@ SET
-- requeue after now plus 4 seconds
"requeueAfter" = CURRENT_TIMESTAMP + INTERVAL '4 seconds',
"updatedAt" = CURRENT_TIMESTAMP
FROM
FROM
locked_step_runs
WHERE
"StepRun"."id" = locked_step_runs."id"
@@ -460,7 +460,7 @@ WITH valid_workers AS (
w."id", w."dispatcherId", COALESCE(ws."slots", 100) AS "slots"
FROM
"Worker" w
LEFT JOIN
LEFT JOIN
"WorkerSemaphore" ws ON w."id" = ws."workerId"
WHERE
w."tenantId" = @tenantId::uuid
@@ -494,7 +494,7 @@ WITH valid_workers AS (
w."id", w."dispatcherId", COALESCE(ws."slots", 100) AS "slots"
FROM
"Worker" w
LEFT JOIN
LEFT JOIN
"WorkerSemaphore" ws ON w."id" = ws."workerId"
WHERE
w."tenantId" = @tenantId::uuid
@@ -557,13 +557,13 @@ update_step_run AS (
"tenantId" = @tenantId::uuid AND
"status" = 'PENDING_ASSIGNMENT' AND
EXISTS (SELECT 1 FROM selected_worker)
RETURNING
"StepRun"."id", "StepRun"."workerId",
RETURNING
"StepRun"."id", "StepRun"."workerId",
(SELECT "dispatcherId" FROM selected_worker) AS "dispatcherId"
)
SELECT ts."totalSlots"::int, usr."id", usr."workerId", usr."dispatcherId"
FROM total_slots ts
LEFT JOIN update_step_run usr ON true;
LEFT JOIN update_step_run usr ON true;
-- name: UpdateWorkerSemaphore :one
WITH step_run AS (
@@ -588,7 +588,7 @@ UPDATE
SET
-- This shouldn't happen, but we set guardrails to prevent negative slots or slots over
-- the worker's maxRuns
"slots" = CASE
"slots" = CASE
WHEN (ws."slots" + @inc::int) < 0 THEN 0
WHEN (ws."slots" + @inc::int) > COALESCE(worker."maxRuns", 100) THEN COALESCE(worker."maxRuns", 100)
ELSE (ws."slots" + @inc::int)
@@ -713,4 +713,4 @@ FROM
WHERE
srl."tenantId" = lrl."tenantId" AND
srl."key" = lrl."key"
RETURNING srl.*;
RETURNING srl.*;

View File

@@ -101,7 +101,7 @@ WITH valid_workers AS (
w."id", w."dispatcherId", COALESCE(ws."slots", 100) AS "slots"
FROM
"Worker" w
LEFT JOIN
LEFT JOIN
"WorkerSemaphore" ws ON w."id" = ws."workerId"
WHERE
w."tenantId" = $1::uuid
@@ -164,8 +164,8 @@ update_step_run AS (
"tenantId" = $1::uuid AND
"status" = 'PENDING_ASSIGNMENT' AND
EXISTS (SELECT 1 FROM selected_worker)
RETURNING
"StepRun"."id", "StepRun"."workerId",
RETURNING
"StepRun"."id", "StepRun"."workerId",
(SELECT "dispatcherId" FROM selected_worker) AS "dispatcherId"
)
SELECT ts."totalSlots"::int, usr."id", usr."workerId", usr."dispatcherId"
@@ -380,7 +380,7 @@ JOIN
"JobRunLookupData" jrld ON jr."id" = jrld."jobRunId"
JOIN
"Job" j ON jr."jobId" = j."id"
JOIN
JOIN
"WorkflowRun" wr ON jr."workflowRunId" = wr."id"
JOIN
"WorkflowVersion" wv ON wr."workflowVersionId" = wv."id"
@@ -489,7 +489,7 @@ WITH valid_workers AS (
w."id", w."dispatcherId", COALESCE(ws."slots", 100) AS "slots"
FROM
"Worker" w
LEFT JOIN
LEFT JOIN
"WorkerSemaphore" ws ON w."id" = ws."workerId"
WHERE
w."tenantId" = $1::uuid
@@ -555,29 +555,29 @@ WITH job_run AS (
FROM "JobRun"
WHERE "id" = $1::uuid
)
SELECT
SELECT
DISTINCT ON (child_run."id")
child_run."id" AS "id"
FROM
FROM
"StepRun" AS child_run
LEFT JOIN
LEFT JOIN
"_StepRunOrder" AS step_run_order ON step_run_order."B" = child_run."id"
JOIN
job_run ON true
WHERE
WHERE
child_run."jobRunId" = $1::uuid
AND child_run."status" = 'PENDING'
AND job_run."status" = 'RUNNING'
-- case on whether parentStepRunId is null
AND (
($2::uuid IS NULL AND step_run_order."A" IS NULL) OR
($2::uuid IS NULL AND step_run_order."A" IS NULL) OR
(
step_run_order."A" = $2::uuid
AND NOT EXISTS (
SELECT 1
FROM "_StepRunOrder" AS parent_order
JOIN "StepRun" AS parent_run ON parent_order."A" = parent_run."id"
WHERE
WHERE
parent_order."B" = child_run."id"
AND parent_run."status" != 'SUCCEEDED'
)
@@ -781,7 +781,7 @@ step_runs AS (
SELECT 1
FROM "_StepRunOrder" AS order_table
JOIN "StepRun" AS prev_sr ON order_table."A" = prev_sr."id"
WHERE
WHERE
order_table."B" = sr."id"
AND prev_sr."status" != 'SUCCEEDED'
)
@@ -804,7 +804,7 @@ SET
-- requeue after now plus 4 seconds
"requeueAfter" = CURRENT_TIMESTAMP + INTERVAL '4 seconds',
"updatedAt" = CURRENT_TIMESTAMP
FROM
FROM
locked_step_runs
WHERE
"StepRun"."id" = locked_step_runs."id"
@@ -874,7 +874,7 @@ step_runs AS (
SELECT 1
FROM "_StepRunOrder" AS order_table
JOIN "StepRun" AS prev_sr ON order_table."A" = prev_sr."id"
WHERE
WHERE
order_table."B" = sr."id"
AND prev_sr."status" != 'SUCCEEDED'
)
@@ -897,7 +897,7 @@ SET
-- requeue after now plus 4 seconds
"requeueAfter" = CURRENT_TIMESTAMP + INTERVAL '4 seconds',
"updatedAt" = CURRENT_TIMESTAMP
FROM
FROM
locked_step_runs
WHERE
"StepRun"."id" = locked_step_runs."id"
@@ -937,9 +937,9 @@ WITH RECURSIVE currStepRun AS (
FROM "StepRun" sr
JOIN "_StepRunOrder" sro ON sr."id" = sro."B"
WHERE sro."A" = (SELECT "id" FROM currStepRun)
UNION ALL
SELECT sr."id", sr."status"
FROM "StepRun" sr
JOIN "_StepRunOrder" sro ON sr."id" = sro."B"
@@ -1157,7 +1157,7 @@ SET
WHEN $4::boolean THEN NULL
ELSE COALESCE($5::timestamp, "finishedAt")
END,
"status" = CASE
"status" = CASE
-- if this is a rerun, we permit status updates
WHEN $4::boolean THEN COALESCE($6, "status")
-- Final states are final, cannot be updated
@@ -1187,7 +1187,7 @@ SET
END,
"retryCount" = COALESCE($12::int, "retryCount"),
"semaphoreReleased" = COALESCE($13::boolean, "semaphoreReleased")
WHERE
WHERE
"id" = $14::uuid AND
"tenantId" = $15::uuid
RETURNING "StepRun".id, "StepRun"."createdAt", "StepRun"."updatedAt", "StepRun"."deletedAt", "StepRun"."tenantId", "StepRun"."jobRunId", "StepRun"."stepId", "StepRun"."order", "StepRun"."workerId", "StepRun"."tickerId", "StepRun".status, "StepRun".input, "StepRun".output, "StepRun"."requeueAfter", "StepRun"."scheduleTimeoutAt", "StepRun".error, "StepRun"."startedAt", "StepRun"."finishedAt", "StepRun"."timeoutAt", "StepRun"."cancelledAt", "StepRun"."cancelledReason", "StepRun"."cancelledError", "StepRun"."inputSchema", "StepRun"."callerFiles", "StepRun"."gitRepoBranch", "StepRun"."retryCount", "StepRun"."semaphoreReleased"
@@ -1290,7 +1290,7 @@ func (q *Queries) UpdateStepRunInputSchema(ctx context.Context, db DBTX, arg Upd
const updateStepRunOverridesData = `-- name: UpdateStepRunOverridesData :one
UPDATE
"StepRun" AS sr
SET
SET
"updatedAt" = CURRENT_TIMESTAMP,
"input" = jsonb_set("input", $1::text[], $2::jsonb, true),
"callerFiles" = jsonb_set("callerFiles", $3::text[], to_jsonb($4::text), true)
@@ -1346,7 +1346,7 @@ UPDATE
SET
-- This shouldn't happen, but we set guardrails to prevent negative slots or slots over
-- the worker's maxRuns
"slots" = CASE
"slots" = CASE
WHEN (ws."slots" + $1::int) < 0 THEN 0
WHEN (ws."slots" + $1::int) > COALESCE(worker."maxRuns", 100) THEN COALESCE(worker."maxRuns", 100)
ELSE (ws."slots" + $1::int)

View File

@@ -23,7 +23,7 @@ WHERE
-- name: GetSlackWebhooks :many
SELECT
*
FROM
FROM
"SlackAppWebhook" as slackWebhooks
WHERE
"tenantId" = sqlc.arg('tenantId')::uuid;
@@ -43,4 +43,4 @@ SET
"lastAlertedAt" = COALESCE(sqlc.narg('lastAlertedAt')::timestamp, "lastAlertedAt")
WHERE
"tenantId" = sqlc.arg('tenantId')::uuid
RETURNING *;
RETURNING *;

View File

@@ -50,7 +50,7 @@ func (q *Queries) GetEmailGroups(ctx context.Context, db DBTX, tenantid pgtype.U
const getSlackWebhooks = `-- name: GetSlackWebhooks :many
SELECT
id, "createdAt", "updatedAt", "deletedAt", "tenantId", "teamId", "teamName", "channelId", "channelName", "webhookURL"
FROM
FROM
"SlackAppWebhook" as slackWebhooks
WHERE
"tenantId" = $1::uuid

View File

@@ -80,7 +80,7 @@ WITH stepRunsToTimeout AS (
NOT EXISTS (
SELECT 1 FROM "Ticker" WHERE "id" = stepRun."tickerId" AND "isActive" = true AND "lastHeartbeatAt" >= NOW() - INTERVAL '10 seconds'
)
OR "tickerId" IS NULL
OR "tickerId" IS NULL
)
FOR UPDATE SKIP LOCKED
)
@@ -107,7 +107,7 @@ WITH getGroupKeyRunsToTimeout AS (
NOT EXISTS (
SELECT 1 FROM "Ticker" WHERE "id" = getGroupKeyRun."tickerId" AND "isActive" = true AND "lastHeartbeatAt" >= NOW() - INTERVAL '10 seconds'
)
OR "tickerId" IS NULL
OR "tickerId" IS NULL
)
FOR UPDATE SKIP LOCKED
)
@@ -137,15 +137,15 @@ active_cron_schedules AS (
triggers."tenantId" AS "tenantId"
FROM
"WorkflowTriggerCronRef" as cronSchedule
JOIN
JOIN
"WorkflowTriggers" as triggers ON triggers."id" = cronSchedule."parentId"
JOIN
"WorkflowVersion" as versions ON versions."id" = triggers."workflowVersionId"
JOIN
JOIN
latest_workflow_versions l ON versions."workflowId" = l."workflowId" AND versions."order" = l.max_order
WHERE
"enabled" = TRUE AND
("tickerId" IS NULL
("tickerId" IS NULL
OR NOT EXISTS (
SELECT 1 FROM "Ticker" WHERE "id" = cronSchedule."tickerId" AND "isActive" = true AND "lastHeartbeatAt" >= NOW() - INTERVAL '10 seconds'
)
@@ -182,7 +182,7 @@ not_run_scheduled_workflows AS (
"WorkflowTriggerScheduledRef" as scheduledWorkflow
JOIN
"WorkflowVersion" as versions ON versions."id" = scheduledWorkflow."parentId"
JOIN
JOIN
latest_workflow_versions l ON versions."workflowId" = l."workflowId" AND versions."order" = l.max_order
JOIN
"Workflow" as workflow ON workflow."id" = versions."workflowId"
@@ -192,7 +192,7 @@ not_run_scheduled_workflows AS (
"triggerAt" <= NOW() + INTERVAL '5 seconds'
AND runTriggeredBy IS NULL
AND (
"tickerId" IS NULL
"tickerId" IS NULL
OR NOT EXISTS (
SELECT 1 FROM "Ticker" WHERE "id" = scheduledWorkflow."tickerId" AND "isActive" = true AND "lastHeartbeatAt" >= NOW() - INTERVAL '10 seconds'
)
@@ -257,4 +257,4 @@ FROM
WHERE
alerts."id" = active_tenant_alerts."id" AND
alerts."tenantId" IN (SELECT "tenantId" FROM failed_run_count_by_tenant WHERE "failedWorkflowRunCount" > 0)
RETURNING alerts.*, active_tenant_alerts."lastAlertedAt" AS "prevLastAlertedAt";
RETURNING alerts.*, active_tenant_alerts."lastAlertedAt" AS "prevLastAlertedAt";

View File

@@ -199,15 +199,15 @@ active_cron_schedules AS (
triggers."tenantId" AS "tenantId"
FROM
"WorkflowTriggerCronRef" as cronSchedule
JOIN
JOIN
"WorkflowTriggers" as triggers ON triggers."id" = cronSchedule."parentId"
JOIN
"WorkflowVersion" as versions ON versions."id" = triggers."workflowVersionId"
JOIN
JOIN
latest_workflow_versions l ON versions."workflowId" = l."workflowId" AND versions."order" = l.max_order
WHERE
"enabled" = TRUE AND
("tickerId" IS NULL
("tickerId" IS NULL
OR NOT EXISTS (
SELECT 1 FROM "Ticker" WHERE "id" = cronSchedule."tickerId" AND "isActive" = true AND "lastHeartbeatAt" >= NOW() - INTERVAL '10 seconds'
)
@@ -276,7 +276,7 @@ WITH getGroupKeyRunsToTimeout AS (
NOT EXISTS (
SELECT 1 FROM "Ticker" WHERE "id" = getGroupKeyRun."tickerId" AND "isActive" = true AND "lastHeartbeatAt" >= NOW() - INTERVAL '10 seconds'
)
OR "tickerId" IS NULL
OR "tickerId" IS NULL
)
FOR UPDATE SKIP LOCKED
)
@@ -350,7 +350,7 @@ not_run_scheduled_workflows AS (
"WorkflowTriggerScheduledRef" as scheduledWorkflow
JOIN
"WorkflowVersion" as versions ON versions."id" = scheduledWorkflow."parentId"
JOIN
JOIN
latest_workflow_versions l ON versions."workflowId" = l."workflowId" AND versions."order" = l.max_order
JOIN
"Workflow" as workflow ON workflow."id" = versions."workflowId"
@@ -360,7 +360,7 @@ not_run_scheduled_workflows AS (
"triggerAt" <= NOW() + INTERVAL '5 seconds'
AND runTriggeredBy IS NULL
AND (
"tickerId" IS NULL
"tickerId" IS NULL
OR NOT EXISTS (
SELECT 1 FROM "Ticker" WHERE "id" = scheduledWorkflow."tickerId" AND "isActive" = true AND "lastHeartbeatAt" >= NOW() - INTERVAL '10 seconds'
)
@@ -446,7 +446,7 @@ WITH stepRunsToTimeout AS (
NOT EXISTS (
SELECT 1 FROM "Ticker" WHERE "id" = stepRun."tickerId" AND "isActive" = true AND "lastHeartbeatAt" >= NOW() - INTERVAL '10 seconds'
)
OR "tickerId" IS NULL
OR "tickerId" IS NULL
)
FOR UPDATE SKIP LOCKED
)

View File

@@ -115,10 +115,10 @@ WHERE ws."workerId" = w."id"
-- name: LinkActionsToWorker :exec
INSERT INTO "_ActionToWorker" (
"A",
"A",
"B"
) SELECT
unnest(@actionIds::uuid[]),
) SELECT
unnest(@actionIds::uuid[]),
@workerId::uuid
ON CONFLICT DO NOTHING;
@@ -137,7 +137,7 @@ VALUES (
@name::text,
@tenantId::uuid
)
ON CONFLICT ("tenantId", "name") DO UPDATE
ON CONFLICT ("tenantId", "name") DO UPDATE
SET
"updatedAt" = CURRENT_TIMESTAMP
WHERE
@@ -147,10 +147,10 @@ RETURNING *;
-- name: LinkServicesToWorker :exec
INSERT INTO "_ServiceToWorker" (
"A",
"B"
"B"
)
VALUES (
unnest(@services::uuid[]),
unnest(@services::uuid[]),
@workerId::uuid
)
ON CONFLICT DO NOTHING;
@@ -160,4 +160,4 @@ DELETE FROM
"Worker"
WHERE
"id" = @id::uuid
RETURNING *;
RETURNING *;

View File

@@ -139,10 +139,10 @@ func (q *Queries) GetWorkerForEngine(ctx context.Context, db DBTX, arg GetWorker
const linkActionsToWorker = `-- name: LinkActionsToWorker :exec
INSERT INTO "_ActionToWorker" (
"A",
"A",
"B"
) SELECT
unnest($1::uuid[]),
) SELECT
unnest($1::uuid[]),
$2::uuid
ON CONFLICT DO NOTHING
`
@@ -160,10 +160,10 @@ func (q *Queries) LinkActionsToWorker(ctx context.Context, db DBTX, arg LinkActi
const linkServicesToWorker = `-- name: LinkServicesToWorker :exec
INSERT INTO "_ServiceToWorker" (
"A",
"B"
"B"
)
VALUES (
unnest($1::uuid[]),
unnest($1::uuid[]),
$2::uuid
)
ON CONFLICT DO NOTHING
@@ -359,7 +359,7 @@ VALUES (
$1::text,
$2::uuid
)
ON CONFLICT ("tenantId", "name") DO UPDATE
ON CONFLICT ("tenantId", "name") DO UPDATE
SET
"updatedAt" = CURRENT_TIMESTAMP
WHERE

View File

@@ -100,14 +100,14 @@ WHERE
-- name: ListWorkflowRuns :many
SELECT
sqlc.embed(runs),
sqlc.embed(workflow),
sqlc.embed(runTriggers),
sqlc.embed(workflowVersion),
sqlc.embed(runs),
sqlc.embed(workflow),
sqlc.embed(runTriggers),
sqlc.embed(workflowVersion),
-- waiting on https://github.com/sqlc-dev/sqlc/pull/2858 for nullable events field
events.id, events.key, events."createdAt", events."updatedAt"
FROM
"WorkflowRun" as runs
"WorkflowRun" as runs
LEFT JOIN
"WorkflowRunTriggeredBy" as runTriggers ON runTriggers."parentId" = runs."id"
LEFT JOIN
@@ -129,7 +129,7 @@ WHERE
(
sqlc.narg('ids')::uuid[] IS NULL OR
runs."id" = ANY(sqlc.narg('ids')::uuid[])
) AND
) AND
(
sqlc.narg('additionalMetadata')::jsonb IS NULL OR
runs."additionalMetadata" @> sqlc.narg('additionalMetadata')::jsonb
@@ -213,7 +213,7 @@ WITH workflow_runs AS (
ORDER BY
rn, seqnum ASC
LIMIT
-- We can run up to maxRuns per group, so we multiple max runs by the number of groups, then subtract the
-- We can run up to maxRuns per group, so we multiple max runs by the number of groups, then subtract the
-- total number of running workflows.
(@maxRuns::int) * (SELECT count FROM total_group_count)
) AND
@@ -240,24 +240,24 @@ WITH groupKeyRun AS (
"tenantId" = @tenantId::uuid
)
UPDATE "WorkflowRun" workflowRun
SET "status" = CASE
SET "status" = CASE
-- Final states are final, cannot be updated. We also can't move out of a queued state
WHEN "status" IN ('SUCCEEDED', 'FAILED', 'QUEUED') THEN "status"
-- When the GetGroupKeyRun failed or been cancelled, then the workflow is failed
WHEN groupKeyRun.groupKeyRunStatus IN ('FAILED', 'CANCELLED') THEN 'FAILED'
WHEN groupKeyRun.output IS NOT NULL THEN 'QUEUED'
ELSE "status"
END, "finishedAt" = CASE
END, "finishedAt" = CASE
-- Final states are final, cannot be updated
WHEN "finishedAt" IS NOT NULL THEN "finishedAt"
-- When one job run has failed or been cancelled, then the workflow is failed
WHEN groupKeyRun.groupKeyRunStatus IN ('FAILED', 'CANCELLED') THEN NOW()
ELSE "finishedAt"
END,
END,
"concurrencyGroupId" = groupKeyRun."output"
FROM
groupKeyRun
WHERE
WHERE
workflowRun."id" = groupKeyRun."workflowRunId" AND
workflowRun."tenantId" = @tenantId::uuid
RETURNING workflowRun.*;
@@ -282,7 +282,7 @@ WITH jobRuns AS (
job."kind" = 'DEFAULT'
)
UPDATE "WorkflowRun"
SET "status" = CASE
SET "status" = CASE
-- Final states are final, cannot be updated
WHEN "status" IN ('SUCCEEDED', 'FAILED') THEN "status"
-- We check for running first, because if a job run is running, then the workflow is running
@@ -292,7 +292,7 @@ SET "status" = CASE
-- When all job runs have succeeded, then the workflow is succeeded
WHEN j.succeededRuns > 0 AND j.pendingRuns = 0 AND j.runningRuns = 0 AND j.failedRuns = 0 AND j.cancelledRuns = 0 THEN 'SUCCEEDED'
ELSE "status"
END, "finishedAt" = CASE
END, "finishedAt" = CASE
-- Final states are final, cannot be updated
WHEN "finishedAt" IS NOT NULL THEN "finishedAt"
-- We check for running first, because if a job run is running, then the workflow is not finished
@@ -300,7 +300,7 @@ END, "finishedAt" = CASE
-- When one job run has failed or been cancelled, then the workflow is failed
WHEN j.failedRuns > 0 OR j.cancelledRuns > 0 OR j.succeededRuns > 0 THEN NOW()
ELSE "finishedAt"
END, "startedAt" = CASE
END, "startedAt" = CASE
-- Started at is final, cannot be changed
WHEN "startedAt" IS NOT NULL THEN "startedAt"
-- If a job is running or in a final state, then the workflow has started
@@ -320,7 +320,7 @@ RETURNING "WorkflowRun".*;
UPDATE
"WorkflowRun"
SET
"status" = CASE
"status" = CASE
-- Final states are final, cannot be updated
WHEN "status" IN ('SUCCEEDED', 'FAILED') THEN "status"
ELSE COALESCE(sqlc.narg('status')::"WorkflowRunStatus", "status")
@@ -328,7 +328,7 @@ SET
"error" = COALESCE(sqlc.narg('error')::text, "error"),
"startedAt" = COALESCE(sqlc.narg('startedAt')::timestamp, "startedAt"),
"finishedAt" = COALESCE(sqlc.narg('finishedAt')::timestamp, "finishedAt")
WHERE
WHERE
"id" = @id::uuid AND
"tenantId" = @tenantId::uuid
RETURNING "WorkflowRun".*;
@@ -341,7 +341,7 @@ SET
"error" = COALESCE(sqlc.narg('error')::text, "error"),
"startedAt" = COALESCE(sqlc.narg('startedAt')::timestamp, "startedAt"),
"finishedAt" = COALESCE(sqlc.narg('finishedAt')::timestamp, "finishedAt")
WHERE
WHERE
"tenantId" = @tenantId::uuid AND
"id" = ANY(@ids::uuid[])
RETURNING "WorkflowRun".*;
@@ -462,7 +462,7 @@ INSERT INTO "JobRun" (
"workflowRunId",
"jobId",
"status"
)
)
SELECT
gen_random_uuid(),
CURRENT_TIMESTAMP,
@@ -516,7 +516,7 @@ INSERT INTO "StepRun" (
"status",
"requeueAfter",
"callerFiles"
)
)
SELECT
gen_random_uuid(),
CURRENT_TIMESTAMP,
@@ -534,21 +534,21 @@ WHERE
-- name: LinkStepRunParents :exec
INSERT INTO "_StepRunOrder" ("A", "B")
SELECT
SELECT
parent_run."id" AS "A",
child_run."id" AS "B"
FROM
FROM
"_StepOrder" AS step_order
JOIN
JOIN
"StepRun" AS parent_run ON parent_run."stepId" = step_order."A" AND parent_run."jobRunId" = @jobRunId::uuid
JOIN
JOIN
"StepRun" AS child_run ON child_run."stepId" = step_order."B" AND child_run."jobRunId" = @jobRunId::uuid;
-- name: GetWorkflowRun :many
SELECT
sqlc.embed(runs),
sqlc.embed(runTriggers),
sqlc.embed(workflowVersion),
sqlc.embed(runs),
sqlc.embed(runTriggers),
sqlc.embed(workflowVersion),
workflow."name" as "workflowName",
-- waiting on https://github.com/sqlc-dev/sqlc/pull/2858 for nullable fields
wc."limitStrategy" as "concurrencyLimitStrategy",
@@ -597,4 +597,4 @@ WHERE
-- if childKey is set, use that
(sqlc.narg('childKey')::text IS NULL AND "childIndex" = @childIndex) OR
(sqlc.narg('childKey')::text IS NOT NULL AND "childKey" = sqlc.narg('childKey')::text)
);
);

View File

@@ -259,7 +259,7 @@ INSERT INTO "JobRun" (
"workflowRunId",
"jobId",
"status"
)
)
SELECT
gen_random_uuid(),
CURRENT_TIMESTAMP,
@@ -317,7 +317,7 @@ INSERT INTO "StepRun" (
"status",
"requeueAfter",
"callerFiles"
)
)
SELECT
gen_random_uuid(),
CURRENT_TIMESTAMP,
@@ -590,9 +590,9 @@ func (q *Queries) GetScheduledChildWorkflowRun(ctx context.Context, db DBTX, arg
const getWorkflowRun = `-- name: GetWorkflowRun :many
SELECT
runs."createdAt", runs."updatedAt", runs."deletedAt", runs."tenantId", runs."workflowVersionId", runs.status, runs.error, runs."startedAt", runs."finishedAt", runs."concurrencyGroupId", runs."displayName", runs.id, runs."gitRepoBranch", runs."childIndex", runs."childKey", runs."parentId", runs."parentStepRunId", runs."additionalMetadata",
runtriggers.id, runtriggers."createdAt", runtriggers."updatedAt", runtriggers."deletedAt", runtriggers."tenantId", runtriggers."eventId", runtriggers."cronParentId", runtriggers."cronSchedule", runtriggers."scheduledId", runtriggers.input, runtriggers."parentId",
workflowversion.id, workflowversion."createdAt", workflowversion."updatedAt", workflowversion."deletedAt", workflowversion.version, workflowversion."order", workflowversion."workflowId", workflowversion.checksum, workflowversion."scheduleTimeout", workflowversion."onFailureJobId",
runs."createdAt", runs."updatedAt", runs."deletedAt", runs."tenantId", runs."workflowVersionId", runs.status, runs.error, runs."startedAt", runs."finishedAt", runs."concurrencyGroupId", runs."displayName", runs.id, runs."gitRepoBranch", runs."childIndex", runs."childKey", runs."parentId", runs."parentStepRunId", runs."additionalMetadata",
runtriggers.id, runtriggers."createdAt", runtriggers."updatedAt", runtriggers."deletedAt", runtriggers."tenantId", runtriggers."eventId", runtriggers."cronParentId", runtriggers."cronSchedule", runtriggers."scheduledId", runtriggers.input, runtriggers."parentId",
workflowversion.id, workflowversion."createdAt", workflowversion."updatedAt", workflowversion."deletedAt", workflowversion.version, workflowversion."order", workflowversion."workflowId", workflowversion.checksum, workflowversion."scheduleTimeout", workflowversion."onFailureJobId",
workflow."name" as "workflowName",
-- waiting on https://github.com/sqlc-dev/sqlc/pull/2858 for nullable fields
wc."limitStrategy" as "concurrencyLimitStrategy",
@@ -696,14 +696,14 @@ func (q *Queries) GetWorkflowRun(ctx context.Context, db DBTX, arg GetWorkflowRu
const linkStepRunParents = `-- name: LinkStepRunParents :exec
INSERT INTO "_StepRunOrder" ("A", "B")
SELECT
SELECT
parent_run."id" AS "A",
child_run."id" AS "B"
FROM
FROM
"_StepOrder" AS step_order
JOIN
JOIN
"StepRun" AS parent_run ON parent_run."stepId" = step_order."A" AND parent_run."jobRunId" = $1::uuid
JOIN
JOIN
"StepRun" AS child_run ON child_run."stepId" = step_order."B" AND child_run."jobRunId" = $1::uuid
`
@@ -714,14 +714,14 @@ func (q *Queries) LinkStepRunParents(ctx context.Context, db DBTX, jobrunid pgty
const listWorkflowRuns = `-- name: ListWorkflowRuns :many
SELECT
runs."createdAt", runs."updatedAt", runs."deletedAt", runs."tenantId", runs."workflowVersionId", runs.status, runs.error, runs."startedAt", runs."finishedAt", runs."concurrencyGroupId", runs."displayName", runs.id, runs."gitRepoBranch", runs."childIndex", runs."childKey", runs."parentId", runs."parentStepRunId", runs."additionalMetadata",
workflow.id, workflow."createdAt", workflow."updatedAt", workflow."deletedAt", workflow."tenantId", workflow.name, workflow.description,
runtriggers.id, runtriggers."createdAt", runtriggers."updatedAt", runtriggers."deletedAt", runtriggers."tenantId", runtriggers."eventId", runtriggers."cronParentId", runtriggers."cronSchedule", runtriggers."scheduledId", runtriggers.input, runtriggers."parentId",
workflowversion.id, workflowversion."createdAt", workflowversion."updatedAt", workflowversion."deletedAt", workflowversion.version, workflowversion."order", workflowversion."workflowId", workflowversion.checksum, workflowversion."scheduleTimeout", workflowversion."onFailureJobId",
runs."createdAt", runs."updatedAt", runs."deletedAt", runs."tenantId", runs."workflowVersionId", runs.status, runs.error, runs."startedAt", runs."finishedAt", runs."concurrencyGroupId", runs."displayName", runs.id, runs."gitRepoBranch", runs."childIndex", runs."childKey", runs."parentId", runs."parentStepRunId", runs."additionalMetadata",
workflow.id, workflow."createdAt", workflow."updatedAt", workflow."deletedAt", workflow."tenantId", workflow.name, workflow.description,
runtriggers.id, runtriggers."createdAt", runtriggers."updatedAt", runtriggers."deletedAt", runtriggers."tenantId", runtriggers."eventId", runtriggers."cronParentId", runtriggers."cronSchedule", runtriggers."scheduledId", runtriggers.input, runtriggers."parentId",
workflowversion.id, workflowversion."createdAt", workflowversion."updatedAt", workflowversion."deletedAt", workflowversion.version, workflowversion."order", workflowversion."workflowId", workflowversion.checksum, workflowversion."scheduleTimeout", workflowversion."onFailureJobId",
-- waiting on https://github.com/sqlc-dev/sqlc/pull/2858 for nullable events field
events.id, events.key, events."createdAt", events."updatedAt"
FROM
"WorkflowRun" as runs
"WorkflowRun" as runs
LEFT JOIN
"WorkflowRunTriggeredBy" as runTriggers ON runTriggers."parentId" = runs."id"
LEFT JOIN
@@ -743,7 +743,7 @@ WHERE
(
$4::uuid[] IS NULL OR
runs."id" = ANY($4::uuid[])
) AND
) AND
(
$5::jsonb IS NULL OR
runs."additionalMetadata" @> $5::jsonb
@@ -944,7 +944,7 @@ WITH workflow_runs AS (
ORDER BY
rn, seqnum ASC
LIMIT
-- We can run up to maxRuns per group, so we multiply max runs by the number of groups, then subtract the
-- We can run up to maxRuns per group, so we multiply max runs by the number of groups, then subtract the
-- total number of running workflows.
($3::int) * (SELECT count FROM total_group_count)
) AND
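The arithmetic in that LIMIT has a concrete reading: with a per-group cap of maxRuns and the number of distinct concurrency groups known, the queue can admit at most maxRuns times the group count in total, minus whatever is already running. A worked example with invented figures:

```sql
-- Hypothetical numbers: maxRuns = 5 per group, 3 concurrency groups, 8 runs already running.
SELECT (5 * 3) - 8 AS queueable_runs;  -- => 7 more workflow runs may be queued this pass
```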
@@ -1028,7 +1028,7 @@ WITH jobRuns AS (
job."kind" = 'DEFAULT'
)
UPDATE "WorkflowRun"
SET "status" = CASE
SET "status" = CASE
-- Final states are final, cannot be updated
WHEN "status" IN ('SUCCEEDED', 'FAILED') THEN "status"
-- We check for running first, because if a job run is running, then the workflow is running
@@ -1038,7 +1038,7 @@ SET "status" = CASE
-- When all job runs have succeeded, then the workflow is succeeded
WHEN j.succeededRuns > 0 AND j.pendingRuns = 0 AND j.runningRuns = 0 AND j.failedRuns = 0 AND j.cancelledRuns = 0 THEN 'SUCCEEDED'
ELSE "status"
END, "finishedAt" = CASE
END, "finishedAt" = CASE
-- Final states are final, cannot be updated
WHEN "finishedAt" IS NOT NULL THEN "finishedAt"
-- We check for running first, because if a job run is running, then the workflow is not finished
@@ -1046,7 +1046,7 @@ END, "finishedAt" = CASE
-- When one job run has failed or been cancelled, then the workflow is failed
WHEN j.failedRuns > 0 OR j.cancelledRuns > 0 OR j.succeededRuns > 0 THEN NOW()
ELSE "finishedAt"
END, "startedAt" = CASE
END, "startedAt" = CASE
-- Started at is final, cannot be changed
WHEN "startedAt" IS NOT NULL THEN "startedAt"
-- If a job is running or in a final state, then the workflow has started
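Taken together, the three CASE expressions resolve a workflow run's status, finishedAt, and startedAt from the aggregate state of its job runs, with terminal values always winning. A compact sketch of the status precedence using hypothetical counts; the branch ordering below is a plausible reading of the comments, not a copy of the full query:

```sql
-- Hypothetical job-run counts for one workflow run.
WITH j AS (
    SELECT 0 AS pendingRuns, 0 AS runningRuns, 3 AS succeededRuns, 0 AS failedRuns, 0 AS cancelledRuns
)
SELECT CASE
    WHEN j.runningRuns > 0 THEN 'RUNNING'
    WHEN j.failedRuns > 0 OR j.cancelledRuns > 0 THEN 'FAILED'
    WHEN j.succeededRuns > 0 AND j.pendingRuns = 0 AND j.runningRuns = 0
         AND j.failedRuns = 0 AND j.cancelledRuns = 0 THEN 'SUCCEEDED'
    ELSE 'PENDING'  -- the real query keeps the existing status here
END AS resolved_status
FROM j;  -- => 'SUCCEEDED'
```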
@@ -1102,7 +1102,7 @@ SET
"error" = COALESCE($2::text, "error"),
"startedAt" = COALESCE($3::timestamp, "startedAt"),
"finishedAt" = COALESCE($4::timestamp, "finishedAt")
WHERE
WHERE
"tenantId" = $5::uuid AND
"id" = ANY($6::uuid[])
RETURNING "WorkflowRun"."createdAt", "WorkflowRun"."updatedAt", "WorkflowRun"."deletedAt", "WorkflowRun"."tenantId", "WorkflowRun"."workflowVersionId", "WorkflowRun".status, "WorkflowRun".error, "WorkflowRun"."startedAt", "WorkflowRun"."finishedAt", "WorkflowRun"."concurrencyGroupId", "WorkflowRun"."displayName", "WorkflowRun".id, "WorkflowRun"."gitRepoBranch", "WorkflowRun"."childIndex", "WorkflowRun"."childKey", "WorkflowRun"."parentId", "WorkflowRun"."parentStepRunId", "WorkflowRun"."additionalMetadata"
@@ -1167,7 +1167,7 @@ const updateWorkflowRun = `-- name: UpdateWorkflowRun :one
UPDATE
"WorkflowRun"
SET
"status" = CASE
"status" = CASE
-- Final states are final, cannot be updated
WHEN "status" IN ('SUCCEEDED', 'FAILED') THEN "status"
ELSE COALESCE($1::"WorkflowRunStatus", "status")
@@ -1175,7 +1175,7 @@ SET
"error" = COALESCE($2::text, "error"),
"startedAt" = COALESCE($3::timestamp, "startedAt"),
"finishedAt" = COALESCE($4::timestamp, "finishedAt")
WHERE
WHERE
"id" = $5::uuid AND
"tenantId" = $6::uuid
RETURNING "WorkflowRun"."createdAt", "WorkflowRun"."updatedAt", "WorkflowRun"."deletedAt", "WorkflowRun"."tenantId", "WorkflowRun"."workflowVersionId", "WorkflowRun".status, "WorkflowRun".error, "WorkflowRun"."startedAt", "WorkflowRun"."finishedAt", "WorkflowRun"."concurrencyGroupId", "WorkflowRun"."displayName", "WorkflowRun".id, "WorkflowRun"."gitRepoBranch", "WorkflowRun"."childIndex", "WorkflowRun"."childKey", "WorkflowRun"."parentId", "WorkflowRun"."parentStepRunId", "WorkflowRun"."additionalMetadata"
@@ -1232,24 +1232,24 @@ WITH groupKeyRun AS (
"tenantId" = $1::uuid
)
UPDATE "WorkflowRun" workflowRun
SET "status" = CASE
SET "status" = CASE
-- Final states are final, cannot be updated. We also can't move out of a queued state
WHEN "status" IN ('SUCCEEDED', 'FAILED', 'QUEUED') THEN "status"
-- When the GetGroupKeyRun has failed or been cancelled, then the workflow is failed
WHEN groupKeyRun.groupKeyRunStatus IN ('FAILED', 'CANCELLED') THEN 'FAILED'
WHEN groupKeyRun.output IS NOT NULL THEN 'QUEUED'
ELSE "status"
END, "finishedAt" = CASE
END, "finishedAt" = CASE
-- Final states are final, cannot be updated
WHEN "finishedAt" IS NOT NULL THEN "finishedAt"
-- When one job run has failed or been cancelled, then the workflow is failed
WHEN groupKeyRun.groupKeyRunStatus IN ('FAILED', 'CANCELLED') THEN NOW()
ELSE "finishedAt"
END,
END,
"concurrencyGroupId" = groupKeyRun."output"
FROM
groupKeyRun
WHERE
WHERE
workflowRun."id" = groupKeyRun."workflowRunId" AND
workflowRun."tenantId" = $1::uuid
RETURNING workflowrun."createdAt", workflowrun."updatedAt", workflowrun."deletedAt", workflowrun."tenantId", workflowrun."workflowVersionId", workflowrun.status, workflowrun.error, workflowrun."startedAt", workflowrun."finishedAt", workflowrun."concurrencyGroupId", workflowrun."displayName", workflowrun.id, workflowrun."gitRepoBranch", workflowrun."childIndex", workflowrun."childKey", workflowrun."parentId", workflowrun."parentStepRunId", workflowrun."additionalMetadata"
View File
@@ -2,32 +2,32 @@
SELECT
count(workflows) OVER() AS total
FROM
"Workflow" as workflows
"Workflow" as workflows
WHERE
workflows."tenantId" = $1 AND
(
sqlc.narg('eventKey')::text IS NULL OR
workflows."id" IN (
SELECT
SELECT
DISTINCT ON(t1."workflowId") t1."workflowId"
FROM
FROM
"WorkflowVersion" AS t1
LEFT JOIN "WorkflowTriggers" AS j2 ON j2."workflowVersionId" = t1."id"
WHERE
LEFT JOIN "WorkflowTriggers" AS j2 ON j2."workflowVersionId" = t1."id"
WHERE
(
j2."id" IN (
SELECT
t3."parentId"
FROM
SELECT
t3."parentId"
FROM
"public"."WorkflowTriggerEventRef" AS t3
WHERE
WHERE
t3."eventKey" = sqlc.narg('eventKey')::text
AND t3."parentId" IS NOT NULL
)
AND j2."id" IS NOT NULL
)
AND j2."id" IS NOT NULL
AND t1."workflowId" IS NOT NULL
)
ORDER BY
ORDER BY
t1."workflowId" DESC, t1."order" DESC
)
);
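One pattern worth calling out in that WHERE clause: `sqlc.narg('eventKey')` produces a nullable parameter, and the `IS NULL OR …` disjunction turns it into an optional filter, so passing NULL disables event-key filtering entirely. The idiom in miniature, over toy data rather than the real schema:

```sql
-- Toy demonstration of the optional-filter idiom; replace NULL with 'user:create' to actually filter.
SELECT e.key
FROM (VALUES ('user:create'), ('user:update')) AS e(key)
WHERE (NULL::text IS NULL OR e.key = NULL::text);  -- NULL parameter => both rows returned
```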
@@ -46,40 +46,40 @@ WHERE
(
sqlc.narg('eventKey')::text IS NULL OR
workflow."id" IN (
SELECT
SELECT
DISTINCT ON(t1."workflowId") t1."workflowId"
FROM
FROM
"WorkflowVersion" AS t1
LEFT JOIN "WorkflowTriggers" AS j2 ON j2."workflowVersionId" = t1."id"
WHERE
LEFT JOIN "WorkflowTriggers" AS j2 ON j2."workflowVersionId" = t1."id"
WHERE
(
j2."id" IN (
SELECT
t3."parentId"
FROM
SELECT
t3."parentId"
FROM
"public"."WorkflowTriggerEventRef" AS t3
WHERE
WHERE
t3."eventKey" = sqlc.narg('eventKey')::text
AND t3."parentId" IS NOT NULL
)
AND j2."id" IS NOT NULL
)
AND j2."id" IS NOT NULL
AND t1."workflowId" IS NOT NULL
)
ORDER BY
ORDER BY
t1."workflowId" DESC, t1."order" DESC
)
)
ORDER BY
workflow."id" DESC, runs."createdAt" DESC;
ORDER BY
workflow."id" DESC, runs."createdAt" DESC;
-- name: ListWorkflows :many
SELECT
SELECT
sqlc.embed(workflows)
FROM (
SELECT
DISTINCT ON(workflows."id") workflows.*
FROM
"Workflow" as workflows
"Workflow" as workflows
LEFT JOIN
(
SELECT * FROM "WorkflowVersion" as workflowVersion ORDER BY workflowVersion."order" DESC LIMIT 1
@@ -89,31 +89,31 @@ FROM (
LEFT JOIN
"WorkflowTriggerEventRef" as workflowTriggerEventRef ON workflowTrigger."id" = workflowTriggerEventRef."parentId"
WHERE
workflows."tenantId" = $1
workflows."tenantId" = $1
AND
(
sqlc.narg('eventKey')::text IS NULL OR
workflows."id" IN (
SELECT
SELECT
DISTINCT ON(t1."workflowId") t1."workflowId"
FROM
FROM
"WorkflowVersion" AS t1
LEFT JOIN "WorkflowTriggers" AS j2 ON j2."workflowVersionId" = t1."id"
WHERE
LEFT JOIN "WorkflowTriggers" AS j2 ON j2."workflowVersionId" = t1."id"
WHERE
(
j2."id" IN (
SELECT
t3."parentId"
FROM
SELECT
t3."parentId"
FROM
"public"."WorkflowTriggerEventRef" AS t3
WHERE
WHERE
t3."eventKey" = sqlc.narg('eventKey')::text
AND t3."parentId" IS NOT NULL
)
AND j2."id" IS NOT NULL
)
AND j2."id" IS NOT NULL
AND t1."workflowId" IS NOT NULL
)
ORDER BY
ORDER BY
t1."workflowId" DESC
)
)
@@ -248,12 +248,12 @@ INSERT INTO "Step" (
-- name: AddStepParents :exec
INSERT INTO "_StepOrder" ("A", "B")
SELECT
SELECT
step."id",
@id::uuid
FROM
FROM
unnest(@parents::text[]) AS parent_readable_id
JOIN
JOIN
"Step" AS step ON step."readableId" = parent_readable_id AND step."jobId" = @jobId::uuid;
-- name: CreateStepRateLimit :one
@@ -280,7 +280,7 @@ VALUES (
LOWER(@action::text),
@tenantId::uuid
)
ON CONFLICT ("tenantId", "actionId") DO UPDATE
ON CONFLICT ("tenantId", "actionId") DO UPDATE
SET
"tenantId" = EXCLUDED."tenantId"
WHERE
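The self-assignment in that conflict clause looks redundant but is not a no-op in effect: `DO NOTHING` would skip the conflicting row entirely, while `DO UPDATE SET "tenantId" = EXCLUDED."tenantId"` still touches it, which matters when the statement needs to hand the row back. The hunk cuts off before the query's own WHERE condition and RETURNING clause, so the following is only a sketch of the general Postgres behaviour on a throwaway table:

```sql
-- Throwaway table illustrating why the upsert returns the existing row on conflict.
CREATE TEMP TABLE action_demo ("tenantId" int, "actionId" text, UNIQUE ("tenantId", "actionId"));
INSERT INTO action_demo VALUES (1, 'send-email');
INSERT INTO action_demo VALUES (1, 'send-email')
ON CONFLICT ("tenantId", "actionId") DO UPDATE
    SET "tenantId" = EXCLUDED."tenantId"
RETURNING *;  -- the conflicting row is still returned, unlike with DO NOTHING
```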
@@ -370,7 +370,7 @@ LEFT JOIN "Workflow" AS j1 ON j1.id = "WorkflowVersion"."workflowId"
LEFT JOIN "WorkflowTriggers" AS j2 ON j2."workflowVersionId" = "WorkflowVersion"."id"
WHERE
(j1."tenantId"::uuid = @tenantId AND j1.id IS NOT NULL)
AND
AND
(j2.id IN (
SELECT t3."parentId"
FROM "WorkflowTriggerEventRef" AS t3
@@ -459,4 +459,4 @@ WHERE
sqlc.narg('status')::"WorkflowRunStatus" IS NULL OR
r1."status" = sqlc.narg('status')::"WorkflowRunStatus"
) AND
workflowVersion."workflowId" = @workflowId::uuid;
workflowVersion."workflowId" = @workflowId::uuid;
View File
@@ -13,12 +13,12 @@ import (
const addStepParents = `-- name: AddStepParents :exec
INSERT INTO "_StepOrder" ("A", "B")
SELECT
SELECT
step."id",
$1::uuid
FROM
FROM
unnest($2::text[]) AS parent_readable_id
JOIN
JOIN
"Step" AS step ON step."readableId" = parent_readable_id AND step."jobId" = $3::uuid
`
@@ -121,32 +121,32 @@ const countWorkflows = `-- name: CountWorkflows :one
SELECT
count(workflows) OVER() AS total
FROM
"Workflow" as workflows
"Workflow" as workflows
WHERE
workflows."tenantId" = $1 AND
(
$2::text IS NULL OR
workflows."id" IN (
SELECT
SELECT
DISTINCT ON(t1."workflowId") t1."workflowId"
FROM
FROM
"WorkflowVersion" AS t1
LEFT JOIN "WorkflowTriggers" AS j2 ON j2."workflowVersionId" = t1."id"
WHERE
LEFT JOIN "WorkflowTriggers" AS j2 ON j2."workflowVersionId" = t1."id"
WHERE
(
j2."id" IN (
SELECT
t3."parentId"
FROM
SELECT
t3."parentId"
FROM
"public"."WorkflowTriggerEventRef" AS t3
WHERE
WHERE
t3."eventKey" = $2::text
AND t3."parentId" IS NOT NULL
)
AND j2."id" IS NOT NULL
)
AND j2."id" IS NOT NULL
AND t1."workflowId" IS NOT NULL
)
ORDER BY
ORDER BY
t1."workflowId" DESC, t1."order" DESC
)
)
@@ -836,13 +836,13 @@ func (q *Queries) LinkOnFailureJob(ctx context.Context, db DBTX, arg LinkOnFailu
}
const listWorkflows = `-- name: ListWorkflows :many
SELECT
SELECT
workflows.id, workflows."createdAt", workflows."updatedAt", workflows."deletedAt", workflows."tenantId", workflows.name, workflows.description
FROM (
SELECT
DISTINCT ON(workflows."id") workflows.id, workflows."createdAt", workflows."updatedAt", workflows."deletedAt", workflows."tenantId", workflows.name, workflows.description
FROM
"Workflow" as workflows
"Workflow" as workflows
LEFT JOIN
(
SELECT id, "createdAt", "updatedAt", "deletedAt", version, "order", "workflowId", checksum, "scheduleTimeout", "onFailureJobId" FROM "WorkflowVersion" as workflowVersion ORDER BY workflowVersion."order" DESC LIMIT 1
@@ -852,31 +852,31 @@ FROM (
LEFT JOIN
"WorkflowTriggerEventRef" as workflowTriggerEventRef ON workflowTrigger."id" = workflowTriggerEventRef."parentId"
WHERE
workflows."tenantId" = $1
workflows."tenantId" = $1
AND
(
$2::text IS NULL OR
workflows."id" IN (
SELECT
SELECT
DISTINCT ON(t1."workflowId") t1."workflowId"
FROM
FROM
"WorkflowVersion" AS t1
LEFT JOIN "WorkflowTriggers" AS j2 ON j2."workflowVersionId" = t1."id"
WHERE
LEFT JOIN "WorkflowTriggers" AS j2 ON j2."workflowVersionId" = t1."id"
WHERE
(
j2."id" IN (
SELECT
t3."parentId"
FROM
SELECT
t3."parentId"
FROM
"public"."WorkflowTriggerEventRef" AS t3
WHERE
WHERE
t3."eventKey" = $2::text
AND t3."parentId" IS NOT NULL
)
AND j2."id" IS NOT NULL
)
AND j2."id" IS NOT NULL
AND t1."workflowId" IS NOT NULL
)
ORDER BY
ORDER BY
t1."workflowId" DESC
)
)
@@ -944,7 +944,7 @@ LEFT JOIN "Workflow" AS j1 ON j1.id = "WorkflowVersion"."workflowId"
LEFT JOIN "WorkflowTriggers" AS j2 ON j2."workflowVersionId" = "WorkflowVersion"."id"
WHERE
(j1."tenantId"::uuid = $1 AND j1.id IS NOT NULL)
AND
AND
(j2.id IN (
SELECT t3."parentId"
FROM "WorkflowTriggerEventRef" AS t3
@@ -992,30 +992,30 @@ WHERE
(
$2::text IS NULL OR
workflow."id" IN (
SELECT
SELECT
DISTINCT ON(t1."workflowId") t1."workflowId"
FROM
FROM
"WorkflowVersion" AS t1
LEFT JOIN "WorkflowTriggers" AS j2 ON j2."workflowVersionId" = t1."id"
WHERE
LEFT JOIN "WorkflowTriggers" AS j2 ON j2."workflowVersionId" = t1."id"
WHERE
(
j2."id" IN (
SELECT
t3."parentId"
FROM
SELECT
t3."parentId"
FROM
"public"."WorkflowTriggerEventRef" AS t3
WHERE
WHERE
t3."eventKey" = $2::text
AND t3."parentId" IS NOT NULL
)
AND j2."id" IS NOT NULL
)
AND j2."id" IS NOT NULL
AND t1."workflowId" IS NOT NULL
)
ORDER BY
ORDER BY
t1."workflowId" DESC, t1."order" DESC
)
)
ORDER BY
ORDER BY
workflow."id" DESC, runs."createdAt" DESC
`
@@ -1080,7 +1080,7 @@ VALUES (
LOWER($1::text),
$2::uuid
)
ON CONFLICT ("tenantId", "actionId") DO UPDATE
ON CONFLICT ("tenantId", "actionId") DO UPDATE
SET
"tenantId" = EXCLUDED."tenantId"
WHERE
View File
@@ -158,7 +158,7 @@ func (r *eventAPIRepository) ListEventKeys(tenantId string) ([]string, error) {
err := r.client.Prisma.QueryRaw(
`
SELECT DISTINCT ON("Event"."key") "Event"."key"
SELECT DISTINCT ON("Event"."key") "Event"."key"
FROM "Event"
WHERE
"Event"."tenantId"::text = $1