mirror of
https://github.com/hatchet-dev/hatchet.git
synced 2026-05-01 23:20:09 -05:00
Wip consistent naming (#1480)
* readme changes * test readme changes * try spaces * readme indent * add task orchestration features * newline * fix: headings and indent * add flow control * add scheduling * rest of readme changes * Update README.md * Update README.md * fix: small setup guide improvements * Apply suggestions from code review Co-authored-by: Matt Kaye <mrkaye97@gmail.com> * Update frontend/docs/pages/home/index.mdx Co-authored-by: Gabe Ruttner <gabriel.ruttner@gmail.com> * wip * wip * naming * task * lint * compute * fix: add a bunch of redirects * fix: dynamic * link to docs * revert * revert some changes --------- Co-authored-by: Alexander Belanger <alexander@hatchet.run> Co-authored-by: abelanger5 <belanger@sas.upenn.edu> Co-authored-by: Matt Kaye <mrkaye97@gmail.com>
This commit is contained in:
@@ -110,9 +110,9 @@ export const columns = ({
|
||||
},
|
||||
},
|
||||
{
|
||||
accessorKey: 'Workflow Runs',
|
||||
accessorKey: 'Runs',
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title="Workflow Runs" />
|
||||
<DataTableColumnHeader column={column} title="Runs" />
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
if (!row.original.workflowRunSummary) {
|
||||
|
||||
@@ -458,7 +458,7 @@ function EventsTable() {
|
||||
},
|
||||
{
|
||||
columnId: 'workflows',
|
||||
title: 'Workflow',
|
||||
title: 'Task',
|
||||
options: workflowKeyFilters,
|
||||
},
|
||||
{
|
||||
@@ -514,9 +514,7 @@ function ExpandedEventContent({ event }: { event: Event }) {
|
||||
</h3>
|
||||
<Separator />
|
||||
<EventDataSection event={event} />
|
||||
<h3 className="text-lg font-bold leading-tight text-foreground">
|
||||
Workflow Runs
|
||||
</h3>
|
||||
<h3 className="text-lg font-bold leading-tight text-foreground">Runs</h3>
|
||||
<Separator />
|
||||
<EventWorkflowRunsList event={event} />
|
||||
</DialogContent>
|
||||
|
||||
@@ -116,8 +116,8 @@ function Sidebar({ className, memberships, currTenant }: SidebarProps) {
|
||||
<SidebarButtonPrimary
|
||||
key={1}
|
||||
onNavLinkClick={onNavLinkClick}
|
||||
to="/v1/workflow-runs"
|
||||
name="Workflow Runs"
|
||||
to="/v1/runs"
|
||||
name="Runs"
|
||||
icon={<PlayIcon className="mr-2 h-4 w-4" />}
|
||||
/>
|
||||
</div>
|
||||
@@ -157,8 +157,8 @@ function Sidebar({ className, memberships, currTenant }: SidebarProps) {
|
||||
<SidebarButtonPrimary
|
||||
key={1}
|
||||
onNavLinkClick={onNavLinkClick}
|
||||
to="/v1/workflows"
|
||||
name="Workflows"
|
||||
to="/v1/tasks"
|
||||
name="Tasks & Workflows"
|
||||
icon={<Squares2X2Icon className="mr-2 h-4 w-4" />}
|
||||
/>
|
||||
<SidebarButtonPrimary
|
||||
|
||||
@@ -40,7 +40,7 @@ export function BillingRequired({
|
||||
</div>
|
||||
|
||||
<h3 className="text-2xl font-semibold mb-2">
|
||||
Ready to supercharge your workflows?
|
||||
Ready to supercharge your task runs?
|
||||
</h3>
|
||||
|
||||
<p className="text-muted-foreground mb-6">
|
||||
|
||||
+2
-2
@@ -12,7 +12,7 @@ export const columns: ColumnDef<ManagedWorker>[] = [
|
||||
<DataTableColumnHeader column={column} title="Name" />
|
||||
),
|
||||
cell: ({ row }) => (
|
||||
<Link to={`/v1/workflows/${row.original.metadata.id}`}>
|
||||
<Link to={`/v1/tasks/${row.original.metadata.id}`}>
|
||||
<div className="cursor-pointer hover:underline min-w-fit whitespace-nowrap text-md p-2">
|
||||
{row.original.name}
|
||||
</div>
|
||||
@@ -52,7 +52,7 @@ export const columns: ColumnDef<ManagedWorker>[] = [
|
||||
cell: ({ row }) => {
|
||||
return (
|
||||
<div className="flex gap-2 justify-end">
|
||||
<Link to={`/v1/workflows/${row.original.metadata.id}`}>
|
||||
<Link to={`/v1/tasks/${row.original.metadata.id}`}>
|
||||
<div className="cursor-pointer hover:underline min-w-fit whitespace-nowrap text-md p-2">
|
||||
<ChevronRightIcon
|
||||
className="h-5 w-5 flex-none text-gray-700 dark:text-gray-300"
|
||||
|
||||
@@ -153,12 +153,12 @@ export default function DemoTemplate() {
|
||||
}
|
||||
}, 1000);
|
||||
} catch (error) {
|
||||
console.error('Failed to trigger workflow:', error);
|
||||
console.error('Failed to trigger run:', error);
|
||||
setTriggering(false);
|
||||
}
|
||||
};
|
||||
|
||||
// Automatically trigger workflow runs when success step is opened
|
||||
// Automatically trigger task runs when success step is opened
|
||||
useEffect(() => {
|
||||
if (successStepOpen && workflowId && !allRunsTriggered) {
|
||||
const triggerRuns = async () => {
|
||||
@@ -314,7 +314,7 @@ print(result)
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
v1_workflows "github.com/hatchet-dev/hatchet/examples/v1/workflows"
|
||||
v1_workflows "github.com/hatchet-dev/hatchet/examples/v1/tasks"
|
||||
v1 "github.com/hatchet-dev/hatchet/pkg/v1"
|
||||
"github.com/hatchet-dev/hatchet/pkg/v1/workflow"
|
||||
"github.com/joho/godotenv"
|
||||
@@ -876,22 +876,20 @@ func main() {
|
||||
•
|
||||
</span>
|
||||
<span>
|
||||
Three demo workflow runs have been triggered for you
|
||||
Three demo task runs have been triggered for you
|
||||
</span>
|
||||
</li>
|
||||
<li className="flex items-start">
|
||||
<span className="text-primary mr-2 flex items-center mt-0.5">
|
||||
•
|
||||
</span>
|
||||
<span>
|
||||
Use the API to trigger additional workflow runs
|
||||
</span>
|
||||
<span>Use the API to trigger additional task runs</span>
|
||||
</li>
|
||||
<li className="flex items-start">
|
||||
<span className="text-primary mr-2 flex items-center mt-0.5">
|
||||
•
|
||||
</span>
|
||||
<span>Monitor workflow runs in the dashboard</span>
|
||||
<span>Monitor task runs in the dashboard</span>
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
@@ -906,14 +904,14 @@ func main() {
|
||||
|
||||
{/* Secondary action buttons */}
|
||||
<div className="grid grid-cols-2 gap-3">
|
||||
<Link to="/v1/workflow-runs">
|
||||
<Link to="/v1/runs">
|
||||
<Button variant="outline" className="w-full">
|
||||
View Workflow Runs
|
||||
View Runs
|
||||
</Button>
|
||||
</Link>
|
||||
<Link to="/v1/workflows">
|
||||
<Link to="/v1/tasks">
|
||||
<Button variant="outline" className="w-full">
|
||||
View Workflows
|
||||
View RegisteredTasks
|
||||
</Button>
|
||||
</Link>
|
||||
</div>
|
||||
|
||||
@@ -61,7 +61,7 @@ export const columns = ({
|
||||
cell: ({ row }) => (
|
||||
<div className="flex flex-row items-center gap-4">
|
||||
<div className="cursor-pointer hover:underline min-w-fit whitespace-nowrap">
|
||||
<Link to={`/v1/workflows/${row.original.workflowId}`}>
|
||||
<Link to={`/v1/tasks/${row.original.workflowId}`}>
|
||||
{row.original.workflowName}
|
||||
</Link>
|
||||
</div>
|
||||
@@ -128,7 +128,7 @@ export const columns = ({
|
||||
onClick: () => onDeleteClick(row.original),
|
||||
disabled:
|
||||
row.original.method !== 'API'
|
||||
? 'This cron was created via the workflow code definition. Delete it from the workflow definition instead.'
|
||||
? 'This cron was created via a code definition. Delete it from the code definition instead.'
|
||||
: undefined,
|
||||
},
|
||||
]}
|
||||
|
||||
@@ -25,7 +25,7 @@ export const columns = ({
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
return row.original.workflowRunId ? (
|
||||
<Link to={`/v1/workflow-runs/${row.original.workflowRunId}`}>
|
||||
<Link to={`/v1/runs/${row.original.workflowRunId}`}>
|
||||
<div className="cursor-pointer hover:underline min-w-fit whitespace-nowrap">
|
||||
{row.original.workflowRunName}
|
||||
</div>
|
||||
@@ -61,7 +61,7 @@ export const columns = ({
|
||||
cell: ({ row }) => (
|
||||
<div className="flex flex-row items-center gap-4">
|
||||
<div className="cursor-pointer hover:underline min-w-fit whitespace-nowrap">
|
||||
<a href={`/v1/workflows/${row.original.workflowId}`}>
|
||||
<a href={`/v1/tasks/${row.original.workflowId}`}>
|
||||
{row.original.workflowName}
|
||||
</a>
|
||||
</div>
|
||||
@@ -114,7 +114,7 @@ export const columns = ({
|
||||
onClick: () => onDeleteClick(row.original),
|
||||
disabled:
|
||||
row.original.method !== 'API'
|
||||
? 'Cannot delete scheduled workflow created via workflow code definition'
|
||||
? 'Cannot delete scheduled run created via code definition'
|
||||
: undefined,
|
||||
},
|
||||
]}
|
||||
|
||||
@@ -334,7 +334,7 @@ export function ScheduledRunsTable({
|
||||
}}
|
||||
/>
|
||||
<DataTable
|
||||
emptyState={<>No workflow runs found with the given filters.</>}
|
||||
emptyState={<>No runs found with the given filters.</>}
|
||||
error={workflowKeysError}
|
||||
isLoading={isLoading}
|
||||
columns={columns({
|
||||
|
||||
@@ -14,9 +14,7 @@ export default function RateLimits() {
|
||||
<h2 className="text-2xl font-bold leading-tight text-foreground">
|
||||
Scheduled Runs
|
||||
</h2>
|
||||
<Button onClick={() => setTriggerWorkflow(true)}>
|
||||
Schedule Workflow
|
||||
</Button>
|
||||
<Button onClick={() => setTriggerWorkflow(true)}>Schedule Run</Button>
|
||||
</div>
|
||||
<TriggerWorkflowForm
|
||||
defaultTimingOption="schedule"
|
||||
|
||||
@@ -161,7 +161,7 @@ const CancelByExternalIdsContent = ({ label, params }: ModalContentProps) => {
|
||||
return (
|
||||
<div className="flex flex-col gap-y-4">
|
||||
<p className="text-md">
|
||||
Confirm to {label.toLowerCase()} the following workflow runs:
|
||||
Confirm to {label.toLowerCase()} the following runs:
|
||||
</p>
|
||||
<ul className="list-disc pl-4 ml-4">
|
||||
{displayNames?.slice(0, 10).map((record) => (
|
||||
@@ -199,8 +199,8 @@ const ModalContent = ({ label, params }: ModalContentProps) => {
|
||||
return (
|
||||
<div className="gap-y-4 flex flex-col">
|
||||
<p className="text-md">
|
||||
Confirm to {label.toLowerCase()} all workflow runs matching the
|
||||
following filters:
|
||||
Confirm to {label.toLowerCase()} all runs matching the following
|
||||
filters:
|
||||
</p>
|
||||
<div className="grid grid-cols-2 gap-x-2 items-start justify-start gap-y-4">
|
||||
{statusToolbarFilter && (
|
||||
@@ -325,7 +325,7 @@ const ConfirmActionModal = ({
|
||||
<DialogContent className="sm:max-w-[800px] py-12 max-h-screen overflow-auto">
|
||||
<DialogHeader className="gap-2">
|
||||
<div className="flex flex-row justify-between items-center w-full">
|
||||
<DialogTitle>{label} workflow runs</DialogTitle>
|
||||
<DialogTitle>{label} runs</DialogTitle>
|
||||
</div>
|
||||
</DialogHeader>
|
||||
|
||||
|
||||
+2
-2
@@ -107,7 +107,7 @@ export function UpdateTenantAlertingSettings({
|
||||
}}
|
||||
/>
|
||||
<Label htmlFor="awrf" className="text-sm">
|
||||
Enable Workflow Run Failure Alerts
|
||||
Enable Run Failure Alerts
|
||||
</Label>
|
||||
</div>
|
||||
|
||||
@@ -115,7 +115,7 @@ export function UpdateTenantAlertingSettings({
|
||||
{enabledWorkflowAlerting && (
|
||||
<div className="grid gap-2">
|
||||
<Label htmlFor="maxAlertingFrequency">
|
||||
Max Workflow Run Failure Alerting Frequency
|
||||
Max Run Failure Alerting Frequency
|
||||
</Label>
|
||||
<Controller
|
||||
control={control}
|
||||
|
||||
@@ -38,7 +38,7 @@ export default function Alerting() {
|
||||
Alerting
|
||||
</h2>
|
||||
<p className="text-gray-700 dark:text-gray-300 my-4">
|
||||
Manage alerts to get notified on workflow failure.
|
||||
Manage alerts to get notified on task failure.
|
||||
</p>
|
||||
<Separator className="my-4" />
|
||||
<AlertingSettings />
|
||||
|
||||
@@ -46,7 +46,7 @@ const WorkerSlotGrid: React.FC<WorkerSlotGridProps> = ({ slots = [] }) => {
|
||||
{slot.status ? (
|
||||
<>
|
||||
<div>
|
||||
<Link to={'/v1/workflow-runs/' + slot.workflowRunId}>
|
||||
<Link to={'/v1/runs/' + slot.workflowRunId}>
|
||||
<div className="pl-0 cursor-pointer hover:underline min-w-fit whitespace-nowrap">
|
||||
{slot.actionId}:{slot.workflowRunId?.split('-')[0]}
|
||||
</div>
|
||||
|
||||
@@ -14,7 +14,7 @@ export const columns: ColumnDef<RecentStepRuns>[] = [
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
return (
|
||||
<Link to={'/v1/workflow-runs/' + row.original.workflowRunId}>
|
||||
<Link to={'/v1/runs/' + row.original.workflowRunId}>
|
||||
<div className="pl-0 cursor-pointer hover:underline min-w-fit whitespace-nowrap">
|
||||
{row.original.actionId}
|
||||
</div>
|
||||
|
||||
@@ -204,7 +204,7 @@ export default function ExpandedWorkflowRun() {
|
||||
</Button>
|
||||
</div>
|
||||
<div className="mb-4 text-sm text-gray-700 dark:text-gray-300">
|
||||
A slot represents one step run on a worker to limit load.{' '}
|
||||
A slot represents one task run on a worker to limit load.{' '}
|
||||
<a href="https://docs.hatchet.run/home/workers" className="underline">
|
||||
Learn more.
|
||||
</a>
|
||||
@@ -226,13 +226,18 @@ export default function ExpandedWorkflowRun() {
|
||||
/>
|
||||
<Separator className="my-4" />
|
||||
<h3 className="text-xl font-bold leading-tight text-foreground mb-4">
|
||||
Worker Actions
|
||||
Registered Tasks
|
||||
</h3>
|
||||
<div className="flex-wrap flex flex-row gap-4">
|
||||
{worker.actions?.map((action) => {
|
||||
const [name, method] = action.split(':');
|
||||
|
||||
const printable = name === method ? name : action;
|
||||
// FIXME Link to the task
|
||||
|
||||
return (
|
||||
<Button variant="outline" key={action}>
|
||||
{action}
|
||||
<Button variant="outline" key={printable}>
|
||||
{printable}
|
||||
</Button>
|
||||
);
|
||||
})}
|
||||
|
||||
@@ -49,7 +49,7 @@ export default function Webhooks() {
|
||||
</Button>
|
||||
</div>
|
||||
<p className="text-gray-700 dark:text-gray-300 my-4">
|
||||
Assign workflow runs to a HTTP endpoint.{' '}
|
||||
Assign task runs to a HTTP endpoint.{' '}
|
||||
<a
|
||||
className="underline"
|
||||
target="_blank"
|
||||
@@ -76,7 +76,7 @@ export default function Webhooks() {
|
||||
<DropdownMenu>
|
||||
<DropdownMenuTrigger asChild>
|
||||
<Button
|
||||
aria-label="Workflow Actions"
|
||||
aria-label="Registered Tasks"
|
||||
size="icon"
|
||||
variant="ghost"
|
||||
>
|
||||
|
||||
+1
-1
@@ -406,7 +406,7 @@ export function StepRunPlayground({
|
||||
stepRun.childWorkflowRuns.length > 0 && (
|
||||
<div className="flex flex-col gap-4 mt-4">
|
||||
<div className="text-lg font-semibold tracking-tight mb-4">
|
||||
Child Workflow Runs
|
||||
Child Runs
|
||||
</div>
|
||||
<ChildWorkflowRuns
|
||||
refetchInterval={workflowRun.status === 'RUNNING' ? 1000 : 5000}
|
||||
|
||||
@@ -37,9 +37,7 @@ export const V1RunDetailHeader = () => {
|
||||
</BreadcrumbItem>
|
||||
<BreadcrumbSeparator />
|
||||
<BreadcrumbItem>
|
||||
<BreadcrumbLink href="/v1/workflow-runs">
|
||||
Workflow Runs
|
||||
</BreadcrumbLink>
|
||||
<BreadcrumbLink href="/v1/runs">Runs</BreadcrumbLink>
|
||||
</BreadcrumbItem>
|
||||
<BreadcrumbSeparator />
|
||||
<BreadcrumbItem>
|
||||
|
||||
+3
-3
@@ -62,10 +62,10 @@ const TaskRunPermalinkOrBacklink = ({
|
||||
);
|
||||
} else if (taskRun.workflowRunExternalId) {
|
||||
return (
|
||||
<Link to={`/v1/workflow-runs/${taskRun.workflowRunExternalId}`}>
|
||||
<Link to={`/v1/runs/${taskRun.workflowRunExternalId}`}>
|
||||
<Button size={'sm'} className="px-2 py-2 gap-2" variant={'outline'}>
|
||||
<LinkIcon className="w-4 h-4" />
|
||||
View Workflow Run
|
||||
View Run
|
||||
</Button>
|
||||
</Link>
|
||||
);
|
||||
@@ -106,7 +106,7 @@ export const TaskRunDetail = ({
|
||||
<div className="flex flex-row gap-4 items-center">
|
||||
{taskRun.status && <V1RunIndicator status={taskRun.status} />}
|
||||
<h3 className="text-lg font-mono font-semibold leading-tight tracking-tight text-foreground flex flex-row gap-4 items-center">
|
||||
{taskRun.displayName || 'Step Run Detail'}
|
||||
{taskRun.displayName || 'Task Run Detail'}
|
||||
</h3>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -80,7 +80,7 @@ export default memo(({ data }: { data: NodeData }) => {
|
||||
{data.childWorkflowsCount && data.taskRun ? (
|
||||
<Link
|
||||
to={{
|
||||
pathname: '/v1/workflow-runs',
|
||||
pathname: '/v1/runs',
|
||||
search: new URLSearchParams({
|
||||
...Object.fromEntries(new URLSearchParams(location.search)),
|
||||
[queryParamNames.parentTaskExternalId]: data.taskRun.metadata.id,
|
||||
|
||||
@@ -104,7 +104,7 @@ export const columns: (
|
||||
cell: ({ row }) => {
|
||||
if (row.getCanExpand()) {
|
||||
return (
|
||||
<Link to={'/v1/workflow-runs/' + row.original.metadata.id}>
|
||||
<Link to={'/v1/runs/' + row.original.metadata.id}>
|
||||
<div className="cursor-pointer hover:underline min-w-fit whitespace-nowrap">
|
||||
{row.original.displayName}
|
||||
</div>
|
||||
@@ -154,7 +154,7 @@ export const columns: (
|
||||
return (
|
||||
<div className="min-w-fit whitespace-nowrap">
|
||||
{(workflowId && workflowName && (
|
||||
<a href={`/v1/workflows/${workflowId}`}>{workflowName}</a>
|
||||
<a href={`/v1/tasks/${workflowId}`}>{workflowName}</a>
|
||||
)) ||
|
||||
'N/A'}
|
||||
</div>
|
||||
|
||||
+1
-1
@@ -406,7 +406,7 @@ export function StepRunPlayground({
|
||||
stepRun.childWorkflowRuns.length > 0 && (
|
||||
<div className="flex flex-col gap-4 mt-4">
|
||||
<div className="text-lg font-semibold tracking-tight mb-4">
|
||||
Child Workflow Runs
|
||||
Child Runs
|
||||
</div>
|
||||
<ChildWorkflowRuns
|
||||
refetchInterval={workflowRun.status === 'RUNNING' ? 1000 : 5000}
|
||||
|
||||
@@ -43,7 +43,7 @@ export const columns: (
|
||||
<DataTableColumnHeader column={column} title="Run Id" />
|
||||
),
|
||||
cell: ({ row }) => (
|
||||
<Link to={'/v1/workflow-runs/' + row.original.metadata.id}>
|
||||
<Link to={'/v1/runs/' + row.original.metadata.id}>
|
||||
<div className="cursor-pointer hover:underline min-w-fit whitespace-nowrap">
|
||||
{row.original.displayName || row.original.metadata.id}
|
||||
</div>
|
||||
@@ -74,7 +74,7 @@ export const columns: (
|
||||
return (
|
||||
<div className="min-w-fit whitespace-nowrap">
|
||||
{(workflow && (
|
||||
<a href={`/v1/workflows/${workflowId}`}>{workflowName}</a>
|
||||
<a href={`/v1/tasks/${workflowId}`}>{workflowName}</a>
|
||||
)) ||
|
||||
'N/A'}
|
||||
</div>
|
||||
|
||||
+19
-28
@@ -17,8 +17,6 @@ import { Button } from '@/components/v1/ui/button';
|
||||
import invariant from 'tiny-invariant';
|
||||
import { useApiError } from '@/lib/hooks';
|
||||
import { useMutation, useQuery } from '@tanstack/react-query';
|
||||
import { PlusIcon } from '@heroicons/react/24/outline';
|
||||
import { cn } from '@/lib/utils';
|
||||
import { useNavigate, useOutletContext } from 'react-router-dom';
|
||||
import { TenantContextType } from '@/lib/outlet';
|
||||
import { CodeEditor } from '@/components/v1/ui/code-editor';
|
||||
@@ -86,6 +84,7 @@ export function TriggerWorkflowForm({
|
||||
|
||||
const { data: workflowKeys, isFetched } = useQuery({
|
||||
...queries.workflows.list(tenant.metadata.id, { limit: 200 }),
|
||||
refetchInterval: 15000,
|
||||
});
|
||||
|
||||
const workflow = useMemo(() => {
|
||||
@@ -127,7 +126,7 @@ export function TriggerWorkflowForm({
|
||||
return;
|
||||
}
|
||||
|
||||
navigate(`/v1/workflow-runs/${workflowRun.run.metadata.id}`);
|
||||
navigate(`/v1/runs/${workflowRun.run.metadata.id}`);
|
||||
},
|
||||
onError: handleApiError,
|
||||
});
|
||||
@@ -277,18 +276,18 @@ export function TriggerWorkflowForm({
|
||||
>
|
||||
<DialogContent className="sm:max-w-[625px] py-12 max-h-screen overflow-auto">
|
||||
<DialogHeader className="gap-2">
|
||||
<DialogTitle>Trigger Workflow</DialogTitle>
|
||||
<DialogTitle>Trigger Run</DialogTitle>
|
||||
<DialogDescription className="text-muted-foreground">
|
||||
Trigger a workflow to run now, at a scheduled time, or on a cron
|
||||
schedule.
|
||||
Trigger a task or workflow to run now, at a scheduled time, or on a
|
||||
cron schedule.
|
||||
</DialogDescription>
|
||||
</DialogHeader>
|
||||
|
||||
<div className="font-bold">Workflow</div>
|
||||
<div className="font-bold">Task or Workflow</div>
|
||||
<Combobox
|
||||
values={selectedWorkflowId ? [selectedWorkflowId] : []}
|
||||
setValues={(values) => setSelectedWorkflowId(values[0])}
|
||||
title="Select Workflow"
|
||||
title="Select Task or Workflow"
|
||||
options={workflowKeys?.rows?.map((w) => ({
|
||||
value: w.metadata.id,
|
||||
label: w.name,
|
||||
@@ -428,27 +427,19 @@ export function TriggerWorkflowForm({
|
||||
</Tabs>
|
||||
</div>
|
||||
|
||||
<Button
|
||||
className="w-fit mt-6"
|
||||
disabled={
|
||||
triggerNowMutation.isPending ||
|
||||
triggerScheduleMutation.isPending ||
|
||||
triggerCronMutation.isPending
|
||||
}
|
||||
onClick={handleSubmit}
|
||||
>
|
||||
<PlusIcon
|
||||
className={cn(
|
||||
<div className="flex justify-end">
|
||||
<Button
|
||||
className="w-fit mt-6"
|
||||
disabled={
|
||||
triggerNowMutation.isPending ||
|
||||
triggerScheduleMutation.isPending ||
|
||||
triggerCronMutation.isPending
|
||||
? 'rotate-180'
|
||||
: '',
|
||||
'h-4 w-4 mr-2',
|
||||
)}
|
||||
/>
|
||||
Trigger workflow
|
||||
</Button>
|
||||
triggerScheduleMutation.isPending ||
|
||||
triggerCronMutation.isPending
|
||||
}
|
||||
onClick={handleSubmit}
|
||||
>
|
||||
Run Task
|
||||
</Button>
|
||||
</div>
|
||||
{(errors.length > 0 ||
|
||||
triggerNowMutation.error ||
|
||||
triggerScheduleMutation.error ||
|
||||
|
||||
@@ -85,7 +85,7 @@ export default function ExpandedWorkflow() {
|
||||
return res.data;
|
||||
},
|
||||
onSuccess: () => {
|
||||
navigate('/v1/workflows');
|
||||
navigate('/v1/tasks');
|
||||
},
|
||||
});
|
||||
|
||||
|
||||
@@ -30,7 +30,7 @@ export const columns: ColumnDef<Workflow>[] = [
|
||||
<DataTableColumnHeader column={column} title="Name" />
|
||||
),
|
||||
cell: ({ row }) => (
|
||||
<Link to={`/v1/workflows/${row.original.metadata.id}`}>
|
||||
<Link to={`/v1/tasks/${row.original.metadata.id}`}>
|
||||
<div className="cursor-pointer hover:underline min-w-fit whitespace-nowrap text-md p-2">
|
||||
{row.original.name}
|
||||
</div>
|
||||
@@ -70,7 +70,7 @@ export const columns: ColumnDef<Workflow>[] = [
|
||||
cell: ({ row }) => {
|
||||
return (
|
||||
<div className="flex gap-2 justify-end">
|
||||
<Link to={`/v1/workflows/${row.original.metadata.id}`}>
|
||||
<Link to={`/v1/tasks/${row.original.metadata.id}`}>
|
||||
<div className="cursor-pointer hover:underline min-w-fit whitespace-nowrap text-md p-2">
|
||||
<ChevronRightIcon
|
||||
className="h-5 w-5 flex-none text-gray-700 dark:text-gray-300"
|
||||
|
||||
@@ -86,7 +86,7 @@ export function WorkflowTable() {
|
||||
<div className="px-4 py-5 sm:p-6">
|
||||
<div className="flex flex-row justify-between items-center">
|
||||
<h3 className="text-lg leading-6 font-medium text-foreground">
|
||||
<Link to={`/v1/workflows/${data.metadata?.id}`}>{data.name}</Link>
|
||||
<Link to={`/v1/tasks/${data.metadata?.id}`}>{data.name}</Link>
|
||||
</h3>
|
||||
{data.isPaused ? (
|
||||
<Badge variant="inProgress">Paused</Badge>
|
||||
@@ -103,7 +103,7 @@ export function WorkflowTable() {
|
||||
</div>
|
||||
<div className="px-4 py-4 sm:px-6">
|
||||
<div className="text-sm text-background-secondary">
|
||||
<Link to={`/v1/workflows/${data.metadata?.id}`}>
|
||||
<Link to={`/v1/tasks/${data.metadata?.id}`}>
|
||||
<Button>View Workflow</Button>
|
||||
</Link>
|
||||
</div>
|
||||
|
||||
@@ -11,7 +11,7 @@ export const WorkflowDefinitionLink = ({
|
||||
<Link to={`/v1/workflows/${workflowId}`} target="_blank" rel="noreferrer">
|
||||
<Button size={'sm'} className="px-2 py-2 gap-2" variant="outline">
|
||||
<ArrowTopRightIcon className="w-4 h-4" />
|
||||
Workflow Definition
|
||||
Configuration
|
||||
</Button>
|
||||
</Link>
|
||||
);
|
||||
|
||||
+1
-1
@@ -51,7 +51,7 @@ export const DefaultOnboardingWorkflow: React.FC<{
|
||||
return;
|
||||
}
|
||||
|
||||
navigate(`/v1/workflow-runs/${workflowRun.run.metadata.id}`);
|
||||
navigate(`/v1/runs/${workflowRun.run.metadata.id}`);
|
||||
},
|
||||
});
|
||||
|
||||
|
||||
@@ -52,7 +52,7 @@ export default function TenantInvites() {
|
||||
navigate(`/workflow-runs?tenant=${tenantId}`);
|
||||
break;
|
||||
case TenantVersion.V1:
|
||||
navigate(`/v1/workflow-runs?tenant=${tenantId}`);
|
||||
navigate(`/v1/runs?tenant=${tenantId}`);
|
||||
break;
|
||||
default:
|
||||
navigate('/');
|
||||
|
||||
@@ -362,7 +362,18 @@ export const routes: RouteObject[] = [
|
||||
lazy: async () => {
|
||||
return {
|
||||
loader: function () {
|
||||
return redirect('/v1/workflow-runs');
|
||||
return redirect('/v1/runs');
|
||||
},
|
||||
};
|
||||
},
|
||||
},
|
||||
{
|
||||
path: '/v1/workflow-runs',
|
||||
// FIXME: i'm not sure why we're still redirecting from root to here
|
||||
lazy: async () => {
|
||||
return {
|
||||
loader: function () {
|
||||
return redirect('/v1/runs');
|
||||
},
|
||||
};
|
||||
},
|
||||
@@ -423,6 +434,17 @@ export const routes: RouteObject[] = [
|
||||
},
|
||||
{
|
||||
path: '/v1/workflows',
|
||||
// FIXME: i'm not sure why we're still redirecting from root to here
|
||||
lazy: async () => {
|
||||
return {
|
||||
loader: function () {
|
||||
return redirect('/v1/tasks');
|
||||
},
|
||||
};
|
||||
},
|
||||
},
|
||||
{
|
||||
path: '/v1/tasks',
|
||||
lazy: async () =>
|
||||
import('./pages/main/v1/workflows').then((res) => {
|
||||
return {
|
||||
@@ -432,6 +454,17 @@ export const routes: RouteObject[] = [
|
||||
},
|
||||
{
|
||||
path: '/v1/workflows/:workflow',
|
||||
// FIXME: i'm not sure why we're still redirecting from root to here
|
||||
lazy: async () => {
|
||||
return {
|
||||
loader: function ({ params }) {
|
||||
return redirect(`/v1/tasks/${params.workflow}`);
|
||||
},
|
||||
};
|
||||
},
|
||||
},
|
||||
{
|
||||
path: '/v1/tasks/:workflow',
|
||||
lazy: async () =>
|
||||
import('./pages/main/v1/workflows/$workflow').then((res) => {
|
||||
return {
|
||||
@@ -440,7 +473,7 @@ export const routes: RouteObject[] = [
|
||||
}),
|
||||
},
|
||||
{
|
||||
path: '/v1/workflow-runs',
|
||||
path: '/v1/runs',
|
||||
lazy: async () =>
|
||||
import('./pages/main/v1/workflow-runs-v1/index.tsx').then(
|
||||
(res) => {
|
||||
@@ -452,6 +485,17 @@ export const routes: RouteObject[] = [
|
||||
},
|
||||
{
|
||||
path: '/v1/workflow-runs/:run',
|
||||
// FIXME: i'm not sure why we're still redirecting from root to here
|
||||
lazy: async () => {
|
||||
return {
|
||||
loader: function ({ params }) {
|
||||
return redirect(`/v1/runs/${params.run}`);
|
||||
},
|
||||
};
|
||||
},
|
||||
},
|
||||
{
|
||||
path: '/v1/runs/:run',
|
||||
lazy: async () =>
|
||||
import('./pages/main/v1/workflow-runs-v1/$run').then(
|
||||
(res) => {
|
||||
|
||||
@@ -20,6 +20,16 @@ const nextConfig = {
|
||||
},
|
||||
async redirects() {
|
||||
return [
|
||||
{
|
||||
source: '/compute',
|
||||
destination: '/home/compute',
|
||||
permanent: true,
|
||||
},
|
||||
{
|
||||
source: '/compute/:path',
|
||||
destination: '/home/compute',
|
||||
permanent: true,
|
||||
},
|
||||
{
|
||||
source: '/:path((?!home|v1|v0|compute|sdk|contributing|self-hosting|launches|blog|favicon\\.ico|hatchet_logo\\.png|_next/.*|monitoring\-demo\.mp4).*)',
|
||||
destination: '/home/:path*',
|
||||
@@ -30,6 +40,106 @@ const nextConfig = {
|
||||
destination: "https://app.posthog.com/:path*",
|
||||
permanent: false,
|
||||
},
|
||||
{
|
||||
source: "/home/basics/overview",
|
||||
destination: "/home/setup",
|
||||
permanent: false,
|
||||
},
|
||||
{
|
||||
source: "/home/basics/(steps|workflows)",
|
||||
destination: "/home/your-first-task",
|
||||
permanent: false,
|
||||
},
|
||||
{
|
||||
source: "/home/basics/environments",
|
||||
destination: "/home/environments",
|
||||
permanent: false,
|
||||
},
|
||||
{
|
||||
source: "/home/features/concurrency/:path*",
|
||||
destination: "/home/concurrency",
|
||||
permanent: false,
|
||||
},
|
||||
{
|
||||
source: "/home/features/durable-execution",
|
||||
destination: "/home/durable-execution",
|
||||
permanent: false,
|
||||
},
|
||||
{
|
||||
source: "/home/features/retries/:path*",
|
||||
destination: "/home/retry-policies",
|
||||
permanent: false,
|
||||
},
|
||||
{
|
||||
source: "/home/features/errors-and-logging",
|
||||
destination: "/home/logging",
|
||||
permanent: false,
|
||||
},
|
||||
{
|
||||
source: "/home/features/on-failure-step",
|
||||
destination: "/home/on-failure-tasks",
|
||||
permanent: false,
|
||||
},
|
||||
{
|
||||
source: "/home/features/triggering-runs/event-trigger",
|
||||
destination: "/home/run-on-event",
|
||||
permanent: false,
|
||||
},
|
||||
{
|
||||
source: "/home/features/triggering-runs/cron-trigger",
|
||||
destination: "/home/cron-runs",
|
||||
permanent: false,
|
||||
},
|
||||
{
|
||||
source: "/home/features/triggering-runs/schedule-trigger",
|
||||
destination: "/home/scheduled-runs",
|
||||
permanent: false,
|
||||
},
|
||||
{
|
||||
source: "/home/features/rate-limits",
|
||||
destination: "/home/rate-limits",
|
||||
permanent: false,
|
||||
},
|
||||
{
|
||||
source: "/home/features/worker-assignment/overview",
|
||||
destination: "/home/sticky-assignment",
|
||||
permanent: false,
|
||||
},
|
||||
{
|
||||
source: "/home/features/worker-assignment/(overview|sticky-assignment)",
|
||||
destination: "/home/sticky-assignment",
|
||||
permanent: false,
|
||||
},
|
||||
{
|
||||
source: "/home/features/worker-assignment/worker-affinity",
|
||||
destination: "/home/worker-affinity",
|
||||
permanent: false,
|
||||
},
|
||||
{
|
||||
source: "/home/features/additional-metadata",
|
||||
destination: "/home/additional-metadata",
|
||||
permanent: false,
|
||||
},
|
||||
{
|
||||
source: "/home/features/advanced/manual-slot-release",
|
||||
destination: "/home/manual-slot-release",
|
||||
permanent: false,
|
||||
},
|
||||
{
|
||||
source: "/home/features/opentelemetry",
|
||||
destination: "/home/opentelemetry",
|
||||
permanent: false,
|
||||
},
|
||||
{
|
||||
source: "/home/features/cancellation",
|
||||
destination: "/home/cancellation",
|
||||
permanent: false,
|
||||
},
|
||||
{
|
||||
source: "/home/features/child-workflows",
|
||||
destination: "/home/child-spawning",
|
||||
permanent: false,
|
||||
},
|
||||
];
|
||||
},
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ export default {
|
||||
"compute": {
|
||||
"title": "Managed Compute",
|
||||
"type": "page",
|
||||
"href": "/compute",
|
||||
"href": "/home/compute",
|
||||
"index": "Overview",
|
||||
"getting-started": "Getting Started",
|
||||
"cpu": "CPU Machine Types",
|
||||
|
||||
@@ -1,26 +0,0 @@
|
||||
export default {
|
||||
"-- Managed Compute": {
|
||||
"type": "separator",
|
||||
"title": "Managed Compute"
|
||||
},
|
||||
"index": "Overview",
|
||||
"getting-started": "Getting Started",
|
||||
"cpu": "CPU Machine Types",
|
||||
"gpu": "GPU Machine Types",
|
||||
"-- SDKs": {
|
||||
"type": "separator",
|
||||
"title": "SDK Deployment Guides"
|
||||
},
|
||||
"python": {
|
||||
"title": "Python ↗",
|
||||
"href": "/sdks/python-sdk/docker"
|
||||
},
|
||||
"typescript": {
|
||||
"title": "TypeScript ↗",
|
||||
"href": "/sdks/typescript-sdk/docker"
|
||||
},
|
||||
"golang": {
|
||||
"title": "Golang ↗",
|
||||
"href": "/sdks/go-sdk"
|
||||
}
|
||||
}
|
||||
@@ -14,11 +14,11 @@ export default {
|
||||
"workers": "Workers",
|
||||
"running-your-task": "Running Tasks",
|
||||
"environments": "Environments",
|
||||
"--running-workflows": {
|
||||
"--running-tasks": {
|
||||
"title": "Ways of Running Tasks",
|
||||
"type": "separator"
|
||||
},
|
||||
"running-workflows": "Introduction",
|
||||
"running-tasks": "Introduction",
|
||||
"run-with-results": "Run and Wait Trigger",
|
||||
"run-no-wait": "Run Without Wait Trigger",
|
||||
"scheduled-runs": "Scheduled Trigger",
|
||||
@@ -48,6 +48,7 @@ export default {
|
||||
"title": "Directed Acyclic Graphs (DAGs)"
|
||||
},
|
||||
"conditional-workflows": "Conditional Workflows",
|
||||
"on-failure-tasks": "On Failure Tasks",
|
||||
"durable-execution": {
|
||||
"title": "Durable Execution"
|
||||
},
|
||||
@@ -62,7 +63,6 @@ export default {
|
||||
"type": "separator"
|
||||
},
|
||||
"timeouts": "Timeouts",
|
||||
"on-failure-tasks": "On Failure Tasks",
|
||||
"retry-policies": "Retry Policies",
|
||||
"bulk-retries-and-cancellations": "Bulk Retries and Cancellations",
|
||||
|
||||
@@ -84,6 +84,7 @@ export default {
|
||||
"type": "separator"
|
||||
},
|
||||
"docker": "Running with Docker",
|
||||
"compute": "Managed Compute",
|
||||
"worker-healthchecks": "Worker Health Checks",
|
||||
"--cancellation": {
|
||||
"title": "Cancellation",
|
||||
|
||||
@@ -17,7 +17,7 @@ export const getStaticProps = ({}) => getSnippets([TS, Py, GO]);
|
||||
|
||||
# Additional Metadata
|
||||
|
||||
Hatchet allows you to attach arbitrary key-value string pairs to events and workflow runs, which can be used for filtering, searching, or any other lookup purposes. This additional metadata is not part of the event payload or workflow input data but provides supplementary information for better organization and discoverability.
|
||||
Hatchet allows you to attach arbitrary key-value string pairs to events and task runs, which can be used for filtering, searching, or any other lookup purposes. This additional metadata is not part of the event payload or task input data but provides supplementary information for better organization and discoverability.
|
||||
|
||||
<Callout type="info">
|
||||
Additional metadata can be added to `Runs`, `Scheduled Runs`, `Cron Runs`, and
|
||||
@@ -25,9 +25,9 @@ Hatchet allows you to attach arbitrary key-value string pairs to events and work
|
||||
runs.
|
||||
</Callout>
|
||||
|
||||
You can attach additional metadata when pushing events or triggering workflow runs using the Hatchet client libraries:
|
||||
You can attach additional metadata when pushing events or triggering task runs using the Hatchet client libraries:
|
||||
|
||||
<Tabs items={['Event Push', 'Workflow Run Trigger']}>
|
||||
<Tabs items={['Event Push', 'Task Run Trigger']}>
|
||||
<Tabs.Tab>
|
||||
<UniversalTabs items={['Python', 'Typescript', 'Go']}>
|
||||
<Tabs.Tab>
|
||||
@@ -76,9 +76,9 @@ err := c.Event().Push(
|
||||
<UniversalTabs items={['Python', 'Typescript', 'Go']}>
|
||||
<Tabs.Tab>
|
||||
```python
|
||||
simple_workflow.run(
|
||||
simple_task.run(
|
||||
SimpleInput(user_id=1234),
|
||||
options=TriggerWorkflowOptions(
|
||||
options=TriggerTaskOptions(
|
||||
additional_metadata={
|
||||
"hello": "moon" # Arbitrary key-value pair
|
||||
}
|
||||
@@ -88,8 +88,7 @@ simple_workflow.run(
|
||||
</Tabs.Tab>
|
||||
<Tabs.Tab>
|
||||
```typescript
|
||||
const workflowRunId = await hatchet.admin.run_workflow(
|
||||
'user-workflow',
|
||||
const taskRunId = await simple.run(
|
||||
{
|
||||
userId: '1234',
|
||||
},
|
||||
@@ -103,8 +102,8 @@ const workflowRunId = await hatchet.admin.run_workflow(
|
||||
</Tabs.Tab>
|
||||
<Tabs.Tab>
|
||||
```go
|
||||
workflowRunId, err := c.Admin().RunWorkflow(
|
||||
"user-workflow",
|
||||
taskRunId, err := c.Admin().RunWorkflow(
|
||||
"user-task",
|
||||
&userCreateEvent{
|
||||
UserID: "1234",
|
||||
},
|
||||
@@ -118,11 +117,9 @@ workflowRunId, err := c.Admin().RunWorkflow(
|
||||
</Tabs.Tab>
|
||||
</Tabs>
|
||||
|
||||
> **Note:** This feature is in development. Cron, Schedule, and Child Workflow triggers do not currently support additional metadata.
|
||||
|
||||
## Filtering in the Dashboard
|
||||
|
||||
Once you've attached additional metadata to events or workflow runs, this data will be available in the Event and Workflow Run list views in the Hatchet dashboard. You can use the filter input field to search for events or workflow runs based on the additional metadata key-value pairs you've attached.
|
||||
Once you've attached additional metadata to events or task runs, this data will be available in the Event and Task Run list views in the Hatchet dashboard. You can use the filter input field to search for events or task runs based on the additional metadata key-value pairs you've attached.
|
||||
|
||||
For example, you can filter events by the `source` metadata keys to quickly find events originating from a specific source or environment.
|
||||
|
||||
@@ -132,8 +129,8 @@ For example, you can filter events by the `source` metadata keys to quickly find
|
||||
|
||||
Some common use cases for additional metadata include:
|
||||
|
||||
- Tagging events or workflow runs with environment information (e.g., `production`, `staging`, `development`)
|
||||
- Tagging events or task runs with environment information (e.g., `production`, `staging`, `development`)
|
||||
- Specifying the source or origin of events (e.g., `api`, `webhook`, `manual`)
|
||||
- Categorizing events or workflow runs based on business-specific criteria (e.g., `priority`, `region`, `product`)
|
||||
- Categorizing events or task runs based on business-specific criteria (e.g., `priority`, `region`, `product`)
|
||||
|
||||
By leveraging additional metadata, you can enhance the organization, searchability, and discoverability of your events and workflow runs within Hatchet.
|
||||
By leveraging additional metadata, you can enhance the organization, searchability, and discoverability of your events and task runs within Hatchet.
|
||||
|
||||
@@ -11,34 +11,34 @@ export const ReplayPy = {
|
||||
|
||||
export const getStaticProps = ({}) => getSnippets([CancelPy, ReplayPy]);
|
||||
|
||||
## Bulk Cancellations and Replays
|
||||
# Bulk Cancellations and Replays
|
||||
|
||||
V1 add the ability to cancel or replay workflow runs in bulk, which you can now do either in the Hatchet Dashboard or programmatically via the SDKs and the REST API.
|
||||
V1 adds the ability to cancel or replay task runs in bulk, which you can now do either in the Hatchet Dashboard or programmatically via the SDKs and the REST API.
|
||||
|
||||
There are two ways of bulk cancelling or replaying workflows in both cases:
|
||||
There are two ways of bulk cancelling or replaying tasks in both cases:
|
||||
|
||||
1. You can provide a list of workflow run ids to cancel or replay, which will cancel or replay all of the workflows in the list.
|
||||
2. You can provide a list of filters, similar to the list of filters on workflow runs in the Dashboard, and cancel or replay runs matching those filters. For instance, if you wanted to replay all failed runs of a `SimpleWorkflow` from the past fifteen minutes that had the `foo` field in `additional_metadata` set to `bar`, you could apply those filters and replay all of the matching runs.
|
||||
1. You can provide a list of task run ids to cancel or replay, which will cancel or replay all of the tasks in the list.
|
||||
2. You can provide a list of filters, similar to the list of filters on task runs in the Dashboard, and cancel or replay runs matching those filters. For instance, if you wanted to replay all failed runs of a `SimpleTask` from the past fifteen minutes that had the `foo` field in `additional_metadata` set to `bar`, you could apply those filters and replay all of the matching runs.
|
||||
|
||||
### Bulk Operations by Run Ids
|
||||
|
||||
The first way to bulk cancel or replay runs is by providing a list of run ids. This is the most straightforward way to cancel or replay runs in bulk.
|
||||
|
||||
<UniversalTabs items={["Python"]}>
|
||||
{/* <UniversalTabs items={["Python", "Typescript", "Go"]}> TODO V1 DOCS - Add TS and Go */}
|
||||
{/* <UniversalTabs items={["Python", "Typescript", "Go"]}> TODO V1 DOCS - Add TS and Go */}
|
||||
<Tabs.Tab title="Python">
|
||||
|
||||
<Callout type="info">
|
||||
In the Python SDK, the mechanics of bulk replaying and bulk cancelling
|
||||
workflows are exactly the same. The only change would be replacing e.g.
|
||||
In the Python SDK, the mechanics of bulk replaying and bulk cancelling tasks
|
||||
are exactly the same. The only change would be replacing e.g.
|
||||
`hatchet.runs.bulk_cancel` with `hatchet.runs.bulk_replay`.
|
||||
</Callout>
|
||||
|
||||
First, we'll start by fetching a workflow via the REST API.
|
||||
First, we'll start by fetching a task via the REST API.
|
||||
|
||||
<GithubSnippet src={CancelPy} target="Setup" />
|
||||
|
||||
Now that we have a workflow, we'll get runs for it, so that we can use them to bulk cancel by run id.
|
||||
Now that we have a task, we'll get runs for it, so that we can use them to bulk cancel by run id.
|
||||
|
||||
<GithubSnippet src={CancelPy} target="List runs" />
|
||||
|
||||
@@ -71,11 +71,11 @@ The second way to bulk cancel or replay runs is by providing a list of filters.
|
||||
{/* <UniversalTabs items={["Python", "Typescript", "Go"]}> TODO V1 DOCS - Add TS and Go */}
|
||||
<Tabs.Tab title="Python">
|
||||
|
||||
The example below provides some filters you might use to cancel or replay runs in bulk. Importantly, these filters are very similar to the filters you can use in the Hatchet Dashboard to filter which workflow runs are displaying.
|
||||
The example below provides some filters you might use to cancel or replay runs in bulk. Importantly, these filters are very similar to the filters you can use in the Hatchet Dashboard to filter which task runs are displaying.
|
||||
|
||||
<GithubSnippet src={CancelPy} target="Cancel by filters" />
|
||||
|
||||
Running this request will cancel all workflow runs matching the filters provided.
|
||||
Running this request will cancel all task runs matching the filters provided.
|
||||
|
||||
</Tabs.Tab>
|
||||
{/* <Tabs.Tab title="Typescript">
|
||||
@@ -88,18 +88,18 @@ The second way to bulk cancel or replay runs is by providing a list of filters.
|
||||
|
||||
# Manual Retries
|
||||
|
||||
Hatchet provides a manual retry mechanism that allows you to handle failed workflow instances flexibly from the Hatchet dashboard.
|
||||
Hatchet provides a manual retry mechanism that allows you to handle failed task instances flexibly from the Hatchet dashboard.
|
||||
|
||||
Navigate to the specific workflow in the Hatchet dashboard and click on the failed run. From there, you can inspect the details of the run, including the input data and the failure reason for each task.
|
||||
Navigate to the specific task in the Hatchet dashboard and click on the failed run. From there, you can inspect the details of the run, including the input data and the failure reason for each task.
|
||||
|
||||
To retry a failed task, simply click on the task in the run details view and then click the "Replay" button. This will create a new instance of the workflow, starting from the failed task, and using the same input data as the original run.
|
||||
To retry a failed task, simply click on the task in the run details view and then click the "Replay" button. This will create a new instance of the task, starting from the failed task, and using the same input data as the original run.
|
||||
|
||||
Manual retries give you full control over when and how to reprocess failed instances. For example, you may choose to wait until an external service is back online before retrying instances that depend on that service, or you may need to deploy a bug fix to your workflow code before retrying instances that were affected by the bug.
|
||||
Manual retries give you full control over when and how to reprocess failed instances. For example, you may choose to wait until an external service is back online before retrying instances that depend on that service, or you may need to deploy a bug fix to your task code before retrying instances that were affected by the bug.
|
||||
|
||||
## A Note on Dead Letter Queues
|
||||
|
||||
A dead letter queue (DLQ) is a messaging concept used to handle messages that cannot be processed successfully. In the context of workflow management, a DLQ can be used to store failed workflow instances that require manual intervention or further analysis.
|
||||
A dead letter queue (DLQ) is a messaging concept used to handle messages that cannot be processed successfully. In the context of task management, a DLQ can be used to store failed task instances that require manual intervention or further analysis.
|
||||
|
||||
While Hatchet does not have a built-in dead letter queue feature, the persistence of failed workflow instances in the dashboard serves a similar purpose. By keeping a record of failed instances, Hatchet allows you to track and manage failures, perform root cause analysis, and take appropriate actions, such as modifying input data or updating your workflow code before manually retrying the failed instances.
|
||||
While Hatchet does not have a built-in dead letter queue feature, the persistence of failed task instances in the dashboard serves a similar purpose. By keeping a record of failed instances, Hatchet allows you to track and manage failures, perform root cause analysis, and take appropriate actions, such as modifying input data or updating your task code before manually retrying the failed instances.
|
||||
|
||||
It's important to note that the term "dead letter queue" is more commonly associated with messaging systems like Apache Kafka or Amazon SQS, where unprocessed messages are automatically moved to a separate queue for manual handling. In Hatchet, the failed instances are not automatically moved to a separate queue but are instead persisted in the dashboard for manual management.
|
||||
|
||||
@@ -16,11 +16,11 @@ export const CancelGo = {
|
||||
export const getStaticProps = ({}) =>
|
||||
getSnippets([CancelPy, CancelTs, CancelGo]);
|
||||
|
||||
# Cancellation in Hatchet Workflows
|
||||
# Cancellation in Hatchet Tasks
|
||||
|
||||
Hatchet provides a mechanism for canceling workflow executions gracefully, allowing you to stop running workflows and their associated tasks when needed. Cancellation can be triggered on graceful termination of a worker or automatically through concurrency control strategies like [`CANCEL_IN_PROGRESS`](./concurrency.mdx#cancel_in_progress), which cancels currently running workflow instances to free up slots for new instances when the concurrency limit is reached.
|
||||
Hatchet provides a mechanism for canceling task executions gracefully, allowing you to signal to running tasks that they should stop running. Cancellation can be triggered on graceful termination of a worker or automatically through concurrency control strategies like [`CANCEL_IN_PROGRESS`](./concurrency.mdx#cancel_in_progress), which cancels currently running task instances to free up slots for new instances when the concurrency limit is reached.
|
||||
|
||||
When a workflow is canceled, Hatchet sends a cancellation signal to all the currently executing tasks. The tasks can then check for the cancellation signal and take appropriate action, such as cleaning up resources, aborting network requests, or gracefully terminating their execution.
|
||||
When a task is canceled, Hatchet sends a cancellation signal to the task. The task can then check for the cancellation signal and take appropriate action, such as cleaning up resources, aborting network requests, or gracefully terminating its execution.
|
||||
|
||||
## Cancellation Mechanisms
|
||||
|
||||
@@ -55,7 +55,7 @@ While your task is running, you can manage cancellation by:
|
||||
|
||||
<Tabs.Tab>
|
||||
|
||||
Hatchet uses the standard `AbortController` and `AbortSignal` interfaces from Node.js to handle cancellation. Each task in a workflow has access to a `context.controller` property, which is an instance of `AbortController`. The `AbortController` provides a way to signal cancellation to the task and any asynchronous operations it may be performing.
|
||||
Hatchet uses the standard `AbortController` and `AbortSignal` interfaces from Node.js to handle cancellation. Each task has access to a `context.controller` property, which is an instance of `AbortController`. The `AbortController` provides a way to signal cancellation to the task and any asynchronous operations it may be performing.
|
||||
|
||||
Inside a task, you can check for cancellation by accessing the `cancelled` property of the `Context`, which is a boolean value indicating whether the task has been cancelled or not. For example:
|
||||
|
||||
@@ -89,7 +89,7 @@ Here's an example of how to check for cancellation in a task:
|
||||
|
||||
## Cancellation Best Practices
|
||||
|
||||
When working with cancellation in Hatchet workflows, consider the following best practices:
|
||||
When working with cancellation in Hatchet tasks, consider the following best practices:
|
||||
|
||||
1. **Graceful Termination**: When a task receives a cancellation signal, aim to terminate its execution gracefully. Clean up any resources, abort pending operations, and perform any necessary cleanup tasks before returning from the task function.
|
||||
|
||||
@@ -99,7 +99,7 @@ When working with cancellation in Hatchet workflows, consider the following best
|
||||
|
||||
4. **Error Handling**: Handle cancellation errors appropriately. Distinguish between cancellation errors and other types of errors to provide meaningful error messages and take appropriate actions.
|
||||
|
||||
5. **Cancellation Propagation**: If a task invokes other functions or libraries, consider propagating the cancellation signal to those dependencies. This ensures that cancellation is handled consistently throughout the workflow.
|
||||
5. **Cancellation Propagation**: If a task invokes other functions or libraries, consider propagating the cancellation signal to those dependencies. This ensures that cancellation is handled consistently throughout the task.
|
||||
|
||||
## Additional Features
|
||||
|
||||
@@ -107,6 +107,6 @@ In addition to the methods of cancellation listed here, Hatchet also supports [b
|
||||
|
||||
## Conclusion
|
||||
|
||||
Cancellation is a powerful feature in Hatchet that allows you to gracefully stop workflow executions when needed. Remember to follow best practices when implementing cancellation in your workflows, such as graceful termination, regular cancellation checks, handling asynchronous operations, proper error handling, and cancellation propagation.
|
||||
Cancellation is a powerful feature in Hatchet that allows you to gracefully stop task executions when needed. Remember to follow best practices when implementing cancellation in your tasks, such as graceful termination, regular cancellation checks, handling asynchronous operations, proper error handling, and cancellation propagation.
|
||||
|
||||
By incorporating cancellation into your Hatchet workflows, you can build more resilient and responsive systems that can adapt to changing circumstances and user needs.
|
||||
By incorporating cancellation into your Hatchet tasks and workflows, you can build more resilient and responsive systems that can adapt to changing circumstances and user needs.
|
||||
|
||||
@@ -8,31 +8,31 @@ export const FanoutPy = {
|
||||
|
||||
export const getStaticProps = ({}) => getSnippets([FanoutPy]);
|
||||
|
||||
# Procedural Child Workflow Spawning
|
||||
# Procedural Child Task Spawning
|
||||
|
||||
Hatchet supports the dynamic creation of child workflows during a parent workflow's execution. This powerful feature enables:
|
||||
Hatchet supports the dynamic creation of child tasks during a parent task's execution. This powerful feature enables:
|
||||
|
||||
- **Complex, reusable workflow hierarchies** - Break down complex workflows into simpler, reusable components
|
||||
- **Complex, reusable task hierarchies** - Break down complex tasks into simpler, reusable components
|
||||
- **Fan-out parallelism** - Scale out to multiple parallel tasks dynamically
|
||||
- **Dynamic workflow behavior** - Create loops and conditional branches at runtime
|
||||
- **Agent-based workflows** - Support AI agents that can create new workflows based on analysis results or loop until a condition is met
|
||||
- **Dynamic task behavior** - Create loops and conditional branches at runtime
|
||||
- **Agent-based tasks** - Support AI agents that can create new tasks based on analysis results or loop until a condition is met
|
||||
|
||||
## Creating Parent and Child Workflows
|
||||
## Creating Parent and Child Tasks
|
||||
|
||||
To implement child workflow spawning, you first need to create both parent and child workflow definitions.
|
||||
To implement child task spawning, you first need to create both parent and child task definitions.
|
||||
|
||||
<UniversalTabs items={["Python", "Typescript", "Go"]}>
|
||||
<Tabs.Tab title="Python">
|
||||
|
||||
First, we'll declare a couple of workflows for the parent and child:
|
||||
First, we'll declare a couple of tasks for the parent and child:
|
||||
|
||||
<GithubSnippet src={FanoutPy} target="FanoutParent" />
|
||||
|
||||
We also created a task on the parent workflow that spawns the child workflows. Now, we'll add a couple of tasks to the child workflow:
|
||||
We also created a step on the parent task that spawns the child tasks. Now, we'll add a couple of steps to the child task:
|
||||
|
||||
<GithubSnippet src={FanoutPy} target="FanoutChild" />
|
||||
|
||||
And that's it! The fanout parent will run and spawn the child, and then will collect the results from its tasks.
|
||||
And that's it! The fanout parent will run and spawn the child, and then will collect the results from its steps.
|
||||
|
||||
</Tabs.Tab>
|
||||
<Tabs.Tab title="Typescript">
|
||||
@@ -40,23 +40,9 @@ And that's it! The fanout parent will run and spawn the child, and then will col
|
||||
```typescript
|
||||
import { hatchet } from "../hatchet-client";
|
||||
|
||||
// Child workflow definition
|
||||
type ChildInput = {
|
||||
N: number;
|
||||
};
|
||||
|
||||
type ChildOutput = {
|
||||
value: {
|
||||
Value: number;
|
||||
};
|
||||
};
|
||||
|
||||
export const child = hatchet.workflow<ChildInput, ChildOutput>({
|
||||
// Child task definition
|
||||
export const child = hatchet.task({
|
||||
name: "child",
|
||||
});
|
||||
|
||||
child.task({
|
||||
name: "value",
|
||||
fn: (input) => {
|
||||
return {
|
||||
Value: input.N,
|
||||
@@ -64,37 +50,23 @@ child.task({
|
||||
},
|
||||
});
|
||||
|
||||
// Parent workflow definition
|
||||
type ParentInput = {
|
||||
N: number;
|
||||
};
|
||||
|
||||
type ParentOutput = {
|
||||
sum: {
|
||||
Result: number;
|
||||
};
|
||||
};
|
||||
|
||||
export const parent = hatchet.workflow<ParentInput, ParentOutput>({
|
||||
// Parent task definition
|
||||
export const parent = hatchet.task({
|
||||
name: "parent",
|
||||
});
|
||||
|
||||
parent.task({
|
||||
name: "sum",
|
||||
fn: async (input, ctx) => {
|
||||
const n = input.N;
|
||||
const promises = [];
|
||||
|
||||
// Spawn multiple child workflows in parallel
|
||||
// Spawn multiple child tasks in parallel
|
||||
for (let i = 0; i < n; i++) {
|
||||
promises.push(ctx.runChild(child, { N: i }));
|
||||
}
|
||||
|
||||
// Wait for all child workflows to complete
|
||||
// Wait for all child tasks to complete
|
||||
const childRes = await Promise.all(promises);
|
||||
|
||||
// Sum the results
|
||||
const sum = childRes.reduce((acc, curr) => acc + curr.value.Value, 0);
|
||||
const sum = childRes.reduce((acc, curr) => acc + curr.Value, 0);
|
||||
|
||||
return {
|
||||
Result: sum,
|
||||
@@ -116,7 +88,7 @@ import (
|
||||
"github.com/hatchet-dev/hatchet/pkg/worker"
|
||||
)
|
||||
|
||||
// Child workflow input and output types
|
||||
// Child task input and output types
|
||||
type ChildInput struct {
|
||||
N int `json:"n"`
|
||||
}
|
||||
@@ -152,7 +124,7 @@ func Child(hatchet *v1.HatchetClient) workflow.WorkflowDeclaration[ChildInput, C
|
||||
return child
|
||||
}
|
||||
|
||||
// Parent workflow input and output types
|
||||
// Parent task input and output types
|
||||
type ParentInput struct {
|
||||
N int `json:"n"`
|
||||
}
|
||||
@@ -237,16 +209,16 @@ func Parent(hatchet *v1.HatchetClient) workflow.WorkflowDeclaration[ParentInput,
|
||||
</Tabs.Tab>
|
||||
</UniversalTabs>
|
||||
|
||||
## Running Child Workflows
|
||||
## Running Child Tasks
|
||||
|
||||
To spawn and run a child workflow from a parent task, use the appropriate method for your language:
|
||||
To spawn and run a child task from a parent task, use the appropriate method for your language:
|
||||
|
||||
<UniversalTabs items={["Python", "Typescript", "Go"]}>
|
||||
<Tabs.Tab title="Python">
|
||||
|
||||
```python
|
||||
# Inside a parent task
|
||||
child_result = child_workflow.run(child_input)
|
||||
child_result = child_task.run(child_input)
|
||||
```
|
||||
|
||||
</Tabs.Tab>
|
||||
@@ -254,7 +226,7 @@ child_result = child_workflow.run(child_input)
|
||||
|
||||
```typescript
|
||||
// Inside a parent task
|
||||
const childResult = await ctx.runChild(childWorkflow, childInput);
|
||||
const childResult = await ctx.runChild(childTask, childInput);
|
||||
```
|
||||
|
||||
</Tabs.Tab>
|
||||
@@ -268,9 +240,9 @@ result, err := childWorkflow.RunAsChild(ctx, childInput)
|
||||
</Tabs.Tab>
|
||||
</UniversalTabs>
|
||||
|
||||
## Parallel Child Workflow Execution
|
||||
## Parallel Child Task Execution
|
||||
|
||||
As shown in the examples above, you can spawn multiple child workflows in parallel:
|
||||
As shown in the examples above, you can spawn multiple child tasks in parallel:
|
||||
|
||||
<UniversalTabs items={["Python", "Typescript", "Go"]}>
|
||||
<Tabs.Tab title="Python">
|
||||
@@ -297,7 +269,7 @@ child_results = await run_child_workflows(input.n)
|
||||
<Tabs.Tab title="Typescript">
|
||||
|
||||
```typescript
|
||||
// Run multiple child workflows in parallel
|
||||
// Run multiple child tasks in parallel
|
||||
const promises = [];
|
||||
for (let i = 0; i < n; i++) {
|
||||
promises.push(ctx.runChild(child, { N: i }));
|
||||
@@ -309,7 +281,7 @@ const childResults = await Promise.all(promises);
|
||||
<Tabs.Tab title="Go">
|
||||
|
||||
```go
|
||||
// Run multiple child workflows in parallel using goroutines
|
||||
// Run multiple child tasks in parallel using goroutines
|
||||
var wg sync.WaitGroup
|
||||
var mu sync.Mutex
|
||||
results := make([]*ChildResult, 0, n)
|
||||
|
||||
@@ -0,0 +1,9 @@
|
||||
export default {
|
||||
"index": "Overview",
|
||||
"getting-started": "Getting Started",
|
||||
"cpu": "CPU Machine Types",
|
||||
"gpu": "GPU Machine Types",
|
||||
"git-ops": "GitOps",
|
||||
"auto-scaling": "Auto Scaling",
|
||||
"environment-variables": "Environment Variables",
|
||||
}
|
||||
@@ -0,0 +1,114 @@
|
||||
# Autoscaling with Hatchet Compute
|
||||
|
||||
Hatchet Compute provides automatic scaling capabilities to ensure your workflow workers efficiently handle varying workloads. This guide explains how to configure and use autoscaling features.
|
||||
|
||||
## Overview
|
||||
|
||||
Autoscaling automatically adjusts the number of worker replicas based on utilization metrics. When workload increases, Hatchet scales up your workers to handle the load. When workload decreases, it scales down to optimize resource usage.
|
||||
|
||||
## Basic Configuration
|
||||
|
||||
The basic autoscaling configuration requires two parameters:
|
||||
|
||||
1. **Minimum Replicas**: The minimum number of worker replicas that should be running at all times.
|
||||
2. **Maximum Replicas**: The maximum number of worker replicas that can be created during high load.
|
||||
|
||||
You can also enable "Scale to zero during periods of inactivity" to allow the system to scale down to zero replicas when there's no work to be done, helping to reduce costs.
|
||||
|
||||
## Advanced Autoscaling Settings
|
||||
|
||||
### Wait Duration
|
||||
|
||||
The wait duration specifies how long to wait between autoscaling events. This prevents rapid scaling changes that could destabilize your system.
|
||||
|
||||
**Format**: Use time units like:
|
||||
|
||||
- `10s` (10 seconds)
|
||||
- `5m` (5 minutes)
|
||||
- `1h` (1 hour)
|
||||
|
||||
**Default**: `1m` (1 minute)
|
||||
|
||||
### Rolling Window Duration
|
||||
|
||||
This setting determines the time window used to calculate utilization metrics for scaling decisions. Shorter windows make the system more responsive but potentially more volatile.
|
||||
|
||||
**Format**: Use time units like:
|
||||
|
||||
- `2m` (2 minutes)
|
||||
- `5m` (5 minutes)
|
||||
- `1h` (1 hour)
|
||||
|
||||
**Default**: `2m` (2 minutes)
|
||||
|
||||
### Utilization Scale Up Threshold
|
||||
|
||||
This threshold determines when to scale up worker replicas. It's expressed as a decimal between 0 and 1.
|
||||
|
||||
**Example**: A value of `0.75` means:
|
||||
|
||||
- If utilization exceeds 75%, add more replicas
|
||||
- Scale up occurs in increments defined by the scaling increment
|
||||
|
||||
**Default**: `0.75`
|
||||
|
||||
### Utilization Scale Down Threshold
|
||||
|
||||
This threshold determines when to scale down worker replicas. It's expressed as a decimal between 0 and 1.
|
||||
|
||||
**Example**: A value of `0.25` means:
|
||||
|
||||
- If utilization falls below 25%, remove replicas
|
||||
- Scale down occurs in increments defined by the scaling increment
|
||||
|
||||
**Default**: `0.25`
|
||||
|
||||
### Scaling Increment
|
||||
|
||||
The number of replicas to add or remove during each scaling event.
|
||||
|
||||
**Example**: A value of `1` means:
|
||||
|
||||
- Add or remove 1 replica at a time
|
||||
- Higher values enable faster scaling but may be less stable
|
||||
|
||||
**Default**: `1`
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Start Conservative**: Begin with moderate thresholds (e.g., 0.75 for scale-up and 0.25 for scale-down) and adjust based on your workload patterns.
|
||||
|
||||
2. **Tune Wait Duration**:
|
||||
|
||||
- Shorter durations (e.g., 1m) work well for bursty workloads
|
||||
- Longer durations (e.g., 5m) are better for stable, predictable loads
|
||||
|
||||
3. **Rolling Window**:
|
||||
|
||||
- Shorter windows (2-5m) provide faster response to changes
|
||||
- Longer windows (10m+) provide more stable scaling behavior
|
||||
|
||||
4. **Monitor and Adjust**:
|
||||
- Watch your scaling patterns in the Hatchet UI
|
||||
- Adjust thresholds based on observed behavior
|
||||
- Consider your cost vs. performance requirements
|
||||
|
||||
## Monitoring Autoscaling
|
||||
|
||||
You can monitor your autoscaling behavior in the Hatchet UI under the Managed Compute section. The UI shows:
|
||||
|
||||
- Current number of replicas
|
||||
- Scaling events history
|
||||
- Utilization metrics
|
||||
- Scaling decisions and their triggers
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If you're experiencing unexpected scaling behavior:
|
||||
|
||||
1. Check your utilization metrics in the Hatchet UI
|
||||
2. Verify your threshold settings are appropriate for your workload
|
||||
3. Ensure your wait duration isn't too short or too long
|
||||
4. Review your scaling events history for patterns
|
||||
|
||||
For additional help, join the [Hatchet Community](https://hatchet.run/discord) or reach out to us [here](https://hatchet.run/office-hours).
|
||||
@@ -1,37 +1,12 @@
|
||||
import { Tabs, Callout } from "nextra/components";
|
||||
import UniversalTabs from "../../components/UniversalTabs";
|
||||
import UniversalTabs from "../../../components/UniversalTabs";
|
||||
|
||||
# CPU Instance Configuration
|
||||
|
||||
<Callout type="warning">
|
||||
This feature is currently in beta and may be subject to change.
|
||||
</Callout>
|
||||
|
||||
## Overview
|
||||
|
||||
The Hatchet SDK provides a `Compute` class that allows you to define and manage compute resources for your workflows. Each step in your workflow can have its own compute configuration, enabling fine-grained control over resource allocation.
|
||||
|
||||
## Basic Configuration
|
||||
|
||||
<UniversalTabs items={['Python', 'TypeScript', 'Go']}>
|
||||
<Tabs.Tab>
|
||||
|
||||
```python
|
||||
from hatchet_sdk.compute.configs import Compute
|
||||
|
||||
compute = Compute(
|
||||
cpu_kind="shared", # "shared" or "performance"
|
||||
cpus=2, # Number of CPU cores
|
||||
memory_mb=1024, # Memory in MB
|
||||
num_replicas=2, # Number of instances
|
||||
regions=["ewr"] # Region codes
|
||||
)
|
||||
|
||||
```
|
||||
|
||||
</Tabs.Tab>
|
||||
</UniversalTabs>
|
||||
|
||||
## CPU Types and Memory Scaling
|
||||
|
||||
### Shared CPU
|
||||
@@ -46,40 +21,6 @@ regions=["ewr"] # Region codes
|
||||
- **Minimum Memory**: 2048MB per CPU core
|
||||
- **Use Case**: Production and compute-intensive workloads
|
||||
|
||||
### Memory Calculation Examples
|
||||
|
||||
#### Shared CPU
|
||||
|
||||
```python
|
||||
# 4 shared CPUs
|
||||
max_memory = 2048 * 4 # = 8192 MB (8GB)
|
||||
min_memory = 256 * 4 # = 1024 MB (1GB)
|
||||
|
||||
compute = Compute(
|
||||
cpu_kind="shared",
|
||||
cpus=4,
|
||||
memory_mb=4096, # Must be between min_memory and max_memory
|
||||
num_replicas=1,
|
||||
regions=["ewr"]
|
||||
)
|
||||
```
|
||||
|
||||
#### Performance CPU
|
||||
|
||||
```python
|
||||
# 4 performance CPUs
|
||||
max_memory = 8192 * 4 # = 32768 MB (32GB)
|
||||
min_memory = 2048 * 4 # = 8192 MB (8GB)
|
||||
|
||||
compute = Compute(
|
||||
cpu_kind="performance",
|
||||
cpus=4,
|
||||
memory_mb=16384, # Must be between min_memory and max_memory
|
||||
num_replicas=1,
|
||||
regions=["ewr"]
|
||||
)
|
||||
```
|
||||
|
||||
## Available Regions
|
||||
|
||||
| Region Code | Location |
|
||||
@@ -123,43 +64,6 @@ compute = Compute(
|
||||
|
||||
The `num_replicas` parameter determines the total number of machines that will run your workload. These instances are randomly distributed across the specified regions.
|
||||
|
||||
### Example Configurations
|
||||
|
||||
```python
|
||||
# Single region, multiple replicas
|
||||
compute = Compute(
|
||||
cpu_kind="shared",
|
||||
cpus=2,
|
||||
memory_mb=1024,
|
||||
num_replicas=3,
|
||||
regions=["ewr"] # All 3 replicas in ewr
|
||||
)
|
||||
|
||||
# Multiple regions, multiple replicas
|
||||
compute = Compute(
|
||||
cpu_kind="shared",
|
||||
cpus=2,
|
||||
memory_mb=1024,
|
||||
num_replicas=6,
|
||||
regions=["ewr", "lax", "lhr"] # 6 replicas randomly distributed across the three regions
|
||||
)
|
||||
```
|
||||
|
||||
## Usage in Workflows
|
||||
|
||||
```python
|
||||
from hatchet_sdk import Hatchet, Context
|
||||
|
||||
hatchet = Hatchet()
|
||||
|
||||
@hatchet.workflow()
|
||||
class MyWorkflow:
|
||||
@hatchet.step(compute=compute)
|
||||
def process_data(self, context: Context):
|
||||
# Your code here
|
||||
pass
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Resource Allocation**
|
||||
@@ -0,0 +1,118 @@
|
||||
import { Callout, Tabs } from "nextra/components";
|
||||
|
||||
# Environment Variables
|
||||
|
||||
Environment variables allow you to configure your worker's runtime behavior without modifying code. This guide explains how to manage environment variables for your Hatchet workers.
|
||||
|
||||
## Overview
|
||||
|
||||
Environment variables in Hatchet Compute:
|
||||
|
||||
- Are securely stored and encrypted at rest
|
||||
- Can be modified without rebuilding your container
|
||||
- Support different values across environments
|
||||
- Are injected into your worker containers at runtime
|
||||
|
||||
## Configuring Environment Variables
|
||||
|
||||
You can configure environment variables through:
|
||||
|
||||
1. The Hatchet UI during service creation/configuration
|
||||
2. Infrastructure as Code using the Hatchet CLI
|
||||
3. Service configuration files
|
||||
|
||||
### Using the UI
|
||||
|
||||
1. Navigate to your service's "Runtime configuration" section
|
||||
2. Find the "Environment Variables" panel
|
||||
3. Click "Add row" to create a new variable
|
||||
4. Enter the key and value:
|
||||
- Key: The environment variable name (e.g., `DATABASE_URL`)
|
||||
- Value: The environment variable value (e.g., `postgresql://<password>@<host>:<port>/<database>`)
|
||||
5. Click "Save" to apply the changes and trigger a redeployment of your service
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Security**
|
||||
|
||||
- Never commit sensitive values to version control
|
||||
- Use Hatchet's secrets management for sensitive data
|
||||
- Rotate sensitive values regularly
|
||||
|
||||
2. **Naming Conventions**
|
||||
|
||||
- Use descriptive, meaningful names
|
||||
- Follow a consistent naming pattern
|
||||
- Document non-obvious variables
|
||||
|
||||
3. **Value Management**
|
||||
|
||||
- Use appropriate data types
|
||||
- Validate values before deployment
|
||||
- Keep values consistent across related services
|
||||
|
||||
4. **Common Variables**
|
||||
```
|
||||
# Examples of commonly used variables
|
||||
NODE_ENV=production
|
||||
LOG_LEVEL=info
|
||||
API_TIMEOUT=30000
|
||||
DATABASE_URL=postgresql://user:pass@host:5432/db
|
||||
```
|
||||
|
||||
<Callout type="info">
|
||||
`HATCHET_CLIENT_TOKEN` is automatically set for your service and should not be
|
||||
set in your environment variables.
|
||||
</Callout>
|
||||
|
||||
## Accessing Variables in Code
|
||||
|
||||
Environment variables are mounted into your worker containers at runtime. You can access them in your code using standard methods for your language.
|
||||
|
||||
<Tabs items={['Python', 'JavaScript', 'Go']}>
|
||||
<Tabs.Tab>
|
||||
```python
|
||||
import os
|
||||
|
||||
database_url = os.getenv('DATABASE_URL')
|
||||
api_key = os.getenv('API_KEY')
|
||||
```
|
||||
|
||||
</Tabs.Tab>
|
||||
<Tabs.Tab>
|
||||
```javascript
|
||||
const databaseUrl = process.env.DATABASE_URL;
|
||||
const apiKey = process.env.API_KEY;
|
||||
```
|
||||
</Tabs.Tab>
|
||||
<Tabs.Tab>
|
||||
```go
|
||||
import "os"
|
||||
|
||||
databaseUrl := os.Getenv("DATABASE_URL")
|
||||
apiKey := os.Getenv("API_KEY")
|
||||
```
|
||||
|
||||
</Tabs.Tab>
|
||||
</Tabs>
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
1. **Variables Not Available**
|
||||
|
||||
- Verify the variable is correctly set in your configuration
|
||||
- Check for typos in variable names
|
||||
- Ensure the service has been redeployed after changes
|
||||
|
||||
2. **Incorrect Values**
|
||||
|
||||
- Check for proper escaping of special characters
|
||||
- Verify the value format is correct
|
||||
- Look for conflicting definitions
|
||||
|
||||
3. **Security Issues**
|
||||
- Review access permissions to sensitive variables
|
||||
- Ensure secrets are properly managed
|
||||
- Check for accidental exposure in logs
|
||||
|
||||
For additional help, join the [Hatchet Community](https://hatchet.run/discord) or reach out to us [here](https://hatchet.run/office-hours).
|
||||
+7
-5
@@ -1,8 +1,10 @@
|
||||
# Getting Started with Hatchet Compute
|
||||
|
||||
Hatchet Compute is available in Hatchet Cloud.
|
||||
Hatchet Compute is available in [Hatchet Cloud](https://cloud.onhatchet.run).
|
||||
|
||||
## Project Setup
|
||||
All Free tier users with a linked card receive $5/month in free compute credits to get started.
|
||||
|
||||
## Quickstart
|
||||
|
||||
This guide will walk you through the process of setting up a Hatchet Compute project and assumes you have a basic understanding of Hatchet.
|
||||
If you'd like you can fork the [Hatchet Compute Example Repo](https://github.com/hatchet-dev/managed-compute-examples) to follow along.
|
||||
@@ -10,10 +12,10 @@ If you'd like you can fork the [Hatchet Compute Example Repo](https://github.com
|
||||
1. Sign in or sign up for Hatchet Cloud [here](https://cloud.onhatchet.run).
|
||||
2. Navigate to the Managed Compute view to configure your compute resources.
|
||||
3. Ensure your code is committed to a Git repository.
|
||||
4. Click **+ New Worker** to create a new managed compute worker pool.
|
||||
5. Connect your Git repository to Hatchet and select the repository and branch you'd like to deploy.
|
||||
4. Click **+ Add Service** to create a new managed compute service.
|
||||
5. Connect your GitHub repository to Hatchet and select the repository and branch you'd like to deploy.
|
||||
6. Specify the directory where your Dockerfile is located.
|
||||
7. Select Infrastructure as Code to have Hatchet automatically manage your compute resources based on your workflow code, or select Manual to manage your compute resources through the Hatchet UI.
|
||||
8. Click **Deploy** to deploy your workflow.
|
||||
|
||||
Your workflow will be deployed and you'll be able to monitor and scale your compute resources through the Hatchet UI.
|
||||
Your service will be deployed and you'll be able to monitor and scale your compute resources through the Hatchet UI.
|
||||
@@ -0,0 +1,159 @@
|
||||
import { Callout } from "nextra/components";
|
||||
|
||||
# GitOps with Hatchet Compute
|
||||
|
||||
Hatchet Compute uses GitOps principles to manage and deploy your services. This approach ensures that your infrastructure configuration is version controlled, reproducible, and follows best practices for continuous deployment.
|
||||
|
||||
<Callout type="info">
|
||||
GitHub accounts must have read access to a repository to configure worker
|
||||
pools. Make sure your GitHub account has the necessary permissions before
|
||||
proceeding.
|
||||
</Callout>
|
||||
|
||||
## Overview
|
||||
|
||||
GitOps in Hatchet Compute means that:
|
||||
|
||||
- Your service configuration lives in your Git repository
|
||||
- Changes to your service are triggered by Git events
|
||||
- The deployed state always matches your Git repository's state
|
||||
- You have a complete audit trail of all service changes
|
||||
|
||||
## Build Configuration
|
||||
|
||||
### Repository Setup
|
||||
|
||||
1. **GitHub Account**
|
||||
|
||||
- Select your GitHub account from the dropdown
|
||||
- If your account isn't listed, click "Link a new repository" to connect your GitHub account
|
||||
- Hatchet requires GitHub repository access to enable GitOps workflows
|
||||
|
||||
2. **GitHub Repository**
|
||||
|
||||
- Choose the repository containing your service code
|
||||
- The repository should contain your application code and Dockerfile(s)
|
||||
- Ensure Hatchet has the necessary permissions to access your repository
|
||||
|
||||
3. **GitHub Branch**
|
||||
- Select the branch you want to deploy from
|
||||
- This branch will be monitored for changes
|
||||
- Any commits to this branch will trigger new deployments
|
||||
- Note: You can configure Dev/Prod branches to deploy to different services with [namespaces or tenants](../environments.mdx)
|
||||
|
||||
### Build Settings
|
||||
|
||||
1. **Build Directory**
|
||||
|
||||
- Specify the directory containing your service code
|
||||
- This is the root directory where your build will run
|
||||
- Example: `.` for repository root, or `services/myservice` for a monorepo
|
||||
|
||||
2. **Path to Dockerfile(s)**
|
||||
- Provide the relative path to your Dockerfile
|
||||
- Default: `./Dockerfile`
|
||||
- The path is relative to your build directory
|
||||
- You can maintain multiple Dockerfiles for different worker configurations:
|
||||
```
|
||||
.
|
||||
├── Dockerfile.worker1 # For worker pool 1
|
||||
├── Dockerfile.worker2 # For worker pool 2
|
||||
└── services/
|
||||
└── special/
|
||||
└── Dockerfile # For specialized worker
|
||||
```
|
||||
|
||||
## Docker Configuration
|
||||
|
||||
Your Dockerfile needs to be properly configured to run Hatchet workers. See our [Docker configuration guide](../docker.mdx) for:
|
||||
|
||||
- Language-specific Dockerfile examples (Python, JavaScript, Go)
|
||||
- Package manager configurations (pip, poetry, npm, yarn, pnpm)
|
||||
- Entry point configuration
|
||||
- Multi-stage build optimizations
|
||||
|
||||
<Callout type="info">
|
||||
When using multiple worker configurations in the same repository, you can
|
||||
create separate Dockerfiles with different dependencies, environment
|
||||
variables, and entry points for each worker pool.
|
||||
</Callout>
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Example Repository Structure**
|
||||
|
||||
```
|
||||
.
|
||||
├── Dockerfile.worker1 # Primary worker configuration
|
||||
├── Dockerfile.worker2 # Secondary worker configuration
|
||||
├── .dockerignore
|
||||
├── src/
|
||||
│ ├── worker1/ # Worker 1 specific code
|
||||
│ │ └── worker.py
|
||||
│ └── worker2/ # Worker 2 specific code
|
||||
│ └── worker.py
|
||||
└── hatchet.yaml # Optional Hatchet configuration
|
||||
```
|
||||
|
||||
2. **Dockerfile Guidelines**
|
||||
|
||||
- Use multi-stage builds to optimize image size
|
||||
- Specify exact versions for base images
|
||||
- Include only necessary files using `.dockerignore`
|
||||
- Set appropriate user permissions
|
||||
- Cache dependencies effectively
|
||||
|
||||
3. **Branch Management**
|
||||
|
||||
- Use feature branches for development
|
||||
- Set up branch protection rules
|
||||
- Consider using environment-specific branches (e.g., `main` for production, `staging` for staging)
|
||||
|
||||
4. **Security**
|
||||
- Never commit secrets to your repository
|
||||
- Use Hatchet's secrets management for sensitive data
|
||||
- Regularly update dependencies
|
||||
- Scan your containers for vulnerabilities
|
||||
|
||||
## Continuous Deployment
|
||||
|
||||
Hatchet automatically sets up a continuous deployment pipeline that:
|
||||
|
||||
1. Monitors your selected branch for changes
|
||||
2. Triggers a new build when changes are detected
|
||||
3. Builds your Docker image using the specified Dockerfile
|
||||
4. Deploys the new image to your compute environment
|
||||
5. Updates your service with zero downtime
|
||||
|
||||
## Monitoring Deployments
|
||||
|
||||
You can monitor your deployments in the Hatchet UI:
|
||||
|
||||
1. View build status and logs
|
||||
2. See deployment history
|
||||
3. Track service health metrics
|
||||
4. Monitor resource utilization
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
Common issues and solutions:
|
||||
|
||||
1. **Build Failures**
|
||||
|
||||
- Check your Dockerfile syntax
|
||||
- Verify build context is correct
|
||||
- Ensure all required files are included
|
||||
- Review build logs in the Hatchet UI
|
||||
|
||||
2. **Permission Issues**
|
||||
|
||||
- Verify GitHub repository access for Hatchet users who need to manage configuration
|
||||
- Ensure correct branch configuration for your service
|
||||
|
||||
3. **Deployment Problems**
|
||||
- Validate your container configuration
|
||||
- Check resource limits and requests
|
||||
- Review service logs
|
||||
- Verify network connectivity
|
||||
|
||||
For additional help, join the [Hatchet Community](https://hatchet.run/discord) or reach out to us [here](https://hatchet.run/office-hours).
|
||||
@@ -3,12 +3,13 @@ import { Callout } from "nextra/components";
|
||||
# GPU Instance Configuration
|
||||
|
||||
<Callout type="warning">
|
||||
This feature is currently in beta and may be subject to change.
|
||||
GPU compute is currently in closed beta; reach out to us on
|
||||
[Discord](https://hatchet.run/discord) to request early access!
|
||||
</Callout>
|
||||
|
||||
## Overview
|
||||
|
||||
Hatchet supports GPU-accelerated workloads using NVIDIA GPUs. This guide covers GPU configuration options, Docker setup, and available regions. For basic compute configuration concepts, please refer to the [CPU Instance Configuration](cpu-configuration.md) documentation.
|
||||
Hatchet supports GPU-accelerated workloads using NVIDIA GPUs. This guide covers GPU configuration options, Docker setup, and available regions. For basic compute configuration concepts, please refer to the [CPU Instance Configuration](cpu.mdx) documentation.
|
||||
|
||||
## GPU Types and Availability
|
||||
|
||||
@@ -21,20 +22,6 @@ Hatchet currently supports the following NVIDIA GPU types:
|
||||
| NVIDIA A100-PCIe | 40GB | ord (Chicago) |
|
||||
| NVIDIA A100-SXM4 | 80GB | ams (Amsterdam), iad (Ashburn), mia (Miami), sjc (San Jose), syd (Sydney) |
|
||||
|
||||
## Basic Configuration
|
||||
|
||||
```python
|
||||
from hatchet_sdk.compute.configs import Compute
|
||||
|
||||
compute = Compute(
|
||||
gpu_kind="a100-80gb", # GPU type
|
||||
gpus=1, # Number of GPUs
|
||||
memory_mb=163840, # Memory in MB
|
||||
num_replicas=1, # Number of instances
|
||||
regions=["ams"] # Must be a region that supports your chosen GPU
|
||||
)
|
||||
```
|
||||
|
||||
## Docker Configuration
|
||||
|
||||
### Example Dockerfile
|
||||
@@ -2,10 +2,6 @@ import { Callout } from "nextra/components";
|
||||
|
||||
# Hatchet Managed Compute
|
||||
|
||||
<Callout type="warning">
|
||||
This feature is currently in beta and may be subject to change.
|
||||
</Callout>
|
||||
|
||||
## Overview
|
||||
|
||||
Hatchet Managed Compute provides the simplicity of serverless while delivering the performance and control of traditional infrastructure, making it ideal for long-lived or data-intensive AI applications and background job processing. It enables dynamic scaling while eliminating common serverless limitations like cold starts and timeouts.
|
||||
@@ -14,14 +10,14 @@ Hatchet Managed Compute provides the simplicity of serverless while delivering t
|
||||
|
||||
- **Sub-100ms Instance Provisioning**: Pre-warms instances before resource demands
|
||||
- **Distributed Architecture**: Built on [Hatchet Queue](../home/) for reliable workload distribution
|
||||
- **Multi-Region Support**: Deploy across regions for fault tolerance and data locality
|
||||
- **[Multi-Region Support](./cpu.mdx#available-regions)**: Deploy across regions for fault tolerance and data locality
|
||||
- **[Auto-Scaling](./auto-scaling.mdx)**: Automatically scale your compute resources based on workload demand
|
||||
|
||||
### Available Compute Classes
|
||||
|
||||
- Shared CPUs
|
||||
- Performance CPUs
|
||||
- GPU instances
|
||||
- Customizable worker pools
|
||||
- [Shared CPUs](./cpu.mdx#shared-cpu)
|
||||
- [Performance CPUs](./cpu.mdx#performance-cpu)
|
||||
- [GPU instances](./gpu.mdx)
|
||||
|
||||
### Smart Workload Management
|
||||
|
||||
@@ -29,13 +25,9 @@ Hatchet Managed Compute provides the simplicity of serverless while delivering t
|
||||
- **Burstable Capacity**: Scales dynamically based on queue depth
|
||||
- **Sticky Assignment**: Routes tasks to the same instance when possible using [sticky assignments](../home/features/worker-assignment/sticky-assignment.mdx)
|
||||
|
||||
## Infrastructure as Code
|
||||
|
||||
Hatchet Managed Compute is defined directly in your workflow code, making it extremely easy to manage your compute resources.
|
||||
|
||||
## Deployment
|
||||
|
||||
- **GitOps Integration**: Automatic builds and deployments on commit
|
||||
- **[GitOps Integration](./git-ops.mdx)**: Automatic builds and deployments on commit
|
||||
- **Zero-Ops**: Managed infrastructure eliminates operational overhead
|
||||
- **Version Control**: Infrastructure changes tracked in code
|
||||
|
||||
@@ -46,7 +38,3 @@ Hatchet Managed Compute is defined directly in your workflow code, making it ext
|
||||
3. Cost-effective for sustained workloads
|
||||
4. Fine-grained control over compute resources
|
||||
5. Better suited for AI and data processing tasks
|
||||
|
||||
## Getting Started
|
||||
|
||||
Reach out to support@hatchet.dev to get access to managed compute.
|
||||
@@ -16,9 +16,9 @@ export const GO = {
|
||||
|
||||
export const getStaticProps = ({}) => getSnippets([TS, PY, GO]);
|
||||
|
||||
## Concurrency Control in Hatchet Workflows
|
||||
# Concurrency Control in Hatchet Tasks
|
||||
|
||||
Hatchet provides powerful concurrency control features to help you manage the execution of your workflows. This is particularly useful when you have workflows that may be triggered frequently or have long-running steps, and you want to limit the number of concurrent executions to prevent overloading your system, ensure fairness, or avoid race conditions.
|
||||
Hatchet provides powerful concurrency control features to help you manage the execution of your tasks. This is particularly useful when you have tasks that may be triggered frequently or have long-running steps, and you want to limit the number of concurrent executions to prevent overloading your system, ensure fairness, or avoid race conditions.
|
||||
|
||||
<Callout type="info">
|
||||
Concurrency strategies can be added to both `Tasks` and `Workflows`.
|
||||
@@ -26,29 +26,29 @@ Hatchet provides powerful concurrency control features to help you manage the ex
|
||||
|
||||
### Why use concurrency control?
|
||||
|
||||
There are several reasons why you might want to use concurrency control in your Hatchet workflows:
|
||||
There are several reasons why you might want to use concurrency control in your Hatchet tasks:
|
||||
|
||||
1. **Fairness**: When you have multiple clients or users triggering workflows, concurrency control can help ensure fair access to resources. By limiting the number of concurrent runs per client or user, you can prevent a single client from monopolizing the system and ensure that all clients get a fair share of the available resources.
|
||||
1. **Fairness**: When you have multiple clients or users triggering tasks, concurrency control can help ensure fair access to resources. By limiting the number of concurrent runs per client or user, you can prevent a single client from monopolizing the system and ensure that all clients get a fair share of the available resources.
|
||||
|
||||
2. **Resource management**: If your workflow steps are resource-intensive (e.g., they make external API calls or perform heavy computations), running too many instances concurrently can overload your system. By limiting concurrency, you can ensure your system remains stable and responsive.
|
||||
2. **Resource management**: If your task steps are resource-intensive (e.g., they make external API calls or perform heavy computations), running too many instances concurrently can overload your system. By limiting concurrency, you can ensure your system remains stable and responsive.
|
||||
|
||||
3. **Avoiding race conditions**: If your workflow steps modify shared resources, running multiple instances concurrently can lead to race conditions and inconsistent data. Concurrency control helps you avoid these issues by ensuring only a limited number of instances run at a time.
|
||||
3. **Avoiding race conditions**: If your task steps modify shared resources, running multiple instances concurrently can lead to race conditions and inconsistent data. Concurrency control helps you avoid these issues by ensuring only a limited number of instances run at a time.
|
||||
|
||||
4. **Compliance with external service limits**: If your workflow steps interact with external services that have rate limits, concurrency control can help you stay within those limits and avoid being throttled or blocked.
|
||||
4. **Compliance with external service limits**: If your task steps interact with external services that have rate limits, concurrency control can help you stay within those limits and avoid being throttled or blocked.
|
||||
|
||||
5. **Spike Protection**: When you have workflows that are triggered by external events, such as webhooks or user actions, you may experience spikes in traffic that can overwhelm your system. Concurrency control can help you manage these spikes by limiting the number of concurrent runs and queuing new runs until resources become available.
|
||||
5. **Spike Protection**: When you have tasks that are triggered by external events, such as webhooks or user actions, you may experience spikes in traffic that can overwhelm your system. Concurrency control can help you manage these spikes by limiting the number of concurrent runs and queuing new runs until resources become available.
|
||||
|
||||
### Available Strategies:
|
||||
|
||||
- [`GROUP_ROUND_ROBIN`](./overview.mdx#group-round-robin): Distribute workflow instances across available slots in a round-robin fashion based on the `key` function.
|
||||
- [`CANCEL_IN_PROGRESS`](./overview.mdx#cancel-in-progress): Cancel the currently running workflow instances for the same concurrency key to free up slots for the new instance.
|
||||
- [`CANCEL_NEWEST`](./overview.mdx#cancel-newest): Cancel the newest workflow instance for the same concurrency key to free up slots for the new instance.
|
||||
- [`GROUP_ROUND_ROBIN`](./overview.mdx#group-round-robin): Distribute task instances across available slots in a round-robin fashion based on the `key` function.
|
||||
- [`CANCEL_IN_PROGRESS`](./overview.mdx#cancel-in-progress): Cancel the currently running task instances for the same concurrency key to free up slots for the new instance.
|
||||
- [`CANCEL_NEWEST`](./overview.mdx#cancel-newest): Cancel the newest task instance for the same concurrency key to free up slots for the new instance.
|
||||
|
||||
> We're always open to adding more strategies to fit your needs. Join our [discord](https://hatchet.run/discord) to let us know.
|
||||
|
||||
### Setting concurrency on workers
|
||||
|
||||
In addition to setting concurrency limits at the workflow level, you can also control concurrency at the worker level by passing the `maxRuns` option when creating a new `Worker` instance:
|
||||
In addition to setting concurrency limits at the task level, you can also control concurrency at the worker level by passing the `slots` option when creating a new `Worker` instance:
|
||||
|
||||
<UniversalTabs items={["Python", "Typescript", "Go"]}>
|
||||
<Tabs.Tab>
|
||||
@@ -68,23 +68,23 @@ This example will only let 1 run in each group run at a given time to fairly dis
|
||||
|
||||
### How it works
|
||||
|
||||
When a new workflow instance is triggered, the `GROUP_ROUND_ROBIN` strategy will:
|
||||
When a new task instance is triggered, the `GROUP_ROUND_ROBIN` strategy will:
|
||||
|
||||
1. Determine the group that the instance belongs to based on the `key` function defined in the workflow's concurrency configuration.
|
||||
2. Check if there are any available slots for the instance's group based on the `maxRuns` limit of available workers.
|
||||
3. If a slot is available, the new workflow instance starts executing immediately.
|
||||
4. If no slots are available, the new workflow instance is added to a queue for its group.
|
||||
5. When a running workflow instance completes and a slot becomes available for a group, the next queued instance for that group (in round-robin order) is dequeued and starts executing.
|
||||
1. Determine the group that the instance belongs to based on the `key` function defined in the task's concurrency configuration.
|
||||
2. Check if there are any available slots for the instance's group based on the `slots` limit of available workers.
|
||||
3. If a slot is available, the new task instance starts executing immediately.
|
||||
4. If no slots are available, the new task instance is added to a queue for its group.
|
||||
5. When a running task instance completes and a slot becomes available for a group, the next queued instance for that group (in round-robin order) is dequeued and starts executing.
|
||||
|
||||
This strategy ensures that workflow instances are processed fairly across different groups, preventing any one group from monopolizing the available resources. It also helps to reduce latency for instances within each group, as they are processed in a round-robin fashion rather than strictly in the order they were triggered.
|
||||
This strategy ensures that task instances are processed fairly across different groups, preventing any one group from monopolizing the available resources. It also helps to reduce latency for instances within each group, as they are processed in a round-robin fashion rather than strictly in the order they were triggered.
|
||||
|
||||
### When to use `GROUP_ROUND_ROBIN`
|
||||
|
||||
The `GROUP_ROUND_ROBIN` strategy is particularly useful in scenarios where:
|
||||
|
||||
- You have multiple clients or users triggering workflow instances, and you want to ensure fair resource allocation among them.
|
||||
- You have multiple clients or users triggering task instances, and you want to ensure fair resource allocation among them.
|
||||
- You want to process instances within each group in a round-robin fashion to minimize latency and ensure that no single instance within a group is starved for resources.
|
||||
- You have long-running workflow instances and want to avoid one group's instances monopolizing the available slots.
|
||||
- You have long-running task instances and want to avoid one group's instances monopolizing the available slots.
|
||||
|
||||
Keep in mind that the `GROUP_ROUND_ROBIN` strategy may not be suitable for all use cases, especially those that require strict ordering or prioritization of the most recent events.
|
||||
|
||||
@@ -92,21 +92,21 @@ Keep in mind that the `GROUP_ROUND_ROBIN` strategy may not be suitable for all u
|
||||
|
||||
### How it works
|
||||
|
||||
When a new workflow instance is triggered, the `CANCEL_IN_PROGRESS` strategy will:
|
||||
When a new task instance is triggered, the `CANCEL_IN_PROGRESS` strategy will:
|
||||
|
||||
1. Determine the group that the instance belongs to based on the `key` function defined in the workflow's concurrency configuration.
|
||||
1. Determine the group that the instance belongs to based on the `key` function defined in the task's concurrency configuration.
|
||||
2. Check if there are any available slots for the instance's group based on the `slots` limit of available workers.
|
||||
3. If a slot is available, the new workflow instance starts executing immediately.
|
||||
4. If there are no available slots, currently running workflow instances for the same concurrency key are cancelled to free up slots for the new instance.
|
||||
5. The new workflow instance starts executing immediately.
|
||||
3. If a slot is available, the new task instance starts executing immediately.
|
||||
4. If there are no available slots, currently running task instances for the same concurrency key are cancelled to free up slots for the new instance.
|
||||
5. The new task instance starts executing immediately.
|
||||
|
||||
### When to use `CANCEL_IN_PROGRESS`
|
||||
|
||||
The `CANCEL_IN_PROGRESS` strategy is particularly useful in scenarios where:
|
||||
|
||||
- You have long-running workflow instances that may become stale or irrelevant if newer instances are triggered.
|
||||
- You want to prioritize processing the most recent data or events, even if it means canceling older workflow instances.
|
||||
- You have resource-intensive workflows where it's more efficient to cancel an in-progress instance and start a new one than to wait for the old instance to complete.
|
||||
- You have long-running task instances that may become stale or irrelevant if newer instances are triggered.
|
||||
- You want to prioritize processing the most recent data or events, even if it means canceling older task instances.
|
||||
- You have resource-intensive tasks where it's more efficient to cancel an in-progress instance and start a new one than to wait for the old instance to complete.
|
||||
- Your user UI allows for multiple inputs, but only the most recent is relevant (i.e. chat messages, form submissions, etc.).
|
||||
|
||||
## CANCEL_NEWEST
|
||||
@@ -120,4 +120,4 @@ The `CANCEL_NEWEST` strategy is similar to `CANCEL_IN_PROGRESS`, but it cancels
|
||||
The `CANCEL_NEWEST` strategy is particularly useful in scenarios where:
|
||||
|
||||
- You want to allow in-progress runs to complete before starting new work.
|
||||
- You have long-running workflow instances and want to avoid one group's instances monopolizing the available slots.
|
||||
- You have long-running task instances and want to avoid one group's instances monopolizing the available slots.
|
||||
|
||||
@@ -48,11 +48,11 @@ export const getStaticProps = ({}) =>
|
||||
|
||||
# Recurring Runs with Cron
|
||||
|
||||
> This example assumes we have a [workflow](./basic-workflows.mdx) registered on a running [worker](./workers.mdx).
|
||||
> This example assumes we have a [task](./your-first-task.mdx) registered on a running [worker](./workers.mdx).
|
||||
|
||||
A [Cron](https://en.wikipedia.org/wiki/Cron) is a time-based job scheduler that allows you to define when a workflow should be executed automatically on a pre-determined schedule.
|
||||
A [Cron](https://en.wikipedia.org/wiki/Cron) is a time-based job scheduler that allows you to define when a task should be executed automatically on a pre-determined schedule.
|
||||
|
||||
Some example use cases for cron-style workflows might include:
|
||||
Some example use cases for cron-style tasks might include:
|
||||
|
||||
1. Running a daily report at a specific time.
|
||||
2. Sending weekly digest emails to users about their activity from the past week.
|
||||
@@ -60,13 +60,13 @@ Some example use cases for cron-style workflows might include:
|
||||
|
||||
Hatchet supports cron triggers to run on a schedule defined in a few different ways:
|
||||
|
||||
- [Workflow Definitions](./cron-runs.mdx#defining-a-cron-in-your-workflow-definition): Define a cron expression in your workflow definition to trigger the workflow on a predefined schedule.
|
||||
- [Dynamic Programmatically](./cron-runs.mdx#programmatically-creating-cron-triggers): Use the Hatchet SDKs to dynamically set the cron schedule of a workflow.
|
||||
- [Task Definitions](./cron-runs.mdx#defining-a-cron-in-your-task-definition): Define a cron expression in your task definition to trigger the task on a predefined schedule.
|
||||
- [Dynamic Programmatically](./cron-runs.mdx#programmatically-creating-cron-triggers): Use the Hatchet SDKs to dynamically set the cron schedule of a task.
|
||||
- [Hatchet Dashboard](./cron-runs.mdx#managing-cron-jobs-in-the-hatchet-dashboard): Manually create cron triggers from the Hatchet Dashboard.
|
||||
|
||||
<Callout type="info">
|
||||
The expression is when Hatchet **enqueues** the workflow, not when the run
|
||||
starts. Scheduling constraints like concurrency limits, rate limits, and retry
|
||||
<Callout type="warning">
|
||||
The expression is when Hatchet **enqueues** the task, not when the run starts.
|
||||
Scheduling constraints like concurrency limits, rate limits, and retry
|
||||
policies can affect run start times.
|
||||
</Callout>
|
||||
|
||||
@@ -90,9 +90,9 @@ Each field can contain a specific value, an asterisk (`*`) to represent all poss
|
||||
- `0 9 * * 1`: Run every Monday at 9 AM
|
||||
- `0 0 1 * *`: Run on the first day of every month at midnight
|
||||
|
||||
## Defining a Cron in Your Workflow Definition
|
||||
## Defining a Cron in Your Task Definition
|
||||
|
||||
You can define a workflow with a cron schedule by configuring the cron expression as part of the workflow definition:
|
||||
You can define a task with a cron schedule by configuring the cron expression as part of the task definition:
|
||||
|
||||
<UniversalTabs items={["Python-Sync", "Python-Async", "Typescript", "Go"]}>
|
||||
<Tabs.Tab title="Python-Sync">
|
||||
@@ -118,19 +118,19 @@ You can define a workflow with a cron schedule by configuring the cron expressio
|
||||
</Tabs.Tab>
|
||||
</UniversalTabs>
|
||||
|
||||
In the examples above, we set the `on cron` property of the workflow. The property specifies the cron expression that determines when the workflow should be triggered.
|
||||
In the examples above, we set the `on cron` property of the task. The property specifies the cron expression that determines when the task should be triggered.
|
||||
|
||||
<Callout>
|
||||
Note: When modifying a cron in your workflow definition, it will override any
|
||||
cron schedule for previous crons defined in previous workflow definitions, but
|
||||
crons created via the API or Dashboard will still be respected.
|
||||
Note: When modifying a cron in your task definition, it will override any cron
|
||||
schedule for previous crons defined in previous task definitions, but crons
|
||||
created via the API or Dashboard will still be respected.
|
||||
</Callout>
|
||||
|
||||
## Programmatically Creating Cron Triggers
|
||||
|
||||
### Create a Cron Trigger
|
||||
|
||||
You can create dynamic cron triggers programmatically via the API. This is useful if you want to create a cron trigger that is not known at the time of workflow definition,
|
||||
You can create dynamic cron triggers programmatically via the API. This is useful if you want to create a cron trigger that is not known at the time of task definition,
|
||||
|
||||
Here's an example of creating a cron to trigger a report for a specific customer every day at noon:
|
||||
|
||||
@@ -179,13 +179,13 @@ You can delete a cron trigger by passing the cron object or a cron trigger id to
|
||||
|
||||
<Callout>
|
||||
Note: Deleting a cron trigger will not cancel any currently running instances
|
||||
of the workflow. It will simply stop the cron trigger from triggering the
|
||||
workflow again.
|
||||
of the task. It will simply stop the cron trigger from triggering the task
|
||||
again.
|
||||
</Callout>
|
||||
|
||||
### List Cron Triggers
|
||||
|
||||
Retrieves a list of all workflow cron triggers matching the criteria.
|
||||
Retrieves a list of all task cron triggers matching the criteria.
|
||||
|
||||
<UniversalTabs items={["Python-Sync", "Python-Async", "Typescript", "Go"]}>
|
||||
<Tabs.Tab title="Python-Sync">
|
||||
@@ -204,7 +204,7 @@ Retrieves a list of all workflow cron triggers matching the criteria.
|
||||
|
||||
## Managing Cron Triggers in the Hatchet Dashboard
|
||||
|
||||
In the Hatchet Dashboard, you can view and manage cron triggers for your workflows.
|
||||
In the Hatchet Dashboard, you can view and manage cron triggers for your tasks.
|
||||
|
||||
Navigate to "Triggers" > "Cron Jobs" in the left sidebar and click "Create Cron Job" at the top right.
|
||||
|
||||
@@ -218,8 +218,8 @@ When using cron triggers, there are a few considerations to keep in mind:
|
||||
|
||||
1. **Time Zone**: Cron schedules are UTC. Make sure to consider the time zone when defining your cron expressions.
|
||||
|
||||
2. **Execution Time**: The actual execution time of a cron-triggered workflow may vary slightly from the scheduled time. Hatchet makes a best-effort attempt to enqueue the workflow as close to the scheduled time as possible, but there may be slight delays due to system load or other factors.
|
||||
2. **Execution Time**: The actual execution time of a cron-triggered task may vary slightly from the scheduled time. Hatchet makes a best-effort attempt to enqueue the task as close to the scheduled time as possible, but there may be slight delays due to system load or other factors.
|
||||
|
||||
3. **Missed Schedules**: If a scheduled workflow is missed (e.g., due to system downtime), Hatchet will not automatically run the missed instances. It will wait for the next scheduled time to trigger the workflow.
|
||||
3. **Missed Schedules**: If a scheduled task is missed (e.g., due to system downtime), Hatchet will not automatically run the missed instances. It will wait for the next scheduled time to trigger the task.
|
||||
|
||||
4. **Overlapping Schedules**: If a workflow is still running when the next scheduled time arrives, Hatchet will start a new instance of the workflow or respect [concurrency](./concurrency.mdx) policy.
|
||||
4. **Overlapping Schedules**: If a task is still running when the next scheduled time arrives, Hatchet will start a new instance of the task or respect [concurrency](../concurrency/overview.mdx) policy.
|
||||
|
||||
@@ -51,11 +51,6 @@ export const simple = hatchet.workflow<DagInput, DagOutput>({
|
||||
});
|
||||
```
|
||||
|
||||
<Callout variant="info">
|
||||
Declaring input and output types are optional, but recommended. When
|
||||
declaring output types, the keys of the object will be the names of the
|
||||
tasks in the workflow where the output will be returned.
|
||||
</Callout>
|
||||
</Tabs.Tab>
|
||||
<Tabs.Tab title="Go">
|
||||
|
||||
@@ -64,6 +59,12 @@ export const simple = hatchet.workflow<DagInput, DagOutput>({
|
||||
</Tabs.Tab>
|
||||
</UniversalTabs>
|
||||
|
||||
<Callout variant="info">
|
||||
The Workflow return object can be interacted with in the same way as a
|
||||
[task](./your-first-task.mdx), however, it can only take a subset of options
|
||||
which are applied at the task level.
|
||||
</Callout>
|
||||
|
||||
## Defining a Task
|
||||
|
||||
Now that we have a workflow, we can define a task to be executed as part of the workflow. Tasks are defined by calling the `task` method on the workflow object.
|
||||
|
||||
@@ -7,6 +7,8 @@ export const DurablePy = {
|
||||
|
||||
export const getStaticProps = ({}) => getSnippets([DurablePy]);
|
||||
|
||||
# Durable Execution
|
||||
|
||||
## Introduction
|
||||
|
||||
**Durable execution** refers to the ability of a function to easily recover from failures or interruptions. In Hatchet, we refer to a function with this ability as a **durable task**. Durable tasks are essentially tasks that store intermediate results in a durable event log - in other words, they're a fancy cache.
|
||||
@@ -14,24 +16,24 @@ export const getStaticProps = ({}) => getSnippets([DurablePy]);
|
||||
This is especially useful in cases such as:
|
||||
|
||||
1. Tasks which need to always run to completion, even if the underlying machine crashes or the task is interrupted.
|
||||
2. Situations where a task needs to wait for a very long time for something to complete before continuing. Running a durable task will not take up a slot on the main worker, so it is a strong candidate for e.g. fanout workflows that spawn a large number of children and then wait for their results.
|
||||
3. Waiting for a potentially long time for an event, such as human-in-the-loop workflows where we might not get human feedback for hours or days.
|
||||
2. Situations where a task needs to wait for a very long time for something to complete before continuing. Running a durable task will not take up a slot on the main worker, so it is a strong candidate for e.g. fanout tasks that spawn a large number of children and then wait for their results.
|
||||
3. Waiting for a potentially long time for an event, such as human-in-the-loop tasks where we might not get human feedback for hours or days.
|
||||
|
||||
## How Hatchet Runs Durable Workflows
|
||||
## How Hatchet Runs Durable Tasks
|
||||
|
||||
When you register a durable task, Hatchet marks the entire workflow as durable. Then, when you start your worker, Hatchet will start a second worker in the background for running durable tasks.
|
||||
When you register a durable task, Hatchet marks the entire task as durable. Then, when you start your worker, Hatchet will start a second worker in the background for running durable tasks.
|
||||
|
||||
If you don't register any durable workflows, the durable worker will not be started. Similarly, if you start a worker with _only_ durable workflows, the "main" worker will not start, and _only_ the durable worker will run. The durable worker will show up as a second worker in the Hatchet Dashboard.
|
||||
If you don't register any durable tasks, the durable worker will not be started. Similarly, if you start a worker with _only_ durable tasks, the "main" worker will not start, and _only_ the durable worker will run. The durable worker will show up as a second worker in the Hatchet Dashboard.
|
||||
|
||||
Tasks that are declared as being durable (using `durable_task` instead of `task`) will receive a `DurableContext` object instead of a normal `Context`, which extends the `Context` by providing some additional tools for working with durable execution features.
|
||||
|
||||
## Example Workflow
|
||||
## Example Task
|
||||
|
||||
Now that we know a bit about how Hatchet handles durable execution, let's build a workflow. We'll start by declaring a workflow that will run durably, on the "durable worker".
|
||||
Now that we know a bit about how Hatchet handles durable execution, let's build a task. We'll start by declaring a task that will run durably, on the "durable worker".
|
||||
|
||||
<GithubSnippet src={DurablePy} target="Create a durable workflow" />
|
||||
|
||||
Here, we've declared a Hatchet workflow just like any other. Now, we can add some tasks to it:
|
||||
Here, we've declared a Hatchet task just like any other. Now, we can add some tasks to it:
|
||||
|
||||
<GithubSnippet src={DurablePy} target="Add durable task" />
|
||||
|
||||
|
||||
@@ -20,13 +20,13 @@ Background tasks are functions which are executed outside of the main request/re
|
||||
|
||||
**Workers**
|
||||
|
||||
Hatchet is responsible for invoking tasks which run on **workers**. Workers are long-running processes which are connected to Hatchet, and execute the functions defined in your workflows. They can be run on your own infrastructure, or on Hatchet's [managed compute](./compute) offering.
|
||||
Hatchet is responsible for invoking tasks which run on **workers**. Workers are long-running processes which are connected to Hatchet, and execute the functions defined in your tasks. They can be run on your own infrastructure, or on Hatchet's [managed compute](./compute) offering.
|
||||
|
||||
One of the design goals of Hatchet is to ensure that workers can be run anywhere, from a PaaS like Heroku to a Kubernetes cluster running in your own data center.
|
||||
|
||||
**What is a workflow?**
|
||||
**What is a task?**
|
||||
|
||||
A workflow is a collection of one or more tasks. Workflows can be run directly, or can be executed in response to an external trigger (an event, schedule, or API call). For example, if you'd like to send notifications to a user after they've signed up, you could create a workflow for that.
|
||||
A task is a unit of work that can be executed by Hatchet. Tasks can be run directly, or can be executed in response to an external trigger (an event, schedule, or API call). For example, if you'd like to send notifications to a user after they've signed up, you could create a task for that. Tasks can be spawned from within another task or can be built into a [directed acyclic graph based workflow](./dags).
|
||||
|
||||
**Durable queue**
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@ import { Callout } from "nextra/components";
|
||||
|
||||
## Hatchet Go V1 Migration Guide
|
||||
|
||||
This guide will help you migrate Hatchet workflows from the V0 SDK to the V1 SDK. Note that the v1 engine will continue to support v0 workflows until September 30th, 2025.
|
||||
This guide will help you migrate from the V0 SDK to the V1 SDK. Note that the v1 engine will continue to support v0 tasks until September 30th, 2025.
|
||||
|
||||
The v1 Go SDK can be found in `github.com/hatchet-dev/hatchet/pkg/v1`. You can instantiate a new Hatchet client via:
|
||||
|
||||
@@ -22,11 +22,11 @@ func main() {
|
||||
|
||||
## Declaring Tasks and Workflows
|
||||
|
||||
In the new SDKs, tasks and workflows use Go generics to make typing easier. As a result, there are a number of improvements made to the context methods and the way tasks are defined.
|
||||
In the new SDKs, tasks use Go generics to make typing easier. As a result, there are a number of improvements made to the context methods and the way tasks are defined.
|
||||
|
||||
### Single-Task Workflows
|
||||
### Single Function Tasks
|
||||
|
||||
Single tasks are much easier to define than before using the `factory` package in the V1 SDK. Here's an example of how to define a simple workflow with the V1 SDK:
|
||||
Single function tasks are much easier to define than before using the `factory` package in the V1 SDK. Here's an example of how to define a simple task with the V1 SDK:
|
||||
|
||||
```go
|
||||
import "github.com/hatchet-dev/hatchet/pkg/v1/factory"
|
||||
@@ -121,24 +121,24 @@ return simple
|
||||
workflow from `factory.NewWorkflow` will need to return a `interface{}` type.
|
||||
</Callout>
|
||||
|
||||
### Workflow and Task Configuration
|
||||
### Task Configuration
|
||||
|
||||
In the V1 SDK, workflows and tasks are configured using the `create` package. The `create` package provides a number of options for configuring tasks and workflows, such as setting the task name, setting the task's parent tasks, and setting the task's wait conditions:
|
||||
In the V1 SDK, tasks are configured using the `create` package. The `create` package provides a number of options for configuring tasks, such as setting the task name, setting the task's parent tasks, and setting the task's wait conditions:
|
||||
|
||||
```go
|
||||
// import the create package
|
||||
import "github.com/hatchet-dev/hatchet/pkg/client/create"
|
||||
|
||||
// utilize the following structs to configure tasks and workflows
|
||||
// utilize the following structs to configure tasks
|
||||
|
||||
type WorkflowTask[I, O any] struct {
|
||||
// (required) The name of the task and workflow
|
||||
// (required) The name of the task
|
||||
Name string
|
||||
|
||||
// (optional) The version of the workflow
|
||||
// (optional) The version of the task
|
||||
Version string
|
||||
|
||||
// (optional) The human-readable description of the workflow
|
||||
// (optional) The human-readable description of the task
|
||||
Description string
|
||||
|
||||
// (optional) ExecutionTimeout specifies the maximum duration a task can run before being terminated
|
||||
@@ -179,10 +179,10 @@ type WorkflowTask[I, O any] struct {
|
||||
}
|
||||
|
||||
type WorkflowOnFailureTask[I, O any] struct {
|
||||
// (optional) The version of the workflow
|
||||
// (optional) The version of the task
|
||||
Version string
|
||||
|
||||
// (optional) The human-readable description of the workflow
|
||||
// (optional) The human-readable description of the task
|
||||
Description string
|
||||
|
||||
// (optional) ExecutionTimeout specifies the maximum duration a task can run before being terminated
|
||||
@@ -211,16 +211,14 @@ type WorkflowOnFailureTask[I, O any] struct {
|
||||
}
|
||||
|
||||
// TaskCreateOpts defines options for creating a standalone task.
|
||||
// This combines both workflow and task properties in a single type.
|
||||
type StandaloneTask struct {
|
||||
|
||||
// (required) The name of the task and workflow
|
||||
// (required) The name of the task
|
||||
Name string
|
||||
|
||||
// (optional) The version of the workflow
|
||||
// (optional) The version of the task
|
||||
Version string
|
||||
|
||||
// (optional) The human-readable description of the workflow
|
||||
// (optional) The human-readable description of the task
|
||||
Description string
|
||||
|
||||
// (optional) ExecutionTimeout specifies the maximum duration a task can run before being terminated
|
||||
|
||||
@@ -16,7 +16,7 @@ This guide will help you migrate Hatchet workflows from the V0 SDK to the V1 SDK
|
||||
|
||||
#### Introductory Example
|
||||
|
||||
First, a simple example of how to define a workflow with the V1 SDK:
|
||||
First, a simple example of how to define a task with the V1 SDK:
|
||||
|
||||
<GithubSnippet src={SimpleWorker} target="Simple" />
|
||||
|
||||
@@ -51,16 +51,16 @@ Typing improvements:
|
||||
|
||||
1. All times and durations, such as `timeout` and `schedule_timeout` fields are now `datetime.timedelta` objects instead of strings (e.g. `"10s"` becomes `timedelta(seconds=10)`).
|
||||
2. External-facing protobuf objects, such as `StickyStrategy` and `ConcurrencyLimitStrategy`, have been replaced by native Python enums to make working with them easier.
|
||||
3. All interactions with the `Workflow` object are now typed, so you know e.g. what the type of the input to the workflow needs to be at type checking time (we see this in the Pydantic example above).
|
||||
4. All external-facing types that are used for triggering workflows, scheduling workflows, etc. are now Pydantic objects, as opposed to being `TypedDict`s.
|
||||
3. All interactions with the `Task` and `Workflow` objects are now typed, so you know e.g. what the type of the input to the task needs to be at type checking time (we see this in the Pydantic example above).
|
||||
4. All external-facing types that are used for triggering tasks, scheduling tasks, etc. are now Pydantic objects, as opposed to being `TypedDict`s.
|
||||
5. The return type of each `Task` is restricted to a `JSONSerializableMapping` or a Pydantic model, to better align with what the Hatchet Engine expects.
|
||||
6. The `ClientConfig` now uses Pydantic Settings, and we've removed the static methods on the Client for `from_environment` and `from_config` in favor of passing configuration in correctly. See the [configuration example](./client.mdx) for more details.
|
||||
7. The REST API wrappers, which previously were under `hatchet.rest`, have been completely overhauled.
|
||||
|
||||
Naming changes:
|
||||
|
||||
1. We no longer have nested `aio` clients for async methods. Instead, async methods throughout the entire SDK are prefixed by `aio_`, similar to [Langchain's use of the `a` prefix](https://python.langchain.com/docs/concepts/streaming/#stream-and-astream) to indicate async. For example, to run a workflow, you may now either use `workflow.run()` or `workflow.aio_run()`.
|
||||
2. All functions on Hatchet clients are now _verbs_. For instance the way to list workflow runs is via `hatchet.workflows.list`.
|
||||
1. We no longer have nested `aio` clients for async methods. Instead, async methods throughout the entire SDK are prefixed by `aio_`, similar to [Langchain's use of the `a` prefix](https://python.langchain.com/docs/concepts/streaming/#stream-and-astream) to indicate async. For example, to run a task, you may now either use `workflow.run()` or `workflow.aio_run()`.
|
||||
2. All functions on Hatchet clients are now _verbs_. For instance the way to list workflow runs is via `hatchet.runs.list()`.
|
||||
3. `max_runs` on the worker has been renamed to `slots`.
|
||||
|
||||
Removals:
|
||||
@@ -75,5 +75,5 @@ Other miscellaneous changes:
|
||||
|
||||
There are a handful of other new features that will make interfacing with the SDK easier, which are listed below.
|
||||
|
||||
1. Concurrency keys using the `input` to a workflow are now checked for validity at runtime. If the workflow's `input_validator` does not contain a field that's used in a key, Hatchet will reject the workflow when it's created. For example, if the key is `input.user_id`, the `input_validator` Pydantic model _must_ contain a `user_id` field.
|
||||
2. There is now an `on_success_task` on the `Workflow` object, which works just like an on-failure task, but it runs after all upstream tasks in the workflow have _succeeded_.
|
||||
1. Concurrency keys using the `input` to a task are now checked for validity at runtime. If the task's `input_validator` does not contain a field that's used in a key, Hatchet will reject the task when it's created. For example, if the key is `input.user_id`, the `input_validator` Pydantic model _must_ contain a `user_id` field.
|
||||
2. There is now an `on_success_task` on the `Workflow` object, which works just like an on-failure task, but it runs after all upstream tasks have _succeeded_.
|
||||
|
||||
@@ -23,11 +23,11 @@ This guide will help you migrate Hatchet workflows from the V0 SDK to the V1 SDK
|
||||
|
||||
#### Introductory Example
|
||||
|
||||
First, we've exposed a new `hatchet.task` method in the V1 SDK for single-task workflows.
|
||||
First, we've exposed a new `hatchet.task` method in the V1 SDK for single function tasks.
|
||||
|
||||
<GithubSnippet src={SimpleWorker} target="Declaring a Task" />
|
||||
|
||||
Dags are still defined as workflows, but they can now be declared using the `hatchet.workflow` method.
|
||||
DAGs are still defined as workflows, but they can now be declared using the `hatchet.workflow` method.
|
||||
|
||||
<GithubSnippet src={DagsWorker} target="Declaring a DAG Workflow" />
|
||||
|
||||
@@ -37,21 +37,21 @@ You can now run work for tasks and workflows by directly interacting with the re
|
||||
|
||||
There are a few important things to note when migrating to the new SDK:
|
||||
|
||||
1. The new SDK uses a factory pattern (shown above) for creating workflows and declaring their tasks, which we've found to be more ergonomic than the previous SDK.
|
||||
2. The old method of defining workflows will still work in the new SDK, but we recommend migrating over to the new method shown above for improved type checking and for access to new features.
|
||||
3. New features of the SDK, such as the new durable execution features rolled out in V1, will only be accessible from the new `WorkflowDeclaration` object in the new SDK.
|
||||
1. The new SDK uses a factory pattern (shown above) for creating tasks and workflows, which we've found to be more ergonomic than the previous SDK.
|
||||
2. The old method of defining tasks will still work in the new SDK, but we recommend migrating over to the new method shown above for improved type checking and for access to new features.
|
||||
3. New features of the SDK, such as the new durable execution features rolled out in V1, will only be accessible from the new `TaskDeclaration` object in the new SDK.
|
||||
|
||||
Since the old pattern for declaring workflows will still work in the new SDK, we recommend migrating existing workflows to the new patterns in V1 gradually.
|
||||
Since the old pattern for declaring tasks will still work in the new SDK, we recommend migrating existing tasks to the new patterns in V1 gradually.
|
||||
|
||||
#### Fanout Example
|
||||
|
||||
The new SDK also provides improved type support for spawning workflows (such as children) from around the codebase. Consider the following example:
|
||||
The new SDK also provides improved type support for spawning child tasks from around the codebase. Consider the following example:
|
||||
|
||||
First, we declare a child workflow:
|
||||
First, we declare a child task:
|
||||
|
||||
<GithubSnippet src={FanoutWorker} target="Declaring a Child" />
|
||||
|
||||
Next, we spawn that child from a parent workflow:
|
||||
Next, we spawn that child from a parent task:
|
||||
|
||||
<GithubSnippet src={FanoutWorker} target="Declaring a Parent" />
|
||||
|
||||
|
||||
@@ -19,11 +19,11 @@ export const getStaticProps = ({}) =>
|
||||
|
||||
# On-Failure Tasks
|
||||
|
||||
The on-failure task is a special type of task in Hatchet workflows that allows you to define a function to be executed in the event that any task in the main workflow fails. This feature enables you to handle errors, perform cleanup tasks, or trigger notifications in case of workflow failures.
|
||||
The on-failure task is a special type of task in Hatchet that allows you to define a function to be executed in the event that any task in the workflow fails. This feature enables you to handle errors, perform cleanup tasks, or trigger notifications in case of task failure within a workflow.
|
||||
|
||||
## Defining an on-failure task
|
||||
|
||||
You can define an on-failure task on your workflow the same as you'd define any other task:
|
||||
You can define an on-failure task on your task the same as you'd define any other task:
|
||||
|
||||
<UniversalTabs items={["Python", "Typescript", "Go"]}>
|
||||
<Tabs.Tab title="Python">
|
||||
@@ -40,15 +40,15 @@ You can define an on-failure task on your workflow the same as you'd define any
|
||||
</Tabs.Tab>
|
||||
</UniversalTabs>
|
||||
|
||||
In the examples above, the on-failure task will be executed only if any of the main workflow task fail.
|
||||
In the examples above, the on-failure task will be executed only if any of the main tasks in the workflow fail.
|
||||
|
||||
## Use Cases
|
||||
|
||||
Some common use cases for the on-failure task include:
|
||||
|
||||
- Performing cleanup tasks after a workflow failure
|
||||
- Performing cleanup tasks after a task failure in a workflow
|
||||
- Sending notifications or alerts about the failure
|
||||
- Logging additional information for debugging purposes
|
||||
- Triggering a compensating action or a fallback workflow
|
||||
- Triggering a compensating action or a fallback task
|
||||
|
||||
By utilizing the on-failure task, you can handle workflow failures gracefully and ensure that necessary actions are taken in case of errors.
|
||||
|
||||
@@ -6,7 +6,7 @@ import { Callout } from "nextra/components";
|
||||
OpenTelemetry support is currently only available for the Python SDK.
|
||||
</Callout>
|
||||
|
||||
Hatchet supports exporting traces from your workflows to an [OpenTelemetry Collector](https://opentelemetry.io/docs/collector/) to improve visibility into your Hatchet tasks.
|
||||
Hatchet supports exporting traces from your tasks to an [OpenTelemetry Collector](https://opentelemetry.io/docs/collector/) to improve visibility into your Hatchet tasks.
|
||||
|
||||
## Usage
|
||||
|
||||
@@ -61,9 +61,9 @@ The `HatchetInstrumentor` also has some methods for generating traceparents that
|
||||
|
||||
### Spans
|
||||
|
||||
By default, Hatchet creates spans at the following points in the lifecycle of a workflow run:
|
||||
By default, Hatchet creates spans at the following points in the lifecycle of a task run:
|
||||
|
||||
1. When a trigger is run on the client side, e.g. `run_workflow()` or `push()` is called.
|
||||
1. When a trigger is run on the client side, e.g. `run()` or `push()` is called.
|
||||
2. When a worker handles a task event, like starting running the task or cancelling the task
|
||||
|
||||
In addition, you'll get a handful of attributes set (prefixed by `hatchet.`) on the task run events, such as the workflow name and the worker id, as well as success / failure states, and so on.
|
||||
In addition, you'll get a handful of attributes set (prefixed by `hatchet.`) on the task run events, such as the task name and the worker id, as well as success / failure states, and so on.
|
||||
|
||||
@@ -29,7 +29,7 @@ export const getStaticProps = ({}) =>
|
||||
|
||||
# Simple Task Retries
|
||||
|
||||
Hatchet provides a simple and effective way to handle failures in your workflow tasks using the task-level retry configuration. This feature allows you to specify the number of times a task should be retried if it fails, helping to improve the reliability and resilience of your workflows.
|
||||
Hatchet provides a simple and effective way to handle failures in your tasks using a retry policy. This feature allows you to specify the number of times a task should be retried if it fails, helping to improve the reliability and resilience of your tasks.
|
||||
|
||||
<Callout type="info">
|
||||
Task-level retries can be added to both `Standalone Tasks` and `Workflow
|
||||
@@ -38,18 +38,18 @@ Hatchet provides a simple and effective way to handle failures in your workflow
|
||||
|
||||
## How it works
|
||||
|
||||
When a task in your workflow fails (i.e. throws an error or returns a non-zero exit code), Hatchet can automatically retry the task based on the `retries` configuration defined in the task object. Here's how it works:
|
||||
When a task fails (i.e. throws an error or returns a non-zero exit code), Hatchet can automatically retry the task based on the `retries` configuration defined in the task object. Here's how it works:
|
||||
|
||||
1. If a task fails and `retries` is set to a value greater than 0, Hatchet will catch the error and retry the task.
|
||||
2. The task will be retried up to the specified number of times, with each retry being executed after a short delay to avoid overwhelming the system.
|
||||
3. If the task succeeds during any of the retries, the workflow will continue to the next task as normal.
|
||||
4. If the task continues to fail after exhausting all the specified retries, the workflow will be marked as failed.
|
||||
3. If the task succeeds during any of the retries, the task will continue as normal.
|
||||
4. If the task continues to fail after exhausting all the specified retries, the task will be marked as failed.
|
||||
|
||||
This simple retry mechanism can help to mitigate transient failures, such as network issues or temporary unavailability of external services, without requiring complex error handling logic in your workflow code.
|
||||
This simple retry mechanism can help to mitigate transient failures, such as network issues or temporary unavailability of external services, without requiring complex error handling logic in your task code.
|
||||
|
||||
## How to use task-level retries
|
||||
|
||||
To enable retries for a task in your workflow, simply add the `retries` property to the task object in your workflow definition:
|
||||
To enable retries for a task, simply add the `retries` property to the task object in your task definition:
|
||||
|
||||
<UniversalTabs items={["Python", "Typescript", "Go"]}>
|
||||
<Tabs.Tab>
|
||||
@@ -63,13 +63,13 @@ To enable retries for a task in your workflow, simply add the `retries` property
|
||||
</Tabs.Tab>
|
||||
</UniversalTabs>
|
||||
|
||||
You can add the `retries` property to any task in your workflow, and Hatchet will handle the retry logic automatically.
|
||||
You can add the `retries` property to any task, and Hatchet will handle the retry logic automatically.
|
||||
|
||||
It's important to note that task-level retries are not suitable for all types of failures. For example, if a task fails due to a programming error or an invalid configuration, retrying the task will likely not resolve the issue. In these cases, you should fix the underlying problem in your code or configuration rather than relying on retries.
|
||||
|
||||
Additionally, if a task interacts with external services or databases, you should ensure that the operation is idempotent (i.e. can be safely repeated without changing the result) before enabling retries. Otherwise, retrying the task could lead to unintended side effects or inconsistencies in your data.
|
||||
|
||||
## Accessing the Retry Count in a Step
|
||||
## Accessing the Retry Count in a Running Task
|
||||
|
||||
If you need to access the current retry count within a task, you can use the `retryCount` method available in the task context:
|
||||
|
||||
@@ -125,6 +125,6 @@ In these cases, even though `retries` is set to a non-zero number (meaning the t
|
||||
|
||||
## Conclusion
|
||||
|
||||
Hatchet's task-level retry feature is a simple and effective way to handle transient failures in your workflow tasks, improving the reliability and resilience of your workflows. By specifying the number of retries for each task, you can ensure that your workflows can recover from temporary issues without requiring complex error handling logic.
|
||||
Hatchet's task-level retry feature is a simple and effective way to handle transient failures in your tasks, improving the reliability and resilience of your tasks. By specifying the number of retries for each task, you can ensure that your tasks can recover from temporary issues without requiring complex error handling logic.
|
||||
|
||||
Remember to use retries judiciously and only for tasks that are idempotent and can safely be repeated. For more advanced retry strategies, such as exponential backoff or circuit breaking, stay tuned for future updates to Hatchet's retry capabilities.
|
||||
|
||||
@@ -12,20 +12,20 @@ export const SimpleGo = {
|
||||
|
||||
export const getStaticProps = ({}) => getSnippets([SimpleTs, SimpleGo]);
|
||||
|
||||
# Enqueuing a Workflow Run (Fire and Forget)
|
||||
# Enqueuing a Task Run (Fire and Forget)
|
||||
|
||||
> This example assumes we have a [workflow](./basic-workflows.mdx) registered on a running [worker](./workers.mdx).
|
||||
> This example assumes we have a [task](./your-first-task.mdx) registered on a running [worker](./workers.mdx).
|
||||
|
||||
Another method of triggering a workflow in Hatchet is to _enqueue_ the workflow without waiting for it to complete, sometimes known as "fire and forget". This pattern is useful for workflows that take a long time to complete or are not critical to the immediate operation of your application.
|
||||
Another method of triggering a task in Hatchet is to _enqueue_ the task without waiting for it to complete, sometimes known as "fire and forget". This pattern is useful for tasks that take a long time to complete or are not critical to the immediate operation of your application.
|
||||
|
||||
Some example use cases for fire-and-forget style workflows might be:
|
||||
Some example use cases for fire-and-forget style tasks might be:
|
||||
|
||||
1. Sending a shipping confirmation email to a user once their order has shipped. This is a truly async task, in the sense that the user is not necessarily using your application when it happens, and the part of the application triggering the workflow does not need to know the result of the work, just that it has been enqueued (assuming that it will complete, of course).
|
||||
1. Sending a shipping confirmation email to a user once their order has shipped. This is a truly async task, in the sense that the user is not necessarily using your application when it happens, and the part of the application triggering the task does not need to know the result of the work, just that it has been enqueued (assuming that it will complete, of course).
|
||||
2. Triggering a machine learning model training job that can take minutes, hours, or even days to complete. Similarly to above, it's likely that no part of the application needs to wait on the result of this work, it just needs to "fire and forget" it - meaning that it needs to kick it off, and let it complete whenever it completes.
|
||||
|
||||
<UniversalTabs items={["Python", "Typescript", "Go"]}>
|
||||
<Tabs.Tab title="Python">
|
||||
You can use your `Workflow` object to run a workflow and "forget" it by calling the `run_no_wait` method. This method enqueue a workflow run and return a `WorkflowRunRef`, a reference to that run, without waiting for the result.
|
||||
You can use your `Workflow` object to run a task and "forget" it by calling the `run_no_wait` method. This method enqueues a task run and returns a `WorkflowRunRef`, a reference to that run, without waiting for the result.
|
||||
|
||||
```python
|
||||
from src.workflows import my_workflow, MyWorkflowInputModel
|
||||
@@ -36,10 +36,10 @@ ref = my_workflow.run_no_wait(MyWorkflowInputModel(foo="bar"))
|
||||
You can also `await` the result of `aio_run_no_wait`:
|
||||
|
||||
```python
|
||||
ref = await my_workflow.aio_run_no_wait(input=MyWorkflowInputModel(foo="bar"))
|
||||
ref = await my_task.aio_run_no_wait(input=MyTaskInputModel(foo="bar"))
|
||||
```
|
||||
|
||||
Note that the type of `input` here is a Pydantic model that matches the input schema of your workflow.
|
||||
Note that the type of `input` here is a Pydantic model that matches the input schema of your task.
|
||||
|
||||
</Tabs.Tab>
|
||||
<Tabs.Tab title="Typescript">
|
||||
@@ -57,9 +57,9 @@ Note that the type of `input` here is a Pydantic model that matches the input sc
|
||||
</Tabs.Tab>
|
||||
</UniversalTabs>
|
||||
|
||||
## Subscribing to results from an enqueued workflow
|
||||
## Subscribing to results from an enqueued task
|
||||
|
||||
Often it is useful to subscribe to the results of a workflow at a later time. The `run_no_wait` method returns a `WorkflowRunRef` object which includes a listener for the result of the workflow.
|
||||
Often it is useful to subscribe to the results of a task at a later time. The `run_no_wait` method returns a `WorkflowRunRef` object which includes a listener for the result of the task.
|
||||
|
||||
<UniversalTabs items={["Python", "Typescript", "Go"]}>
|
||||
<Tabs.Tab title="Python">
|
||||
@@ -91,9 +91,9 @@ result = await ref.aio_result()
|
||||
|
||||
## Triggering Runs in the Hatchet Dashboard
|
||||
|
||||
In the Hatchet Dashboard, you can trigger and view runs for your workflows.
|
||||
In the Hatchet Dashboard, you can trigger and view runs for your tasks.
|
||||
|
||||
Navigate to "Workflow Runs" in the left sidebar and click "Trigger Run" at the top right.
|
||||
Navigate to "Task Runs" in the left sidebar and click "Trigger Run" at the top right.
|
||||
|
||||
You can specify run parameters such as Input, Additional Metadata, and the Scheduled Time.
|
||||
|
||||
|
||||
@@ -37,21 +37,21 @@ export const getStaticProps = ({}) =>
|
||||
|
||||
# Run on Event
|
||||
|
||||
> This example assumes we have a [workflow](./basic-workflows.mdx) registered on a running [worker](./workers.mdx).
|
||||
> This example assumes we have a [task](./your-first-task.mdx) registered on a running [worker](./workers.mdx).
|
||||
|
||||
Run-on-event allows you to trigger one or more workflows when a specific event occurs. This is useful when you need to execute a workflow in response to an ephemeral event where the result is not important. A few common use cases for event-triggered workflow runs are:
|
||||
Run-on-event allows you to trigger one or more tasks when a specific event occurs. This is useful when you need to execute a task in response to an ephemeral event where the result is not important. A few common use cases for event-triggered task runs are:
|
||||
|
||||
1. Running a workflow when an ephemeral event is received, such as a webhook or a message from a queue.
|
||||
2. When you want to run multiple independent workflows in response to a single event. For instance, if you wanted to run a `send_welcome_email` workflow, and you also wanted to run a `grant_new_user_credits` workflow, and a `reward_referral` workflow, all triggered by the signup. In this case, you might declare all three of those workflows with an event trigger for `user:signup`, and then have them all kick off when that event happens.
|
||||
1. Running a task when an ephemeral event is received, such as a webhook or a message from a queue.
|
||||
2. When you want to run multiple independent tasks in response to a single event. For instance, if you wanted to run a `send_welcome_email` task, and you also wanted to run a `grant_new_user_credits` task, and a `reward_referral` task, all triggered by the signup. In this case, you might declare all three of those tasks with an event trigger for `user:signup`, and then have them all kick off when that event happens.
|
||||
|
||||
<Callout type="info">
|
||||
Event triggers evaluate workflows to run at the time of the event. If an event
|
||||
is received before the workflow is registered, the workflow will not be run.
|
||||
Event triggers evaluate tasks to run at the time of the event. If an event is
|
||||
received before the task is registered, the task will not be run.
|
||||
</Callout>
|
||||
|
||||
## Declaring Event Triggers
|
||||
|
||||
To run a workflow on an event, you need to declare the event that will trigger the workflow. This is done by declaring the `on_events` property in the workflow declaration.
|
||||
To run a task on an event, you need to declare the event that will trigger the task. This is done by declaring the `on_events` property in the task declaration.
|
||||
|
||||
<UniversalTabs items={["Python", "Typescript", "Go"]}>
|
||||
<Tabs.Tab title="Python">
|
||||
@@ -66,7 +66,7 @@ To run a workflow on an event, you need to declare the event that will trigger t
|
||||
</UniversalTabs>
|
||||
|
||||
<Callout type="info">
|
||||
Note: Multiple workflows can be triggered by the same event.
|
||||
Note: Multiple tasks can be triggered by the same event.
|
||||
</Callout>
|
||||
|
||||
### Pushing an Event
|
||||
|
||||
@@ -0,0 +1,11 @@
|
||||
# Running Tasks
|
||||
|
||||
Once you have a running worker, you'll want to run your tasks. Hatchet provides a number of ways of triggering task runs, from which you should select the one(s) that best suit(s) your use case.
|
||||
|
||||
1. Tasks can be [run, and have their results waited on](./run-with-results.mdx)
|
||||
2. Tasks can be [enqueued without waiting for their results ("fire and forget")](./enqueue-runs.mdx).
|
||||
3. Tasks can be run on [cron schedules](./cron-runs.mdx).
|
||||
4. Tasks can be [triggered by events](./run-on-event.mdx).
|
||||
5. Tasks can be [scheduled for a later time](./scheduled-runs.mdx).
|
||||
|
||||
Each of these methods for triggering tasks has its own uses in different scenarios, and the next few sections will give some examples of each.
|
||||
@@ -1,11 +0,0 @@
|
||||
# Running Workflows
|
||||
|
||||
Once you have a running worker, you'll want to run your workflows. Hatchet provides a number of ways of triggering workflow runs, from which you should select the one(s) that best suit(s) your use case.
|
||||
|
||||
1. Workflows can be [run, and have their results waited on](./run-with-results.mdx)
|
||||
2. Workflows can be [enqueued without waiting for their results ("fire and forget")](./enqueue-runs.mdx).
|
||||
3. Workflows can be run on [cron schedules](./cron-runs.mdx).
|
||||
4. Workflows can be [triggered by events](./run-on-event.mdx).
|
||||
5. Workflows can be [scheduled for a later time](./scheduled-runs.mdx).
|
||||
|
||||
Each of these methods for triggering workflows have their own uses in different scenarios, and the next few sections will give some examples of each.
|
||||
@@ -29,7 +29,7 @@ With your task defined, you can import it wherever you need to use it and invoke
|
||||
<Tabs.Tab title="Python">
|
||||
|
||||
```python
|
||||
from .workflow import simple
|
||||
from .task import simple
|
||||
|
||||
simple.run(
|
||||
input=SimpleInput(Message="Hello, World!"),
|
||||
|
||||
@@ -17,9 +17,9 @@ export const getStaticProps = ({}) =>
|
||||
|
||||
# Scheduled Runs
|
||||
|
||||
> This example assumes we have a [workflow](./basic-workflows.mdx) registered on a running [worker](./workers.mdx).
|
||||
> This example assumes we have a [task](./your-first-task.mdx) registered on a running [worker](./workers.mdx).
|
||||
|
||||
Scheduled runs allow you to trigger a workflow at a specific time in the future. Some example use cases of scheduling runs might include:
|
||||
Scheduled runs allow you to trigger a task at a specific time in the future. Some example use cases of scheduling runs might include:
|
||||
|
||||
- Sending a reminder email at a specific time after a user took an action.
|
||||
- Running a one-time maintenance task at a predetermined time as determined by your application. For instance, you might want to run a database vacuum during a maintenance window any time a task matches a certain criteria.
|
||||
@@ -27,11 +27,11 @@ Scheduled runs allow you to trigger a workflow at a specific time in the future.
|
||||
|
||||
Hatchet supports scheduled runs to run on a schedule defined in a few different ways:
|
||||
|
||||
- [Programmatically](./scheduled-runs.mdx#programmatically-creating-scheduled-runs): Use the Hatchet SDKs to dynamically set the schedule of a workflow.
|
||||
- [Programmatically](./scheduled-runs.mdx#programmatically-creating-scheduled-runs): Use the Hatchet SDKs to dynamically set the schedule of a task.
|
||||
- [Hatchet Dashboard](./scheduled-runs.mdx#managing-scheduled-runs-in-the-hatchet-dashboard): Manually create scheduled runs from the Hatchet Dashboard.
|
||||
|
||||
<Callout type="warning">
|
||||
The scheduled time is when Hatchet **enqueues** the workflow, not when the run
|
||||
The scheduled time is when Hatchet **enqueues** the task, not when the run
|
||||
starts. Scheduling constraints like concurrency limits, rate limits, and retry
|
||||
policies can affect run start times.
|
||||
</Callout>
|
||||
@@ -40,26 +40,28 @@ Hatchet supports scheduled runs to run on a schedule defined in a few different
|
||||
|
||||
### Create a Scheduled Run
|
||||
|
||||
You can create dynamic scheduled runs programmatically via the API to run workflows at a specific time in the future.
|
||||
You can create dynamic scheduled runs programmatically via the API to run tasks at a specific time in the future.
|
||||
|
||||
Here's an example of creating a scheduled run to trigger a workflow tomorrow at noon:
|
||||
Here's an example of creating a scheduled run to trigger a task tomorrow at noon:
|
||||
|
||||
<UniversalTabs items={["Python", "Typescript", "Go"]}>
|
||||
<Tabs.Tab title="Python">
|
||||
```python
|
||||
schedule = simple.schedule([datetime(2025, 3, 14, 15, 9, 26)])
|
||||
|
||||
```python
|
||||
schedule = simple.schedule([datetime(2025, 3, 14, 15, 9, 26)])
|
||||
|
||||
## do something with the id
|
||||
|
||||
print(schedule.id)
|
||||
|
||||
```
|
||||
|
||||
</Tabs.Tab>
|
||||
<Tabs.Tab title="Typescript">
|
||||
<GithubSnippet src={SimpleTs} target="Create a Scheduled Run" />
|
||||
<GithubSnippet src={SimpleTs} target="Create a Scheduled Run" />
|
||||
</Tabs.Tab>
|
||||
<Tabs.Tab title="Go">
|
||||
<GithubSnippet src={ScheduleTriggerGo} target="Create" />
|
||||
<GithubSnippet src={ScheduleTriggerGo} target="Create" />
|
||||
</Tabs.Tab>
|
||||
</UniversalTabs>
|
||||
|
||||
@@ -68,8 +70,8 @@ In this example you can have different scheduled times for different customers,
|
||||
When creating a scheduled run via the API, you will receive a scheduled run object with a metadata property containing the id of the scheduled run. This id can be used to reference the scheduled run when deleting the scheduled run and is often stored in a database or other persistence layer.
|
||||
|
||||
<Callout type="info">
|
||||
Note: Be mindful of the time zone of the scheduled run. Scheduled runs are
|
||||
**always** stored and returned in UTC.
|
||||
Note: Be mindful of the time zone of the scheduled run. Scheduled runs are
|
||||
**always** stored and returned in UTC.
|
||||
</Callout>
|
||||
|
||||
### Deleting a Scheduled Run
|
||||
@@ -77,36 +79,36 @@ Note: Be mindful of the time zone of the scheduled run. Scheduled runs are
|
||||
You can delete a scheduled run by calling the `delete` method on the scheduled run object.
|
||||
|
||||
<UniversalTabs items={["Python", "Typescript", "Go"]}>
|
||||
<Tabs.Tab title="Python">
|
||||
<GithubSnippet src={SchedulePy} target="Delete" />
|
||||
</Tabs.Tab>
|
||||
<Tabs.Tab title="Typescript">
|
||||
<GithubSnippet src={SimpleTs} target="Deleting a Scheduled Run" />
|
||||
</Tabs.Tab>
|
||||
<Tabs.Tab title="Go">
|
||||
<GithubSnippet src={ScheduleTriggerGo} target="Delete" />
|
||||
</Tabs.Tab>
|
||||
<Tabs.Tab title="Python">
|
||||
<GithubSnippet src={SchedulePy} target="Delete" />
|
||||
</Tabs.Tab>
|
||||
<Tabs.Tab title="Typescript">
|
||||
<GithubSnippet src={SimpleTs} target="Deleting a Scheduled Run" />
|
||||
</Tabs.Tab>
|
||||
<Tabs.Tab title="Go">
|
||||
<GithubSnippet src={ScheduleTriggerGo} target="Delete" />
|
||||
</Tabs.Tab>
|
||||
</UniversalTabs>
|
||||
|
||||
### Listing Scheduled Runs
|
||||
|
||||
You can list all scheduled runs for a workflow by calling the `list` method on the scheduled run object.
|
||||
You can list all scheduled runs for a task by calling the `list` method on the scheduled run object.
|
||||
|
||||
<UniversalTabs items={["Python", "Typescript", "Go"]}>
|
||||
<Tabs.Tab title="Python">
|
||||
<GithubSnippet src={SchedulePy} target="List" />
|
||||
</Tabs.Tab>
|
||||
<Tabs.Tab title="Typescript">
|
||||
<GithubSnippet src={SimpleTs} target="Listing Scheduled Runs" />
|
||||
</Tabs.Tab>
|
||||
<Tabs.Tab title="Go">
|
||||
<GithubSnippet src={ScheduleTriggerGo} target="List" />
|
||||
</Tabs.Tab>
|
||||
<Tabs.Tab title="Python">
|
||||
<GithubSnippet src={SchedulePy} target="List" />
|
||||
</Tabs.Tab>
|
||||
<Tabs.Tab title="Typescript">
|
||||
<GithubSnippet src={SimpleTs} target="Listing Scheduled Runs" />
|
||||
</Tabs.Tab>
|
||||
<Tabs.Tab title="Go">
|
||||
<GithubSnippet src={ScheduleTriggerGo} target="List" />
|
||||
</Tabs.Tab>
|
||||
</UniversalTabs>
|
||||
|
||||
## Managing Scheduled Runs in the Hatchet Dashboard
|
||||
|
||||
In the Hatchet Dashboard, you can view and manage scheduled runs for your workflows.
|
||||
In the Hatchet Dashboard, you can view and manage scheduled runs for your tasks.
|
||||
|
||||
Navigate to "Triggers" > "Scheduled Runs" in the left sidebar and click "Create Scheduled Run" at the top right.
|
||||
|
||||
@@ -120,9 +122,8 @@ When using scheduled runs, there are a few considerations to keep in mind:
|
||||
|
||||
1. **Time Zone**: Scheduled runs are stored and returned in UTC. Make sure to consider the time zone when defining your scheduled time.
|
||||
|
||||
2. **Execution Time**: The actual execution time of a scheduled run may vary slightly from the scheduled time. Hatchet makes a best-effort attempt to enqueue the workflow as close to the scheduled time as possible, but there may be slight delays due to system load or other factors.
|
||||
2. **Execution Time**: The actual execution time of a scheduled run may vary slightly from the scheduled time. Hatchet makes a best-effort attempt to enqueue the task as close to the scheduled time as possible, but there may be slight delays due to system load or other factors.
|
||||
|
||||
3. **Missed Schedules**: If a scheduled workflow is missed (e.g., due to system downtime), Hatchet will not automatically run the missed instances.
|
||||
3. **Missed Schedules**: If a scheduled task is missed (e.g., due to system downtime), Hatchet will not automatically run the missed instances.
|
||||
|
||||
4. **Overlapping Schedules**: If a workflow is still running when a second scheduled run is scheduled to start, Hatchet will start a new instance of the workflow or respect [concurrency](./concurrency.mdx) policy.
|
||||
```
|
||||
4. **Overlapping Schedules**: If a task is still running when a second scheduled run is scheduled to start, Hatchet will start a new instance of the task or respect [concurrency](../concurrency/overview.mdx) policy.
|
||||
|
||||
@@ -2,7 +2,7 @@ import Tabs from "../_setup/tabs.mdx";
|
||||
|
||||
# Get Started with Hatchet
|
||||
|
||||
This guide will help you get started with Hatchet, at the end of the guide you will have a Hatchet project with a basic workflow and a worker to execute the workflow.
|
||||
This guide will help you get started with Hatchet, at the end of the guide you will have a Hatchet project with a basic task and a worker to execute the task.
|
||||
|
||||
## Set environment variables
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ export const getStaticProps = ({}) =>
|
||||
This feature is currently in beta and may be subject to change.
|
||||
</Callout>
|
||||
|
||||
Sticky assignment is a workflow property that allows you to specify that all tasks of a workflow should be assigned to the same worker for the duration of its execution. This can be useful in situations like when you need to maintain expensive local memory state across multiple tasks in a workflow or ensure that certain workflows are processed by the same worker for consistency.
|
||||
Sticky assignment is a task property that allows you to specify that all child tasks should be assigned to the same worker for the duration of its execution. This can be useful in situations like when you need to maintain expensive local memory state across multiple tasks in a workflow or ensure that certain tasks are processed by the same worker for consistency.
|
||||
|
||||
<Callout type="warning">
|
||||
This feature is only compatible with long lived workers, and not webhook
|
||||
@@ -31,20 +31,20 @@ Sticky assignment is a workflow property that allows you to specify that all tas
|
||||
|
||||
## Setting Sticky Assignment
|
||||
|
||||
Sticky assignment is set on the workflow level by adding the `sticky` property to the workflow definition. When a workflow is marked as sticky, all tasks within that workflow will be assigned to the same worker for the duration of the workflow execution.
|
||||
Sticky assignment is set on the task level by adding the `sticky` property to the task definition. When a task is marked as sticky, all steps within that task will be assigned to the same worker for the duration of the task execution.
|
||||
|
||||
<Callout type="warning">
|
||||
While sticky assignment can be useful in certain scenarios, it can also
|
||||
introduce potential bottlenecks if the assigned worker becomes unavailable, or
|
||||
if local state is not maintained when the job is picked up. Be sure to
|
||||
consider the implications of sticky assignment when designing your workflows
|
||||
and have a plan in place to handle local state issues.
|
||||
consider the implications of sticky assignment when designing your tasks and
|
||||
have a plan in place to handle local state issues.
|
||||
</Callout>
|
||||
|
||||
There are two strategies for setting sticky assignment:
|
||||
There are two strategies for setting sticky assignment for [DAG](./dags.mdx) workflows:
|
||||
|
||||
- `SOFT`: The all tasks in the workflow will attempt to be assigned to the same worker, but if that worker is unavailable, it will be assigned to another worker.
|
||||
- `HARD`: The all tasks in the workflow will only be assigned to the same worker. If that worker is unavailable, the task run will not be assigned to another worker and will remain in a pending state until the original worker becomes available or timeout is reached. (See [Scheduling Timeouts](./timeouts.mdx#task-level-timeouts))
|
||||
- `SOFT`: All tasks in the workflow will attempt to be assigned to the same worker, but if that worker is unavailable, it will be assigned to another worker.
|
||||
- `HARD`: All tasks in the workflow will only be assigned to the same worker. If that worker is unavailable, the workflow run will not be assigned to another worker and will remain in a pending state until the original worker becomes available or timeout is reached. (See [Scheduling Timeouts](./timeouts.mdx#task-level-timeouts))
|
||||
|
||||
<UniversalTabs items={['Python', 'Typescript']}>
|
||||
<Tabs.Tab>
|
||||
@@ -64,18 +64,18 @@ There are two strategies for setting sticky assignment:
|
||||
</Tabs.Tab>
|
||||
</UniversalTabs>
|
||||
|
||||
In this example, the `sticky` property is set to `SOFT`, which means that the workflow will attempt to be assigned to the same worker for the duration of its execution. If the original worker is unavailable, the workflow will be assigned to another worker.
|
||||
In this example, the `sticky` property is set to `SOFT`, which means that the task will attempt to be assigned to the same worker for the duration of its execution. If the original worker is unavailable, the task will be assigned to another worker.
|
||||
|
||||
## Sticky Child Workflows
|
||||
## Sticky Child Tasks
|
||||
|
||||
It is possible to spawn child workflows on the same worker as the parent workflow by setting the `sticky` property to `true` in the `spawnWorkflow` method options. This can be useful when you need to maintain local state across multiple workflows or ensure that child workflows are processed by the same worker for consistency.
|
||||
It is possible to spawn child tasks on the same worker as the parent task by setting the `sticky` property to `true` in the `run` method options. This can be useful when you need to maintain local state across multiple tasks or ensure that child tasks are processed by the same worker for consistency.
|
||||
|
||||
However, the child workflow must:
|
||||
However, the child task must:
|
||||
|
||||
1. Specify a `sticky` strategy in the child workflow's definition
|
||||
2. Be registered with the same worker as the parent workflow
|
||||
1. Specify a `sticky` strategy in the child task's definition
|
||||
2. Be registered with the same worker as the parent task
|
||||
|
||||
If either condition is not met, an error will be thrown when the child workflow is spawned.
|
||||
If either condition is not met, an error will be thrown when the child task is spawned.
|
||||
|
||||
<UniversalTabs items={['Python', 'Typescript']}>
|
||||
<Tabs.Tab>
|
||||
|
||||
@@ -19,7 +19,7 @@ export const getStaticProps = ({}) =>
|
||||
|
||||
# Timeouts in Hatchet
|
||||
|
||||
Timeouts are an important concept in Hatchet that allow you to control how long a workflow or task is allowed to run before it is considered to have failed. This is useful for ensuring that your workflows don't run indefinitely and consume unnecessary resources. Timeouts in Hatchet are treated as failures and the task will be [retried](./retries.mdx) if specified.
|
||||
Timeouts are an important concept in Hatchet that allow you to control how long a task is allowed to run before it is considered to have failed. This is useful for ensuring that your tasks don't run indefinitely and consume unnecessary resources. Timeouts in Hatchet are treated as failures and the task will be [retried](./retries.mdx) if specified.
|
||||
|
||||
There are two types of timeouts in Hatchet:
|
||||
|
||||
@@ -104,11 +104,11 @@ The `refreshTimeout` function can be called multiple times within a step to furt
|
||||
|
||||
Timeouts are useful in a variety of scenarios:
|
||||
|
||||
- Ensuring workflows don't run indefinitely and consume unnecessary resources
|
||||
- Failing workflows early if a critical step takes too long
|
||||
- Keeping workflows responsive by ensuring individual steps complete in a timely manner
|
||||
- Ensuring tasks don't run indefinitely and consume unnecessary resources
|
||||
- Failing tasks early if a critical step takes too long
|
||||
- Keeping tasks responsive by ensuring individual steps complete in a timely manner
|
||||
- Preventing infinite loops or hung processes from blocking the entire system
|
||||
|
||||
For example, if you have a workflow that makes an external API call, you may want to set a timeout to ensure the workflow fails quickly if the API is unresponsive, rather than waiting indefinitely.
|
||||
For example, if you have a task that makes an external API call, you may want to set a timeout to ensure the task fails quickly if the API is unresponsive, rather than waiting indefinitely.
|
||||
|
||||
By carefully considering timeouts for your workflows and steps, you can build more resilient and responsive systems with Hatchet.
|
||||
By carefully considering timeouts for your tasks and steps, you can build more resilient and responsive systems with Hatchet.
|
||||
|
||||
@@ -99,7 +99,7 @@ And that's it! Once you run your script to start the worker, you'll see some log
|
||||
[DEBUG] 🪓 -- 2025-03-24 15:11:32,758 - action listener starting on PID: 26434
|
||||
[INFO] 🪓 -- 2025-03-24 15:11:32,760 - starting runner...
|
||||
[DEBUG] 🪓 -- 2025-03-24 15:11:32,761 - starting action listener health check...
|
||||
[DEBUG] 🪓 -- 2025-03-24 15:11:32,764 - 'test-worker' waiting for ['simpleworkflow:step1']
|
||||
[DEBUG] 🪓 -- 2025-03-24 15:11:32,764 - 'test-worker' waiting for ['simpletask:step1']
|
||||
[DEBUG] 🪓 -- 2025-03-24 15:11:33,413 - starting action listener: test-worker
|
||||
[DEBUG] 🪓 -- 2025-03-24 15:11:33,542 - acquired action listener: efc4aaf2-be4a-4964-a578-db6465f9297e
|
||||
[DEBUG] 🪓 -- 2025-03-24 15:11:33,542 - sending heartbeat
|
||||
@@ -113,9 +113,7 @@ And that's it! Once you run your script to start the worker, you'll see some log
|
||||
|
||||
## Understanding Slots
|
||||
|
||||
Slots are the number of concurrent _task_ runs that a worker can execute, and are configured using the `slots` option on the worker. For instance, if you set `slots=5` on your worker, then your worker will be able to run five tasks concurrently before new tasks start needing to wait in the queue before being picked up.
|
||||
|
||||
As a simple example, if we have a worker with `slots` set to `1` and we have a single task declared that sleeps for 1 second, then if we submit more than one task per second, the tasks will begin to queue up, as the worker can only handle one task at a time. Increasing the number of `slots` on your worker will allow you to handle more concurrent work (and thus more throughput, in many cases), without needing to run more workers.
|
||||
Slots are the number of concurrent _task_ runs that a worker can execute, and are configured using the `slots` option on the worker. For instance, if you set `slots=5` on your worker, then your worker will be able to run five tasks concurrently before new tasks start needing to wait in the queue before being picked up. Increasing the number of `slots` on your worker, or the number of workers you run, will allow you to handle more concurrent work (and thus more throughput, in many cases).
|
||||
|
||||
An important caveat is that slot-level concurrency is only helpful up to the point where the worker is not bottlenecked by another resource, such as CPU, memory, or network bandwidth. If your worker is bottlenecked by one of these resources, increasing the number of slots will not improve throughput.
|
||||
|
||||
|
||||
@@ -25,13 +25,13 @@ export const getStaticProps = ({}) =>
|
||||
|
||||
In Hatchet, the fundamental unit of invocable work is a [Task](#defining-a-task). Each task is an atomic function.
|
||||
|
||||
As we continue to build with Hatchet, we'll add additional configuration options to compose tasks into workflows with [DAGs](./dags.mdx) or [procedural child spawning](./child-spawning.mdx).
|
||||
As we continue to build with Hatchet, we'll add additional configuration options to compose tasks into [DAG workflows](./dags.mdx) or [procedural child spawning](./child-spawning.mdx).
|
||||
|
||||
## Defining a Task
|
||||
|
||||
Start by declaring a task with a name. The task object can declare additional task-level configuration options which we'll cover later.
|
||||
|
||||
The returned object is an instance of the `StandaloneTaskWorkflow` class, which is the primary interface for interacting with the task (i.e. [running](./run-with-results.mdx), [enqueuing](./enqueue-runs.mdx), [scheduling](./scheduled-runs.mdx), etc).
|
||||
The returned object is an instance of the `Task` class, which is the primary interface for interacting with the task (e.g. [running](./run-with-results.mdx), [enqueuing](./enqueue-runs.mdx), [scheduling](./scheduled-runs.mdx), etc).
|
||||
|
||||
<UniversalTabs items={["Python", "Typescript", "Go"]}>
|
||||
<Tabs.Tab title="Python">
|
||||
@@ -45,7 +45,7 @@ hatchet = Hatchet(debug=True)
|
||||
class SimpleInput(BaseModel):
|
||||
message: str
|
||||
|
||||
@hatchet.task(name="SimpleWorkflow")
|
||||
@hatchet.task(name="SimpleTask", input_validator=SimpleInput)
|
||||
def simple(input: SimpleInput, ctx: Context) -> dict[str, str]:
|
||||
return {
|
||||
"transformed_message": input.message.lower(),
|
||||
|
||||
+1
-20
@@ -26,26 +26,7 @@ poetry add hatchet-sdk
|
||||
|
||||
## Quick Start
|
||||
|
||||
Here's a simple example of how to use the Hatchet Python SDK:
|
||||
|
||||
```python
|
||||
from hatchet_sdk import Context, EmptyModel, Hatchet
|
||||
|
||||
hatchet = Hatchet(debug=True)
|
||||
|
||||
|
||||
@hatchet.task(name="SimpleWorkflow")
|
||||
def step1(input: EmptyModel, ctx: Context) -> None:
|
||||
print("executed step1")
|
||||
|
||||
|
||||
def main() -> None:
|
||||
worker = hatchet.worker("test-worker", slots=1, workflows=[step1])
|
||||
worker.start()
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
```
|
||||
For examples of how to use the Hatchet Python SDK, including worker setup and task execution, please see our [official documentation](https://docs.hatchet.run/home/setup).
|
||||
|
||||
## Features
|
||||
|
||||
|
||||
@@ -32,39 +32,7 @@ pnpm add @hatchet-dev/typescript-sdk
|
||||
|
||||
## Quick Start
|
||||
|
||||
Here's a simple example of how to use the Hatchet TypeScript SDK:
|
||||
|
||||
```typescript
|
||||
import { HatchetClient } from '@hatchet-dev/typescript-sdk';
|
||||
|
||||
export const hatchet = HatchetClient.init();
|
||||
|
||||
export type SimpleInput = {
|
||||
Message: string;
|
||||
};
|
||||
|
||||
export const simple = hatchet.task({
|
||||
name: 'simple',
|
||||
fn: (input: SimpleInput) => {
|
||||
return {
|
||||
TransformedMessage: input.Message.toLowerCase(),
|
||||
};
|
||||
},
|
||||
});
|
||||
|
||||
async function main() {
|
||||
const worker = await hatchet.worker('simple-worker', {
|
||||
workflows: [simple],
|
||||
slots: 100,
|
||||
});
|
||||
|
||||
await worker.start();
|
||||
}
|
||||
|
||||
if (require.main === module) {
|
||||
main();
|
||||
}
|
||||
```
|
||||
For examples of how to use the Hatchet TypeScript SDK, including worker setup and task execution, please see our [official documentation](https://docs.hatchet.run/home/setup).
|
||||
|
||||
## Features
|
||||
|
||||
|
||||
@@ -376,6 +376,14 @@ export class HatchetClient implements IHatchetClient {
|
||||
return this._workflows;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the tasks client for creating and managing tasks
|
||||
* @returns A tasks client instance
|
||||
*/
|
||||
get tasks() {
|
||||
return this.workflows;
|
||||
}
|
||||
|
||||
private _workers: WorkersClient | undefined;
|
||||
|
||||
/**
|
||||
|
||||
Reference in New Issue
Block a user