docs: new blog post (#1728)

* docs: new blog post

* readability

* subscribe form

* drop reo

* linting

* resolve feedback

---------

Co-authored-by: gabriel ruttner <gabriel.ruttner@gmail.com>
This commit is contained in:
abelanger5
2025-05-15 17:00:07 -04:00
committed by GitHub
parent 8b25035c8d
commit 4d95b1c80c
17 changed files with 1294 additions and 75 deletions
@@ -0,0 +1,81 @@
import { Button } from "@/components/ui/button";
import { Input } from "@/components/ui/input";
import { useState } from "react";
import { CheckCircle2 } from "lucide-react";
// Outcome of the most recent subscribe attempt; null until a submit happens
// (and while a new submit is in flight). `error` is only set on failure.
type FormState = {
  success?: boolean;
  error?: string;
} | null;
/**
 * Newsletter signup form. POSTs the entered email to /api/subscribe and
 * swaps to a confirmation panel on success; failures surface an inline
 * error message under the form.
 */
export function MailingListSubscription() {
  const [state, setState] = useState<FormState>(null);
  const [isLoading, setIsLoading] = useState(false);

  async function handleSubmit(e: React.FormEvent<HTMLFormElement>) {
    e.preventDefault();
    setIsLoading(true);
    setState(null);

    // Read the form synchronously, before any await — e.currentTarget is only
    // reliably set while the event is being dispatched.
    const formData = new FormData(e.currentTarget);
    const email = formData.get('email');

    // The input is `required`, but guard at runtime instead of asserting with
    // `as string`: formData.get() returns FormDataEntryValue | null.
    if (typeof email !== 'string' || email.trim() === '') {
      setState({ success: false, error: 'Please enter a valid email address' });
      setIsLoading(false);
      return;
    }

    try {
      const response = await fetch('/api/subscribe', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({ email: email.trim() }),
      });
      if (!response.ok) {
        throw new Error('Failed to subscribe');
      }
      setState({ success: true });
    } catch {
      // Deliberately generic: the user can't act on transport-level detail.
      setState({ success: false, error: 'Failed to subscribe' });
    } finally {
      setIsLoading(false);
    }
  }

  return (
    <div className="w-full max-w-md mx-auto p-6 space-y-4">
      {state?.success ? (
        <div className="flex items-center gap-2 p-3 bg-green-50 dark:bg-green-950/50 rounded-md border border-green-200 dark:border-green-800">
          <CheckCircle2 className="h-5 w-5 text-green-600 dark:text-green-400" />
          <p className="text-sm text-green-700 dark:text-green-300">Thank you for subscribing!</p>
        </div>
      ) : (
        <>
          <div className="text-center space-y-2">
            <h3 className="text-lg font-semibold">Subscribe for more technical deep dives</h3>
            <p className="text-sm text-muted-foreground">
              Stay updated with our latest work. We share insights about distributed systems, workflow engines, and developer tools.
            </p>
          </div>
          <form onSubmit={handleSubmit} className="space-y-4">
            <div className="flex gap-2 md:flex-row flex-col">
              <Input
                type="email"
                name="email"
                placeholder="Enter your email"
                required
                className="flex-1"
              />
              <Button type="submit" disabled={isLoading}>
                {isLoading ? 'Subscribing...' : 'Subscribe'}
              </Button>
            </div>
            {state?.error && (
              <p className="text-sm text-red-600">{state.error}</p>
            )}
          </form>
        </>
      )}
    </div>
  );
}
+76
View File
@@ -0,0 +1,76 @@
import * as React from "react"
import { cn } from "@/lib/utils"
/**
 * Builds one section of the Card family: a ref-forwarding <div> whose base
 * Tailwind classes are merged (via cn) with any caller-supplied className,
 * with all other div attributes passed through.
 */
function makeCardPart(displayName: string, baseClassName: string) {
  const Part = React.forwardRef<
    HTMLDivElement,
    React.HTMLAttributes<HTMLDivElement>
  >(({ className, ...props }, ref) => (
    <div ref={ref} className={cn(baseClassName, className)} {...props} />
  ))
  Part.displayName = displayName
  return Part
}

// Container plus the standard header/title/description/content/footer slots.
const Card = makeCardPart(
  "Card",
  "rounded-xl border bg-card text-card-foreground shadow"
)
const CardHeader = makeCardPart("CardHeader", "flex flex-col space-y-1.5 p-6")
const CardTitle = makeCardPart(
  "CardTitle",
  "font-semibold leading-none tracking-tight"
)
const CardDescription = makeCardPart(
  "CardDescription",
  "text-sm text-muted-foreground"
)
const CardContent = makeCardPart("CardContent", "p-6 pt-0")
const CardFooter = makeCardPart("CardFooter", "flex items-center p-6 pt-0")

export { Card, CardHeader, CardFooter, CardTitle, CardDescription, CardContent }
+25
View File
@@ -0,0 +1,25 @@
import * as React from "react"
import { cn } from "@/lib/utils"
/**
 * Props for {@link Input}. A type alias instead of an empty interface that
 * extends another type — the empty interface adds no members and trips
 * @typescript-eslint/no-empty-interface.
 */
export type InputProps = React.InputHTMLAttributes<HTMLInputElement>;

/**
 * Styled wrapper around the native <input>. Forwards its ref so callers can
 * focus/measure the element; caller `className` is merged after the base
 * styles so it can override them.
 */
const Input = React.forwardRef<HTMLInputElement, InputProps>(
  ({ className, type, ...props }, ref) => {
    return (
      <input
        type={type}
        className={cn(
          "flex h-9 w-full rounded-md border border-input bg-transparent px-3 py-1 text-sm shadow-sm transition-colors file:border-0 file:bg-transparent file:text-sm file:font-medium placeholder:text-muted-foreground focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-ring disabled:cursor-not-allowed disabled:opacity-50",
          className
        )}
        ref={ref}
        {...props}
      />
    )
  }
)
Input.displayName = "Input"

export { Input }
+2 -1
View File
@@ -1,5 +1,6 @@
/// <reference types="next" />
/// <reference types="next/image-types/global" />
/// <reference types="next/navigation-types/compat/navigation" />
// NOTE: This file should not be edited
// see https://nextjs.org/docs/pages/building-your-application/configuring/typescript for more information.
// see https://nextjs.org/docs/app/building-your-application/configuring/typescript for more information.
+1 -1
View File
@@ -31,7 +31,7 @@ const nextConfig = {
permanent: true,
},
{
source: '/:path((?!home|v1|v0|compute|sdk|contributing|self-hosting|launches|blog|favicon\\.ico|hatchet_logo\\.png|_next/.*|monitoring\-demo\.mp4).*)',
source: '/:path((?!api|home|v1|v0|compute|sdk|contributing|self-hosting|launches|blog|favicon\\.ico|.*\\.png|.*\\.gif|_next/.*|monitoring\-demo\.mp4).*)',
destination: '/home/:path*',
permanent: false,
},
+3 -2
View File
@@ -6,6 +6,7 @@
"scripts": {
"taskfile-dev": "pnpm run generate-examples && next dev",
"dev": "task docs",
"only:dev": "next dev",
"build": "next build",
"start": "next start",
"lint:check": "npm run prettier:check",
@@ -13,7 +14,6 @@
"prettier:check": "prettier \"pages/**/*.{tsx,mdx}\" --list-different",
"prettier:fix": "prettier \"pages/**/*.{tsx,mdx,js}\" --write",
"generate-examples": "cd ../snips/ && pnpm i && pnpm generate && pnpm run copy:docs"
},
"repository": {
"type": "git",
@@ -31,6 +31,7 @@
"autoprefixer": "^10.4.17",
"class-variance-authority": "^0.7.0",
"clsx": "^2.1.1",
"loops": "^5.0.1",
"lucide-react": "^0.459.0",
"next": "^14.2.25",
"nextra": "^3.0.0-alpha.22",
@@ -41,7 +42,7 @@
"react-dom": "^18.2.0",
"react-lottie-player": "^2.0.0",
"react-tweet": "^3.2.0",
"reodotdev": "^1.0.0",
"recharts": "^2.15.3",
"shiki": "^1.22.2",
"tailwind-merge": "^2.5.4",
"tailwindcss": "^3.4.1",
+37
View File
@@ -0,0 +1,37 @@
import { LoopsClient } from "loops";
import type { NextApiRequest, NextApiResponse } from 'next';
// Loops.so mailing-list IDs keyed by a readable name. The value is the list
// ID taken from the Loops dashboard; createContact uses it as a property key
// to mark the contact as subscribed to that list.
const mailingLists = {
  'newsletter': 'cmapskb8v00za0iyib5ux3r6i'
}
/**
 * POST /api/subscribe — subscribes an email address to the Loops newsletter.
 *
 * Responses:
 *  - 405 for any method other than POST
 *  - 400 when the email is missing, not a string, or obviously malformed
 *  - 500 when LOOPS_API_KEY is unset or the Loops API call fails
 */
export default async function handler(
  req: NextApiRequest,
  res: NextApiResponse
) {
  if (req.method !== 'POST') {
    return res.status(405).json({ error: 'Method not allowed' });
  }

  const { email } = req.body;

  if (!email) {
    return res.status(400).json({ error: 'Email is required' });
  }

  // req.body is untyped JSON: reject non-strings and clearly malformed
  // addresses instead of forwarding arbitrary payloads to the Loops API.
  if (typeof email !== 'string' || !/^[^\s@]+@[^\s@]+\.[^\s@]+$/.test(email)) {
    return res.status(400).json({ error: 'Invalid email address' });
  }

  if (!process.env.LOOPS_API_KEY) {
    // Server misconfiguration — don't leak which variable is missing.
    return res.status(500).json({ error: 'Server configuration error' });
  }

  const loops = new LoopsClient(process.env.LOOPS_API_KEY);

  try {
    // Args: email, contact properties (none), mailing-list subscriptions.
    await loops.createContact(email, {}, {
      [mailingLists.newsletter]: true,
    });
    return res.status(200).json({ success: true });
  } catch (error) {
    console.error('Subscription error:', error);
    return res.status(500).json({ error: 'Failed to subscribe' });
  }
}
+3
View File
@@ -1,4 +1,7 @@
export default {
"fastest-postgres-inserts": {
title: "The fastest Postgres inserts",
},
"task-queue-modern-python": {
title: "A task queue for modern Python applications",
},
@@ -0,0 +1,200 @@
"use client";
import {
BarChart,
Bar,
CartesianGrid,
XAxis,
YAxis,
Tooltip,
ResponsiveContainer,
ReferenceLine,
} from "recharts";
import { Zap } from "lucide-react";
import {
Card,
CardContent,
CardDescription,
CardFooter,
CardHeader,
CardTitle,
} from "@/components/ui/card";
// Benchmark results: insert throughput (rows/s) and average write latency (ms)
// for 100k-row runs at each connection-pool size. These are the same numbers
// quoted in the accompanying blog post's execution reports.
const chartData = [
  { connections: 10, throughput: 11004.0, latency: 0.907187 },
  { connections: 20, throughput: 16654.29, latency: 1.196766 },
  { connections: 30, throughput: 16506.04, latency: 1.809936 },
  { connections: 40, throughput: 16533.93, latency: 2.415203 },
  { connections: 50, throughput: 16797.25, latency: 2.971404 },
  { connections: 60, throughput: 16965.12, latency: 3.53187 },
  { connections: 70, throughput: 17006.43, latency: 4.110432 },
  { connections: 80, throughput: 16357.61, latency: 4.884029 },
  { connections: 90, throughput: 16875.42, latency: 5.326193 },
  { connections: 100, throughput: 17001.26, latency: 5.872899 },
];
// Highest throughput observed across all connection counts.
// NOTE(review): not referenced anywhere in this component — looks like dead
// code left over from an earlier revision; confirm before removing.
const maxThroughput = Math.max(...chartData.map((item) => item.throughput));

// Chart configuration: a single throughput series colored with the theme's
// primary hue so the chart follows light/dark mode.
const chartConfig = {
  throughput: {
    color: "hsl(var(--primary))",
  },
};
/**
 * Bar chart of Postgres insert throughput versus the number of concurrent
 * connections, with a dashed baseline at the 10-connection result. Data comes
 * from the module-level `chartData` benchmark table.
 */
export default function ConnectionsThroughputChart() {
  // Shape of the props recharts passes to a custom <Tooltip content>.
  // Typed locally so CustomTooltip compiles under noImplicitAny instead of
  // taking an implicitly-any `props`.
  type TooltipContentProps = {
    active?: boolean;
    payload?: Array<{
      value?: number;
      payload: { connections: number; throughput: number; latency: number };
    }>;
  };

  // Common label and axis style with foreground color
  const labelStyle = {
    fontWeight: "bold",
    fontSize: "11px",
    textAnchor: "middle",
    fill: "hsl(var(--foreground))",
  };
  // Style for axis text (without userSelect property)
  const axisStyle = {
    fontSize: "10px",
    fill: "hsl(var(--foreground))",
  };

  // Custom tooltip: shows connection count, throughput and latency for the
  // hovered bar.
  const CustomTooltip = (props: TooltipContentProps) => {
    const { active, payload } = props;
    if (active && payload && payload.length) {
      return (
        <div className="bg-background p-3 border border-border rounded-md shadow-md text-sm font-mono">
          <p className="font-semibold mb-2">{`Connections: ${payload[0]?.payload.connections}`}</p>
          <p className="text-primary flex items-center mb-1">
            <Zap className="h-3 w-3 mr-1" />
            Throughput: {payload[0]?.value?.toLocaleString()} rows/s
          </p>
          <p className="text-muted-foreground text-xs">
            Latency: {payload[0]?.payload.latency?.toFixed(2)} ms
          </p>
        </div>
      );
    }
    return null;
  };

  return (
    <Card className="w-full my-8">
      <CardHeader>
        <CardTitle>PostgreSQL Insert Performance</CardTitle>
        <CardDescription>
          Throughput vs Number of Concurrent Connections
        </CardDescription>
      </CardHeader>
      <CardContent>
        <div className="h-96">
          <ResponsiveContainer width="100%" height="100%">
            <BarChart
              data={chartData}
              margin={{
                left: 45,
                right: 20,
                top: 10,
                bottom: 20,
              }}
            >
              <CartesianGrid vertical={false} />
              <XAxis
                dataKey="connections"
                label={{
                  value: "Number of Connections",
                  position: "insideBottom",
                  offset: -10,
                  style: labelStyle,
                }}
                tickLine={false}
                axisLine={false}
                tickMargin={8}
                style={axisStyle}
              />
              <YAxis
                label={{
                  value: "Throughput (rows/s)",
                  angle: -90,
                  position: "insideLeft",
                  offset: -30,
                  style: labelStyle,
                }}
                tickLine={false}
                axisLine={false}
                tickMargin={8}
                style={axisStyle}
                domain={[0, "dataMax + 1000"]}
              />
              <Tooltip content={<CustomTooltip />} cursor={false} />
              {/* Dashed reference at the 10-connection throughput so the gain
                  from adding connections is visible at a glance. */}
              <ReferenceLine
                y={chartData[0].throughput}
                stroke="gray"
                strokeDasharray="3 3"
                label={{
                  value: "Baseline",
                  position: "insideBottomLeft",
                  style: { fill: "hsl(var(--foreground))", fontSize: 9 },
                }}
              />
              <Bar
                dataKey="throughput"
                name="Throughput"
                stroke={chartConfig.throughput.color}
                fill={chartConfig.throughput.color}
                isAnimationActive={false}
                fillOpacity={0.8}
              />
            </BarChart>
          </ResponsiveContainer>
        </div>
      </CardContent>
      <CardFooter className="flex-col items-start gap-2 text-sm">
        <div className="leading-none text-muted-foreground mt-2">
          Tested with 100,000 rows
        </div>
      </CardFooter>
    </Card>
  );
}
@@ -0,0 +1,290 @@
import { Callout, Tabs } from "nextra/components";
import PerformanceChart from "./latency-bench";
import ConnectionsThroughputChart from "./connections-throughput";
import { MailingListSubscription } from "@/components/mailing-list-subscription";
# The fastest Postgres inserts
_Since you're here, you might be interested in checking out [Hatchet](https://hatchet.run) — the platform for running background tasks, data pipelines and AI agents at scale._
<div className="w-full pb-4 mx-auto border-b shadow-md flex flex-row justify-between items-center mt-10">
<h5 className="text-xl font-bold tracking-tight text-foreground">
Alexander Belanger
</h5>
<p className="font-light text-foreground">Published on May 15, 2025</p>
</div>
At Hatchet, we spent the past half year running hundreds of benchmarks against different Postgres configurations. We set out with a simple question: at what scale does Postgres break?
For us, the question is existential — we use Postgres as the backing database for our task queue, orchestrator, and monitoring system.
We had good reasons for building on top of Postgres, which we've written about [elsewhere](https://news.ycombinator.com/item?id=39643136). But after starting to scale the system, we'd been observing some concerning behavior on our database -- very high spikes in CPU, esoteric errors like `multixact members limit exceeded`, high lock contention -- and we weren't sure whether we'd reached some universal limit in our Postgres cluster, or whether we were doing something wrong.
<img
src="/multixact-meme.png"
alt="multixact-this-is-fine"
className="max-w-full md:max-w-[60%] mx-auto my-8"
/>
It turns out, after some simple tweaks, we saw a major performance boost. And with some non-simple tweaks, we squeezed much more performance out of Postgres than we expected.
If you're used to interacting with Postgres from the client side, perhaps you're having a similar (probably less existential) crisis. Given the wide array of specialized data stores out there — Kafka, Valkey, Clickhouse, countless others — and thousands of infrastructure consultants whose sole purpose is to convince you to switch to a difficult-to-manage managed service, there's generally a lack of intuition for how far a database like Postgres can scale on modern hardware.
Here's an attempt to make things more concrete, at least on the write side of Postgres.
## The basics
_Note: you might want to skip to the [next section](#batched-inserts) if you're familiar with connection pooling and the danger of using too many connections._
Let's start simple: we'd like to insert some data into Postgres. Out of a force of habit, I'm going to call the table we're interacting with `tasks` — this is a very simple table that contains an ID, a created_at timestamp, and a JSONB payload to represent arbitrary data:
```sql
CREATE TABLE tasks (
id BIGINT GENERATED ALWAYS AS IDENTITY,
created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,
args JSONB,
PRIMARY KEY (id)
);
```
Here's how an insert might look:
```sql
INSERT INTO tasks (args)
VALUES ($1)
RETURNING *;
```
Let's try inserting 100k records into our table. For this, we'll use a single database connection and just loop until each record has been inserted. How long does this take? Using my benchmarking tool written in Go (whose code can be found [here](https://github.com/abelanger5/postgres-fast-inserts)), I get the following data on my local machine (Postgres in Docker on a Macbook Pro M3 Max chip):
```
==== Execution Report ====
Total tasks executed: 100000
Total time: 43.782110417s
Average DB write latency: 437.71µs
Throughput: 2284.04 rows/second
========================
```
Not bad — on a single connection running queries with virtually no network latency, we can get up to ~2k writes/second.
(Side note — I'd typically use `pg_bench` but we're going to add in some application-side buffers in a moment, and wanted testing to be consistent with the buffered benchmarks as well as our built-in connection manager [`pgxpool`](https://pkg.go.dev/github.com/jackc/pgx/v5/pgxpool))
What happens if we introduce network latency? Throughput is going to be impacted very significantly — by adding an artificial 2ms of latency, which might be expected if you're connecting to a managed database in the same region but not the same cloud provider, this will drop to about `400 rows/second`!
<Callout>
Optimization #1: reduce network latency (if possible)
</Callout>
You may have also noticed that we ran this on a single connection. Naturally we'd like to increase the number of parallel writes, which brings us to the second optimization: connection pooling. If you're unfamiliar, this is basically the practice of re-using a group of long-lived session-level connections for queries. So instead of running queries one at a time in a loop, we'll write rows using all of the connections available in the pool. How fast can we write data on 10 connections at once?
```
==== Execution Report ====
Total tasks executed: 100000
Total time: 9.087605458s
Average DB write latency: 907.187µs
Throughput: 11004.00 rows/second
========================
```
So by using all 10 connections in the pool, we're able to get **5x the amount of throughput**!
<Callout>
Optimization #2: use a connection pool
</Callout>
Amazing, let's increase connections even more! We're basically printing money!
<img
src="/infinite-connections.gif"
alt="infinite-connections-money"
className="max-w-full md:max-w-[60%] mx-auto my-8"
/>
...but not so fast.
You may have noticed that despite having 10x the amount of connections, we only have 5x the throughput. Let's try doubling the connections again, to 20 connections:
```
==== Execution Report ====
Total tasks executed: 100000
Total time: 6.004460458s
Average DB write latency: 1.196766ms
Throughput: 16654.29 rows/second
========================
```
Hmm, our average write times are going up, and our throughput is only 50% higher. Let's try doubling one more time, to 40 connections:
```
==== Execution Report ====
Total tasks executed: 100000
Total time: 6.048169041s
Average DB write latency: 2.415203ms
Throughput: 16533.93 rows/second
========================
```
Our throughput when using 40 connections is actually _slightly lower_ than when using 20 connections, and our average write latencies have increased!
What's going on? Each connection in Postgres doesn't come for free — there's overhead introduced in multiple places:
1. There's overhead at the application level to acquiring a connection from the pool
2. With multiple writers, there's some overhead involved in both CPU and shared resources like buffers and locks, as well as other bottlenecks like parallel disk i/o. At a certain point, the overhead of each connection will exceed the performance gain of using more connections.
3. If we're saturating CPU, this will introduce dampening behavior, making it slower to acquire locks and for Postgres to spawn a query process, which can increase the average write time and thus reduce throughput.
On my local machine, we saturate the usefulness of more connections at around 20 connections:
<ConnectionsThroughputChart />
Which brings us to optimization number three:
<Callout>
Optimization #3: don't use too many connections.
</Callout>
We've seen quite a few databases where connection limits are set to an incredibly high number, which can cause more harm than good, particularly if you trigger a "connection storm" (a sudden spike in connections), which can cause the database to saturate its lightweight lock manager and cause _extremely_ slow write times.
What's the right size of the connection pool? This will vary from database to database — see [here](https://www.cybertec-postgresql.com/en/estimating-connection-pool-size-with-postgresql-database-statistics/) for a guide on right-sizing your connection pool.
So, let's recap the basics:
- Reduce network latency
- Use a connection pool
- Don't use too many connections
## Batched inserts
We got to 12k writes/s on my local instance — can we push it further? Not only is there overhead involved in each connection, but there's also overhead involved in each query: this includes the round-trip time to the database, the time it takes the internal application connection pool to acquire a connection, and the time it takes Postgres to process the query (including a set of [internal Postgres locks](https://github.com/postgres/postgres/blob/master/src/backend/storage/lmgr/README) which can be bottlenecks in high-throughput scenarios).
To reduce this overhead, we can pack a batch of rows into each query. The simplest way to do this is to send all queries to the Postgres server at once in an implicit transaction (in Go, we can use `pgx` to execute a [`SendBatch` ](https://pkg.go.dev/github.com/jackc/pgx/v5#Conn.SendBatch))
What does performance look like when we use a batched insert for 100k rows instead of inserting rows 1 at a time (on a single connection)?
```
==== Execution Report ====
Total tasks executed: 100000
Total time: 2.669160083s
Average DB write latency: 2.668188328s
Throughput: 37464.97 rows/second
Number of batches: 1
Average batch size: 100000
========================
```
If we compare this to the single-connection inserts from before, we see a **>10x improvement in throughput**. But one thing that may not be immediately obvious is how to implement batched inserts in your application.
The way that we tackled this in [Hatchet](https://hatchet.run) is to add a set of very lightweight, in-memory buffers which flush an array of tasks to the database with the following properties:
1. The buffer has reached its _flush interval_ , or
2. If the buffer has reached its _maximum size_ , it blocks writes until the buffer has been flushed, to properly exert backpressure on the application.
To test this, let's tweak our testing methodology slightly. Instead of writing 100k rows at a time, let's continuously generate data as quickly as possible, and write it to the database as quickly as possible (a more realistic test for an OLTP workload). In this test, we'll use a set of 20 buffers to take advantage of connection pooling with a max size per buffer of 100 rows -- in Postgres semantics, each buffer utilizes 1 connection at a time, and we can write 100 rows per transaction. How does this perform?
```
==== Execution Report ====
Total tasks executed: 2399523
Total time: 30.021498625s
Average DB write latency: 42.973188ms
Throughput: 79926.82 rows/second
Number of batches: 24006
Average batch size: 99
========================
```
We've gone from 2k writes/s → 80k writes/s without doing much work! (we'll discuss the increased latency in a moment).
<Callout>
Optimization #4: use batched inserts
</Callout>
## COPY
Can we push this further? There are a few more things we can do on the query side to increase throughput. Notably, if you only care about writing data and you don't need rows to be returned to the application, you can use [`COPY`](https://www.postgresql.org/docs/current/sql-copy.html) to get even better performance. Let's try a single batch of 100k rows using `COPY...FROM`:
```
==== Execution Report ====
Total tasks executed: 100000
Total time: 1.580160667s
Average DB write latency: 1.577824529s
Throughput: 63284.70 rows/second
Number of batches: 1
Average batch size: 100000
========================
```
So our throughput has increased to **63k writes/s**, up from 2k writes/s when looping over a single connection, and 37k writes/s sending a batch of inserts with a single query.
Why is `COPY...FROM` so much faster? I won't go into the full details here, but my understanding is that Postgres has several optimizations when performing a COPY related to the usage of shared resources like buffers and locks. For more information there's a great writeup [here](https://pganalyze.com/blog/5mins-postgres-optimizing-bulk-loads-copy-vs-insert#postgres-insert-vs-copy-impact-on-shared-buffers).
Let's continuously generate data again, and see how this compares to our batched inserts from before. Can we beat 80k writes/s?
```
==== Execution Report ====
Total tasks executed: 2826355
Total time: 30.676920125s
Average DB write latency: 17.986946ms
Throughput: 92132.95 rows/second
Number of batches: 39423
Average batch size: 71
========================
```
So, we can perform 92k writes/s, which is a 31x improvement from our original, naive implementation, and our average write latency is down as well, from ~43ms to ~18ms.
<Callout>
Optimization #5: use `COPY…FROM` where appropriate.
</Callout>
## Optimizing Latency
Unfortunately, there's no free lunch. Although our throughput is much, much higher, our average write latency has gone from \< 1ms to 17ms in the buffered `COPY...FROM` case.
These are inherently opposed forces: to increase throughput, we need to reduce the impact of shared resources (on the database, this is primarily locks, processes and i/o). To do this, we have to pack more data into a single query, which means that our writes will inherently be slower.
But we don't have to sacrifice _this much_ latency in order to increase throughput. At a certain point, increasing the batch size will not yield any additional throughput, because the time spent on i/o when writing the data to disk will far exceed the overhead of each connection and query. To illustrate this, let's look at throughput and latency as a function of batch size and flush intervals:
<PerformanceChart className="my-4" />
You'll see from the graph above that even an average batch size of 25 rows nearly saturated the throughput we could achieve on the database, with only a latency overhead of ~10ms. Which brings us to the final optimization:
<Callout>
Optimization #6: determine the batch size which optimizes throughput and
latency for your inserts, and don't waste unnecessary time on flushing batches
which are too large.
</Callout>
As an aside, you might be wondering why the latency is sometimes lower when batch sizes/flush intervals are increased. We've set up the buffers to not flush more often than their flush interval allows, which means that if the buffer is at capacity before the flush interval elapses, it may have to wait before it can flush data. The ideal buffer hits its flush interval exactly when it reaches its maximum size, so there's some additional tuning we could do with the flush interval.
## To recap
We made 6 optimizations to increase our throughput while keeping latency low:
1. Reduce network latency
2. Use a connection pool
3. Don't use too many connections
4. Use batched inserts
5. Use `COPY...FROM` where appropriate
6. Determine the batch size which optimizes throughput and latency for your inserts, and don't waste unnecessary time on flushing batches which are too large.
We've gotten pretty far using some simple application-side changes for data inserts. And while batch inserts will always get you more throughput, there are a lot of additional unintuitive ways to improve the behavior of writes in some scenarios, like:
- Inserting into multiple tables in the same transaction
- Issues with writes on tables with foreign keys (and the source of the `multixact` error from above)
- Using `UNNEST` instead of batched inserts
- Using unlogged tables
- Upserting data with potential conflicts
Stay tuned for part 2 where we'll dig into each of these scenarios!
<MailingListSubscription />
+424
View File
@@ -0,0 +1,424 @@
"use client";
import { useState } from "react";
import {
  Bar,
  BarChart,
  CartesianGrid,
  ComposedChart,
  Legend,
  Line,
  LineChart,
  ReferenceArea,
  ResponsiveContainer,
  Tooltip,
  XAxis,
  YAxis,
} from "recharts";
import { TrendingUp, TrendingDown, Clock, Zap } from "lucide-react";
import {
Card,
CardContent,
CardDescription,
CardFooter,
CardHeader,
CardTitle,
} from "@/components/ui/card";
// Benchmark results for buffered COPY writes: each entry pairs a buffer
// configuration (max batch size in rows / flush interval in ms) with the
// measured throughput (rows/s) and average write latency (ms). `label` is the
// pre-formatted x-axis tick.
const chartData = [
  { batchSize: 5, flushInterval: 0.5, label: "5/0.5ms", throughput: 33372, latency: 5.78 },
  { batchSize: 10, flushInterval: 1, label: "10/1ms", throughput: 69896, latency: 5.28 },
  { batchSize: 25, flushInterval: 2.5, label: "25/2.5ms", throughput: 89071, latency: 9.6 },
  { batchSize: 50, flushInterval: 5, label: "50/5ms", throughput: 91680, latency: 14.91 },
  { batchSize: 100, flushInterval: 10, label: "100/10ms", throughput: 98633, latency: 14.01 },
  { batchSize: 200, flushInterval: 20, label: "200/20ms", throughput: 107647, latency: 17.89 },
];
// Percentage change from the smallest (5/0.5ms) to the largest (200/20ms)
// buffer configuration — presumably rendered in the card footer (footer is
// outside this view; verify).
const throughputIncrease = (
  ((chartData[chartData.length - 1].throughput - chartData[0].throughput) /
    chartData[0].throughput) *
  100
).toFixed(1);
const latencyIncrease = (
  ((chartData[chartData.length - 1].latency - chartData[0].latency) /
    chartData[0].latency) *
  100
).toFixed(1);

// Per-series labels/colors; both series track the theme's CSS variables so
// the chart follows light/dark mode.
const chartConfig = {
  throughput: {
    label: "Throughput",
    color: "hsl(var(--chart-1))",
  },
  latency: {
    label: "Latency",
    color: "hsl(var(--primary))",
  },
};
// Renders the blog post's PostgreSQL COPY benchmark results. Three views are
// available via tab buttons: a combined composed chart (throughput bars on the
// left axis + latency line on the right axis), a throughput-only bar chart,
// and a latency-only line chart. Dragging on a chart highlights an x-range
// via a ReferenceArea (see the mouse handlers below).
export default function PerformanceChart() {
  // Active view: "both" | "throughput" | "latency".
  const [activeTab, setActiveTab] = useState("both");
  // X-axis labels bounding the drag-highlight rectangle; null when not dragging.
  const [refAreaLeft, setRefAreaLeft] = useState(null);
  const [refAreaRight, setRefAreaRight] = useState(null);
  // Custom tooltip component to show both metrics.
  // `payload` is recharts' per-series array for the hovered x position; each
  // entry also carries the full data row on entry.payload.
  const CustomTooltip = (props) => {
    const { active, payload } = props;
    if (active && payload && payload.length) {
      return (
        <div className="bg-background p-3 border border-border rounded-md shadow-md text-sm font-mono">
          <p className="font-semibold mb-2">{`Batch: ${payload[0]?.payload.batchSize}, Flush: ${payload[0]?.payload.flushInterval}ms`}</p>
          {activeTab === "both" || activeTab === "throughput" ? (
            <p className="text-primary flex items-center mb-1">
              <Zap className="h-3 w-3 mr-1" />
              Throughput: {payload[0]?.value?.toLocaleString()} rows/s
            </p>
          ) : null}
          {activeTab === "both" || activeTab === "latency" ? (
            <p className="text-primary flex items-center">
              <Clock className="h-3 w-3 mr-1" />
              Latency:{" "}
              {/* In the combined view latency is the second series
                  (payload[1]); in the latency-only view it is the first. */}
              {(activeTab === "both"
                ? payload[1]?.value
                : payload[0]?.value
              )?.toFixed(2)}{" "}
              ms
            </p>
          ) : null}
        </div>
      );
    }
    return null;
  };
  // Chart interaction handlers for the drag highlight.
  // NOTE(review): no zoom is actually applied — mouse-up simply clears the
  // selection. Presumably zooming was planned; confirm before relying on it.
  const handleMouseDown = (e) => {
    if (e && e.activeLabel) {
      setRefAreaLeft(e.activeLabel);
    }
  };
  const handleMouseMove = (e) => {
    // Only track the right edge once a drag has started (left edge set).
    if (refAreaLeft && e && e.activeLabel) {
      setRefAreaRight(e.activeLabel);
    }
  };
  const handleMouseUp = () => {
    // Reset reference area when mouse is released
    setRefAreaLeft(null);
    setRefAreaRight(null);
  };
  // Common label and axis style with foreground color
  const labelStyle = {
    fontWeight: "bold",
    fontSize: "11px",
    textAnchor: "middle",
    fill: "hsl(var(--foreground))",
  };
  // Style for axis text (without userSelect property)
  const axisStyle = {
    fontSize: "10px",
    fill: "hsl(var(--foreground))",
  };
  return (
    <Card className="w-full my-8">
      <CardHeader className="gap-2">
        <CardTitle>PostgreSQL COPY Performance with Buffered Writes</CardTitle>
        <CardDescription>
          Throughput and Latency vs Batch Size/Flush Interval
        </CardDescription>
        {/* View-switcher tabs */}
        <div className="flex space-x-2 mt-2">
          <button
            className={`px-3 py-1 text-sm rounded-md ${activeTab === "both" ? "bg-primary/10 text-primary font-medium" : "bg-muted"}`}
            onClick={() => setActiveTab("both")}
          >
            Both
          </button>
          <button
            className={`px-3 py-1 text-sm rounded-md ${activeTab === "throughput" ? "bg-primary/10 text-primary font-medium" : "bg-muted"}`}
            onClick={() => setActiveTab("throughput")}
          >
            Throughput
          </button>
          <button
            className={`px-3 py-1 text-sm rounded-md ${activeTab === "latency" ? "bg-primary/10 text-primary font-medium" : "bg-muted"}`}
            onClick={() => setActiveTab("latency")}
          >
            Latency
          </button>
        </div>
      </CardHeader>
      <CardContent>
        <div className="h-96">
          <ResponsiveContainer width="100%" height="100%">
            {activeTab === "both" ? (
              // Combined view: throughput bars (left axis) + latency line (right axis).
              <ComposedChart
                data={chartData}
                margin={{
                  left: 35,
                  right: 35,
                  top: 10,
                  bottom: 20,
                }}
                onMouseDown={handleMouseDown}
                onMouseMove={handleMouseMove}
                onMouseUp={handleMouseUp}
              >
                <CartesianGrid vertical={false} />
                <XAxis
                  dataKey="label"
                  label={{
                    value: "Batch Size/Flush Interval",
                    position: "insideBottom",
                    offset: -10,
                    style: labelStyle,
                  }}
                  tickLine={false}
                  axisLine={false}
                  tickMargin={8}
                  minTickGap={16}
                  style={axisStyle}
                />
                <YAxis
                  yAxisId="left"
                  label={{
                    value: "Throughput (rows/s)",
                    angle: -90,
                    position: "insideLeft",
                    offset: -30,
                    style: labelStyle,
                  }}
                  orientation="left"
                  domain={[0, "auto"]}
                  tickLine={false}
                  axisLine={false}
                  tickMargin={8}
                  style={axisStyle}
                />
                <YAxis
                  yAxisId="right"
                  label={{
                    value: "Latency (ms)",
                    angle: 90,
                    position: "insideRight",
                    offset: -25,
                    style: labelStyle,
                  }}
                  orientation="right"
                  domain={[0, "auto"]}
                  tickLine={false}
                  axisLine={false}
                  tickMargin={8}
                  style={axisStyle}
                />
                <Tooltip content={<CustomTooltip />} cursor={false} />
                <Bar
                  yAxisId="left"
                  dataKey="throughput"
                  name="Throughput"
                  stroke={chartConfig.throughput.color}
                  fill={chartConfig.throughput.color}
                  isAnimationActive={false}
                  fillOpacity={1}
                />
                <Line
                  yAxisId="right"
                  type="monotone"
                  dataKey="latency"
                  name="Latency"
                  stroke={chartConfig.latency.color}
                  strokeWidth={2}
                  dot={{ r: 3, fill: chartConfig.latency.color }}
                  isAnimationActive={false}
                />
                {refAreaLeft && refAreaRight && (
                  <ReferenceArea
                    x1={refAreaLeft}
                    x2={refAreaRight}
                    strokeOpacity={0.3}
                    fill="hsl(var(--foreground))"
                    fillOpacity={0.1}
                  />
                )}
              </ComposedChart>
            ) : activeTab === "throughput" ? (
              // Throughput-only bar chart.
              <BarChart
                data={chartData}
                margin={{
                  left: 45,
                  right: 20,
                  top: 10,
                  bottom: 20,
                }}
                onMouseDown={handleMouseDown}
                onMouseMove={handleMouseMove}
                onMouseUp={handleMouseUp}
              >
                <CartesianGrid vertical={false} />
                <XAxis
                  dataKey="label"
                  label={{
                    value: "Batch Size/Flush Interval",
                    position: "insideBottom",
                    offset: -10,
                    style: labelStyle,
                  }}
                  tickLine={false}
                  axisLine={false}
                  tickMargin={8}
                  minTickGap={16}
                  style={axisStyle}
                />
                <YAxis
                  label={{
                    value: "Throughput (rows/s)",
                    angle: -90,
                    position: "insideLeft",
                    offset: -30,
                    style: labelStyle,
                  }}
                  tickLine={false}
                  axisLine={false}
                  tickMargin={8}
                  style={axisStyle}
                />
                <Tooltip content={<CustomTooltip />} cursor={false} />
                <Bar
                  dataKey="throughput"
                  name="Throughput"
                  stroke={chartConfig.throughput.color}
                  fill={chartConfig.throughput.color}
                  isAnimationActive={false}
                  fillOpacity={1}
                />
                {refAreaLeft && refAreaRight && (
                  <ReferenceArea
                    x1={refAreaLeft}
                    x2={refAreaRight}
                    strokeOpacity={0.3}
                    fill="hsl(var(--foreground))"
                    fillOpacity={0.1}
                  />
                )}
              </BarChart>
            ) : (
              // Latency-only line chart.
              <LineChart
                data={chartData}
                margin={{
                  left: 45,
                  right: 20,
                  top: 10,
                  bottom: 20,
                }}
                onMouseDown={handleMouseDown}
                onMouseMove={handleMouseMove}
                onMouseUp={handleMouseUp}
              >
                <CartesianGrid vertical={false} />
                <XAxis
                  dataKey="label"
                  label={{
                    value: "Batch Size/Flush Interval",
                    position: "insideBottom",
                    offset: -10,
                    style: labelStyle,
                  }}
                  tickLine={false}
                  axisLine={false}
                  tickMargin={8}
                  minTickGap={16}
                  style={axisStyle}
                />
                <YAxis
                  label={{
                    value: "Latency (ms)",
                    angle: -90,
                    position: "insideLeft",
                    offset: -30,
                    style: labelStyle,
                  }}
                  tickLine={false}
                  axisLine={false}
                  tickMargin={8}
                  style={axisStyle}
                />
                <Tooltip content={<CustomTooltip />} cursor={false} />
                <Line
                  type="monotone"
                  dataKey="latency"
                  name="Latency"
                  stroke={chartConfig.latency.color}
                  strokeWidth={2}
                  dot={{ r: 3, fill: chartConfig.latency.color }}
                  isAnimationActive={false}
                />
                {refAreaLeft && refAreaRight && (
                  <ReferenceArea
                    x1={refAreaLeft}
                    x2={refAreaRight}
                    strokeOpacity={0.3}
                    fill="hsl(var(--foreground))"
                    fillOpacity={0.1}
                  />
                )}
              </LineChart>
            )}
          </ResponsiveContainer>
        </div>
      </CardContent>
      <CardFooter className="flex-col items-start gap-2 text-sm">
        <div className="leading-none text-muted-foreground mt-2">
          Testing with 20 connections over 30 seconds
        </div>
      </CardFooter>
    </Card>
  );
}
// Import ComposedChart from recharts
// NOTE(review): this import sits at the bottom of the file. ES module imports
// are hoisted, so it works, but it should be moved up next to the other
// recharts imports at the top of the file for readability.
import { ComposedChart } from "recharts";
+147 -8
View File
@@ -27,6 +27,9 @@ importers:
clsx:
specifier: ^2.1.1
version: 2.1.1
loops:
specifier: ^5.0.1
version: 5.0.1
lucide-react:
specifier: ^0.459.0
version: 0.459.0(react@18.3.1)
@@ -57,9 +60,9 @@ importers:
react-tweet:
specifier: ^3.2.0
version: 3.2.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
reodotdev:
specifier: ^1.0.0
version: 1.0.0
recharts:
specifier: ^2.15.3
version: 2.15.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
shiki:
specifier: ^1.22.2
version: 1.23.0
@@ -101,6 +104,10 @@ packages:
'@antfu/utils@8.1.1':
resolution: {integrity: sha512-Mex9nXf9vR6AhcXmMrlz/HVgYYZpVGJ6YlPgwl7UnaFpnshXs6EK/oa5Gpf3CzENMjkvEx2tQtntGnb7UtSTOQ==}
'@babel/runtime@7.27.1':
resolution: {integrity: sha512-1x3D2xEk2fRo3PAhwQwu5UubzgiVWSXTBfWpVd2Mx2AzRqJuDJCsgaDVZ7HB5iGzDW1Hl1sWN2mFyKjmR9uAog==}
engines: {node: '>=6.9.0'}
'@braintree/sanitize-url@7.1.1':
resolution: {integrity: sha512-i1L7noDNxtFyL5DmZafWy1wRVhGehQmzZaz1HiN5e7iylJMSZR7ekOV7NsIqa5qBldlLrsKv4HbgFUVlQrz8Mw==}
@@ -1001,6 +1008,9 @@ packages:
supports-color:
optional: true
decimal.js-light@2.5.1:
resolution: {integrity: sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg==}
decode-named-character-reference@1.0.2:
resolution: {integrity: sha512-O8x12RzrUF8xyVcY0KJowWsmaJxQbmy0/EtnNtHRpsOcT7dFk5W598coHqBVpmWo1oQQfsCqfCmkZN5DJrZVdg==}
@@ -1020,6 +1030,9 @@ packages:
dlv@1.1.3:
resolution: {integrity: sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==}
dom-helpers@5.2.1:
resolution: {integrity: sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA==}
dompurify@3.2.4:
resolution: {integrity: sha512-ysFSFEDVduQpyhzAob/kkuJjf5zWkZD8/A9ywSp1byueyuCfHamrCBa14/Oc2iiB0e51B+NpxSl5gmzn+Ms/mg==}
@@ -1096,6 +1109,9 @@ packages:
estree-walker@3.0.3:
resolution: {integrity: sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==}
eventemitter3@4.0.7:
resolution: {integrity: sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==}
execa@8.0.1:
resolution: {integrity: sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg==}
engines: {node: '>=16.17'}
@@ -1113,6 +1129,10 @@ packages:
fast-deep-equal@3.1.3:
resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==}
fast-equals@5.2.2:
resolution: {integrity: sha512-V7/RktU11J3I36Nwq2JnZEM7tNm17eBJz+u25qdxBZeCKiX6BkVSZQjwWIr+IobgnZy+ag73tTZgZi7tr0LrBw==}
engines: {node: '>=6.0.0'}
fast-glob@3.3.2:
resolution: {integrity: sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==}
engines: {node: '>=8.6.0'}
@@ -1387,9 +1407,16 @@ packages:
lodash-es@4.17.21:
resolution: {integrity: sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==}
lodash@4.17.21:
resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==}
longest-streak@3.1.0:
resolution: {integrity: sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==}
loops@5.0.1:
resolution: {integrity: sha512-xM1c9mnlr8Hr4cHW944TQoK6ApynjinUWOgYZd9/B0/3lwTThq24BQ7+XLjgbFAP5kJzqDTRDQi3t+Diy51Udw==}
engines: {node: '>=18'}
loose-envify@1.4.0:
resolution: {integrity: sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==}
hasBin: true
@@ -1841,6 +1868,9 @@ packages:
engines: {node: '>=14'}
hasBin: true
prop-types@15.8.1:
resolution: {integrity: sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==}
property-information@6.5.0:
resolution: {integrity: sha512-PgTgs/BlvHxOu8QuEN7wi5A0OmXaBcHpmCSTehcs6Uuu9IkDIEo13Hy7n898RHfrQ49vKCoGeWZSaAK01nwVig==}
@@ -1858,6 +1888,12 @@ packages:
peerDependencies:
react: ^18.3.1
react-is@16.13.1:
resolution: {integrity: sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==}
react-is@18.3.1:
resolution: {integrity: sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==}
react-lottie-player@2.1.0:
resolution: {integrity: sha512-rxSNIVVLWYnwzsIow377vZsh7GDbReu70V7IDD9TbbcdcJWons4pSh3nyuEa4QWIZw0ZBIieoZRTsiqnb6MZ3g==}
engines: {node: '>=10'}
@@ -1870,6 +1906,18 @@ packages:
react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
react-smooth@4.0.4:
resolution: {integrity: sha512-gnGKTpYwqL0Iii09gHobNolvX4Kiq4PKx6eWBCYYix+8cdw+cGo3do906l1NBPKkSWx1DghC1dlWG9L2uGd61Q==}
peerDependencies:
react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
react-transition-group@4.4.5:
resolution: {integrity: sha512-pZcd1MCJoiKiBR2NRxeCRg13uCXbydPnmB4EOeRrY7480qNWO8IIgQG6zlDkm6uRMsURXPuKq0GWtiM59a5Q6g==}
peerDependencies:
react: '>=16.6.0'
react-dom: '>=16.6.0'
react-tweet@3.2.1:
resolution: {integrity: sha512-dktP3RMuwRB4pnSDocKpSsW5Hq1IXRW6fONkHhxT5EBIXsKZzdQuI70qtub1XN2dtZdkJWWxfBm/Q+kN+vRYFA==}
peerDependencies:
@@ -1890,6 +1938,16 @@ packages:
reading-time@1.5.0:
resolution: {integrity: sha512-onYyVhBNr4CmAxFsKS7bz+uTLRakypIe4R+5A824vBSkQy/hB3fZepoVEf8OVAxzLvK+H/jm9TzpI3ETSm64Kg==}
recharts-scale@0.4.5:
resolution: {integrity: sha512-kivNFO+0OcUNu7jQquLXAxz1FIwZj8nrj+YkOKc5694NbjCvcT6aSZiIzNzd2Kul4o4rTto8QVR9lMNtxD4G1w==}
recharts@2.15.3:
resolution: {integrity: sha512-EdOPzTwcFSuqtvkDoaM5ws/Km1+WTAO2eizL7rqiG0V2UVhTnz0m7J2i0CjVPUCdEkZImaWvXLbZDS2H5t6GFQ==}
engines: {node: '>=14'}
peerDependencies:
react: ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
react-dom: ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
recma-build-jsx@1.0.0:
resolution: {integrity: sha512-8GtdyqaBcDfva+GUKDr3nev3VpKAhup1+RvkMvUxURHpW7QyIvk9F5wz7Vzo06CEMSilw6uArgRqhpiUcWp8ew==}
@@ -1963,9 +2021,6 @@ packages:
remark-stringify@11.0.0:
resolution: {integrity: sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==}
reodotdev@1.0.0:
resolution: {integrity: sha512-wXe1vJucZjrhQL0SxOL9EvmJrtbMCIEGMdZX5lj/57n2T3UhBHZsAcM5TQASJ0T6ZBbrETRnMhH33bsbJeRO6Q==}
resolve@1.22.8:
resolution: {integrity: sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==}
hasBin: true
@@ -2145,6 +2200,9 @@ packages:
thenify@3.3.1:
resolution: {integrity: sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==}
tiny-invariant@1.3.3:
resolution: {integrity: sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==}
tinyexec@0.3.2:
resolution: {integrity: sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==}
@@ -2260,6 +2318,9 @@ packages:
vfile@6.0.3:
resolution: {integrity: sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==}
victory-vendor@36.9.2:
resolution: {integrity: sha512-PnpQQMuxlwYdocC8fIJqVXvkeViHYzotI+NJrCuav0ZYFoq912ZHBk3mCeuj+5/VpodOjPe1z0Fk2ihgzlXqjQ==}
vscode-jsonrpc@8.2.0:
resolution: {integrity: sha512-C+r0eKJUIfiDIfwJhria30+TYWPtuHJXHtI7J0YlOmKAo7ogxP20T0zxB7HZQIFhIyvoBPwWskjxrvAtfjyZfA==}
engines: {node: '>=14.0.0'}
@@ -2338,6 +2399,8 @@ snapshots:
'@antfu/utils@8.1.1': {}
'@babel/runtime@7.27.1': {}
'@braintree/sanitize-url@7.1.1': {}
'@chevrotain/cst-dts-gen@11.0.3':
@@ -3287,6 +3350,8 @@ snapshots:
dependencies:
ms: 2.1.3
decimal.js-light@2.5.1: {}
decode-named-character-reference@1.0.2:
dependencies:
character-entities: 2.0.2
@@ -3305,6 +3370,11 @@ snapshots:
dlv@1.1.3: {}
dom-helpers@5.2.1:
dependencies:
'@babel/runtime': 7.27.1
csstype: 3.1.3
dompurify@3.2.4:
optionalDependencies:
'@types/trusted-types': 2.0.7
@@ -3386,6 +3456,8 @@ snapshots:
dependencies:
'@types/estree': 1.0.6
eventemitter3@4.0.7: {}
execa@8.0.1:
dependencies:
cross-spawn: 7.0.5
@@ -3408,6 +3480,8 @@ snapshots:
fast-deep-equal@3.1.3: {}
fast-equals@5.2.2: {}
fast-glob@3.3.2:
dependencies:
'@nodelib/fs.stat': 2.0.5
@@ -3761,8 +3835,12 @@ snapshots:
lodash-es@4.17.21: {}
lodash@4.17.21: {}
longest-streak@3.1.0: {}
loops@5.0.1: {}
loose-envify@1.4.0:
dependencies:
js-tokens: 4.0.0
@@ -4582,6 +4660,12 @@ snapshots:
prettier@3.3.3: {}
prop-types@15.8.1:
dependencies:
loose-envify: 1.4.0
object-assign: 4.1.1
react-is: 16.13.1
property-information@6.5.0: {}
property-information@7.0.0: {}
@@ -4596,6 +4680,10 @@ snapshots:
react: 18.3.1
scheduler: 0.23.2
react-is@16.13.1: {}
react-is@18.3.1: {}
react-lottie-player@2.1.0(react@18.3.1):
dependencies:
fast-deep-equal: 3.1.3
@@ -4608,6 +4696,23 @@ snapshots:
react: 18.3.1
react-dom: 18.3.1(react@18.3.1)
react-smooth@4.0.4(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
dependencies:
fast-equals: 5.2.2
prop-types: 15.8.1
react: 18.3.1
react-dom: 18.3.1(react@18.3.1)
react-transition-group: 4.4.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
react-transition-group@4.4.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
dependencies:
'@babel/runtime': 7.27.1
dom-helpers: 5.2.1
loose-envify: 1.4.0
prop-types: 15.8.1
react: 18.3.1
react-dom: 18.3.1(react@18.3.1)
react-tweet@3.2.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
dependencies:
'@swc/helpers': 0.5.15
@@ -4630,6 +4735,23 @@ snapshots:
reading-time@1.5.0: {}
recharts-scale@0.4.5:
dependencies:
decimal.js-light: 2.5.1
recharts@2.15.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
dependencies:
clsx: 2.1.1
eventemitter3: 4.0.7
lodash: 4.17.21
react: 18.3.1
react-dom: 18.3.1(react@18.3.1)
react-is: 18.3.1
react-smooth: 4.0.4(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
recharts-scale: 0.4.5
tiny-invariant: 1.3.3
victory-vendor: 36.9.2
recma-build-jsx@1.0.0:
dependencies:
'@types/estree': 1.0.6
@@ -4790,8 +4912,6 @@ snapshots:
mdast-util-to-markdown: 2.1.2
unified: 11.0.5
reodotdev@1.0.0: {}
resolve@1.22.8:
dependencies:
is-core-module: 2.15.1
@@ -4999,6 +5119,8 @@ snapshots:
dependencies:
any-promise: 1.3.0
tiny-invariant@1.3.3: {}
tinyexec@0.3.2: {}
title@4.0.1:
@@ -5141,6 +5263,23 @@ snapshots:
'@types/unist': 3.0.3
vfile-message: 4.0.2
victory-vendor@36.9.2:
dependencies:
'@types/d3-array': 3.2.1
'@types/d3-ease': 3.0.2
'@types/d3-interpolate': 3.0.4
'@types/d3-scale': 4.0.8
'@types/d3-shape': 3.1.7
'@types/d3-time': 3.0.3
'@types/d3-timer': 3.0.2
d3-array: 3.2.4
d3-ease: 3.0.1
d3-interpolate: 3.0.1
d3-scale: 4.0.2
d3-shape: 3.2.0
d3-time: 3.1.0
d3-timer: 3.0.1
vscode-jsonrpc@8.2.0: {}
vscode-languageserver-protocol@3.17.5:
Binary file not shown.

After

Width:  |  Height:  |  Size: 1.9 MiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.2 MiB

+5 -5
View File
@@ -36,11 +36,11 @@
--ring: 0 0% 3.9%;
/* Chart Colors */
--chart-1: 12 76% 61%;
--chart-2: 173 58% 39%;
--chart-3: 197 37% 24%;
--chart-4: 43 74% 66%;
--chart-5: 27 87% 67%;
--chart-1: 221.2 83.2% 53.3%;
--chart-2: 212 95% 68%;
--chart-3: 216 92% 60%;
--chart-4: 210 98% 78%;
--chart-5: 212 97% 87%;
--radius: 0.5rem;
}
@@ -4,9 +4,6 @@ import { useEffect, useState } from "react";
import { cn } from "@/lib/utils";
import { Button } from "./button";
import { CookieIcon } from "@radix-ui/react-icons";
import posthog from "posthog-js";
import { loadReoScript } from "@/lib/reoWrapper";
export function cookieConsentGiven() {
@@ -67,27 +64,6 @@ export default function CookieConsent({ variant = "default", demo = false, onAcc
}
}, []);
const [reo, setReo] = useState<boolean>(false);
useEffect(() => {
const consented = consentGiven === 'yes';
posthog.capture("accept-cookies", { accepted: consented });
if (consented && !reo) {
const clientID = "c3e6c6700582dae";
// Resolve promise to get access to methods on Reo
const reoPromise = loadReoScript({ clientID });
reoPromise
.then((Reo: any) => {
Reo.init({ clientID });
setReo(true);
})
.catch((error: any) => {
console.error("Error loading r", error);
});
}
}, [consentGiven, reo]);
// Default banner
if (variant === "default") {
-34
View File
@@ -1,34 +0,0 @@
/**
 * Thin wrapper around the reodotdev library that isolates its ES Module /
 * CommonJS compatibility quirks. The module is pulled in with a dynamic
 * import at runtime, and only in the browser, so server-side rendering never
 * attempts to load it.
 */
interface ReoScriptOptions {
  clientID: string;
}

/**
 * Loads the Reo analytics script on the client.
 *
 * @param options - client configuration forwarded to reodotdev.
 * @returns the promise produced by reodotdev's own loadReoScript on the
 *          client; on the server, a no-op stub exposing `init`.
 * @throws rethrows any import/lookup failure after logging it.
 */
export async function loadReoScript(options: ReoScriptOptions): Promise<any> {
  // Server side: never touch the module, just hand back a harmless stub.
  if (typeof window === 'undefined') {
    return Promise.resolve({
      init: () => console.log('Reo initialization skipped on server')
    });
  }

  try {
    // Client side: dynamic import defers module resolution to runtime.
    const reodotdevModule = await import('reodotdev');
    if (reodotdevModule && typeof reodotdevModule.loadReoScript === 'function') {
      return reodotdevModule.loadReoScript(options);
    }
    // Falls into the catch below so it is logged and rethrown like any
    // other load failure.
    throw new Error('loadReoScript function not found in reodotdev module');
  } catch (error) {
    console.error('Error importing reodotdev:', error);
    throw error;
  }
}