diff --git a/docs/src/content/docs/reference/_benchmarks/benchmarks.tsx b/docs/src/content/docs/reference/_benchmarks/benchmarks.tsx
index 33410b09..a86b4d49 100644
--- a/docs/src/content/docs/reference/_benchmarks/benchmarks.tsx
+++ b/docs/src/content/docs/reference/_benchmarks/benchmarks.tsx
@@ -18,6 +18,7 @@ type Datum = {
};
const colors = {
+ payload: "rgb(0, 101, 101)",
supabase: "rgb(62, 207, 142)",
pocketbase0: "rgb(230, 128, 30)",
pocketbase1: "rgb(238, 175, 72)",
@@ -57,37 +58,41 @@ function transformMillisecondTicks(
}
}
-const durations100k = [
- {
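+// Total time in seconds to insert 100k rows, keyed by benchmark setup.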
+const durations100k = {
+ payload: {
+ label: "Payload v3+SQLite",
+ data: [656.09],
+ backgroundColor: colors.payload,
+ },
+ supabase: {
label: "SupaBase",
data: [151],
backgroundColor: colors.supabase,
},
- {
+ pocketbase_ts: {
label: "PocketBase TS",
data: [67.721],
backgroundColor: colors.pocketbase0,
},
- // {
- // label: "PocketBase Dart (AOT)",
- // data: [62.8136],
- // },
- {
+ pocketbase_dart_aot: {
+ label: "PocketBase Dart (AOT)",
+ data: [62.8136],
+ },
+ pocketbase_dart_jit: {
label: "PocketBase Dart (JIT)",
data: [61.687],
backgroundColor: colors.pocketbase1,
},
- {
+ trailbase_ts: {
label: "TrailBase TS",
data: [16.742],
backgroundColor: colors.trailbase0,
},
- // {
- // label: "TrailBase Dart (AOT)",
- // data: [11.1],
- // },
- {
- // label: "TrailBase Dart (JIT)",
+ trailbase_dart_aot: {
+ label: "TrailBase Dart (AOT)",
+ data: [11.1],
+ },
+ trailbase_dart_jit: {
label: "TrailBase Dart",
data: [9.4247],
backgroundColor: colors.trailbase1,
@@ -96,22 +101,46 @@ const durations100k = [
// label: "TrailBase Dart (JIT + PGO)",
// data: [10.05],
// },
- // {
- // label: "TrailBase Dart (INT PK)",
- // data: [8.5249],
- // backgroundColor: colors.trailbase2,
- // },
- {
+ trailbase_dart_jit_int_pk: {
+ label: "TrailBase Dart (INT PK)",
+ data: [8.5249],
+ backgroundColor: colors.trailbase2,
+ },
+ drizzle: {
label: "In-process SQLite (Drizzle)",
data: [8.803],
backgroundColor: colors.drizzle,
},
-];
+};
export function Duration100kInsertsChart() {
const data: ChartData<"bar"> = {
- labels: ["Time [s] (lower is better)"],
- datasets: durations100k as ChartDataset<"bar">[],
+ labels: ["Time in seconds (lower is faster)"],
+ datasets: [
+ durations100k.supabase,
+ durations100k.pocketbase_ts,
+ durations100k.pocketbase_dart_jit,
+ durations100k.trailbase_ts,
+ durations100k.trailbase_dart_jit,
+ durations100k.drizzle,
+ ] as ChartDataset<"bar">[],
+ };
+
+ return
;
+}
+
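+// Same chart as above, additionally including the Payload result.
+// This is the variant embedded in benchmarks.mdx.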
+export function Duration100kInsertsChartMoreResults() {
+ const data: ChartData<"bar"> = {
+ labels: ["Time in seconds (lower is faster)"],
+ datasets: [
+ durations100k.payload,
+ durations100k.supabase,
+ durations100k.pocketbase_ts,
+ durations100k.pocketbase_dart_jit,
+ durations100k.trailbase_ts,
+ durations100k.trailbase_dart_jit,
+ durations100k.drizzle,
+ ] as ChartDataset<"bar">[],
};
return
;
diff --git a/docs/src/content/docs/reference/benchmarks.mdx b/docs/src/content/docs/reference/benchmarks.mdx
index 819348a4..b5f14571 100644
--- a/docs/src/content/docs/reference/benchmarks.mdx
+++ b/docs/src/content/docs/reference/benchmarks.mdx
@@ -4,7 +4,7 @@ description: Performance comparison with similar products.
---
import {
- Duration100kInsertsChart,
+ Duration100kInsertsChartMoreResults,
PocketBaseAndTrailBaseReadLatencies,
PocketBaseAndTrailBaseInsertLatencies,
SupaBaseMemoryUsageChart,
@@ -53,13 +53,13 @@ Ultimately, nothing beats benchmarking your own workload and setup.
_Total Time for 100k Insertions_
-
The graph shows the overall time it takes to insert 100k messages into a mock
-"chat-room" table setup. Less time is better.
+*chat-room* table setup. Less time is better.
Unsurprisingly, in-process SQLite is the quickest [^2].
All other setups add additional table look-ups for access checking, IPC
@@ -69,11 +69,15 @@ and the cost a project would pay by adopting any of the systems over in-process
SQLite.
The data suggests that depending on your setup (client, data, hardware)
-TrailBase can insert 100k records 9 to 16 times faster than SupaBase[^4] and
-roughly 6 to 7 times faster than PocketBase [^1].
+TrailBase can insert 100k records almost 70 times faster than Payload[^4], 9 to
+16 times faster than SupaBase[^5], and roughly 6 to 7 times faster than
+PocketBase[^1].
+
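+For reference, these factors follow directly from the total insertion times
+plotted above. A rough back-of-the-envelope (the constant names are purely
+illustrative):
+
+```ts
+// Speed-up factors derived from the total insertion times in seconds,
+// i.e. the same values that feed the chart above.
+const payloadVsTrailBaseDart = 656.09 / 9.4247; // ≈ 70
+const supabaseVsTrailBaseTs = 151 / 16.742; // ≈ 9
+const supabaseVsTrailBaseDart = 151 / 9.4247; // ≈ 16
+const pocketbaseTsVsTrailBaseDart = 67.721 / 9.4247; // ≈ 7.2
+const pocketbaseDartVsTrailBaseDart = 61.687 / 9.4247; // ≈ 6.5
+```
+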
+{/*
The fact that our TS/node.js benchmark is slower than the Dart one, suggests a
client-side bottleneck that could be overcome by tuning the setup or trying
other JS runtimes with lower overhead HTTP clients.
+*/}
Total time of inserting a large batch of data tells only part of the story,
let's have a quick look at resource consumption to get an intuition for
@@ -90,7 +94,7 @@ _TrailBase & PocketBase Utilization_
The graph shows the CPU utilization and memory consumption (RSS) of both
PocketBase and TrailBase. They look fairly similar apart from TrailBase
finishing earlier. They both load roughly 3 CPUs with PocketBase's CPU
-consumption being slightly more variable [^5].
+consumption being slightly more variable [^6].
The little bump after the TrailBase run is likely due to SQLite check-pointing.
Both only consume about 140MB of memory at full tilt, which makes them a great
@@ -218,8 +222,8 @@ The benchmarks are available on [GitHub](https://github.com/trailbaseio/trailbas
[^2]:
Our setup with drizzle and node.js is certainly not the fastest possible.
- For example, we could drop down to using raw SQLite in C or another
- low-level language.
+ For example, we could drop down to using SQLite directly from C or another
+ low-level language with less FFI overhead.
That said, drizzle is a great popular choice which mostly serves as a
point-of-reference and sanity check.
@@ -228,6 +232,15 @@ The benchmarks are available on [GitHub](https://github.com/trailbaseio/trailbas
For the benchmarks at hand we're using a loopback network device.
[^4]:
+ We picked Payload as a representative of popular Node.js CMSes, since it
+ [itself claims](https://payloadcms.com/blog/performance-benchmarks)
+ to be many times faster than popular options like Strapi or Directus.
+ We used the recommended v3 pre-release together with the SQLite/drizzle
+ database adapter, which is marked as beta.
+ We manually turned on WAL mode and filed an issue with Payload; otherwise,
+ stock Payload was roughly 210 times slower.
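+ As an illustration, WAL mode boils down to a single SQLite pragma. A minimal
+ sketch using the better-sqlite3 driver (not necessarily the driver Payload's
+ adapter uses; the file name is hypothetical):
+
+ ```ts
+ import Database from "better-sqlite3";
+
+ // Hypothetical path of the SQLite file the CMS writes to.
+ const db = new Database("payload.db");
+ // WAL mode is plain SQLite, nothing framework-specific.
+ db.pragma("journal_mode = WAL");
+ ```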
+
+[^5]:
The SupaBase benchmark setup skips row-level access checks. Technically,
this is in its favor from a performance standpoint, however looking at the
overall load on its constituents with PG being only a sliver, it probably
@@ -235,7 +248,7 @@ The benchmarks are available on [GitHub](https://github.com/trailbaseio/trailbas
which has been released since the benchmarks were run. That said, these
claims deserve re-validation.
-[^5]:
+[^6]:
We're unsure as to what causes these 1-core swings.
Runtime-effects, such as garbage collection, may have an effect, however we
would have expected these to show on shorter time-scales.