Squash all commits for a fresh start.
This is only to avoid accidentally leaking any secrets from early development, especially in light of short-SHA attacks.
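For context, one common way to do such a squash is to recreate the branch from an orphan commit — a rough sketch, assuming the branch is `main` and the remote is `origin`:

```bash
# Start an empty, history-less branch and commit the current tree as one commit.
git checkout --orphan fresh-start
git add -A
git commit -m "Squash all commits for a fresh start."

# Replace main with the squashed branch and force-push it.
git branch -M fresh-start main
git push --force origin main
```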
20
.dockerignore
Normal file
@@ -0,0 +1,20 @@
.hooks/

target/
../../target/
**/target/
**/node_modules/
**/dist/

traildepot/
docs/

Dockerfile*
.docker*

.git/
.git*

*.image
.rustfmt.toml
.env
39
.github/workflows/test.yml
vendored
Normal file
@@ -0,0 +1,39 @@
name: test

on:
  pull_request:
  push:
    branches: [main]

jobs:

  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          submodules: 'true'
      - name: Install Dependencies
        run: |
          sudo apt-get update && \
            sudo apt-get install -y --no-install-recommends curl libssl-dev pkg-config libclang-dev protobuf-compiler libprotobuf-dev libsqlite3-dev
      - uses: pnpm/action-setup@v4
        with:
          version: 9
      - name: PNPM install
        run: |
          pnpm i
      - name: Set up Flutter
        uses: subosito/flutter-action@v2
        with:
          channel: stable
          flutter-version: 3.24.3
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          default: true
      - uses: actions/setup-python@v3
      - uses: pre-commit/action@v3.0.1
      # - name: Rust tests
      #   run: |
      #     cargo test -p trailbase-core -p trailbase-extension -p trailbase-sqlite -p trailbase-cli
13
.gitignore
vendored
Normal file
@@ -0,0 +1,13 @@
# Build artifacts
target/
node_modules/

# macOS-specific files
.DS_Store

# JetBrains settings folder
.idea/

# Dev artifacts
public/
traildepot/
9
.gitmodules
vendored
Normal file
@@ -0,0 +1,9 @@
[submodule "vendor/refinery"]
	path = vendor/refinery
	url = git@github.com:trailbaseio/refinery.git
[submodule "vendor/sqlite_loadable"]
	path = vendor/sqlite-loadable
	url = git@github.com:trailbaseio/sqlite-loadable-rs.git
[submodule "vendor/sqlean/bundled/sqlean"]
	path = vendor/sqlean/bundled/sqlean
	url = https://github.com/trailbaseio/sqlean
103
.pre-commit-config.yaml
Normal file
@@ -0,0 +1,103 @@
exclude: '(trailbase-core/bindings|bindings)/.*'

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.6.0
    hooks:
      - id: check-byte-order-marker
      - id: check-case-conflict
      - id: check-merge-conflict
      - id: check-symlinks
      - id: check-yaml
      - id: end-of-file-fixer
      - id: mixed-line-ending
      - id: trailing-whitespace

  # Self-validation for pre-commit manifest.
  - repo: https://github.com/pre-commit/pre-commit
    rev: v3.8.0
    hooks:
      - id: validate_manifest

  - repo: local
    hooks:
      ### Rust ###
      - id: cargofmt
        name: Cargo Format
        entry: cargo fmt -- --check
        pass_filenames: false
        # NOTE: language refers to the language in which the hook is implemented
        # in, rather than the inputs. In this case we rely on cargo being
        # installed on the system
        language: system
        # NOTE: types/files/exclude narrow the inputs the hook should run on.
        types: [rust]
        exclude: '^vendor/'

      - id: cargoclippy
        name: Cargo Clippy
        # Be verbose to at least still see warnings scroll by.
        verbose: true
        entry: cargo clippy --workspace --no-deps
        language: system
        types: [rust]
        exclude: '^vendor/'
        pass_filenames: false

      - id: cargotest
        name: Cargo Test
        entry: cargo test --workspace -- --show-output
        language: system
        types: [rust]
        exclude: '^(vendor|bindings)/'
        pass_filenames: false

      ### Auth, Admin, Docs UI ###
      - id: prettier
        name: Prettier
        entry: pnpm -r format --check
        language: system
        types: [file]
        files: .*\.(js|mjs|cjs|ts|jsx|tsx|astro|md|mdx)$
        pass_filenames: false

      - id: typescript_check
        name: Typescript Check
        entry: pnpm -r check
        language: system
        types: [file]
        files: .*\.(js|mjs|cjs|ts|jsx|tsx|astro|mdx)$
        pass_filenames: false

      - id: javascript_test
        name: JavaScript Test
        entry: pnpm -r test
        language: system
        types: [file]
        files: .*\.(js|mjs|cjs|ts|jsx|tsx|astro)$
        pass_filenames: false

      ### Dart client and example
      - id: dart_format
        name: Dart format
        entry: dart format -o none --set-exit-if-changed client/trailbase-dart examples/blog/flutter
        language: system
        types: [file]
        files: .*\.dart$
        pass_filenames: false

      - id: dart_analyze
        name: Dart analyze
        entry: sh -c 'dart pub -C client/trailbase-dart get && dart pub -C examples/blog/flutter get && dart analyze -- client/trailbase-dart examples/blog/flutter'
        language: system
        types: [file]
        files: .*\.dart$
        pass_filenames: false

      - id: dart_test
        name: Dart test
        entry: sh -c 'cd client/trailbase-dart && dart pub get && dart test'
        language: system
        types: [file]
        files: .*\.dart$
        pass_filenames: false
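To run the same hooks locally, the standard pre-commit workflow should apply, assuming `pre-commit` itself plus the tools the `local` hooks shell out to (cargo, pnpm, dart) are installed:

```bash
pre-commit install          # register the hooks for future `git commit`s
pre-commit run --all-files  # run every configured hook once across the repository
```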
8
.rustfmt.toml
Normal file
@@ -0,0 +1,8 @@
# Docs: https://rust-lang.github.io/rustfmt/
edition = "2021"
brace_style = "SameLineWhere"
empty_item_single_line = true
max_width = 100
comment_width = 100
wrap_comments = true
tab_spaces = 2
4658
Cargo.lock
generated
Normal file
46
Cargo.toml
Normal file
@@ -0,0 +1,46 @@
[workspace]
resolver = "2"
members = [
  "examples/custom-binary",
  "trailbase-cli",
  "trailbase-core",
  "trailbase-extension",
  "trailbase-sqlite",
  "vendor/refinery-libsql",
  "vendor/sqlean",
]
default-members = [
  "trailbase-cli",
  "trailbase-core",
  "trailbase-extension",
  "trailbase-sqlite",
]
exclude = [
  "vendor/refinery",
  "vendor/sqlite-loadable",
]

# https://doc.rust-lang.org/cargo/reference/profiles.html
[profile.release]
panic = "unwind"
opt-level = 3
# PGO doesn't work with LTO: https://github.com/llvm/llvm-project/issues/57501
# lto = "off"
lto = true
codegen-units = 1

[workspace.dependencies]
libsql = { package = "libsql", version = "^0.6.0", default-features = false, features = ["core", "serde"] }
refinery = { package = "refinery", path = "vendor/refinery/refinery", default-features = false }
refinery-core = { package = "refinery-core", path = "vendor/refinery/refinery_core" }
refinery-libsql = { package = "refinery-libsql", path = "vendor/refinery-libsql" }
rusqlite = { package = "libsql-rusqlite", version = "^0.32", default-features = false, features = [
  "libsql-experimental",
  "column_decltype",
  "load_extension",
  "modern_sqlite",
  "functions",
  "limits",
  "backup",
] }
sqlite-loadable = { package = "sqlite-loadable", path = "./vendor/sqlite-loadable", features=["static"] }
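Since several of the workspace dependencies above are `path` dependencies into `vendor/`, the submodules have to be checked out before the workspace resolves; a plain `cargo build` then covers the `default-members`, and `-p` selects an individual member. A hedged sketch:

```bash
git submodule update --init --recursive  # populate the vendor/ path dependencies
cargo build                              # builds the default members (cli, core, extension, sqlite)
cargo build -p trailbase-cli             # or just a single workspace member
```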
51
Dockerfile
Normal file
@@ -0,0 +1,51 @@
FROM lukemathwalker/cargo-chef:latest-rust-1.81-slim AS chef

# Install additional build dependencies.
#
# NOTE: we should consider building sqlean against
# libsql/libsql-sqlite3/src/sqlite3ext.h rather than upstream libsqlite3-dev
# for increased consistency.
RUN apt-get update && apt-get install -y --no-install-recommends curl libssl-dev pkg-config libclang-dev protobuf-compiler libprotobuf-dev libsqlite3-dev

ENV PATH=/usr/local/node/bin:$PATH
ARG NODE_VERSION=22.9.0

RUN curl -sL https://github.com/nodenv/node-build/archive/master.tar.gz | tar xz -C /tmp/ && \
    /tmp/node-build-master/bin/node-build "${NODE_VERSION}" /usr/local/node && \
    rm -rf /tmp/node-build-master

RUN npm install -g pnpm
RUN pnpm --version

FROM chef AS planner
WORKDIR /app
COPY . .
RUN cargo chef prepare --recipe-path recipe.json


FROM planner AS builder
# Re-build dependencies in case they have changed.
COPY --from=planner /app/recipe.json recipe.json
RUN cargo chef cook --release --recipe-path recipe.json

COPY . .

RUN RUSTFLAGS="-C target-feature=+crt-static" cargo build --target x86_64-unknown-linux-gnu --release --bin trail

FROM alpine:3.20 AS runtime
RUN apk add --no-cache tini curl

COPY --from=builder /app/target/x86_64-unknown-linux-gnu/release/trail /app/

# When `docker run` is executed, launch the binary as unprivileged user.
RUN adduser -D trailbase
USER trailbase

WORKDIR /app

EXPOSE 4000
ENTRYPOINT ["tini", "--"]

CMD ["/app/trail", "--data-dir", "/app/traildepot", "run", "--address", "0.0.0.0:4000"]

HEALTHCHECK CMD curl --fail http://localhost:4000/api/healthcheck || exit 1
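For reference, a sketch of building and running the image: the tag matches the Makefile's `docker` target and the port/data directory follow the `EXPOSE`/`CMD` above, while the volume mount is just one way to persist `traildepot/`:

```bash
docker build . -t trailbase/trailbase
docker run --rm -p 4000:4000 \
  -v "$PWD/traildepot:/app/traildepot" \
  trailbase/trailbase
```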
105
LICENSE
Normal file
@@ -0,0 +1,105 @@
# Functional Source License, Version 1.1, Apache 2.0 Future License

## Abbreviation

FSL-1.1-Apache-2.0

## Notice

Copyright 2024 Sebastian Jeltsch

## Terms and Conditions

### Licensor ("We")

The party offering the Software under these Terms and Conditions.

### The Software

The "Software" is each version of the software that we make available under
these Terms and Conditions, as indicated by our inclusion of these Terms and
Conditions with the Software.

### License Grant

Subject to your compliance with this License Grant and the Patents,
Redistribution and Trademark clauses below, we hereby grant you the right to
use, copy, modify, create derivative works, publicly perform, publicly display
and redistribute the Software for any Permitted Purpose identified below.

### Permitted Purpose

A Permitted Purpose is any purpose other than a Competing Use. A Competing Use
means making the Software available to others in a commercial product or
service that:

1. substitutes for the Software;

2. substitutes for any other product or service we offer using the Software
   that exists as of the date we make the Software available; or

3. offers the same or substantially similar functionality as the Software.

Permitted Purposes specifically include using the Software:

1. for your internal use and access;

2. for non-commercial education;

3. for non-commercial research; and

4. in connection with professional services that you provide to a licensee
   using the Software in accordance with these Terms and Conditions.

### Patents

To the extent your use for a Permitted Purpose would necessarily infringe our
patents, the license grant above includes a license under our patents. If you
make a claim against any party that the Software infringes or contributes to
the infringement of any patent, then your patent license to the Software ends
immediately.

### Redistribution

The Terms and Conditions apply to all copies, modifications and derivatives of
the Software.

If you redistribute any copies, modifications or derivatives of the Software,
you must include a copy of or a link to these Terms and Conditions and not
remove any copyright notices provided in or with the Software.

### Disclaimer

THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTIES OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING WITHOUT LIMITATION WARRANTIES OF FITNESS FOR A PARTICULAR
PURPOSE, MERCHANTABILITY, TITLE OR NON-INFRINGEMENT.

IN NO EVENT WILL WE HAVE ANY LIABILITY TO YOU ARISING OUT OF OR RELATED TO THE
SOFTWARE, INCLUDING INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES,
EVEN IF WE HAVE BEEN INFORMED OF THEIR POSSIBILITY IN ADVANCE.

### Trademarks

Except for displaying the License Details and identifying us as the origin of
the Software, you have no right under these Terms and Conditions to use our
trademarks, trade names, service marks or product names.

## Grant of Future License

We hereby irrevocably grant you an additional license to use the Software under
the Apache License, Version 2.0 that is effective on the second anniversary of
the date we make the Software available. On or after that date, you may use the
Software under the Apache License, Version 2.0, in which case the following
will apply:

Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License.

You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
20
Makefile
Normal file
@@ -0,0 +1,20 @@
default: format check

target/x86_64-unknown-linux-gnu/release/trail:
	RUSTFLAGS="-C target-feature=+crt-static" cargo build --target x86_64-unknown-linux-gnu --release --bin trail

format:
	pnpm -r format; \
	cargo +nightly fmt; \
	dart format client/trailbase-dart/ examples/blog/flutter/; \
	txtpbfmt `find . -regex ".*.textproto"`

check:
	pnpm -r check; \
	cargo clippy --workspace --no-deps; \
	dart analyze client/trailbase-dart examples/blog/flutter

docker:
	docker build . -t trailbase/trailbase

.PHONY: default format check
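Typical invocations given the targets above:

```bash
make          # default target: format, then check
make docker   # build the trailbase/trailbase image
```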
40
README.md
Normal file
@@ -0,0 +1,40 @@
# TrailBase

A blazingly fast, single-file, and open-source server for your application with
type-safe RESTful APIs, auth, admin dashboard, etc.

For more context, documentation, and an online live demo, check out our website
[trailbase.io](https://trailbase.io).

## FAQ

Check out our [website](https://trailbase.io/reference/faq/).

## Project Structure

This repository contains all components that make up TrailBase, as well as
tests, documentation and examples.
Only our [benchmarks](https://github.com/trailbaseio/trailbase-benchmark) are
kept separately due to their external dependencies.

## Building

If you have all the necessary build dependencies (Rust, Node.js, pnpm, ...)
installed, you can simply build TrailBase by running:

```bash
$ git submodule update --init --recursive
$ cargo build
```

Alternatively, you can build with Docker:

```bash
$ git submodule update --init --recursive
$ docker build . -t trailbase
```

## Contributing

Contributions are very welcome; let's just talk upfront to see how a proposal
fits into the overall roadmap and avoid any surprises.
341
assets/colors.svg
Normal file
@@ -0,0 +1,341 @@
[assets/colors.svg: Inkscape-generated 512×280 SVG of the TrailBase color palette — an "Accent" row and a "gray" row of labeled swatches (100–950) with their fill colors; full XML source omitted here. Size: 12 KiB]
85
assets/favicon.svg
Normal file
@@ -0,0 +1,85 @@
[assets/favicon.svg: Inkscape-generated 512×512 SVG favicon — a white circle outlined in the TrailBase blue (#0273aa) containing the acorn mark; full XML source omitted here. Size: 5.3 KiB]
191
assets/logo.svg
Normal file
@@ -0,0 +1,191 @@
[assets/logo.svg: Inkscape-generated 512×512 SVG logo — a blue circle (#0073aa) with the white squirrel-and-acorn mark (groups labeled "squirrel", "tail", "head", "acorn", "hand left/right"); full XML source omitted here. Size: 16 KiB]
BIN
assets/logo_104.webp
Normal file
After Width: | Height: | Size: 37 KiB |
2
client/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
**/data/
**/secrets/
4
client/testfixture/Makefile
Normal file
@@ -0,0 +1,4 @@
clean:
	rm -rf data/ uploads/

.PHONY: clean
63
client/testfixture/config.textproto
Normal file
@@ -0,0 +1,63 @@
# Auto-generated config.Config textproto
email {}
server {
  application_name: "TrailBase"
  site_url: "http://localhost:4000"
  logs_retention_sec: 604800
}
auth {}
record_apis: [
  {
    name: "_user_avatar"
    table_name: "_user_avatar"
    conflict_resolution: REPLACE
    autofill_missing_user_id_columns: true
    acl_world: [READ]
    acl_authenticated: [CREATE, READ, UPDATE, DELETE]
    create_access_rule: "_REQ_.user IS NULL OR _REQ_.user = _USER_.id"
    update_access_rule: "_ROW_.user = _USER_.id"
    delete_access_rule: "_ROW_.user = _USER_.id"
  },
  {
    name: "simple_strict_table"
    table_name: "simple_strict_table"
    acl_authenticated: [CREATE, READ, UPDATE, DELETE]
  },
  {
    name: "simple_complete_view"
    table_name: "simple_complete_view"
    acl_authenticated: [CREATE, READ, UPDATE, DELETE]
  },
  {
    name: "simple_subset_view"
    table_name: "simple_subset_view"
    acl_authenticated: [CREATE, READ, UPDATE, DELETE]
  }
]
query_apis: [
  {
    name: "simple_query_api"
    virtual_table_name: "simple_query_api"
    params: [
      {
        name: "number"
        type: INTEGER
      }
    ]
    acl: WORLD
  }
]
schemas: [
  {
    name: "simple_schema"
    schema:
      "{"
      " \"type\": \"object\","
      " \"properties\": {"
      "   \"name\": { \"type\": \"string\" },"
      "   \"obj\": { \"type\": \"object\" }"
      " },"
      " \"required\": [\"name\"]"
      "}"
  }
]
@@ -0,0 +1,4 @@
INSERT INTO _user
  (id, email, password_hash, verified, admin)
VALUES
  (uuid_v7(), 'admin@localhost', (hash_password('secret')), TRUE, TRUE);
16
client/testfixture/migrations/U1725019361__add_users.sql
Normal file
@@ -0,0 +1,16 @@
-- Add a few non-admin users.
INSERT INTO _user (id, email, password_hash, verified)
VALUES
  (uuid_v7(), '0@localhost', (hash_password('secret')), TRUE),
  (uuid_v7(), '1@localhost', (hash_password('secret')), TRUE),
  (uuid_v7(), '2@localhost', (hash_password('secret')), TRUE),
  (uuid_v7(), '3@localhost', (hash_password('secret')), TRUE),
  (uuid_v7(), '4@localhost', (hash_password('secret')), TRUE),
  (uuid_v7(), '5@localhost', (hash_password('secret')), TRUE),
  (uuid_v7(), '6@localhost', (hash_password('secret')), TRUE),
  (uuid_v7(), '7@localhost', (hash_password('secret')), TRUE),
  (uuid_v7(), '8@localhost', (hash_password('secret')), TRUE),
  (uuid_v7(), '9@localhost', (hash_password('secret')), TRUE),
  (uuid_v7(), '10@localhost', (hash_password('secret')), TRUE),
  (uuid_v7(), '11@localhost', (hash_password('secret')), TRUE),
  (uuid_v7(), '12@localhost', (hash_password('secret')), TRUE);
@@ -0,0 +1,69 @@
-- Create a canonical table satisfying API requirements.
CREATE TABLE simple_strict_table (
  id BLOB PRIMARY KEY CHECK (is_uuid_v7(id)) DEFAULT (uuid_v7()) NOT NULL,

  text_null TEXT,
  text_default TEXT DEFAULT '',
  text_not_null TEXT NOT NULL DEFAULT '',

  int_null INTEGER,
  int_default INTEGER DEFAULT 5,
  int_not_null INTEGER NOT NULL DEFAULT 7,

  real_null REAL,
  real_default REAL DEFAULT 5.1,
  real_not_null REAL NOT NULL DEFAULT 7.1,

  blob_null BLOB,
  blob_default BLOB DEFAULT X'AABBCCDD',
  blob_not_null BLOB NOT NULL DEFAULT X'AABBCCDD'
) STRICT;


-- Create a variety of views.
CREATE VIEW simple_complete_view AS SELECT * FROM simple_strict_table;
CREATE VIEW simple_subset_view AS SELECT id, text_null AS t_null, text_default AS t_default, text_not_null AS t_not_null FROM simple_strict_table;
CREATE VIEW simple_subset_wo_id_view AS SELECT text_null, text_default, text_not_null FROM simple_strict_table;
CREATE VIEW simple_filter_view AS SELECT * FROM simple_strict_table WHERE (int_not_null % 2) = 0;


INSERT INTO simple_strict_table
  (text_default, text_not_null, int_default, int_not_null, real_default, real_not_null, blob_default, blob_not_null)
VALUES
  ('1', '1', 1, 1, 1.1, 1.2, X'01', X'01'),
  ('2', '2', 2, 2, 2.1, 2.2, X'02', X'02'),
  ('3', '3', 3, 3, 3.1, 3.2, X'03', X'03'),
  ('4', '4', 4, 4, 4.1, 4.2, X'04', X'04'),
  ('5', '5', 5, 5, 5.1, 5.2, X'05', X'05'),
  ('6', '6', 6, 6, 6.1, 6.2, X'06', X'06'),
  ('7', '7', 7, 7, 7.1, 7.2, X'07', X'07'),
  ('8', '8', 8, 8, 8.1, 8.2, X'08', X'08'),
  ('9', '9', 9, 9, 9.1, 9.2, X'09', X'09'),
  ('10', '10', 10, 10, 10.1, 10.2, X'0A', X'0A'),
  ('11', '11', 11, 11, 11.1, 11.2, X'0B', X'0B'),
  ('12', '12', 12, 12, 12.1, 12.2, X'0C', X'0C'),
  ('13', '13', 13, 13, 13.1, 13.2, X'0D', X'0D'),
  ('14', '14', 14, 14, 14.1, 14.2, X'0E', X'0E'),
  ('15', '15', 15, 15, 15.1, 15.2, X'0F', X'0F'),
  ('16', '16', 16, 16, 16.1, 16.2, X'10', X'10'),
  ('17', '17', 17, 17, 17.1, 17.2, X'11', X'11'),
  ('18', '18', 18, 18, 18.1, 18.2, X'12', X'12'),
  ('19', '19', 19, 19, 19.1, 19.2, X'13', X'13'),
  ('20', '20', 20, 20, 20.1, 20.2, X'14', X'14'),
  ('21', '21', 21, 21, 21.1, 21.2, X'15', X'15');

CREATE TABLE simple_strict_table_int (
  id INTEGER PRIMARY KEY,

  text_null TEXT,
  blob_null BLOB,
  int_null INTEGER,
  real_null REAL,
  any_col ANY
) STRICT;

INSERT INTO simple_strict_table_int (id, text_null, blob_null, int_null, real_null, any_col)
VALUES
  (NULL, '1', X'01', 1, 1.1, 'one'),
  (NULL, '2', X'02', 2, 2.2, 2),
  (NULL, '3', X'03', 3, 3.3, 3.3);
@@ -0,0 +1,31 @@
-- Create a table that doesn't satisfy record API requirements and uses
-- "affinity names" rather than strict storage types.
CREATE TABLE non_strict_table (
  id INTEGER PRIMARY KEY NOT NULL,

  tinyint_col TINYINT,
  bigint_col BIGINT,

  varchar_col VARCHAR(64),
  double_col DOUBLE,
  float_col FLOAT,

  boolean_col BOOLEAN,
  date_col DATE,
  datetime_col DATETIME
);

INSERT INTO non_strict_table
  (id, tinyint_col, bigint_col, varchar_col, double_col, float_col, boolean_col, date_col, datetime_col)
VALUES
  (0, 5, 64, 'varchar', 5.2, 2.4, FALSE, UNIXEPOCH(), UNIXEPOCH()),
  (1, 5, 64, 'varchar', 5.2, 2.4, FALSE, UNIXEPOCH(), UNIXEPOCH()),
  (2, 5, 64, 'varchar', 5.2, 2.4, FALSE, UNIXEPOCH(), UNIXEPOCH()),
  (NULL, 5, 64, 'varchar', 5.2, 2.4, FALSE, UNIXEPOCH(), UNIXEPOCH());

CREATE TABLE non_strict_autoincrement_table (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  int4_col INT4
);

INSERT INTO non_strict_autoincrement_table (int4_col) VALUES (12);
@@ -0,0 +1,51 @@
|
||||
-- Create a virtual R-star table backed by physical, shadow tables.
|
||||
--
|
||||
-- NOTE: The column types are here only for readability. rtree doesn't care.
|
||||
CREATE VIRTUAL TABLE virtual_spatial_index USING rtree(
|
||||
id INTEGER,
|
||||
|
||||
-- Minimum and maximum X coordinate (rtree uses f32)
|
||||
minX,
|
||||
maxX,
|
||||
|
||||
-- Minimum and maximum Y coordinate (rtree uses f32)
|
||||
minY,
|
||||
maxY,
|
||||
|
||||
-- From the docs:
|
||||
--
|
||||
-- "For auxiliary columns, only the name of the column matters. The type
|
||||
-- affinity is ignored. Constraints such as NOT NULL, UNIQUE, REFERENCES, or
|
||||
-- CHECK are also ignored. However, future versions of SQLite might start
|
||||
-- paying attention to the type affinity and constraints, so users of
|
||||
-- auxiliary columns are advised to leave both blank, to avoid future
|
||||
-- compatibility problems."
|
||||
+uuid BLOB
|
||||
);
|
||||
|
||||
-- 14 zipcodes near Charlotte, NC. Inspired by https://sqlite.org/rtree.html.
|
||||
INSERT INTO virtual_spatial_index VALUES
|
||||
(28215, -80.781227, -80.604706, 35.208813, 35.297367, uuid_v7()),
|
||||
(28216, -80.957283, -80.840599, 35.235920, 35.367825, uuid_v7()),
|
||||
(28217, -80.960869, -80.869431, 35.133682, 35.208233, uuid_v7()),
|
||||
(28226, -80.878983, -80.778275, 35.060287, 35.154446, uuid_v7()),
|
||||
(28227, -80.745544, -80.555382, 35.130215, 35.236916, uuid_v7()),
|
||||
(28244, -80.844208, -80.841988, 35.223728, 35.225471, uuid_v7()),
|
||||
(28262, -80.809074, -80.682938, 35.276207, 35.377747, uuid_v7()),
|
||||
(28269, -80.851471, -80.735718, 35.272560, 35.407925, uuid_v7()),
|
||||
(28270, -80.794983, -80.728966, 35.059872, 35.161823, uuid_v7()),
|
||||
(28273, -80.994766, -80.875259, 35.074734, 35.172836, uuid_v7()),
|
||||
(28277, -80.876793, -80.767586, 35.001709, 35.101063, uuid_v7()),
|
||||
(28278, -81.058029, -80.956375, 35.044701, 35.223812, uuid_v7()),
|
||||
(28280, -80.844208, -80.841972, 35.225468, 35.227203, uuid_v7()),
|
||||
(28282, -80.846382, -80.844193, 35.223972, 35.225655, uuid_v7());
|
||||
|
||||
-- NOTE: define rejects mutating statements.
|
||||
-- CREATE VIRTUAL TABLE virtual_spatial_index_writer USING define(
|
||||
-- (INSERT INTO virtual_spatial_index VALUES ($1, $2, $3, $4, $5, uuid_v7()) RETURNING *));
|
||||
|
||||
-- Create a virtual table based on a stored procedure.
|
||||
--
|
||||
-- This virtual table is also exposed as a Query API in the config. To see it in
|
||||
-- action browse to: http://localhost:4000/api/query/v1/simple_query_api?number=4.
|
||||
CREATE VIRTUAL TABLE simple_query_api USING define((SELECT UNIXEPOCH() AS epoch, $1 AS random_number));
|
||||
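The SQL comment above references the generated Query API endpoint. As a rough sketch of how a client might call it (the response fields are an assumption here, inferred from the `epoch` and `random_number` columns selected by the virtual table; adjust to the actual payload):

```
// Sketch only: call the Query API endpoint referenced in the SQL comment above.
// The response fields (`epoch`, `random_number`) are assumed from the SELECT
// columns of `simple_query_api`; the exact payload shape may differ.
async function querySimpleApi(): Promise<void> {
  const response = await fetch(
    "http://localhost:4000/api/query/v1/simple_query_api?number=4",
  );
  if (!response.ok) {
    throw new Error(`Query failed: ${response.status} ${response.statusText}`);
  }
  console.log(await response.json());
}
```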
7
client/trailbase-dart/.gitignore
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
# https://dart.dev/guides/libraries/private-files
|
||||
# Created by `dart pub`
|
||||
.dart_tool/
|
||||
|
||||
# Avoid committing pubspec.lock for library packages; see
|
||||
# https://dart.dev/guides/libraries/private-files#pubspeclock.
|
||||
pubspec.lock
|
||||
2
client/trailbase-dart/.pubignore
Normal file
@@ -0,0 +1,2 @@
|
||||
test
|
||||
analysis_options.yaml
|
||||
7
client/trailbase-dart/CHANGELOG.md
Normal file
@@ -0,0 +1,7 @@
# Changelog

## 0.1.0

### Features

- Initial client release including support for authentication and record APIs.
1
client/trailbase-dart/LICENSE
Symbolic link
@@ -0,0 +1 @@
|
||||
../../LICENSE
|
||||
13
client/trailbase-dart/README.md
Normal file
@@ -0,0 +1,13 @@
# TrailBase client library for Dart and Flutter

TrailBase is a blazingly fast, single-file, and open-source server for your
application with type-safe RESTful APIs, auth, an admin dashboard, and more.

For more context, documentation, and an online live demo, check out our website
[trailbase.io](https://trailbase.io).

This is the first-party client for hooking up your Flutter or Dart applications
with your TrailBase server.
While documentation is still a work in progress, an example setup can be found under
[`/examples/blog/flutter`](https://github.com/trailbaseio/trailbase/tree/main/examples/blog/flutter)
in the repository.
31
client/trailbase-dart/analysis_options.yaml
Normal file
@@ -0,0 +1,31 @@
|
||||
# This file configures the static analysis results for your project (errors,
|
||||
# warnings, and lints).
|
||||
#
|
||||
# This enables the 'recommended' set of lints from `package:lints`.
|
||||
# This set helps identify many issues that may lead to problems when running
|
||||
# or consuming Dart code, and enforces writing Dart using a single, idiomatic
|
||||
# style and format.
|
||||
#
|
||||
# If you want a smaller set of lints you can change this to specify
|
||||
# 'package:lints/core.yaml'. These are just the most critical lints
|
||||
# (the recommended set includes the core lints).
|
||||
# The core lints are also what is used by pub.dev for scoring packages.
|
||||
|
||||
include: package:lints/recommended.yaml
|
||||
|
||||
linter:
|
||||
rules:
|
||||
prefer_single_quotes: true
|
||||
unnecessary_brace_in_string_interps: false
|
||||
unawaited_futures: true
|
||||
sort_child_properties_last: false
|
||||
|
||||
# analyzer:
|
||||
# exclude:
|
||||
# - path/to/excluded/files/**
|
||||
|
||||
# For more information about the core and recommended set of lints, see
|
||||
# https://dart.dev/go/core-lints
|
||||
|
||||
# For additional information about configuring this file, see
|
||||
# https://dart.dev/guides/language/analysis-options
|
||||
548
client/trailbase-dart/lib/src/client.dart
Normal file
@@ -0,0 +1,548 @@
|
||||
import 'dart:convert';
|
||||
import 'dart:typed_data';
|
||||
|
||||
import 'package:jwt_decoder/jwt_decoder.dart';
|
||||
import 'package:logging/logging.dart';
|
||||
import 'package:dio/dio.dart' as dio;
|
||||
|
||||
class User {
|
||||
final String id;
|
||||
final String email;
|
||||
|
||||
const User({
|
||||
required this.id,
|
||||
required this.email,
|
||||
});
|
||||
|
||||
User.fromJson(Map<String, dynamic> json)
|
||||
: id = json['id'],
|
||||
email = json['email'];
|
||||
|
||||
@override
|
||||
String toString() => 'User(id=${id}, email=${email})';
|
||||
}
|
||||
|
||||
class Tokens {
|
||||
final String auth;
|
||||
final String? refresh;
|
||||
final String? csrf;
|
||||
|
||||
const Tokens(this.auth, this.refresh, this.csrf);
|
||||
|
||||
Tokens.fromJson(Map<String, dynamic> json)
|
||||
: auth = json['auth_token'],
|
||||
refresh = json['refresh_token'],
|
||||
csrf = json['csrf_token'];
|
||||
|
||||
Map<String, dynamic> toJson() => {
|
||||
'auth_token': auth,
|
||||
'refresh_token': refresh,
|
||||
'csrf_token': csrf,
|
||||
};
|
||||
|
||||
@override
|
||||
String toString() => 'Tokens(${auth}, ${refresh}, ${csrf})';
|
||||
}
|
||||
|
||||
class JwtToken {
|
||||
final String sub;
|
||||
final int iat;
|
||||
final int exp;
|
||||
final String email;
|
||||
final String csrfToken;
|
||||
|
||||
const JwtToken({
|
||||
required this.sub,
|
||||
required this.iat,
|
||||
required this.exp,
|
||||
required this.email,
|
||||
required this.csrfToken,
|
||||
});
|
||||
|
||||
JwtToken.fromJson(Map<String, dynamic> json)
|
||||
: sub = json['sub'],
|
||||
iat = json['iat'],
|
||||
exp = json['exp'],
|
||||
email = json['email'],
|
||||
csrfToken = json['csrf_token'];
|
||||
}
|
||||
|
||||
class _TokenState {
|
||||
final (Tokens, JwtToken)? state;
|
||||
final Map<String, dynamic> headers;
|
||||
|
||||
const _TokenState(this.state, this.headers);
|
||||
|
||||
static _TokenState build(Tokens? tokens) {
|
||||
return _TokenState(
|
||||
tokens != null
|
||||
? (tokens, JwtToken.fromJson(JwtDecoder.decode(tokens.auth)))
|
||||
: null,
|
||||
buildHeaders(tokens),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
class Pagination {
|
||||
final String? cursor;
|
||||
final int? limit;
|
||||
|
||||
const Pagination({
|
||||
required this.cursor,
|
||||
required this.limit,
|
||||
});
|
||||
}
|
||||
|
||||
abstract class RecordId {
|
||||
@override
|
||||
String toString();
|
||||
|
||||
factory RecordId.integer(int id) => _IntegerRecordId(id);
|
||||
factory RecordId.uuid(String id) => _UuidRecordId(id);
|
||||
}
|
||||
|
||||
class _ResponseRecordId implements RecordId {
|
||||
final String id;
|
||||
|
||||
const _ResponseRecordId(this.id);
|
||||
|
||||
_ResponseRecordId.fromJson(Map<String, dynamic> json) : id = json['id'];
|
||||
|
||||
int integer() => int.parse(id);
|
||||
Uint8List uuid() => base64Decode(id);
|
||||
|
||||
@override
|
||||
String toString() => id;
|
||||
|
||||
@override
|
||||
bool operator ==(Object other) {
|
||||
if (other is _ResponseRecordId) return id == other.id;
|
||||
|
||||
if (other is int) return int.tryParse(id) == other;
|
||||
if (other is _IntegerRecordId) return int.tryParse(id) == other.id;
|
||||
if (other is String) return id == other;
|
||||
if (other is _UuidRecordId) return id == other.id;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
@override
|
||||
int get hashCode => id.hashCode;
|
||||
}
|
||||
|
||||
class _IntegerRecordId implements RecordId {
|
||||
final int id;
|
||||
|
||||
const _IntegerRecordId(this.id);
|
||||
|
||||
@override
|
||||
String toString() => id.toString();
|
||||
|
||||
@override
|
||||
bool operator ==(Object other) {
|
||||
if (other is _IntegerRecordId) return id == other.id;
|
||||
if (other is int) return id == other;
|
||||
if (other is _ResponseRecordId) return id == int.tryParse(other.id);
|
||||
return false;
|
||||
}
|
||||
|
||||
@override
|
||||
int get hashCode => id.hashCode;
|
||||
}
|
||||
|
||||
extension RecordIdExtInt on int {
|
||||
RecordId id() => _IntegerRecordId(this);
|
||||
}
|
||||
|
||||
class _UuidRecordId implements RecordId {
|
||||
final String id;
|
||||
|
||||
const _UuidRecordId(this.id);
|
||||
|
||||
@override
|
||||
String toString() => id;
|
||||
|
||||
@override
|
||||
bool operator ==(Object other) {
|
||||
if (other is _UuidRecordId) return id == other.id;
|
||||
if (other is String) return id == other;
|
||||
if (other is _ResponseRecordId) return id == other.id;
|
||||
return false;
|
||||
}
|
||||
|
||||
@override
|
||||
int get hashCode => id.hashCode;
|
||||
}
|
||||
|
||||
extension RecordIdExtString on String {
|
||||
RecordId id() => _UuidRecordId(this);
|
||||
}
|
||||
|
||||
class RecordApi {
|
||||
static const String _recordApi = 'api/records/v1';
|
||||
|
||||
final String _name;
|
||||
final Client _client;
|
||||
|
||||
const RecordApi(this._client, this._name);
|
||||
|
||||
Future<List<Map<String, dynamic>>> list({
|
||||
Pagination? pagination,
|
||||
List<String>? order,
|
||||
List<String>? filters,
|
||||
}) async {
|
||||
final params = <String, dynamic>{};
|
||||
if (pagination != null) {
|
||||
final cursor = pagination.cursor;
|
||||
if (cursor != null) params['cursor'] = cursor;
|
||||
|
||||
final limit = pagination.limit;
|
||||
if (limit != null) params['limit'] = limit.toString();
|
||||
}
|
||||
|
||||
if (order != null) params['order'] = order.join(',');
|
||||
|
||||
if (filters != null) {
|
||||
for (final filter in filters) {
|
||||
final (nameOp, value) = splitOnce(filter, '=');
|
||||
if (value == null) {
|
||||
throw Exception(
|
||||
'Filter "${filter}" does not match: "name[op]=value"');
|
||||
}
|
||||
params[nameOp] = value;
|
||||
}
|
||||
}
|
||||
|
||||
final response = await _client.fetch(
|
||||
'${RecordApi._recordApi}/${_name}',
|
||||
queryParams: params,
|
||||
);
|
||||
|
||||
return (response.data as List).cast<Map<String, dynamic>>();
|
||||
}
|
||||
|
||||
Future<Map<String, dynamic>> read(RecordId id) async {
|
||||
final response = await _client.fetch(
|
||||
'${RecordApi._recordApi}/${_name}/${id}',
|
||||
);
|
||||
return response.data;
|
||||
}
|
||||
|
||||
Future<RecordId> create(Map<String, dynamic> record) async {
|
||||
final response = await _client.fetch(
|
||||
'${RecordApi._recordApi}/${_name}',
|
||||
method: 'POST',
|
||||
data: record,
|
||||
);
|
||||
|
||||
if ((response.statusCode ?? 400) > 200) {
|
||||
throw Exception('${response.data} ${response.statusMessage}');
|
||||
}
|
||||
return _ResponseRecordId.fromJson(response.data);
|
||||
}
|
||||
|
||||
Future<void> update(
|
||||
RecordId id,
|
||||
Map<String, dynamic> record,
|
||||
) async {
|
||||
await _client.fetch(
|
||||
'${RecordApi._recordApi}/${_name}/${id}',
|
||||
method: 'PATCH',
|
||||
data: record,
|
||||
);
|
||||
}
|
||||
|
||||
Future<void> delete(RecordId id) async {
|
||||
await _client.fetch(
|
||||
'${RecordApi._recordApi}/${_name}/${id}',
|
||||
method: 'DELETE',
|
||||
);
|
||||
}
|
||||
|
||||
Uri imageUri(RecordId id, String colName, {int? index}) {
|
||||
if (index != null) {
|
||||
return Uri.parse(
|
||||
'${_client.site()}/${RecordApi._recordApi}/${_name}/${id}/file/${colName}/${index}');
|
||||
}
|
||||
return Uri.parse(
|
||||
'${_client.site()}/${RecordApi._recordApi}/${_name}/${id}/file/${colName}');
|
||||
}
|
||||
}
|
||||
|
||||
class _ThinClient {
|
||||
static final _dio = dio.Dio();
|
||||
|
||||
final String site;
|
||||
|
||||
const _ThinClient(this.site);
|
||||
|
||||
Future<dio.Response> fetch(
|
||||
String path,
|
||||
_TokenState tokenState, {
|
||||
Object? data,
|
||||
String? method,
|
||||
Map<String, dynamic>? queryParams,
|
||||
}) async {
|
||||
if (path.startsWith('/')) {
|
||||
throw Exception('Path starts with "/". Relative path expected.');
|
||||
}
|
||||
|
||||
final response = await _dio.request(
|
||||
'${site}/${path}',
|
||||
data: data,
|
||||
queryParameters: queryParams,
|
||||
options: dio.Options(
|
||||
method: method,
|
||||
headers: tokenState.headers,
|
||||
validateStatus: (int? status) => true,
|
||||
),
|
||||
);
|
||||
|
||||
return response;
|
||||
}
|
||||
}
|
||||
|
||||
class Client {
|
||||
static const String _authApi = 'api/auth/v1';
|
||||
|
||||
final _ThinClient _client;
|
||||
final String _site;
|
||||
_TokenState _tokenState;
|
||||
final void Function(Client, Tokens?)? _authChange;
|
||||
|
||||
Client._(
|
||||
String site, {
|
||||
Tokens? tokens,
|
||||
void Function(Client, Tokens?)? onAuthChange,
|
||||
}) : _client = _ThinClient(site),
|
||||
_site = site,
|
||||
_tokenState = _TokenState.build(tokens),
|
||||
_authChange = onAuthChange;
|
||||
|
||||
Client(
|
||||
String site, {
|
||||
void Function(Client, Tokens?)? onAuthChange,
|
||||
}) : this._(site, onAuthChange: onAuthChange);
|
||||
|
||||
static Future<Client> withTokens(String site, Tokens tokens,
|
||||
{void Function(Client, Tokens?)? onAuthChange}) async {
|
||||
final client = Client(site, onAuthChange: onAuthChange);
|
||||
|
||||
try {
|
||||
final statusResponse = await client._client
|
||||
.fetch('${_authApi}/status', _TokenState.build(tokens));
|
||||
final Map<String, dynamic> response = statusResponse.data;
|
||||
|
||||
final newTokens = Tokens(
|
||||
response['auth_token'],
|
||||
tokens.refresh,
|
||||
response['csrf_token'],
|
||||
);
|
||||
client._tokenState = _TokenState.build(newTokens);
|
||||
client._authChange?.call(client, newTokens);
|
||||
} catch (err) {
|
||||
// Do nothing
|
||||
}
|
||||
|
||||
return client;
|
||||
}
|
||||
|
||||
/// Access to the raw tokens, can be used to persist login state.
|
||||
Tokens? tokens() => _tokenState.state?.$1;
|
||||
User? user() {
|
||||
final authToken = tokens()?.auth;
|
||||
if (authToken != null) {
|
||||
return User.fromJson(JwtDecoder.decode(authToken)['user']);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
String site() => _site;
|
||||
|
||||
RecordApi records(String name) => RecordApi(this, name);
|
||||
|
||||
_TokenState _updateTokens(Tokens? tokens) {
|
||||
final state = _TokenState.build(tokens);
|
||||
|
||||
_tokenState = state;
|
||||
_authChange?.call(this, state.state?.$1);
|
||||
|
||||
final claims = state.state?.$2;
|
||||
if (claims != null) {
|
||||
final now = DateTime.now().millisecondsSinceEpoch / 1000;
|
||||
if (claims.exp < now) {
|
||||
_logger.warning('Token expired');
|
||||
}
|
||||
}
|
||||
|
||||
return state;
|
||||
}
|
||||
|
||||
Future<Tokens> login(String email, String password) async {
|
||||
final response = await fetch(
|
||||
'${_authApi}/login',
|
||||
method: 'POST',
|
||||
data: {
|
||||
'email': email,
|
||||
'password': password,
|
||||
},
|
||||
);
|
||||
|
||||
final Map<String, dynamic> json = response.data;
|
||||
final tokens = Tokens(
|
||||
json['auth_token']!,
|
||||
json['refresh_token'],
|
||||
json['csrf_token'],
|
||||
);
|
||||
|
||||
_updateTokens(tokens);
|
||||
return tokens;
|
||||
}
|
||||
|
||||
Future<Tokens> loginWithAuthCode(
|
||||
String authCode, {
|
||||
String? pkceCodeVerifier,
|
||||
}) async {
|
||||
final response = await fetch(
|
||||
'${Client._authApi}/token',
|
||||
method: 'POST',
|
||||
data: {
|
||||
'authorization_code': authCode,
|
||||
'pkce_code_verifier': pkceCodeVerifier,
|
||||
},
|
||||
);
|
||||
|
||||
final Map<String, dynamic> tokenResponse = await response.data;
|
||||
final tokens = Tokens(
|
||||
tokenResponse['auth_token']!,
|
||||
tokenResponse['refresh_token']!,
|
||||
tokenResponse['csrf_token'],
|
||||
);
|
||||
|
||||
_updateTokens(tokens);
|
||||
return tokens;
|
||||
}
|
||||
|
||||
Future<bool> logout() async {
|
||||
final refreshToken = _tokenState.state?.$1.refresh;
|
||||
try {
|
||||
if (refreshToken != null) {
|
||||
await fetch('${_authApi}/logout', method: 'POST', data: {
|
||||
'refresh_token': refreshToken,
|
||||
});
|
||||
} else {
|
||||
await fetch('${_authApi}/logout');
|
||||
}
|
||||
} catch (err) {
|
||||
_logger.warning(err);
|
||||
}
|
||||
_updateTokens(null);
|
||||
return true;
|
||||
}
|
||||
|
||||
Future<void> deleteUser() async {
|
||||
await fetch('${Client._authApi}/delete');
|
||||
_updateTokens(null);
|
||||
}
|
||||
|
||||
Future<void> changeEmail(String email) async {
|
||||
await fetch(
|
||||
'${Client._authApi}/change_email',
|
||||
method: 'POST',
|
||||
data: {
|
||||
'new_email': email,
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
Future<void> refreshAuthToken() async {
|
||||
final refreshToken = _shouldRefresh(_tokenState);
|
||||
if (refreshToken != null) {
|
||||
_tokenState = await _refreshTokensImpl(refreshToken);
|
||||
}
|
||||
}
|
||||
|
||||
Future<_TokenState> _refreshTokensImpl(String refreshToken) async {
|
||||
final response = await _client.fetch(
|
||||
'${_authApi}/refresh',
|
||||
_tokenState,
|
||||
method: 'POST',
|
||||
data: {
|
||||
'refresh_token': refreshToken,
|
||||
},
|
||||
);
|
||||
|
||||
final Map<String, dynamic> tokenResponse = await response.data;
|
||||
return _TokenState.build(Tokens(
|
||||
tokenResponse['auth_token']!,
|
||||
refreshToken,
|
||||
tokenResponse['csrf_token'],
|
||||
));
|
||||
}
|
||||
|
||||
static String? _shouldRefresh(_TokenState tokenState) {
|
||||
final state = tokenState.state;
|
||||
final now = DateTime.now().millisecondsSinceEpoch / 1000;
|
||||
if (state != null && state.$2.exp - 60 < now) {
|
||||
return state.$1.refresh;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
Future<dio.Response> fetch(
|
||||
String path, {
|
||||
bool? throwOnError,
|
||||
Object? data,
|
||||
String? method,
|
||||
Map<String, dynamic>? queryParams,
|
||||
}) async {
|
||||
var tokenState = _tokenState;
|
||||
final refreshToken = _shouldRefresh(tokenState);
|
||||
if (refreshToken != null) {
|
||||
tokenState = _tokenState = await _refreshTokensImpl(refreshToken);
|
||||
}
|
||||
|
||||
final response = await _client.fetch(path, tokenState,
|
||||
data: data, method: method, queryParams: queryParams);
|
||||
|
||||
if (response.statusCode != 200 && (throwOnError ?? true)) {
|
||||
final errMsg = await response.data;
|
||||
throw Exception(
|
||||
'[${response.statusCode}] ${response.statusMessage}: ${errMsg}');
|
||||
}
|
||||
|
||||
return response;
|
||||
}
|
||||
}
|
||||
|
||||
Map<String, dynamic> buildHeaders(Tokens? tokens) {
|
||||
final Map<String, dynamic> base = {
|
||||
'Content-Type': 'application/json',
|
||||
};
|
||||
|
||||
if (tokens != null) {
|
||||
base['Authorization'] = 'Bearer ${tokens.auth}';
|
||||
|
||||
final refresh = tokens.refresh;
|
||||
if (refresh != null) {
|
||||
base['Refresh-Token'] = refresh;
|
||||
}
|
||||
|
||||
final csrf = tokens.csrf;
|
||||
if (csrf != null) {
|
||||
base['CSRF-Token'] = csrf;
|
||||
}
|
||||
}
|
||||
|
||||
return base;
|
||||
}
|
||||
|
||||
(String, String?) splitOnce(String s, Pattern pattern) {
|
||||
final int idx = s.indexOf(pattern);
|
||||
if (idx < 0) {
|
||||
return (s, null);
|
||||
}
|
||||
return (s.substring(0, idx), s.substring(idx + 1));
|
||||
}
|
||||
|
||||
final _logger = Logger('trailbase');
|
||||
42
client/trailbase-dart/lib/src/pkce.dart
Normal file
@@ -0,0 +1,42 @@
|
||||
import 'dart:convert';
|
||||
import 'dart:math';
|
||||
|
||||
import 'package:crypto/crypto.dart';
|
||||
|
||||
/// A pair of (pkceCodeVerifier, pkceCodeChallenge).
|
||||
typedef PkcePair = ({
|
||||
/// The random code verifier.
|
||||
String verifier,
|
||||
|
||||
/// The code challenge, computed as base64UrlNoPad(sha256(verifier)).
|
||||
String challenge
|
||||
});
|
||||
|
||||
extension Pkce on PkcePair {
|
||||
/// Generates a [PkcePair].
|
||||
///
|
||||
/// [length] is the length used to generate the [verifier]. It must be
|
||||
/// between 32 and 96, inclusive, which corresponds to a [verifier] of
|
||||
/// length between 43 and 128, inclusive. The spec recommends a length of 32.
|
||||
static PkcePair generate({int length = 32}) {
|
||||
if (length < 32 || length > 96) {
|
||||
throw ArgumentError.value(
|
||||
length,
|
||||
'length',
|
||||
'The length must be between 32 and 96, inclusive.',
|
||||
);
|
||||
}
|
||||
|
||||
final random = Random.secure();
|
||||
final verifier =
|
||||
base64UrlEncode(List.generate(length, (_) => random.nextInt(256)))
|
||||
.split('=')
|
||||
.first;
|
||||
final challenge =
|
||||
base64UrlEncode(sha256.convert(ascii.encode(verifier)).bytes)
|
||||
.split('=')
|
||||
.first;
|
||||
|
||||
return (verifier: verifier, challenge: challenge);
|
||||
}
|
||||
}
|
||||
4
client/trailbase-dart/lib/trailbase.dart
Normal file
@@ -0,0 +1,4 @@
|
||||
library;
|
||||
|
||||
export 'src/client.dart';
|
||||
export 'src/pkce.dart';
|
||||
18
client/trailbase-dart/pubspec.yaml
Normal file
@@ -0,0 +1,18 @@
|
||||
name: trailbase
|
||||
description: Thin client library for TrailBase.
|
||||
homepage: https://trailbase.io
|
||||
repository: https://github.com/trailbaseio/trailbase
|
||||
version: 0.1.0
|
||||
|
||||
environment:
|
||||
sdk: ^3.5.3
|
||||
|
||||
dependencies:
|
||||
crypto: ^3.0.5
|
||||
dio: ^5.7.0
|
||||
jwt_decoder: ^2.0.1
|
||||
logging: ^1.2.0
|
||||
|
||||
dev_dependencies:
|
||||
lints: ^5.0.0
|
||||
test: ^1.24.0
|
||||
140
client/trailbase-dart/test/trailbase_test.dart
Normal file
@@ -0,0 +1,140 @@
|
||||
import 'dart:io';
|
||||
|
||||
import 'package:trailbase/trailbase.dart';
|
||||
import 'package:test/test.dart';
|
||||
import 'package:dio/dio.dart';
|
||||
|
||||
const port = 4006;
|
||||
|
||||
class SimpleStrict {
|
||||
final String id;
|
||||
|
||||
final String? textNull;
|
||||
final String? textDefault;
|
||||
final String textNotNull;
|
||||
|
||||
SimpleStrict.fromJson(Map<String, dynamic> json)
|
||||
: id = json['id'],
|
||||
textNull = json['text_null'],
|
||||
textDefault = json['text_default'],
|
||||
textNotNull = json['text_not_null'];
|
||||
}
|
||||
|
||||
Future<Client> connect() async {
|
||||
final client = Client('http://127.0.0.1:${port}');
|
||||
await client.login('admin@localhost', 'secret');
|
||||
return client;
|
||||
}
|
||||
|
||||
Future<Process> initTrailBase() async {
|
||||
final result = await Process.run('cargo', ['build']);
|
||||
if (result.exitCode > 0) {
|
||||
throw Exception(
|
||||
'Cargo build failed.\n\nstdout: ${result.stdout}\n\nstderr: ${result.stderr}\n');
|
||||
}
|
||||
final process = await Process.start('cargo', [
|
||||
'run',
|
||||
'--',
|
||||
'--data-dir',
|
||||
'../testfixture',
|
||||
'run',
|
||||
'--dev',
|
||||
'-a',
|
||||
'127.0.0.1:${port}',
|
||||
]);
|
||||
|
||||
final dio = Dio();
|
||||
for (int i = 0; i < 50; ++i) {
|
||||
try {
|
||||
final response = await dio.fetch(
|
||||
RequestOptions(path: 'http://127.0.0.1:${port}/api/healthcheck'));
|
||||
if (response.statusCode == 200) {
|
||||
return process;
|
||||
}
|
||||
} catch (err) {
|
||||
print('Trying to connect to TrailBase');
|
||||
}
|
||||
|
||||
await Future.delayed(Duration(milliseconds: 500));
|
||||
}
|
||||
|
||||
process.kill(ProcessSignal.sigkill);
|
||||
final exitCode = await process.exitCode;
|
||||
|
||||
await process.stdout.forEach(print);
|
||||
await process.stderr.forEach(print);
|
||||
throw Exception('Cargo run failed: ${exitCode}.');
|
||||
}
|
||||
|
||||
Future<void> main() async {
|
||||
if (!Directory.current.path.endsWith('trailbase-dart')) {
|
||||
throw Exception('Unexpected working directory');
|
||||
}
|
||||
|
||||
await initTrailBase();
|
||||
|
||||
group('client tests', () {
|
||||
test('auth', () async {
|
||||
final client = await connect();
|
||||
|
||||
final oldTokens = client.tokens();
|
||||
expect(oldTokens, isNotNull);
|
||||
|
||||
// Wait a moment so the expiry timestamp (whole seconds) advances; otherwise we may just get the same token minted again.
|
||||
await Future.delayed(Duration(milliseconds: 1500));
|
||||
|
||||
await client.refreshAuthToken();
|
||||
final newTokens = client.tokens();
|
||||
expect(newTokens!.auth, isNot(equals(oldTokens!.auth)));
|
||||
});
|
||||
|
||||
test('records', () async {
|
||||
final client = await connect();
|
||||
final api = client.records('simple_strict_table');
|
||||
|
||||
final int now = DateTime.now().millisecondsSinceEpoch ~/ 1000;
|
||||
final messages = [
|
||||
'dart client test 0: ${now}',
|
||||
'dart client test 1: ${now}',
|
||||
];
|
||||
final ids = [];
|
||||
for (final msg in messages) {
|
||||
ids.add(await api.create({'text_not_null': msg}));
|
||||
}
|
||||
|
||||
{
|
||||
final records = await api.list(
|
||||
filters: ['text_not_null=${messages[0]}'],
|
||||
);
|
||||
expect(records.length, 1);
|
||||
expect(records[0]['text_not_null'], messages[0]);
|
||||
}
|
||||
|
||||
{
|
||||
final recordsAsc = await api.list(
|
||||
order: ['+text_not_null'],
|
||||
filters: ['text_not_null[like]=%${now}'],
|
||||
);
|
||||
expect(recordsAsc.map((el) => el['text_not_null']),
|
||||
orderedEquals(messages));
|
||||
|
||||
final recordsDesc = await api.list(
|
||||
order: ['-text_not_null'],
|
||||
filters: ['text_not_null[like]=%${now}'],
|
||||
);
|
||||
expect(recordsDesc.map((el) => el['text_not_null']).toList().reversed,
|
||||
orderedEquals(messages));
|
||||
}
|
||||
|
||||
final record = SimpleStrict.fromJson(await api.read(ids[0]));
|
||||
|
||||
expect(ids[0] == record.id, isTrue);
|
||||
// Note: the .id() is needed otherwise we call String's operator==. It's not ideal
|
||||
// but we didn't come up with a better option.
|
||||
expect(record.id.id() == ids[0], isTrue);
|
||||
expect(RecordId.uuid(record.id) == ids[0], isTrue);
|
||||
|
||||
expect(record.textNotNull, messages[0]);
|
||||
});
|
||||
});
|
||||
}
|
||||
2
client/trailbase-ts/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
node_modules/
|
||||
dist/
|
||||
30
client/trailbase-ts/eslint.config.mjs
Normal file
@@ -0,0 +1,30 @@
|
||||
import globals from "globals";
|
||||
import pluginJs from "@eslint/js";
|
||||
import tseslint from "typescript-eslint";
|
||||
|
||||
export default [
|
||||
pluginJs.configs.recommended,
|
||||
...tseslint.configs.recommended,
|
||||
{
|
||||
ignores: ["dist/", "node_modules/"],
|
||||
},
|
||||
{
|
||||
files: ["**/*.{js,mjs,cjs,mts,ts,tsx,jsx}"],
|
||||
rules: {
|
||||
// https://typescript-eslint.io/rules/no-explicit-any/
|
||||
"@typescript-eslint/no-explicit-any": "warn",
|
||||
// http://eslint.org/docs/rules/no-unused-vars
|
||||
"@typescript-eslint/no-unused-vars": [
|
||||
"error",
|
||||
{
|
||||
vars: "all",
|
||||
args: "after-used",
|
||||
argsIgnorePattern: "^_",
|
||||
varsIgnorePattern: "^_",
|
||||
},
|
||||
],
|
||||
"no-empty": ["error", { allowEmptyCatch: true }],
|
||||
},
|
||||
languageOptions: { globals: globals.browser },
|
||||
},
|
||||
];
|
||||
53
client/trailbase-ts/package.json
Normal file
@@ -0,0 +1,53 @@
|
||||
{
|
||||
"name": "trailbase",
|
||||
"version": "0.1.0",
|
||||
"description": "Official TrailBase client",
|
||||
"type": "module",
|
||||
"main": "./src/index.ts",
|
||||
"publishConfig": {
|
||||
"access": "public",
|
||||
"main": "./dist/client/trailbase-ts/src/index.js",
|
||||
"types": "./dist/client/trailbase-ts/src/index.d.ts",
|
||||
"exports": {
|
||||
".": {
|
||||
"types": "./dist/client/trailbase-ts/src/index.d.ts",
|
||||
"default": "./dist/client/trailbase-ts/src/index.js"
|
||||
}
|
||||
}
|
||||
},
|
||||
"files": [
|
||||
"dist",
|
||||
"package.json"
|
||||
],
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/trailbaseio/trailbae.git",
|
||||
"directory": "client/trailbase-ts"
|
||||
},
|
||||
"homepage": "https://trailbase.io",
|
||||
"scripts": {
|
||||
"start": "tsc && node dist/client/trailbase-ts/src/index.js",
|
||||
"build": "tsc",
|
||||
"test": "vitest run && vite-node tests/integration_test_runner.ts",
|
||||
"format": "prettier -w src tests",
|
||||
"check": "tsc --noEmit --skipLibCheck && eslint"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@eslint/js": "^9.13.0",
|
||||
"eslint": "^9.13.0",
|
||||
"execa": "^9.5.1",
|
||||
"globals": "^15.11.0",
|
||||
"http-status": "^2.0.0",
|
||||
"jsdom": "^25.0.1",
|
||||
"prettier": "^3.3.3",
|
||||
"tinybench": "^3.0.0",
|
||||
"typescript": "^5.6.3",
|
||||
"typescript-eslint": "^8.12.1",
|
||||
"vite-node": "^2.1.4",
|
||||
"vitest": "^2.1.4"
|
||||
},
|
||||
"dependencies": {
|
||||
"jwt-decode": "^4.0.0",
|
||||
"uuid": "^11.0.2"
|
||||
}
|
||||
}
|
||||
531
client/trailbase-ts/src/index.ts
Normal file
@@ -0,0 +1,531 @@
|
||||
import { jwtDecode } from "jwt-decode";
|
||||
|
||||
import type { ChangeEmailRequest } from "@bindings/ChangeEmailRequest";
|
||||
import type { LoginRequest } from "@bindings/LoginRequest";
|
||||
import type { LoginResponse } from "@bindings/LoginResponse";
|
||||
import type { LoginStatusResponse } from "@bindings/LoginStatusResponse";
|
||||
import type { LogoutRequest } from "@bindings/LogoutRequest";
|
||||
import type { RefreshRequest } from "@bindings/RefreshRequest";
|
||||
import type { RefreshResponse } from "@bindings/RefreshResponse";
|
||||
|
||||
export type User = {
|
||||
id: string;
|
||||
email: string;
|
||||
};
|
||||
|
||||
export type Pagination = {
|
||||
cursor?: string;
|
||||
limit?: number;
|
||||
};
|
||||
|
||||
export type Tokens = {
|
||||
auth_token: string;
|
||||
refresh_token: string | null;
|
||||
csrf_token: string | null;
|
||||
};
|
||||
|
||||
type TokenClaims = {
|
||||
sub: string;
|
||||
iat: number;
|
||||
exp: number;
|
||||
email: string;
|
||||
csrf_token: string;
|
||||
};
|
||||
|
||||
type TokenState = {
|
||||
state?: {
|
||||
tokens: Tokens;
|
||||
claims: TokenClaims;
|
||||
};
|
||||
headers: HeadersInit;
|
||||
};
|
||||
|
||||
function buildTokenState(tokens?: Tokens): TokenState {
|
||||
return {
|
||||
state: tokens && {
|
||||
tokens,
|
||||
claims: jwtDecode(tokens.auth_token),
|
||||
},
|
||||
headers: headers(tokens),
|
||||
};
|
||||
}
|
||||
|
||||
type FetchOptions = RequestInit & {
|
||||
throwOnError?: boolean;
|
||||
};
|
||||
|
||||
export class FetchError extends Error {
|
||||
public status: number;
|
||||
|
||||
constructor(status: number, msg: string) {
|
||||
super(msg);
|
||||
this.status = status;
|
||||
}
|
||||
|
||||
static async from(response: Response): Promise<FetchError> {
|
||||
let body: string | undefined;
|
||||
try {
|
||||
body = await response.text();
|
||||
} catch {}
|
||||
|
||||
console.warn(response);
|
||||
|
||||
return new FetchError(
|
||||
response.status,
|
||||
body ? `${response.statusText}: ${body}` : response.statusText,
|
||||
);
|
||||
}
|
||||
|
||||
public isClient(): boolean {
|
||||
return this.status >= 400 && this.status < 500;
|
||||
}
|
||||
|
||||
public isServer(): boolean {
|
||||
return this.status >= 500;
|
||||
}
|
||||
|
||||
public toString(): string {
|
||||
return `[${this.status}] ${this.message}`;
|
||||
}
|
||||
}
|
||||
|
||||
export interface FileUpload {
|
||||
content_type?: null | string;
|
||||
filename?: null | string;
|
||||
mime_type?: null | string;
|
||||
objectstore_path: string;
|
||||
}
|
||||
|
||||
/// Provides CRUD access to records through TrailBase's record API.
|
||||
///
|
||||
/// TODO: add file upload/download.
|
||||
export class RecordApi {
|
||||
private static readonly _recordApi = "api/records/v1";
|
||||
private readonly _createApi: string;
|
||||
|
||||
constructor(
|
||||
private readonly client: Client,
|
||||
private readonly name: string,
|
||||
) {
|
||||
this._createApi = `${RecordApi._recordApi}/${this.name}`;
|
||||
}
|
||||
|
||||
public async list<T = Record<string, unknown>>(opts?: {
|
||||
pagination?: Pagination;
|
||||
order?: string[];
|
||||
filters?: string[];
|
||||
}): Promise<T[]> {
|
||||
const params: [string, string][] = [];
|
||||
const pagination = opts?.pagination;
|
||||
if (pagination) {
|
||||
const cursor = pagination.cursor;
|
||||
if (cursor) params.push(["cursor", cursor]);
|
||||
|
||||
const limit = pagination.limit;
|
||||
if (limit) params.push(["limit", limit.toString()]);
|
||||
}
|
||||
const order = opts?.order;
|
||||
if (order) params.push(["order", order.join(",")]);
|
||||
|
||||
const filters = opts?.filters;
|
||||
if (filters) {
|
||||
for (const filter of filters) {
|
||||
const [nameOp, value] = filter.split("=", 2);
|
||||
if (value === undefined) {
|
||||
throw Error(`Filter '${filter}' does not match: 'name[op]=value'`);
|
||||
}
|
||||
params.push([nameOp, value]);
|
||||
}
|
||||
}
|
||||
|
||||
const queryParams = encodeURI(
|
||||
params.map(([key, value]) => `${key}=${value}`).join("&"),
|
||||
);
|
||||
const response = await this.client.fetch(
|
||||
`${RecordApi._recordApi}/${this.name}?${queryParams}`,
|
||||
);
|
||||
return (await response.json()) as T[];
|
||||
}
|
||||
|
||||
public async read<T = Record<string, unknown>>(
|
||||
id: string | number,
|
||||
): Promise<T> {
|
||||
const response = await this.client.fetch(
|
||||
`${RecordApi._recordApi}/${this.name}/${id}`,
|
||||
);
|
||||
return (await response.json()) as T;
|
||||
}
|
||||
|
||||
public async create<T = Record<string, unknown>>(
|
||||
record: T,
|
||||
): Promise<Response> {
|
||||
return this.client.fetch(this._createApi, {
|
||||
method: "POST",
|
||||
body: JSON.stringify(record),
|
||||
});
|
||||
}
|
||||
|
||||
public async createId<T = Record<string, unknown>>(
|
||||
record: T,
|
||||
): Promise<string | number> {
|
||||
const response = await this.create(record);
|
||||
return (await response.json()).id;
|
||||
}
|
||||
|
||||
public async update<T = Record<string, unknown>>(
|
||||
id: string | number,
|
||||
record: Partial<T>,
|
||||
): Promise<void> {
|
||||
await this.client.fetch(`${RecordApi._recordApi}/${this.name}/${id}`, {
|
||||
method: "PATCH",
|
||||
body: JSON.stringify(record),
|
||||
});
|
||||
}
|
||||
|
||||
public async delete(id: string | number): Promise<void> {
|
||||
await this.client.fetch(`${RecordApi._recordApi}/${this.name}/${id}`, {
|
||||
method: "DELETE",
|
||||
});
|
||||
}
|
||||
|
||||
public imageUri(id: string | number, colName: string): string {
|
||||
return `${this.client.site}/${RecordApi._recordApi}/${this.name}/${id}/file/${colName}`;
|
||||
}
|
||||
|
||||
public imagesUri(
|
||||
id: string | number,
|
||||
colName: string,
|
||||
index: number,
|
||||
): string {
|
||||
return `${this.client.site}/${RecordApi._recordApi}/${this.name}/${id}/files/${colName}/${index}`;
|
||||
}
|
||||
}
|
||||
|
||||
class ThinClient {
|
||||
constructor(public readonly site: string) {}
|
||||
|
||||
public async fetch(
|
||||
path: string,
|
||||
tokenState: TokenState,
|
||||
init?: RequestInit,
|
||||
): Promise<Response> {
|
||||
if (path.startsWith("/")) {
|
||||
throw Error("Path starts with '/'. Relative path expected.");
|
||||
}
|
||||
|
||||
const response = await fetch(`${this.site}/${path}`, {
|
||||
...init,
|
||||
credentials: isDev ? "include" : "same-origin",
|
||||
headers: tokenState.headers,
|
||||
});
|
||||
|
||||
return response;
|
||||
}
|
||||
}
|
||||
|
||||
type ClientOptions = {
|
||||
tokens?: Tokens;
|
||||
onAuthChange?: (client: Client, user?: User) => void;
|
||||
};
|
||||
|
||||
/// Client for interacting with TrailBase auth and record APIs.
|
||||
///
|
||||
/// TODO: Add
|
||||
/// * issue_password_reset_email
|
||||
/// * issue_change_email
|
||||
/// * status
|
||||
export class Client {
|
||||
private static readonly _authApi = "api/auth/v1";
|
||||
private static readonly _authUi = "_/auth";
|
||||
|
||||
private readonly _client: ThinClient;
|
||||
private readonly _authChange:
|
||||
| undefined
|
||||
| ((client: Client, user?: User) => void);
|
||||
private _tokenState: TokenState;
|
||||
|
||||
constructor(site: string, opts?: ClientOptions) {
|
||||
this._client = new ThinClient(site);
|
||||
this._authChange = opts?.onAuthChange;
|
||||
|
||||
this._tokenState = this.updateTokens(opts?.tokens);
|
||||
}
|
||||
|
||||
public static init(site: string, opts?: ClientOptions): Client {
|
||||
return new Client(site, opts);
|
||||
}
|
||||
|
||||
public static async tryFromCookies(
|
||||
site: string,
|
||||
opts?: ClientOptions,
|
||||
): Promise<Client> {
|
||||
const client = new Client(site, opts);
|
||||
|
||||
// Prefer explicit tokens. When given, do not update/refresh to avoid infinite recursion
|
||||
// with `($token) => Client` factories.
|
||||
if (!client.tokens()) {
|
||||
try {
|
||||
const response = await client.fetch(`${Client._authApi}/status`);
|
||||
const status: LoginStatusResponse = await response.json();
|
||||
|
||||
const authToken = status?.auth_token;
|
||||
if (authToken) {
|
||||
client.updateTokens({
|
||||
auth_token: authToken,
|
||||
refresh_token: status.refresh_token,
|
||||
csrf_token: status.csrf_token,
|
||||
});
|
||||
}
|
||||
} catch (err) {
|
||||
console.debug("No valid cookies found: ", err);
|
||||
}
|
||||
}
|
||||
|
||||
return client;
|
||||
}
|
||||
|
||||
private updateTokens(tokens?: Tokens): TokenState {
|
||||
const state = buildTokenState(tokens);
|
||||
|
||||
this._tokenState = state;
|
||||
this._authChange?.(this, this.user());
|
||||
|
||||
const claims = state.state?.claims;
|
||||
if (claims) {
|
||||
const now = Date.now() / 1000;
|
||||
if (claims.exp < now) {
|
||||
console.warn("Token expired");
|
||||
}
|
||||
}
|
||||
|
||||
return state;
|
||||
}
|
||||
|
||||
public get site() {
|
||||
return this._client.site;
|
||||
}
|
||||
|
||||
/// Low-level access to tokens (auth, refresh, csrf) useful for persisting them.
|
||||
public tokens = (): Tokens | undefined => this._tokenState?.state?.tokens;
|
||||
|
||||
/// Provides current user.
|
||||
public user(): User | undefined {
|
||||
const claims = this._tokenState.state?.claims;
|
||||
if (claims) {
|
||||
return {
|
||||
id: claims.sub,
|
||||
email: claims.email,
|
||||
};
|
||||
}
|
||||
}
|
||||
public records = (name: string): RecordApi => new RecordApi(this, name);
|
||||
|
||||
public async avatarUrl(): Promise<string | undefined> {
|
||||
const user = this.user();
|
||||
if (user) {
|
||||
const response = await this.fetch(`${Client._authApi}/avatar/${user.id}`);
|
||||
const json = (await response.json()) as { avatar_url: string };
|
||||
return json.avatar_url;
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
public async login(email: string, password: string): Promise<void> {
|
||||
const response = await this.fetch(`${Client._authApi}/login`, {
|
||||
method: "POST",
|
||||
body: JSON.stringify({
|
||||
email: email,
|
||||
password: password,
|
||||
} as LoginRequest),
|
||||
});
|
||||
|
||||
this.updateTokens((await response.json()) as LoginResponse);
|
||||
}
|
||||
|
||||
public loginUri(redirect?: string): string {
|
||||
return `${this._client.site}/${Client._authUi}/login?${redirect ? `redirect_to=${redirect}` : ""}`;
|
||||
}
|
||||
|
||||
public async logout(): Promise<boolean> {
|
||||
try {
|
||||
const refresh_token = this._tokenState.state?.tokens.refresh_token;
|
||||
if (refresh_token) {
|
||||
await this.fetch(`${Client._authApi}/logout`, {
|
||||
method: "POST",
|
||||
body: JSON.stringify({
|
||||
refresh_token,
|
||||
} as LogoutRequest),
|
||||
});
|
||||
} else {
|
||||
await this.fetch(`${Client._authApi}/logout`);
|
||||
}
|
||||
} catch (err) {
|
||||
console.warn(err);
|
||||
}
|
||||
this.updateTokens(undefined);
|
||||
return true;
|
||||
}
|
||||
|
||||
public logoutUri(redirect?: string): string {
|
||||
return `${this._client.site}/${Client._authApi}/logout?${redirect ? `redirect_to=${redirect}` : ""}`;
|
||||
}
|
||||
|
||||
public async deleteUser(): Promise<void> {
|
||||
await this.fetch(`${Client._authApi}/delete`);
|
||||
this.updateTokens(undefined);
|
||||
}
|
||||
|
||||
public async changeEmail(email: string): Promise<void> {
|
||||
await this.fetch(`${Client._authApi}/change_email`, {
|
||||
method: "POST",
|
||||
body: JSON.stringify({
|
||||
new_email: email,
|
||||
} as ChangeEmailRequest),
|
||||
});
|
||||
}
|
||||
|
||||
public async refreshAuthToken(): Promise<void> {
|
||||
const refreshToken = Client.shouldRefresh(this._tokenState);
|
||||
if (refreshToken) {
|
||||
this._tokenState = await this.refreshTokensImpl(refreshToken);
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the refresh token if the auth token should be refreshed.
|
||||
private static shouldRefresh(tokenState: TokenState): string | undefined {
|
||||
const state = tokenState.state;
|
||||
if (state && state.claims.exp - 60 < Date.now() / 1000) {
|
||||
return state.tokens?.refresh_token ?? undefined;
|
||||
}
|
||||
}
|
||||
|
||||
private async refreshTokensImpl(refreshToken: string): Promise<TokenState> {
|
||||
const response = await this._client.fetch(
|
||||
`${Client._authApi}/refresh`,
|
||||
this._tokenState,
|
||||
{
|
||||
method: "POST",
|
||||
body: JSON.stringify({
|
||||
refresh_token: refreshToken,
|
||||
} as RefreshRequest),
|
||||
},
|
||||
);
|
||||
|
||||
if (!response.ok) {
|
||||
if (response.status === 401) {
|
||||
this.logout();
|
||||
}
|
||||
throw await FetchError.from(response);
|
||||
}
|
||||
|
||||
return buildTokenState({
|
||||
...((await response.json()) as RefreshResponse),
|
||||
refresh_token: refreshToken,
|
||||
});
|
||||
}
|
||||
|
||||
/// Fetches data from TrailBase endpoints, e.g.:
|
||||
// const response = await client.fetch("api/auth/v1/status");
|
||||
//
|
||||
// Unlike native fetch, will throw in case !response.ok.
|
||||
public async fetch(path: string, init?: FetchOptions): Promise<Response> {
|
||||
let tokenState = this._tokenState;
|
||||
const refreshToken = Client.shouldRefresh(tokenState);
|
||||
if (refreshToken) {
|
||||
this._tokenState = tokenState =
|
||||
await this.refreshTokensImpl(refreshToken);
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await this._client.fetch(path, tokenState, init);
|
||||
if (!response.ok && (init?.throwOnError ?? true)) {
|
||||
throw await FetchError.from(response);
|
||||
}
|
||||
return response;
|
||||
} catch (err) {
|
||||
if (err instanceof TypeError) {
|
||||
throw Error(`Connection refused ${err}. TrailBase down or CORS?`);
|
||||
}
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function _isDev(): boolean {
|
||||
type ImportMeta = {
|
||||
env: object | undefined;
|
||||
};
|
||||
const env = (import.meta as unknown as ImportMeta).env;
|
||||
const key = "DEV" as keyof typeof env;
|
||||
const isDev = env?.[key] ?? false;
|
||||
|
||||
return isDev;
|
||||
}
|
||||
const isDev = _isDev();
|
||||
|
||||
export function headers(tokens?: Tokens): HeadersInit {
|
||||
const base = {
|
||||
"Content-Type": "application/json",
|
||||
};
|
||||
|
||||
if (tokens) {
|
||||
const { auth_token, refresh_token, csrf_token } = tokens;
|
||||
return {
|
||||
...base,
|
||||
...(auth_token && {
|
||||
Authorization: `Bearer ${auth_token}`,
|
||||
}),
|
||||
...(refresh_token && {
|
||||
"Refresh-Token": refresh_token,
|
||||
}),
|
||||
...(csrf_token && {
|
||||
"CSRF-Token": csrf_token,
|
||||
}),
|
||||
};
|
||||
}
|
||||
|
||||
return base;
|
||||
}
|
||||
|
||||
export function textEncode(s: string): Uint8Array {
|
||||
return new TextEncoder().encode(s);
|
||||
}
|
||||
|
||||
export function textDecode(ar: Uint8Array): string {
|
||||
return new TextDecoder().decode(ar);
|
||||
}
|
||||
|
||||
/// Decode a base64 string to bytes.
|
||||
export function base64Decode(base64: string): string {
|
||||
return atob(base64);
|
||||
}
|
||||
|
||||
/// Decode a "url-safe" base64 string to bytes.
|
||||
export function urlSafeBase64Decode(base64: string): string {
|
||||
return base64Decode(base64.replace(/_/g, "/").replace(/-/g, "+"));
|
||||
}
|
||||
|
||||
/// Encode an arbitrary string input as base64 string.
|
||||
export function base64Encode(s: string): string {
|
||||
return btoa(s);
|
||||
}
|
||||
|
||||
/// Encode an arbitrary string input as a "url-safe" base64 string.
|
||||
export function urlSafeBase64Encode(s: string): string {
|
||||
return base64Encode(s).replace(/\//g, "_").replace(/\+/g, "-");
|
||||
}
|
||||
|
||||
export function asyncBase64Encode(blob: Blob): Promise<string> {
|
||||
return new Promise((resolve, _) => {
|
||||
const reader = new FileReader();
|
||||
reader.onloadend = () => resolve(reader.result as string);
|
||||
reader.readAsDataURL(blob);
|
||||
});
|
||||
}
|
||||
|
||||
export const exportedForTesting = isDev
|
||||
? {
|
||||
base64Decode,
|
||||
base64Encode,
|
||||
}
|
||||
: undefined;
|
||||
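For orientation, a minimal usage sketch of the client defined above. The site URL, credentials, and table name are placeholders borrowed from the test fixtures; error handling is omitted and this is not a definitive recipe:

```
// Sketch: basic usage of Client/RecordApi from src/index.ts.
// "http://localhost:4000", the credentials, and "simple_strict_table" are
// placeholder values taken from the test fixtures; substitute your own.
import { Client } from "trailbase";

async function demo(): Promise<void> {
  const client = Client.init("http://localhost:4000");
  await client.login("admin@localhost", "secret");

  const api = client.records("simple_strict_table");
  const id = await api.createId({ text_not_null: "hello from the sketch" });

  const rows = await api.list({
    filters: ["text_not_null[like]=%sketch%"],
    order: ["-text_not_null"],
    pagination: { limit: 10 },
  });
  console.log(id, rows);
}
```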
25
client/trailbase-ts/tests/base64.test.ts
Normal file
@@ -0,0 +1,25 @@
|
||||
import { expect, test } from "vitest";
|
||||
import {
|
||||
exportedForTesting,
|
||||
urlSafeBase64Encode,
|
||||
urlSafeBase64Decode,
|
||||
textEncode,
|
||||
textDecode,
|
||||
asyncBase64Encode,
|
||||
} from "../src/index";
|
||||
|
||||
const { base64Encode, base64Decode } = exportedForTesting!;
|
||||
|
||||
test("encoding", async () => {
|
||||
const input = ".,~`!@#$%^&*()_Hi!:)/|\\";
|
||||
|
||||
expect(textDecode(textEncode(input))).toBe(input);
|
||||
expect(base64Decode(base64Encode(input))).toBe(input);
|
||||
expect(urlSafeBase64Decode(urlSafeBase64Encode(input))).toBe(input);
|
||||
|
||||
const blob = new Blob([textEncode(input)]);
|
||||
const base64 = await asyncBase64Encode(blob);
|
||||
const components = base64.split(",");
|
||||
|
||||
expect(base64Decode(components[1])).toBe(input);
|
||||
});
|
||||
28
client/trailbase-ts/tests/encoding.bench.ts
Normal file
@@ -0,0 +1,28 @@
|
||||
import { test } from "vitest";
|
||||
import { Bench } from "tinybench";
|
||||
import {
|
||||
urlSafeBase64Encode,
|
||||
urlSafeBase64Decode,
|
||||
base64Encode,
|
||||
base64Decode,
|
||||
} from "../src/index";
|
||||
|
||||
test("encoding benchmark", async () => {
|
||||
const bench = new Bench({ time: 500 });
|
||||
|
||||
const input = "!@#$%^&*(!@#$%^&*@".repeat(1000);
|
||||
const standardInput = base64Encode(input);
|
||||
const urlSafeInput = urlSafeBase64Encode(input);
|
||||
|
||||
bench
|
||||
.add("Url-Safe decode", () => {
|
||||
urlSafeBase64Decode(urlSafeInput);
|
||||
})
|
||||
.add("Standard decode", () => {
|
||||
base64Decode(standardInput);
|
||||
});
|
||||
|
||||
await bench.run();
|
||||
|
||||
console.table(bench.table());
|
||||
});
|
||||
182
client/trailbase-ts/tests/integration/integration.test.ts
Normal file
@@ -0,0 +1,182 @@
|
||||
/* eslint-disable @typescript-eslint/no-unused-expressions */
|
||||
|
||||
import { expect, test } from "vitest";
|
||||
import { Client, headers, urlSafeBase64Encode } from "../../src/index";
|
||||
import { status } from "http-status";
|
||||
import { v7 as uuidv7, parse as uuidParse } from "uuid";
|
||||
|
||||
test("headers", () => {
|
||||
const h0 = headers();
|
||||
expect(Object.keys(h0).length).toBe(1);
|
||||
const h1 = headers({
|
||||
auth_token: "foo",
|
||||
refresh_token: "bar",
|
||||
csrf_token: null,
|
||||
});
|
||||
expect(Object.keys(h1).length).toBe(3);
|
||||
});
|
||||
|
||||
type SimpleStrict = {
|
||||
id: string;
|
||||
|
||||
text_null?: string;
|
||||
text_default?: string;
|
||||
text_not_null: string;
|
||||
|
||||
// Add or generate missing fields.
|
||||
};
|
||||
|
||||
type NewSimpleStrict = Partial<SimpleStrict>;
|
||||
|
||||
type SimpleCompleteView = SimpleStrict;
|
||||
|
||||
type SimpleSubsetView = {
|
||||
id: string;
|
||||
|
||||
t_null?: string;
|
||||
t_default?: string;
|
||||
t_not_null: string;
|
||||
};
|
||||
|
||||
const sleep = (ms: number) => new Promise((r) => setTimeout(r, ms));
|
||||
const port: number = 4005;
|
||||
|
||||
async function connect(): Promise<Client> {
|
||||
const client = Client.init(`http://127.0.0.1:${port}`);
|
||||
await client.login("admin@localhost", "secret");
|
||||
return client;
|
||||
}
|
||||
|
||||
// WARN: this test is not hermetic. It requires an appropriate TrailBase instance to be running.
|
||||
test("auth integration tests", async () => {
|
||||
const client = await connect();
|
||||
|
||||
const oldTokens = client.tokens();
|
||||
expect(oldTokens).not.undefined;
|
||||
|
||||
// Wait a moment so the expiry timestamp (whole seconds) advances; otherwise we may just get the same token minted again.
|
||||
await sleep(1500);
|
||||
|
||||
await client.refreshAuthToken();
|
||||
const newTokens = client.tokens();
|
||||
expect(newTokens?.auth_token).not.undefined.and.not.equals(oldTokens!.auth_token);
|
||||
|
||||
expect(await client.logout()).toBe(true);
|
||||
expect(client.user()).toBe(undefined);
|
||||
});
|
||||
|
||||
test("Record integration tests", async () => {
|
||||
const client = await connect();
|
||||
const api = client.records("simple_strict_table");
|
||||
|
||||
const now = new Date().getTime();
|
||||
const messages = [`ts client test 0: ${now}`, `ts client test 1: ${now}`];
|
||||
|
||||
const ids: string[] = [];
|
||||
for (const msg of messages) {
|
||||
ids.push(
|
||||
(await api.createId<NewSimpleStrict>({ text_not_null: msg })) as string,
|
||||
);
|
||||
}
|
||||
|
||||
{
|
||||
const records = await api.list<SimpleStrict>({
|
||||
filters: [`text_not_null=${messages[0]}`],
|
||||
});
|
||||
expect(records.length).toBe(1);
|
||||
expect(records[0].text_not_null).toBe(messages[0]);
|
||||
}
|
||||
|
||||
{
|
||||
const records = await api.list<SimpleStrict>({
|
||||
filters: [`text_not_null[like]=%${now}`],
|
||||
order: ["+text_not_null"],
|
||||
});
|
||||
expect(records.map((el) => el.text_not_null)).toStrictEqual(messages);
|
||||
}
|
||||
|
||||
{
|
||||
const records = await api.list<SimpleStrict>({
|
||||
filters: [`text_not_null[like]=%${now}`],
|
||||
order: ["-text_not_null"],
|
||||
});
|
||||
expect(records.map((el) => el.text_not_null).reverse()).toStrictEqual(
|
||||
messages,
|
||||
);
|
||||
}
|
||||
|
||||
const record: SimpleStrict = await api.read(ids[0]);
|
||||
expect(record.id).toStrictEqual(ids[0]);
|
||||
expect(record.text_not_null).toStrictEqual(messages[0]);
|
||||
|
||||
// Test the 1:1 view-based record API.
|
||||
const view_record: SimpleCompleteView = await client
|
||||
.records("simple_complete_view")
|
||||
.read(ids[0]);
|
||||
expect(view_record.id).toStrictEqual(ids[0]);
|
||||
expect(view_record.text_not_null).toStrictEqual(messages[0]);
|
||||
|
||||
// Test view-based record API with column renames.
|
||||
const subset_view_record: SimpleSubsetView = await client
|
||||
.records("simple_subset_view")
|
||||
.read(ids[0]);
|
||||
expect(subset_view_record.id).toStrictEqual(ids[0]);
|
||||
expect(subset_view_record.t_not_null).toStrictEqual(messages[0]);
|
||||
|
||||
const updated_value: Partial<SimpleStrict> = {
|
||||
text_not_null: "updated not null",
|
||||
text_default: "updated default",
|
||||
text_null: "updated null",
|
||||
};
|
||||
await api.update(ids[1], updated_value);
|
||||
const updated_record: SimpleStrict = await api.read(ids[1]);
|
||||
expect(updated_record).toEqual(
|
||||
expect.objectContaining({
|
||||
id: ids[1],
|
||||
...updated_value,
|
||||
}),
|
||||
);
|
||||
|
||||
await api.delete(ids[1]);
|
||||
|
||||
expect(await client.logout()).toBe(true);
|
||||
expect(client.user()).toBe(undefined);
|
||||
|
||||
expect(async () => await api.read<SimpleStrict>(ids[0])).rejects.toThrowError(
|
||||
expect.objectContaining({
|
||||
status: status.FORBIDDEN,
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
test("record error tests", async () => {
|
||||
const client = await connect();
|
||||
|
||||
const nonExistantId = urlSafeBase64Encode(
|
||||
String.fromCharCode.apply(null, uuidParse(uuidv7())),
|
||||
);
|
||||
const nonExistantApi = client.records("non-existant");
|
||||
expect(
|
||||
async () => await nonExistantApi.read<SimpleStrict>(nonExistantId),
|
||||
).rejects.toThrowError(
|
||||
expect.objectContaining({
|
||||
status: status.METHOD_NOT_ALLOWED,
|
||||
}),
|
||||
);
|
||||
|
||||
const api = client.records("simple_strict_table");
|
||||
expect(
|
||||
async () => await api.read<SimpleStrict>("invalid id"),
|
||||
).rejects.toThrowError(
|
||||
expect.objectContaining({
|
||||
status: status.BAD_REQUEST,
|
||||
}),
|
||||
);
|
||||
expect(
|
||||
async () => await api.read<SimpleStrict>(nonExistantId),
|
||||
).rejects.toThrowError(
|
||||
expect.objectContaining({
|
||||
status: status.NOT_FOUND,
|
||||
}),
|
||||
);
|
||||
});
|
||||
72
client/trailbase-ts/tests/integration_test_runner.ts
Normal file
@@ -0,0 +1,72 @@
|
||||
/* eslint-disable @typescript-eslint/no-unused-vars */
|
||||
|
||||
import { createVitest } from "vitest/node";
|
||||
import { cwd } from "node:process";
|
||||
import { execa, type Subprocess } from "execa";
|
||||
|
||||
const sleep = (ms: number) => new Promise((r) => setTimeout(r, ms));
|
||||
const port: number = 4005;
|
||||
|
||||
async function initTrailBase(): Promise<{ subprocess: Subprocess }> {
|
||||
const pwd = cwd();
|
||||
if (!pwd.endsWith("trailbase-ts")) {
|
||||
throw Error(`Unexpected CWD: ${pwd}`);
|
||||
}
|
||||
|
||||
const build = await execa`cargo build`;
|
||||
if (build.failed) {
|
||||
console.error("STDOUT:", build.stdout);
|
||||
console.error("STDERR:", build.stderr);
|
||||
throw Error("cargo build failed");
|
||||
}
|
||||
|
||||
const subprocess = execa`cargo run -- --data-dir ../testfixture run --dev -a 127.0.0.1:${port}`;
|
||||
|
||||
for (let i = 0; i < 50; ++i) {
|
||||
if ((subprocess.exitCode ?? 0) > 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await fetch(`http://127.0.0.1:${port}/api/healthcheck`);
|
||||
if (response.ok) {
|
||||
return { subprocess };
|
||||
}
|
||||
|
||||
console.log(await response.text());
|
||||
} catch (err) {
|
||||
console.info("Waiting for TrailBase to become healthy");
|
||||
}
|
||||
|
||||
await sleep(500);
|
||||
}
|
||||
|
||||
subprocess.kill();
|
||||
|
||||
const result = await subprocess;
|
||||
console.error("EXIT:", result.exitCode);
|
||||
console.error("STDOUT:", result.stdout);
|
||||
console.error("STDERR:", result.stderr);
|
||||
|
||||
throw Error("Failed to start TrailBase");
|
||||
}
|
||||
|
||||
const { subprocess } = await initTrailBase();
|
||||
|
||||
const ctx = await createVitest("test", {
|
||||
watch: false,
|
||||
environment: "jsdom",
|
||||
include: ["tests/integration/*"],
|
||||
});
|
||||
await ctx.start();
|
||||
await ctx.close();
|
||||
|
||||
if (subprocess.exitCode === null) {
|
||||
// Still running
|
||||
subprocess.kill();
|
||||
} else {
|
||||
// Otherwise TrailBase terminated. Log output to provide a clue as to why.
|
||||
const { stderr, stdout } = subprocess;
|
||||
console.error(stdout);
|
||||
console.error(stderr);
|
||||
}
|
||||
14
client/trailbase-ts/tsconfig.json
Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"extends": "../../ui/common/tsconfig.base.json",
|
||||
"compilerOptions": {
|
||||
"declaration": true,
|
||||
"outDir": "./dist",
|
||||
"paths": {
|
||||
"@/*": ["./src/*"],
|
||||
"@bindings/*": ["../../trailbase-core/bindings/*"]
|
||||
}
|
||||
},
|
||||
"include": [
|
||||
"./src/**/*"
|
||||
]
|
||||
}
|
||||
14
client/trailbase-ts/vitest.config.ts
Normal file
@@ -0,0 +1,14 @@
|
||||
import { defineConfig } from 'vitest/config'
|
||||
|
||||
export default defineConfig({
|
||||
test: {
|
||||
globals: true,
|
||||
environment: "jsdom",
|
||||
// We do not include transitively, since we rely on our own runner for
|
||||
// executing tests/integration/** instead.
|
||||
include: [
|
||||
'tests/*.test.ts',
|
||||
'tests/*.bench.ts',
|
||||
],
|
||||
},
|
||||
})
|
||||
14
docker-compose.yml
Normal file
@@ -0,0 +1,14 @@
|
||||
services:
|
||||
|
||||
trail:
|
||||
build: .
|
||||
ports:
|
||||
- "4000:4000"
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- ./traildepot:/app/traildepot
|
||||
environment:
|
||||
# Setup Rust's env-logger.
|
||||
RUST_LOG: "info,refinery_core=warn"
|
||||
RUST_BACKTRACE: "1"
|
||||
command: "/app/trail --data-dir /app/traildepot run --address 0.0.0.0:4000"
|
||||
4
docs/.dockerignore
Normal file
@@ -0,0 +1,4 @@
|
||||
node_modules/
|
||||
|
||||
.git*
|
||||
*.log
|
||||
21
docs/.gitignore
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
# build output
|
||||
dist/
|
||||
# generated types
|
||||
.astro/
|
||||
|
||||
# dependencies
|
||||
node_modules/
|
||||
|
||||
# logs
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
pnpm-debug.log*
|
||||
|
||||
|
||||
# environment variables
|
||||
.env
|
||||
.env.production
|
||||
|
||||
# macOS-specific files
|
||||
.DS_Store
|
||||
9
docs/.prettierignore
Normal file
@@ -0,0 +1,9 @@
|
||||
# Ignore files for PNPM, NPM and YARN
|
||||
pnpm-lock.yaml
|
||||
package-lock.json
|
||||
yarn.lock
|
||||
|
||||
src/components/ui
|
||||
|
||||
# Prettier breaks {/* */} comments in MDX files :/
|
||||
**/*.mdx
|
||||
13
docs/.prettierrc.mjs
Normal file
@@ -0,0 +1,13 @@
|
||||
// .prettierrc.mjs
|
||||
/** @type {import("prettier").Config} */
|
||||
export default {
|
||||
plugins: ['prettier-plugin-astro'],
|
||||
overrides: [
|
||||
{
|
||||
files: '*.astro',
|
||||
options: {
|
||||
parser: 'astro',
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
8
docs/Dockerfile
Normal file
@@ -0,0 +1,8 @@
|
||||
FROM nginx:mainline-alpine AS runner
|
||||
|
||||
COPY ./nginx.conf /etc/nginx/conf.d/default.conf
|
||||
COPY ./dist /usr/share/nginx/html
|
||||
|
||||
EXPOSE 80
|
||||
|
||||
HEALTHCHECK CMD curl --fail http://localhost:80 || exit 1
|
||||
55
docs/README.md
Normal file
@@ -0,0 +1,55 @@
|
||||
# Starlight Starter Kit: Basics
|
||||
|
||||
[](https://starlight.astro.build)
|
||||
|
||||
```
|
||||
npm create astro@latest -- --template starlight
|
||||
```
|
||||
|
||||
[](https://stackblitz.com/github/withastro/starlight/tree/main/examples/basics)
|
||||
[](https://codesandbox.io/p/sandbox/github/withastro/starlight/tree/main/examples/basics)
|
||||
[](https://app.netlify.com/start/deploy?repository=https://github.com/withastro/starlight&create_from_path=examples/basics)
|
||||
[](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fwithastro%2Fstarlight%2Ftree%2Fmain%2Fexamples%2Fbasics&project-name=my-starlight-docs&repository-name=my-starlight-docs)
|
||||
|
||||
> 🧑🚀 **Seasoned astronaut?** Delete this file. Have fun!
|
||||
|
||||
## 🚀 Project Structure
|
||||
|
||||
Inside of your Astro + Starlight project, you'll see the following folders and files:
|
||||
|
||||
```
|
||||
.
|
||||
├── public/
|
||||
├── src/
|
||||
│ ├── assets/
|
||||
│ ├── content/
|
||||
│ │ ├── docs/
|
||||
│ │ └── config.ts
|
||||
│ └── env.d.ts
|
||||
├── astro.config.mjs
|
||||
├── package.json
|
||||
└── tsconfig.json
|
||||
```
|
||||
|
||||
Starlight looks for `.md` or `.mdx` files in the `src/content/docs/` directory. Each file is exposed as a route based on its file name.
|
||||
|
||||
Images can be added to `src/assets/` and embedded in Markdown with a relative link.
|
||||
|
||||
Static assets, like favicons, can be placed in the `public/` directory.
|
||||
|
||||
## 🧞 Commands
|
||||
|
||||
All commands are run from the root of the project, from a terminal:
|
||||
|
||||
| Command | Action |
|
||||
| :------------------------ | :----------------------------------------------- |
|
||||
| `npm install` | Installs dependencies |
|
||||
| `npm run dev` | Starts local dev server at `localhost:4321` |
|
||||
| `npm run build` | Build your production site to `./dist/` |
|
||||
| `npm run preview` | Preview your build locally, before deploying |
|
||||
| `npm run astro ...` | Run CLI commands like `astro add`, `astro check` |
|
||||
| `npm run astro -- --help` | Get help using the Astro CLI |
|
||||
|
||||
## 👀 Want to learn more?
|
||||
|
||||
Check out [Starlight’s docs](https://starlight.astro.build/), read [the Astro documentation](https://docs.astro.build), or jump into the [Astro Discord server](https://astro.build/chat).
|
||||
68
docs/astro.config.mjs
Normal file
@@ -0,0 +1,68 @@
|
||||
import { defineConfig } from "astro/config";
|
||||
|
||||
import icon from "astro-icon";
|
||||
import robotsTxt from "astro-robots-txt";
|
||||
import sitemap from "@astrojs/sitemap";
|
||||
import solid from "@astrojs/solid-js";
|
||||
import starlight from "@astrojs/starlight";
|
||||
import tailwind from "@astrojs/tailwind";
|
||||
|
||||
// https://astro.build/config
|
||||
export default defineConfig({
|
||||
site: "https://trailbase.io",
|
||||
integrations: [
|
||||
icon(),
|
||||
solid(),
|
||||
starlight({
|
||||
title: "TrailBase",
|
||||
customCss: ["./src/tailwind.css"],
|
||||
social: {
|
||||
github: "https://github.com/trailbaseio/trailbase",
|
||||
discord: "https://discord.gg/X8cWs7YC22",
|
||||
},
|
||||
sidebar: [
|
||||
{
|
||||
label: "Getting Started",
|
||||
items: [
|
||||
{
|
||||
label: "Starting Up",
|
||||
slug: "getting-started/starting-up",
|
||||
},
|
||||
{
|
||||
label: "First App",
|
||||
slug: "getting-started/first-app",
|
||||
},
|
||||
{
|
||||
label: "Philosophy",
|
||||
slug: "getting-started/philosophy",
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
label: "Documentation",
|
||||
autogenerate: {
|
||||
directory: "documentation",
|
||||
},
|
||||
},
|
||||
{
|
||||
label: "Comparisons",
|
||||
autogenerate: {
|
||||
directory: "comparison",
|
||||
},
|
||||
},
|
||||
{
|
||||
label: "Reference",
|
||||
autogenerate: {
|
||||
directory: "reference",
|
||||
},
|
||||
},
|
||||
],
|
||||
}),
|
||||
sitemap(),
|
||||
robotsTxt(),
|
||||
tailwind({
|
||||
// Disable the default base styles:
|
||||
applyBaseStyles: false,
|
||||
}),
|
||||
],
|
||||
});
|
||||
16
docs/docker-compose.yml
Normal file
@@ -0,0 +1,16 @@
|
||||
version: "3.9"
|
||||
|
||||
services:
|
||||
trailbase-docs:
|
||||
container_name: trailbase-docs
|
||||
build: .
|
||||
ports:
|
||||
- "127.0.0.1:3036:80/tcp"
|
||||
restart: unless-stopped
|
||||
|
||||
# By default containers get 1024 cpu shares. Setting it to 512 means half
|
||||
# the resources compared to a default container. And 2048 double,
|
||||
# respectively.
|
||||
cpu_shares: 1024
|
||||
mem_limit: 128m
|
||||
oom_score_adj: -200
|
||||
32
docs/nginx.conf
Normal file
@@ -0,0 +1,32 @@
|
||||
server {
|
||||
# TLS termination is done by the reverse proxy.
|
||||
listen 80;
|
||||
listen [::]:80;
|
||||
server_name trailbase_documentation;
|
||||
|
||||
#access_log /var/log/nginx/host.access.log main;
|
||||
|
||||
# File root matching build target location in Dockerfile.
|
||||
root /usr/share/nginx/html;
|
||||
|
||||
# 404 and 500s should load our custom error pages.
|
||||
error_page 404 /404.html;
|
||||
# error_page 500 502 503 504 /50x/index.html;
|
||||
|
||||
location / {
|
||||
# Set long client-side cache TTLs for astro assets. Astro assets carry a
|
||||
# content hash in their filename, thus can be cached safely for ever.
|
||||
location /_astro/ {
|
||||
add_header Cache-Control "public, max-age=31536000, immutable";
|
||||
}
|
||||
location /particles/ {
|
||||
add_header Cache-Control "public, max-age=2592000, immutable";
|
||||
}
|
||||
|
||||
# Try resolve $uri in the following order:
|
||||
# * try $uri first
|
||||
# * then $uri/index.html
|
||||
# * finally fall back to 404 error_page below.
|
||||
try_files $uri $uri/index.html =404;
|
||||
}
|
||||
}
|
||||
37
docs/package.json
Normal file
@@ -0,0 +1,37 @@
|
||||
{
|
||||
"name": "",
|
||||
"type": "module",
|
||||
"version": "0.0.1",
|
||||
"scripts": {
|
||||
"dev": "astro dev",
|
||||
"start": "astro dev",
|
||||
"build": "astro check && astro build",
|
||||
"preview": "astro preview",
|
||||
"astro": "astro",
|
||||
"check": "astro check",
|
||||
"format": "prettier -w tailwind.config.mjs astro.config.mjs src "
|
||||
},
|
||||
"dependencies": {
|
||||
"@astrojs/check": "^0.9.4",
|
||||
"@astrojs/starlight": "^0.28.4",
|
||||
"@astrojs/starlight-tailwind": "^2.0.3",
|
||||
"@astrojs/tailwind": "^5.1.2",
|
||||
"@iconify-json/tabler": "^1.2.6",
|
||||
"astro": "^4.16.7",
|
||||
"astro-icon": "^1.1.1",
|
||||
"chart.js": "^4.4.6",
|
||||
"chartjs-chart-error-bars": "^4.4.3",
|
||||
"chartjs-plugin-deferred": "^2.0.0",
|
||||
"sharp": "^0.33.5",
|
||||
"solid-js": "^1.9.3",
|
||||
"tailwindcss": "^3.4.14",
|
||||
"typescript": "^5.6.3"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@astrojs/sitemap": "^3.2.1",
|
||||
"@astrojs/solid-js": "^4.4.2",
|
||||
"astro-robots-txt": "^1.0.0",
|
||||
"prettier": "^3.3.3",
|
||||
"prettier-plugin-astro": "^0.14.1"
|
||||
}
|
||||
}
|
||||
1
docs/public/favicon.svg
Symbolic link
@@ -0,0 +1 @@
|
||||
../../assets/favicon.svg
|
||||
1
docs/src/assets/flutter_logo.svg
Normal file
|
After Width: | Height: | Size: 9.2 KiB |
BIN
docs/src/assets/logo_512.webp
Normal file
|
After Width: | Height: | Size: 37 KiB |
BIN
docs/src/assets/screenshot.webp
Normal file
|
After Width: | Height: | Size: 106 KiB |
1
docs/src/assets/ts_logo.svg
Normal file
@@ -0,0 +1 @@
|
||||
<svg fill="none" height="512" viewBox="0 0 512 512" width="512" xmlns="http://www.w3.org/2000/svg"><rect fill="#3178c6" height="512" rx="50" width="512"/><rect fill="#3178c6" height="512" rx="50" width="512"/><path clip-rule="evenodd" d="m316.939 407.424v50.061c8.138 4.172 17.763 7.3 28.875 9.386s22.823 3.129 35.135 3.129c11.999 0 23.397-1.147 34.196-3.442 10.799-2.294 20.268-6.075 28.406-11.342 8.138-5.266 14.581-12.15 19.328-20.65s7.121-19.007 7.121-31.522c0-9.074-1.356-17.026-4.069-23.857s-6.625-12.906-11.738-18.225c-5.112-5.319-11.242-10.091-18.389-14.315s-15.207-8.213-24.18-11.967c-6.573-2.712-12.468-5.345-17.685-7.9-5.217-2.556-9.651-5.163-13.303-7.822-3.652-2.66-6.469-5.476-8.451-8.448-1.982-2.973-2.974-6.336-2.974-10.091 0-3.441.887-6.544 2.661-9.308s4.278-5.136 7.512-7.118c3.235-1.981 7.199-3.52 11.894-4.615 4.696-1.095 9.912-1.642 15.651-1.642 4.173 0 8.581.313 13.224.938 4.643.626 9.312 1.591 14.008 2.894 4.695 1.304 9.259 2.947 13.694 4.928 4.434 1.982 8.529 4.276 12.285 6.884v-46.776c-7.616-2.92-15.937-5.084-24.962-6.492s-19.381-2.112-31.066-2.112c-11.895 0-23.163 1.278-33.805 3.833s-20.006 6.544-28.093 11.967c-8.086 5.424-14.476 12.333-19.171 20.729-4.695 8.395-7.043 18.433-7.043 30.114 0 14.914 4.304 27.638 12.912 38.172 8.607 10.533 21.675 19.45 39.204 26.751 6.886 2.816 13.303 5.579 19.25 8.291s11.086 5.528 15.415 8.448c4.33 2.92 7.747 6.101 10.252 9.543 2.504 3.441 3.756 7.352 3.756 11.733 0 3.233-.783 6.231-2.348 8.995s-3.939 5.162-7.121 7.196-7.147 3.624-11.894 4.771c-4.748 1.148-10.303 1.721-16.668 1.721-10.851 0-21.597-1.903-32.24-5.71-10.642-3.806-20.502-9.516-29.579-17.13zm-84.159-123.342h64.22v-41.082h-179v41.082h63.906v182.918h50.874z" fill="#fff" fill-rule="evenodd"/></svg>
|
||||
|
After Width: | Height: | Size: 1.7 KiB |
168
docs/src/components/BarChart.tsx
Normal file
@@ -0,0 +1,168 @@
|
||||
import { onCleanup, createEffect } from "solid-js";
|
||||
import {
|
||||
Chart,
|
||||
type ChartData,
|
||||
type Tick,
|
||||
type ScaleOptions,
|
||||
} from "chart.js/auto";
|
||||
import {
|
||||
BarWithErrorBarsController,
|
||||
BarWithErrorBar,
|
||||
} from "chartjs-chart-error-bars";
|
||||
import ChartDeferred from "chartjs-plugin-deferred";
|
||||
|
||||
import { createDarkMode } from "@/lib/darkmode";
|
||||
|
||||
Chart.register(BarWithErrorBarsController, BarWithErrorBar, ChartDeferred);
|
||||
|
||||
interface BarChartProps {
|
||||
data: ChartData<"bar">;
|
||||
scales?: { [key: string]: ScaleOptions<"linear"> };
|
||||
}
|
||||
|
||||
export function BarChart(props: BarChartProps) {
|
||||
const darkMode = createDarkMode();
|
||||
|
||||
let ref: HTMLCanvasElement | undefined;
|
||||
let chart: Chart | undefined;
|
||||
|
||||
createEffect(() => {
|
||||
chart?.destroy();
|
||||
|
||||
chart = new Chart<"bar">(ref!, {
|
||||
type: "bar",
|
||||
data: props.data,
|
||||
options: {
|
||||
scales: adjustScaleColor(darkMode(), {
|
||||
y: {},
|
||||
x: {},
|
||||
...props.scales,
|
||||
}),
|
||||
maintainAspectRatio: false,
|
||||
plugins: {
|
||||
// Defers rendering and animation until on screen.
|
||||
deferred: {
|
||||
yOffset: "30%", // defer until 50% of the canvas height are inside the viewport
|
||||
delay: 200, // delay of 200 ms after the canvas is considered inside the viewport
|
||||
},
|
||||
colors: {
|
||||
enabled: true,
|
||||
forceOverride: false,
|
||||
},
|
||||
legend: {
|
||||
position: "bottom",
|
||||
labels: {
|
||||
color: darkMode() ? "white" : undefined,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
onCleanup(() => chart?.destroy());
|
||||
|
||||
return (
|
||||
<div class="canvas-container w-full h-full">
|
||||
<canvas ref={ref}></canvas>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
function adjustScaleColor(
|
||||
dark: boolean,
|
||||
scales: { [key: string]: ScaleOptions<"linear"> },
|
||||
) {
|
||||
for (const axis of Object.keys(scales)) {
|
||||
const scale = scales[axis];
|
||||
|
||||
scale.ticks = {
|
||||
...scales[axis].ticks,
|
||||
color: dark ? "white" : undefined,
|
||||
};
|
||||
|
||||
scale.title = {
|
||||
...scale.title,
|
||||
color: dark ? "white" : undefined,
|
||||
};
|
||||
}
|
||||
|
||||
return scales;
|
||||
}
|
||||
|
||||
interface BarChartWithErrorsProps {
|
||||
data: ChartData<"barWithErrorBars">;
|
||||
yTickFormatter?: (
|
||||
value: number | string,
|
||||
index: number,
|
||||
ticks: Tick[],
|
||||
) => string;
|
||||
}
|
||||
|
||||
export function BarChartWithErrors(props: BarChartWithErrorsProps) {
|
||||
const darkMode = createDarkMode();
|
||||
|
||||
let ref: HTMLCanvasElement | undefined;
|
||||
let chart: Chart | undefined;
|
||||
|
||||
createEffect(() => {
|
||||
chart?.destroy();
|
||||
|
||||
const scaleIds = props.data.datasets.map((e) => e.yAxisID ?? "y");
|
||||
const yScaleStyle = {
|
||||
ticks: {
|
||||
color: darkMode() ? "white" : undefined,
|
||||
display: true,
|
||||
callback: props.yTickFormatter,
|
||||
},
|
||||
grid: {
|
||||
display: true,
|
||||
lineWidth: 0,
|
||||
tickWidth: 0.5,
|
||||
tickLength: 2,
|
||||
tickColor: darkMode() ? "white" : "black",
|
||||
},
|
||||
};
|
||||
|
||||
chart = new Chart<"barWithErrorBars">(ref!, {
|
||||
type: BarWithErrorBarsController.id,
|
||||
data: props.data,
|
||||
options: {
|
||||
scales: {
|
||||
x: {
|
||||
ticks: {
|
||||
color: darkMode() ? "white" : undefined,
|
||||
},
|
||||
},
|
||||
...Object.fromEntries(scaleIds.map((id) => [id, yScaleStyle])),
|
||||
},
|
||||
maintainAspectRatio: false,
|
||||
plugins: {
|
||||
// Defers rendering and animation until on screen.
|
||||
deferred: {
|
||||
yOffset: "30%", // defer until 50% of the canvas height are inside the viewport
|
||||
delay: 200, // delay of 200 ms after the canvas is considered inside the viewport
|
||||
},
|
||||
colors: {
|
||||
enabled: true,
|
||||
forceOverride: false,
|
||||
},
|
||||
legend: {
|
||||
position: "bottom",
|
||||
labels: {
|
||||
color: darkMode() ? "white" : undefined,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
onCleanup(() => chart?.destroy());
|
||||
|
||||
return (
|
||||
<div class="canvas-container w-full h-full">
|
||||
<canvas ref={ref}></canvas>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
85
docs/src/components/LineChart.tsx
Normal file
@@ -0,0 +1,85 @@
|
||||
import { onCleanup, createEffect } from "solid-js";
|
||||
import { Chart, type ChartData, type ScaleOptions } from "chart.js/auto";
|
||||
import ChartDeferred from "chartjs-plugin-deferred";
|
||||
|
||||
import { createDarkMode } from "@/lib/darkmode";
|
||||
|
||||
Chart.register(ChartDeferred);
|
||||
|
||||
interface LineChartProps {
|
||||
data: ChartData<"line">;
|
||||
scales?: { [key: string]: ScaleOptions<"linear"> };
|
||||
}
|
||||
|
||||
export function LineChart(props: LineChartProps) {
|
||||
const darkMode = createDarkMode();
|
||||
|
||||
let ref: HTMLCanvasElement | undefined;
|
||||
let chart: Chart | undefined;
|
||||
|
||||
createEffect(() => {
|
||||
chart?.destroy();
|
||||
|
||||
chart = new Chart(ref!, {
|
||||
type: "line",
|
||||
data: props.data,
|
||||
options: {
|
||||
scales: adjustScaleColor(darkMode(), {
|
||||
...props.scales,
|
||||
}),
|
||||
maintainAspectRatio: false,
|
||||
plugins: {
|
||||
// Defers rendering and animation until on screen.
|
||||
deferred: {
|
||||
yOffset: "30%", // defer until 50% of the canvas height are inside the viewport
|
||||
delay: 200, // delay of 200 ms after the canvas is considered inside the viewport
|
||||
},
|
||||
colors: {
|
||||
enabled: true,
|
||||
forceOverride: false,
|
||||
},
|
||||
legend: {
|
||||
position: "bottom",
|
||||
labels: {
|
||||
color: darkMode() ? "white" : undefined,
|
||||
},
|
||||
},
|
||||
},
|
||||
interaction: {
|
||||
mode: "nearest",
|
||||
axis: "x",
|
||||
intersect: false,
|
||||
},
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
onCleanup(() => chart?.destroy());
|
||||
|
||||
return (
|
||||
<div class="canvas-container w-full h-full">
|
||||
<canvas ref={ref}></canvas>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
function adjustScaleColor(
|
||||
dark: boolean,
|
||||
scales: { [key: string]: ScaleOptions<"linear"> },
|
||||
) {
|
||||
for (const axis of Object.keys(scales)) {
|
||||
const scale = scales[axis];
|
||||
|
||||
scale.ticks = {
|
||||
...scales[axis].ticks,
|
||||
color: dark ? "white" : undefined,
|
||||
};
|
||||
|
||||
scale.title = {
|
||||
...scale.title,
|
||||
color: dark ? "white" : undefined,
|
||||
};
|
||||
}
|
||||
|
||||
return scales;
|
||||
}
|
||||
6
docs/src/content/config.ts
Normal file
@@ -0,0 +1,6 @@
|
||||
import { defineCollection } from "astro:content";
|
||||
import { docsSchema } from "@astrojs/starlight/schema";
|
||||
|
||||
export const collections = {
|
||||
docs: defineCollection({ schema: docsSchema() }),
|
||||
};
|
||||
21
docs/src/content/docs/_roadmap.md
Normal file
@@ -0,0 +1,21 @@
|
||||
Over time, we would like to make TrailBase the best application base it can be.
|
||||
Tell us what's missing, what could be better, and what smells.
|
||||
Independently, we're very open to contributions; just talk to us first so we
|
||||
can figure out how any feature will fit into the overall picture and minimize
|
||||
friction.
|
||||
For context, some larger features we have on our Roadmap:
|
||||
|
||||
- Realtime/notification APIs for subscribing to data changes.
|
||||
- S3 buckets and other cloud storage. The backend already supports it but it isn't wired up.
|
||||
- Support more Social/OAuth providers.
|
||||
- More configurable authentication, more customizable auth UI, and multi-factor.
|
||||
- Service accounts for authenticating and authorizing backends rather than end-users.
|
||||
- Custom scheduled operations. Also enabling more time series use-cases.
|
||||
- Many SQLite databases: imagine a separate database by tenant or user.
|
||||
- Maybe integrate an ES6 JavaScript runtime or similar.
|
||||
- Streamline code-generation, the bindings life-cycle, and first-party
|
||||
support for more languages.
|
||||
- Geo-spatial extensions and Geo-Ip for logs.
|
||||
- Maybe TLS termination and proxy capabilities.
|
||||
- Consider a GraphQL layer to address fan-out and integrate external
|
||||
resources.
|
||||
96
docs/src/content/docs/comparison/pocketbase.mdx
Normal file
@@ -0,0 +1,96 @@
|
||||
---
|
||||
title: PocketBase
|
||||
description: Comparing TrailBase & PocketBase.
|
||||
---
|
||||
|
||||
Firstly, PocketBase is amazing! It blazed the trail for single-file, SQLite
|
||||
application bases, is incredibly easy to use, and offers a polished experience. Gani,
|
||||
the person behind it, is a mad scientist.
|
||||
|
||||
At the surface-level there are a lot of similarities between PocketBase and
|
||||
TrailBase. In this comparison, we'll dive a little deeper and have a closer
|
||||
look at the technical as well as philosophical differences between the two.
|
||||
|
||||
### Goals & Aspirations
|
||||
|
||||
TrailBase was born out of admiration for PocketBase, trying to move the needle
|
||||
in a few areas:
|
||||
|
||||
- Less abstraction, embracing standards (SQL[^1], JWT, UUID), and untethered access
|
||||
to SQLite/libsql[^2] including features such as recursive CTEs, virtual tables
|
||||
and vector search.
|
||||
The goal is to not get in your way and to avoid lock-in by bespoke solutions,
|
||||
making it easier to adopt TrailBase either fully or piecemeal, as well as
|
||||
getting rid of it based on your product needs.
|
||||
- Be just as easy to self-host while making it even easier to manage a fleet of
|
||||
deployments across integration tests, development, and production by separating
|
||||
data, configuration, and secrets.
|
||||
- Super-powers through SQLite extensions (regex, GIS, ...) including your own [^3].
|
||||
- Be lightweight enough to rival plain SQLite performance at least for
|
||||
higher-level languages.
|
||||
- Be simple and flexible enough to be an attractive alternative to plain SQLite
|
||||
for serving **and** data analysis use-cases.
|
||||
|
||||
### Differences
|
||||
|
||||
It's worth noting that PocketBase and TrailBase have a lot in common: they are
|
||||
both single-file, static binaries providing data APIs, authentication and file
|
||||
storage on top of SQLite.
|
||||
That said and for the sake of this article, let's look at some of the
|
||||
differences and extra features that PocketBase provides:
|
||||
|
||||
- TrailBase does not yet provide realtime APIs allowing clients to subscribe to
|
||||
data changes.
|
||||
- PocketBase lets you register custom endpoints in
|
||||
[ES5 JavaScript](https://pocketbase.io/docs/js-overview/).
|
||||
- PocketBase can also be used as a Go framework, i.e. instead of using the
|
||||
binary release one can build a custom binary with custom endpoints.
|
||||
|
||||
Likewise, TrailBase has a few nifty tricks up its sleeve:
|
||||
|
||||
- Language independent type-safety via JSON Schemas with strict typing
|
||||
being enforced all the way down to the database level[^4].
|
||||
- First-class access to all of SQLite/libsql's features and capabilities.
|
||||
- A simple auth UI.
|
||||
- Stateless JWT auth-tokens for simple, hermetic authentication in other
|
||||
backends.
|
||||
- Efficient and stable cursor-based pagination.
|
||||
- An admin UI that "works" on small screens and mobile :)
|
||||
|
||||
### Language & Performance
|
||||
|
||||
Another difference is that PocketBase and TrailBase are written in Go and Rust,
|
||||
respectively, which may matter to you especially when modifying either or using
|
||||
them as "frameworks".
|
||||
|
||||
Beyond personal preferences, both languages are speedy options in practice.
|
||||
That said, Rust's lack of a runtime and lower FFI overhead should make it the
|
||||
more performant choice.
|
||||
To our own surprise, we found a significant gap. TrailBase is roughly 3.5x to
|
||||
7x faster, in our [simplistic micro-benchmarks](/reference/benchmarks)
|
||||
depending on the use-case.
|
||||
Not to toot our own horn, this is mostly thanks to combining a very low
|
||||
overhead language, one of the fastest HTTP servers, and incredibly quick
|
||||
SQLite/libsql.
|
||||
|
||||
<div class="h-[30px]" />
|
||||
|
||||
---
|
||||
|
||||
[^1]: Maybe more in line with SupaBase's philosophy. We suspect that PocketBase
|
||||
relies on schema metadata which, by construction, requires alterations to be
|
||||
mediated through PocketBase APIs to stay in sync.
|
||||
|
||||
[^2]: We believe that SQL is a ubiquitous, evergreen technology, which in and of itself
|
||||
is already a high-level abstraction for efficient, unified cross-database
|
||||
access.
|
||||
Even higher-level abstractions, such as ORMs, often look nice for simple
|
||||
examples but quickly fall flat for more complex ones. They're certainly
|
||||
bespoke, non-transferable knowledge, and increase vendor lock-in.
|
||||
|
||||
[^3]:
|
||||
All extensions can be built into a small, standalone shared library and
|
||||
imported by vanilla SQLite avoiding vendor lock-in.
|
||||
|
||||
[^4]: SQLite is not strictly typed by default. Instead, column types merely express a
|
||||
type affinity for value conversions.
|
||||
44
docs/src/content/docs/comparison/supabase.mdx
Normal file
@@ -0,0 +1,44 @@
|
||||
---
|
||||
title: SupaBase
|
||||
description: Comparing TrailBase & SupaBase.
|
||||
---
|
||||
|
||||
Both SupaBase and Postgres are amazing. Comparing either to TrailBase and
|
||||
SQLite, respectively, is challenging given how different they are
|
||||
architecturally.
|
||||
|
||||
For one, both Postgres and SupaBase are heck of a lot more modular. "Rule 34" of
|
||||
the database world: if you can think of it, there's a Postgres extension for it.
|
||||
And SupaBase does an excellent job at making all that flexibility available
|
||||
without getting in the way and giving you untethered access while further
|
||||
expanding upon it.
|
||||
In many ways, TrailBase is trying to eventually do the same for SQLite:
|
||||
combining PocketBase's simplicity with SupaBase's layering.
|
||||
|
||||
One foundational difference is that Postgres itself is a multi-user,
|
||||
client-server architecture already.
|
||||
Extending it by building layered services around it, like SupaBase did,
|
||||
feels very natural.
|
||||
However, SQLite is neither a multi-user system nor a server. Hence, extending
|
||||
it by embedding it into a monolith, like PocketBase did, feels fairly natural
|
||||
as well.
|
||||
There are ups and downs to either approach. The layered service approach, for
|
||||
example, allows for isolated failure domains and scaling of individual
|
||||
components [^1]. The monolith, on the other hand, with its lesser need for modularity
|
||||
can have fewer interaction points and fewer moving parts, making it fundamentally
|
||||
simpler, cheaper, and
|
||||
[lower overhead (10+x performance difference)](/reference/benchmarks).
|
||||
|
||||
Ultimately, the biggest difference is that SupaBase is a polished product with
|
||||
a lot of mileage under its belt. Our simpler architecture will hopefully let us
|
||||
get there but for now SupaBase is our north star.
|
||||
|
||||
<div class="h-[30px]" />
|
||||
|
||||
---
|
||||
|
||||
[^1]:
|
||||
For example, in our performance testing we've found that PostgREST,
|
||||
SupaBase's RESTful API layer in front of Postgres, is relatively resource
|
||||
hungry. This might not be an issue since one can simply scale by pointing
|
||||
many independent instances at the same database instance.
|
||||
11
docs/src/content/docs/contact.mdx
Normal file
@@ -0,0 +1,11 @@
|
||||
---
|
||||
title: Contact
|
||||
template: splash
|
||||
---
|
||||
|
||||
<div class="flex flex-col">
|
||||
<span>Sebastian</span>
|
||||
<span>contact [at] trailbase.io</span>
|
||||
|
||||
<span>8047 Zurich, Switzerland</span>
|
||||
</div>
|
||||
56
docs/src/content/docs/documentation/APIs/query_apis.mdx
Normal file
@@ -0,0 +1,56 @@
|
||||
---
|
||||
title: Query APIs
|
||||
---
|
||||
|
||||
import { Aside } from "@astrojs/starlight/components";
|
||||
|
||||
Query APIs are a more free-form and type-unsafe way of exposing data using
|
||||
virtual tables based on user inputs and stored procedures. Please make sure to
|
||||
take a look at [record APIs](/documentation/apis/record_apis) first. Views and
|
||||
generated columns may be a better fit for transforming data if no explicit user
|
||||
input is required.
|
||||
|
||||
<Aside type="note" title="Note">
|
||||
Query APIs fill a gap that in other frameworks is often filled by custom
|
||||
handlers. TrailBase may go in this direction as well, either with custom Axum
|
||||
handlers or by embedding another runtime. At least for the time being, Query APIs
|
||||
based on stored procedures are simply a very constrained (e.g. read-only) and
|
||||
performant way to achieve similar goals.
|
||||
</Aside>
|
||||
|
||||
## Example
|
||||
|
||||
Using migrations and sqlean's `define` we can define a table query with unbound
|
||||
inputs (see the placeholder `$1`):
|
||||
|
||||
```sql
|
||||
CREATE VIRTUAL TABLE
|
||||
_is_editor
|
||||
USING
|
||||
define((SELECT EXISTS (SELECT * FROM editors WHERE user = $1) AS is_editor));
|
||||
```
|
||||
|
||||
Subsequently, an API can be configured to query the newly created `VIRTUAL
|
||||
TABLE`, also binding URL query parameters as inputs to the above placeholder.
|
||||
|
||||
```proto
|
||||
query_apis: [
|
||||
{
|
||||
name: "is_editor"
|
||||
virtual_table_name: "_is_editor"
|
||||
params: [
|
||||
{
|
||||
name: "user"
|
||||
type: BLOB
|
||||
}
|
||||
]
|
||||
acl: WORLD
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
Finally, we can query the API, e.g. using curl:
|
||||
|
||||
```bash
|
||||
curl -g 'localhost:4000/api/query/v1/is_editor?user=<b64_user_id>'
|
||||
```
|
||||
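Or, equivalently, from TypeScript. The following is only a minimal sketch using `fetch`: the server address, the URL-safe base64 user id, and the shape of the returned JSON payload are assumptions here.

```ts
// Minimal sketch: call the "is_editor" query API from TypeScript.
// The address, the URL-safe base64 user id, and the response payload shape
// (a single row exposing an "is_editor" column) are assumptions.
async function isEditor(address: string, userId: string): Promise<boolean> {
  const response = await fetch(
    `${address}/api/query/v1/is_editor?user=${encodeURIComponent(userId)}`,
  );
  if (!response.ok) {
    throw Error(`Query API request failed: ${response.status}`);
  }
  const row = await response.json();
  return Boolean(row.is_editor);
}
```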
299
docs/src/content/docs/documentation/APIs/record_apis.mdx
Normal file
@@ -0,0 +1,299 @@
|
||||
---
|
||||
title: Record APIs
|
||||
---
|
||||
|
||||
import { Aside } from "@astrojs/starlight/components";
|
||||
|
||||
The easiest and most type-safe path to access your `TABLE`s and `VIEW`s is to use
|
||||
TrailBase's restful CRUD _Record APIs_.
|
||||
The only requirements are:
|
||||
|
||||
- Tables and views need to be `STRICT`ly[^1] typed to guarantee type-safety all the
|
||||
way from your records, via JSON schema, to your client-side language bindings [^2].
|
||||
- They need to have a sequential primary key column to allow for stable sorting
|
||||
and thus efficient cursor-based pagination. Either an explicit `INTEGER` or
|
||||
UUIDv7 `PRIMARY KEY` will do, including `FOREIGN KEY` columns.
|
||||
|
||||
## Configuring APIs
|
||||
|
||||
Record APIs can be configured through the admin dashboard or directly in
|
||||
TrailBase's configuration file.
|
||||
Note that there are certain features that aren't yet exposed in the dashboard,
|
||||
like supporting multiple APIs based on the same table or view.
|
||||
In this case you can drop down to the configuration to set up as many as you
|
||||
like, allowing for a lot of extra flexibility around permissions and visibility.
|
||||
|
||||
An example API setup for managing user avatars:
|
||||
|
||||
```json
|
||||
record_apis: [
|
||||
{
|
||||
name: "_user_avatar"
|
||||
table_name: "_user_avatar"
|
||||
conflict_resolution: REPLACE
|
||||
autofill_missing_user_id_columns: true
|
||||
acl_world: [READ]
|
||||
acl_authenticated: [CREATE, READ, UPDATE, DELETE]
|
||||
create_access_rule: "_REQ_.user IS NULL OR _REQ_.user = _USER_.id"
|
||||
update_access_rule: "_ROW_.user = _USER_.id"
|
||||
delete_access_rule: "_ROW_.user = _USER_.id"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
A quick explanation:
|
||||
|
||||
* The `name` needs to be unique. It's what is used to access the API via
|
||||
`<https://myhost>/api/v1/records/<name>/`.
|
||||
* `table_name` references the table or view that is being exposed.
|
||||
* `conflict_resolution` declares what should happen if a newly created record is
|
||||
conflicting with an existing one.
|
||||
* `autofill_missing_user_id_columns` lets you omit fields for columns with a foreign
|
||||
key relationship to `_user(id)`. The field will then be filled with the
|
||||
credentials of the authenticated user. In most cases, this should probably be off; it is
|
||||
only useful if you cannot explicitly provide the user id yourself, e.g. in a
|
||||
static HTML form.
|
||||
* `acl_world` and `acl_authenticated` define that anyone can read avatars but
|
||||
only authenticated users can modify them. The following `access_rules` further narrow
|
||||
mutations to records where the `user` column (or request field for insertions)
|
||||
matches. In other words, user X cannot modify user Y's avatar.
|
||||
|
||||
|
||||
### Access Control
|
||||
|
||||
Access can be controlled through a combination of a simple ACL-based system (a
|
||||
matrix of who and what) and custom SQL access rules of the nature:
|
||||
`f(req, user, row) -> bool`.
|
||||
Generally, the ACLs are checked first and then the access rules are evaluated
|
||||
when present.
|
||||
|
||||
For example, to validate that the requestor provided a secret key and is a member
|
||||
of a group "mygroup":
|
||||
|
||||
```sql
|
||||
(_REQ_.secret = 'foo' AND EXISTS(
|
||||
SELECT 1 FROM groups
|
||||
WHERE
|
||||
groups.member = _USER_.id
|
||||
AND groups.name = 'mygroup'
|
||||
))
|
||||
```
|
||||
|
||||
* `_REQ_` is an injected sub-query containing the request fields. It is
|
||||
available in access rules for `CREATE` and `UPDATE` operations.
|
||||
* Similarly, `_ROW_` is a sub-query of the target record. It is available in
|
||||
access rules for `READ`, `UPDATE`, and `DELETE` operations.
|
||||
* Lastly, `_USER_.id` references the id of the currently authenticated user and
|
||||
`NULL` otherwise.
|
||||
|
||||
Independently, you can use `VIEW`s to filter which rows and columns of
|
||||
your `TABLE`s should be accessible.
|
||||
|
||||
#### Building access groups and capabilities
|
||||
|
||||
As hinted at by the example above, the SQL access rules can be used to
|
||||
build higher-level access protection such as group ACLs or capabilities.
|
||||
What makes the most sense in your case, is very application dependent.
|
||||
The `<repo>/examples/blog` has an "editor" group to control who can write blog
|
||||
posts.
|
||||
|
||||
Somewhat on a tangent and pretty meta, group and capability tables can
|
||||
themselves be exposed via Record APIs.
|
||||
This can be used to programmatically manage permissions, e.g. for building a
|
||||
moderation dashboard.
|
||||
When exposing authorization primitives, make sure the permissions are
|
||||
appropriately tight to avoid permission escalations.
|
||||
|
||||
### Write-only columns
|
||||
|
||||
Columns with names starting with an underscore can be written on insert or
|
||||
update but are hidden on reads. This is meant as a convenient convention to
|
||||
allow for internal data fields, e.g. hiding the record owner in an otherwise public
|
||||
data set or hiding a user's internal credit rating from their profile. A
|
||||
similar effect could otherwise be achieved by exposing a table for inserts and
|
||||
updates only while proxying reads through a `VIEW`.
|
||||
|
||||
<Aside type="note" title="Unhiding">
|
||||
Note that views can also be used to rename columns and thus expose hidden columns
|
||||
in a read-only fashion.
|
||||
</Aside>
|
||||
|
||||
## Accessing Record APIs
|
||||
|
||||
After configuring the APIs and setting up permissions, record APIs expose six
|
||||
main endpoints[^3]:
|
||||
|
||||
* **C**reate: endpoint for inserting new and potentially overwriting existing records
|
||||
depending on the conflict resolution strategy.<br/>
|
||||
`POST /api/v1/records/<record_api_name>`
|
||||
* **R**ead: endpoint for reading specific records given the record id.<br/>
|
||||
`GET /api/v1/records/<record_api_name>/<url_safe_b64_record_id>`
|
||||
* **U**pdate: partial updates to existing records given a record id and a subset of fields <br/>
|
||||
`PATCH /api/v1/records/<record_api_name>/<url_safe_b64_record_id>`
|
||||
* **D**elete: endpoint for deleting a record given a record id. <br/>
|
||||
`DELETE /api/v1/records/<record_api_name>/<url_safe_b64_record_id>`
|
||||
* List: endpoint for listing, filtering and sorting records based on the
|
||||
configured read access rule and provided filters.<br/>
|
||||
`GET /api/v1/records/<record_api_name>?<params>`
|
||||
* Schema: endpoint for reading the API's JSON schema definition. Can be used for
|
||||
introspection and to drive code generation.<br/>
|
||||
`GET /api/v1/records/<record_api_name>/schema`
|
||||
|
||||
All of the above endpoints can be interacted with through requests that are
|
||||
either JSON encoded, url-encoded, or `multipart/form-data` encoded, which makes
|
||||
them accessible via rich client-side applications, progressive web apps, and
|
||||
static HTML forms alike.
|
||||
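To make the endpoint shapes concrete, here is a minimal TypeScript sketch exercising them with plain `fetch`. The record API name (`movies`), the field names, the create response shape, and the omitted auth headers are assumptions for illustration only.

```ts
// Minimal sketch of the CRUD endpoints above using plain fetch. The API name
// ("movies"), the fields, and the create response shape ({ id }) are
// assumptions; authentication headers are omitted for brevity.
const base = "https://myhost/api/v1/records/movies";

// Create a new record.
const created = await fetch(base, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ title: "Alien", watch_time_min: 117 }),
});
const { id } = await created.json();

// Read it back.
const record = await (await fetch(`${base}/${id}`)).json();

// Partially update a subset of fields.
await fetch(`${base}/${id}`, {
  method: "PATCH",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ watch_time_min: 120 }),
});

// Delete it again.
await fetch(`${base}/${id}`, { method: "DELETE" });
```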
|
||||
### Listing, filtering & sorting records
|
||||
|
||||
Using the `GET /api/v1/records/<record_api_name>?<params>` endpoint and given
|
||||
sufficient permissions, one can query records based on the given `read_access_rule`
|
||||
and query parameters.
|
||||
|
||||
Parameters:
|
||||
|
||||
* Pagination can be controlled with two parameters: `limit=N` (with a hard
|
||||
limit of 1024) and `cursor=<primary key>`.
|
||||
* Ordering can be controlled via `order=[[+-]?<column_name>]+`, e.g.
|
||||
`order=created,-rank`, which would sort records first by their `created`
|
||||
column in ascending order (same as "+") and then by the `rank` column in
|
||||
descending order due to the "-".
|
||||
* Lastly, one can filter records by matching against one or more columns like
|
||||
`<column_name>[op]=<value>`, e.g. `revenue[gt]=0` to request only records
|
||||
with revenue values "greater than" 0. The supported operators are:
|
||||
* equal: the default when the operator is omitted, e.g. `?success=TRUE`.
|
||||
* **not**|**ne**: not equal
|
||||
* **gte**: greater-than-equal
|
||||
* **gt**: greater-than
|
||||
* **lte**: less-than-equal
|
||||
* **lt**: less-than
|
||||
* **like**: SQL `LIKE` operator
|
||||
* **re**: SQL `REGEXP` operator
|
||||
|
||||
For example, to query the 10 highest grossing movies with a watch time less
|
||||
than 2 hours and an actor called John, one could query:
|
||||
|
||||
```bash
|
||||
curl -g '<address>/api/v1/records/movies?limit=10&order=-grossing&watch_time_min[lt]=120&actors[like]=%John%'
|
||||
```
|
||||
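The same listing query can also be assembled programmatically, e.g. with `URLSearchParams`. This is only a sketch reusing the hypothetical `movies` API and columns from the example above:

```ts
// Build the listing query from above programmatically. The "movies" API and
// its columns mirror the hypothetical example in the curl invocation.
const params = new URLSearchParams({
  limit: "10",
  order: "-grossing", // descending by the "grossing" column
});
params.set("watch_time_min[lt]", "120");
params.set("actors[like]", "%John%");

const movies = await (
  await fetch(`https://myhost/api/v1/records/movies?${params}`)
).json();
```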
|
||||
## File Upload
|
||||
|
||||
Record APIs can also support file uploads and downloads. There's some special
|
||||
handling in place so that only metadata is stored in the underlying table while
|
||||
the actual files are kept in an object store.
|
||||
|
||||
By adding a `TEXT` column with a `CHECK(jsonschema('std.FileUpload'))`
|
||||
constraint to your `TABLE`, you instruct TrailBase to store file metadata as
|
||||
defined by the "std.FileUpload" JSON schema and write the contents off to
|
||||
object storage.
|
||||
Files can then be uploaded by sending the contents as part of your JSON or
|
||||
`multipart/form-data` POST request.
|
||||
Downloading files is slightly different, since reading the column through
|
||||
record APIs will only yield the metadata. There's a dedicated GET API endpoint
|
||||
for file downloads:
|
||||
`/api/v1/records/<record_api_name>/<record_id>/file/<column_name>`
|
||||
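For example, a browser client could upload a file and construct its download URL roughly as follows. This is a sketch only: the record API name (`_user_avatar` from the configuration example above), the column name `file`, and the create response shape are assumptions.

```ts
// Sketch: upload a file via multipart/form-data and construct its download
// URL. The API name ("_user_avatar"), the file column name ("file"), and the
// create response shape ({ id }) are assumptions.
async function uploadAvatar(file: File): Promise<string> {
  const form = new FormData();
  form.append("file", file);

  const response = await fetch("https://myhost/api/v1/records/_user_avatar", {
    method: "POST",
    body: form,
  });
  const { id } = await response.json();
  return id;
}

// Downloads go through the dedicated file endpoint rather than the record read.
function avatarUrl(recordId: string): string {
  return `https://myhost/api/v1/records/_user_avatar/${recordId}/file/file`;
}
```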
|
||||
|
||||
<Aside type="note" title="S3">
|
||||
In principle, TrailBase can also use S3 object storage; however, the settings
|
||||
aren't yet wired through. Currently uploads are stored under
|
||||
`--data-dir/uploads` in the local file system.
|
||||
</Aside>
|
||||
|
||||
## Custom JSON Schemas
|
||||
|
||||
Akin to `std.FileUpload` above, you can register your own nested JSON schemas
|
||||
to be used with column `CHECK`s.
|
||||
For now, the dashboard only allows viewing all registered JSON schemas, however
|
||||
you can register schemas using the configuration:
|
||||
|
||||
```json
|
||||
schemas: [
|
||||
{
|
||||
name: "simple_schema"
|
||||
schema:
|
||||
'{'
|
||||
' "type": "object",'
|
||||
' "properties": {'
|
||||
' "name": { "type": "string" },'
|
||||
' "obj": { "type": "object" }'
|
||||
' },'
|
||||
' "required": ["name"]'
|
||||
'}'
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
Once registered, schemas can be added as column constraints:
|
||||
|
||||
```sql
|
||||
CREATE TABLE test (
|
||||
simple TEXT CHECK(jsonschema('simple_schema')),
|
||||
|
||||
-- ...
|
||||
) STRICT;
|
||||
```
|
||||
|
||||
When generating new client-side bindings for a table or view with such nested
|
||||
schemas, they will be included, ensuring type-safety all the way to the
|
||||
client-side APIs.
|
||||
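For illustration only, a generated TypeScript binding for the `test` table above might look roughly like this; the actual names and shapes produced by the code generator may differ:

```ts
// Hypothetical illustration of generated client-side types for the "test"
// table with its "simple_schema"-constrained column. Actual generated output
// may differ.
interface SimpleSchema {
  name: string;
  obj?: Record<string, unknown>;
}

interface TestRecord {
  // Typed with the nested schema instead of an opaque JSON string.
  simple?: SimpleSchema;
}
```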
|
||||
### Tangent: Querying JSON
|
||||
|
||||
Independent of type-safety and Record APIs,
|
||||
[SQLite](https://www.sqlite.org/json1.html) has first-class support for
|
||||
querying nested properties of columns containing JSON in textual or binary
|
||||
format [^4].
|
||||
For example, given a table:
|
||||
|
||||
```sql
|
||||
CREATE TABLE items (json TEXT NOT NULL);
|
||||
|
||||
INSERT INTO items (json) VALUES ('{"name": "House", "color": "blue"}');
|
||||
INSERT INTO items (json) VALUES ('{"name": "Tent", "color": "red"}');
|
||||
```
|
||||
|
||||
You can query the names of red items:
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
json->>'name' AS name
|
||||
FROM
|
||||
items
|
||||
WHERE
|
||||
json->>'color' = 'red';
|
||||
```
|
||||
|
||||
Note that this requires SQLite to scan all rows and deserialize the JSON.
|
||||
Instead, storing the color of items in a separate, indexed column and filtering
|
||||
on it would be a lot more efficient.
|
||||
Yet, using JSON for complex, structured, or denormalized data can be a powerful
|
||||
addition to your toolbox.
|
||||
|
||||
|
||||
<div class="h-[50px]" />
|
||||
|
||||
---
|
||||
|
||||
[^1]:
|
||||
By default, SQLite tables are not strictly typed. Column types merely express
|
||||
type-affinities. Unless tables are explicitly created as `STRICT`, columns can
|
||||
store any data type.
|
||||
|
||||
[^2]:
|
||||
Views are more tricky to strictly type, since they're the result of an
|
||||
arbitrary `SELECT` statement. TrailBase parses the `CREATE VIEW` statement
|
||||
and will allow record APIs only on top of a conservative subset, where it
|
||||
can infer the column types. Over time, TrailBase will be able to support
|
||||
larger subsets. Let us know if you have provably strictly typed queries
|
||||
that you think should be supported but aren't.
|
||||
|
||||
[^3]:
|
||||
There's also a few other endpoints, e.g. for downloading files as described
|
||||
later in the document.
|
||||
|
||||
[^4]:
|
||||
Record APIs only support textual JSON. Binary JSON is more compact and more
|
||||
efficient to parse, however its actual encoding is internal to SQLite and
|
||||
thus opaque to TrailBase.
|
||||
291
docs/src/content/docs/documentation/_auth.svg
Normal file
@@ -0,0 +1,291 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<!-- Created with Inkscape (http://www.inkscape.org/) -->
|
||||
|
||||
<svg
|
||||
width="646.19281"
|
||||
height="329.7818"
|
||||
viewBox="0 0 170.97184 87.254767"
|
||||
version="1.1"
|
||||
id="svg5"
|
||||
inkscape:version="1.2.2 (b0a8486541, 2022-12-01)"
|
||||
sodipodi:docname="_auth.svg"
|
||||
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:svg="http://www.w3.org/2000/svg">
|
||||
<sodipodi:namedview
|
||||
id="namedview7"
|
||||
pagecolor="#ffffff"
|
||||
bordercolor="#000000"
|
||||
borderopacity="0.25"
|
||||
inkscape:showpageshadow="2"
|
||||
inkscape:pageopacity="0.0"
|
||||
inkscape:pagecheckerboard="true"
|
||||
inkscape:deskcolor="#d1d1d1"
|
||||
inkscape:document-units="px"
|
||||
showgrid="false"
|
||||
inkscape:zoom="1.8249942"
|
||||
inkscape:cx="338.35724"
|
||||
inkscape:cy="169.31561"
|
||||
inkscape:window-width="1920"
|
||||
inkscape:window-height="1131"
|
||||
inkscape:window-x="0"
|
||||
inkscape:window-y="0"
|
||||
inkscape:window-maximized="1"
|
||||
inkscape:current-layer="layer1" />
|
||||
<defs
|
||||
id="defs2">
|
||||
<marker
|
||||
style="overflow:visible"
|
||||
id="marker1914"
|
||||
refX="0"
|
||||
refY="0"
|
||||
orient="auto-start-reverse"
|
||||
inkscape:stockid="TriangleStart"
|
||||
markerWidth="5.3244081"
|
||||
markerHeight="6.155385"
|
||||
viewBox="0 0 5.3244081 6.1553851"
|
||||
inkscape:isstock="true"
|
||||
inkscape:collect="always"
|
||||
preserveAspectRatio="xMidYMid">
|
||||
<path
|
||||
transform="scale(0.5)"
|
||||
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
|
||||
d="M 5.77,0 -2.88,5 V -5 Z"
|
||||
id="path1912" />
|
||||
</marker>
|
||||
<marker
|
||||
style="overflow:visible"
|
||||
id="TriangleStart"
|
||||
refX="0"
|
||||
refY="0"
|
||||
orient="auto-start-reverse"
|
||||
inkscape:stockid="TriangleStart"
|
||||
markerWidth="5.3244081"
|
||||
markerHeight="6.155385"
|
||||
viewBox="0 0 5.3244081 6.1553851"
|
||||
inkscape:isstock="true"
|
||||
inkscape:collect="always"
|
||||
preserveAspectRatio="xMidYMid">
|
||||
<path
|
||||
transform="scale(0.5)"
|
||||
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
|
||||
d="M 5.77,0 -2.88,5 V -5 Z"
|
||||
id="path135" />
|
||||
</marker>
|
||||
<marker
|
||||
style="overflow:visible"
|
||||
id="TriangleStart-3"
|
||||
refX="0"
|
||||
refY="0"
|
||||
orient="auto-start-reverse"
|
||||
inkscape:stockid="TriangleStart"
|
||||
markerWidth="5.3244081"
|
||||
markerHeight="6.155385"
|
||||
viewBox="0 0 5.3244081 6.1553851"
|
||||
inkscape:isstock="true"
|
||||
inkscape:collect="always"
|
||||
preserveAspectRatio="xMidYMid">
|
||||
<path
|
||||
transform="scale(0.5)"
|
||||
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
|
||||
d="M 5.77,0 -2.88,5 V -5 Z"
|
||||
id="path135-6" />
|
||||
</marker>
|
||||
</defs>
|
||||
<g
|
||||
inkscape:label="Layer 1"
|
||||
inkscape:groupmode="layer"
|
||||
id="layer1"
|
||||
transform="translate(-19.670349,-21.276482)">
|
||||
<rect
|
||||
style="fill:#cfd4fc;stroke:none;stroke-width:0.529167;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40;paint-order:markers fill stroke;fill-opacity:1;opacity:0.75"
|
||||
id="rect335"
|
||||
width="170.97185"
|
||||
height="87.254768"
|
||||
x="19.670349"
|
||||
y="21.276482" />
|
||||
<path
|
||||
style="fill:none;stroke:#000000;stroke-width:0.529167;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40;paint-order:markers fill stroke"
|
||||
d="M 32.947025,41.547671 V 101.61317"
|
||||
id="path1685" />
|
||||
<path
|
||||
style="fill:none;stroke:#000000;stroke-width:0.529167;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40;paint-order:markers fill stroke"
|
||||
d="M 161.53459,41.547671 V 101.61317"
|
||||
id="path1701" />
|
||||
<rect
|
||||
style="fill:#eaeefe;stroke:#000000;stroke-width:0.529167;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40;paint-order:markers fill stroke;stroke-opacity:1;fill-opacity:1"
|
||||
id="rect254"
|
||||
width="37.041664"
|
||||
height="12.7"
|
||||
x="141.53709"
|
||||
y="29.482912"
|
||||
rx="2.6458333"
|
||||
ry="2.6458333" />
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-weight:bold;font-size:4.7625px;line-height:0px;font-family:Inter;-inkscape-font-specification:'Inter Bold';text-align:center;text-anchor:middle;fill:#3d50f5;stroke:none;stroke-width:0.529167;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40;paint-order:markers fill stroke;fill-opacity:1"
|
||||
x="160.06026"
|
||||
y="37.635128"
|
||||
id="text522"><tspan
|
||||
sodipodi:role="line"
|
||||
id="tspan520"
|
||||
style="font-size:4.7625px;fill:#3d50f5;stroke:none;stroke-width:0.529167;fill-opacity:1"
|
||||
x="160.06026"
|
||||
y="37.635128">TrailBase</tspan></text>
|
||||
<g
|
||||
id="g786"
|
||||
transform="matrix(0.63516325,0,0,0.63516325,16.428468,18.283254)"
|
||||
style="fill:#3d50f5;fill-opacity:1;stroke:#3c53f8;stroke-opacity:0.133333">
|
||||
<circle
|
||||
style="fill:#3d50f5;fill-opacity:1;stroke:#3c53f8;stroke-width:0.529167;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40;stroke-opacity:0.133333;paint-order:markers fill stroke"
|
||||
id="path678"
|
||||
cx="25.802729"
|
||||
cy="22.580135"
|
||||
r="4.1157269" />
|
||||
<path
|
||||
id="path685"
|
||||
style="fill:#3d50f5;fill-opacity:1;stroke:#3c53f8;stroke-width:0.529167;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40;stroke-opacity:0.133333;paint-order:markers fill stroke"
|
||||
d="M 34.017687,34.314613 H 17.58777 c 0,-4.536996 3.677963,-8.214958 8.214959,-8.214958 4.536996,0 8.214958,3.677962 8.214958,8.214958 z"
|
||||
sodipodi:nodetypes="ccsc" />
|
||||
</g>
|
||||
<path
|
||||
style="fill:none;stroke:#000000;stroke-width:0.529167;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40;marker-end:url(#TriangleStart);paint-order:markers fill stroke"
|
||||
d="M 34.034182,53.307677 H 95.286593"
|
||||
id="path842"
|
||||
sodipodi:nodetypes="cc" />
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-weight:bold;font-size:4.7625px;line-height:0px;font-family:Inter;-inkscape-font-specification:'Inter Bold';text-align:center;text-anchor:middle;fill:#3d50f5;fill-opacity:1;stroke:#000000;stroke-width:0.529167;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40;paint-order:markers fill stroke"
|
||||
x="60.450264"
|
||||
y="59.023113"
|
||||
id="text1619"><tspan
|
||||
sodipodi:role="line"
|
||||
id="tspan1617"
|
||||
style="font-size:4.7625px;fill:#3d50f5;fill-opacity:1;stroke:none;stroke-width:0.529167"
|
||||
x="60.450264"
|
||||
y="59.023113">auth token (JWT)</tspan></text>
|
||||
<path
|
||||
style="fill:none;stroke:#000000;stroke-width:0.529167;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40;marker-end:url(#TriangleStart-3);paint-order:markers fill stroke"
|
||||
d="M 98.359408,64.167241 H 158.74194"
|
||||
id="path842-7"
|
||||
sodipodi:nodetypes="cc" />
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-weight:bold;font-size:3.70417px;line-height:1;font-family:Inter;-inkscape-font-specification:'Inter Bold';text-align:center;text-anchor:middle;fill:#3d50f5;fill-opacity:1;stroke:#000000;stroke-width:0.529167;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40;paint-order:markers fill stroke"
|
||||
x="111.75844"
|
||||
y="50.671043"
|
||||
id="text1619-5"><tspan
|
||||
sodipodi:role="line"
|
||||
id="tspan1617-3"
|
||||
style="font-size:3.70417px;text-align:start;text-anchor:start;fill:#3d50f5;fill-opacity:1;stroke:none;stroke-width:0.529167"
|
||||
x="111.75844"
|
||||
y="50.671043"
|
||||
dy="0">locally authenticate</tspan><tspan
|
||||
sodipodi:role="line"
|
||||
style="font-size:3.70417px;text-align:start;text-anchor:start;fill:#3d50f5;fill-opacity:1;stroke:none;stroke-width:0.529167"
|
||||
x="111.75844"
|
||||
y="54.375214"
|
||||
id="tspan2055">and/or forward JWT</tspan></text>
|
||||
<path
|
||||
style="fill:none;stroke:#000000;stroke-width:0.529167;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40;paint-order:markers fill stroke"
|
||||
d="m 98.034202,41.547671 v 44.340917 m 0,6.313197 v 9.411385"
|
||||
id="path1775"
|
||||
sodipodi:nodetypes="cccc" />
|
||||
<rect
|
||||
style="fill:#eaeefe;stroke:#000000;stroke-width:0.529167;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40;paint-order:markers fill stroke;stroke-opacity:1;fill-opacity:1"
|
||||
id="rect13106"
|
||||
width="37.041664"
|
||||
height="12.7"
|
||||
x="79.513367"
|
||||
y="29.482912"
|
||||
rx="2.6458333"
|
||||
ry="2.6458333" />
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-weight:bold;font-size:4.7625px;line-height:0px;font-family:Inter;-inkscape-font-specification:'Inter Bold';text-align:center;text-anchor:middle;fill:#3d50f5;stroke:none;stroke-width:0.529167;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40;paint-order:markers fill stroke;fill-opacity:1"
|
||||
x="98.126053"
|
||||
y="37.539783"
|
||||
id="text1773"><tspan
|
||||
sodipodi:role="line"
|
||||
id="tspan1771"
|
||||
style="font-size:4.7625px;fill:#3d50f5;stroke:none;stroke-width:0.529167;fill-opacity:1"
|
||||
x="98.126053"
|
||||
y="37.539783">Your Backend</tspan></text>
|
||||
<path
|
||||
style="fill:none;stroke:#000000;stroke-width:0.529167;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40;marker-end:url(#TriangleStart);paint-order:markers fill stroke"
|
||||
d="M 34.302475,87.703537 H 158.37972"
|
||||
id="path1892"
|
||||
sodipodi:nodetypes="cc" />
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-weight:bold;font-size:4.7625px;line-height:0px;font-family:Inter;-inkscape-font-specification:'Inter Bold';text-align:center;text-anchor:middle;fill:#3d50f5;fill-opacity:1;stroke:#000000;stroke-width:0.529167;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40;paint-order:markers fill stroke"
|
||||
x="58.333595"
|
||||
y="85.481468"
|
||||
id="text1896"><tspan
|
||||
sodipodi:role="line"
|
||||
id="tspan1894"
|
||||
style="font-size:4.7625px;fill:#3d50f5;fill-opacity:1;stroke:none;stroke-width:0.529167"
|
||||
x="58.333595"
|
||||
y="85.481468">refresh token</tspan></text>
|
||||
<path
|
||||
style="fill:none;stroke:#000000;stroke-width:0.529167;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40;marker-start:url(#marker1914);paint-order:markers fill stroke"
|
||||
d="M 35.795743,90.349372 H 159.873"
|
||||
id="path1908"
|
||||
sodipodi:nodetypes="cc" />
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-weight:bold;font-size:4.7625px;line-height:0px;font-family:Inter;-inkscape-font-specification:'Inter Bold';text-align:center;text-anchor:middle;fill:#3d50f5;fill-opacity:1;stroke:#000000;stroke-width:0.529167;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40;paint-order:markers fill stroke"
|
||||
x="60.450264"
|
||||
y="96.064812"
|
||||
id="text1992"><tspan
|
||||
sodipodi:role="line"
|
||||
id="tspan1990"
|
||||
style="font-size:4.7625px;fill:#3d50f5;fill-opacity:1;stroke:none;stroke-width:0.529167"
|
||||
x="60.450264"
|
||||
y="96.064812">auth token (JWT)</tspan></text>
|
||||
<path
|
||||
id="path1996"
|
||||
style="fill:none;stroke:#000000;stroke-width:0.529168;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40;marker-end:url(#marker1914);paint-order:markers fill stroke"
|
||||
d="m 99.49649,52.378718 c 0.427038,-1.432367 1.75543,-2.476796 3.32801,-2.476796 1.91752,0 3.47198,1.552864 3.47198,3.46842 0,1.915555 -1.55446,3.468419 -3.47198,3.468419 -1.4533,0 -2.69805,-0.891994 -3.215564,-2.157836"
|
||||
sodipodi:nodetypes="csssc" />
|
||||
<rect
|
||||
style="fill:#3d50f5;fill-opacity:1;stroke:none;stroke-width:4.23333;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40"
|
||||
id="rect2972"
|
||||
width="6.4607415"
|
||||
height="6.4607415"
|
||||
x="85.760681"
|
||||
y="9.4290905" />
|
||||
<rect
|
||||
style="fill:#cfd4fc;fill-opacity:1;stroke:none;stroke-width:4.23333;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40"
|
||||
id="rect3026"
|
||||
width="6.4607415"
|
||||
height="6.4607415"
|
||||
x="96.344025"
|
||||
y="9.4290905" />
|
||||
<rect
|
||||
style="fill:#3d50f5;fill-opacity:1;stroke:none;stroke-width:4.23333;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40"
|
||||
id="rect8120"
|
||||
width="3.9671581"
|
||||
height="90.2556"
|
||||
x="16.999001"
|
||||
y="19.424362" />
|
||||
<path
|
||||
style="fill:#3d50f5;fill-opacity:1;stroke:#000000;stroke-width:0.529167;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#marker1914)"
|
||||
d="M 85.328226,54.497314 95.717913,62.51639"
|
||||
id="path13924"
|
||||
sodipodi:nodetypes="cc" />
|
||||
<path
|
||||
style="fill:none;stroke:#000000;stroke-width:0.529167;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40;marker-end:url(#TriangleStart-3);paint-order:markers fill stroke"
|
||||
d="m 104.73842,56.418202 h 54.00352"
|
||||
id="path14333"
|
||||
sodipodi:nodetypes="cc" />
|
||||
<rect
|
||||
style="fill:#eaeefe;fill-opacity:1;stroke:none;stroke-width:4.23333;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:40"
|
||||
id="rect4326"
|
||||
width="6.4607415"
|
||||
height="6.4607415"
|
||||
x="107.98567"
|
||||
y="9.4290905" />
|
||||
</g>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 13 KiB |
122
docs/src/content/docs/documentation/auth.mdx
Normal file
@@ -0,0 +1,122 @@
|
||||
---
|
||||
title: Auth
|
||||
description: Managing Users and Access
|
||||
---
|
||||
|
||||
import { Image } from "astro:assets";
|
||||
import { Aside } from "@astrojs/starlight/components";
|
||||
|
||||
import implementation from "./_auth.svg";
|
||||
|
||||
TrailBase provides core authentication flows and a basic UI out of the box[^1].
|
||||
These primitives let you establish the identity of your users in order to
|
||||
authorize or deny access to your data, let users change their email address,
|
||||
reset their password, etc.
|
||||
|
||||
<Aside type="danger" title="HTTPS">
|
||||
The safety of any authentication flow hinges on TLS/HTTPS, both for your users
|
||||
to trust that the server they're talking to is yours and for credentials to be
|
||||
transferred encrypted. Otherwise, credentials will be transmitted in plain
|
||||
text. In production, use a reverse proxy like nginx, caddy, or similar for TLS
|
||||
termination, see [here](/documentation/production).
|
||||
</Aside>
|
||||
|
||||
## Implementation
|
||||
|
||||
TrailBase tries to offer a standard, safe and versatile auth implementation out
|
||||
of the box. It combines:
|
||||
|
||||
- Asymmetric cryptography based on elliptic curves (ed25519)
|
||||
- Stateless, short-lived auth tokens (JWT)
|
||||
- Stateful, long-lived, opaque refresh tokens.
|
||||
|
||||
Breaking this apart, __asymmetric cryptography__ means that tokens are signed
with a private key by the TrailBase "auth server" and can then be validated by
others ("resource servers") using only the corresponding public key.
|
||||
The __stateless JWTs__ contain metadata that identifies the user without having
to talk to the auth server.
|
||||
Combining the two, other back-ends can authenticate, i.e. validate and
identify, users hermetically.
This is simple and efficient; however, it means that hermetic auth tokens
cannot be invalidated.
|
||||
A hermetic auth token released into the wild is valid until it expires.
|
||||
To balance the risks and benefits, TrailBase uses short-lived auth tokens
|
||||
expiring frequently[^2].
|
||||
To avoid burdening users by constantly re-authenticating, TrailBase issues an
|
||||
additional __opaque, stateful refresh token__.
|
||||
Refresh tokens are simply unique identifiers the server keeps track of as
sessions.
|
||||
Only refresh tokens that have not been revoked can be exchanged for a new auth
|
||||
token.
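For illustration, here is a minimal sketch of how another back-end could
validate such an auth token hermetically. It assumes you have the instance's
Ed25519 public key at hand in PEM form and use the `jose` library; both are
assumptions for this sketch, not prescriptions:

```typescript
import { importSPKI, jwtVerify } from "jose";

// Placeholder: your TrailBase instance's Ed25519 public key in PEM form.
// How you export and distribute it is up to you.
const publicKeyPem = `-----BEGIN PUBLIC KEY-----
...
-----END PUBLIC KEY-----`;

// Validates the token offline and returns the user id from the payload,
// without ever talking to the auth server.
async function verifyAuthToken(token: string): Promise<string | undefined> {
  const publicKey = await importSPKI(publicKeyPem, "EdDSA");
  const { payload } = await jwtVerify(token, publicKey);
  return payload.sub;
}
```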
|
||||
|
||||
<div class="flex justify-center">
|
||||
<Image
|
||||
class="w-[80%] "
|
||||
src={implementation}
|
||||
alt="Screenshot of TrailBase's admin dashboard"
|
||||
/>
|
||||
</div>
|
||||
|
||||
## Flows & UI
|
||||
|
||||
TrailBase currently implements the following auth flows:
|
||||
|
||||
- Email + password based user registration and email verification.
|
||||
- User registration using social OAuth providers (Google, ...)
|
||||
- Login & logout.
|
||||
- Change & reset password.
|
||||
- Change email.
|
||||
- User deletion.
|
||||
- Avatar management.
|
||||
|
||||
Besides the flows themselves, TrailBase also ships with a set of simple UIs to
support them. By default they're accessible via the route:
`<url>/_/auth/login`. Check out the [demo](https://demo.trailbase.io/_/auth/login).
|
||||
The built-in auth UIs can be disabled with `--disable-auth-ui` in case you
prefer rolling your own or have no need for web-based authentication.
|
||||
|
||||
## Usernames and other metadata
|
||||
|
||||
Strictly speaking, authentication is merely responsible for uniquely
|
||||
identifying who's on the other side.
|
||||
This only requires a __unique identifier__ and one or more __secrets__
(e.g. a password, hardware token, ...) for the peer to prove they are who they
claim to be.
|
||||
|
||||
Any unique identifier will do: a random string (painful to remember), a phone
|
||||
number, a username, or an email address.
|
||||
Email addresses are a popular choice, since they do double duty as a
|
||||
communication channel letting you reach out to your users, e.g. to reset their
|
||||
password.
|
||||
|
||||
Even from a product angle, building an online shop for example, email addresses
|
||||
are the natural choice.
|
||||
Asking your customers to think up and remember a globally unique username adds
extra friction, especially since you need their email address anyway to send
receipts.
|
||||
Additional profile data, like a shipping address, is something you can ask for
at a later time and is independent of auth.
|
||||
In contrast, when building a social network, chat app or messaging board, you
|
||||
typically don't want to leak everyone's email address.
|
||||
You will likely want an additional, more opaque identifier such as a username
|
||||
or handle.
|
||||
|
||||
Long story short, modeling __profile__ data is very product dependent.
|
||||
It's for you to figure out.
|
||||
That said, it is straightforward to join auth data, such as the user's email
address, with custom profile data in TrailBase.
We suggest creating a separate profile table with a `_user.id` `FOREIGN KEY`
primary key column. You can then freely expose profiles as dedicated record API
endpoints or join them with other data on `_user.id`.
|
||||
The blog example in `<repo>/examples/blog` demonstrates this, joining blog
|
||||
posts with user profiles on the author id to get an author's name.
|
||||
|
||||
<div class="h-[50px]" />
|
||||
|
||||
---
|
||||
|
||||
[^1]:
|
||||
Which can be disabled using `--disable-auth-ui`, if you prefer rolling your
|
||||
own or have no need for a web-based authentication UI.
|
||||
|
||||
[^2]:
|
||||
A one hour TTL by default.
|
||||
121
docs/src/content/docs/documentation/extending.mdx
Normal file
@@ -0,0 +1,121 @@
|
||||
---
|
||||
title: Extending
|
||||
description: Collocating your logic
|
||||
---
|
||||
|
||||
import { Aside } from "@astrojs/starlight/components";
|
||||
|
||||
This article explores different ways to extend TrailBase and integrate your own
|
||||
custom logic.
|
||||
|
||||
## The Elephant in the Room
|
||||
|
||||
The question of where your code should run is as old as the modern internet,
ever present since we moved away from the static mainframe model and hermetic
desktop applications.
As more interactive applications were pushed onto slow platforms, such as early
browsers or mobile phones, there was an increased need to distribute
applications with interactivity happening in the front-end and the heavy
lifting happening in a back-end.
|
||||
That's not to say that there aren't other good reasons to avoid running all
your code in an untrusted, potentially slow client-side sandbox.
|
||||
|
||||
In any case, having a rich client-side application like a mobile, desktop, or
progressive web app will reduce your need for server-side integrations.
|
||||
They're often a good place to start [^1], even if over time you decide to move more
|
||||
logic to a backend to address issues like high fan-out, initial load
|
||||
times, and SEO for web applications.
|
||||
|
||||
Conversely, if you have an existing application that mostly runs server-side,
you probably already have a database, auth, and are hosting your own APIs, ... .
If so, there's intrinsically less any application base can help you with.
Remaining use-cases might be piece-meal adoption to speed up existing APIs or
to delegate authentication.
One advantage of lightweight, self-hosted solutions is that they can be
co-located with your existing stack to reduce costs and latency.
|
||||
|
||||
## Bring your own Backend
|
||||
|
||||
The most flexible and most de-coupled way of running your own code is to
deploy a separate service alongside TrailBase. This gives you full control over
your destiny: runtime, scaling, deployment, etc.
|
||||
|
||||
TrailBase is designed with the explicit goal of running alongside a sea of
other services.
|
||||
Its stateless tokens using asymmetric crypto make it easy for other resource
|
||||
servers to hermetically authenticate your users.
|
||||
TrailBase's APIs can be accessed transitively, simply by forwarding user
|
||||
tokens.
|
||||
Alternatively, you can fall back to raw SQLite for reads, writes and even
|
||||
schema alterations[^2].
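Tying back to forwarding user tokens, here is a minimal sketch of what calling
a TrailBase record API from one of your own services could look like. The
`/api/records/v1/movies` route is taken from the getting-started guide, while
the `Authorization: Bearer` header convention is an assumption here:

```typescript
// Forward the user's auth token when calling a TrailBase record API from
// another service; route and header convention as noted above.
async function fetchMoviesOnBehalfOf(userAuthToken: string) {
  const response = await fetch(
    "http://localhost:4000/api/records/v1/movies?limit=3",
    { headers: { Authorization: `Bearer ${userAuthToken}` } },
  );
  if (!response.ok) {
    throw new Error(`Record API request failed: ${response.status}`);
  }
  return await response.json();
}
```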
|
||||
|
||||
<Aside type="note" title="Service Accounts">
|
||||
We would like to add service accounts in the future to authorize privileged
services independently of user-provided tokens or fake user-accounts for
services.
|
||||
</Aside>
|
||||
|
||||
## Custom APIs in TrailBase
|
||||
|
||||
TrailBase provides three main ways to embed your code and expose custom APIs:
|
||||
|
||||
1. Rust/Axum handlers.
|
||||
2. Stored procedures & [Query APIs](/documentation/apis/query_apis/)
|
||||
3. SQLite extensions, virtual table modules & [Query APIs](/documentation/apis/query_apis/)
|
||||
|
||||
Beware that the Rust APIs and [Query APIs](/documentation/apis/query_apis/) are
|
||||
likely subject to change. We rely on semantic versioning to explicitly signal
|
||||
breaking changes.
|
||||
Eventually, we would like to lower the barrier to entry by providing stable
bindings to a higher-level runtime within TrailBase, likely a
TypeScript/ES6/JavaScript runtime.
|
||||
|
||||
### Using Rust
|
||||
|
||||
The Rust APIs aren't yet stable and are fairly undocumented.
That said, similar to using PocketBase as a Go framework, you can build your
own TrailBase binary and register custom Axum handlers written in Rust with the
main application router, see `/examples/custom-binary`.
|
||||
|
||||
### Stored Procedures & Query APIs
|
||||
|
||||
Unlike Postgres or MySQL, SQLite does not support stored procedures out of
the box.
|
||||
TrailBase has adopted sqlean's
|
||||
[user-defined functions](https://github.com/nalgeon/sqlean/blob/main/docs/define.md)
|
||||
to provide similar functionality and minimize lock-in over vanilla SQLite.
|
||||
Check out [Query APIs](/documentation/apis/query_apis/) to see how stored
procedures can be hooked up.
|
||||
|
||||
### SQLite extensions, virtual table modules & Query APIs
|
||||
|
||||
Likely the most bespoke approach is to expose your functionality as a custom
|
||||
SQLite extension or module similar to how TrailBase extends SQLite itself.
|
||||
|
||||
This approach can be somewhat limiting in terms of the dependencies you have
access to and the things you can do, especially for extensions. Modules are
quite a bit more flexible but also more involved.
|
||||
Take a look at [SQLite's list](https://www.sqlite.org/vtablist.html) and
|
||||
[osquery](https://osquery.readthedocs.io/en/stable/) to get a sense of what's
|
||||
possible.
|
||||
|
||||
Despite their limitations, major advantages of using extensions or
modules are:
|
||||
* you have extremely low-overhead access to your data,
|
||||
* extensions and modules can also be used by services accessing the
|
||||
underlying SQLite databases.
|
||||
|
||||
<div class="h-[30px]" />
|
||||
|
||||
---
|
||||
[^1]:
|
||||
There are genuinely good properties in terms of latency, interactivity, offline
|
||||
capabilities and privacy when processing your users' data locally on their
|
||||
device.
|
||||
|
||||
[^2]:
|
||||
SQLite is running in WAL mode, which allows for parallel reads and
concurrent writes. That said, when possible you should probably use the APIs,
since falling back to raw database access is a privilege practically reserved
for processes with access to a shared file-system.
|
||||
85
docs/src/content/docs/documentation/production.mdx
Normal file
@@ -0,0 +1,85 @@
|
||||
---
|
||||
title: Productionize
|
||||
description: Going to production.
|
||||
---
|
||||
|
||||
import { Aside } from "@astrojs/starlight/components";
|
||||
|
||||
<Aside type="caution" title="Production">
|
||||
TrailBase is still young and hasn't gotten a lot of mileage yet. We're looking
for feedback and are happy to help early adopters get to production.
|
||||
</Aside>
|
||||
|
||||
Going to production, and depending on your requirements, things to think about
include:
|
||||
|
||||
- HTTPS/TLS termination
|
||||
- locking down access
|
||||
- setting up Email
|
||||
- deployment
|
||||
- introspection
|
||||
- disaster recovery
|
||||
|
||||
## TLS termination
|
||||
|
||||
The most important thing, alongside ensuring proper access protection, is to
set up TLS termination, ensuring that all traffic from your users to your
termination point is encrypted.
In practice, this means putting TrailBase behind a reverse proxy such as nginx,
Caddy, ... . The main benefit of using an established reverse proxy is built-in
support for automatic certificate issuance and renewal with certificate
authorities like Let's Encrypt.
|
||||
|
||||
## Access
|
||||
|
||||
### API access
|
||||
|
||||
Make sure to use the record APIs' authorization primitives to tighten access
to data as much as possible. It's a good idea to check `_REQ_.<user_id> ==
_USER_.id` on record creations and updates to prevent users from impersonating
others or touching other users' records.
|
||||
|
||||
### Admin access
|
||||
|
||||
You can expose TrailBase's admin APIs and UIs on a separate, private port as
an extra precaution and to simply expose a smaller public surface.
|
||||
|
||||
### Protect Configuration
|
||||
|
||||
You can prevent TrailBase's configuration from being accidentally changed in
prod, e.g. when you think you're actually configuring a dev instance. To do
so, you can mount the configuration directory read-only. However, make sure the
data directory remains writable.
|
||||
|
||||
## Email
|
||||
|
||||
By default, TrailBase will use your machine's sendmail setup. This can lead
to messages not being sent at all or getting stuck in spam filters, since they
aren't coming from a well-known Email server.
|
||||
|
||||
You should likely set up TrailBase with an SMTP server that can send Email
coming from your domain. If you don't have an Email provider yet, options
include Brevo, Mailchimp, SendGrid, ... .
|
||||
|
||||
## Deployment
|
||||
|
||||
We recommend containerization (e.g. Docker) for convenience. You can also
consider mounting certain directories and files, such as `<data_dir>/secrets`
and `<data_dir>/config.textproto`, read-only.
|
||||
|
||||
## Introspection
|
||||
|
||||
TrailBase's introspection is fairly bare-bones at this point. There is a
`/api/healthcheck` endpoint for container orchestration systems to probe.
You could also consider setting up probers against other endpoints.
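A minimal prober sketch against the documented healthcheck endpoint could look
like the following; the base URL is just a placeholder for wherever your
instance runs:

```typescript
// Periodically calling this from your monitoring setup tells you whether the
// instance is at least serving HTTP.
async function probeHealthcheck(base = "http://localhost:4000"): Promise<boolean> {
  try {
    const response = await fetch(`${base}/api/healthcheck`);
    return response.ok;
  } catch {
    return false;
  }
}
```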
|
||||
|
||||
## Disaster Recovery
|
||||
|
||||
The simplest option is to mount another local or remote drive and use
TrailBase's periodic backups.
|
||||
However, this may lead to significant data loss in case of a disaster, which
|
||||
may be acceptable for first party content but likely not for user-generated
|
||||
content.
|
||||
|
||||
A more comprehensive approach may be to use [Litestream](https://litestream.io/)
|
||||
to continuously replicate your database.
|
||||
82
docs/src/content/docs/documentation/type_safety.mdx
Normal file
@@ -0,0 +1,82 @@
|
||||
---
|
||||
title: Type-Safety
|
||||
---
|
||||
|
||||
import { Aside } from "@astrojs/starlight/components";
|
||||
|
||||
TrailBase provides end-to-end type-safety from the database level, through the
|
||||
HTTP APIs, all the way up to the client bindings relying on JSON schemas.
|
||||
It's worth noting that the JSON schema is derived directly from the database
schema as the source of truth, meaning that any schema change will be reflected
independent of whether it was applied via the admin dashboard, `sqlite3`, or
other means.
This also means that you should regenerate your type definitions
after changing the schema. We therefore recommend integrating type generation
into your build process or checking the generated types in alongside your
database migrations.
|
||||
|
||||
Using JSON schema and relying on off-the-shelf code generation tools allows
keeping the client libraries very thin, making it easy to integrate virtually
any language in a type-safe fashion.
|
||||
`/examples/blog` provides a glimpse at using [quicktype](https://quicktype.io/)
|
||||
to generate type-safe TypeScript and Dart APIs.
|
||||
|
||||
Type-safety is the main reason why TrailBase APIs require `STRICT`ly typed
|
||||
tables. By default SQLite only has a notion of "type affinity" on
|
||||
inserts and updates, generally allowing any data in any column.
|
||||
|
||||
## Generating Types from JSON Schemas
|
||||
|
||||
The generated JSON schemas depend on two aspects:
|
||||
|
||||
1. The actual database schema mapping columns and column types to fields and
|
||||
data types in a data structure of your target language.
|
||||
2. The specific API operation: `CREATE`, `UPDATE`, `READ`.
|
||||
|
||||
Expanding on 2., the notion of column default values means that data for a
`NOT NULL` column with a default is optional when creating a new record but
guaranteed to be present when reading it.
|
||||
`UPDATE`s are point updates of existing records, thus only requiring specific
|
||||
column values to be overridden.
|
||||
|
||||
Concretely, looking at `/examples/blog`, the data structure for inserting a new
|
||||
blog article is less strict than the equivalent for retrieving an existing
|
||||
article:
|
||||
|
||||
```typescript
|
||||
// Input data type when creating a new article record.
|
||||
export interface NewArticle {
|
||||
author: string;
|
||||
body: string;
|
||||
created?: number;
|
||||
id?: string;
|
||||
image?: FileUpload;
|
||||
// ...
|
||||
}
|
||||
|
||||
// Result data type when reading an article record.
|
||||
export interface Article {
|
||||
author: string;
|
||||
body: string;
|
||||
created: number;
|
||||
id: string;
|
||||
image?: FileUpload;
|
||||
// ...
|
||||
}
|
||||
```
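To illustrate how such generated types might be used, here is a minimal,
hypothetical sketch using plain `fetch`. The record API name (`articles`), the
route shape, and the `Authorization: Bearer` header are assumptions for
illustration, not taken from the example:

```typescript
// Hypothetical usage of the generated `NewArticle` type against a record API
// named "articles"; adjust the route and auth header to your setup.
async function createArticle(
  authToken: string,
  article: NewArticle,
): Promise<void> {
  const response = await fetch(
    "http://localhost:4000/api/records/v1/articles",
    {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Authorization: `Bearer ${authToken}`,
      },
      body: JSON.stringify(article),
    },
  );
  if (!response.ok) {
    throw new Error(`Failed to create article: ${response.status}`);
  }
}
```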
|
||||
|
||||
## Nested JSON Columns
|
||||
|
||||
TrailBase also supports generating type-safe bindings for columns containing
|
||||
JSON data and enforcing a specific JSON schema, see
|
||||
[here](/documentation/apis/record_apis/#custom-json-schemas).
|
||||
|
||||
|
||||
|
||||
<div class="h-[50px]" />
|
||||
|
||||
---
|
||||
|
||||
[^1]:
|
||||
We do not support binary JSON, i.e. SQLite's internal JSONB
|
||||
representation at this point.
|
||||
197
docs/src/content/docs/getting-started/first-app.mdx
Normal file
@@ -0,0 +1,197 @@
|
||||
---
|
||||
title: First App
|
||||
description: Building a first application on top of TrailBase.
|
||||
---
|
||||
|
||||
import { Code } from "@astrojs/starlight/components";
|
||||
import { Aside } from "@astrojs/starlight/components";
|
||||
|
||||
{/*
|
||||
import Readme from "../../../../../examples/tutorial/README.md";
|
||||
<Readme />
|
||||
*/}
|
||||
|
||||
In this tutorial, we'll set up a database with an IMDB test dataset, spin up
|
||||
TrailBase and write a small program to access the data.
|
||||
|
||||
In an effort to demonstrate TrailBase's loose coupling and the possibility of
simply trying out TrailBase with an existing SQLite-based data analysis
project, we will also offer an alternative path to bootstrapping the database
using the vanilla `sqlite3` CLI.
|
||||
|
||||
<Aside type="note" title="Note">
|
||||
Just like in the previous section, you can use docker to run arbitrary TrailBase commands like:
|
||||
|
||||
```bash
|
||||
mkdir traildepot
|
||||
alias trail="docker run -p 4000:4000 --mount type=bind,source=$PWD/traildepot,target=/app/traildepot trailbase/trailbase /app/trail"
|
||||
trail --help
|
||||
```
|
||||
|
||||
The above alias mounts `$PWD/traildepot` into the ephemeral container to store
|
||||
TrailBase's runtime files like configuration, database, etc. You can set this
|
||||
to be anywhere you like.
|
||||
Alternatively, you can build or download the `trail` binary and make it
|
||||
available via the `$PATH` variable.
|
||||
|
||||
</Aside>
|
||||
|
||||
## Create the Schema
|
||||
|
||||
By simply starting TrailBase, the migrations in `traildepot/migrations` will be
|
||||
applied, including `U1728810800__create_table_movies.sql`:
|
||||
|
||||
```sql
|
||||
CREATE TABLE IF NOT EXISTS movies (
|
||||
rank INTEGER PRIMARY KEY,
|
||||
name TEXT NOT NULL,
|
||||
year ANY NOT NULL,
|
||||
watch_time INTEGER NOT NULL,
|
||||
rating REAL NOT NULL,
|
||||
metascore ANY,
|
||||
gross ANY,
|
||||
votes TEXT NOT NULL,
|
||||
description TEXT NOT NULL
|
||||
) STRICT;
|
||||
```
|
||||
|
||||
Note that the only schema requirements for exposing an API are `STRICT` typing
and an integer (or UUIDv7) primary key column.
|
||||
|
||||
The main benefits of relying on TrailBase to apply the above schema as
migrations, over manually applying the schema yourself, are:
|
||||
|
||||
- documenting your database's schema alongside your code and
- even more importantly, letting TrailBase bootstrap from scratch and sync up
databases across your dev setup, your colleagues', every time integration
tests run, QA stages, and in production.
|
||||
|
||||
That said, TrailBase will happily work on existing datasets, in which
case it is your responsibility to provide a SQLite database file that
meets the expectations expressed by the configured TrailBase API endpoints.
|
||||
|
||||
Feel free to run:
|
||||
|
||||
```bash
|
||||
$ mkdir traildepot/data
|
||||
$ sqlite3 traildepot/data/main.db < traildepot/migrations/U1728810800__create_table_movies.sql
|
||||
```
|
||||
|
||||
before starting TrailBase the first time, if you prefer bootstrapping the
|
||||
database yourself.
|
||||
|
||||
## Importing the Data
|
||||
|
||||
After creating the schema above, either manually or by starting TrailBase to
apply its migrations, we're ready to import the IMDB test dataset.
|
||||
We could now expose an API endpoint and write a small program to first read
the CSV file and then write movie database records... and we'll do that a
little later.
For now, let's start by harnessing the fact that SQLite databases are simply
local files and import the data using the `sqlite3` CLI, side-stepping
TrailBase:
|
||||
|
||||
```
|
||||
$ sqlite3 traildepot/data/main.db
|
||||
sqlite> .mode csv
|
||||
sqlite> .import ./data/Top_1000_IMDb_movies_New_version.csv movies
|
||||
```
|
||||
|
||||
There will be a warning for the first line of the CSV, which contains textual
|
||||
table headers rather than data matching our schema. That's expected.
|
||||
We can validate that we successfully imported 1000 movies by running:
|
||||
|
||||
```sql
|
||||
sqlite> SELECT COUNT(*) FROM movies;
|
||||
1000
|
||||
```
|
||||
|
||||
## Accessing the Data
|
||||
|
||||
With TrailBase up and running (`trail run`), the easiest way to explore your
data is to go to the admin dashboard under
[http://localhost:4000](http://localhost:4000)
and log in with the admin credentials provided to you in the terminal upon
first start (you can also use the `trail` CLI to reset the password:
`trail user reset-password admin@localhost`).
|
||||
|
||||
In this tutorial, however, we want to explore more programmatic access using
TrailBase's record APIs. The following configuration snippet exposes a record
API for the `movies` table:
|
||||
|
||||
```json
|
||||
record_apis: [
|
||||
# ...
|
||||
{
|
||||
name: "movies"
|
||||
table_name: "movies"
|
||||
acl_world: [READ]
|
||||
acl_authenticated: [CREATE, READ, UPDATE, DELETE]
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
By adding the above snippet to your configuration (which is already the case
|
||||
for the checked-in configuration) you expose a world-readable API. We're using
|
||||
the config here but you can also configure the API using the admin dashboard
|
||||
via the
|
||||
[tables view](http://localhost:4000/_/admin/tables?pageIndex=0&pageSize=20&table=movies)
|
||||
and the "Record API" settings in the top right.
|
||||
|
||||
Let's try it out by querying the top-3 ranked movies with less than 120min
|
||||
watch time:
|
||||
|
||||
```bash
|
||||
curl -g 'localhost:4000/api/records/v1/movies?limit=3&order=rank&watch_time[lt]=120'
|
||||
```
|
||||
|
||||
You can also use your browser. Either way, you should see some JSON output with
|
||||
the respective movies.
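The same query can also be issued programmatically. Here is a minimal sketch
using plain `fetch`, no client library required:

```typescript
// Query the world-readable movies API: top-3 ranked movies under 120 minutes.
async function topShortMovies(base = "http://localhost:4000") {
  const url = `${base}/api/records/v1/movies?limit=3&order=rank&watch_time[lt]=120`;
  const response = await fetch(url);
  if (!response.ok) {
    throw new Error(`Request failed: ${response.status}`);
  }
  return await response.json();
}
```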
|
||||
|
||||
## Type-Safe APIs and Mutations
|
||||
|
||||
Finally, let's authenticate and use privileged APIs to first delete all movies
and then add them back using type-safe APIs rather than `sqlite3`.
|
||||
|
||||
Let's first create the JSON Schema type definitions from the database schema
we added above. Note that the type definitions for creation, reading, and
updating are all different. Creating a new record requires values for all
`NOT NULL` columns without a default value, while reads guarantee values for
all `NOT NULL` columns, and updates only require values for the columns that
are being updated.
In this tutorial we'll "cheat" by using the same type definition for reading
existing and creating new records: since our schema doesn't define any default
values (except implicitly for the primary key), they're almost identical.
|
||||
|
||||
In preparation for deleting and re-adding the movies, let's run:
|
||||
|
||||
```bash
|
||||
$ trail schema movies --mode insert
|
||||
```
|
||||
|
||||
This will output a standard JSON schema type definition file. There are quite
a few code generators you can use to generate bindings for your favorite
language.
For this example we'll use _quicktype_ to generate _TypeScript_ definitions;
it also happens to support roughly 25 other languages. You can install it
locally, but for this tutorial we'll stick with the
[browser](https://app.quicktype.io/) version and copy & paste the JSON schema
from above.
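For reference, quicktype's _TypeScript_ output for the insert schema will look
roughly like the sketch below. The exact typing of the `ANY` columns and the
optionality of the implicitly defaulted primary key may differ in the actual
output:

```typescript
// Rough approximation of the generated insert type; not the exact output.
export interface Movie {
  rank?: number; // INTEGER PRIMARY KEY: implicitly defaulted, optional on insert.
  name: string;
  year: any; // ANY columns carry no specific type.
  watch_time: number;
  rating: number;
  metascore?: any;
  gross?: any;
  votes: string;
  description: string;
}
```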
|
||||
|
||||
With the generated types, we can use the TrailBase TypeScript client to write
|
||||
the following program:
|
||||
|
||||
import fillCode from "../../../../../examples/tutorial/scripts/src/fill.ts?raw";
|
||||
|
||||
<Code
|
||||
code={fillCode}
|
||||
lang="ts"
|
||||
title={"examples/tutorial/scripts/src/fill.ts"}
|
||||
mark={[]}
|
||||
/>
|
||||
|
||||
## What's Next?
|
||||
|
||||
Thanks for making it to the end.
|
||||
Beyond the basic example above, the repository contains a more involved blog
example (`/examples/blog`) including both a web and a Flutter UI.
|
||||
The blog example also demonstrates more complex APIs, authorization, custom
|
||||
user profiles, etc.
|
||||
|
||||
Any questions or suggestions? Reach out on GitHub and help us improve the docs.
|
||||
Thanks!
|
||||
87
docs/src/content/docs/getting-started/philosophy.mdx
Normal file
@@ -0,0 +1,87 @@
|
||||
---
|
||||
title: Philosophy
|
||||
description: A quick look at TrailBase's philosophy and goals.
|
||||
---
|
||||
|
||||
The ambition of TrailBase is to help solve common problems with
established, standard solutions, while being blazingly fast and avoiding
lock-in or getting in your way.
|
||||
You shouldn't have to fight your framework due to evolving product needs.
|
||||
Instead, you should be able to use SQLite to its fullest, extend it, or move on
|
||||
entirely if necessary.
|
||||
|
||||
The key for TrailBase to achieve this goal is to focus on _loose coupling_ and
|
||||
_rigorous simplicity_.
|
||||
Performance is merely a desirable side-effect of keeping it simple and
|
||||
carefully picking the right components for doing the heavy lifting.
|
||||
|
||||
### _Simplicity_
|
||||
|
||||
To avoid demoting _simplicity_ to just another popular marketing term, let's
start by clarifying what we mean.
|
||||
Simplicity is a relative property in the context of a problem:
|
||||
a simple solution for a simple problem is straightforward to understand,
|
||||
validate, extend or even replace.
|
||||
A simple solution for a hard problem will naturally be more involved
but retains the above properties within a more complex context.
|
||||
|
||||
"Simple" is different from "easy".
|
||||
An easy solution may be very complex in an effort to take on as much
|
||||
responsibility as possible for very specific scenarios.
|
||||
Easy solutions will sometimes yield pleasing but magical results that fall
flat when straying off the path.
|
||||
Magic solutions will always lead to tight coupling and lock-in.
|
||||
In contrast, simple solutions are explicit, apply in a wide range of scenarios
|
||||
and lead to easy-to-understand, easy-to-change outcomes.
|
||||
|
||||
Why should I care? We believe that there are material benefits to simplicity.
|
||||
For example, tying back to TrailBase:
|
||||
|
||||
- A simple, single-file backend dependency lets you set up **consistent**
|
||||
production, pre-prod, testing and development environments, which will help
|
||||
to improve velocity, catch issues sooner, and reduce cognitive overhead.
|
||||
- Lets you change production deployments or cloud providers more easily to
|
||||
address soaring bills, ToS changes, geopolitics and policy requirements such as
|
||||
data governance.
|
||||
- Lets you more easily adopt TrailBase, also selectively, and more easily drop
|
||||
it if you choose to.
|
||||
|
||||
### _Coupling_
|
||||
|
||||
The way we defined _simplicity_ above, loose coupling is already an important
property of a good-natured, simple solution. Yet we believe it's a critical
property in its own right that should be called out explicitly, helping to
illustrate the guiding principles underpinning TrailBase.
|
||||
Looking at two examples:
|
||||
|
||||
__Admin Dashboard__: TrailBase offers an easy-to-use, integrated admin dashboard.
|
||||
It doesn't make TrailBase architecturally simpler but hopefully easier to use.
|
||||
Importantly, it's loosely coupled to the rest of the system. It's neither
|
||||
critical for serving production traffic nor setting up TrailBase.
|
||||
Any dashboard task can be equally accomplished using the CLI, the configs, or
|
||||
SQL.
|
||||
|
||||
__SQL over ORM__: TrailBase embraces SQL instead of trying to hide it. Eventually
|
||||
all paths lead to SQL as the only truly cross-platform, cross-database,
|
||||
cross-language solution 😉.
|
||||
ORMs often yield simple-looking examples but then fall flat soon after going
beyond the tutorial, sometimes as early as joining tables.
Your data model should never be driven by the shortcomings of an abstraction
forced upon you.
|
||||
ORMs aren't without merit. Often they will provide type-safe APIs for
accessing the database. While more constrained, TrailBase's end-to-end
type-safety provides similar benefits.
Leaning into HTTP, JSON, and JSON schema makes it easy for TrailBase to
provide consistent cross-language, client-side type-safety [^1], giving you
more freedom in choosing the right tool for a specific job.
|
||||
|
||||
<div class="h-[50px]" />
|
||||
|
||||
---
|
||||
|
||||
[^1]:
|
||||
Having only TypeScript and Dart bindings at the moment, this may sound more
|
||||
aspirational than practical.
|
||||
However, client bindings are only very thin layers around HTTP + JSON.
|
||||
It is straightforward to add new bindings or just use `curl` in a bunch
of shell scripts. We're planning to add more bindings in the future.
|
||||
72
docs/src/content/docs/getting-started/starting-up.mdx
Normal file
@@ -0,0 +1,72 @@
|
||||
---
|
||||
title: Starting Up
|
||||
description: Bringing up a local TrailBase instance and exploring the admin dashboard.
|
||||
---
|
||||
|
||||
import { Aside } from "@astrojs/starlight/components";
|
||||
import { Icon } from 'astro-icon/components'
|
||||
|
||||
In this getting-started guide we'll bring up a local TrailBase instance,
explore the admin dashboard, and implement our first, small application.
|
||||
|
||||
## Starting TrailBase
|
||||
|
||||
The quickest way to get TrailBase up and running is to use docker:
|
||||
|
||||
```bash
|
||||
$ mkdir traildepot
|
||||
$ docker run -p 4000:4000 --mount type=bind,source=$PWD/traildepot,target=/app/traildepot trailbase/trailbase
|
||||
```
|
||||
|
||||
On first start, TrailBase will generate a `traildepot` folder in your working
|
||||
directory containing its configuration, the database, secrets and a few more
|
||||
things.
|
||||
It will also generate a new admin user for you. The credentials will be printed
|
||||
on the command line as follows:
|
||||
|
||||
```
|
||||
Created new admin user:
|
||||
email: 'admin@localhost'
|
||||
password: '<random>'
|
||||
```
|
||||
|
||||
If you like, feel free to change the Email or password later in the dashboard.
|
||||
Independently, if you ever forget your password, you can reset it using the
|
||||
`trail user reset-password admin@localhost <new_pw>` command.
|
||||
|
||||
## The Admin Dashboard
|
||||
|
||||
After successfully starting TrailBase, we can check out the admin dashboard under
|
||||
[http://localhost:4000/\_/admin/](http://localhost:4000/_/admin/).
|
||||
After logging in with the admin credentials from the terminal, there are a
couple of pages to explore.
|
||||
|
||||
* First and maybe most importantly: the data browser
|
||||
(<span class="not-content inline align-middle"><Icon name="tabler:database" /></span>)
|
||||
that lets you explore and alter both the data as well as the table schemas. It
provides access to _tables_, _views_, _virtual tables_, _indexes_, _triggers_
as well as your TrailBase API settings.
|
||||
* The simple SQL editor
|
||||
(<span class="not-content inline align-middle"><Icon name="tabler:edit" /></span>)
|
||||
lets you run arbitrary queries against the database and take full control.
|
||||
It also lets you access SQLite features which aren't (yet) exposed via the
|
||||
dashboard.
|
||||
* The accounts page
|
||||
(<span class="not-content inline align-middle"><Icon name="tabler:users" /></span>)
|
||||
lets you manage your registered users.
|
||||
* The logs page
|
||||
(<span class="not-content inline align-middle"><Icon name="tabler:timeline" /></span>)
|
||||
lets you see what's going on. At this early stage you're probably just seeing
|
||||
your own interactions with the admin dashboard.
|
||||
* The settings page
|
||||
(<span class="not-content inline align-middle"><Icon name="tabler:settings" /></span>)
|
||||
lets you configure instance-wide settings.
|
||||
Alternatively, you can also directly edit TrailBase's config file; however,
unlike with the UI, you'll need to restart the server to apply the changes.
|
||||
TrailBase uses protobuf for its configuration. The schema can be
|
||||
found [here](https://github.com/trailbaseio/trailbase/proto/config.proto).
|
||||
|
||||
We encourage you to take a minute, click around, and maybe create a few tables.
|
||||
Don't worry about breaking anything. Also note that when creating, altering,
or deleting a table, a schema migration file will be created in
`traildepot/migrations`.
|
||||
167
docs/src/content/docs/index.mdx
Normal file
@@ -0,0 +1,167 @@
|
||||
---
|
||||
title: Welcome to TrailBase
|
||||
description: Blazingly fast, single-file, open-source server for your Applications
|
||||
template: splash
|
||||
hero:
|
||||
tagline: A blazingly fast, single-file, and open-source server for your application with APIs, auth, admin dashboard, ...
|
||||
image:
|
||||
file: ../../assets/logo_512.webp
|
||||
actions:
|
||||
- text: Documentation
|
||||
link: /getting-started/starting-up
|
||||
icon: right-arrow
|
||||
- text: FAQ
|
||||
link: /reference/faq/
|
||||
icon: external
|
||||
variant: secondary
|
||||
---
|
||||
|
||||
import { Image } from "astro:assets";
|
||||
import { Aside, Card, CardGrid } from "@astrojs/starlight/components";
|
||||
|
||||
import screenshot from "@/assets/screenshot.webp";
|
||||
import flutterLogo from "@/assets/flutter_logo.svg";
|
||||
import tsLogo from "@/assets/ts_logo.svg";
|
||||
|
||||
import { Duration100kInsertsChart } from "./reference/_benchmarks/benchmarks.tsx";
|
||||
|
||||
<div class="flex justify-center">
|
||||
<div class="md:w-[80%] flex flex-col gap-4">
|
||||
|
||||
<div>
|
||||
<h3>Total time for 100k insertions</h3>
|
||||
|
||||
<a href="/reference/benchmarks">
|
||||
TrailBase adds minimal overhead compared to in-process SQLite and
|
||||
beats other excellent choices:
|
||||
</a>
|
||||
|
||||
<div class="w-full h-[260px] mt-4">
|
||||
<Duration100kInsertsChart client:only="solid-js" />
|
||||
</div>
|
||||
</div>
|
||||
|
||||
#### Live Demo
|
||||
|
||||
<div class="relative">
|
||||
<Image class="z-0 rounded-xl" src={screenshot} alt="Screenshot of TrailBase's admin dashboard" />
|
||||
|
||||
<div class="z-1 w-full h-full absolute top-0 flex justify-center items-center">
|
||||
<a class="no-underline flex flex-col items-center bg-gray-200 dark:bg-accent-900 px-4 py-2 rounded" href="https://demo.trailbase.io/_/admin">
|
||||
<span>login: admin@localhost</span>
|
||||
<span>password: secret</span>
|
||||
</a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<Aside type="caution" title="Early Days">
|
||||
TrailBase is very young.
|
||||
You can expect many new features but also moving APIs until things settle.
|
||||
We'll make sure to follow semantic versioning and welcome brave, early
|
||||
adopters.
|
||||
</Aside>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="w-full py-8">
|
||||
|
||||
<CardGrid stagger>
|
||||
|
||||
<Card title="Performance" icon="rocket">
|
||||
Blazingly fast thanks to its constituents:
|
||||
|
||||
* Rust: one of the lowest overhead languages,
|
||||
* Axum: one of the fastest HTTP servers,
|
||||
* SQLite/Libsql: one of the fastest full-SQL databases.
|
||||
|
||||
TrailBase is [6-7x faster than PocketBase and 15x faster than SupaBase
|
||||
needing only a fraction of the footprint](/reference/benchmarks), allowing
|
||||
you to serve millions of customers from a tiny box.
|
||||
</Card>
|
||||
|
||||
<Card title="Simple" icon="heart">
|
||||
TrailBase is a small, single file, static binary that is incredibly easy
|
||||
to deploy **consistently** across integration testing, development, pre-prod,
|
||||
and production environments including edge.
|
||||
Architecturally, TrailBase aims to be a simple, thin abstraction around
|
||||
standards helping full or piece-meal adoption and avoiding lock-in.
|
||||
|
||||
A simple architecture, both in your dependencies and your own App, will
|
||||
let you move faster, more confidently and pivot when necessary.
|
||||
</Card>
|
||||
|
||||
<Card title="Admin Dashboard" icon="setting">
|
||||
TrailBase ships with a built-in admin dashboard UI (see the demo above) that
lets you quickly configure your instance and visually explore your data.
|
||||
Following TrailBase's mantra of not getting in your way, the UI is
|
||||
entirely optional letting you fall back to a purely config &
|
||||
migration-based setup for integration tests or managing an entire fleet
|
||||
of deployments.
|
||||
</Card>
|
||||
|
||||
<Card title="Authentication" icon="open-book">
|
||||
TrailBase comes with a built-in authentication system and UI, supporting
both password-based and social/OAuth (Google, Discord, ...) sign-ups.
|
||||
|
||||
TrailBase's authentication system follows standards and best practices,
combining short-lived, stateless JSON web tokens with long-lived, stateful
refresh tokens, letting you easily and efficiently authenticate your users
from any of your other back-ends relying on safe, asymmetric cryptography.
|
||||
</Card>
|
||||
|
||||
<Card title="APIs & File Storage" icon="random">
|
||||
Provide access to your tables and views through fast, flexible and
|
||||
**type-safe** restful CRUD APIs.
|
||||
Authorize users based on ACLs and SQL access rules letting you
|
||||
easily build higher-level access management or moderation facilities
|
||||
like groups or capabilities.
|
||||
</Card>
|
||||
|
||||
<Card title="Integration" icon="puzzle">
|
||||
Straightforward integration with any stack thanks to thin abstractions,
|
||||
reliance on standards, and JSON Schema, allowing type-safe
bindings for virtually any language.
|
||||
|
||||
Clients as well as code-generation examples for TypeScript and
|
||||
Dart/Flutter are provided out of the box.
|
||||
|
||||
<div class="m-0 flex justify-center items-start gap-8">
|
||||
<span>
|
||||
<Image class="p-0 m-0" height={52} src={tsLogo} alt="TypeScript" />
|
||||
</span>
|
||||
<span>
|
||||
<Image margin={0} class="p-0 m-0" width={42} height={52} src={flutterLogo} alt="Flutter" />
|
||||
</span>
|
||||
</div>
|
||||
</Card>
|
||||
|
||||
</CardGrid>
|
||||
</div>
|
||||
|
||||
import Roadmap from "./_roadmap.md";
|
||||
|
||||
<div class="flex justify-center">
|
||||
<div class="md:w-[80%] flex flex-col items-center gap-4">
|
||||
<Aside type="tip" title="Roadmap">
|
||||
<Roadmap />
|
||||
</Aside>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Hero page footer */}
|
||||
|
||||
<div class="h-[30px]" />
|
||||
<div class="w-full mt-[28px] h-[1px] bg-accent-200" />
|
||||
|
||||
<div class="w-full flex justify-end">
|
||||
<div class="flex flex-col items-end mr-4">
|
||||
<a class="no-underline" href="/contact">
|
||||
Contact
|
||||
</a>
|
||||
<a class="no-underline" href="/license">
|
||||
License
|
||||
</a>
|
||||
</div>
|
||||
</div>
|
||||
108
docs/src/content/docs/license.mdx
Normal file
@@ -0,0 +1,108 @@
|
||||
---
|
||||
title: Functional Source License, Version 1.1, Apache 2.0 Future License
|
||||
template: splash
|
||||
---
|
||||
|
||||
## Abbreviation
|
||||
|
||||
FSL-1.1-Apache-2.0
|
||||
|
||||
## Notice
|
||||
|
||||
Copyright 2024 Sebastian Jeltsch
|
||||
|
||||
## Terms and Conditions
|
||||
|
||||
### Licensor ("We")
|
||||
|
||||
The party offering the Software under these Terms and Conditions.
|
||||
|
||||
### The Software
|
||||
|
||||
The "Software" is each version of the software that we make available under
|
||||
these Terms and Conditions, as indicated by our inclusion of these Terms and
|
||||
Conditions with the Software.
|
||||
|
||||
### License Grant
|
||||
|
||||
Subject to your compliance with this License Grant and the Patents,
|
||||
Redistribution and Trademark clauses below, we hereby grant you the right to
|
||||
use, copy, modify, create derivative works, publicly perform, publicly display
|
||||
and redistribute the Software for any Permitted Purpose identified below.
|
||||
|
||||
### Permitted Purpose
|
||||
|
||||
A Permitted Purpose is any purpose other than a Competing Use. A Competing Use
|
||||
means making the Software available to others in a commercial product or
|
||||
service that:
|
||||
|
||||
1. substitutes for the Software;
|
||||
|
||||
2. substitutes for any other product or service we offer using the Software
|
||||
that exists as of the date we make the Software available; or
|
||||
|
||||
3. offers the same or substantially similar functionality as the Software.
|
||||
|
||||
Permitted Purposes specifically include using the Software:
|
||||
|
||||
1. for your internal use and access;
|
||||
|
||||
2. for non-commercial education;
|
||||
|
||||
3. for non-commercial research; and
|
||||
|
||||
4. in connection with professional services that you provide to a licensee
|
||||
using the Software in accordance with these Terms and Conditions.
|
||||
|
||||
### Patents
|
||||
|
||||
To the extent your use for a Permitted Purpose would necessarily infringe our
|
||||
patents, the license grant above includes a license under our patents. If you
|
||||
make a claim against any party that the Software infringes or contributes to
|
||||
the infringement of any patent, then your patent license to the Software ends
|
||||
immediately.
|
||||
|
||||
### Redistribution
|
||||
|
||||
The Terms and Conditions apply to all copies, modifications and derivatives of
|
||||
the Software.
|
||||
|
||||
If you redistribute any copies, modifications or derivatives of the Software,
|
||||
you must include a copy of or a link to these Terms and Conditions and not
|
||||
remove any copyright notices provided in or with the Software.
|
||||
|
||||
### Disclaimer
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTIES OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING WITHOUT LIMITATION WARRANTIES OF FITNESS FOR A PARTICULAR
|
||||
PURPOSE, MERCHANTABILITY, TITLE OR NON-INFRINGEMENT.
|
||||
|
||||
IN NO EVENT WILL WE HAVE ANY LIABILITY TO YOU ARISING OUT OF OR RELATED TO THE
|
||||
SOFTWARE, INCLUDING INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES,
|
||||
EVEN IF WE HAVE BEEN INFORMED OF THEIR POSSIBILITY IN ADVANCE.
|
||||
|
||||
### Trademarks
|
||||
|
||||
Except for displaying the License Details and identifying us as the origin of
|
||||
the Software, you have no right under these Terms and Conditions to use our
|
||||
trademarks, trade names, service marks or product names.
|
||||
|
||||
## Grant of Future License
|
||||
|
||||
We hereby irrevocably grant you an additional license to use the Software under
|
||||
the Apache License, Version 2.0 that is effective on the second anniversary of
|
||||
the date we make the Software available. On or after that date, you may use the
|
||||
Software under the Apache License, Version 2.0, in which case the following
|
||||
will apply:
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
this file except in compliance with the License.
|
||||
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed
|
||||
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
378
docs/src/content/docs/reference/_benchmarks/benchmarks.tsx
Normal file
@@ -0,0 +1,378 @@
|
||||
import { type ChartData, type ChartDataset, type Tick } from "chart.js/auto";
|
||||
|
||||
import { BarChart } from "@/components/BarChart.tsx";
|
||||
import { LineChart } from "@/components/LineChart.tsx";
|
||||
|
||||
import { data as supabaseUtilization } from "./supabase_utilization";
|
||||
import { data as pocketbaseUtilization } from "./pocketbase_utilization";
|
||||
import { data as trailbaseUtilization } from "./trailbase_utilization";
|
||||
|
||||
const colors = {
|
||||
supabase: "rgb(62, 207, 142)",
|
||||
pocketbase0: "rgb(230, 128, 30)",
|
||||
pocketbase1: "rgb(238, 175, 72)",
|
||||
trailbase0: "rgb(0, 115, 170)",
|
||||
trailbase1: "rgb(71, 161, 205)",
|
||||
trailbase2: "rgb(146, 209, 242)",
|
||||
drizzle: "rgb(249, 39, 100)",
|
||||
};
|
||||
|
||||
function transformTimeTicks(factor: number = 0.5) {
|
||||
return (_value: number | string, index: number): string | undefined => {
|
||||
if (index % 10 === 0) {
|
||||
// WARN: These are estimated times due to how we measure: periodic
|
||||
// polling every 0.5s using `top` or `docker stats`, which themselves
|
||||
// have sampling intervals. The actual value shouldn't matter that
|
||||
// much, since we measure the actual duration in-situ. We do this
|
||||
// transformation only to make the time scale more intuitive than
|
||||
// just "time at sample X".
|
||||
return `~${index * factor}s`;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
const durations100k = [
|
||||
{
|
||||
label: "SupaBase",
|
||||
data: [151],
|
||||
backgroundColor: colors.supabase,
|
||||
},
|
||||
{
|
||||
label: "PocketBase TS",
|
||||
data: [67.721],
|
||||
backgroundColor: colors.pocketbase0,
|
||||
},
|
||||
// {
|
||||
// label: "PocketBase Dart (AOT)",
|
||||
// data: [62.8136],
|
||||
// },
|
||||
{
|
||||
label: "PocketBase Dart (JIT)",
|
||||
data: [61.687],
|
||||
backgroundColor: colors.pocketbase1,
|
||||
},
|
||||
{
|
||||
label: "TrailBase TS",
|
||||
data: [16.742],
|
||||
backgroundColor: colors.trailbase0,
|
||||
},
|
||||
// {
|
||||
// label: "TrailBase Dart (AOT)",
|
||||
// data: [11.1],
|
||||
// },
|
||||
{
|
||||
// label: "TrailBase Dart (JIT)",
|
||||
label: "TrailBase Dart",
|
||||
data: [9.4247],
|
||||
backgroundColor: colors.trailbase1,
|
||||
},
|
||||
// {
|
||||
// label: "TrailBase Dart (JIT + PGO)",
|
||||
// data: [10.05],
|
||||
// },
|
||||
// {
|
||||
// label: "TrailBase Dart (INT PK)",
|
||||
// data: [8.5249],
|
||||
// backgroundColor: colors.trailbase2,
|
||||
// },
|
||||
{
|
||||
label: "In-process SQLite (Drizzle)",
|
||||
data: [8.803],
|
||||
backgroundColor: colors.drizzle,
|
||||
},
|
||||
];
|
||||
|
||||
export function Duration100kInsertsChart() {
|
||||
const data: ChartData<"bar"> = {
|
||||
labels: ["Time [s] (lower is better)"],
|
||||
datasets: durations100k as ChartDataset<"bar">[],
|
||||
};
|
||||
|
||||
return <BarChart data={data} />;
|
||||
}
|
||||
|
||||
export function PocketBaseAndTrailBaseReadLatencies() {
|
||||
// 2024-10-12
|
||||
// Read 1000000 messages, took 0:00:57.952120 (limit=64)
|
||||
const readTrailbaseMicroS = {
|
||||
p50: 3504,
|
||||
p75: 3947,
|
||||
p90: 4393,
|
||||
p95: 4725,
|
||||
};
|
||||
|
||||
// 2024-10-12
|
||||
// Read 100000 messages, took 0:00:20.273054 (limit=64)
|
||||
const readPocketbaseMicroS = {
|
||||
p50: 12740,
|
||||
p75: 13718,
|
||||
p90: 14755,
|
||||
p95: 15495,
|
||||
};
|
||||
|
||||
const latenciesMs = (d: any) =>
|
||||
[d.p50, d.p75, d.p90, d.p95].map((p) => p / 1000);
|
||||
|
||||
const data: ChartData<"bar"> = {
|
||||
labels: ["p50", "p75", "p90", "p95"],
|
||||
datasets: [
|
||||
{
|
||||
label: "PocketBase",
|
||||
data: latenciesMs(readPocketbaseMicroS),
|
||||
backgroundColor: colors.pocketbase0,
|
||||
},
|
||||
{
|
||||
label: "TrailBase",
|
||||
data: latenciesMs(readTrailbaseMicroS),
|
||||
backgroundColor: colors.trailbase0,
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
return (
|
||||
<BarChart
|
||||
data={data}
|
||||
scales={{
|
||||
y: {
|
||||
title: {
|
||||
display: true,
|
||||
text: "Read Latency [ms]",
|
||||
},
|
||||
},
|
||||
}}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
export function PocketBaseAndTrailBaseInsertLatencies() {
|
||||
// 2024-10-12
|
||||
// Inserted 10000 messages, took 0:00:01.654810 (limit=64)
|
||||
const insertTrailbaseMicroS = {
|
||||
p50: 8107,
|
||||
p75: 10897,
|
||||
p90: 15327,
|
||||
p95: 19627,
|
||||
};
|
||||
// 2024-10-12
|
||||
//Inserted 10000 messages, took 0:00:07.759677 (limit=64)
|
||||
const insertPocketbaseMicroS = {
|
||||
p50: 28160,
|
||||
p75: 58570,
|
||||
p90: 108325,
|
||||
p95: 157601,
|
||||
};
|
||||
|
||||
const latenciesMs = (d: any) =>
|
||||
[d.p50, d.p75, d.p90, d.p95].map((p) => p / 1000);
|
||||
|
||||
const data: ChartData<"bar"> = {
|
||||
labels: ["p50", "p75", "p90", "p95"],
|
||||
datasets: [
|
||||
{
|
||||
label: "PocketBase",
|
||||
data: latenciesMs(insertPocketbaseMicroS),
|
||||
backgroundColor: colors.pocketbase0,
|
||||
},
|
||||
{
|
||||
label: "TrailBase",
|
||||
data: latenciesMs(insertTrailbaseMicroS),
|
||||
backgroundColor: colors.trailbase0,
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
return (
|
||||
<BarChart
|
||||
data={data}
|
||||
scales={{
|
||||
y: {
|
||||
title: {
|
||||
display: true,
|
||||
text: "Insert Latency [ms]",
|
||||
},
|
||||
},
|
||||
}}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
export function SupaBaseMemoryUsageChart() {
|
||||
const data: ChartData<"line"> = {
|
||||
labels: [...Array(330).keys()],
|
||||
datasets: Object.keys(supabaseUtilization).map((key) => {
|
||||
const data = supabaseUtilization[key].map((datum, index) => ({
|
||||
x: index,
|
||||
y: datum.memUsageKb,
|
||||
}));
|
||||
|
||||
return {
|
||||
label: key.replace("supabase-", ""),
|
||||
data: data,
|
||||
fill: true,
|
||||
showLine: false,
|
||||
pointStyle: false,
|
||||
};
|
||||
}),
|
||||
};
|
||||
|
||||
return (
|
||||
<LineChart
|
||||
data={data}
|
||||
scales={{
|
||||
y: {
|
||||
stacked: true,
|
||||
title: {
|
||||
display: true,
|
||||
text: "Memory Usage [GB]",
|
||||
},
|
||||
ticks: {
|
||||
callback: (
|
||||
value: number | string,
|
||||
_index: number,
|
||||
_ticks: Tick[],
|
||||
): string | undefined => {
|
||||
const v = value as number;
|
||||
return `${(v / 1024 / 1024).toFixed(0)}`;
|
||||
},
|
||||
},
|
||||
},
|
||||
x: {
|
||||
ticks: {
|
||||
display: true,
|
||||
callback: transformTimeTicks(),
|
||||
},
|
||||
},
|
||||
}}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
export function SupaBaseCpuUsageChart() {
|
||||
const data: ChartData<"line"> = {
|
||||
labels: [...Array(330).keys()],
|
||||
datasets: Object.keys(supabaseUtilization).map((key) => {
|
||||
const data = supabaseUtilization[key].map((datum, index) => ({
|
||||
x: index,
|
||||
y: datum.cpuPercent ?? 0,
|
||||
}));
|
||||
|
||||
return {
|
||||
label: key.replace("supabase-", ""),
|
||||
data: data,
|
||||
fill: true,
|
||||
showLine: false,
|
||||
pointStyle: false,
|
||||
};
|
||||
}),
|
||||
};
|
||||
|
||||
return (
|
||||
<LineChart
|
||||
data={data}
|
||||
scales={{
|
||||
y: {
|
||||
stacked: true,
|
||||
title: {
|
||||
display: true,
|
||||
text: "CPU Cores",
|
||||
},
|
||||
},
|
||||
x: {
|
||||
ticks: {
|
||||
display: true,
|
||||
callback: transformTimeTicks(),
|
||||
},
|
||||
},
|
||||
}}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
export function PocketBaseAndTrailBaseUsageChart() {
|
||||
// To roughly align start of benchmark on the time axis.
|
||||
const xOffset = 3;
|
||||
|
||||
const data: ChartData<"line"> = {
|
||||
labels: [...Array(134).keys()],
|
||||
datasets: [
|
||||
{
|
||||
yAxisID: "yLeft",
|
||||
label: "PocketBase CPU",
|
||||
data: pocketbaseUtilization.slice(xOffset).map((datum, index) => ({
|
||||
x: index,
|
||||
y: datum.cpu,
|
||||
})),
|
||||
borderColor: colors.pocketbase0,
|
||||
backgroundColor: colors.pocketbase0,
|
||||
},
|
||||
{
|
||||
yAxisID: "yRight",
|
||||
label: "PocketBase RSS",
|
||||
data: pocketbaseUtilization.slice(xOffset).map((datum, index) => ({
|
||||
x: index,
|
||||
y: datum.rss,
|
||||
})),
|
||||
borderColor: colors.pocketbase1,
|
||||
backgroundColor: colors.pocketbase1,
|
||||
},
|
||||
{
|
||||
yAxisID: "yLeft",
|
||||
label: "TrailBase CPU",
|
||||
data: trailbaseUtilization.map((datum, index) => ({
|
||||
x: index,
|
||||
y: datum.cpu,
|
||||
})),
|
||||
borderColor: colors.trailbase0,
|
||||
backgroundColor: colors.trailbase0,
|
||||
},
|
||||
{
|
||||
yAxisID: "yRight",
|
||||
label: "TrailBase RSS",
|
||||
data: trailbaseUtilization.map((datum, index) => ({
|
||||
x: index,
|
||||
y: datum.rss,
|
||||
})),
|
||||
borderColor: colors.trailbase1,
|
||||
backgroundColor: colors.trailbase1,
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
return (
|
||||
<LineChart
|
||||
data={data}
|
||||
scales={{
|
||||
yLeft: {
|
||||
position: "left",
|
||||
title: {
|
||||
display: true,
|
||||
text: "CPU Cores",
|
||||
},
|
||||
},
|
||||
yRight: {
|
||||
position: "right",
|
||||
title: {
|
||||
display: true,
|
||||
text: "Resident Memory Size [MB]",
|
||||
},
|
||||
ticks: {
|
||||
callback: (
|
||||
value: number | string,
|
||||
_index: number,
|
||||
_ticks: Tick[],
|
||||
): string | undefined => {
|
||||
const v = value as number;
|
||||
return `${(v / 1024).toFixed(0)}`;
|
||||
},
|
||||
},
|
||||
},
|
||||
x: {
|
||||
ticks: {
|
||||
display: true,
|
||||
callback: transformTimeTicks(0.6),
|
||||
},
|
||||
},
|
||||
}}
|
||||
/>
|
||||
);
|
||||
}
|
||||
@@ -0,0 +1,507 @@
|
||||
type Datum = {
|
||||
cpu: number;
|
||||
rss: number;
|
||||
};
|
||||
|
||||
export const data: Datum[] = [
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 35476,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 35476,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 35476,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 35476,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 35476,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 35476,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 35476,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 35476,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 35476,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 35476,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 37140,
|
||||
},
|
||||
{
|
||||
cpu: 2.1,
|
||||
rss: 88196,
|
||||
},
|
||||
{
|
||||
cpu: 3,
|
||||
rss: 110108,
|
||||
},
|
||||
{
|
||||
cpu: 2.5,
|
||||
rss: 118060,
|
||||
},
|
||||
{
|
||||
cpu: 2.6,
|
||||
rss: 123776,
|
||||
},
|
||||
{
|
||||
cpu: 2.6,
|
||||
rss: 127716,
|
||||
},
|
||||
{
|
||||
cpu: 2.3,
|
||||
rss: 131548,
|
||||
},
|
||||
{
|
||||
cpu: 2.4,
|
||||
rss: 134740,
|
||||
},
|
||||
{
|
||||
cpu: 2.2,
|
||||
rss: 138208,
|
||||
},
|
||||
{
|
||||
cpu: 2.7,
|
||||
rss: 139436,
|
||||
},
|
||||
{
|
||||
cpu: 2.818,
|
||||
rss: 141788,
|
||||
},
|
||||
{
|
||||
cpu: 2.8,
|
||||
rss: 142532,
|
||||
},
|
||||
{
|
||||
cpu: 2.545,
|
||||
rss: 142652,
|
||||
},
|
||||
{
|
||||
cpu: 2.4,
|
||||
rss: 142796,
|
||||
},
|
||||
{
|
||||
cpu: 2.182,
|
||||
rss: 143012,
|
||||
},
|
||||
{
|
||||
cpu: 3.1,
|
||||
rss: 143072,
|
||||
},
|
||||
{
|
||||
cpu: 2.8,
|
||||
rss: 142660,
|
||||
},
|
||||
{
|
||||
cpu: 2.7,
|
||||
rss: 142292,
|
||||
},
|
||||
{
|
||||
cpu: 2.2,
|
||||
rss: 143560,
|
||||
},
|
||||
{
|
||||
cpu: 2.7,
|
||||
rss: 143236,
|
||||
},
|
||||
{
|
||||
cpu: 3.3,
|
||||
rss: 143200,
|
||||
},
|
||||
{
|
||||
cpu: 2.636,
|
||||
rss: 143136,
|
||||
},
|
||||
{
|
||||
cpu: 2.8,
|
||||
rss: 143068,
|
||||
},
|
||||
{
|
||||
cpu: 2.8,
|
||||
rss: 143144,
|
||||
},
|
||||
{
|
||||
cpu: 2.4,
|
||||
rss: 142832,
|
||||
},
|
||||
{
|
||||
cpu: 2.4,
|
||||
rss: 143120,
|
||||
},
|
||||
{
|
||||
cpu: 3.2,
|
||||
rss: 143020,
|
||||
},
|
||||
{
|
||||
cpu: 2.9,
|
||||
rss: 142884,
|
||||
},
|
||||
{
|
||||
cpu: 2.8,
|
||||
rss: 143068,
|
||||
},
|
||||
{
|
||||
cpu: 2.8,
|
||||
rss: 143024,
|
||||
},
|
||||
{
|
||||
cpu: 3,
|
||||
rss: 143392,
|
||||
},
|
||||
{
|
||||
cpu: 2.636,
|
||||
rss: 143276,
|
||||
},
|
||||
{
|
||||
cpu: 3.2,
|
||||
rss: 143264,
|
||||
},
|
||||
{
|
||||
cpu: 2.3,
|
||||
rss: 142436,
|
||||
},
|
||||
{
|
||||
cpu: 2.3,
|
||||
rss: 142812,
|
||||
},
|
||||
{
|
||||
cpu: 3.1,
|
||||
rss: 142564,
|
||||
},
|
||||
{
|
||||
cpu: 3,
|
||||
rss: 142624,
|
||||
},
|
||||
{
|
||||
cpu: 2.8,
|
||||
rss: 143296,
|
||||
},
|
||||
{
|
||||
cpu: 3,
|
||||
rss: 142000,
|
||||
},
|
||||
{
|
||||
cpu: 2.9,
|
||||
rss: 142264,
|
||||
},
|
||||
{
|
||||
cpu: 2.8,
|
||||
rss: 143004,
|
||||
},
|
||||
{
|
||||
cpu: 2.273,
|
||||
rss: 142336,
|
||||
},
|
||||
{
|
||||
cpu: 2.5,
|
||||
rss: 142420,
|
||||
},
|
||||
{
|
||||
cpu: 2.5,
|
||||
rss: 142696,
|
||||
},
|
||||
{
|
||||
cpu: 3,
|
||||
rss: 141480,
|
||||
},
|
||||
{
|
||||
cpu: 3.2,
|
||||
rss: 142084,
|
||||
},
|
||||
{
|
||||
cpu: 2.455,
|
||||
rss: 142428,
|
||||
},
|
||||
{
|
||||
cpu: 3,
|
||||
rss: 144056,
|
||||
},
|
||||
{
|
||||
cpu: 2.9,
|
||||
rss: 143800,
|
||||
},
|
||||
{
|
||||
cpu: 2.9,
|
||||
rss: 143408,
|
||||
},
|
||||
{
|
||||
cpu: 2.5,
|
||||
rss: 143144,
|
||||
},
|
||||
{
|
||||
cpu: 2.7,
|
||||
rss: 143076,
|
||||
},
|
||||
{
|
||||
cpu: 2.6,
|
||||
rss: 143080,
|
||||
},
|
||||
{
|
||||
cpu: 2.636,
|
||||
rss: 142248,
|
||||
},
|
||||
{
|
||||
cpu: 2.8,
|
||||
rss: 142812,
|
||||
},
|
||||
{
|
||||
cpu: 2.9,
|
||||
rss: 143836,
|
||||
},
|
||||
{
|
||||
cpu: 2.8,
|
||||
rss: 142564,
|
||||
},
|
||||
{
|
||||
cpu: 3.2,
|
||||
rss: 142868,
|
||||
},
|
||||
{
|
||||
cpu: 2.7,
|
||||
rss: 143088,
|
||||
},
|
||||
{
|
||||
cpu: 2.3,
|
||||
rss: 143516,
|
||||
},
|
||||
{
|
||||
cpu: 2.6,
|
||||
rss: 142912,
|
||||
},
|
||||
{
|
||||
cpu: 2.636,
|
||||
rss: 143428,
|
||||
},
|
||||
{
|
||||
cpu: 2.9,
|
||||
rss: 142660,
|
||||
},
|
||||
{
|
||||
cpu: 3.3,
|
||||
rss: 143012,
|
||||
},
|
||||
{
|
||||
cpu: 2.9,
|
||||
rss: 143404,
|
||||
},
|
||||
{
|
||||
cpu: 2.9,
|
||||
rss: 143512,
|
||||
},
|
||||
{
|
||||
cpu: 2.7,
|
||||
rss: 143048,
|
||||
},
|
||||
{
|
||||
cpu: 2.3,
|
||||
rss: 142480,
|
||||
},
|
||||
{
|
||||
cpu: 2.545,
|
||||
rss: 142628,
|
||||
},
|
||||
{
|
||||
cpu: 3.2,
|
||||
rss: 142744,
|
||||
},
|
||||
{
|
||||
cpu: 2.9,
|
||||
rss: 143576,
|
||||
},
|
||||
{
|
||||
cpu: 3,
|
||||
rss: 143284,
|
||||
},
|
||||
{
|
||||
cpu: 3,
|
||||
rss: 143588,
|
||||
},
|
||||
{
|
||||
cpu: 2.7,
|
||||
rss: 143340,
|
||||
},
|
||||
{
|
||||
cpu: 2.9,
|
||||
rss: 142944,
|
||||
},
|
||||
{
|
||||
cpu: 2.5,
|
||||
rss: 142972,
|
||||
},
|
||||
{
|
||||
cpu: 2.7,
|
||||
rss: 142940,
|
||||
},
|
||||
{
|
||||
cpu: 2.6,
|
||||
rss: 144108,
|
||||
},
|
||||
{
|
||||
cpu: 2.545,
|
||||
rss: 143676,
|
||||
},
|
||||
{
|
||||
cpu: 2.9,
|
||||
rss: 143480,
|
||||
},
|
||||
{
|
||||
cpu: 3,
|
||||
rss: 143228,
|
||||
},
|
||||
{
|
||||
cpu: 3,
|
||||
rss: 143420,
|
||||
},
|
||||
{
|
||||
cpu: 3.1,
|
||||
rss: 143316,
|
||||
},
|
||||
{
|
||||
cpu: 2.8,
|
||||
rss: 143324,
|
||||
},
|
||||
{
|
||||
cpu: 2.6,
|
||||
rss: 143376,
|
||||
},
|
||||
{
|
||||
cpu: 2.4,
|
||||
rss: 142712,
|
||||
},
|
||||
{
|
||||
cpu: 2.182,
|
||||
rss: 142896,
|
||||
},
|
||||
{
|
||||
cpu: 2.9,
|
||||
rss: 143616,
|
||||
},
|
||||
{
|
||||
cpu: 2.8,
|
||||
rss: 144012,
|
||||
},
|
||||
{
|
||||
cpu: 3.2,
|
||||
rss: 142724,
|
||||
},
|
||||
{
|
||||
cpu: 3,
|
||||
rss: 142240,
|
||||
},
|
||||
{
|
||||
cpu: 2.9,
|
||||
rss: 144172,
|
||||
},
|
||||
{
|
||||
cpu: 3,
|
||||
rss: 143712,
|
||||
},
|
||||
{
|
||||
cpu: 2.6,
|
||||
rss: 143144,
|
||||
},
|
||||
{
|
||||
cpu: 2.6,
|
||||
rss: 142732,
|
||||
},
|
||||
{
|
||||
cpu: 2.6,
|
||||
rss: 142924,
|
||||
},
|
||||
{
|
||||
cpu: 2.9,
|
||||
rss: 142632,
|
||||
},
|
||||
{
|
||||
cpu: 2.9,
|
||||
rss: 143912,
|
||||
},
|
||||
{
|
||||
cpu: 2.727,
|
||||
rss: 143132,
|
||||
},
|
||||
{
|
||||
cpu: 2.8,
|
||||
rss: 143212,
|
||||
},
|
||||
{
|
||||
cpu: 3,
|
||||
rss: 143420,
|
||||
},
|
||||
{
|
||||
cpu: 2.3,
|
||||
rss: 143480,
|
||||
},
|
||||
{
|
||||
cpu: 2.6,
|
||||
rss: 143212,
|
||||
},
|
||||
{
|
||||
cpu: 2.455,
|
||||
rss: 142700,
|
||||
},
|
||||
{
|
||||
cpu: 2.9,
|
||||
rss: 142812,
|
||||
},
|
||||
{
|
||||
cpu: 2.7,
|
||||
rss: 143088,
|
||||
},
|
||||
{
|
||||
cpu: 2.8,
|
||||
rss: 143492,
|
||||
},
|
||||
{
|
||||
cpu: 2.9,
|
||||
rss: 143276,
|
||||
},
|
||||
{
|
||||
cpu: 3.2,
|
||||
rss: 143004,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 142328,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 142328,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 142328,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 142328,
|
||||
},
|
||||
{
|
||||
cpu: 0.1,
|
||||
rss: 142328,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 142328,
|
||||
},
|
||||
];
|
||||
143
docs/src/content/docs/reference/_benchmarks/supabase_utilization.ts
Normal file
@@ -0,0 +1,143 @@
|
||||
type Datum = {
|
||||
cpu: number;
|
||||
rss: number;
|
||||
};
|
||||
|
||||
export const data: Datum[] = [
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 53640,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 53640,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 53640,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 53640,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 53640,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 53640,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 53640,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 53640,
|
||||
},
|
||||
{
|
||||
cpu: 1.2,
|
||||
rss: 100256,
|
||||
},
|
||||
{
|
||||
cpu: 2.273,
|
||||
rss: 70444,
|
||||
},
|
||||
{
|
||||
cpu: 3.1,
|
||||
rss: 71984,
|
||||
},
|
||||
{
|
||||
cpu: 3.1,
|
||||
rss: 74032,
|
||||
},
|
||||
{
|
||||
cpu: 3.2,
|
||||
rss: 76436,
|
||||
},
|
||||
{
|
||||
cpu: 3.2,
|
||||
rss: 79124,
|
||||
},
|
||||
{
|
||||
cpu: 3.2,
|
||||
rss: 83760,
|
||||
},
|
||||
{
|
||||
cpu: 3.2,
|
||||
rss: 86192,
|
||||
},
|
||||
{
|
||||
cpu: 3.2,
|
||||
rss: 95820,
|
||||
},
|
||||
{
|
||||
cpu: 3.2,
|
||||
rss: 103000,
|
||||
},
|
||||
{
|
||||
cpu: 3.2,
|
||||
rss: 111724,
|
||||
},
|
||||
{
|
||||
cpu: 3.2,
|
||||
rss: 116524,
|
||||
},
|
||||
{
|
||||
cpu: 3.2,
|
||||
rss: 116748,
|
||||
},
|
||||
{
|
||||
cpu: 1.2,
|
||||
rss: 110208,
|
||||
},
|
||||
{
|
||||
cpu: 0.7,
|
||||
rss: 111360,
|
||||
},
|
||||
{
|
||||
cpu: 0.636,
|
||||
rss: 112768,
|
||||
},
|
||||
{
|
||||
cpu: 0.6,
|
||||
rss: 113024,
|
||||
},
|
||||
{
|
||||
cpu: 0.6,
|
||||
rss: 113024,
|
||||
},
|
||||
{
|
||||
cpu: 0.5,
|
||||
rss: 113024,
|
||||
},
|
||||
{
|
||||
cpu: 0.5,
|
||||
rss: 112844,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 112844,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 112844,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 112844,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 112844,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 112844,
|
||||
},
|
||||
{
|
||||
cpu: 0,
|
||||
rss: 112844,
|
||||
},
|
||||
];
|
||||
74
docs/src/content/docs/reference/_sql.mdx
Normal file
@@ -0,0 +1,74 @@
|
||||
---
|
||||
title: SQL
|
||||
description: A short intro to SQL.
|
||||
---
|
||||
|
||||
SQL is a functional general-purpose query language for relational databases. It
|
||||
is as old as relational databases themselves.
|
||||
It lets you define arbitrary data look-ups and computations without hard-coding a
|
||||
specific database implementation or storage structure (presence of indexes,
|
||||
storage layout, ...).
|
||||
Yet many foster a love-hate relationship with it, which has given rise to an
|
||||
entire cottage industry of ORMs: higher-level, often type-safe abstractions
|
||||
with bindings for your favorite programming language.
|
||||
They tend to work great until you need to break glass. In recent years, there
|
||||
has been a push towards thinner and thinner abstractions.
|
||||
|
||||
Instead of hiding or working around SQL, TrailBase embraces it as an evergreen,
|
||||
transferable skill.
|
||||
There's no denying that SQL can get tricky, but its generality is
|
||||
also its greatest strength. SQL is a general-purpose, functional (sometimes
|
||||
imperative) programming language that lets you solve arbitrary problems in a
|
||||
high-level, portable fashion. In other words, learning SQL is a lot more useful
|
||||
than learning a specific ORM or similar abstractions.
|
||||
|
||||
One thing that's pretty sweet about SQL is that almost any relational database
|
||||
(Postgres, MySQL, MS SQL, SQLite, ...) supports some dialect of SQL, which
|
||||
makes this a pretty transferable skill. This is only possible because SQL is
|
||||
pretty abstract. Given a lookup or transformation, different databases might
|
||||
execute them quite differently depending on their capabilities, how the data
|
||||
structures are set up, and how the lookup is expressed. All of this is facilitated
|
||||
through the magic of the query-planner, which takes your query and turns it
|
||||
into a physical execution plan.
|
||||
|
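In SQLite, for example, you can inspect that plan with `EXPLAIN QUERY PLAN`. A
minimal sketch, assuming a hypothetical `articles` table:

```sql
-- Ask SQLite how it would execute a query; the `articles` table is purely
-- illustrative.
EXPLAIN QUERY PLAN
SELECT title FROM articles WHERE id = 42;
```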
||||
## A gentle introduction
|
||||
|
||||
At first glance, SQL may look just like some antiquated filter language but in
|
||||
practice it's a high-level, functional programming language that is optimized
|
||||
for table transformations. All that SQL does is:
|
||||
|
||||
1. inserting rows (INSERT),
|
||||
2. removing rows (DELETE),
|
||||
3. updating rows (UPDATE),
|
||||
4. and transforming NxM tables into PxQ tables (SELECT).
|
||||
|
||||
Operations 1. to 3. are pretty simple: they operate on individual rows, as sketched below.
|
||||
|
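A minimal sketch of those row operations, assuming a hypothetical `articles` table:

```sql
-- The `articles` table and its columns are made up for illustration.
INSERT INTO articles (title) VALUES ('Hello SQL');
UPDATE articles SET title = 'Hello again' WHERE id = 1;
DELETE FROM articles WHERE id = 1;
```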
||||
SELECT is by far the most complex tool. Think of it as writing a function that
|
||||
takes in one or more tables, even combined tables, filters them, and then
|
||||
transforms them into a new table. SELECT statements might look overwhelming at first:
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
Output: outcol0, outcol1, ..
|
||||
FROM
|
||||
Input: incol0, incol1,
|
||||
WHERE Filter: e.g. where incol0 > 10
|
||||
GROUP BY aggregation
|
||||
ORDER BY outcol0 ASC
|
||||
;
|
||||
```
|
||||
|
||||
but most of it is optional. The simplest statement is `SELECT 1;`, which creates a
|
||||
new table with one row and one column containing the value of `1` (a scalar)
|
||||
from nothing.
|
||||
|
||||
Now that we can create a new column from thin air, let's transform it:
|
||||
|
||||
```sql
|
||||
SELECT col*2 FROM (SELECT 1 as col);
|
||||
```
|
||||
|
||||
Now we have two nested SELECTs: the inner one creates the table we already know
|
||||
but names the column `col` and the outer SELECT transforms that table into a
|
||||
new 1x1 table with the values doubled.
|
||||
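The same building blocks compose into more realistic queries. As a final,
purely illustrative sketch, assume a hypothetical `articles` table with
`author` and `created` (unix epoch seconds) columns; the query below filters,
aggregates, and orders the result:

```sql
-- Count last week's articles per author; table and columns are hypothetical.
SELECT
  author,
  COUNT(*) AS article_count
FROM articles
WHERE created > strftime('%s', 'now') - 7 * 24 * 60 * 60
GROUP BY author
ORDER BY article_count DESC;
```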
207
docs/src/content/docs/reference/benchmarks.mdx
Normal file
@@ -0,0 +1,207 @@
|
||||
---
|
||||
title: Benchmarks
|
||||
description: Performance comparison with similar products.
|
||||
---
|
||||
|
||||
import {
|
||||
Duration100kInsertsChart,
|
||||
PocketBaseAndTrailBaseReadLatencies,
|
||||
PocketBaseAndTrailBaseInsertLatencies,
|
||||
SupaBaseMemoryUsageChart,
|
||||
SupaBaseCpuUsageChart,
|
||||
PocketBaseAndTrailBaseUsageChart,
|
||||
} from "./_benchmarks/benchmarks.tsx";
|
||||
|
||||
TrailBase is merely the sum of its parts. It's the result of marrying one of
|
||||
the lowest-overhead languages, one of the fastest HTTP servers, and one of the
|
||||
lightest relational SQL databases, while merely avoiding extra overhead on top.
|
||||
We did expect it to go fast but how fast exactly? Let's take a brief look at
|
||||
how TrailBase performs compared to a few amazing and more seasoned
|
||||
alternatives such as SupaBase, PocketBase, and vanilla SQLite.
|
||||
|
||||
## Disclaimer
|
||||
|
||||
In general, benchmarks are tricky, both to do well and to interpret.
|
||||
Benchmarks never show how fast something can theoretically go but merely how
|
||||
fast the author managed to make it go.
|
||||
Micro-benchmarks, especially, offer selective key-hole insights, which may be
|
||||
biased and may or may not apply to your workload.
|
||||
|
||||
Performance also doesn't exist in a vacuum. If something is super fast but
|
||||
doesn't do what you need it to do, performance is an illusory luxury.
|
||||
Doing less naturally makes it easier to go fast, which is not a bad thing,
|
||||
but it means that comparing a highly specialized solution to a more general
|
||||
one on a specific aspect can be misleading or "unfair".
|
||||
Specifically, PocketBase and SupaBase have both been around for longer, offering
|
||||
different and in many cases more comprehensive feature sets.
|
||||
|
||||
We tried our hardest to give all contenders the best chance to go fast [^1].
|
||||
We were surprised by the performance gap ourselves and thus went back and
|
||||
forth double-checking the results. We suspect that any overhead weighs so heavily because of how quick
|
||||
SQLite itself is.
|
||||
If you spot any issues or have ideas to make anyone go faster,
|
||||
[we want to know](https://github.com/trailbaseio/trailbase-benchmark).
|
||||
We hope to improve the methodology over time, make the numbers more broadly
|
||||
applicable, and keep them as fair as an apples-to-oranges comparison can be.
|
||||
With that said, we hope that the results can provide at least some insights
|
||||
into what to expect when taken with a grain of salt.
|
||||
Ultimately, nothing beats benchmarking your own workload and setup.
|
||||
|
||||
## Insertion Benchmarks
|
||||
|
||||
_Total Time for 100k Insertions_
|
||||
|
||||
<div class="flex justify-center">
|
||||
<div class="h-[300px] w-[90%]">
|
||||
<Duration100kInsertsChart client:only="solid-js" />
|
||||
</div>
|
||||
</div>
|
||||
|
||||
The graph shows the overall time it takes to insert 100k messages into a mock
|
||||
"chat-room" table setup. Less time is better.
|
||||
|
||||
Unsurprisingly, in-process SQLite is the quickest [^2].
|
||||
All other setups add additional table look-ups for access checking, IPC
|
||||
overhead[^3], and layers of features on top.
|
||||
Maybe think of this data point as an upper bound on how fast SQLite could go
|
||||
and as the cost a project would pay for adopting any of the systems over in-process
|
||||
SQLite.
|
||||
|
||||
The data suggests that, depending on your setup (client, data, hardware),
|
||||
TrailBase can insert 100k records 9 to 16 times faster than SupaBase[^4] and
|
||||
roughly 6 to 7 times faster than PocketBase [^1].
|
||||
The fact that our TS/node.js benchmark is slower than the Dart one suggests a
|
||||
client-side bottleneck that could be overcome by tuning the setup or trying
|
||||
other JS runtimes with lower overhead HTTP clients.
|
||||
|
||||
The total time for inserting a large batch of data tells only part of the story, so
|
||||
let's have a quick look at resource consumption to get an intuition for
|
||||
provisioning or footprint requirements:
|
||||
|
||||
_TrailBase & PocketBase Utilization_
|
||||
|
||||
<div class="flex justify-center">
|
||||
<div class="h-[300px] w-[90%]">
|
||||
<PocketBaseAndTrailBaseUsageChart client:only="solid-js" />
|
||||
</div>
|
||||
</div>
|
||||
|
||||
The graph shows the CPU utilization and memory consumption (RSS) of both
|
||||
PocketBase and TrailBase. They look fairly similar apart from TrailBase
|
||||
finishing earlier. They both load roughly 3 CPUs with PocketBase's CPU
|
||||
consumption being slightly more variable [^5].
|
||||
The little bump after the TrailBase run is likely due to SQLite check-pointing.
|
||||
|
||||
Both only consume about 140MB of memory at full tilt, which makes them a great
|
||||
choice for running on a tiny VPS or a toaster.
|
||||
|
||||
SupaBase is a bit more involved due to its
|
||||
[layered architecture](https://supabase.com/docs/guides/getting-started/architecture)
|
||||
including a dozen separate services that provide a ton of extra functionality:
|
||||
|
||||
_SupaBase Memory Usage_
|
||||
|
||||
<div class="flex justify-center">
|
||||
<div class="h-[340px] w-[90%]">
|
||||
<SupaBaseMemoryUsageChart client:only="solid-js" />
|
||||
</div>
|
||||
</div>
|
||||
|
||||
SupaBase's memory usage increased from roughly 6GB at rest to
|
||||
7GB fully loaded.
|
||||
This means that out of the box, SupaBase has roughly 50 times the memory
|
||||
footprint of either PocketBase or TrailBase.
|
||||
In all fairness, there's a lot of extra functionality and it might be possible
|
||||
to further optimize the setup by shedding some less critical services, e.g.
|
||||
removing "supabase-analytics" may save ~40% of memory. That said, we don't know
|
||||
how feasible this is in practice.
|
||||
|
||||
_SupaBase CPU utilization_
|
||||
|
||||
<div class="flex justify-center">
|
||||
<div class="h-[340px] w-[90%]">
|
||||
<SupaBaseCpuUsageChart client:only="solid-js" />
|
||||
</div>
|
||||
</div>
|
||||
|
||||
Looking at the CPU usage, you can see it jump up to roughly 9
|
||||
cores (the benchmark ran on a machine with 8 physical cores and 16 threads:
|
||||
7840U). Most of the CPU seems to be consumed by "supabase-rest", with Postgres
|
||||
itself hovering at only ~0.7 cores.
|
||||
|
||||
## Latency and Read Performance
|
||||
|
||||
In this section we'll take a closer look at latency distributions. To keep
|
||||
things manageable we'll focus on PocketBase and TrailBase, which are
|
||||
architecturally simpler and more comparable.
|
||||
|
||||
Reads were on average 3.5x faster with TrailBase and insertions 6x faster, as discussed
|
||||
above.
|
||||
|
||||
<div class="flex justify-center h-[340px] w-[90%]">
|
||||
<div class="w-[50%]">
|
||||
<PocketBaseAndTrailBaseReadLatencies client:only="solid-js" />
|
||||
</div>
|
||||
|
||||
<div class="w-[50%]">
|
||||
<PocketBaseAndTrailBaseInsertLatencies client:only="solid-js" />
|
||||
</div>
|
||||
</div>
|
||||
|
||||
Looking at the latency distributions we can see that the spread is well
|
||||
contained for TrailBase. For PocketBase, read latencies are also generally well
|
||||
contained and predictable.
|
||||
However, insert latencies show a more significant "long tail" with their p90
|
||||
being roughly 5x longer than their p50.
|
||||
Slower insertions can take north of 100ms. There may or may not be a connection
|
||||
to the variability in CPU utilization we've seen above.
|
||||
|
||||
## Final Words
|
||||
|
||||
We're very happy to confirm that TrailBase is quick. The significant
|
||||
performance gap we observed might just be a consequence of how much overhead
|
||||
matters given how quick SQLite itself is.
|
||||
Yet, it challenges our intuition. With the numbers fresh off the press, prudence is
|
||||
of the essence. We'd like to re-emphasize how important it is to run your own
|
||||
tests with your specific setup and workloads.
|
||||
In any case, we hope this was interesting nonetheless; let us know if you
|
||||
see anything that can or should be improved.
|
||||
The benchmarks are available on [GitHub](https://github.com/trailbaseio/trailbase-benchmark).
|
||||
|
||||
<div class="h-[50px]" />
|
||||
|
||||
---
|
||||
|
||||
[^1]:
|
||||
Trying to give PocketBase the best chance, the binary was built with the
|
||||
latest Go compiler (v1.23.1 at the time of writing), `CGO_ENABLED=1` (which
|
||||
according to PB's own documentation will use a faster C-based SQLite
|
||||
driver) and `GOAMD64=v4` (for less portable but more aggressive CPU
|
||||
optimizations).
|
||||
We found this setup to be roughly 20% faster than the static, pre-built
|
||||
binary release.
|
||||
|
||||
[^2]:
|
||||
Our setup with drizzle and node.js is certainly not the fastest possible.
|
||||
For example, we could drop down to using raw SQLite in C or another
|
||||
low-level language.
|
||||
That said, drizzle is a great, popular choice that mostly serves as a
|
||||
point-of-reference and sanity check.
|
||||
|
||||
[^3]:
|
||||
The actual magnitude of the IPC overhead will depend on the communication cost.
|
||||
For the benchmarks at hand we're using a loopback network device.
|
||||
|
||||
[^4]:
|
||||
The SupaBase benchmark setup skips row-level access checks. Technically,
|
||||
this is in its favor from a performance standpoint. However, looking at the
|
||||
overall load on its constituents with PG being only a sliver, it probably
|
||||
would not make much of an overall difference nor would PG17's vectorization,
|
||||
which has been released since the benchmarks were run. That said, these
|
||||
claims deserve re-validation.
|
||||
|
||||
[^5]:
|
||||
We're unsure as to what causes these 1-core swings.
|
||||
Runtime effects, such as garbage collection, may play a role; however, we
|
||||
would have expected these to show on shorter time-scales.
|
||||
This could also indicate a contention or thrashing issue 🤷.
|
||||
107
docs/src/content/docs/reference/faq.mdx
Normal file
@@ -0,0 +1,107 @@
|
||||
---
|
||||
title: FAQ
|
||||
description: Frequently Asked Questions
|
||||
---
|
||||
|
||||
## How is TrailBase different from PocketBase, SupaBase or other application bases?
|
||||
|
||||
Naturally there's a lot of overlap but let's start by saying that
|
||||
we're also huge fans of SupaBase and PocketBase.
|
||||
The former is incredibly versatile, while the latter is incredibly easy and
|
||||
cheap to use.
|
||||
As far as we can tell, PocketBase pioneered the notion of a single-file,
|
||||
SQLite-based, FireBase-like server.
|
||||
|
||||
TrailBase is an attempt at combining the flexibility and principled
|
||||
architecture of SupaBase with the ease and low-overhead of PocketBase.
|
||||
We owe a great debt of gratitude to both ❤️.
|
||||
|
||||
Let's address the elephant in the room: other more established solutions are
|
||||
more polished, may have more extensive feature sets in many areas, and have
|
||||
seen a lot more mileage.
|
||||
TrailBase is committed to catching up and challenging the status quo following our
|
||||
[principles](/getting-started/philosophy), and in many ways TrailBase is
|
||||
already incredibly easy to deploy and [blazingly fast](/reference/benchmarks).
|
||||
|
||||
We also offer some slightly more detailed comparisons to both
|
||||
[PocketBase](/comparison/pocketbase) and [SupaBase](/comparison/supabase).
|
||||
|
||||
## Is TrailBase ready for production use?
|
||||
|
||||
TrailBase has not seen a lot of mileage yet and there are probably plenty of
|
||||
sharp edges, which will take some time to smooth over.
|
||||
That said, it's also incredibly simple, easy to get on, and easy to get off.
|
||||
We're welcoming any brave soul who would like to be an early adopter.
|
||||
If you're curious and patient, we're ready to help you get off the ground in
|
||||
return for your honest feedback 🙏.
|
||||
You can take a look at the preliminary
|
||||
[productionization](/documentation/production) docs.
|
||||
|
||||
## Scale, performance and reliability
|
||||
|
||||
As my product grows, will TrailBase scale with me or will I hit a wall?
|
||||
Firstly, congratulations! The "success"-problem is a great problem to have 🎉.
|
||||
|
||||
Short, hand-wavy answer: you'll face all the same issues as with other
|
||||
solutions but you probably will be fine 😶🌫️ .
|
||||
|
||||
Long answer: TrailBase currently only scales vertically; however, it's incredibly
|
||||
fast. Besides, there's an inherent beauty to vertical scaling [^1] and modern
|
||||
servers can get you very, very far.
|
||||
You can absolutely support tens of thousands or even hundreds of thousands of
|
||||
concurrent users with a single database.
|
||||
With TrailBase's simple deployment, it may also be an option to shard your users
|
||||
or tenants across multiple databases. In the future, TrailBase would also like
|
||||
to support multi-database setups out-of-the-box to further improve concurrency.
|
||||
|
||||
Keep in mind that other databases, like MySQL or Postgres, aren't a silver bullet either.
|
||||
If you're reaching massive levels of scale, more specialized solutions will
|
||||
become more and more attractive, such as non-relational document stores,
|
||||
columnar OLAP stores for analytic workloads, ...
|
||||
|
||||
TrailBase explicitly tries to avoid tight coupling that locks you in. At the end
|
||||
of the day, you're using very plain SQLite, letting you adopt and drop
|
||||
TrailBase when it makes sense.
|
||||
Similarly, the stateless auth flow makes it easy to split out your logic and
|
||||
data while continuing to use TrailBase.
|
||||
|
||||
Besides pure scale and performance, more horizontally scaled solutions can provide
|
||||
additional benefits such as disaster-recovery/fail-over or improved edge read
|
||||
latency.
|
||||
Fortunately, both can be achieved with SQLite as well, using solutions like
|
||||
[LiteStream](https://litestream.io/) that keep eventually consistent copies of
|
||||
your data.
|
||||
|
||||
## Can we add features to TrailBase?
|
||||
|
||||
Yes! First take a look at our coarse [roadmap](/reference/roadmap); maybe we're
|
||||
already working on it?
|
||||
Otherwise, don't hesitate, just open an issue and ask away. We love to hear
|
||||
your thoughts.
|
||||
Contributions are also very welcome; let's just talk upfront to avoid any
|
||||
surprises.
|
||||
Especially in the early days, we'll have to see how "things" fit into the
|
||||
roadmap.
|
||||
For example, having a dark mode for the dashboard would be nice but it's also
|
||||
extra work to maintain while the dashboard is still rapidly changing, so it
|
||||
becomes a question of when.
|
||||
|
||||
## Data Import & Export
|
||||
|
||||
There are only a few requirements: collections need a `STRICT` table and an
|
||||
auto-incrementing primary key, but the dashboard will work with any table, view, etc.
|
||||
You can simply import and export data with standard SQLite tooling, e.g.:
|
||||
|
||||
```shell
|
||||
sqlite3 main.db < import.sql
|
||||
```
|
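For illustration, a minimal `import.sql` could look like the sketch below; the
`articles` table and its columns are hypothetical, but the `STRICT` keyword and
the auto-incrementing integer primary key mirror the collection requirements
mentioned above:

```sql
-- Hypothetical schema and seed data; adjust names and columns to your app.
CREATE TABLE IF NOT EXISTS articles (
  id    INTEGER PRIMARY KEY AUTOINCREMENT,
  title TEXT NOT NULL,
  body  TEXT NOT NULL DEFAULT ''
) STRICT;

INSERT INTO articles (title, body) VALUES
  ('Hello TrailBase', 'Imported via the sqlite3 CLI.'),
  ('Second post', 'More seed data.');
```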
||||
|
||||
Also check out the [getting started](/getting-started/first-app) guide.
|
||||
|
||||
<div class="h-[50px]" />
|
||||
|
||||
---
|
||||
|
||||
[^1]:
|
||||
Adopting more complex multi-tiered database solutions comes with its own
|
||||
challenges for operations, testing, and developer setups.
|
||||
7
docs/src/content/docs/reference/roadmap.mdx
Normal file
@@ -0,0 +1,7 @@
|
||||
---
|
||||
title: Roadmap
|
||||
---
|
||||
|
||||
import Roadmap from "../_roadmap.md";
|
||||
|
||||
<Roadmap />
|
||||
2
docs/src/env.d.ts
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
/// <reference path="../.astro/types.d.ts" />
|
||||
/// <reference types="astro/client" />
|
||||
24
docs/src/lib/darkmode.ts
Normal file
@@ -0,0 +1,24 @@
|
||||
import { createSignal, onCleanup, onMount } from "solid-js";
|
||||
import type { Accessor } from "solid-js";
|
||||
|
||||
export function createDarkMode(): Accessor<boolean> {
|
||||
const isDark = () => document.documentElement.dataset["theme"] === "dark";
|
||||
|
||||
const [darkMode, setDarkMode] = createSignal<boolean>(isDark());
|
||||
|
||||
let observer: MutationObserver | undefined;
|
||||
|
||||
onMount(() => {
|
||||
observer = new MutationObserver((mutations) => {
|
||||
mutations.forEach((mu) => {
|
||||
if (mu.type === "attributes" && mu.attributeName === "data-theme") {
|
||||
setDarkMode(isDark());
|
||||
}
|
||||
});
|
||||
});
|
||||
observer.observe(document.documentElement, { attributes: true });
|
||||
});
|
||||
onCleanup(() => observer?.disconnect());
|
||||
|
||||
return darkMode;
|
||||
}
|
||||
38
docs/src/tailwind.css
Normal file
@@ -0,0 +1,38 @@
|
||||
@tailwind base;
|
||||
@tailwind components;
|
||||
@tailwind utilities;
|
||||
|
||||
:root {
|
||||
--overlay-accent: #92d1fe30;
|
||||
}
|
||||
|
||||
:root[data-theme="dark"] {
|
||||
--overlay-accent: #92d1fe70;
|
||||
}
|
||||
|
||||
[data-has-hero] .page {
|
||||
background:
|
||||
linear-gradient(215deg, var(--overlay-accent), transparent 40%),
|
||||
radial-gradient(var(--overlay-accent), transparent 40%) no-repeat -60vw -40vh /
|
||||
105vw 200vh,
|
||||
radial-gradient(var(--overlay-accent), transparent 65%) no-repeat 50%
|
||||
calc(100% + 20rem) / 60rem 30rem;
|
||||
}
|
||||
|
||||
.card {
|
||||
border-radius: 0.75rem;
|
||||
}
|
||||
|
||||
[data-has-hero] header {
|
||||
border-bottom: 1px solid transparent;
|
||||
background-color: #ffffffc0;
|
||||
-webkit-backdrop-filter: blur(32px);
|
||||
backdrop-filter: blur(32px);
|
||||
}
|
||||
|
||||
[data-has-hero][data-theme="dark"] header {
|
||||
border-bottom: 1px solid transparent;
|
||||
background-color: transparent;
|
||||
-webkit-backdrop-filter: blur(32px);
|
||||
backdrop-filter: blur(32px);
|
||||
}
|
||||
33
docs/tailwind.config.mjs
Normal file
@@ -0,0 +1,33 @@
|
||||
import starlightPlugin from "@astrojs/starlight-tailwind";
|
||||
|
||||
// Generated color palettes
|
||||
const accent = {
|
||||
200: "#92d1fe",
|
||||
600: "#0073aa",
|
||||
900: "#003653",
|
||||
950: "#00273d",
|
||||
};
|
||||
const gray = {
|
||||
100: "#f3f7f9",
|
||||
200: "#e7eff2",
|
||||
300: "#bac4c8",
|
||||
400: "#7b8f96",
|
||||
500: "#495c62",
|
||||
700: "#2a3b41",
|
||||
800: "#182a2f",
|
||||
900: "#121a1c",
|
||||
};
|
||||
|
||||
/** @type {import('tailwindcss').Config} */
|
||||
export default {
|
||||
content: ["./src/**/*.{astro,html,js,jsx,md,mdx,svelte,ts,tsx,vue}"],
|
||||
theme: {
|
||||
extend: {
|
||||
colors: {
|
||||
accent,
|
||||
gray,
|
||||
},
|
||||
},
|
||||
},
|
||||
plugins: [starlightPlugin()],
|
||||
};
|
||||
18
docs/tsconfig.json
Normal file
@@ -0,0 +1,18 @@
|
||||
{
|
||||
"extends": "astro/tsconfigs/strict",
|
||||
"compilerOptions": {
|
||||
"jsx": "preserve",
|
||||
"jsxImportSource": "solid-js",
|
||||
"baseUrl": "./",
|
||||
"paths": {
|
||||
"@/*": ["./src/*"],
|
||||
"@assets/*": ["../assets/*"],
|
||||
"@common/*": ["../ui/common/*"]
|
||||
}
|
||||
},
|
||||
"exclude": [
|
||||
"dist",
|
||||
"node_modules",
|
||||
"public"
|
||||
]
|
||||
}
|
||||
5
examples/blog/Caddyfile
Normal file
@@ -0,0 +1,5 @@
|
||||
# example.com
|
||||
|
||||
localhost
|
||||
encode gzip zstd
|
||||
reverse_proxy blog:4000
|
||||
39
examples/blog/Dockerfile
Normal file
@@ -0,0 +1,39 @@
|
||||
# syntax = edrevo/dockerfile-plus
|
||||
|
||||
# NOTE: paths are relative to the build context, which is trailbase's root; otherwise we
|
||||
# cannot build the trailbase server as well.
|
||||
|
||||
INCLUDE+ Dockerfile
|
||||
|
||||
FROM chef AS webapp_builder
|
||||
|
||||
COPY examples/blog/web /app
|
||||
WORKDIR /app
|
||||
|
||||
RUN pnpm install --no-frozen-lockfile
|
||||
RUN pnpm run build
|
||||
|
||||
FROM debian:bookworm-slim AS runtime
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends tini curl
|
||||
|
||||
COPY --from=builder /app/target/x86_64-unknown-linux-gnu/release/trail /app/
|
||||
COPY --from=webapp_builder /app/dist /app/public
|
||||
|
||||
# When `docker run` is executed, launch the binary as unprivileged user.
|
||||
ENV USERNAME=trailbase
|
||||
RUN adduser \
|
||||
--disabled-password \
|
||||
--gecos "" \
|
||||
--home "$(pwd)" \
|
||||
--no-create-home \
|
||||
${USERNAME}
|
||||
USER ${USERNAME}
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
EXPOSE 4000
|
||||
ENTRYPOINT ["tini", "--"]
|
||||
|
||||
CMD ["/app/trail", "run"]
|
||||
|
||||
HEALTHCHECK CMD curl --fail http://localhost:4000/api/healthcheck || exit 1
|
||||
45
examples/blog/Makefile
Normal file
@@ -0,0 +1,45 @@
|
||||
outputs = \
|
||||
web/types/article.ts \
|
||||
web/types/profile.ts \
|
||||
web/types/new_profile.ts \
|
||||
flutter/lib/types/article.dart \
|
||||
flutter/lib/types/profile.dart \
|
||||
flutter/lib/types/new_profile.dart
|
||||
|
||||
types: $(outputs)
|
||||
|
||||
schema/article.json:
|
||||
cargo run -- schema articles_view --mode select > $@
|
||||
web/types/article.ts: schema/article.json
|
||||
pnpm quicktype -s schema $< -o $@
|
||||
flutter/lib/types/article.dart: schema/article.json
|
||||
pnpm quicktype -s schema $< -o $@
|
||||
|
||||
schema/new_article.json:
|
||||
cargo run -- schema articles_view --mode insert > $@
|
||||
web/types/new_article.ts: schema/new_article.json
|
||||
pnpm quicktype -s schema $< -o $@
|
||||
|
||||
schema/profile.json:
|
||||
cargo run -- schema profiles_view --mode select > $@
|
||||
web/types/profile.ts: schema/profile.json
|
||||
pnpm quicktype -s schema $< -o $@
|
||||
flutter/lib/types/profile.dart: schema/profile.json
|
||||
pnpm quicktype -s schema $< -o $@
|
||||
|
||||
schema/new_profile.json:
|
||||
cargo run -- schema profiles --mode insert > $@
|
||||
web/types/new_profile.ts: schema/new_profile.json
|
||||
pnpm quicktype -s schema $< -o $@
|
||||
flutter/lib/types/new_profile.dart: schema/new_profile.json
|
||||
pnpm quicktype -s schema $< -o $@
|
||||
|
||||
clean_data:
|
||||
rm -f traildepot/data/*
|
||||
|
||||
clean_types:
|
||||
rm -f schema/* web/types/* flutter/lib/types/*
|
||||
|
||||
clean: clean_data clean_types
|
||||
|
||||
.PHONY: clean clean_data clean_types
|
||||
80
examples/blog/README.md
Normal file
@@ -0,0 +1,80 @@
|
||||
# TrailBase Example: A Blog with Web and Mobile clients
|
||||
|
||||
The main goal of this example is to be easily digestible while showcasing many
|
||||
of TrailBase's capabilities both for web and cross-platform Flutter:
|
||||
|
||||
* Bootstrapping the database including schemas and dummy content through migrations.
|
||||
* End-to-end type-safety through code-generated data models for TypeScript,
|
||||
Dart and many more based on JSON Schema.
|
||||
* Built-in authentication flows (including OAuth) on web and Flutter, as well
|
||||
as a custom password-based login in Flutter.
|
||||
* API authorization: world-readable, user-editable, and moderator-manageable articles.
|
||||
* Different API types:
|
||||
* Table and View-based APIs for custom user profiles associating users with a
|
||||
username while keeping their email addresses private, as well as associating
|
||||
articles with usernames.
|
||||
* Virtual-table-based query API to expose "is_editor" authorization.
|
||||
* The web client illustrates two different styles: a consumer SPA and an
|
||||
HTML-only form-based authoring UI.
|
||||
|
||||
## Directory Structure
|
||||
|
||||
```
|
||||
.
|
||||
├── Caddyfile # Example reverse proxy for TLS termination
|
||||
├── Dockerfile # Example for bundling web app
|
||||
├── docker-compose.yml # Example setup with reverse proxy
|
||||
├── flutter #
|
||||
│ ├── lib # Flutter app lives here
|
||||
│ └── ... # Most other files are a default cross-platform setup
|
||||
├── Makefile # Builds JSON schemas and code-generates type definitions
|
||||
├── schema # Checked-in JSON schemas
|
||||
├── traildepot # Where TrailBase keeps its runtime data
|
||||
│ ├── backups # Periodic DB backups
|
||||
│ ├── data # Contains SQLite's DB and WAL
|
||||
│ ├── migrations # Bootstraps DB with schemas and dummy content
|
||||
│ ├── secrets # Nothing to see :)
|
||||
│ └── uploads # Local file uploads (will support S3 soon)
|
||||
└── web
|
||||
├── dist # Built/packaged web app
|
||||
├── src # Web app lives here
|
||||
└── types # Generated type definitions
|
||||
└── ...
|
||||
```
|
||||
|
||||
## Instructions
|
||||
|
||||
Generally speaking, there are roughly 2.5 moving parts to run the example, i.e.:
|
||||
we have to build the web UI, start the TrailBase server, and optionally start
|
||||
the Flutter app. Once you have `cargo`, `pnpm`, and `flutter` installed, you
|
||||
can simply run:
|
||||
|
||||
```bash
|
||||
# From within the blog examples base directory
|
||||
$ cd $REPO/examples/blog
|
||||
|
||||
# build and bundle the web app:
|
||||
$ pnpm --dir web build
|
||||
|
||||
# Start TrailBase:
|
||||
$ cargo run --bin trail -- run --public web/dist
|
||||
|
||||
# Start Flutter app:
|
||||
$ cd flutter
|
||||
$ flutter run -d <Device, e.g.: Linux, Chrome, Mobile Emulator, ...>
|
||||
```
|
||||
|
||||
You can also try the code generation:
|
||||
|
||||
```bash
|
||||
# Optionally delete the checked-in JSON schemas and code first
|
||||
$ make clean_types
|
||||
|
||||
# Generate JSON Schema and codegen types from DB schema (this requires that
|
||||
# you start TrailBase first to initialize the DB)
|
||||
$ make --always-make types
|
||||
```
|
||||
|
||||
## Reference
|
||||
|
||||
* The styling is based on: https://github.com/palmiak/pacamara-astro 🙏
|
||||
1
examples/blog/caddy/config/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
*
|
||||
1
examples/blog/caddy/data/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
*
|
||||
33
examples/blog/docker-compose.yml
Normal file
@@ -0,0 +1,33 @@
|
||||
services:
|
||||
|
||||
blog:
|
||||
# NOTE: We have to build relative to root to have a build context that
|
||||
# includes both: the trailbase server source and the demo webapp sources.
|
||||
# build: ../..
|
||||
# TODO: Build from "." once the Dockerfile can pull a base image from
|
||||
# dockerhub. We still need an example Dockerfile to build the UI.
|
||||
build:
|
||||
context: ../..
|
||||
dockerfile: examples/blog/Dockerfile
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
TRAIL_INITIAL_PASSWORD: secret
|
||||
ADDRESS: 0.0.0.0:4000
|
||||
PUBLIC_DIR: ./public
|
||||
DATA_DIR: ./traildepot
|
||||
volumes:
|
||||
- ./traildepot:/app/traildepot
|
||||
|
||||
caddy:
|
||||
image: caddy:2.8-alpine
|
||||
restart: unless-stopped
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
ports:
|
||||
- "80:80"
|
||||
- "443:443"
|
||||
- "443:443/udp"
|
||||
volumes:
|
||||
- ./Caddyfile:/etc/caddy/Caddyfile
|
||||
- ./caddy/data:/data
|
||||
- ./caddy/config:/config
|
||||