Herculean migration from libsql's rust bindings to plain rusqlite+SQLite.

There are a couple of reasons:

* As for the rust bindings: they're sub-par compared to rusqlite, which
  is amazing. Even libsql-server uses rusqlite over their own
  bindings. The bindings are missing features such as update hooks,
  and the hard-coded execution model suffers from lock congestion.
* We've fixed bugs (e.g. trivial null-pointer accesses that went
  unnoticed), raised issues, and tried to add missing functionality such
  as update hooks. It's unclear if the rust bindings are a priority or
  covered by the principles laid out in the libsql manifesto. From the
  outside it looks like focus has shifted to https://github.com/penberg/limbo.
* As for the C-libsql fork for SQLite itself, it's getting more and
  more outdated (2024-01-30 (3.45.1)) and it's unclear when and if the
  ideas from the manifesto will manifest.

Looking forward this opens the door for TrailBase to:

* Bundle more recent versions of SQLite.
* Implement more performant, better scaling execution models.
* Implement realtime APIs for subscribing to data changes.
This commit is contained in:
Sebastian Jeltsch
2024-11-29 13:35:55 +01:00
parent 9942669f86
commit 6cbea390fb
96 changed files with 1523 additions and 2108 deletions

6
.gitmodules vendored
View File

@@ -7,3 +7,9 @@
[submodule "vendor/sqlean/bundled/sqlean"]
path = vendor/sqlean/bundled/sqlean
url = https://github.com/trailbaseio/sqlean
[submodule "vendor/tokio-rusqlite"]
path = vendor/tokio-rusqlite
url = https://github.com/trailbaseio/tokio-rusqlite.git
[submodule "vendor/serde_rusqlite"]
path = vendor/serde_rusqlite
url = https://github.com/trailbaseio/serde_rusqlite.git

374
Cargo.lock generated
View File

@@ -428,6 +428,17 @@ version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"
[[package]]
name = "atty"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
dependencies = [
"hermit-abi 0.1.19",
"libc",
"winapi",
]
[[package]]
name = "auto-future"
version = "1.0.0"
@@ -574,12 +585,6 @@ dependencies = [
"windows-targets 0.52.6",
]
[[package]]
name = "barrel"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ad9e605929a6964efbec5ac0884bd0fe93f12a3b1eb271f52c251316640c68d9"
[[package]]
name = "base16ct"
version = "0.2.0"
@@ -649,24 +654,24 @@ dependencies = [
[[package]]
name = "bindgen"
version = "0.66.1"
version = "0.60.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2b84e06fc203107bfbad243f4aba2af864eb7db3b1cf46ea0a023b0b433d2a7"
checksum = "062dddbc1ba4aca46de6338e2bf87771414c335f7b2f2036e8f3e9befebf88e6"
dependencies = [
"bitflags",
"bitflags 1.3.2",
"cexpr",
"clang-sys",
"clap 3.2.25",
"env_logger 0.9.3",
"lazy_static",
"lazycell",
"log",
"peeking_take_while",
"prettyplease",
"proc-macro2",
"quote",
"regex",
"rustc-hash 1.1.0",
"shlex",
"syn 2.0.89",
"which 4.4.2",
]
@@ -676,7 +681,7 @@ version = "0.70.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f49d8fed880d473ea71efb9bf597651e77201bdd4893efe54c9e5d65ae04ce6f"
dependencies = [
"bitflags",
"bitflags 2.6.0",
"cexpr",
"clang-sys",
"itertools 0.13.0",
@@ -720,6 +725,12 @@ version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitflags"
version = "2.6.0"
@@ -828,9 +839,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "bytes"
version = "1.8.0"
version = "1.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da"
checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b"
dependencies = [
"serde",
]
@@ -858,9 +869,9 @@ dependencies = [
[[package]]
name = "cc"
version = "1.2.1"
version = "1.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd9de9f2205d5ef3fd67e685b0df337994ddd4495e2a28d185500d0e1edfea47"
checksum = "f34d93e62b03caf570cccc334cbc6c2fceca82f39211051345108adcba3eebdc"
dependencies = [
"shlex",
]
@@ -970,6 +981,21 @@ dependencies = [
"libloading",
]
[[package]]
name = "clap"
version = "3.2.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123"
dependencies = [
"atty",
"bitflags 1.3.2",
"clap_lex 0.2.4",
"indexmap 1.9.3",
"strsim 0.10.0",
"termcolor",
"textwrap",
]
[[package]]
name = "clap"
version = "4.5.21"
@@ -988,8 +1014,8 @@ checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec"
dependencies = [
"anstream",
"anstyle",
"clap_lex",
"strsim",
"clap_lex 0.7.3",
"strsim 0.11.1",
]
[[package]]
@@ -1004,6 +1030,15 @@ dependencies = [
"syn 2.0.89",
]
[[package]]
name = "clap_lex"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5"
dependencies = [
"os_str_bytes",
]
[[package]]
name = "clap_lex"
version = "0.7.3"
@@ -1101,7 +1136,7 @@ dependencies = [
"anes",
"cast",
"ciborium",
"clap",
"clap 4.5.21",
"criterion-plot",
"futures",
"is-terminal",
@@ -1130,6 +1165,15 @@ dependencies = [
"itertools 0.10.5",
]
[[package]]
name = "crossbeam-channel"
version = "0.5.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.5"
@@ -1225,7 +1269,7 @@ name = "custom-binary"
version = "0.1.0"
dependencies = [
"axum",
"env_logger",
"env_logger 0.11.5",
"tokio",
"tracing-subscriber",
"trailbase-core",
@@ -1332,7 +1376,7 @@ dependencies = [
"deno_ops",
"deno_unsync",
"futures",
"indexmap",
"indexmap 2.6.0",
"libc",
"memoffset",
"parking_lot",
@@ -1840,6 +1884,19 @@ dependencies = [
"regex",
]
[[package]]
name = "env_logger"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7"
dependencies = [
"atty",
"humantime",
"log",
"regex",
"termcolor",
]
[[package]]
name = "env_logger"
version = "0.11.5"
@@ -1861,12 +1918,12 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
[[package]]
name = "errno"
version = "0.3.9"
version = "0.3.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba"
checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d"
dependencies = [
"libc",
"windows-sys 0.52.0",
"windows-sys 0.59.0",
]
[[package]]
@@ -1902,12 +1959,6 @@ dependencies = [
"pin-project-lite",
]
[[package]]
name = "fallible-iterator"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7"
[[package]]
name = "fallible-iterator"
version = "0.3.0"
@@ -2246,7 +2297,7 @@ dependencies = [
"futures-core",
"futures-sink",
"http 1.1.0",
"indexmap",
"indexmap 2.6.0",
"slab",
"tokio",
"tokio-util",
@@ -2263,6 +2314,12 @@ dependencies = [
"crunchy",
]
[[package]]
name = "hashbrown"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
[[package]]
name = "hashbrown"
version = "0.14.5"
@@ -2281,9 +2338,9 @@ checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289"
[[package]]
name = "hashlink"
version = "0.8.4"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7"
checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af"
dependencies = [
"hashbrown 0.14.5",
]
@@ -2300,6 +2357,15 @@ version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
[[package]]
name = "hermit-abi"
version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
dependencies = [
"libc",
]
[[package]]
name = "hermit-abi"
version = "0.3.9"
@@ -2464,9 +2530,9 @@ dependencies = [
[[package]]
name = "http-range-header"
version = "0.4.1"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08a397c49fec283e3d6211adbe480be95aae5f304cfb923e9970e08956d5168a"
checksum = "9171a2ea8a68358193d15dd5d70c1c10a2afc3e7e4c5bc92bc9f025cebd7359c"
[[package]]
name = "httparse"
@@ -2724,6 +2790,16 @@ version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb56e1aa765b4b4f3aadfab769793b7087bb03a4ea4920644a6d238e2df5b9ed"
[[package]]
name = "indexmap"
version = "1.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
dependencies = [
"autocfg",
"hashbrown 0.12.3",
]
[[package]]
name = "indexmap"
version = "2.6.0"
@@ -2954,9 +3030,9 @@ dependencies = [
[[package]]
name = "libc"
version = "0.2.166"
version = "0.2.167"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2ccc108bbc0b1331bd061864e7cd823c0cab660bbe6970e66e2c0614decde36"
checksum = "09d6582e104315a817dff97f75133544b2e094ee22447d2acf4a74e189ba06fc"
[[package]]
name = "libloading"
@@ -2985,66 +3061,14 @@ dependencies = [
]
[[package]]
name = "libsql"
version = "0.6.0"
name = "libsqlite3-sys"
version = "0.30.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe18646e4ef8db446bc3e3f5fb96131483203bc5f4998ff149f79a067530c01c"
checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149"
dependencies = [
"async-trait",
"bitflags",
"bytes",
"futures",
"libsql-sys",
"serde",
"thiserror 1.0.69",
"tracing",
]
[[package]]
name = "libsql-ffi"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5919d202c2d296b4c44b6877d1b67fe6ad8f18520ce74bd70a29c383e44ccbee"
dependencies = [
"bindgen 0.66.1",
"cc",
]
[[package]]
name = "libsql-ffi"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f2a50a585a1184a43621a9133b7702ba5cb7a87ca5e704056b19d8005de6faf"
dependencies = [
"bindgen 0.66.1",
"cc",
]
[[package]]
name = "libsql-rusqlite"
version = "0.32.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b811f72e13b9864601197d234621ffe89a490b2cb034cf28753b111334cf1db3"
dependencies = [
"bitflags",
"fallible-iterator 0.2.0",
"fallible-streaming-iterator",
"hashlink",
"libsql-ffi 0.4.1",
"smallvec",
]
[[package]]
name = "libsql-sys"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c05b61c226781d6f5e26e3e7364617f19c0c1d5332035802e9229d6024cec05"
dependencies = [
"bytes",
"libsql-ffi 0.5.0",
"once_cell",
"tracing",
"zerocopy",
"pkg-config",
"vcpkg",
]
[[package]]
@@ -3510,6 +3534,12 @@ version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
[[package]]
name = "os_str_bytes"
version = "6.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1"
[[package]]
name = "outref"
version = "0.1.0"
@@ -3656,7 +3686,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db"
dependencies = [
"fixedbitset",
"indexmap",
"indexmap 2.6.0",
]
[[package]]
@@ -3776,6 +3806,12 @@ dependencies = [
"spki",
]
[[package]]
name = "pkg-config"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2"
[[package]]
name = "plotters"
version = "0.3.7"
@@ -4223,7 +4259,7 @@ version = "0.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f"
dependencies = [
"bitflags",
"bitflags 2.6.0",
]
[[package]]
@@ -4275,6 +4311,7 @@ dependencies = [
"cfg-if",
"log",
"regex",
"rusqlite",
"siphasher 1.0.1",
"thiserror 1.0.69",
"time",
@@ -4282,20 +4319,6 @@ dependencies = [
"walkdir",
]
[[package]]
name = "refinery-libsql"
version = "0.0.1"
dependencies = [
"async-trait",
"barrel",
"libsql",
"refinery",
"refinery-core",
"tempfile",
"time",
"tokio",
]
[[package]]
name = "refinery-macros"
version = "0.8.14"
@@ -4449,6 +4472,20 @@ dependencies = [
"zeroize",
]
[[package]]
name = "rusqlite"
version = "0.32.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7753b721174eb8ff87a9a0e799e2d7bc3749323e773db92e0984debb00019d6e"
dependencies = [
"bitflags 2.6.0",
"fallible-iterator",
"fallible-streaming-iterator",
"hashlink",
"libsqlite3-sys",
"smallvec",
]
[[package]]
name = "rust-embed"
version = "8.5.0"
@@ -4542,7 +4579,7 @@ version = "0.38.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6"
dependencies = [
"bitflags",
"bitflags 2.6.0",
"errno",
"libc",
"linux-raw-sys",
@@ -4753,7 +4790,7 @@ version = "2.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02"
dependencies = [
"bitflags",
"bitflags 2.6.0",
"core-foundation 0.9.4",
"core-foundation-sys",
"libc",
@@ -4766,7 +4803,7 @@ version = "3.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1415a607e92bec364ea2cf9264646dcce0f91e6d65281bd6f2819cca3bf39c8"
dependencies = [
"bitflags",
"bitflags 2.6.0",
"core-foundation 0.10.0",
"core-foundation-sys",
"libc",
@@ -4850,7 +4887,7 @@ version = "1.0.133"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377"
dependencies = [
"indexmap",
"indexmap 2.6.0",
"itoa",
"memchr",
"ryu",
@@ -4867,6 +4904,16 @@ dependencies = [
"serde",
]
[[package]]
name = "serde_rusqlite"
version = "0.36.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b741cc5ef185cd96157e762c3bba743c4e94c8dc6af0edb053c48d2b3c27e691"
dependencies = [
"rusqlite",
"serde",
]
[[package]]
name = "serde_urlencoded"
version = "0.7.1"
@@ -5036,9 +5083,9 @@ dependencies = [
[[package]]
name = "socket2"
version = "0.5.7"
version = "0.5.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c"
checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8"
dependencies = [
"libc",
"windows-sys 0.52.0",
@@ -5104,7 +5151,8 @@ version = "0.0.1"
dependencies = [
"bindgen 0.70.1",
"cc",
"libsql-ffi 0.5.0",
"libsqlite3-sys",
"rusqlite",
]
[[package]]
@@ -5123,11 +5171,12 @@ dependencies = [
name = "sqlite-loadable"
version = "0.0.6-alpha.6"
dependencies = [
"bitflags",
"libsql-ffi 0.5.0",
"bitflags 2.6.0",
"libsqlite3-sys",
"serde",
"serde_json",
"sqlite-loadable-macros",
"sqlite3ext-sys",
]
[[package]]
@@ -5136,7 +5185,7 @@ version = "0.0.3"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.89",
"syn 1.0.109",
]
[[package]]
@@ -5145,10 +5194,10 @@ version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eb5307dad6cb84730ce8bdefde56ff4cf95fe516972d52e2bbdc8a8cd8f2520b"
dependencies = [
"bitflags",
"bitflags 2.6.0",
"cc",
"fallible-iterator 0.3.0",
"indexmap",
"fallible-iterator",
"indexmap 2.6.0",
"log",
"memchr",
"phf",
@@ -5157,6 +5206,13 @@ dependencies = [
"uncased",
]
[[package]]
name = "sqlite3ext-sys"
version = "0.0.1"
dependencies = [
"bindgen 0.60.1",
]
[[package]]
name = "stable_deref_trait"
version = "1.2.0"
@@ -5200,6 +5256,12 @@ version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04028eeb851ed08af6aba5caa29f2d59a13ed168cee4d6bd753aeefcf1d636b0"
[[package]]
name = "strsim"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
[[package]]
name = "strsim"
version = "0.11.1"
@@ -5307,7 +5369,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4740e53eaf68b101203c1df0937d5161a29f3c13bceed0836ddfe245b72dd000"
dependencies = [
"anyhow",
"indexmap",
"indexmap 2.6.0",
"serde",
"serde_json",
"swc_cached",
@@ -5332,7 +5394,7 @@ version = "0.118.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a6f866d12e4d519052b92a0a86d1ac7ff17570da1272ca0c89b3d6f802cd79df"
dependencies = [
"bitflags",
"bitflags 2.6.0",
"is-macro",
"num-bigint",
"phf",
@@ -5418,8 +5480,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "65f21494e75d0bd8ef42010b47cabab9caaed8f2207570e809f6f4eb51a710d1"
dependencies = [
"better_scoped_tls",
"bitflags",
"indexmap",
"bitflags 2.6.0",
"indexmap 2.6.0",
"once_cell",
"phf",
"rustc-hash 1.1.0",
@@ -5488,7 +5550,7 @@ checksum = "76c76d8b9792ce51401d38da0fa62158d61f6d80d16d68fe5b03ce4bf5fba383"
dependencies = [
"base64 0.21.7",
"dashmap",
"indexmap",
"indexmap 2.6.0",
"once_cell",
"serde",
"sha1",
@@ -5528,7 +5590,7 @@ version = "0.134.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "029eec7dd485923a75b5a45befd04510288870250270292fc2c1b3a9e7547408"
dependencies = [
"indexmap",
"indexmap 2.6.0",
"num_cpus",
"once_cell",
"rustc-hash 1.1.0",
@@ -5692,6 +5754,12 @@ dependencies = [
"serde",
]
[[package]]
name = "textwrap"
version = "0.16.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9"
[[package]]
name = "thiserror"
version = "1.0.69"
@@ -5837,6 +5905,17 @@ dependencies = [
"syn 2.0.89",
]
[[package]]
name = "tokio-rusqlite"
version = "0.6.0"
dependencies = [
"crossbeam-channel",
"rusqlite",
"serde",
"serde_rusqlite",
"tokio",
]
[[package]]
name = "tokio-rustls"
version = "0.26.0"
@@ -5928,7 +6007,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697"
dependencies = [
"async-compression",
"bitflags",
"bitflags 2.6.0",
"bytes",
"futures-core",
"futures-util",
@@ -6024,14 +6103,14 @@ version = "0.2.0"
dependencies = [
"axum",
"chrono",
"clap",
"env_logger",
"libsql",
"clap 4.5.21",
"env_logger 0.11.5",
"log",
"mimalloc",
"serde",
"serde_json",
"tokio",
"tokio-rusqlite",
"tracing-subscriber",
"trailbase-core",
"utoipa",
@@ -6058,19 +6137,17 @@ dependencies = [
"cookie",
"criterion",
"ed25519-dalek",
"env_logger",
"fallible-iterator 0.3.0",
"env_logger 0.11.5",
"fallible-iterator",
"form_urlencoded",
"futures",
"indexmap",
"indexmap 2.6.0",
"indoc",
"itertools 0.13.0",
"jsonschema",
"jsonwebtoken",
"lazy_static",
"lettre",
"libsql",
"libsql-rusqlite",
"log",
"minijinja",
"oauth2",
@@ -6083,9 +6160,9 @@ dependencies = [
"quoted_printable",
"rand",
"refinery",
"refinery-libsql",
"regex",
"reqwest",
"rusqlite",
"rust-embed",
"rustyscript",
"schemars",
@@ -6098,7 +6175,9 @@ dependencies = [
"sqlite3-parser",
"temp-dir",
"thiserror 2.0.3",
"thread_local",
"tokio",
"tokio-rusqlite",
"tower 0.5.1",
"tower-cookies",
"tower-http",
@@ -6121,16 +6200,15 @@ dependencies = [
"argon2",
"base64 0.22.1",
"jsonschema",
"libsql",
"lru",
"maxminddb",
"parking_lot",
"rand",
"regex",
"rusqlite",
"serde_json",
"sqlean",
"sqlite-loadable",
"tokio",
"uuid",
"validator",
]
@@ -6142,8 +6220,8 @@ dependencies = [
"infer",
"jsonschema",
"lazy_static",
"libsql",
"log",
"rusqlite",
"schemars",
"serde",
"serde_json",
@@ -6371,7 +6449,7 @@ version = "5.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "514a48569e4e21c86d0b84b5612b5e73c0b2cf09db63260134ba426d4e8ea714"
dependencies = [
"indexmap",
"indexmap 2.6.0",
"serde",
"serde_json",
"utoipa-gen",
@@ -6434,7 +6512,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c23b5c2caff00209b03a716609b275acae94b02dd3b63c4648e7232a84a8402f"
dependencies = [
"bindgen 0.70.1",
"bitflags",
"bitflags 2.6.0",
"fslock",
"gzip-header",
"home",
@@ -6471,6 +6549,12 @@ version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ef4c4aa54d5d05a279399bfa921ec387b7aba77caf7a682ae8d86785b8fdad2"
[[package]]
name = "vcpkg"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
[[package]]
name = "version_check"
version = "0.9.5"
@@ -7048,7 +7132,7 @@ dependencies = [
"crossbeam-utils",
"displaydoc",
"flate2",
"indexmap",
"indexmap 2.6.0",
"memchr",
"thiserror 2.0.3",
"zopfli",

View File

@@ -6,8 +6,8 @@ members = [
"trailbase-core",
"trailbase-extension",
"trailbase-sqlite",
"vendor/refinery-libsql",
"vendor/sqlean",
"vendor/tokio-rusqlite",
]
default-members = [
"trailbase-cli",
@@ -30,12 +30,10 @@ lto = true
codegen-units = 1
[workspace.dependencies]
libsql = { package = "libsql", version = "^0.6.0", default-features = false, features = ["core", "serde"] }
refinery = { package = "refinery", path = "vendor/refinery/refinery", default-features = false }
refinery = { package = "refinery", path = "vendor/refinery/refinery", default-features = false, features = ["rusqlite"] }
refinery-core = { package = "refinery-core", path = "vendor/refinery/refinery_core" }
refinery-libsql = { package = "refinery-libsql", path = "vendor/refinery-libsql" }
rusqlite = { package = "libsql-rusqlite", version = "^0.32", default-features = false, features = [
"libsql-experimental",
rusqlite = { version = "^0.32.1", default-features = false, features = [
"bundled",
"column_decltype",
"load_extension",
"modern_sqlite",
@@ -43,4 +41,6 @@ rusqlite = { package = "libsql-rusqlite", version = "^0.32", default-features =
"limits",
"backup",
] }
sqlite-loadable = { package = "sqlite-loadable", path = "./vendor/sqlite-loadable", features=["static"] }
serde_rusqlite = { package = "serde_rusqlite", path = "./vendor/serde_rusqlite" }
sqlite-loadable = { package = "sqlite-loadable", path = "./vendor/sqlite-loadable", features=["static"] }
tokio-rusqlite = { package = "tokio-rusqlite", path = "./vendor/tokio-rusqlite" }

View File

@@ -17,7 +17,7 @@ TrailBase was born out of admiration for PocketBase trying to move the needle
in a few areas:
- Less abstraction, embracing standards (SQL[^1], JWT, UUID), and untethered access
to SQLite/libsql[^2] including features such as recursive CTEs, virtual tables
to SQLite[^2] including features such as recursive CTEs, virtual tables
and vector search.
The goal is to not get in your way and avoid lock-in by bespoke solutions
making it easier adopt TrailBase either fully or as piece-meal as well as
@@ -52,7 +52,7 @@ Likewise, TrailBase has a few nifty tricks up its sleeve:
being enforced all the way down to the database level[^4].
- TrailBase's JavaScript runtime supports full ES6, TypeScript transpilation,
and is built on V8 making it [~45x faster](/reference/benchmarks/).
- First-class access to all of SQLite/libsql's features and capabilities.
- First-class access to all of SQLite's features and capabilities.
- A simple auth UI.
- Stateless JWT auth-tokens for simple, hermetic authentication in other
backends.
@@ -73,7 +73,7 @@ To our own surprise, we found a significant gap. TrailBase is roughly 3.5x to
depending on the use-case.
Not to toot our own horn, this is mostly thanks to combining a very low
overhead language, one of the fastest HTTP servers, a V8 engine, and incredibly
quick SQLite/libsql.
quick SQLite.
<div class="h-[30px]" />

View File

@@ -43,7 +43,7 @@ import { Duration100kInsertsChart } from "./reference/_benchmarks/benchmarks.tsx
* Rust: one of the lowest overhead languages,
* Axum: one of the fastest HTTP servers,
* SQLite/Libsql: one of the fastest full-SQL databases,
* SQLite: one of the fastest full-SQL databases,
* V8: one of the fastest JS engines.
TrailBase APIs are [6-7x faster than PocketBase's and 15x faster than SupaBase's

View File

@@ -114,7 +114,7 @@ Looking at SupaBase's memory usage, it increased from from roughly 6GB at rest t
7GB fully loaded.
This means that out of the box, SupaBase has roughly 50 times the memory
footprint of either PocketBase or TrailBase.
In all fairness, a lot SupaBase's functionality isn't needed for this benchmark
In all fairness, a lot of SupaBase's functionality isn't needed for this benchmark
and it might be possible to shed less critical services, e.g. removing
*supabase-analytics* would save ~40% of memory.
That said, we don't know how feasible this is in practice.

View File

@@ -16,12 +16,12 @@ chrono = "^0.4.38"
clap = { version = "^4.4.11", features=["derive", "env"] }
env_logger = "^0.11.3"
trailbase-core = { path = "../trailbase-core" }
libsql = { workspace = true }
log = "^0.4.21"
mimalloc = { version = "^0.1.41", default-features = false }
serde = { version = "^1.0.203", features = ["derive"] }
serde_json = "^1.0.117"
tokio = { version = "^1.38.0", features=["macros", "rt-multi-thread", "fs", "signal"] }
tokio-rusqlite = { workspace = true }
tracing-subscriber = "0.3.18"
utoipa = { version = "5.0.0-beta.0", features = ["axum_extras"], optional = true }
utoipa-swagger-ui = { version = "8.0.1", features = ["axum"], optional = true }

View File

@@ -5,14 +5,13 @@ static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
use chrono::TimeZone;
use clap::{CommandFactory, Parser};
use libsql::{de, params};
use log::*;
use serde::Deserialize;
use std::rc::Rc;
use tokio::{fs, io::AsyncWriteExt};
use tracing_subscriber::{filter, prelude::*};
use trailbase_core::{
api::{self, init_app_state, query_one_row, Email, InitArgs, TokenClaims},
api::{self, init_app_state, Email, InitArgs, TokenClaims},
constants::USER_TABLE,
DataDir, Server, ServerOptions,
};
@@ -54,15 +53,20 @@ impl DbUser {
}
}
async fn get_user_by_email(conn: &libsql::Connection, email: &str) -> Result<DbUser, BoxError> {
return Ok(de::from_row(
&query_one_row(
conn,
async fn get_user_by_email(
conn: &tokio_rusqlite::Connection,
email: &str,
) -> Result<DbUser, BoxError> {
if let Some(user) = conn
.query_value::<DbUser>(
&format!("SELECT * FROM {USER_TABLE} WHERE email = $1"),
params!(email),
(email.to_string(),),
)
.await?,
)?);
.await?
{
return Ok(user);
}
return Err("not found".into());
}
async fn async_main() -> Result<(), BoxError> {
@@ -154,7 +158,11 @@ async fn async_main() -> Result<(), BoxError> {
Some(SubCommands::Schema(cmd)) => {
init_logger(false);
let conn = api::connect_sqlite(Some(data_dir.main_db_path()), None).await?;
let conn = tokio_rusqlite::Connection::from_conn(api::connect_sqlite(
Some(data_dir.main_db_path()),
None,
)?)
.await?;
let table_metadata = api::TableMetadataCache::new(conn.clone()).await?;
let table_name = &cmd.table;
@@ -194,17 +202,20 @@ async fn async_main() -> Result<(), BoxError> {
Some(SubCommands::Admin { cmd }) => {
init_logger(false);
let conn = api::connect_sqlite(Some(data_dir.main_db_path()), None).await?;
let conn = tokio_rusqlite::Connection::from_conn(api::connect_sqlite(
Some(data_dir.main_db_path()),
None,
)?)
.await?;
match cmd {
Some(AdminSubCommands::List) => {
let mut rows = conn
.query(&format!("SELECT * FROM {USER_TABLE} WHERE admin > 0"), ())
let users = conn
.query_values::<DbUser>(&format!("SELECT * FROM {USER_TABLE} WHERE admin > 0"), ())
.await?;
println!("{: >36}\temail\tcreated\tupdated", "id");
while let Some(row) = rows.next().await? {
let user: DbUser = de::from_row(&row)?;
for user in users {
let id = user.uuid();
println!(
@@ -219,7 +230,7 @@ async fn async_main() -> Result<(), BoxError> {
conn
.execute(
&format!("UPDATE {USER_TABLE} SET admin = FALSE WHERE email = $1"),
params!(email.clone()),
(email.clone(),),
)
.await?;
@@ -229,7 +240,7 @@ async fn async_main() -> Result<(), BoxError> {
conn
.execute(
&format!("UPDATE {USER_TABLE} SET admin = TRUE WHERE email = $1"),
params!(email.clone()),
(email.clone(),),
)
.await?;
@@ -246,7 +257,11 @@ async fn async_main() -> Result<(), BoxError> {
init_logger(false);
let data_dir = DataDir(args.data_dir);
let conn = api::connect_sqlite(Some(data_dir.main_db_path()), None).await?;
let conn = tokio_rusqlite::Connection::from_conn(api::connect_sqlite(
Some(data_dir.main_db_path()),
None,
)?)
.await?;
match cmd {
Some(UserSubCommands::ResetPassword { email, password }) => {

View File

@@ -21,7 +21,7 @@ axum = { version = "^0.7.5", features=["multipart"] }
axum-client-ip = "0.6.0"
axum-extra = { version = "^0.9.3", default-features = false, features=["protobuf"] }
base64 = { version = "0.22.1", default-features = false }
bytes = "1.8.0"
bytes = { version = "1.8.0", features = ["serde"] }
chrono = "^0.4.38"
cookie = "0.18.1"
ed25519-dalek = { version = "2.1.1", features=["pkcs8", "pem", "rand_core"] }
@@ -36,7 +36,6 @@ jsonschema = { version = "0.26.0", default-features = false }
jsonwebtoken = { version = "^9.3.0", default-features = false, features = ["use_pem"] }
lazy_static = "1.4.0"
lettre = { version = "^0.11.7", default-features = false, features = ["tokio1-rustls-tls", "sendmail-transport", "smtp-transport", "builder"] }
libsql = { workspace = true }
log = "^0.4.21"
minijinja = "2.1.2"
oauth2 = { version = "5.0.0-alpha.4", default-features = false, features = ["reqwest", "rustls-tls"] }
@@ -46,7 +45,6 @@ prost = "^0.12.6"
prost-reflect = { version = "^0.13.0", features = ["derive", "text-format"] }
rand = "0.8.5"
refinery = { workspace = true }
refinery-libsql = { workspace = true }
regex = "1.11.0"
reqwest = { version = "0.12.8", default-features = false, features = ["rustls-tls", "json"] }
rusqlite = { workspace = true }
@@ -60,7 +58,9 @@ sha2 = "0.10.8"
sqlformat = "0.3.1"
sqlite3-parser = "0.13.0"
thiserror = "2.0.1"
thread_local = "1.1.8"
tokio = { version = "^1.38.0", features=["macros", "rt-multi-thread", "fs", "signal", "time"] }
tokio-rusqlite = { workspace = true }
tower-cookies = { version = "0.10.0" }
tower-http = { version = "^0.6.0", features=["cors", "trace", "fs", "limit"] }
tower-service = "0.3.3"

View File

@@ -6,20 +6,20 @@ use axum::body::Body;
use axum::extract::{Json, State};
use axum::http::{self, Request};
use base64::prelude::*;
use libsql::{params, Connection};
use std::sync::{Arc, Mutex};
use tokio_rusqlite::params;
use tower::{Service, ServiceExt};
use trailbase_core::config::proto::PermissionFlag;
use trailbase_core::records::Acls;
use trailbase_core::api::{
create_user_handler, login_with_password, query_one_row, CreateUserRequest,
};
use trailbase_core::api::{create_user_handler, login_with_password, CreateUserRequest};
use trailbase_core::constants::RECORD_API_PATH;
use trailbase_core::records::{add_record_api, AccessRules};
use trailbase_core::{DataDir, Server, ServerOptions};
async fn create_chat_message_app_tables(conn: &Connection) -> Result<(), libsql::Error> {
async fn create_chat_message_app_tables(
conn: &tokio_rusqlite::Connection,
) -> Result<(), tokio_rusqlite::Error> {
// Create a messages, chat room and members tables.
conn
.execute_batch(
@@ -55,23 +55,27 @@ async fn create_chat_message_app_tables(conn: &Connection) -> Result<(), libsql:
return Ok(());
}
async fn add_room(conn: &Connection, name: &str) -> Result<[u8; 16], libsql::Error> {
let room: [u8; 16] = query_one_row(
conn,
"INSERT INTO room (name) VALUES ($1) RETURNING id",
params!(name),
)
.await?
.get(0)?;
async fn add_room(
conn: &tokio_rusqlite::Connection,
name: &str,
) -> Result<[u8; 16], anyhow::Error> {
let room: [u8; 16] = conn
.query_row(
"INSERT INTO room (name) VALUES ($1) RETURNING id",
params!(name.to_string()),
)
.await?
.unwrap()
.get(0)?;
return Ok(room);
}
async fn add_user_to_room(
conn: &Connection,
conn: &tokio_rusqlite::Connection,
user: [u8; 16],
room: [u8; 16],
) -> Result<(), libsql::Error> {
) -> Result<(), tokio_rusqlite::Error> {
conn
.execute(
"INSERT INTO room_members (user, room) VALUES ($1, $2)",

View File

@@ -6,8 +6,12 @@ use thiserror::Error;
#[derive(Debug, Error)]
pub enum AdminError {
#[error("Libsql error: {0}")]
Libsql(#[from] libsql::Error),
#[error("TokioRusqlite error: {0}")]
TokioRusqlite(#[from] tokio_rusqlite::Error),
#[error("Rusqlite error: {0}")]
Rusqlite(#[from] rusqlite::Error),
#[error("Rusqlite FromSql error: {0}")]
FromSql(#[from] rusqlite::types::FromSqlError),
#[error("Deserialization error: {0}")]
Deserialization(#[from] serde::de::value::Error),
#[error("JsonSerialization error: {0}")]

View File

@@ -4,11 +4,9 @@ use axum::{
};
use chrono::{DateTime, Duration, Utc};
use lazy_static::lazy_static;
use libsql::{de, params::Params, Connection};
use log::*;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use trailbase_sqlite::query_one_row;
use ts_rs::TS;
use uuid::Uuid;
@@ -116,13 +114,13 @@ pub async fn list_logs_handler(
let filter_where_clause = build_filter_where_clause(&table_metadata, filter_params)?;
let total_row_count = {
let row = query_one_row(
let row = crate::util::query_one_row(
conn,
&format!(
"SELECT COUNT(*) FROM {LOGS_TABLE_NAME} WHERE {clause}",
clause = filter_where_clause.clause
),
Params::Named(filter_where_clause.params.clone()),
filter_where_clause.params.clone(),
)
.await?;
@@ -184,7 +182,7 @@ pub async fn list_logs_handler(
}
async fn fetch_logs(
conn: &Connection,
conn: &tokio_rusqlite::Connection,
filter_where_clause: WhereClause,
cursor: Option<[u8; 16]>,
order: Vec<(String, Order)>,
@@ -192,10 +190,16 @@ async fn fetch_logs(
) -> Result<Vec<LogQuery>, Error> {
let mut params = filter_where_clause.params;
let mut where_clause = filter_where_clause.clause;
params.push((":limit".to_string(), libsql::Value::Integer(limit as i64)));
params.push((
":limit".to_string(),
tokio_rusqlite::Value::Integer(limit as i64),
));
if let Some(cursor) = cursor {
params.push((":cursor".to_string(), libsql::Value::Blob(cursor.to_vec())));
params.push((
":cursor".to_string(),
tokio_rusqlite::Value::Blob(cursor.to_vec()),
));
where_clause = format!("{where_clause} AND log.id < :cursor",);
}
@@ -226,17 +230,7 @@ async fn fetch_logs(
"#,
);
let mut rows = conn.query(&sql_query, Params::Named(params)).await?;
let mut logs: Vec<LogQuery> = vec![];
while let Ok(Some(row)) = rows.next().await {
match de::from_row(&row) {
Ok(log) => logs.push(log),
Err(err) => warn!("failed: {err}"),
};
}
return Ok(logs);
return Ok(conn.query_values::<LogQuery>(&sql_query, params).await?);
}
#[derive(Debug, Serialize, TS)]
@@ -256,7 +250,7 @@ struct FetchAggregateArgs {
}
async fn fetch_aggregate_stats(
conn: &Connection,
conn: &tokio_rusqlite::Connection,
args: &FetchAggregateArgs,
) -> Result<Stats, Error> {
let filter_clause = args
@@ -290,10 +284,10 @@ async fn fetch_aggregate_stats(
"#
);
use libsql::Value::Integer;
use tokio_rusqlite::Value::Integer;
let from_seconds = args.from.timestamp();
let interval_seconds = args.interval.num_seconds();
let mut params: Vec<(String, libsql::Value)> = vec![
let mut params: Vec<(String, tokio_rusqlite::Value)> = vec![
(":interval_seconds".to_string(), Integer(interval_seconds)),
(":from_seconds".to_string(), Integer(from_seconds)),
(":to_seconds".to_string(), Integer(args.to.timestamp())),
@@ -303,12 +297,10 @@ async fn fetch_aggregate_stats(
params.extend(filter.params.clone())
}
let mut rows = conn.query(&qps_query, Params::Named(params)).await?;
let rows = conn.query_values::<AggRow>(&qps_query, params).await?;
let mut rate: Vec<(i64, f64)> = vec![];
while let Ok(Some(row)) = rows.next().await {
let r: AggRow = de::from_row(&row)?;
for r in rows.iter() {
// The oldest interval may be clipped if "(to-from)/interval" isn't integer. In this case
// dividide by a shorter interval length to reduce artifacting. Otherwise, the clipped
// interval would appear to have a lower rater.
@@ -338,10 +330,10 @@ async fn fetch_aggregate_stats(
"#
);
let mut rows = conn.query(&cc_query, ()).await?;
let rows = conn.query(&cc_query, ()).await?;
let mut country_codes = HashMap::<String, usize>::new();
while let Ok(Some(row)) = rows.next().await {
for row in rows.iter() {
let cc: Option<String> = row.get(0)?;
let count: i64 = row.get(1)?;
@@ -372,8 +364,11 @@ mod tests {
#[tokio::test]
async fn test_aggregate_rate_computation() {
let conn = trailbase_sqlite::connect_sqlite(None, None).await.unwrap();
apply_logs_migrations(conn.clone()).await.unwrap();
let mut conn_sync = trailbase_sqlite::connect_sqlite(None, None).unwrap();
apply_logs_migrations(&mut conn_sync).unwrap();
let conn = tokio_rusqlite::Connection::from_conn(conn_sync)
.await
.unwrap();
let interval_seconds = 600;
let to = DateTime::parse_from_rfc3339("1996-12-22T12:00:00Z").unwrap();

View File

@@ -36,7 +36,7 @@ pub async fn query_handler(
//
// In the end we really want to allow executing all constructs as valid to sqlite. As such we
// best effort parse the statements to see if need to invalidate the table cache and otherwise
// fall back to libsql's execute batch which materializes all rows and invalidate anyway.
// fall back to execute batch which materializes all rows and invalidate anyway.
// Check the statements are correct before executing anything, just to be sure.
let statements = sqlite3_parse_into_statements(&request.query)?;
@@ -66,15 +66,8 @@ pub async fn query_handler(
state.table_metadata().invalidate_all().await?;
}
let mut batched_rows = batched_rows_result?;
let mut prev: Option<libsql::Rows> = None;
while let Some(maybe_rows) = batched_rows.next_stmt_row() {
prev = maybe_rows;
}
if let Some(result_rows) = prev {
let (rows, columns) = rows_to_json_arrays(result_rows, 1024).await?;
if let Some(rows) = batched_rows_result? {
let (rows, columns) = rows_to_json_arrays(rows, 1024)?;
return Ok(Json(QueryResponse { columns, rows }));
}

View File

@@ -99,7 +99,6 @@ pub async fn delete_rows_handler(
#[cfg(test)]
mod tests {
use axum::extract::{Json, Path, RawQuery, State};
use trailbase_sqlite::query_one_row;
use super::*;
use crate::admin::rows::insert_row::insert_row;
@@ -184,9 +183,11 @@ mod tests {
};
let count = || async {
query_one_row(conn, &format!("SELECT COUNT(*) FROM '{table_name}'"), ())
conn
.query_row(&format!("SELECT COUNT(*) FROM '{table_name}'"), ())
.await
.unwrap()
.unwrap()
.get::<i64>(0)
.unwrap()
};

View File

@@ -41,5 +41,5 @@ pub(crate) async fn insert_row(
)
.await?;
return Ok(row_to_json_array(row)?);
return Ok(row_to_json_array(&row)?);
}

View File

@@ -1,12 +1,10 @@
use axum::extract::{Json, Path, RawQuery, State};
use libsql::{params::Params, Connection};
use log::*;
use serde::Serialize;
use std::sync::Arc;
use ts_rs::TS;
use crate::admin::AdminError as Error;
use crate::api::query_one_row;
use crate::app_state::AppState;
use crate::listing::{
build_filter_where_clause, limit_or_default, parse_query, Order, WhereClause,
@@ -57,12 +55,13 @@ pub async fn list_rows_handler(
let total_row_count = {
let where_clause = &filter_where_clause.clause;
let count_query = format!("SELECT COUNT(*) FROM '{table_name}' WHERE {where_clause}");
let row = query_one_row(
let row = crate::util::query_one_row(
state.conn(),
&count_query,
Params::Named(filter_where_clause.params.clone()),
filter_where_clause.params.clone(),
)
.await?;
row.get::<i64>(0)?
};
@@ -117,7 +116,7 @@ struct Pagination<'a> {
}
async fn fetch_rows(
conn: &Connection,
conn: &tokio_rusqlite::Connection,
table_or_view_name: &str,
filter_where_clause: WhereClause,
order: Option<Vec<(String, Order)>>,
@@ -129,15 +128,18 @@ async fn fetch_rows(
} = filter_where_clause;
params.push((
":limit".to_string(),
libsql::Value::Integer(pagination.limit as i64),
tokio_rusqlite::Value::Integer(pagination.limit as i64),
));
params.push((
":offset".to_string(),
libsql::Value::Integer(pagination.offset.unwrap_or(0) as i64),
tokio_rusqlite::Value::Integer(pagination.offset.unwrap_or(0) as i64),
));
if let Some(cursor) = pagination.cursor {
params.push((":cursor".to_string(), libsql::Value::Blob(cursor.to_vec())));
params.push((
":cursor".to_string(),
tokio_rusqlite::Value::Blob(cursor.to_vec()),
));
clause = format!("{clause} AND _row_.id < :cursor",);
}
@@ -175,15 +177,12 @@ async fn fetch_rows(
"#,
);
let result_rows = conn
.query(&query, libsql::params::Params::Named(params))
.await
.map_err(|err| {
#[cfg(debug_assertions)]
error!("QUERY: {query}\n\t=> {err}");
let result_rows = conn.query(&query, params).await.map_err(|err| {
#[cfg(debug_assertions)]
error!("QUERY: {query}\n\t=> {err}");
return err;
})?;
return err;
})?;
return Ok(rows_to_json_arrays(result_rows, 1024).await?);
return Ok(rows_to_json_arrays(result_rows, 1024)?);
}

View File

@@ -27,32 +27,40 @@ pub async fn alter_index_handler(
State(state): State<AppState>,
Json(request): Json<AlterIndexRequest>,
) -> Result<Response, Error> {
let conn = state.conn();
let source_schema = request.source_schema;
let source_index_name = &source_schema.name;
let source_index_name = source_schema.name.clone();
let target_schema = request.target_schema;
debug!("Alter index:\nsource: {source_schema:?}\ntarget: {target_schema:?}",);
let mut tx = TransactionRecorder::new(
conn.clone(),
state.data_dir().migrations_path(),
format!("alter_index_{source_index_name}"),
)
.await?;
let migration_path = state.data_dir().migrations_path();
let conn = state.conn();
let writer = conn
.call(move |conn| {
let mut tx = TransactionRecorder::new(
conn,
migration_path,
format!("alter_index_{source_index_name}"),
)?;
// Drop old index
tx.execute(&format!("DROP INDEX {source_index_name}"))
// Drop old index
tx.execute(&format!("DROP INDEX {source_index_name}"))?;
// Create new index
let create_index_query = target_schema.create_index_statement();
tx.execute(&create_index_query)?;
return tx
.rollback_and_create_migration()
.map_err(|err| tokio_rusqlite::Error::Other(err.into()));
})
.await?;
// Create new index
let create_index_query = target_schema.create_index_statement();
tx.query(&create_index_query).await?;
// Write to migration file.
let report = tx.commit_and_create_migration().await?;
debug!("Migration report: {report:?}");
if let Some(writer) = writer {
let report = writer.write(conn).await?;
debug!("Migration report: {report:?}");
}
return Ok((StatusCode::OK, "altered index").into_response());
}

View File

@@ -10,10 +10,10 @@ use log::*;
use serde::Deserialize;
use ts_rs::TS;
use crate::admin::AdminError as Error;
use crate::app_state::AppState;
use crate::schema::Table;
use crate::transaction::TransactionRecorder;
use crate::{admin::AdminError as Error, transaction::MigrationWriter};
#[derive(Clone, Debug, Deserialize, TS)]
#[ts(export)]
@@ -30,16 +30,16 @@ pub async fn alter_table_handler(
Json(request): Json<AlterTableRequest>,
) -> Result<Response, Error> {
let source_schema = request.source_schema;
let source_table_name = &source_schema.name;
let source_table_name = source_schema.name.clone();
let Some(_metadata) = state.table_metadata().get(source_table_name) else {
let Some(_metadata) = state.table_metadata().get(&source_table_name) else {
return Err(Error::Precondition(format!(
"Cannot alter '{source_table_name}'. Only tables are supported.",
)));
};
let target_schema = request.target_schema;
let target_table_name = &target_schema.name;
let target_table_name = target_schema.name.clone();
debug!("Alter table:\nsource: {source_schema:?}\ntarget: {target_schema:?}",);
@@ -71,46 +71,59 @@ pub async fn alter_table_handler(
let mut target_schema_copy = target_schema.clone();
target_schema_copy.name = temp_table_name.to_string();
let mut tx = TransactionRecorder::new(
state.conn().clone(),
state.data_dir().migrations_path(),
format!("alter_table_{source_table_name}"),
)
.await?;
tx.execute("PRAGMA foreign_keys = OFF").await?;
let migration_path = state.data_dir().migrations_path();
let conn = state.conn();
let writer = conn
.call(
move |conn| -> Result<Option<MigrationWriter>, tokio_rusqlite::Error> {
let mut tx = TransactionRecorder::new(
conn,
migration_path,
format!("alter_table_{source_table_name}"),
)
.map_err(|err| tokio_rusqlite::Error::Other(err.into()))?;
// Create new table
let sql = target_schema_copy.create_table_statement();
tx.query(&sql).await?;
tx.execute("PRAGMA foreign_keys = OFF")?;
// Copy
tx.query(&format!(
r#"
INSERT INTO
{temp_table_name} ({column_list})
SELECT
{column_list}
FROM
{source_table_name}
"#,
column_list = copy_columns.join(", "),
))
.await?;
// Create new table
let sql = target_schema_copy.create_table_statement();
tx.execute(&sql)?;
tx.query(&format!("DROP TABLE {source_table_name}")).await?;
// Copy
tx.execute(&format!(
r#"
INSERT INTO
{temp_table_name} ({column_list})
SELECT
{column_list}
FROM
{source_table_name}
"#,
column_list = copy_columns.join(", "),
))?;
if *target_table_name != temp_table_name {
tx.query(&format!(
"ALTER TABLE '{temp_table_name}' RENAME TO '{target_table_name}'"
))
tx.execute(&format!("DROP TABLE {source_table_name}"))?;
if *target_table_name != temp_table_name {
tx.execute(&format!(
"ALTER TABLE '{temp_table_name}' RENAME TO '{target_table_name}'"
))?;
}
tx.execute("PRAGMA foreign_keys = ON")?;
return tx
.rollback_and_create_migration()
.map_err(|err| tokio_rusqlite::Error::Other(err.into()));
},
)
.await?;
}
tx.execute("PRAGMA foreign_keys = ON").await?;
// Write to migration file.
let report = tx.commit_and_create_migration().await?;
debug!("Migration report: {report:?}");
if let Some(writer) = writer {
let report = writer.write(conn).await?;
debug!("Migration report: {report:?}");
}
state.table_metadata().invalidate_all().await?;

View File

@@ -24,24 +24,32 @@ pub async fn create_index_handler(
State(state): State<AppState>,
Json(request): Json<CreateIndexRequest>,
) -> Result<Json<CreateIndexResponse>, Error> {
let conn = state.conn();
let dry_run = request.dry_run.unwrap_or(false);
let index_name = request.schema.name.clone();
let create_index_query = request.schema.create_index_statement();
if !dry_run {
let mut tx = TransactionRecorder::new(
conn.clone(),
state.data_dir().migrations_path(),
format!("create_index_{index_name}"),
)
.await?;
let create_index_query = create_index_query.clone();
let migration_path = state.data_dir().migrations_path();
let conn = state.conn();
let writer = conn
.call(move |conn| {
let mut tx =
TransactionRecorder::new(conn, migration_path, format!("create_index_{index_name}"))?;
tx.query(&create_index_query).await?;
tx.execute(&create_index_query)?;
return tx
.rollback_and_create_migration()
.map_err(|err| tokio_rusqlite::Error::Other(err.into()));
})
.await?;
// Write to migration file.
tx.commit_and_create_migration().await?;
if let Some(writer) = writer {
writer.write(conn).await?;
}
}
return Ok(Json(CreateIndexResponse {

View File

@@ -24,7 +24,6 @@ pub async fn create_table_handler(
State(state): State<AppState>,
Json(request): Json<CreateTableRequest>,
) -> Result<Json<CreateTableResponse>, Error> {
let conn = state.conn();
if request.schema.columns.is_empty() {
return Err(Error::Precondition(
"Tables need to have at least one column".to_string(),
@@ -34,27 +33,36 @@ pub async fn create_table_handler(
let table_name = request.schema.name.clone();
// This contains the create table statement and may also contain indexes and triggers.
let query = request.schema.create_table_statement();
let create_table_query = request.schema.create_table_statement();
if !dry_run {
let mut tx = TransactionRecorder::new(
conn.clone(),
state.data_dir().migrations_path(),
format!("create_table_{table_name}"),
)
.await?;
let create_table_query = create_table_query.clone();
let migration_path = state.data_dir().migrations_path();
let conn = state.conn();
let writer = conn
.call(move |conn| {
let mut tx =
TransactionRecorder::new(conn, migration_path, format!("create_table_{table_name}"))?;
tx.query(&query).await?;
tx.execute(&create_table_query)?;
return tx
.rollback_and_create_migration()
.map_err(|err| tokio_rusqlite::Error::Other(err.into()));
})
.await?;
// Write to migration file.
tx.commit_and_create_migration().await?;
if let Some(writer) = writer {
let _report = writer.write(conn).await?;
}
state.table_metadata().invalidate_all().await?;
}
return Ok(Json(CreateTableResponse {
sql: sqlformat::format(
format!("{query};").as_str(),
format!("{create_table_query};").as_str(),
&sqlformat::QueryParams::None,
&sqlformat::FormatOptions {
ignore_case_convert: None,

View File

@@ -22,22 +22,29 @@ pub async fn drop_index_handler(
State(state): State<AppState>,
Json(request): Json<DropIndexRequest>,
) -> Result<Response, Error> {
let conn = state.conn();
let index_name = request.name;
let mut tx = TransactionRecorder::new(
conn.clone(),
state.data_dir().migrations_path(),
format!("drop_index_{index_name}"),
)
.await?;
let migration_path = state.data_dir().migrations_path();
let conn = state.conn();
let writer = conn
.call(move |conn| {
let mut tx =
TransactionRecorder::new(conn, migration_path, format!("drop_index_{index_name}"))?;
let query = format!("DROP INDEX IF EXISTS {}", index_name);
info!("dropping index: {query}");
tx.execute(&query).await?;
let query = format!("DROP INDEX IF EXISTS {}", index_name);
info!("dropping index: {query}");
tx.execute(&query)?;
return tx
.rollback_and_create_migration()
.map_err(|err| tokio_rusqlite::Error::Other(err.into()));
})
.await?;
// Write to migration file.
tx.commit_and_create_migration().await?;
if let Some(writer) = writer {
let _report = writer.write(conn).await?;
}
return Ok((StatusCode::OK, "").into_response());
}

View File

@@ -22,12 +22,12 @@ pub async fn drop_table_handler(
State(state): State<AppState>,
Json(request): Json<DropTableRequest>,
) -> Result<Response, Error> {
let table_name = &request.name;
let table_name = request.name.clone();
let entity_type: &str;
if state.table_metadata().get(table_name).is_some() {
if state.table_metadata().get(&table_name).is_some() {
entity_type = "TABLE";
} else if state.table_metadata().get_view(table_name).is_some() {
} else if state.table_metadata().get_view(&table_name).is_some() {
entity_type = "VIEW";
} else {
return Err(Error::Precondition(format!(
@@ -35,19 +35,31 @@ pub async fn drop_table_handler(
)));
}
let mut tx = TransactionRecorder::new(
state.conn().clone(),
state.data_dir().migrations_path(),
format!("drop_{}_{table_name}", entity_type.to_lowercase()),
)
.await?;
let migration_path = state.data_dir().migrations_path();
let conn = state.conn();
let writer = conn
.call(move |conn| {
let mut tx = TransactionRecorder::new(
conn,
migration_path,
format!("drop_{}_{table_name}", entity_type.to_lowercase()),
)?;
let query = format!("DROP {entity_type} IF EXISTS {table_name}");
info!("dropping table: {query}");
tx.execute(&query).await?;
let query = format!("DROP {entity_type} IF EXISTS {table_name}");
info!("dropping table: {query}");
tx.execute(&query)?;
return tx
.rollback_and_create_migration()
.map_err(|err| tokio_rusqlite::Error::Other(err.into()));
})
.await?;
// Write to migration file.
tx.commit_and_create_migration().await?;
if let Some(writer) = writer {
let _report = writer.write(conn).await?;
}
state.table_metadata().invalidate_all().await?;
return Ok((StatusCode::OK, "").into_response());

View File

@@ -1,7 +1,6 @@
use axum::{extract::State, Json};
use libsql::de;
use log::*;
use serde::{Deserialize, Serialize};
use serde::Serialize;
use ts_rs::TS;
use crate::admin::AdminError as Error;
@@ -31,32 +30,33 @@ pub struct ListSchemasResponse {
pub async fn list_tables_handler(
State(state): State<AppState>,
) -> Result<Json<ListSchemasResponse>, Error> {
let conn = state.conn();
// NOTE: the "ORDER BY" is a bit sneaky, it ensures that we parse all "table"s before we parse
// "view"s.
let mut rows = conn
let rows = state
.conn()
.query(
&format!("SELECT * FROM {SQLITE_SCHEMA_TABLE} ORDER BY type"),
&format!("SELECT type, name, tbl_name, sql FROM {SQLITE_SCHEMA_TABLE} ORDER BY type"),
(),
)
.await?;
let mut schemas = ListSchemasResponse::default();
while let Some(row) = rows.next().await? {
#[derive(Deserialize, Debug)]
for row in rows.iter() {
#[derive(Debug)]
pub struct SqliteSchema {
pub r#type: String,
pub name: String,
pub tbl_name: String,
#[allow(unused)]
pub rootpage: i64,
pub sql: Option<String>,
}
let schema: SqliteSchema = de::from_row(&row)?;
let schema = SqliteSchema {
r#type: row.get(0)?,
name: row.get(1)?,
tbl_name: row.get(2)?,
sql: row.get(3).ok(),
};
let name = &schema.name;
match schema.r#type.as_str() {

View File

@@ -1,8 +1,7 @@
use axum::{extract::State, Json};
use lazy_static::lazy_static;
use libsql::{de, named_params};
use serde::{Deserialize, Serialize};
use trailbase_sqlite::query_one_row;
use tokio_rusqlite::named_params;
use ts_rs::TS;
use uuid::Uuid;
@@ -64,9 +63,9 @@ pub async fn create_user_handler(
);
}
let user: DbUser = de::from_row(
&query_one_row(
state.user_conn(),
let Some(user) = state
.user_conn()
.query_value::<DbUser>(
&INSERT_USER_QUERY,
named_params! {
":email": normalized_email,
@@ -76,8 +75,10 @@ pub async fn create_user_handler(
":email_verification_code": email_verification_code.clone(),
},
)
.await?,
)?;
.await?
else {
return Err(Error::Precondition("Internal".into()));
};
if let Some(email_verification_code) = email_verification_code {
Email::verification_email(&state, &user, &email_verification_code)?

View File

@@ -3,10 +3,8 @@ use axum::{
Json,
};
use lazy_static::lazy_static;
use libsql::{de, params::Params, Connection};
use log::*;
use serde::Serialize;
use trailbase_sqlite::query_one_row;
use ts_rs::TS;
use uuid::Uuid;
@@ -78,10 +76,10 @@ pub async fn list_users_handler(
let total_row_count = {
let where_clause = &filter_where_clause.clause;
let row = query_one_row(
let row = crate::util::query_one_row(
conn,
&format!("SELECT COUNT(*) FROM {USER_TABLE} WHERE {where_clause}"),
Params::Named(filter_where_clause.params.clone()),
filter_where_clause.params.clone(),
)
.await?;
@@ -112,7 +110,7 @@ pub async fn list_users_handler(
}
async fn fetch_users(
conn: &Connection,
conn: &tokio_rusqlite::Connection,
filter_where_clause: WhereClause,
cursor: Option<[u8; 16]>,
order: Vec<(String, Order)>,
@@ -120,10 +118,16 @@ async fn fetch_users(
) -> Result<Vec<DbUser>, Error> {
let mut params = filter_where_clause.params;
let mut where_clause = filter_where_clause.clause;
params.push((":limit".to_string(), libsql::Value::Integer(limit as i64)));
params.push((
":limit".to_string(),
tokio_rusqlite::Value::Integer(limit as i64),
));
if let Some(cursor) = cursor {
params.push((":cursor".to_string(), libsql::Value::Blob(cursor.to_vec())));
params.push((
":cursor".to_string(),
tokio_rusqlite::Value::Blob(cursor.to_vec()),
));
where_clause = format!("{where_clause} AND _row_.id < :cursor",);
}
@@ -156,15 +160,6 @@ async fn fetch_users(
info!("PARAMS: {params:?}\nQUERY: {sql_query}");
let mut rows = conn.query(&sql_query, Params::Named(params)).await?;
let mut users: Vec<DbUser> = vec![];
while let Ok(Some(row)) = rows.next().await {
match de::from_row(&row) {
Ok(user) => users.push(user),
Err(err) => warn!("failed: {err}"),
};
}
let users = conn.query_values::<DbUser>(&sql_query, params).await?;
return Ok(users);
}

View File

@@ -12,7 +12,6 @@ pub(crate) use create_user::create_user_for_test;
#[cfg(test)]
mod tests {
use axum::{extract::State, Json};
use libsql::params;
use std::sync::Arc;
use uuid::Uuid;
@@ -58,7 +57,7 @@ mod tests {
.user_conn()
.execute(
&format!("DELETE FROM '{USER_TABLE}' WHERE id = $1"),
params!(user.get_id().as_bytes()),
(user.get_id().as_bytes().to_vec(),),
)
.await
.unwrap();

View File

@@ -5,7 +5,7 @@ use axum::{
Json,
};
use lazy_static::lazy_static;
use libsql::params;
use rusqlite::params;
use serde::{Deserialize, Serialize};
use ts_rs::TS;
@@ -48,22 +48,27 @@ pub async fn update_user_handler(
static ref UPDATE_VERIFIED_QUERY: String = update_query("verified");
}
let tx = conn.transaction().await?;
let email = request.email.clone();
let verified = request.verified;
conn
.call(move |conn| {
let tx = conn.transaction()?;
if let Some(ref email) = request.email {
tx.execute(&UPDATE_EMAIL_QUERY, params![email.clone(), user_id_bytes])
.await?;
}
if let Some(password_hash) = hashed_password {
tx.execute(&UPDATE_PW_HASH_QUERY, params!(password_hash, user_id_bytes))
.await?;
}
if let Some(verified) = request.verified {
tx.execute(&UPDATE_VERIFIED_QUERY, params!(verified, user_id_bytes))
.await?;
}
if let Some(email) = email {
tx.execute(&UPDATE_EMAIL_QUERY, params![email, user_id_bytes])?;
}
if let Some(password_hash) = hashed_password {
tx.execute(&UPDATE_PW_HASH_QUERY, params!(password_hash, user_id_bytes))?;
}
if let Some(verified) = verified {
tx.execute(&UPDATE_VERIFIED_QUERY, params!(verified, user_id_bytes))?;
}
tx.commit().await?;
tx.commit()?;
return Ok(());
})
.await?;
return Ok((StatusCode::OK, format!("Updated user: {request:?}")).into_response());
}

View File

@@ -1,4 +1,3 @@
use libsql::Connection;
use log::*;
use object_store::ObjectStore;
use std::path::PathBuf;
@@ -30,8 +29,8 @@ struct InternalState {
query_apis: Computed<Vec<(String, QueryApi)>, Config>,
config: ValueNotifier<Config>,
logs_conn: Connection,
conn: Connection,
logs_conn: tokio_rusqlite::Connection,
conn2: tokio_rusqlite::Connection,
jwt: JwtHelper,
@@ -51,8 +50,8 @@ pub(crate) struct AppStateArgs {
pub dev: bool,
pub table_metadata: TableMetadataCache,
pub config: Config,
pub conn: Connection,
pub logs_conn: Connection,
pub conn2: tokio_rusqlite::Connection,
pub logs_conn: tokio_rusqlite::Connection,
pub jwt: JwtHelper,
pub object_store: Box<dyn ObjectStore + Send + Sync>,
pub js_runtime_threads: Option<usize>,
@@ -68,13 +67,13 @@ impl AppState {
let config = ValueNotifier::new(args.config);
let table_metadata_clone = args.table_metadata.clone();
let conn_clone0 = args.conn.clone();
let conn_clone1 = args.conn.clone();
let conn_clone0 = args.conn2.clone();
let conn_clone1 = args.conn2.clone();
let runtime = args
.js_runtime_threads
.map_or_else(RuntimeHandle::new, RuntimeHandle::new_with_threads);
runtime.set_connection(args.conn.clone());
runtime.set_connection(args.conn2.clone());
AppState {
state: Arc::new(InternalState {
@@ -122,7 +121,7 @@ impl AppState {
.collect::<Vec<_>>();
}),
config,
conn: args.conn.clone(),
conn2: args.conn2.clone(),
logs_conn: args.logs_conn,
jwt: args.jwt,
table_metadata: args.table_metadata,
@@ -148,15 +147,15 @@ impl AppState {
return self.state.dev;
}
pub fn conn(&self) -> &Connection {
return &self.state.conn;
pub fn conn(&self) -> &tokio_rusqlite::Connection {
return &self.state.conn2;
}
pub(crate) fn user_conn(&self) -> &Connection {
return &self.state.conn;
pub fn user_conn(&self) -> &tokio_rusqlite::Connection {
return &self.state.conn2;
}
pub(crate) fn logs_conn(&self) -> &Connection {
pub(crate) fn logs_conn(&self) -> &tokio_rusqlite::Connection {
return &self.state.logs_conn;
}
@@ -306,21 +305,22 @@ pub async fn test_state(options: Option<TestStateOptions>) -> anyhow::Result<App
let temp_dir = temp_dir::TempDir::new()?;
tokio::fs::create_dir_all(temp_dir.child("uploads")).await?;
let main_conn = {
let conn = trailbase_sqlite::connect_sqlite(None, None).await?;
apply_user_migrations(conn.clone()).await?;
let _new_db = apply_main_migrations(conn.clone(), None).await?;
let conn2 = {
let mut conn = trailbase_sqlite::connect_sqlite(None, None)?;
apply_user_migrations(&mut conn)?;
let _new_db = apply_main_migrations(&mut conn, None)?;
conn
tokio_rusqlite::Connection::from_conn(conn).await?
};
let logs_conn = {
let conn = trailbase_sqlite::connect_sqlite(None, None).await?;
apply_logs_migrations(conn.clone()).await?;
conn
let mut conn = trailbase_sqlite::connect_sqlite(None, None)?;
apply_logs_migrations(&mut conn)?;
tokio_rusqlite::Connection::from_conn(conn).await?
};
let table_metadata = TableMetadataCache::new(main_conn.clone()).await?;
let table_metadata = TableMetadataCache::new(conn2.clone()).await?;
let build_default_config = || {
// Construct a fabricated config for tests and make sure it's valid.
@@ -364,8 +364,8 @@ pub async fn test_state(options: Option<TestStateOptions>) -> anyhow::Result<App
validate_config(&table_metadata, &config).unwrap();
let config = ValueNotifier::new(config);
let main_conn_clone0 = main_conn.clone();
let main_conn_clone1 = main_conn.clone();
let main_conn_clone0 = conn2.clone();
let main_conn_clone1 = conn2.clone();
let table_metadata_clone = table_metadata.clone();
let data_dir = DataDir(temp_dir.path().to_path_buf());
@@ -389,7 +389,7 @@ pub async fn test_state(options: Option<TestStateOptions>) -> anyhow::Result<App
};
let runtime = RuntimeHandle::new();
runtime.set_connection(main_conn.clone());
runtime.set_connection(conn2.clone());
return Ok(AppState {
state: Arc::new(InternalState {
@@ -428,7 +428,7 @@ pub async fn test_state(options: Option<TestStateOptions>) -> anyhow::Result<App
.collect::<Vec<_>>();
}),
config,
conn: main_conn.clone(),
conn2,
logs_conn,
jwt: jwt::test_jwt_helper(),
table_metadata,
@@ -440,7 +440,7 @@ pub async fn test_state(options: Option<TestStateOptions>) -> anyhow::Result<App
}
fn build_record_api(
conn: libsql::Connection,
conn: tokio_rusqlite::Connection,
table_metadata_cache: &TableMetadataCache,
config: RecordApiConfig,
) -> Result<RecordApi, String> {
@@ -459,7 +459,10 @@ fn build_record_api(
return Err(format!("RecordApi references missing table: {config:?}"));
}
fn build_query_api(conn: libsql::Connection, config: QueryApiConfig) -> Result<QueryApi, String> {
fn build_query_api(
conn: tokio_rusqlite::Connection,
config: QueryApiConfig,
) -> Result<QueryApi, String> {
// TODO: Check virtual table exists
return QueryApi::from(conn, config);
}

View File

@@ -1,9 +1,8 @@
use axum::extract::{Json, Path, State};
use axum::http::{header, HeaderMap, StatusCode};
use axum::response::{IntoResponse, Redirect, Response};
use libsql::params;
use serde::{Deserialize, Serialize};
use trailbase_sqlite::query_one_row;
use tokio_rusqlite::params;
use trailbase_sqlite::schema::FileUpload;
use uuid::Uuid;
@@ -15,7 +14,7 @@ use crate::constants::{AVATAR_TABLE, RECORD_API_PATH};
use crate::util::{assert_uuidv7_version, id_to_b64};
async fn get_avatar_url(state: &AppState, user: &DbUser) -> Option<String> {
if let Ok(row) = query_one_row(
if let Ok(row) = crate::util::query_one_row(
state.user_conn(),
&format!("SELECT EXISTS(SELECT user FROM '{AVATAR_TABLE}' WHERE user = $1)"),
params!(user.id),
@@ -101,8 +100,6 @@ mod tests {
use axum::http;
use axum::response::Response;
use axum_test::multipart::{MultipartForm, Part};
use libsql::de;
use trailbase_sqlite::query_one_row;
use super::*;
use crate::admin::user::create_user_for_test;
@@ -197,16 +194,15 @@ mod tests {
let user_x_token = login_with_password(&state, email, password).await.unwrap();
let db_user: DbUser = de::from_row(
&query_one_row(
state.user_conn(),
let db_user = state
.user_conn()
.query_value::<DbUser>(
&format!("SELECT * FROM '{USER_TABLE}' WHERE email = $1"),
[email],
(email,),
)
.await
.unwrap(),
)
.unwrap();
.unwrap()
.unwrap();
let missing_profile_response = get_avatar_url_handler(
State(state.clone()),

View File

@@ -4,8 +4,8 @@ use axum::{
response::{IntoResponse, Redirect, Response},
};
use lazy_static::lazy_static;
use libsql::named_params;
use serde::Deserialize;
use tokio_rusqlite::named_params;
use ts_rs::TS;
use utoipa::{IntoParams, ToSchema};

View File

@@ -4,8 +4,8 @@ use axum::{
response::Redirect,
};
use lazy_static::lazy_static;
use libsql::named_params;
use serde::Deserialize;
use tokio_rusqlite::named_params;
use ts_rs::TS;
use utoipa::{IntoParams, ToSchema};
@@ -91,7 +91,7 @@ pub async fn change_password_handler(
.execute(
&QUERY,
named_params! {
":user_id": user.uuid.into_bytes(),
":user_id": user.uuid.into_bytes().to_vec(),
":new_password_hash": new_password_hash,
":old_password_hash": old_password_hash,
},

View File

@@ -28,7 +28,7 @@ pub(crate) async fn delete_handler(
.user_conn()
.execute(
&format!("DELETE FROM '{USER_TABLE}' WHERE id = $1"),
[user.uuid.into_bytes().to_vec()],
[tokio_rusqlite::Value::Blob(user.uuid.into_bytes().to_vec())],
)
.await?;

View File

@@ -5,8 +5,8 @@ use axum::{
Json,
};
use lazy_static::lazy_static;
use libsql::named_params;
use serde::{Deserialize, Serialize};
use tokio_rusqlite::named_params;
use tower_cookies::Cookies;
use ts_rs::TS;
use utoipa::{IntoParams, ToSchema};

View File

@@ -4,9 +4,8 @@ use axum::{
response::{IntoResponse, Redirect, Response},
};
use lazy_static::lazy_static;
use libsql::{de, named_params};
use serde::Deserialize;
use trailbase_sqlite::query_one_row;
use tokio_rusqlite::named_params;
use utoipa::ToSchema;
use validator::ValidateEmail;
@@ -87,9 +86,9 @@ pub async fn register_user_handler(
);
}
let user: DbUser = de::from_row(
&query_one_row(
state.user_conn(),
let Some(user) = state
.user_conn()
.query_value::<DbUser>(
&INSERT_USER_QUERY,
named_params! {
":email": normalized_email.clone(),
@@ -97,14 +96,16 @@ pub async fn register_user_handler(
":email_verification_code": email_verification_code.clone(),
},
)
.await?,
)
.map_err(|_err| {
#[cfg(debug_assertions)]
log::debug!("Failed to create user {normalized_email}: {_err}");
// The insert will fail if the user is already registered
AuthError::Conflict
})?;
.await
.map_err(|_err| {
#[cfg(debug_assertions)]
log::debug!("Failed to create user {normalized_email}: {_err}");
// The insert will fail if the user is already registered
AuthError::Conflict
})?
else {
return Err(AuthError::Internal("Failed to get user".into()));
};
let email = Email::verification_email(&state, &user, &email_verification_code)
.map_err(|err| AuthError::Internal(err.into()))?;

View File

@@ -4,9 +4,8 @@ use axum::{
response::{IntoResponse, Response},
};
use lazy_static::lazy_static;
use libsql::params;
use serde::Deserialize;
use trailbase_sqlite::query_one_row;
use tokio_rusqlite::params;
use ts_rs::TS;
use utoipa::ToSchema;
use uuid::Uuid;
@@ -170,7 +169,7 @@ pub async fn reset_password_update_handler(
}
pub async fn force_password_reset(
user_conn: &libsql::Connection,
user_conn: &tokio_rusqlite::Connection,
email: String,
password: String,
) -> Result<Uuid, AuthError> {
@@ -181,13 +180,14 @@ pub async fn force_password_reset(
format!("UPDATE '{USER_TABLE}' SET password_hash = $1 WHERE email = $2 RETURNING id");
}
let id: [u8; 16] = query_one_row(
let id: [u8; 16] = crate::util::query_one_row(
user_conn,
&UPDATE_PASSWORD_QUERY,
params!(hashed_password, email),
)
.await?
.get(0)?;
.get(0)
.map_err(|_err| AuthError::NotFound)?;
return Ok(Uuid::from_bytes(id));
}

View File

@@ -1,8 +1,7 @@
use axum::extract::{Json, State};
use lazy_static::lazy_static;
use libsql::{de, params};
use serde::{Deserialize, Serialize};
use trailbase_sqlite::query_one_row;
use tokio_rusqlite::params;
use ts_rs::TS;
use utoipa::ToSchema;
@@ -74,15 +73,16 @@ pub(crate) async fn auth_code_to_token_handler(
);
}
let db_user: DbUser = de::from_row(
&query_one_row(
state.user_conn(),
let Some(db_user) = state
.user_conn()
.query_value::<DbUser>(
&UPDATE_QUERY,
params!(authorization_code, pkce_code_challenge),
)
.await?,
)
.map_err(|err| AuthError::Internal(err.into()))?;
.await?
else {
return Err(AuthError::NotFound);
};
let (auth_token_ttl, _refresh_token_ttl) = state.access_config(|c| c.auth.token_ttls());
let user_id = db_user.uuid();

View File

@@ -4,8 +4,8 @@ use axum::{
response::{IntoResponse, Redirect, Response},
};
use lazy_static::lazy_static;
use libsql::params;
use serde::Deserialize;
use tokio_rusqlite::params;
use utoipa::{IntoParams, ToSchema};
use crate::app_state::AppState;

View File

@@ -1,8 +1,7 @@
use axum::extract::{Form, Json, Path, Query, State};
use libsql::{de, params};
use std::sync::Arc;
use tokio_rusqlite::params;
use tower_cookies::Cookies;
use trailbase_sqlite::query_one_row;
use crate::api::TokenClaims;
use crate::app_state::{test_state, TestStateOptions};
@@ -25,6 +24,7 @@ use crate::auth::user::{DbUser, User};
use crate::constants::*;
use crate::email::{testing::TestAsyncSmtpTransport, Mailer};
use crate::extract::Either;
use crate::util::query_one_row;
#[tokio::test]
async fn test_auth_registration_reset_and_change_email() {
@@ -65,16 +65,14 @@ async fn test_auth_registration_reset_and_change_email() {
// Then steal the verification code from the DB and verify.
let email_verification_code = {
let db_user: DbUser = de::from_row(
&query_one_row(
conn,
let db_user = conn
.query_value::<DbUser>(
&format!("SELECT * FROM '{USER_TABLE}' WHERE email = $1"),
[email.clone()],
(email.clone(),),
)
.await
.unwrap(),
)
.unwrap();
.unwrap()
.unwrap();
db_user.email_verification_code.unwrap()
};
@@ -106,16 +104,14 @@ async fn test_auth_registration_reset_and_change_email() {
.unwrap();
let (verified, user) = {
let db_user: DbUser = de::from_row(
&query_one_row(
conn,
let db_user = conn
.query_value::<DbUser>(
&format!("SELECT * FROM '{USER_TABLE}' WHERE email = $1"),
[email.clone()],
(email.clone(),),
)
.await
.unwrap(),
)
.unwrap();
.unwrap()
.unwrap();
(
db_user.verified.clone(),
@@ -151,7 +147,7 @@ async fn test_auth_registration_reset_and_change_email() {
let session_exists: bool = query_one_row(
conn,
&session_exists_query,
[user.uuid.into_bytes().to_vec()],
(user.uuid.into_bytes().to_vec(),),
)
.await
.unwrap()
@@ -220,7 +216,7 @@ async fn test_auth_registration_reset_and_change_email() {
let reset_code: String = query_one_row(
conn,
&format!("SELECT password_reset_code FROM '{USER_TABLE}' WHERE id = $1"),
[user.uuid.into_bytes().to_vec()],
(user.uuid.into_bytes().to_vec(),),
)
.await
.unwrap()
@@ -279,7 +275,7 @@ async fn test_auth_registration_reset_and_change_email() {
let session_exists: bool = query_one_row(
conn,
&session_exists_query,
[user.uuid.into_bytes().to_vec()],
(user.uuid.into_bytes().to_vec(),),
)
.await
.unwrap()

View File

@@ -26,23 +26,37 @@ pub enum AuthError {
Internal(Box<dyn std::error::Error + Send + Sync>),
}
impl From<libsql::Error> for AuthError {
fn from(err: libsql::Error) -> Self {
impl From<tokio_rusqlite::Error> for AuthError {
fn from(err: tokio_rusqlite::Error) -> Self {
return match err {
libsql::Error::QueryReturnedNoRows => Self::NotFound,
// List of error codes: https://www.sqlite.org/rescode.html
libsql::Error::SqliteFailure(275, _msg) => Self::BadRequest("sqlite constraint: check"),
libsql::Error::SqliteFailure(531, _msg) => Self::BadRequest("sqlite constraint: commit hook"),
libsql::Error::SqliteFailure(3091, _msg) => Self::BadRequest("sqlite constraint: data type"),
libsql::Error::SqliteFailure(787, _msg) => Self::BadRequest("sqlite constraint: fk"),
libsql::Error::SqliteFailure(1043, _msg) => Self::BadRequest("sqlite constraint: function"),
libsql::Error::SqliteFailure(1299, _msg) => Self::BadRequest("sqlite constraint: not null"),
libsql::Error::SqliteFailure(2835, _msg) => Self::BadRequest("sqlite constraint: pinned"),
libsql::Error::SqliteFailure(1555, _msg) => Self::BadRequest("sqlite constraint: pk"),
libsql::Error::SqliteFailure(2579, _msg) => Self::BadRequest("sqlite constraint: row id"),
libsql::Error::SqliteFailure(1811, _msg) => Self::BadRequest("sqlite constraint: trigger"),
libsql::Error::SqliteFailure(2067, _msg) => Self::BadRequest("sqlite constraint: unique"),
libsql::Error::SqliteFailure(2323, _msg) => Self::BadRequest("sqlite constraint: vtab"),
tokio_rusqlite::Error::Rusqlite(err) => match err {
rusqlite::Error::QueryReturnedNoRows => {
#[cfg(debug_assertions)]
info!("SQLite returned empty rows error");
Self::NotFound
}
rusqlite::Error::SqliteFailure(err, _msg) => {
match err.extended_code {
// List of error codes: https://www.sqlite.org/rescode.html
275 => Self::BadRequest("sqlite constraint: check"),
531 => Self::BadRequest("sqlite constraint: commit hook"),
3091 => Self::BadRequest("sqlite constraint: data type"),
787 => Self::BadRequest("sqlite constraint: fk"),
1043 => Self::BadRequest("sqlite constraint: function"),
1299 => Self::BadRequest("sqlite constraint: not null"),
2835 => Self::BadRequest("sqlite constraint: pinned"),
1555 => Self::BadRequest("sqlite constraint: pk"),
2579 => Self::BadRequest("sqlite constraint: row id"),
1811 => Self::BadRequest("sqlite constraint: trigger"),
2067 => Self::BadRequest("sqlite constraint: unique"),
2323 => Self::BadRequest("sqlite constraint: vtab"),
_ => Self::Internal(err.into()),
}
}
_ => Self::Internal(err.into()),
},
err => Self::Internal(err.into()),
};
}
@@ -92,7 +106,10 @@ mod tests {
#[tokio::test]
async fn test_some_sqlite_errors_yield_client_errors() {
let conn = trailbase_sqlite::connect_sqlite(None, None).await.unwrap();
let conn =
tokio_rusqlite::Connection::from_conn(trailbase_sqlite::connect_sqlite(None, None).unwrap())
.await
.unwrap();
conn
.execute(
@@ -119,7 +136,12 @@ mod tests {
.err()
.unwrap();
assert!(matches!(sqlite_err, libsql::Error::SqliteFailure(1555, _)));
match sqlite_err {
tokio_rusqlite::Error::Rusqlite(rusqlite::Error::SqliteFailure(err, _)) => {
assert_eq!(err.extended_code, 1555);
}
_ => panic!("{sqlite_err}"),
};
let err: AuthError = sqlite_err.into();
assert_eq!(err.into_response().status(), StatusCode::BAD_REQUEST);

View File

@@ -4,14 +4,13 @@ use axum::{
};
use chrono::Duration;
use lazy_static::lazy_static;
use libsql::{de, named_params, params, Connection};
use oauth2::PkceCodeVerifier;
use oauth2::{AsyncHttpClient, HttpClientError, HttpRequest, HttpResponse};
use oauth2::{AuthorizationCode, StandardTokenResponse, TokenResponse};
use serde::Deserialize;
use thiserror::Error;
use tokio_rusqlite::{named_params, params};
use tower_cookies::Cookies;
use trailbase_sqlite::query_one_row;
use crate::auth::oauth::state::{OAuthState, ResponseType};
use crate::auth::oauth::OAuthUser;
@@ -254,7 +253,7 @@ pub(crate) async fn callback_from_external_auth_provider(
}
async fn create_user_for_external_provider(
conn: &Connection,
conn: &tokio_rusqlite::Connection,
user: &OAuthUser,
) -> Result<uuid::Uuid, AuthError> {
if !user.verified {
@@ -273,7 +272,7 @@ async fn create_user_for_external_provider(
);
}
let row = query_one_row(
let row = crate::util::query_one_row(
conn,
&QUERY,
named_params! {
@@ -286,11 +285,15 @@ async fn create_user_for_external_provider(
)
.await?;
return Ok(uuid::Uuid::from_bytes(row.get::<[u8; 16]>(0)?));
return Ok(uuid::Uuid::from_bytes(
row
.get::<[u8; 16]>(0)
.map_err(|err| AuthError::Internal(err.into()))?,
));
}
async fn user_by_provider_id(
conn: &Connection,
conn: &tokio_rusqlite::Connection,
provider_id: OAuthProviderId,
provider_user_id: &str,
) -> Result<DbUser, AuthError> {
@@ -299,8 +302,12 @@ async fn user_by_provider_id(
format!("SELECT * FROM '{USER_TABLE}' WHERE provider_id = $1 AND provider_user_id = $2");
};
return de::from_row(
&query_one_row(conn, &QUERY, params!(provider_id as i64, provider_user_id)).await?,
)
.map_err(|err| AuthError::Internal(err.into()));
return conn
.query_value::<DbUser>(
&QUERY,
params!(provider_id as i64, provider_user_id.to_string()),
)
.await
.map_err(|err| AuthError::Internal(err.into()))?
.ok_or_else(|| AuthError::NotFound);
}

View File

@@ -6,7 +6,6 @@ use axum_test::{TestServer, TestServerConfig};
use serde::{Deserialize, Serialize};
use tower_cookies::Cookies;
use crate::api::query_one_row;
use crate::app_state::{test_state, TestStateOptions};
use crate::auth::oauth::providers::test::{TestOAuthProvider, TestUser};
use crate::auth::oauth::state::OAuthState;
@@ -183,13 +182,15 @@ async fn test_oauth() {
let location = unpack_redirect(internal_redirect);
assert_eq!(location, "/_/auth/profile");
let row = query_one_row(
state.user_conn(),
&format!("SELECT email FROM {USER_TABLE} WHERE provider_user_id = $1"),
[external_user_id],
)
.await
.unwrap();
let row = state
.user_conn()
.query_row(
&format!("SELECT email FROM {USER_TABLE} WHERE provider_user_id = $1"),
(external_user_id,),
)
.await
.unwrap()
.unwrap();
assert_eq!(row.get::<String>(0).unwrap(), external_user_email);
}

View File

@@ -5,9 +5,8 @@ use axum::{
};
use chrono::Duration;
use lazy_static::lazy_static;
use libsql::{de, params};
use tokio_rusqlite::params;
use tower_cookies::Cookies;
use trailbase_sqlite::query_row;
use crate::app_state::AppState;
use crate::auth::jwt::TokenClaims;
@@ -168,7 +167,7 @@ pub(crate) async fn mint_new_tokens(
.user_conn()
.execute(
&QUERY,
params!(user_id.into_bytes(), refresh_token.clone(),),
params!(user_id.into_bytes().to_vec(), refresh_token.clone(),),
)
.await?;
@@ -197,13 +196,13 @@ pub(crate) async fn reauth_with_refresh_token(
);
}
let Some(row) = query_row(
state.user_conn(),
&QUERY,
params!(refresh_token, refresh_token_ttl.num_seconds()),
)
.await
.map_err(|err| AuthError::Internal(err.into()))?
let Some(db_user) = state
.user_conn()
.query_value::<DbUser>(
&QUERY,
params!(refresh_token, refresh_token_ttl.num_seconds()),
)
.await?
else {
// Row not found case, typically expected in one of 4 cases:
// 1. Above where clause doesn't match, e.g. refresh token expired.
@@ -216,8 +215,6 @@ pub(crate) async fn reauth_with_refresh_token(
return Err(AuthError::Unauthorized);
};
let db_user: DbUser = de::from_row(&row).map_err(|err| AuthError::Internal(err.into()))?;
assert!(
db_user.verified,
"unverified user, should have been caught by above query"

View File

@@ -3,10 +3,9 @@ use base64::prelude::*;
use chrono::Duration;
use cookie::SameSite;
use lazy_static::lazy_static;
use libsql::{de, params, Connection};
use sha2::{Digest, Sha256};
use tokio_rusqlite::params;
use tower_cookies::{Cookie, Cookies};
use trailbase_sqlite::{query_one_row, query_row};
use crate::auth::user::{DbUser, User};
use crate::auth::AuthError;
@@ -132,51 +131,61 @@ pub async fn user_by_email(state: &AppState, email: &str) -> Result<DbUser, Auth
return get_user_by_email(state.user_conn(), email).await;
}
pub async fn get_user_by_email(user_conn: &Connection, email: &str) -> Result<DbUser, AuthError> {
pub async fn get_user_by_email(
user_conn: &tokio_rusqlite::Connection,
email: &str,
) -> Result<DbUser, AuthError> {
lazy_static! {
static ref QUERY: String = format!("SELECT * FROM {USER_TABLE} WHERE email = $1");
};
let row = query_one_row(user_conn, &QUERY, params!(email))
let db_user = user_conn
.query_value::<DbUser>(&QUERY, params!(email.to_string()))
.await
.map_err(|_err| AuthError::UnauthorizedExt("user not found by email".into()))?;
return de::from_row(&row).map_err(|_err| AuthError::UnauthorizedExt("invalid user".into()));
return db_user.ok_or_else(|| AuthError::UnauthorizedExt("invalid user".into()));
}
pub async fn user_by_id(state: &AppState, id: &uuid::Uuid) -> Result<DbUser, AuthError> {
return get_user_by_id(state.user_conn(), id).await;
}
pub(crate) async fn get_user_by_id(
user_conn: &Connection,
async fn get_user_by_id(
user_conn: &tokio_rusqlite::Connection,
id: &uuid::Uuid,
) -> Result<DbUser, AuthError> {
lazy_static! {
static ref QUERY: String = format!("SELECT * FROM {USER_TABLE} WHERE id = $1");
};
let row = query_one_row(user_conn, &QUERY, params!(id.into_bytes()))
let db_user = user_conn
.query_value::<DbUser>(&QUERY, params!(id.into_bytes()))
.await
.map_err(|_err| AuthError::UnauthorizedExt("User not found by id".into()))?;
return de::from_row(&row).map_err(|_err| AuthError::UnauthorizedExt("Invalid user".into()));
return db_user.ok_or_else(|| AuthError::UnauthorizedExt("invalid user".into()));
}
pub async fn user_exists(state: &AppState, email: &str) -> Result<bool, libsql::Error> {
pub async fn user_exists(state: &AppState, email: &str) -> Result<bool, AuthError> {
lazy_static! {
static ref EXISTS_QUERY: String =
format!("SELECT EXISTS(SELECT 1 FROM '{USER_TABLE}' WHERE email = $1)");
};
let row = query_one_row(state.user_conn(), &EXISTS_QUERY, params!(email)).await?;
return row.get::<bool>(0);
let row =
crate::util::query_one_row(state.user_conn(), &EXISTS_QUERY, params!(email.to_string()))
.await?;
return row
.get::<bool>(0)
.map_err(|err| AuthError::Internal(err.into()));
}
pub(crate) async fn is_admin(state: &AppState, user: &User) -> bool {
let Ok(Some(row)) = query_row(
state.user_conn(),
&format!("SELECT admin FROM {USER_TABLE} WHERE id = $1"),
params!(user.uuid.as_bytes().to_vec()),
)
.await
let Ok(Some(row)) = state
.user_conn()
.query_row(
&format!("SELECT admin FROM {USER_TABLE} WHERE id = $1"),
params!(user.uuid.as_bytes().to_vec()),
)
.await
else {
return false;
};
@@ -187,29 +196,36 @@ pub(crate) async fn is_admin(state: &AppState, user: &User) -> bool {
pub(crate) async fn delete_all_sessions_for_user(
state: &AppState,
user_id: uuid::Uuid,
) -> Result<u64, libsql::Error> {
) -> Result<usize, AuthError> {
lazy_static! {
static ref QUERY: String = format!("DELETE FROM '{SESSION_TABLE}' WHERE user = $1");
};
return state
.user_conn()
.execute(&QUERY, [user_id.into_bytes().to_vec()])
.await;
return Ok(
state
.user_conn()
.execute(
&QUERY,
[tokio_rusqlite::Value::Blob(user_id.into_bytes().to_vec())],
)
.await?,
);
}
pub(crate) async fn delete_session(
state: &AppState,
refresh_token: String,
) -> Result<u64, libsql::Error> {
) -> Result<usize, AuthError> {
lazy_static! {
static ref QUERY: String = format!("DELETE FROM '{SESSION_TABLE}' WHERE refresh_token = $1");
};
return state
.user_conn()
.execute(&QUERY, params!(refresh_token))
.await;
return Ok(
state
.user_conn()
.execute(&QUERY, params!(refresh_token))
.await?,
);
}
/// Derives the code challenge given the verifier as base64UrlNoPad(sha256([codeVerifier])).

View File

@@ -10,7 +10,7 @@ mod fallback {
pub(crate) struct RuntimeHandle {}
impl RuntimeHandle {
pub(crate) fn set_connection(&self, _conn: libsql::Connection) {}
pub(crate) fn set_connection(&self, _conn: tokio_rusqlite::Connection) {}
pub(crate) fn new() -> Self {
return Self {};

View File

@@ -3,7 +3,6 @@ use axum::extract::{RawPathParams, Request};
use axum::http::{header::CONTENT_TYPE, request::Parts, HeaderName, HeaderValue, StatusCode};
use axum::response::{IntoResponse, Response};
use axum::Router;
use libsql::Connection;
use parking_lot::Mutex;
use rustyscript::{
deno_core::PollEventLoopOptions, init_platform, js_value::Promise, json_args, Module, Runtime,
@@ -73,7 +72,7 @@ enum Message {
struct State {
sender: async_channel::Sender<Message>,
connection: Mutex<Option<libsql::Connection>>,
connection: Mutex<Option<tokio_rusqlite::Connection>>,
}
struct RuntimeSingleton {
@@ -345,7 +344,7 @@ impl RuntimeSingleton {
let query: String = get_arg(&args, 0)?;
let json_params: Vec<serde_json::Value> = get_arg(&args, 1)?;
let mut params: Vec<libsql::Value> = vec![];
let mut params: Vec<tokio_rusqlite::Value> = vec![];
for value in json_params {
params.push(json_value_to_param(value)?);
}
@@ -357,12 +356,11 @@ impl RuntimeSingleton {
};
let rows = conn
.query(&query, libsql::params::Params::Positional(params))
.query(&query, params)
.await
.map_err(|err| rustyscript::Error::Runtime(err.to_string()))?;
let (values, _columns) = rows_to_json_arrays(rows, usize::MAX)
.await
.map_err(|err| rustyscript::Error::Runtime(err.to_string()))?;
return Ok(serde_json::json!(values));
@@ -375,7 +373,7 @@ impl RuntimeSingleton {
let query: String = get_arg(&args, 0)?;
let json_params: Vec<serde_json::Value> = get_arg(&args, 1)?;
let mut params: Vec<libsql::Value> = vec![];
let mut params: Vec<tokio_rusqlite::Value> = vec![];
for value in json_params {
params.push(json_value_to_param(value)?);
}
@@ -387,7 +385,7 @@ impl RuntimeSingleton {
};
let rows_affected = conn
.execute(&query, libsql::params::Params::Positional(params))
.execute(&query, params)
.await
.map_err(|err| rustyscript::Error::Runtime(err.to_string()))?;
@@ -414,7 +412,7 @@ pub(crate) struct RuntimeHandle {
impl RuntimeHandle {
#[cfg(not(test))]
pub(crate) fn set_connection(&self, conn: Connection) {
pub(crate) fn set_connection(&self, conn: tokio_rusqlite::Connection) {
for s in &self.runtime.state {
let mut lock = s.connection.lock();
if lock.is_some() {
@@ -425,7 +423,7 @@ impl RuntimeHandle {
}
#[cfg(test)]
pub(crate) fn set_connection(&self, conn: Connection) {
pub(crate) fn set_connection(&self, conn: tokio_rusqlite::Connection) {
for s in &self.runtime.state {
let mut lock = s.connection.lock();
if lock.is_some() {
@@ -437,7 +435,7 @@ impl RuntimeHandle {
}
#[cfg(test)]
pub(crate) fn override_connection(&self, conn: Connection) {
pub(crate) fn override_connection(&self, conn: tokio_rusqlite::Connection) {
for s in &self.runtime.state {
let mut lock = s.connection.lock();
if lock.is_some() {
@@ -484,7 +482,9 @@ impl RuntimeHandle {
}
}
pub fn json_value_to_param(value: serde_json::Value) -> Result<libsql::Value, rustyscript::Error> {
pub fn json_value_to_param(
value: serde_json::Value,
) -> Result<tokio_rusqlite::Value, rustyscript::Error> {
use rustyscript::Error;
return Ok(match value {
serde_json::Value::Object(ref _map) => {
@@ -493,16 +493,16 @@ pub fn json_value_to_param(value: serde_json::Value) -> Result<libsql::Value, ru
serde_json::Value::Array(ref _arr) => {
return Err(Error::Runtime("Array unsupported".to_string()));
}
serde_json::Value::Null => libsql::Value::Null,
serde_json::Value::Bool(b) => libsql::Value::Integer(b as i64),
serde_json::Value::String(str) => libsql::Value::Text(str),
serde_json::Value::Null => tokio_rusqlite::Value::Null,
serde_json::Value::Bool(b) => tokio_rusqlite::Value::Integer(b as i64),
serde_json::Value::String(str) => tokio_rusqlite::Value::Text(str),
serde_json::Value::Number(number) => {
if let Some(n) = number.as_i64() {
libsql::Value::Integer(n)
tokio_rusqlite::Value::Integer(n)
} else if let Some(n) = number.as_u64() {
libsql::Value::Integer(n as i64)
tokio_rusqlite::Value::Integer(n as i64)
} else if let Some(n) = number.as_f64() {
libsql::Value::Real(n)
tokio_rusqlite::Value::Real(n)
} else {
return Err(Error::Runtime(format!("invalid number: {number:?}")));
}
@@ -795,21 +795,11 @@ pub(crate) async fn write_js_runtime_files(data_dir: &DataDir) {
mod tests {
use super::*;
use rustyscript::Module;
use trailbase_sqlite::query_one_row;
async fn new_mem_conn() -> libsql::Connection {
return libsql::Builder::new_local(":memory:")
.build()
.await
.unwrap()
.connect()
.unwrap();
}
#[tokio::test]
async fn test_serial_tests() {
// NOTE: needs to run serially since registration of libsql connection with singleton v8 runtime
// is racy.
// NOTE: needs to run serially since registration of SQLite connection with singleton v8
// runtime is racy.
test_runtime_apply().await;
test_runtime_javascript().await;
test_javascript_query().await;
@@ -852,7 +842,7 @@ mod tests {
}
async fn test_javascript_query() {
let conn = new_mem_conn().await;
let conn = tokio_rusqlite::Connection::open_in_memory().await.unwrap();
conn
.execute("CREATE TABLE test (v0 TEXT, v1 INTEGER);", ())
.await
@@ -901,7 +891,7 @@ mod tests {
}
async fn test_javascript_execute() {
let conn = new_mem_conn().await;
let conn = tokio_rusqlite::Connection::open_in_memory().await.unwrap();
conn
.execute("CREATE TABLE test (v0 TEXT, v1 INTEGER);", ())
.await
@@ -930,8 +920,10 @@ mod tests {
.await
.unwrap();
let row = query_one_row(&conn, "SELECT COUNT(*) FROM test", ())
let row = conn
.query_row("SELECT COUNT(*) FROM test", ())
.await
.unwrap()
.unwrap();
let count: i64 = row.get(0).unwrap();
assert_eq!(0, count);

View File

@@ -1,4 +1,5 @@
#![allow(clippy::needless_return)]
#![warn(clippy::await_holding_lock, clippy::inefficient_to_string)]
pub mod app_state;
pub mod assets;
@@ -58,7 +59,7 @@ pub mod openapi {
}
pub mod api {
pub use trailbase_sqlite::{connect_sqlite, query_one_row};
pub use trailbase_sqlite::connect_sqlite;
pub use crate::admin::user::{create_user_handler, CreateUserRequest};
pub use crate::auth::api::login::login_with_password;

View File

@@ -9,8 +9,6 @@ use crate::util::b64_to_id;
#[derive(Debug, Error)]
pub enum WhereClauseError {
#[error("Libsql error: {0}")]
Libsql(#[from] libsql::Error),
#[error("Parse error: {0}")]
Parse(String),
#[error("Base64 decoding error: {0}")]
@@ -166,7 +164,7 @@ pub fn parse_query(query: Option<String>) -> Option<QueryParseResult> {
#[derive(Debug, Clone)]
pub struct WhereClause {
pub clause: String,
pub params: Vec<(String, libsql::Value)>,
pub params: Vec<(String, tokio_rusqlite::Value)>,
}
pub fn build_filter_where_clause(
@@ -174,7 +172,7 @@ pub fn build_filter_where_clause(
filter_params: Option<HashMap<String, Vec<QueryParam>>>,
) -> Result<WhereClause, WhereClauseError> {
let mut where_clauses: Vec<String> = vec![];
let mut params: Vec<(String, libsql::Value)> = vec![];
let mut params: Vec<(String, tokio_rusqlite::Value)> = vec![];
if let Some(filter_params) = filter_params {
for (column_name, query_params) in filter_params {

View File

@@ -2,13 +2,11 @@ use axum::body::Body;
use axum::http::{header::HeaderMap, Request};
use axum::response::Response;
use axum_client_ip::InsecureClientIp;
use libsql::{params, Connection};
use log::*;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::collections::BTreeMap;
use std::time::Duration;
use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender};
use tracing::field::Field;
use tracing::span::{Attributes, Id, Record, Span};
use tracing_subscriber::layer::{Context, Layer};
@@ -57,41 +55,6 @@ pub(crate) struct Log {
pub data: Option<serde_json::Value>,
}
// The writer runs in a separate Task in the background and receives Logs via a channel, which it
// then writes to Sqlite.
//
// TODO: should we use a bounded receiver to create back pressure?
// TODO: use recv_many() and batch insert.
async fn logs_writer(logs_conn: Connection, mut receiver: UnboundedReceiver<Log>) {
while let Some(log) = receiver.recv().await {
let result = logs_conn
.execute(
r#"
INSERT INTO
_logs (type, level, status, method, url, latency, client_ip, referer, user_agent)
VALUES
($1, $2, $3, $4, $5, $6, $7, $8, $9)
"#,
params!(
log.r#type,
log.level,
log.status,
log.method,
log.url,
log.latency,
log.client_ip,
log.referer,
log.user_agent
),
)
.await;
if let Err(err) = result {
warn!("logs writing failed: {err}");
}
}
}
pub(super) fn sqlite_logger_make_span(request: &Request<Body>) -> Span {
let headers = request.headers();
let host = get_header(headers, "host").unwrap_or("");
@@ -143,28 +106,47 @@ pub(super) fn sqlite_logger_on_response(
}
pub struct SqliteLogLayer {
sender: UnboundedSender<Log>,
handle: tokio::task::AbortHandle,
conn: tokio_rusqlite::Connection,
}
impl SqliteLogLayer {
pub fn new(state: &AppState) -> Self {
let (sender, abort_handle) = {
let (sender, receiver) = tokio::sync::mpsc::unbounded_channel::<Log>();
let writer = tokio::spawn(logs_writer(state.logs_conn().clone(), receiver));
(sender, writer.abort_handle())
};
return SqliteLogLayer {
sender,
handle: abort_handle,
conn: state.logs_conn().clone(),
};
}
}
impl Drop for SqliteLogLayer {
fn drop(&mut self) {
self.handle.abort();
// The writer runs in a separate Task in the background and receives Logs via a channel, which it
// then writes to Sqlite.
//
// TODO: should we use a bounded receiver to create back pressure?
// TODO: use recv_many() and batch insert.
fn write_log(&self, log: Log) -> Result<(), tokio_rusqlite::Error> {
return self.conn.call_and_forget(move |conn| {
let result = conn.execute(
r#"
INSERT INTO
_logs (type, level, status, method, url, latency, client_ip, referer, user_agent)
VALUES
($1, $2, $3, $4, $5, $6, $7, $8, $9)
"#,
rusqlite::params!(
log.r#type,
log.level,
log.status,
log.method,
log.url,
log.latency,
log.client_ip,
log.referer,
log.user_agent
),
);
if let Err(err) = result {
warn!("logs writing failed: {err}");
}
});
}
}
@@ -243,7 +225,7 @@ where
data: Some(json!(storage.fields)),
};
if let Err(err) = self.sender.send(log) {
if let Err(err) = self.write_log(log) {
warn!("Failed to send to logs to writer: {err}");
}
}

View File

@@ -1,9 +1,7 @@
use lazy_static::lazy_static;
use libsql::Connection;
use log::*;
use parking_lot::Mutex;
use refinery::Migration;
use refinery_libsql::LibsqlConnection;
use std::path::PathBuf;
mod main {
@@ -47,12 +45,8 @@ pub(crate) fn new_migration_runner(migrations: &[Migration]) -> refinery::Runner
return runner;
}
// The main migrations are a bit tricky because they may be a mix of user-provided and builtin
// migrations. They might even come out of order, e.g.: someone does a schema migration on an old
// version of the binary and then updates. Yet, they need to be applied in one go. We therefore
// rely on refinery's non-strictly versioned migrations prefixed with the "U" name.
pub(crate) async fn apply_main_migrations(
conn: Connection,
pub(crate) fn apply_main_migrations(
conn: &mut rusqlite::Connection,
user_migrations_path: Option<PathBuf>,
) -> Result<bool, refinery::Error> {
let all_migrations = {
@@ -64,7 +58,7 @@ pub(crate) async fn apply_main_migrations(
if let Some(path) = user_migrations_path {
// NOTE: refinery has a bug where it will name-check the directory and write a warning... :/.
let user_migrations = refinery::load_sql_migrations(path)?;
migrations.extend(user_migrations.into_iter());
migrations.extend(user_migrations);
}
// Interleave the system and user migrations based on their version prefixes.
@@ -73,10 +67,8 @@ pub(crate) async fn apply_main_migrations(
migrations
};
let mut conn = LibsqlConnection::from_connection(conn);
let runner = new_migration_runner(&all_migrations);
let report = match runner.run_async(&mut conn).await {
let report = match runner.run(conn) {
Ok(report) => report,
Err(err) => {
error!("Main migrations: {err}");
@@ -99,13 +91,13 @@ pub(crate) async fn apply_main_migrations(
}
#[cfg(test)]
pub(crate) async fn apply_user_migrations(user_conn: Connection) -> Result<(), refinery::Error> {
let mut user_conn = LibsqlConnection::from_connection(user_conn);
pub(crate) fn apply_user_migrations(
user_conn: &mut rusqlite::Connection,
) -> Result<(), refinery::Error> {
let mut runner = main::migrations::runner();
runner.set_migration_table_name(MIGRATION_TABLE_NAME);
let report = runner.run_async(&mut user_conn).await.map_err(|err| {
let report = runner.run(user_conn).map_err(|err| {
error!("User migrations: {err}");
return err;
})?;
@@ -119,13 +111,13 @@ pub(crate) async fn apply_user_migrations(user_conn: Connection) -> Result<(), r
return Ok(());
}
pub(crate) async fn apply_logs_migrations(logs_conn: Connection) -> Result<(), refinery::Error> {
let mut logs_conn = LibsqlConnection::from_connection(logs_conn);
pub(crate) fn apply_logs_migrations(
logs_conn: &mut rusqlite::Connection,
) -> Result<(), refinery::Error> {
let mut runner = logs::migrations::runner();
runner.set_migration_table_name(MIGRATION_TABLE_NAME);
let report = runner.run_async(&mut logs_conn).await.map_err(|err| {
let report = runner.run(logs_conn).map_err(|err| {
error!("Logs migrations: {err}");
return err;
})?;

View File

@@ -21,28 +21,36 @@ pub enum QueryError {
Internal(Box<dyn std::error::Error + Send + Sync>),
}
impl From<libsql::Error> for QueryError {
fn from(err: libsql::Error) -> Self {
impl From<tokio_rusqlite::Error> for QueryError {
fn from(err: tokio_rusqlite::Error) -> Self {
return match err {
// libsql::Error::QueryReturnedNoRows => {
// #[cfg(debug_assertions)]
// info!("libsql returned empty rows error");
//
// Self::RecordNotFound
// }
// List of error codes: https://www.sqlite.org/rescode.html
libsql::Error::SqliteFailure(275, _msg) => Self::BadRequest("sqlite constraint: check"),
libsql::Error::SqliteFailure(531, _msg) => Self::BadRequest("sqlite constraint: commit hook"),
libsql::Error::SqliteFailure(3091, _msg) => Self::BadRequest("sqlite constraint: data type"),
libsql::Error::SqliteFailure(787, _msg) => Self::BadRequest("sqlite constraint: fk"),
libsql::Error::SqliteFailure(1043, _msg) => Self::BadRequest("sqlite constraint: function"),
libsql::Error::SqliteFailure(1299, _msg) => Self::BadRequest("sqlite constraint: not null"),
libsql::Error::SqliteFailure(2835, _msg) => Self::BadRequest("sqlite constraint: pinned"),
libsql::Error::SqliteFailure(1555, _msg) => Self::BadRequest("sqlite constraint: pk"),
libsql::Error::SqliteFailure(2579, _msg) => Self::BadRequest("sqlite constraint: row id"),
libsql::Error::SqliteFailure(1811, _msg) => Self::BadRequest("sqlite constraint: trigger"),
libsql::Error::SqliteFailure(2067, _msg) => Self::BadRequest("sqlite constraint: unique"),
libsql::Error::SqliteFailure(2323, _msg) => Self::BadRequest("sqlite constraint: vtab"),
tokio_rusqlite::Error::Rusqlite(err) => match err {
// rusqlite::Error::QueryReturnedNoRows => {
// #[cfg(debug_assertions)]
// info!("rusqlite returned empty rows error");
//
// Self::RecordNotFound
// }
rusqlite::Error::SqliteFailure(err, _msg) => {
match err.extended_code {
// List of error codes: https://www.sqlite.org/rescode.html
275 => Self::BadRequest("sqlite constraint: check"),
531 => Self::BadRequest("sqlite constraint: commit hook"),
3091 => Self::BadRequest("sqlite constraint: data type"),
787 => Self::BadRequest("sqlite constraint: fk"),
1043 => Self::BadRequest("sqlite constraint: function"),
1299 => Self::BadRequest("sqlite constraint: not null"),
2835 => Self::BadRequest("sqlite constraint: pinned"),
1555 => Self::BadRequest("sqlite constraint: pk"),
2579 => Self::BadRequest("sqlite constraint: row id"),
1811 => Self::BadRequest("sqlite constraint: trigger"),
2067 => Self::BadRequest("sqlite constraint: unique"),
2323 => Self::BadRequest("sqlite constraint: vtab"),
_ => Self::Internal(err.into()),
}
}
_ => Self::Internal(err.into()),
},
err => Self::Internal(err.into()),
};
}

View File

@@ -41,17 +41,20 @@ pub async fn query_handler(
None => HashMap::new(),
};
let mut params: Vec<(String, libsql::Value)> = vec![];
let mut params: Vec<(String, tokio_rusqlite::Value)> = vec![];
for (name, typ) in api.params() {
match query_params.remove(name) {
Some(value) => match *typ {
QueryApiParameterType::Text => {
params.push((format!(":{name}"), libsql::Value::Text(value.clone())));
params.push((
format!(":{name}"),
tokio_rusqlite::Value::Text(value.clone()),
));
}
QueryApiParameterType::Blob => {
params.push((
format!(":{name}"),
libsql::Value::Blob(
tokio_rusqlite::Value::Blob(
BASE64_URL_SAFE
.decode(value)
.map_err(|_err| E::BadRequest("not b64"))?,
@@ -61,7 +64,7 @@ pub async fn query_handler(
QueryApiParameterType::Real => {
params.push((
format!(":{name}"),
libsql::Value::Real(
tokio_rusqlite::Value::Real(
value
.parse::<f64>()
.map_err(|_err| E::BadRequest("expected f64"))?,
@@ -71,7 +74,7 @@ pub async fn query_handler(
QueryApiParameterType::Integer => {
params.push((
format!(":{name}"),
libsql::Value::Integer(
tokio_rusqlite::Value::Integer(
value
.parse::<i64>()
.map_err(|_err| E::BadRequest("expected i64"))?,
@@ -80,7 +83,7 @@ pub async fn query_handler(
}
},
None => {
params.push((format!(":{name}"), libsql::Value::Null));
params.push((format!(":{name}"), tokio_rusqlite::Value::Null));
}
};
}
@@ -103,13 +106,12 @@ pub async fn query_handler(
.collect::<Vec<_>>()
.join(", ")
),
libsql::params::Params::Named(params),
params,
)
.await?;
let (json_rows, columns) = rows_to_json_arrays(response_rows, LIMIT)
.await
.map_err(|err| E::Internal(err.into()))?;
let (json_rows, columns) =
rows_to_json_arrays(response_rows, LIMIT).map_err(|err| E::Internal(err.into()))?;
let Some(columns) = columns else {
return Err(E::Internal("Missing column mapping".into()));

View File

@@ -4,7 +4,6 @@ use std::sync::Arc;
use crate::auth::User;
use crate::config::proto::{QueryApiAcl, QueryApiConfig, QueryApiParameterType};
use crate::query::QueryError;
use trailbase_sqlite::query_one_row;
#[derive(Clone)]
pub struct QueryApi {
@@ -12,7 +11,7 @@ pub struct QueryApi {
}
struct QueryApiState {
conn: libsql::Connection,
conn: tokio_rusqlite::Connection,
api_name: String,
virtual_table_name: String,
@@ -23,7 +22,7 @@ struct QueryApiState {
}
impl QueryApi {
pub fn from(conn: libsql::Connection, config: QueryApiConfig) -> Result<Self, String> {
pub fn from(conn: tokio_rusqlite::Connection, config: QueryApiConfig) -> Result<Self, String> {
return Ok(QueryApi {
state: Arc::new(QueryApiState {
conn,
@@ -70,7 +69,7 @@ impl QueryApi {
pub(crate) async fn check_api_access(
&self,
query_params: &[(String, libsql::Value)],
query_params: &[(String, tokio_rusqlite::Value)],
user: Option<&User>,
) -> Result<(), QueryError> {
let Some(acl) = self.state.acl else {
@@ -116,15 +115,12 @@ impl QueryApi {
let mut params = query_params.to_vec();
params.push((
":__user_id".to_string(),
user.map_or(libsql::Value::Null, |u| libsql::Value::Blob(u.uuid.into())),
user.map_or(tokio_rusqlite::Value::Null, |u| {
tokio_rusqlite::Value::Blob(u.uuid.into())
}),
));
let row = match query_one_row(
&self.state.conn,
&access_query,
libsql::params::Params::Named(params),
)
.await
let row = match crate::util::query_one_row(&self.state.conn, &access_query, params).await
{
Ok(row) => row,
Err(err) => {

View File

@@ -84,7 +84,7 @@ pub async fn create_record_handler(
if !missing_columns.is_empty() {
if let Some(user) = user {
for col in missing_columns {
params.push_param(col, libsql::Value::Blob(user.uuid.into()));
params.push_param(col, tokio_rusqlite::Value::Blob(user.uuid.into()));
}
}
}
@@ -107,8 +107,15 @@ pub async fn create_record_handler(
return Ok(
Json(CreateRecordResponse {
id: match pk_column.data_type {
ColumnDataType::Blob => BASE64_URL_SAFE.encode(row.get::<[u8; 16]>(0)?),
ColumnDataType::Integer => row.get::<i64>(0)?.to_string(),
ColumnDataType::Blob => BASE64_URL_SAFE.encode(
row
.get::<[u8; 16]>(0)
.map_err(|err| RecordError::Internal(err.into()))?,
),
ColumnDataType::Integer => row
.get::<i64>(0)
.map_err(|err| RecordError::Internal(err.into()))?
.to_string(),
_ => {
return Err(RecordError::Internal(
format!("Unexpected data type: {:?}", pk_column.data_type).into(),

View File

@@ -51,8 +51,7 @@ pub async fn delete_record_handler(
#[cfg(test)]
mod test {
use axum::extract::Query;
use libsql::{params, Connection};
use trailbase_sqlite::query_one_row;
use tokio_rusqlite::params;
use super::*;
use crate::admin::user::*;
@@ -137,11 +136,14 @@ mod test {
return Ok(());
}
async fn message_exists(conn: &Connection, id: &[u8; 16]) -> Result<bool, anyhow::Error> {
let count: i64 = query_one_row(
async fn message_exists(
conn: &tokio_rusqlite::Connection,
id: &[u8; 16],
) -> Result<bool, anyhow::Error> {
let count: i64 = crate::util::query_one_row(
conn,
"SELECT COUNT(*) FROM message WHERE id = $1",
params!(id),
params!(*id),
)
.await?
.get(0)?;

View File

@@ -25,28 +25,37 @@ pub enum RecordError {
Internal(Box<dyn std::error::Error + Send + Sync>),
}
impl From<libsql::Error> for RecordError {
fn from(err: libsql::Error) -> Self {
impl From<tokio_rusqlite::Error> for RecordError {
fn from(err: tokio_rusqlite::Error) -> Self {
return match err {
libsql::Error::QueryReturnedNoRows => {
#[cfg(debug_assertions)]
info!("libsql returned empty rows error");
tokio_rusqlite::Error::Rusqlite(err) => match err {
rusqlite::Error::QueryReturnedNoRows => {
#[cfg(debug_assertions)]
info!("SQLite returned empty rows error");
Self::RecordNotFound
}
// List of error codes: https://www.sqlite.org/rescode.html
libsql::Error::SqliteFailure(275, _msg) => Self::BadRequest("sqlite constraint: check"),
libsql::Error::SqliteFailure(531, _msg) => Self::BadRequest("sqlite constraint: commit hook"),
libsql::Error::SqliteFailure(3091, _msg) => Self::BadRequest("sqlite constraint: data type"),
libsql::Error::SqliteFailure(787, _msg) => Self::BadRequest("sqlite constraint: fk"),
libsql::Error::SqliteFailure(1043, _msg) => Self::BadRequest("sqlite constraint: function"),
libsql::Error::SqliteFailure(1299, _msg) => Self::BadRequest("sqlite constraint: not null"),
libsql::Error::SqliteFailure(2835, _msg) => Self::BadRequest("sqlite constraint: pinned"),
libsql::Error::SqliteFailure(1555, _msg) => Self::BadRequest("sqlite constraint: pk"),
libsql::Error::SqliteFailure(2579, _msg) => Self::BadRequest("sqlite constraint: row id"),
libsql::Error::SqliteFailure(1811, _msg) => Self::BadRequest("sqlite constraint: trigger"),
libsql::Error::SqliteFailure(2067, _msg) => Self::BadRequest("sqlite constraint: unique"),
libsql::Error::SqliteFailure(2323, _msg) => Self::BadRequest("sqlite constraint: vtab"),
Self::RecordNotFound
}
rusqlite::Error::SqliteFailure(err, _msg) => {
match err.extended_code {
// List of error codes: https://www.sqlite.org/rescode.html
275 => Self::BadRequest("sqlite constraint: check"),
531 => Self::BadRequest("sqlite constraint: commit hook"),
3091 => Self::BadRequest("sqlite constraint: data type"),
787 => Self::BadRequest("sqlite constraint: fk"),
1043 => Self::BadRequest("sqlite constraint: function"),
1299 => Self::BadRequest("sqlite constraint: not null"),
2835 => Self::BadRequest("sqlite constraint: pinned"),
1555 => Self::BadRequest("sqlite constraint: pk"),
2579 => Self::BadRequest("sqlite constraint: row id"),
1811 => Self::BadRequest("sqlite constraint: trigger"),
2067 => Self::BadRequest("sqlite constraint: unique"),
2323 => Self::BadRequest("sqlite constraint: vtab"),
_ => Self::Internal(err.into()),
}
}
_ => Self::Internal(err.into()),
},
err => Self::Internal(err.into()),
};
}

View File

@@ -11,8 +11,6 @@ use crate::table_metadata::{JsonColumnMetadata, TableOrViewMetadata};
#[derive(Debug, Error)]
pub enum FileError {
#[error("Libsql error: {0}")]
Libsql(#[from] libsql::Error),
#[error("Storage error: {0}")]
Storage(#[from] object_store::Error),
#[error("IO error: {0}")]
@@ -56,7 +54,7 @@ pub(crate) async fn read_file_into_response(
pub(crate) async fn delete_files_in_row(
state: &AppState,
metadata: &(dyn TableOrViewMetadata + Send + Sync),
row: libsql::Row,
row: tokio_rusqlite::Row,
) -> Result<(), FileError> {
for i in 0..row.column_count() {
let Some(col_name) = row.column_name(i) else {
@@ -72,14 +70,14 @@ pub(crate) async fn delete_files_in_row(
let store = state.objectstore();
match json {
JsonColumnMetadata::SchemaName(name) if name == "std.FileUpload" => {
if let Ok(json) = row.get_str(i) {
let file: FileUpload = serde_json::from_str(json)?;
if let Ok(json) = row.get::<String>(i) {
let file: FileUpload = serde_json::from_str(&json)?;
delete_file(store, file).await?;
}
}
JsonColumnMetadata::SchemaName(name) if name == "std.FileUploads" => {
if let Ok(json) = row.get_str(i) {
let file_uploads: FileUploads = serde_json::from_str(json)?;
if let Ok(json) = row.get::<String>(i) {
let file_uploads: FileUploads = serde_json::from_str(&json)?;
for file in file_uploads.0 {
delete_file(store, file).await?;
}

View File

@@ -5,12 +5,12 @@ use object_store::ObjectStore;
use std::collections::{hash_map::Entry, HashMap};
use std::sync::Arc;
use trailbase_sqlite::schema::{FileUpload, FileUploadInput, FileUploads};
use trailbase_sqlite::{query_one_row, query_row};
use crate::config::proto::ConflictResolutionStrategy;
use crate::records::files::delete_files_in_row;
use crate::schema::{Column, ColumnDataType};
use crate::table_metadata::{self, ColumnMetadata, JsonColumnMetadata, TableMetadata};
use crate::util::query_one_row;
use crate::AppState;
#[derive(Debug, Clone, thiserror::Error)]
@@ -41,18 +41,10 @@ pub enum ParamsError {
JsonSerialization(Arc<serde_json::Error>),
#[error("Json schema error: {0}")]
Schema(#[from] trailbase_sqlite::schema::SchemaError),
#[error("Sql error: {0}")]
Sql(Arc<libsql::Error>),
#[error("ObjectStore error: {0}")]
Storage(Arc<object_store::Error>),
}
impl From<libsql::Error> for ParamsError {
fn from(err: libsql::Error) -> Self {
return Self::Sql(Arc::new(err));
}
}
impl From<serde_json::Error> for ParamsError {
fn from(err: serde_json::Error) -> Self {
return Self::JsonSerialization(Arc::new(err));
@@ -70,7 +62,11 @@ pub enum QueryError {
#[error("Precondition error: {0}")]
Precondition(&'static str),
#[error("Sql error: {0}")]
Sql(Arc<libsql::Error>),
Sql(Arc<rusqlite::Error>),
#[error("FromSql error: {0}")]
FromSql(Arc<rusqlite::types::FromSqlError>),
#[error("Tokio Rusqlite error: {0}")]
TokioRusqlite(Arc<tokio_rusqlite::Error>),
#[error("Json serialization error: {0}")]
JsonSerialization(Arc<serde_json::Error>),
#[error("ObjectStore error: {0}")]
@@ -81,27 +77,33 @@ pub enum QueryError {
NotFound,
}
impl From<libsql::Error> for QueryError {
fn from(err: libsql::Error) -> Self {
return Self::Sql(Arc::new(err));
impl From<serde_json::Error> for QueryError {
fn from(err: serde_json::Error) -> Self {
return Self::JsonSerialization(err.into());
}
}
impl From<serde_json::Error> for QueryError {
fn from(err: serde_json::Error) -> Self {
return Self::JsonSerialization(Arc::new(err));
impl From<tokio_rusqlite::Error> for QueryError {
fn from(err: tokio_rusqlite::Error) -> Self {
return Self::TokioRusqlite(err.into());
}
}
impl From<rusqlite::types::FromSqlError> for QueryError {
fn from(err: rusqlite::types::FromSqlError) -> Self {
return Self::FromSql(err.into());
}
}
impl From<object_store::Error> for QueryError {
fn from(err: object_store::Error) -> Self {
return Self::Storage(Arc::new(err));
return Self::Storage(err.into());
}
}
impl From<crate::records::files::FileError> for QueryError {
fn from(err: crate::records::files::FileError) -> Self {
return Self::File(Arc::new(err));
return Self::File(err.into());
}
}
@@ -116,7 +118,7 @@ pub struct Params {
/// List of named params with their respective placeholders, e.g.:
/// '(":col_name": Value::Text("hi"))'.
params: Vec<(String, libsql::Value)>,
params: Vec<(String, tokio_rusqlite::Value)>,
/// List of columns that are targeted by the params. Useful for building Insert/Update queries.
///
@@ -132,7 +134,7 @@ pub struct Params {
}
impl Params {
/// Converts a top-level Json object into libsql::Values and extract files.
/// Converts a top-level Json object into tokio_rusqlite::Values and extracts files.
///
/// Note: that this function by design is non-recursive, since we're mapping to a flat hierarchy
/// in sqlite, since even JSON/JSONB is simply text/blob that is lazily parsed.
@@ -208,7 +210,7 @@ impl Params {
return metadata.column_by_name(field_name);
}
pub fn push_param(&mut self, col: String, value: libsql::Value) {
pub fn push_param(&mut self, col: String, value: tokio_rusqlite::Value) {
self.params.push((format!(":{col}"), value));
self.col_names.push(col);
}
@@ -217,7 +219,7 @@ impl Params {
return &self.col_names;
}
pub(crate) fn named_params(&self) -> &Vec<(String, libsql::Value)> {
pub(crate) fn named_params(&self) -> &Vec<(String, tokio_rusqlite::Value)> {
&self.params
}
@@ -287,7 +289,7 @@ impl Params {
for (col_name, file_upload) in file_upload_map {
self.params.push((
format!(":{col_name}"),
libsql::Value::Text(serde_json::to_string(&file_upload)?),
tokio_rusqlite::Value::Text(serde_json::to_string(&file_upload)?),
));
self.col_names.push(col_name.clone());
self.file_col_names.push(col_name);
@@ -296,7 +298,7 @@ impl Params {
for (col_name, file_uploads) in file_uploads_map {
self.params.push((
format!(":{col_name}"),
libsql::Value::Text(serde_json::to_string(&FileUploads(file_uploads))?),
tokio_rusqlite::Value::Text(serde_json::to_string(&FileUploads(file_uploads))?),
));
self.col_names.push(col_name.clone());
self.file_col_names.push(col_name);
@@ -320,14 +322,15 @@ impl SelectQueryBuilder {
state: &AppState,
table_name: &str,
pk_column: &str,
pk_value: libsql::Value,
) -> Result<Option<libsql::Row>, libsql::Error> {
return query_row(
state.conn(),
&format!("SELECT * FROM '{table_name}' WHERE {pk_column} = $1"),
[pk_value],
)
.await;
pk_value: tokio_rusqlite::Value,
) -> Result<Option<tokio_rusqlite::Row>, tokio_rusqlite::Error> {
return state
.conn()
.query_row(
&format!("SELECT * FROM '{table_name}' WHERE {pk_column} = $1"),
[pk_value],
)
.await;
}
}
@@ -339,18 +342,19 @@ impl GetFileQueryBuilder {
table_name: &str,
file_column: (&Column, &ColumnMetadata),
pk_column: &str,
pk_value: libsql::Value,
pk_value: tokio_rusqlite::Value,
) -> Result<FileUpload, QueryError> {
return match &file_column.1.json {
Some(JsonColumnMetadata::SchemaName(name)) if name == "std.FileUpload" => {
let column_name = &file_column.0.name;
let Some(row) = query_row(
state.conn(),
&format!("SELECT [{column_name}] FROM '{table_name}' WHERE {pk_column} = $1"),
[pk_value],
)
.await?
let Some(row) = state
.conn()
.query_row(
&format!("SELECT [{column_name}] FROM '{table_name}' WHERE {pk_column} = $1"),
[pk_value],
)
.await?
else {
return Err(QueryError::NotFound);
};
@@ -372,18 +376,19 @@ impl GetFilesQueryBuilder {
table_name: &str,
file_column: (&Column, &ColumnMetadata),
pk_column: &str,
pk_value: libsql::Value,
pk_value: tokio_rusqlite::Value,
) -> Result<FileUploads, QueryError> {
return match &file_column.1.json {
Some(JsonColumnMetadata::SchemaName(name)) if name == "std.FileUploads" => {
let column_name = &file_column.0.name;
let Some(row) = query_row(
state.conn(),
&format!("SELECT [{column_name}] FROM '{table_name}' WHERE {pk_column} = $1"),
[pk_value],
)
.await?
let Some(row) = state
.conn()
.query_row(
&format!("SELECT [{column_name}] FROM '{table_name}' WHERE {pk_column} = $1"),
[pk_value],
)
.await?
else {
return Err(QueryError::NotFound);
};
@@ -405,7 +410,7 @@ impl InsertQueryBuilder {
params: Params,
conflict_resolution: Option<ConflictResolutionStrategy>,
return_column_name: Option<&str>,
) -> Result<libsql::Row, QueryError> {
) -> Result<tokio_rusqlite::Row, QueryError> {
let (query_fragment, named_params, mut files) =
Self::build_insert_query(params, conflict_resolution)?;
let query = match return_column_name {
@@ -445,7 +450,14 @@ impl InsertQueryBuilder {
fn build_insert_query(
params: Params,
conflict_resolution: Option<ConflictResolutionStrategy>,
) -> Result<(String, libsql::params::Params, FileMetadataContents), QueryError> {
) -> Result<
(
String,
Vec<(String, tokio_rusqlite::Value)>,
FileMetadataContents,
),
QueryError,
> {
let table_name = &params.table_name;
let conflict_clause = Self::conflict_resolution_clause(
@@ -462,11 +474,7 @@ impl InsertQueryBuilder {
),
};
return Ok((
query,
libsql::params::Params::Named(params.params),
params.files,
));
return Ok((query, params.params, params.files));
}
fn conflict_resolution_clause(config: ConflictResolutionStrategy) -> &'static str {
@@ -490,7 +498,7 @@ impl UpdateQueryBuilder {
metadata: &TableMetadata,
mut params: Params,
pk_column: &str,
pk_value: libsql::Value,
pk_value: tokio_rusqlite::Value,
) -> Result<(), QueryError> {
let table_name = metadata.name();
assert_eq!(params.table_name, *table_name);
@@ -510,12 +518,12 @@ impl UpdateQueryBuilder {
}
async fn row_update(
conn: &libsql::Connection,
conn: &tokio_rusqlite::Connection,
table_name: &str,
params: Params,
pk_column: &str,
pk_value: libsql::Value,
) -> Result<Option<libsql::Row>, QueryError> {
pk_value: tokio_rusqlite::Value,
) -> Result<Option<tokio_rusqlite::Row>, QueryError> {
let build_setters = || -> String {
assert_eq!(params.col_names.len(), params.params.len());
return std::iter::zip(&params.col_names, &params.params)
@@ -524,34 +532,52 @@ impl UpdateQueryBuilder {
};
let setters = build_setters();
let named_params = libsql::params::Params::Named(params.params);
let tx = conn.transaction().await?;
let pk_column = pk_column.to_string();
let table_name = table_name.to_string();
let files_row = conn
.call(move |conn| {
let tx = conn.transaction()?;
// First, fetch updated file column contents so we can delete the files after updating the
// column.
let files_row = if params.file_col_names.is_empty() {
None
} else {
let file_columns = params.file_col_names.join(", ");
query_row(
&tx,
&format!("SELECT {file_columns} FROM '{table_name}' WHERE {pk_column} = ${pk_column}"),
libsql::params::Params::Named(vec![(":pk_column".to_string(), pk_value)]),
)
.await?
};
// First, fetch updated file column contents so we can delete the files after updating the
// column.
let files_row = if params.file_col_names.is_empty() {
None
} else {
let file_columns = params.file_col_names.join(", ");
// Update the column.
let _ = tx
.execute(
&format!("UPDATE '{table_name}' SET {setters} WHERE {pk_column} = :{pk_column}"),
named_params,
)
let mut stmt = tx.prepare(&format!(
"SELECT {file_columns} FROM '{table_name}' WHERE {pk_column} = ${pk_column}"
))?;
use tokio_rusqlite::Params;
[(":pk_column", pk_value)].bind(&mut stmt)?;
let mut rows = stmt.raw_query();
if let Some(row) = rows.next()? {
Some(tokio_rusqlite::Row::from_row(row, None)?)
} else {
None
}
};
// Update the column.
{
let mut stmt = tx.prepare(&format!(
"UPDATE '{table_name}' SET {setters} WHERE {pk_column} = :{pk_column}"
))?;
use tokio_rusqlite::Params;
params.params.bind(&mut stmt)?;
stmt.raw_execute()?;
}
tx.commit()?;
return Ok(files_row);
})
.await?;
tx.commit().await?;
return Ok(files_row);
}
@@ -589,7 +615,7 @@ impl DeleteQueryBuilder {
state: &AppState,
metadata: &TableMetadata,
pk_column: &str,
pk_value: libsql::Value,
pk_value: tokio_rusqlite::Value,
) -> Result<(), QueryError> {
let table_name = metadata.name();
@@ -621,7 +647,9 @@ async fn write_file(
return Ok(());
}
fn try_json_array_to_blob(arr: &Vec<serde_json::Value>) -> Result<libsql::Value, ParamsError> {
fn try_json_array_to_blob(
arr: &Vec<serde_json::Value>,
) -> Result<tokio_rusqlite::Value, ParamsError> {
let mut byte_array: Vec<u8> = vec![];
for el in arr {
match el {
@@ -650,25 +678,25 @@ fn try_json_array_to_blob(arr: &Vec<serde_json::Value>) -> Result<libsql::Value,
};
}
return Ok(libsql::Value::Blob(byte_array));
return Ok(tokio_rusqlite::Value::Blob(byte_array));
}
fn json_string_to_value(
data_type: ColumnDataType,
value: String,
) -> Result<libsql::Value, ParamsError> {
) -> Result<tokio_rusqlite::Value, ParamsError> {
return Ok(match data_type {
ColumnDataType::Null => libsql::Value::Null,
ColumnDataType::Null => tokio_rusqlite::Value::Null,
// Strict/storage types
ColumnDataType::Any => libsql::Value::Text(value),
ColumnDataType::Text => libsql::Value::Text(value),
ColumnDataType::Blob => libsql::Value::Blob(BASE64_URL_SAFE.decode(value)?),
ColumnDataType::Integer => libsql::Value::Integer(value.parse::<i64>()?),
ColumnDataType::Real => libsql::Value::Real(value.parse::<f64>()?),
ColumnDataType::Numeric => libsql::Value::Integer(value.parse::<i64>()?),
ColumnDataType::Any => tokio_rusqlite::Value::Text(value),
ColumnDataType::Text => tokio_rusqlite::Value::Text(value),
ColumnDataType::Blob => tokio_rusqlite::Value::Blob(BASE64_URL_SAFE.decode(value)?),
ColumnDataType::Integer => tokio_rusqlite::Value::Integer(value.parse::<i64>()?),
ColumnDataType::Real => tokio_rusqlite::Value::Real(value.parse::<f64>()?),
ColumnDataType::Numeric => tokio_rusqlite::Value::Integer(value.parse::<i64>()?),
// JSON types.
ColumnDataType::JSONB => libsql::Value::Blob(value.into_bytes().to_vec()),
ColumnDataType::JSON => libsql::Value::Text(value),
ColumnDataType::JSONB => tokio_rusqlite::Value::Blob(value.into_bytes().to_vec()),
ColumnDataType::JSON => tokio_rusqlite::Value::Text(value),
// Affine types
//
// Integers:
@@ -680,7 +708,7 @@ fn json_string_to_value(
| ColumnDataType::UnignedBigInt
| ColumnDataType::Int2
| ColumnDataType::Int4
| ColumnDataType::Int8 => libsql::Value::Integer(value.parse::<i64>()?),
| ColumnDataType::Int8 => tokio_rusqlite::Value::Integer(value.parse::<i64>()?),
// Text:
ColumnDataType::Character
| ColumnDataType::Varchar
@@ -688,23 +716,23 @@ fn json_string_to_value(
| ColumnDataType::NChar
| ColumnDataType::NativeCharacter
| ColumnDataType::NVarChar
| ColumnDataType::Clob => libsql::Value::Text(value),
| ColumnDataType::Clob => tokio_rusqlite::Value::Text(value),
// Real:
ColumnDataType::Double | ColumnDataType::DoublePrecision | ColumnDataType::Float => {
libsql::Value::Real(value.parse::<f64>()?)
tokio_rusqlite::Value::Real(value.parse::<f64>()?)
}
// Numeric
ColumnDataType::Boolean
| ColumnDataType::Decimal
| ColumnDataType::Date
| ColumnDataType::DateTime => libsql::Value::Integer(value.parse::<i64>()?),
| ColumnDataType::DateTime => tokio_rusqlite::Value::Integer(value.parse::<i64>()?),
});
}
pub fn simple_json_value_to_param(
col_type: ColumnDataType,
value: serde_json::Value,
) -> Result<libsql::Value, ParamsError> {
) -> Result<tokio_rusqlite::Value, ParamsError> {
let param = match value {
serde_json::Value::Object(ref _map) => {
return Err(ParamsError::UnexpectedType(
@@ -724,16 +752,16 @@ pub fn simple_json_value_to_param(
try_json_array_to_blob(arr)?
}
serde_json::Value::Null => libsql::Value::Null,
serde_json::Value::Bool(b) => libsql::Value::Integer(b as i64),
serde_json::Value::Null => tokio_rusqlite::Value::Null,
serde_json::Value::Bool(b) => tokio_rusqlite::Value::Integer(b as i64),
serde_json::Value::String(str) => json_string_to_value(col_type, str)?,
serde_json::Value::Number(number) => {
if let Some(n) = number.as_i64() {
libsql::Value::Integer(n)
tokio_rusqlite::Value::Integer(n)
} else if let Some(n) = number.as_u64() {
libsql::Value::Integer(n as i64)
tokio_rusqlite::Value::Integer(n as i64)
} else if let Some(n) = number.as_f64() {
libsql::Value::Real(n)
tokio_rusqlite::Value::Real(n)
} else {
warn!("Not a valid number: {number:?}");
return Err(ParamsError::NotANumber);
@@ -748,7 +776,7 @@ fn extract_params_and_files_from_json(
col: &Column,
col_meta: &ColumnMetadata,
value: serde_json::Value,
) -> Result<(libsql::Value, Option<FileMetadataContents>), ParamsError> {
) -> Result<(tokio_rusqlite::Value, Option<FileMetadataContents>), ParamsError> {
let col_name = &col.name;
match value {
serde_json::Value::Object(ref _map) => {
@@ -773,13 +801,13 @@ fn extract_params_and_files_from_json(
let file_upload: FileUploadInput = serde_json::from_value(value)?;
let (_col_name, metadata, content) = file_upload.consume()?;
let param = libsql::Value::Text(serde_json::to_string(&metadata)?);
let param = tokio_rusqlite::Value::Text(serde_json::to_string(&metadata)?);
return Ok((param, Some(vec![(metadata, content)])));
}
_ => {
json.validate(&value)?;
return Ok((libsql::Value::Text(value.to_string()), None));
return Ok((tokio_rusqlite::Value::Text(value.to_string()), None));
}
}
}
@@ -803,13 +831,13 @@ fn extract_params_and_files_from_json(
uploads.push((metadata, content));
}
let param = libsql::Value::Text(serde_json::to_string(&FileUploads(temp))?);
let param = tokio_rusqlite::Value::Text(serde_json::to_string(&FileUploads(temp))?);
return Ok((param, Some(uploads)));
}
schema => {
schema.validate(&value)?;
return Ok((libsql::Value::Text(value.to_string()), None));
return Ok((tokio_rusqlite::Value::Text(value.to_string()), None));
}
}
}
@@ -946,24 +974,24 @@ mod tests {
match param.as_str() {
ID_COL_PLACEHOLDER => {
assert!(
matches!(value, libsql::Value::Blob(x) if *x == id),
matches!(value, tokio_rusqlite::Value::Blob(x) if *x == id),
"VALUE: {value:?}"
);
}
":blob" => {
assert!(matches!(value, libsql::Value::Blob(x) if *x == blob));
assert!(matches!(value, tokio_rusqlite::Value::Blob(x) if *x == blob));
}
":text" => {
assert!(matches!(value, libsql::Value::Text(x) if x.contains("some text :)")));
assert!(matches!(value, tokio_rusqlite::Value::Text(x) if x.contains("some text :)")));
}
":num" => {
assert!(matches!(value, libsql::Value::Integer(x) if *x == 5));
assert!(matches!(value, tokio_rusqlite::Value::Integer(x) if *x == 5));
}
":real" => {
assert!(matches!(value, libsql::Value::Real(x) if *x == 3.0));
assert!(matches!(value, tokio_rusqlite::Value::Real(x) if *x == 3.0));
}
":json_col" => {
assert!(matches!(value, libsql::Value::Text(_x)));
assert!(matches!(value, tokio_rusqlite::Value::Text(_x)));
}
x => assert!(false, "{x}"),
}
@@ -1061,7 +1089,7 @@ mod tests {
let params = Params::from(&metadata, json_row_from_value(value).unwrap(), None).unwrap();
let json_col: Vec<libsql::Value> = params
let json_col: Vec<tokio_rusqlite::Value> = params
.params
.iter()
.filter_map(|(name, value)| {
@@ -1073,7 +1101,7 @@ mod tests {
.collect();
assert_eq!(json_col.len(), 1);
let libsql::Value::Text(ref text) = json_col[0] else {
let tokio_rusqlite::Value::Text(ref text) = json_col[0] else {
panic!("Unexpected param type: {:?}", json_col[0]);
};

View File

@@ -47,12 +47,15 @@ pub async fn list_records_handler(
} = build_filter_where_clause(metadata, filter_params)
.map_err(|_err| RecordError::BadRequest("Invalid filter params"))?;
if let Some(cursor) = cursor {
params.push((":cursor".to_string(), libsql::Value::Blob(cursor.to_vec())));
params.push((
":cursor".to_string(),
tokio_rusqlite::Value::Blob(cursor.to_vec()),
));
clause = format!("{clause} AND _ROW_.id < :cursor");
}
params.push((
":limit".to_string(),
libsql::Value::Integer(limit_or_default(limit) as i64),
tokio_rusqlite::Value::Integer(limit_or_default(limit) as i64),
));
// User properties
@@ -102,10 +105,7 @@ pub async fn list_records_handler(
table_name = api.table_name()
);
let rows = state
.conn()
.query(&query, libsql::params::Params::Named(params))
.await?;
let rows = state.conn().query(&query, params).await?;
return Ok(Json(serde_json::Value::Array(
rows_to_json(metadata, rows, |col_name| !col_name.starts_with("_"))

View File

@@ -46,7 +46,7 @@ pub async fn read_record_handler(
};
return Ok(Json(
row_to_json(api.metadata(), row, |col_name| !col_name.starts_with("_"))
row_to_json(api.metadata(), &row, |col_name| !col_name.starts_with("_"))
.map_err(|err| RecordError::Internal(err.into()))?,
));
}
@@ -163,7 +163,7 @@ pub async fn get_uploaded_files_from_record_handler(
mod test {
use axum::extract::{Path, Query, State};
use axum::Json;
use trailbase_sqlite::{query_one_row, schema::FileUpload, schema::FileUploadInput};
use trailbase_sqlite::{schema::FileUpload, schema::FileUploadInput};
use super::*;
use crate::admin::user::*;
@@ -181,11 +181,11 @@ mod test {
use crate::records::test_utils::*;
use crate::records::*;
use crate::test::unpack_json_response;
use crate::util::id_to_b64;
use crate::util::{id_to_b64, query_one_row};
#[tokio::test]
async fn libsql_ignores_extra_parameters_test() -> Result<(), anyhow::Error> {
// This test is actually just testing libsql and making sure that we can overprovision
async fn ignores_extra_sql_parameters_test() -> Result<(), anyhow::Error> {
// This test is actually just testing our SQL driver and making sure that we can overprovision
// arguments. Specifically, we want to provide :user and :id arguments even if they're not
// consumed by a user-provided access query.
let state = test_state(None).await?;
@@ -195,14 +195,14 @@ mod test {
conn
.execute(
&format!("INSERT INTO '{USER_TABLE}' (email) VALUES ($1)"),
libsql::params!(EMAIL),
tokio_rusqlite::params!(EMAIL),
)
.await?;
query_one_row(
conn,
&format!("SELECT * from '{USER_TABLE}' WHERE email = :email"),
libsql::named_params! {
tokio_rusqlite::named_params! {
":email": EMAIL,
":unused": "unused",
":foo": 42,

View File

@@ -1,7 +1,6 @@
use itertools::Itertools;
use log::*;
use std::sync::Arc;
use trailbase_sqlite::query_one_row;
use crate::auth::user::User;
use crate::config::proto::{ConflictResolutionStrategy, RecordApiConfig};
@@ -32,7 +31,7 @@ enum RecordApiMetadata {
}
struct RecordApiState {
conn: libsql::Connection,
conn: tokio_rusqlite::Connection,
metadata: RecordApiMetadata,
record_pk_column: Column,
@@ -51,7 +50,7 @@ struct RecordApiState {
impl RecordApi {
pub fn from_table(
conn: libsql::Connection,
conn: tokio_rusqlite::Connection,
table_metadata: TableMetadata,
config: RecordApiConfig,
) -> Result<Self, String> {
@@ -82,7 +81,7 @@ impl RecordApi {
}
pub fn from_view(
conn: libsql::Connection,
conn: tokio_rusqlite::Connection,
view_metadata: ViewMetadata,
config: RecordApiConfig,
) -> Result<Self, String> {
@@ -119,7 +118,7 @@ impl RecordApi {
}
fn from_impl(
conn: libsql::Connection,
conn: tokio_rusqlite::Connection,
record_pk_column: Column,
metadata: RecordApiMetadata,
config: RecordApiConfig,
@@ -187,14 +186,14 @@ impl RecordApi {
}
}
pub fn id_to_sql(&self, id: &str) -> Result<libsql::Value, RecordError> {
pub fn id_to_sql(&self, id: &str) -> Result<tokio_rusqlite::Value, RecordError> {
return match self.state.record_pk_column.data_type {
ColumnDataType::Blob => {
let record_id = b64_to_id(id).map_err(|_err| RecordError::BadRequest("Invalid id"))?;
assert_uuidv7(&record_id);
Ok(libsql::Value::Blob(record_id.into()))
Ok(tokio_rusqlite::Value::Blob(record_id.into()))
}
ColumnDataType::Integer => Ok(libsql::Value::Integer(
ColumnDataType::Integer => Ok(tokio_rusqlite::Value::Integer(
id.parse::<i64>()
.map_err(|_err| RecordError::BadRequest("Invalid id"))?,
)),
@@ -232,7 +231,7 @@ impl RecordApi {
pub async fn check_record_level_access(
&self,
p: Permission,
record_id: Option<&libsql::Value>,
record_id: Option<&tokio_rusqlite::Value>,
request_params: Option<&mut LazyParams<'_>>,
user: Option<&User>,
) -> Result<(), RecordError> {
@@ -255,7 +254,7 @@ impl RecordApi {
)
.await?;
let row = match query_one_row(&self.state.conn, &access_query, params).await {
let row = match crate::util::query_one_row(&self.state.conn, &access_query, params).await {
Ok(row) => row,
Err(err) => {
error!("RLA query '{access_query}' failed: {err}");
@@ -311,10 +310,10 @@ impl RecordApi {
p: Permission,
access_rule: &str,
table_name: &str,
record_id: Option<&libsql::Value>,
record_id: Option<&tokio_rusqlite::Value>,
request_params: Option<&mut LazyParams<'_>>,
user: Option<&User>,
) -> Result<(String, libsql::params::Params), RecordError> {
) -> Result<(String, Vec<(String, tokio_rusqlite::Value)>), RecordError> {
let pk_column_name = &self.state.record_pk_column.name;
// We need to inject context like: record id, user, request, and row into the access
// check. Below we're building the query and binding the context as params accordingly.
@@ -322,7 +321,7 @@ impl RecordApi {
params.push((
":__record_id".to_string(),
record_id.map_or(libsql::Value::Null, |id| id.clone()),
record_id.map_or(tokio_rusqlite::Value::Null, |id| id.clone()),
));
// Assumes access_rule is an expression: https://www.sqlite.org/syntax/expr.html
@@ -391,13 +390,13 @@ impl RecordApi {
),
};
return Ok((query, libsql::params::Params::Named(params)));
return Ok((query, params));
}
}
pub(crate) fn build_user_sub_select(
user: Option<&User>,
) -> (&'static str, Vec<(String, libsql::Value)>) {
) -> (&'static str, Vec<(String, tokio_rusqlite::Value)>) {
const QUERY: &str = "SELECT :__user_id AS id";
if let Some(user) = user {
@@ -405,11 +404,14 @@ pub(crate) fn build_user_sub_select(
QUERY,
vec![(
":__user_id".to_string(),
libsql::Value::Blob(user.uuid.into()),
tokio_rusqlite::Value::Blob(user.uuid.into()),
)],
);
} else {
return (QUERY, vec![(":__user_id".to_string(), libsql::Value::Null)]);
return (
QUERY,
vec![(":__user_id".to_string(), tokio_rusqlite::Value::Null)],
);
}
}
@@ -417,7 +419,7 @@ pub(crate) fn build_user_sub_select(
fn build_request_sub_select(
table_metadata: &TableMetadata,
request_params: &Params,
) -> (String, Vec<(String, libsql::Value)>) {
) -> (String, Vec<(String, tokio_rusqlite::Value)>) {
// NOTE: This has gotten pretty wild. We cannot have access queries access missing _REQ_.props.
// So we need to inject an explicit NULL value for all missing fields on the request.
// Can we make this cheaper, either by pre-processing the access query or improving construction?
@@ -425,10 +427,10 @@ fn build_request_sub_select(
// save some string ops?
let schema = &table_metadata.schema;
let mut named_params: Vec<(String, libsql::Value)> = schema
let mut named_params: Vec<(String, tokio_rusqlite::Value)> = schema
.columns
.iter()
.map(|c| (format!(":{}", c.name), libsql::Value::Null))
.map(|c| (format!(":{}", c.name), tokio_rusqlite::Value::Null))
.collect();
for (param_index, col_name) in request_params.column_names().iter().enumerate() {

View File

@@ -19,25 +19,27 @@ pub enum JsonError {
ValueNotFound,
}
fn value_to_json(value: libsql::Value) -> Result<serde_json::Value, JsonError> {
fn value_to_json(value: rusqlite::types::Value) -> Result<serde_json::Value, JsonError> {
return Ok(match value {
libsql::Value::Null => serde_json::Value::Null,
libsql::Value::Real(real) => {
rusqlite::types::Value::Null => serde_json::Value::Null,
rusqlite::types::Value::Real(real) => {
let Some(number) = serde_json::Number::from_f64(real) else {
return Err(JsonError::Finite);
};
serde_json::Value::Number(number)
}
libsql::Value::Integer(integer) => serde_json::Value::Number(serde_json::Number::from(integer)),
libsql::Value::Blob(blob) => serde_json::Value::String(BASE64_URL_SAFE.encode(blob)),
libsql::Value::Text(text) => serde_json::Value::String(text),
rusqlite::types::Value::Integer(integer) => {
serde_json::Value::Number(serde_json::Number::from(integer))
}
rusqlite::types::Value::Blob(blob) => serde_json::Value::String(BASE64_URL_SAFE.encode(blob)),
rusqlite::types::Value::Text(text) => serde_json::Value::String(text),
});
}
// Serialize libsql row to json.
/// Serialize SQL row to json.
pub fn row_to_json(
metadata: &(dyn TableOrViewMetadata + Send + Sync),
row: libsql::Row,
row: &tokio_rusqlite::Row,
column_filter: fn(&str) -> bool,
) -> Result<serde_json::Value, JsonError> {
let mut map = serde_json::Map::<String, serde_json::Value>::default();
@@ -52,7 +54,7 @@ pub fn row_to_json(
}
let value = row.get_value(i).map_err(|_err| JsonError::ValueNotFound)?;
if let libsql::Value::Text(str) = &value {
if let rusqlite::types::Value::Text(str) = &value {
if let Some((_col, col_meta)) = metadata.column_by_name(col_name) {
if col_meta.json.is_some() {
map.insert(col_name.to_string(), serde_json::from_str(str)?);
@@ -69,25 +71,24 @@ pub fn row_to_json(
return Ok(serde_json::Value::Object(map));
}
// Turns rows into a list of json objects.
/// Turns rows into a list of json objects.
pub async fn rows_to_json(
metadata: &(dyn TableOrViewMetadata + Send + Sync),
mut rows: libsql::Rows,
rows: tokio_rusqlite::Rows,
column_filter: fn(&str) -> bool,
) -> Result<Vec<serde_json::Value>, JsonError> {
let mut objects: Vec<serde_json::Value> = vec![];
while let Some(row) = rows.next().await.map_err(|_err| JsonError::RowNotFound)? {
for row in rows.iter() {
objects.push(row_to_json(metadata, row, column_filter)?);
}
return Ok(objects);
}
/// Turns a row into a list of json arrays.
pub fn row_to_json_array(row: libsql::Row) -> Result<Vec<serde_json::Value>, JsonError> {
pub fn row_to_json_array(row: &tokio_rusqlite::Row) -> Result<Vec<serde_json::Value>, JsonError> {
let cols = row.column_count();
let mut json_row = Vec::<serde_json::Value>::with_capacity(cols as usize);
let mut json_row = Vec::<serde_json::Value>::with_capacity(cols);
for i in 0..cols {
let value = row.get_value(i).map_err(|_err| JsonError::ValueNotFound)?;
@@ -101,14 +102,14 @@ pub fn row_to_json_array(row: libsql::Row) -> Result<Vec<serde_json::Value>, Jso
///
/// WARN: This is lossy and whenever possible we should rely on parsed "CREATE TABLE" statement for
/// the respective column.
fn rows_to_columns(rows: &libsql::Rows) -> Result<Vec<Column>, libsql::Error> {
use libsql::ValueType as T;
fn rows_to_columns(rows: &tokio_rusqlite::Rows) -> Result<Vec<Column>, rusqlite::Error> {
use tokio_rusqlite::ValueType as T;
let mut columns: Vec<Column> = vec![];
for i in 0..rows.column_count() {
columns.push(Column {
name: rows.column_name(i).unwrap_or("<missing>").to_string(),
data_type: match rows.column_type(i)? {
data_type: match rows.column_type(i).unwrap_or(T::Null) {
T::Real => ColumnDataType::Real,
T::Text => ColumnDataType::Text,
T::Integer => ColumnDataType::Integer,
@@ -123,21 +124,23 @@ fn rows_to_columns(rows: &libsql::Rows) -> Result<Vec<Column>, libsql::Error> {
return Ok(columns);
}
/// Turns rows into a list of json arrays.
pub async fn rows_to_json_arrays(
mut rows: libsql::Rows,
pub fn rows_to_json_arrays(
rows: tokio_rusqlite::Rows,
limit: usize,
) -> Result<(Vec<Vec<serde_json::Value>>, Option<Vec<Column>>), JsonError> {
let mut cnt = 0_usize;
let columns = rows_to_columns(&rows).ok();
let columns = match rows_to_columns(&rows) {
Ok(columns) => Some(columns),
Err(err) => {
debug!("Failed to get column def: {err}");
None
}
};
let mut json_rows: Vec<Vec<serde_json::Value>> = vec![];
while let Some(row) = rows.next().await.map_err(|_err| JsonError::RowNotFound)? {
if cnt >= limit {
for (idx, row) in rows.iter().enumerate() {
if idx >= limit {
break;
}
cnt += 1;
json_rows.push(row_to_json_array(row)?);
}

View File

@@ -1,12 +1,12 @@
#[cfg(test)]
mod tests {
use libsql::{params, Connection};
use trailbase_sqlite::query_one_row;
use tokio_rusqlite::params;
use crate::records::json_to_sql::JsonRow;
use crate::util::query_one_row;
use crate::AppState;
pub async fn create_chat_message_app_tables(state: &AppState) -> Result<(), libsql::Error> {
pub async fn create_chat_message_app_tables(state: &AppState) -> Result<(), anyhow::Error> {
// Create a messages, chat room and members tables.
state
.conn()
@@ -47,7 +47,7 @@ mod tests {
pub async fn create_chat_message_app_tables_integer(
state: &AppState,
) -> Result<(), libsql::Error> {
) -> Result<(), anyhow::Error> {
// Create a messages, chat room and members tables.
state
.conn()
@@ -86,11 +86,14 @@ mod tests {
return Ok(());
}
pub async fn add_room(conn: &Connection, name: &str) -> Result<[u8; 16], libsql::Error> {
pub async fn add_room(
conn: &tokio_rusqlite::Connection,
name: &str,
) -> Result<[u8; 16], anyhow::Error> {
let room: [u8; 16] = query_one_row(
conn,
"INSERT INTO room (name) VALUES ($1) RETURNING id",
params!(name),
params!(name.to_string()),
)
.await?
.get(0)?;
@@ -99,10 +102,10 @@ mod tests {
}
pub async fn add_user_to_room(
conn: &Connection,
conn: &tokio_rusqlite::Connection,
user: [u8; 16],
room: [u8; 16],
) -> Result<(), libsql::Error> {
) -> Result<(), tokio_rusqlite::Error> {
conn
.execute(
"INSERT INTO room_members (user, room) VALUES ($1, $2)",
@@ -113,18 +116,20 @@ mod tests {
}
pub async fn send_message(
conn: &Connection,
conn: &tokio_rusqlite::Connection,
user: [u8; 16],
room: [u8; 16],
message: &str,
) -> Result<[u8; 16], libsql::Error> {
return query_one_row(
conn,
"INSERT INTO message (_owner, room, data) VALUES ($1, $2, $3) RETURNING id",
params!(user, room, message),
)
.await?
.get(0);
) -> Result<[u8; 16], anyhow::Error> {
return Ok(
query_one_row(
conn,
"INSERT INTO message (_owner, room, data) VALUES ($1, $2, $3) RETURNING id",
params!(user, room, message.to_string()),
)
.await?
.get(0)?,
);
}
pub fn json_row_from_value(value: serde_json::Value) -> Result<JsonRow, anyhow::Error> {

View File

@@ -65,8 +65,7 @@ pub async fn update_record_handler(
#[cfg(test)]
mod test {
use axum::extract::Query;
use libsql::params;
use trailbase_sqlite::query_one_row;
use tokio_rusqlite::params;
use super::*;
use crate::admin::user::*;
@@ -81,7 +80,7 @@ mod test {
use crate::records::test_utils::*;
use crate::records::*;
use crate::test::unpack_json_response;
use crate::util::{b64_to_id, id_to_b64};
use crate::util::{b64_to_id, id_to_b64, query_one_row};
#[tokio::test]
async fn test_record_api_update() -> Result<(), anyhow::Error> {

View File

@@ -1,8 +1,7 @@
use chrono::{Duration, Utc};
use libsql::params;
use log::*;
use rusqlite::{Connection, DatabaseName};
use std::future::Future;
use tokio_rusqlite::params;
use crate::app_state::AppState;
use crate::constants::{DEFAULT_REFRESH_TOKEN_TTL, LOGS_RETENTION_DEFAULT, SESSION_TABLE};
@@ -47,28 +46,31 @@ pub(super) fn start_periodic_tasks(app_state: &AppState) -> AbortOnDrop {
});
// Backup job.
let db_path = app_state.data_dir().main_db_path();
let conn = app_state.conn().clone();
let backup_file = app_state.data_dir().backup_path().join("backup.db");
let backup_interval = app_state
.access_config(|c| c.server.backup_interval_sec)
.map_or(Duration::zero(), Duration::seconds);
if !backup_interval.is_zero() {
tasks.add_periodic_task(backup_interval, move || {
let db_path = db_path.clone();
let conn = conn.clone();
let backup_file = backup_file.clone();
async move {
// NOTE: We need to "re-open" the database with rusqlite since libsql doesn't support
// backups (yet).
match Connection::open(&db_path) {
Ok(conn) => {
match conn.backup(DatabaseName::Main, backup_file, /* progress= */ None) {
Ok(_) => info!("Backup complete"),
Err(err) => error!("Backup failed: {err}"),
};
}
Err(err) => warn!("Backup process failed to open DB: {err}"),
}
let result = conn
.call(|conn| {
return Ok(conn.backup(
rusqlite::DatabaseName::Main,
backup_file,
/* progress= */ None,
)?);
})
.await;
match result {
Ok(_) => info!("Backup complete"),
Err(err) => error!("Backup failed: {err}"),
};
}
});
}

View File

@@ -1025,15 +1025,6 @@ mod tests {
use super::*;
use crate::constants::USER_TABLE;
async fn new_mem_conn() -> libsql::Connection {
return libsql::Builder::new_local(":memory:")
.build()
.await
.unwrap()
.connect()
.unwrap();
}
#[tokio::test]
async fn test_statement_to_table_schema_and_back() {
lazy_static! {
@@ -1059,7 +1050,7 @@ mod tests {
{
// First Make sure the query is actually valid, as opposed to "only" parsable.
let conn = new_mem_conn().await;
let conn = tokio_rusqlite::Connection::open_in_memory().await.unwrap();
conn.execute(&SQL, ()).await.unwrap();
}
@@ -1071,7 +1062,7 @@ mod tests {
let sql = table1.create_table_statement();
{
// Same as above, make sure the constructed query is valid as opposed to "only" parsable.
let conn = new_mem_conn().await;
let conn = tokio_rusqlite::Connection::open_in_memory().await.unwrap();
conn.execute(&sql, ()).await.unwrap();
}

View File

@@ -1,8 +1,6 @@
use libsql::Connection;
use log::*;
use std::path::PathBuf;
use thiserror::Error;
use trailbase_sqlite::{connect_sqlite, query_one_row};
use crate::app_state::{build_objectstore, AppState, AppStateArgs};
use crate::auth::jwt::{JwtHelper, JwtHelperError};
@@ -15,8 +13,12 @@ use crate::table_metadata::TableMetadataCache;
#[derive(Debug, Error)]
pub enum InitError {
#[error("Libsql error: {0}")]
Libsql(#[from] libsql::Error),
#[error("TRusqlite error: {0}")]
TokioRusqlite(#[from] tokio_rusqlite::Error),
#[error("Rusqlite error: {0}")]
Rusqlite(#[from] rusqlite::Error),
#[error("RusqliteFromSql error: {0}")]
FromSql(#[from] rusqlite::types::FromSqlError),
#[error("DB Migration error: {0}")]
Migration(#[from] refinery::Error),
#[error("IO error: {0}")]
@@ -54,18 +56,22 @@ pub async fn init_app_state(
data_dir.ensure_directory_structure().await?;
// Then open or init new databases.
let logs_conn = init_logs_db(&data_dir).await?;
let logs_conn = {
let mut conn = init_logs_db(&data_dir)?;
apply_logs_migrations(&mut conn)?;
tokio_rusqlite::Connection::from_conn(conn).await?
};
// Open or init the main db. Note that we derive whether a new DB was initialized based on
// whether the V1 migration had to be applied. Should be fairly robust.
let (main_conn, new_db) = {
let conn = connect_sqlite(Some(data_dir.main_db_path()), None).await?;
let new_db = apply_main_migrations(conn.clone(), Some(data_dir.migrations_path())).await?;
let (conn2, new_db) = {
let mut conn = trailbase_sqlite::connect_sqlite(Some(data_dir.main_db_path()), None)?;
let new_db = apply_main_migrations(&mut conn, Some(data_dir.migrations_path()))?;
(conn, new_db)
(tokio_rusqlite::Connection::from_conn(conn).await?, new_db)
};
let table_metadata = TableMetadataCache::new(main_conn.clone()).await?;
let table_metadata = TableMetadataCache::new(conn2.clone()).await?;
// Read config or write default one.
let config = load_or_init_config_textproto(&data_dir, &table_metadata).await?;
@@ -119,7 +125,7 @@ pub async fn init_app_state(
dev: args.dev,
table_metadata,
config,
conn: main_conn.clone(),
conn2,
logs_conn,
jwt,
object_store,
@@ -127,7 +133,7 @@ pub async fn init_app_state(
});
if new_db {
let num_admins: i64 = query_one_row(
let num_admins: i64 = crate::util::query_one_row(
app_state.user_conn(),
&format!("SELECT COUNT(*) FROM {USER_TABLE} WHERE admin = TRUE"),
(),
@@ -151,7 +157,7 @@ pub async fn init_app_state(
INSERT INTO
"#
),
libsql::params!(),
(),
)
.await?;
@@ -176,18 +182,16 @@ pub async fn init_app_state(
return Ok((new_db, app_state));
}
async fn init_logs_db(data_dir: &DataDir) -> Result<Connection, InitError> {
let conn = connect_sqlite(data_dir.logs_db_path().into(), None).await?;
fn init_logs_db(data_dir: &DataDir) -> Result<rusqlite::Connection, InitError> {
let conn = trailbase_sqlite::connect_sqlite(data_dir.logs_db_path().into(), None)?;
// Turn off secure_deletions, i.e. don't wipe the memory with zeros.
conn
.query("PRAGMA secure_delete = FALSE", ())
.await
.query_row("PRAGMA secure_delete = FALSE", (), |_row| Ok(()))
.unwrap();
// Sync less often
conn.execute("PRAGMA synchronous = 1", ()).await.unwrap();
conn.execute("PRAGMA synchronous = 1", ()).unwrap();
apply_logs_migrations(conn.clone()).await?;
return Ok(conn);
}

View File

@@ -1,7 +1,6 @@
use fallible_iterator::FallibleIterator;
use jsonschema::Validator;
use lazy_static::lazy_static;
use libsql::{params, Connection};
use log::*;
use regex::Regex;
use serde::{Deserialize, Serialize};
@@ -10,7 +9,7 @@ use sqlite3_parser::ast::Stmt;
use std::collections::HashMap;
use std::sync::Arc;
use thiserror::Error;
use trailbase_sqlite::query_one_row;
use tokio_rusqlite::params;
use crate::constants::{SQLITE_SCHEMA_TABLE, USER_TABLE};
use crate::schema::{Column, ColumnDataType, ColumnOption, ForeignKey, SchemaError, Table, View};
@@ -439,7 +438,7 @@ fn find_record_pk_column_index(columns: &[Column], tables: &[Table]) -> Option<u
}
struct TableMetadataCacheState {
conn: libsql::Connection,
conn: tokio_rusqlite::Connection,
tables: parking_lot::RwLock<HashMap<String, Arc<TableMetadata>>>,
views: parking_lot::RwLock<HashMap<String, Arc<ViewMetadata>>>,
}
@@ -450,7 +449,7 @@ pub struct TableMetadataCache {
}
impl TableMetadataCache {
pub async fn new(conn: libsql::Connection) -> Result<Self, TableLookupError> {
pub async fn new(conn: tokio_rusqlite::Connection) -> Result<Self, TableLookupError> {
let (table_map, tables) = Self::build_tables(&conn).await?;
let views = Self::build_views(&conn, &tables).await?;
@@ -464,7 +463,7 @@ impl TableMetadataCache {
}
async fn build_tables(
conn: &libsql::Connection,
conn: &tokio_rusqlite::Connection,
) -> Result<(HashMap<String, Arc<TableMetadata>>, Vec<Table>), TableLookupError> {
let tables = lookup_and_parse_all_table_schemas(conn).await?;
let build = |table: &Table| {
@@ -478,7 +477,7 @@ impl TableMetadataCache {
}
async fn build_views(
conn: &libsql::Connection,
conn: &tokio_rusqlite::Connection,
tables: &[Table],
) -> Result<HashMap<String, Arc<ViewMetadata>>, TableLookupError> {
let views = lookup_and_parse_all_view_schemas(conn, tables).await?;
@@ -522,8 +521,10 @@ impl std::fmt::Debug for TableMetadataCache {
#[derive(Debug, Error)]
pub enum TableLookupError {
#[error("SQL error: {0}")]
Sql(#[from] libsql::Error),
#[error("SQL2 error: {0}")]
Sql(#[from] tokio_rusqlite::Error),
#[error("SQL3 error: {0}")]
FromSql(#[from] rusqlite::types::FromSqlError),
#[error("Schema error: {0}")]
Schema(#[from] SchemaError),
#[error("Missing")]
@@ -533,14 +534,14 @@ pub enum TableLookupError {
}
pub async fn lookup_and_parse_table_schema(
conn: &Connection,
conn: &tokio_rusqlite::Connection,
table_name: &str,
) -> Result<Table, TableLookupError> {
// Then get the actual table.
let sql: String = query_one_row(
let sql: String = crate::util::query_one_row(
conn,
&format!("SELECT sql FROM {SQLITE_SCHEMA_TABLE} WHERE type = 'table' AND name = $1"),
params!(table_name),
params!(table_name.to_string()),
)
.await?
.get(0)?;
@@ -614,10 +615,10 @@ pub(crate) fn sqlite3_parse_into_statement(
}
pub async fn lookup_and_parse_all_table_schemas(
conn: &Connection,
conn: &tokio_rusqlite::Connection,
) -> Result<Vec<Table>, TableLookupError> {
// Then get the actual table.
let mut rows = conn
let rows = conn
.query(
&format!("SELECT sql FROM {SQLITE_SCHEMA_TABLE} WHERE type = 'table'"),
(),
@@ -625,7 +626,7 @@ pub async fn lookup_and_parse_all_table_schemas(
.await?;
let mut tables: Vec<Table> = vec![];
while let Some(row) = rows.next().await? {
for row in rows.iter() {
let sql: String = row.get(0)?;
let Some(stmt) = sqlite3_parse_into_statement(&sql)? else {
return Err(TableLookupError::Missing);
@@ -651,11 +652,11 @@ fn sqlite3_parse_view(sql: &str, tables: &[Table]) -> Result<View, TableLookupEr
}
pub async fn lookup_and_parse_all_view_schemas(
conn: &Connection,
conn: &tokio_rusqlite::Connection,
tables: &[Table],
) -> Result<Vec<View>, TableLookupError> {
// Then get the actual table.
let mut rows = conn
let rows = conn
.query(
&format!("SELECT sql FROM {SQLITE_SCHEMA_TABLE} WHERE type = 'view'"),
(),
@@ -663,7 +664,7 @@ pub async fn lookup_and_parse_all_view_schemas(
.await?;
let mut views: Vec<View> = vec![];
while let Some(row) = rows.next().await? {
for row in rows.iter() {
let sql: String = row.get(0)?;
views.push(sqlite3_parse_view(&sql, tables)?);
}
@@ -964,9 +965,11 @@ mod tests {
.await
.is_ok());
let cnt: i64 = query_one_row(conn, "SELECT COUNT(*) FROM test_table", ())
let cnt: i64 = conn
.query_row("SELECT COUNT(*) FROM test_table", ())
.await
.unwrap()
.unwrap()
.get(0)
.unwrap();

View File

@@ -1,6 +1,4 @@
use libsql::{Connection, Rows, Transaction};
use log::*;
use refinery_libsql::LibsqlConnection;
use std::path::{Path, PathBuf};
use thiserror::Error;
@@ -8,8 +6,10 @@ use crate::migrations;
#[derive(Debug, Error)]
pub enum TransactionError {
#[error("Libsql error: {0}")]
Libsql(#[from] libsql::Error),
#[error("Rusqlite error: {0}")]
Rusqlite(#[from] rusqlite::Error),
#[error("Tokio Rusqlite error: {0}")]
TokioRusqlite(#[from] tokio_rusqlite::Error),
#[error("IO error: {0}")]
IO(#[from] std::io::Error),
#[error("Migration error: {0}")]
@@ -18,10 +18,44 @@ pub enum TransactionError {
File(String),
}
/// A schema migration recorded by `TransactionRecorder` but not yet applied:
/// holds the SQL payload plus where it will be persisted on disk.
pub struct MigrationWriter {
  // Destination file the migration SQL is written to after a successful run.
  path: PathBuf,
  // File stem used as the refinery migration name (see `Migration::unapplied`).
  stem: String,
  // The recorded SQL script — presumably the recorder's logged statements
  // joined together; confirm against `rollback_and_create_migration`.
  sql: String,
}
impl MigrationWriter {
pub(crate) async fn write(
&self,
conn: &tokio_rusqlite::Connection,
) -> Result<refinery::Report, TransactionError> {
let migrations = vec![refinery::Migration::unapplied(&self.stem, &self.sql)?];
let runner = migrations::new_migration_runner(&migrations).set_abort_missing(false);
let report = conn
.call(move |conn| {
let report = runner
.run(conn)
.map_err(|err| tokio_rusqlite::Error::Other(err.into()))?;
return Ok(report);
})
.await
.map_err(|err| {
error!("Migration aborted with: {err} for {}", self.sql);
err
})?;
write_migration_file(self.path.clone(), &self.sql).await?;
return Ok(report);
}
}
/// A recorder for table migrations, i.e.: create, alter, drop, as opposed to data migrations.
pub struct TransactionRecorder {
conn: Connection,
tx: Transaction,
pub struct TransactionRecorder<'a> {
tx: rusqlite::Transaction<'a>,
log: Vec<String>,
migration_path: PathBuf,
@@ -29,40 +63,42 @@ pub struct TransactionRecorder {
}
#[allow(unused)]
impl TransactionRecorder {
pub async fn new(
conn: Connection,
impl<'a> TransactionRecorder<'a> {
pub fn new(
conn: &'a mut rusqlite::Connection,
migration_path: PathBuf,
migration_suffix: String,
) -> Result<Self, TransactionError> {
let tx = conn.transaction().await?;
return Ok(TransactionRecorder {
conn,
tx,
) -> Result<Self, rusqlite::Error> {
let recorder = TransactionRecorder {
tx: conn.transaction()?,
log: vec![],
migration_path,
migration_suffix,
});
};
return Ok(recorder);
}
// Note that we cannot take any sql params for recording purposes.
pub async fn query(&mut self, sql: &str) -> Result<Rows, TransactionError> {
let rows = self.tx.query(sql, ()).await?;
pub fn query(&mut self, sql: &str) -> Result<(), rusqlite::Error> {
let mut stmt = self.tx.prepare(sql)?;
let mut rows = stmt.query([])?;
rows.next()?;
self.log.push(sql.to_string());
return Ok(rows);
return Ok(());
}
pub async fn execute(&mut self, sql: &str) -> Result<u64, TransactionError> {
let rows_affected = self.tx.execute(sql, ()).await?;
pub fn execute(&mut self, sql: &str) -> Result<usize, rusqlite::Error> {
let rows_affected = self.tx.execute(sql, ())?;
self.log.push(sql.to_string());
return Ok(rows_affected);
}
/// Consume this transaction and commit.
pub async fn commit_and_create_migration(
self,
) -> Result<Option<refinery::Report>, TransactionError> {
pub fn rollback_and_create_migration(
mut self,
) -> Result<Option<MigrationWriter>, TransactionError> {
if self.log.is_empty() {
return Ok(None);
}
@@ -71,7 +107,7 @@ impl TransactionRecorder {
// sync.
// NOTE: Slightly hacky that we build up the transaction first to then cancel it. However, this
// gives us early checking. We could as well just not do it.
self.tx.rollback().await?;
self.tx.rollback()?;
let filename = migrations::new_unique_migration_filename(&self.migration_suffix);
let stem = Path::new(&filename)
@@ -103,24 +139,13 @@ impl TransactionRecorder {
},
);
let migrations = vec![refinery::Migration::unapplied(&stem, &sql)?];
let mut conn = LibsqlConnection::from_connection(self.conn);
let mut runner = migrations::new_migration_runner(&migrations).set_abort_missing(false);
let report = runner.run_async(&mut conn).await.map_err(|err| {
error!("Migration aborted with: {err} for {sql}");
err
})?;
write_migration_file(path, &sql).await?;
return Ok(Some(report));
return Ok(Some(MigrationWriter { path, stem, sql }));
}
/// Consume this transaction and rollback.
pub async fn rollback(self) -> Result<(), TransactionError> {
return Ok(self.tx.rollback().await?);
pub fn rollback(mut self) -> Result<(), TransactionError> {
self.tx.rollback()?;
return Ok(());
}
}

View File

@@ -55,3 +55,16 @@ pub(crate) fn assert_uuidv7_version(uuid: &Uuid) {
#[cfg(not(debug_assertions))]
pub(crate) fn assert_uuidv7_version(_uuid: &Uuid) {}
pub async fn query_one_row(
conn: &tokio_rusqlite::Connection,
sql: &str,
params: impl tokio_rusqlite::Params + Send + 'static,
) -> Result<tokio_rusqlite::Row, tokio_rusqlite::Error> {
if let Some(row) = conn.query_row(sql, params).await? {
return Ok(row);
}
return Err(tokio_rusqlite::Error::Rusqlite(
rusqlite::Error::QueryReturnedNoRows,
));
}

View File

@@ -3,12 +3,10 @@ use axum::http::StatusCode;
use axum_test::multipart::MultipartForm;
use axum_test::TestServer;
use cookie::Cookie;
use libsql::{params, Connection};
use std::rc::Rc;
use tokio_rusqlite::params;
use trailbase_core::api::{
create_user_handler, login_with_password, query_one_row, CreateUserRequest,
};
use trailbase_core::api::{create_user_handler, login_with_password, CreateUserRequest};
use trailbase_core::config::proto::PermissionFlag;
use trailbase_core::constants::{COOKIE_AUTH_TOKEN, RECORD_API_PATH};
use trailbase_core::records::*;
@@ -176,7 +174,9 @@ fn test_record_apis() {
});
}
pub async fn create_chat_message_app_tables(conn: &Connection) -> Result<(), libsql::Error> {
pub async fn create_chat_message_app_tables(
conn: &tokio_rusqlite::Connection,
) -> Result<(), anyhow::Error> {
// Create a messages, chat room and members tables.
conn
.execute_batch(
@@ -212,23 +212,27 @@ pub async fn create_chat_message_app_tables(conn: &Connection) -> Result<(), lib
return Ok(());
}
pub async fn add_room(conn: &Connection, name: &str) -> Result<[u8; 16], libsql::Error> {
let room: [u8; 16] = query_one_row(
conn,
"INSERT INTO room (name) VALUES ($1) RETURNING id",
params!(name),
)
.await?
.get(0)?;
pub async fn add_room(
conn: &tokio_rusqlite::Connection,
name: &str,
) -> Result<[u8; 16], anyhow::Error> {
let room: [u8; 16] = conn
.query_row(
"INSERT INTO room (name) VALUES ($1) RETURNING id",
params!(name.to_string()),
)
.await?
.unwrap()
.get(0)?;
return Ok(room);
}
pub async fn add_user_to_room(
conn: &Connection,
conn: &tokio_rusqlite::Connection,
user: [u8; 16],
room: [u8; 16],
) -> Result<(), libsql::Error> {
) -> Result<(), anyhow::Error> {
conn
.execute(
"INSERT INTO room_members (user, room) VALUES ($1, $2)",

View File

@@ -23,8 +23,7 @@ uuid = { version = "1.7.0", default-features = false, features = ["std", "v7"] }
validator = "0.19.0"
[dev-dependencies]
libsql = { workspace = true }
tokio = { version = "^1.38.0", features = ["macros", "rt-multi-thread"] }
rusqlite = { workspace = true }
uuid = { version = "1.7.0", default-features = false, features = ["std", "v4", "v7"] }
[profile.release]

View File

@@ -239,11 +239,11 @@ pub(crate) fn jsonschema_matches(
#[cfg(test)]
mod tests {
use super::*;
use libsql::params;
use rusqlite::params;
#[tokio::test]
async fn test_explicit_jsonschema() {
let conn = crate::connect().await.unwrap();
#[test]
fn test_explicit_jsonschema() {
let conn = crate::connect().unwrap();
let text0_schema = r#"
{
@@ -266,7 +266,7 @@ mod tests {
) STRICT;
"#
);
conn.query(&create_table, ()).await.unwrap();
conn.execute(&create_table, ()).unwrap();
{
conn
@@ -274,7 +274,6 @@ mod tests {
r#"INSERT INTO test (text0, text1) VALUES ('{"name": "foo"}', '"text"')"#,
params!(),
)
.await
.unwrap();
}
@@ -284,14 +283,13 @@ mod tests {
r#"INSERT INTO test (text0, text1) VALUES ('{"name": "foo", "age": -5}', '"text"')"#,
params!(),
)
.await
.is_err());
}
}
#[tokio::test]
async fn test_registerd_jsonschema() {
let conn = crate::connect().await.unwrap();
#[test]
fn test_registerd_jsonschema() {
let conn = crate::connect().unwrap();
let text0_schema = r#"
{
@@ -335,14 +333,13 @@ mod tests {
) STRICT;
"#
);
conn.query(&create_table, ()).await.unwrap();
conn.execute(&create_table, ()).unwrap();
conn
.execute(
r#"INSERT INTO test (text0) VALUES ('{"name": "prefix_foo"}')"#,
params!(),
)
.await
.unwrap();
assert!(conn
@@ -350,7 +347,6 @@ mod tests {
r#"INSERT INTO test (text0) VALUES ('{"name": "WRONG_PREFIX_foo"}')"#,
params!(),
)
.await
.is_err());
}
}

View File

@@ -137,59 +137,46 @@ pub fn sqlite3_extension_init(db: *mut sqlite3) -> Result<(), sqlite_loadable::E
#[cfg(test)]
unsafe extern "C" fn init_extension(
db: *mut libsql::ffi::sqlite3,
pz_err_msg: *mut *const ::std::os::raw::c_char,
p_thunk: *const libsql::ffi::sqlite3_api_routines,
db: *mut rusqlite::ffi::sqlite3,
pz_err_msg: *mut *mut ::std::os::raw::c_char,
p_thunk: *const rusqlite::ffi::sqlite3_api_routines,
) -> ::std::os::raw::c_int {
return sqlite3_extension_init(
db,
pz_err_msg as *mut *mut ::std::os::raw::c_char,
p_thunk as *mut libsql::ffi::sqlite3_api_routines,
pz_err_msg,
p_thunk as *mut rusqlite::ffi::sqlite3_api_routines,
) as ::std::os::raw::c_int;
}
#[cfg(test)]
pub(crate) async fn connect() -> Result<libsql::Connection, libsql::Error> {
let builder = libsql::Builder::new_local(":memory:")
.build()
.await
.unwrap();
pub(crate) fn connect() -> Result<rusqlite::Connection, rusqlite::Error> {
unsafe {
rusqlite::ffi::sqlite3_auto_extension(Some(init_extension));
}
unsafe { libsql::ffi::sqlite3_auto_extension(Some(init_extension)) };
Ok(builder.connect().unwrap())
}
#[cfg(test)]
pub(crate) async fn query_row(
conn: &libsql::Connection,
sql: &str,
params: impl libsql::params::IntoParams,
) -> Result<Option<libsql::Row>, libsql::Error> {
let mut rows = conn.query(sql, params).await?;
return rows.next().await;
return Ok(rusqlite::Connection::open_in_memory()?);
}
#[cfg(test)]
mod tests {
#[tokio::test]
async fn test_sqlean_define() {
let conn = crate::connect().await.unwrap();
#[test]
fn test_sqlean_define() {
let conn = crate::connect().unwrap();
// Define an application defined function in SQL and test it below.
conn
.query("SELECT define('sumn', ':n * (:n + 1) / 2')", ())
.await
.query_row("SELECT define('sumn', ':n * (:n + 1) / 2')", (), |_row| {
Ok(())
})
.unwrap();
let value: i64 = crate::query_row(&conn, "SELECT sumn(5)", ())
.await
.unwrap()
.unwrap()
.get(0)
let value: i64 = conn
.query_row("SELECT sumn(5)", (), |row| row.get(0))
.unwrap();
assert_eq!(value, 15);
conn.query("SELECT undefine('sumn')", ()).await.unwrap();
conn
.query_row("SELECT undefine('sumn')", (), |_row| Ok(()))
.unwrap();
}
}

View File

@@ -61,38 +61,33 @@ pub(crate) fn geoip_country(
mod tests {
use super::*;
use crate::query_row;
#[tokio::test]
async fn test_explicit_jsonschema() {
#[test]
fn test_explicit_jsonschema() {
let ip = "89.160.20.112";
let conn = crate::connect().await.unwrap();
let conn = crate::connect().unwrap();
let cc: Option<String> = query_row(&conn, &format!("SELECT geoip_country('{ip}')"), ())
.await
.unwrap()
.unwrap()
.get(0)
let cc: Option<String> = conn
.query_row(&format!("SELECT geoip_country('{ip}')"), (), |row| {
row.get(0)
})
.unwrap();
assert_eq!(cc, None);
load_geoip_db("testdata/GeoIP2-Country-Test.mmdb").unwrap();
let cc: String = query_row(&conn, &format!("SELECT geoip_country('{ip}')"), ())
.await
.unwrap()
.unwrap()
.get(0)
let cc: String = conn
.query_row(&format!("SELECT geoip_country('{ip}')"), (), |row| {
row.get(0)
})
.unwrap();
assert_eq!(cc, "SE");
let cc: Option<String> = query_row(&conn, &format!("SELECT geoip_country('127.0.0.1')"), ())
.await
.unwrap()
.unwrap()
.get(0)
let cc: Option<String> = conn
.query_row(&format!("SELECT geoip_country('127.0.0.1')"), (), |row| {
row.get(0)
})
.unwrap();
assert_eq!(cc, None);

View File

@@ -78,20 +78,12 @@ pub(super) fn parse_uuid(
#[cfg(test)]
mod tests {
use libsql::{params, Connection};
use rusqlite::params;
use uuid::Uuid;
async fn query_row(
conn: &Connection,
sql: &str,
params: impl libsql::params::IntoParams,
) -> Result<libsql::Row, libsql::Error> {
conn.prepare(sql).await?.query_row(params).await
}
#[tokio::test]
async fn test_uuid() {
let conn = crate::connect().await.unwrap();
#[test]
fn test_uuid() {
let conn = crate::connect().unwrap();
let create_table = r#"
CREATE TABLE test (
@@ -100,18 +92,18 @@ mod tests {
uuid_v7 BLOB CHECK(is_uuid_v7(uuid_v7))
) STRICT;
"#;
conn.query(create_table, ()).await.unwrap();
conn.execute(create_table, ()).unwrap();
{
let row = query_row(
&conn,
"INSERT INTO test (uuid, uuid_v7) VALUES (NULL, NULL) RETURNING id",
(),
)
.await
.unwrap();
let row = conn
.query_row(
"INSERT INTO test (uuid, uuid_v7) VALUES (NULL, NULL) RETURNING id",
(),
|row| -> rusqlite::Result<[u8; 16]> { Ok(row.get(0)?) },
)
.unwrap();
Uuid::from_slice(&row.get::<[u8; 16]>(0).unwrap()).unwrap();
Uuid::from_slice(&row).unwrap();
}
{
@@ -120,7 +112,6 @@ mod tests {
"INSERT INTO test (uuid, uuid_v7) VALUES ($1, NULL)",
params!(b"")
)
.await
.is_err());
}
@@ -131,24 +122,20 @@ mod tests {
"INSERT INTO test (uuid, uuid_v7) VALUES (NULL, $1)",
params!(uuidv4.into_bytes().to_vec())
)
.await
.is_err());
}
{
let uuid = Uuid::now_v7();
let row = query_row(
&conn,
"INSERT INTO test (uuid, uuid_v7) VALUES (parse_uuid($1), parse_uuid($1)) RETURNING uuid",
[uuid.to_string()],
)
.await
.unwrap();
let row = conn
.query_row(
"INSERT INTO test (uuid, uuid_v7) VALUES (parse_uuid($1), parse_uuid($1)) RETURNING uuid",
[uuid.to_string()],
|row| -> rusqlite::Result<[u8; 16]> { Ok(row.get(0)?) },
)
.unwrap();
assert_eq!(
Uuid::from_slice(&row.get::<[u8; 16]>(0).unwrap()).unwrap(),
uuid
);
assert_eq!(Uuid::from_slice(&row).unwrap(), uuid);
}
}
}

View File

@@ -101,105 +101,94 @@ pub(super) fn is_json(
#[cfg(test)]
mod tests {
use libsql::{params, Connection};
use rusqlite::params;
async fn query_row(
conn: &Connection,
sql: &str,
params: impl libsql::params::IntoParams,
) -> Result<libsql::Row, libsql::Error> {
conn.prepare(sql).await?.query_row(params).await
}
#[tokio::test]
async fn test_is_email() {
let conn = crate::connect().await.unwrap();
#[test]
fn test_is_email() {
let conn = crate::connect().unwrap();
let create_table = r#"
CREATE TABLE test (
email TEXT CHECK(is_email(email))
) STRICT;
"#;
conn.query(create_table, ()).await.unwrap();
conn.execute(create_table, ()).unwrap();
const QUERY: &str = "INSERT INTO test (email) VALUES ($1) RETURNING *";
assert_eq!(
query_row(&conn, QUERY, ["test@test.com"])
.await
.unwrap()
.get::<String>(0)
conn
.query_row(QUERY, ["test@test.com"], |row| Ok(row.get::<_, String>(0)?))
.unwrap(),
"test@test.com"
);
query_row(&conn, QUERY, [libsql::Value::Null])
.await
conn
.query_row(QUERY, [rusqlite::types::Value::Null], |_row| Ok(()))
.unwrap();
assert!(conn.execute(QUERY, params!("not an email")).await.is_err());
assert!(conn.execute(QUERY, params!("not an email")).is_err());
}
#[tokio::test]
async fn test_is_json() {
let conn = crate::connect().await.unwrap();
#[test]
fn test_is_json() {
let conn = crate::connect().unwrap();
let create_table = r#"
CREATE TABLE test (
json TEXT CHECK(is_json(json))
) STRICT;
"#;
conn.query(create_table, ()).await.unwrap();
conn.execute(create_table, ()).unwrap();
const QUERY: &str = "INSERT INTO test (json) VALUES ($1)";
conn.execute(QUERY, ["{}"]).await.unwrap();
conn.execute(QUERY, ["{}"]).unwrap();
conn
.execute(QUERY, ["{\"foo\": 42, \"bar\": {}, \"baz\": []}"])
.await
.unwrap();
assert!(conn.execute(QUERY, [""]).await.is_err());
assert!(conn.execute(QUERY, [""]).is_err());
}
#[tokio::test]
async fn test_regexp() {
let conn = crate::connect().await.unwrap();
#[test]
fn test_regexp() {
let conn = crate::connect().unwrap();
let create_table = "CREATE TABLE test (text0 TEXT, text1 TEXT) STRICT";
conn.query(create_table, ()).await.unwrap();
conn.execute(create_table, ()).unwrap();
const QUERY: &str = "INSERT INTO test (text0, text1) VALUES ($1, $2)";
conn.execute(QUERY, ["abc123", "abc"]).await.unwrap();
conn.execute(QUERY, ["def123", "def"]).await.unwrap();
conn.execute(QUERY, ["abc123", "abc"]).unwrap();
conn.execute(QUERY, ["def123", "def"]).unwrap();
{
let mut rows = conn
.query("SELECT * FROM test WHERE text1 REGEXP '^abc$'", ())
.await
let mut stmt = conn
.prepare("SELECT * FROM test WHERE text1 REGEXP '^abc$'")
.unwrap();
let mut rows = stmt.query(()).unwrap();
let mut cnt = 0;
while let Some(row) = rows.next().await.unwrap() {
assert_eq!("abc123", row.get::<String>(0).unwrap());
while let Some(row) = rows.next().unwrap() {
assert_eq!("abc123", row.get::<_, String>(0).unwrap());
cnt += 1;
}
assert_eq!(cnt, 1);
}
{
let mut rows = conn
.query("SELECT * FROM test WHERE text1 REGEXP $1", params!(".*bc$"))
.await
let mut stmt = conn
.prepare("SELECT * FROM test WHERE text1 REGEXP $1")
.unwrap();
let mut rows = stmt.query(params!(".*bc$")).unwrap();
let mut cnt = 0;
while let Some(row) = rows.next().await.unwrap() {
assert_eq!("abc123", row.get::<String>(0).unwrap());
while let Some(row) = rows.next().unwrap() {
assert_eq!("abc123", row.get::<_, String>(0).unwrap());
cnt += 1;
}
assert_eq!(cnt, 1);
}
{
let mut rows = conn
.query(r#"SELECT * FROM test WHERE text0 REGEXP '12\d'"#, ())
.await
let mut stmt = conn
.prepare(r#"SELECT * FROM test WHERE text0 REGEXP '12\d'"#)
.unwrap();
let mut rows = stmt.query(()).unwrap();
let mut cnt = 0;
while let Some(_row) = rows.next().await.unwrap() {
while let Some(_row) = rows.next().unwrap() {
cnt += 1;
}
assert_eq!(cnt, 2);

View File

@@ -9,7 +9,7 @@ trailbase-extension = { path = "../trailbase-extension" }
infer = "0.16.0"
jsonschema = { version = "0.26.0", default-features = false }
lazy_static = "1.5.0"
libsql = { workspace = true }
rusqlite = { workspace = true }
schemars = "0.8.21"
serde = { version = "^1.0.203", features = ["derive"] }
serde_json = "1.0.122"

View File

@@ -1,27 +1,13 @@
use libsql::{params, Value::Text};
/// This is a very simple binary demonstrating how TrailBase's SQLite extensions (e.g. uuid_v7)
/// can be used outside of TrailBase, thus avoiding lock-in.
use trailbase_sqlite::connect_sqlite;
// NOTE: This binary demonstrates calling statically linked extensions, i.e. uuid_v7().
// NOTE: It also shows that libsql and sqlite_loadable can both be linked into the same binary
// despite both pulling in sqlite3 symbols through libsql-ffi and sqlite3ext-sys, respectively.
// Wasn't able to reproduce this in a larger binary :shrug:.
fn main() {
let conn = connect_sqlite(None, None).unwrap();
#[tokio::main]
async fn main() {
let conn = connect_sqlite(None, None).await.unwrap();
let mut stmt = conn.prepare("SELECT (uuid_v7_text())").unwrap();
conn
.query("SELECT 1", params!(Text("FOO".to_string())))
.await
.unwrap();
let uuid = conn
.prepare("SELECT (uuid_v7_text())")
.await
.unwrap()
.query_row(())
.await
.unwrap();
let uuid: String = stmt.query_row((), |row| row.get(0)).unwrap();
println!("Done! {uuid:?}");
}

View File

@@ -15,56 +15,35 @@ pub fn has_geoip_db() -> bool {
}
#[no_mangle]
unsafe extern "C" fn init_extension(
db: *mut libsql::ffi::sqlite3,
pz_err_msg: *mut *const ::std::os::raw::c_char,
p_thunk: *const libsql::ffi::sqlite3_api_routines,
unsafe extern "C" fn init_trailbase_extension(
db: *mut rusqlite::ffi::sqlite3,
pz_err_msg: *mut *mut ::std::os::raw::c_char,
p_thunk: *const rusqlite::ffi::sqlite3_api_routines,
) -> ::std::os::raw::c_int {
return trailbase_extension::sqlite3_extension_init(
db,
pz_err_msg as *mut *mut ::std::os::raw::c_char,
p_thunk as *mut libsql::ffi::sqlite3_api_routines,
p_thunk as *mut rusqlite::ffi::sqlite3_api_routines,
) as ::std::os::raw::c_int;
}
// Lightweight optimization on db connect based on $2.1: https://sqlite.org/lang_analyze.html
async fn initial_optimize(conn: &libsql::Connection) -> Result<(), libsql::Error> {
conn.execute("PRAGMA optimize = 0x10002", ()).await?;
return Ok(());
}
pub async fn connect_sqlite(
pub fn connect_sqlite(
path: Option<PathBuf>,
extensions: Option<Vec<PathBuf>>,
) -> Result<libsql::Connection, libsql::Error> {
) -> Result<rusqlite::Connection, rusqlite::Error> {
schema::try_init_schemas();
// NOTE: We need libsql to initialize some internal variables before auto_extension works
// reliably. That's why we're creating a throw-away connection first. Haven't debugged this
// further but see error message below.
//
// thread 'main' panicked at
// /.../libsql-0.5.0-alpha.2/src/local/database.rs:209:17: assertion `left == right` failed:
//
// libsql was configured with an incorrect threading configuration and the api is not safe to
// use. Please check that no multi-thread options have been set. If nothing was configured then
// please open an issue at: https://github.com/libsql/libsql
// left: 21
// right: 0
drop(
libsql::Builder::new_local(":memory:")
.build()
.await
.unwrap()
.connect(),
);
unsafe { rusqlite::ffi::sqlite3_auto_extension(Some(init_trailbase_extension)) };
let p: PathBuf = path.unwrap_or_else(|| PathBuf::from(":memory:"));
let builder = libsql::Builder::new_local(p).build().await?;
unsafe { libsql::ffi::sqlite3_auto_extension(Some(init_extension)) };
let conn = builder.connect()?;
let conn = if let Some(p) = path {
use rusqlite::OpenFlags;
let flags = OpenFlags::SQLITE_OPEN_READ_WRITE
| OpenFlags::SQLITE_OPEN_CREATE
| OpenFlags::SQLITE_OPEN_NO_MUTEX;
rusqlite::Connection::open_with_flags(p, flags)?
} else {
rusqlite::Connection::open_in_memory()?
};
const CONFIG: &[&str] = &[
"PRAGMA busy_timeout = 10000",
@@ -85,56 +64,43 @@ pub async fn connect_sqlite(
"PRAGMA trusted_schema = OFF",
];
// NOTE: we're querying here since some pragmas return data. However, libsql doesn't like
// executed statements to return rows.
// NOTE: we're querying here since some pragmas return data.
for pragma in CONFIG {
conn.query(pragma, ()).await?;
let mut stmt = conn.prepare(pragma)?;
let mut rows = stmt.query([])?;
rows.next()?;
}
if let Some(extensions) = extensions {
for path in extensions {
conn.load_extension(path, None)?;
unsafe { conn.load_extension(path, None)? }
}
}
initial_optimize(&conn).await?;
// Initial optimize.
conn.execute("PRAGMA optimize = 0x10002", ())?;
return Ok(conn);
}
pub async fn query_one_row(
conn: &libsql::Connection,
sql: &str,
params: impl libsql::params::IntoParams,
) -> Result<libsql::Row, libsql::Error> {
let mut rows = conn.query(sql, params).await?;
let row = rows.next().await?.ok_or(libsql::Error::QueryReturnedNoRows);
return row;
}
pub async fn query_row(
conn: &libsql::Connection,
sql: &str,
params: impl libsql::params::IntoParams,
) -> Result<Option<libsql::Row>, libsql::Error> {
let mut rows = conn.query(sql, params).await?;
return rows.next().await;
}
#[cfg(test)]
mod test {
use super::*;
use uuid::Uuid;
#[tokio::test]
async fn test_connect() {
let conn = connect_sqlite(None, None).await.unwrap();
#[test]
fn test_connect() {
let conn = connect_sqlite(None, None).unwrap();
let row = query_one_row(&conn, "SELECT (uuid_v7())", ())
.await
let row = conn
.query_row(
"SELECT (uuid_v7())",
(),
|row| -> rusqlite::Result<[u8; 16]> { row.get(0) },
)
.unwrap();
let uuid = Uuid::from_bytes(row.get::<[u8; 16]>(0).unwrap());
let uuid = Uuid::from_bytes(row);
assert_eq!(uuid.get_version_num(), 7);

View File

@@ -1,16 +0,0 @@
[package]
name = "refinery-libsql"
version = "0.0.1"
edition = "2021"
[dependencies]
async-trait = "0.1.82"
libsql = { workspace = true }
refinery-core = { workspace = true }
time = "0.3.36"
[dev-dependencies]
barrel = { version = "0.7.0", features = ["sqlite3"] }
refinery = { workspace = true }
tempfile = "3.12.0"
tokio = { version = "^1.38.0", features=["macros", "rt-multi-thread"] }

View File

@@ -1,87 +0,0 @@
use async_trait::async_trait;
use libsql::{params, Connection, Error as LibsqlError, Transaction};
use refinery_core::traits::r#async::{AsyncMigrate, AsyncQuery, AsyncTransaction};
use refinery_core::Migration;
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;
pub struct LibsqlConnection(Connection);
impl LibsqlConnection {
pub fn from_connection(c: Connection) -> Self {
Self(c)
}
}
impl std::ops::Deref for LibsqlConnection {
type Target = Connection;
#[inline]
fn deref(&self) -> &Self::Target {
&self.0
}
}
async fn query_applied_migrations(
transaction: &Transaction,
query: &str,
) -> Result<Vec<Migration>, LibsqlError> {
let mut rows = transaction.query(query, params![]).await?;
let mut applied = Vec::new();
loop {
// for row in rows.into_iter()
let Some(row) = rows.next().await? else {
break;
};
let version = row.get(0)?;
let applied_on: String = row.get(2)?;
// Safe to call unwrap, as we stored it in RFC3339 format on the database
let applied_on = OffsetDateTime::parse(&applied_on, &Rfc3339).unwrap();
let checksum: String = row.get(3)?;
applied.push(Migration::applied(
version,
row.get(1)?,
applied_on,
checksum
.parse::<u64>()
.expect("checksum must be a valid u64"),
));
}
Ok(applied)
}
#[async_trait]
impl AsyncTransaction for LibsqlConnection {
type Error = LibsqlError;
async fn execute<'a, T: Iterator<Item = &'a str> + Send>(
&mut self,
queries: T,
) -> Result<usize, Self::Error> {
let transaction = self.0.transaction().await?;
let mut count = 0;
for query in queries {
transaction.execute_batch(query).await?;
count += 1;
}
transaction.commit().await?;
Ok(count as usize)
}
}
#[async_trait]
impl AsyncQuery<Vec<Migration>> for LibsqlConnection {
async fn query(
&mut self,
query: &str,
) -> Result<Vec<Migration>, <Self as AsyncTransaction>::Error> {
let transaction = self.0.transaction().await?;
let applied = query_applied_migrations(&transaction, query).await?;
transaction.commit().await?;
Ok(applied)
}
}
impl AsyncMigrate for LibsqlConnection {}

View File

@@ -1,625 +0,0 @@
use refinery_libsql::LibsqlConnection;
use barrel::backend::Sqlite as Sql;
use libsql::{params, Builder, Row};
use refinery::{embed_migrations, error::Kind, AsyncMigrate, Migration, Target};
use time::OffsetDateTime;
const DEFAULT_TABLE_NAME: &str = "refinery_schema_history";
mod embedded {
use refinery::embed_migrations;
embed_migrations!("./tests/migrations");
}
mod broken {
use refinery::embed_migrations;
embed_migrations!("./tests/migrations_broken");
}
mod missing {
use refinery::embed_migrations;
embed_migrations!("./tests/migrations_missing");
}
fn get_migrations() -> Vec<Migration> {
embed_migrations!("./tests/migrations");
let migration1 =
Migration::unapplied("V1__initial.rs", &migrations::V1__initial::migration()).unwrap();
let migration2 = Migration::unapplied(
"V2__add_cars_and_motos_table.sql",
include_str!("./migrations/V1-2/V2__add_cars_and_motos_table.sql"),
)
.unwrap();
let migration3 = Migration::unapplied(
"V3__add_brand_to_cars_table",
include_str!("./migrations/V3/V3__add_brand_to_cars_table.sql"),
)
.unwrap();
let migration4 = Migration::unapplied(
"V4__add_year_to_motos_table.rs",
&migrations::V4__add_year_to_motos_table::migration(),
)
.unwrap();
let migration5 = Migration::unapplied(
"V5__add_year_field_to_cars",
"ALTER TABLE cars ADD year INTEGER;",
)
.unwrap();
vec![migration1, migration2, migration3, migration4, migration5]
}
async fn in_memory_conn() -> LibsqlConnection {
let db = Builder::new_local(":memory:").build().await.unwrap();
LibsqlConnection::from_connection(db.connect().unwrap())
}
async fn query_one(conn: &mut LibsqlConnection, sql: &str) -> Option<Row> {
let mut rows = conn.query(&sql, params![]).await.unwrap();
rows.next().await.unwrap()
}
#[tokio::test]
async fn report_contains_applied_migrations() {
let mut conn = in_memory_conn().await;
let report = embedded::migrations::runner()
.run_async(&mut conn)
.await
.unwrap();
let migrations = get_migrations();
let applied_migrations = report.applied_migrations();
assert_eq!(4, applied_migrations.len());
assert_eq!(migrations[0].version(), applied_migrations[0].version());
assert_eq!(migrations[1].version(), applied_migrations[1].version());
assert_eq!(migrations[2].version(), applied_migrations[2].version());
assert_eq!(migrations[3].version(), applied_migrations[3].version());
assert_eq!(migrations[0].name(), migrations[0].name());
assert_eq!(migrations[1].name(), applied_migrations[1].name());
assert_eq!(migrations[2].name(), applied_migrations[2].name());
assert_eq!(migrations[3].name(), applied_migrations[3].name());
assert_eq!(migrations[0].checksum(), applied_migrations[0].checksum());
assert_eq!(migrations[1].checksum(), applied_migrations[1].checksum());
assert_eq!(migrations[2].checksum(), applied_migrations[2].checksum());
assert_eq!(migrations[3].checksum(), applied_migrations[3].checksum());
}
#[tokio::test]
async fn creates_migration_table() {
let mut conn = in_memory_conn().await;
embedded::migrations::runner()
.run_async(&mut conn)
.await
.unwrap();
let table_name: String = query_one(
&mut conn,
&format!(
"SELECT name FROM sqlite_master WHERE type='table' AND name='{}'",
DEFAULT_TABLE_NAME
),
)
.await
.unwrap()
.get(0)
.unwrap();
assert_eq!(DEFAULT_TABLE_NAME, table_name);
}
#[tokio::test]
async fn creates_migration_table_grouped_transaction() {
let mut conn = in_memory_conn().await;
embedded::migrations::runner()
.set_grouped(true)
.run_async(&mut conn)
.await
.unwrap();
let row = query_one(
&mut conn,
&format!(
"SELECT name FROM sqlite_master WHERE type='table' AND name='{}'",
DEFAULT_TABLE_NAME
),
)
.await
.unwrap();
let table_name: String = row.get(0).unwrap();
assert_eq!(DEFAULT_TABLE_NAME, table_name);
}
#[tokio::test]
async fn applies_migration() {
let mut conn = in_memory_conn().await;
embedded::migrations::runner()
.run_async(&mut conn)
.await
.unwrap();
conn
.execute(
"INSERT INTO persons (name, city) VALUES (?, ?)",
["John Legend", "New York"],
)
.await
.unwrap();
let row = query_one(&mut conn, "SELECT name, city FROM persons")
.await
.unwrap();
let (name, city): (String, String) = (row.get(0).unwrap(), row.get(1).unwrap());
assert_eq!("John Legend", name);
assert_eq!("New York", city);
}
#[tokio::test]
async fn applies_migration_grouped_transaction() {
let mut conn = in_memory_conn().await;
embedded::migrations::runner()
.set_grouped(true)
.run_async(&mut conn)
.await
.unwrap();
conn
.execute(
"INSERT INTO persons (name, city) VALUES (?, ?)",
["John Legend", "New York"],
)
.await
.unwrap();
let row = query_one(&mut conn, "SELECT name, city FROM persons")
.await
.unwrap();
let (name, city): (String, String) = (row.get(0).unwrap(), row.get(1).unwrap());
assert_eq!("John Legend", name);
assert_eq!("New York", city);
}
#[tokio::test]
async fn updates_schema_history() {
let mut conn = in_memory_conn().await;
embedded::migrations::runner()
.run_async(&mut conn)
.await
.unwrap();
let current = conn
.get_last_applied_migration(DEFAULT_TABLE_NAME)
.await
.unwrap()
.unwrap();
assert_eq!(4, current.version());
assert_eq!(
OffsetDateTime::now_utc().date(),
current.applied_on().unwrap().date()
);
}
#[tokio::test]
async fn updates_schema_history_grouped_transaction() {
let mut conn = in_memory_conn().await;
embedded::migrations::runner()
.set_grouped(true)
.run_async(&mut conn)
.await
.unwrap();
let current = conn
.get_last_applied_migration(DEFAULT_TABLE_NAME)
.await
.unwrap()
.unwrap();
assert_eq!(4, current.version());
assert_eq!(
OffsetDateTime::now_utc().date(),
current.applied_on().unwrap().date()
);
}
#[tokio::test]
async fn updates_to_last_working_if_not_grouped() {
let mut conn = in_memory_conn().await;
let result = broken::migrations::runner().run_async(&mut conn).await;
assert!(result.is_err());
let current = conn
.get_last_applied_migration(DEFAULT_TABLE_NAME)
.await
.unwrap()
.unwrap();
let err = result.unwrap_err();
let migrations = get_migrations();
let applied_migrations = err.report().unwrap().applied_migrations();
assert_eq!(
OffsetDateTime::now_utc().date(),
current.applied_on().unwrap().date()
);
assert_eq!(2, current.version());
assert_eq!(2, applied_migrations.len());
assert_eq!(1, applied_migrations[0].version());
assert_eq!(2, applied_migrations[1].version());
assert_eq!("initial", migrations[0].name());
assert_eq!("add_cars_table", applied_migrations[1].name());
assert_eq!(2959965718684201605, applied_migrations[0].checksum());
assert_eq!(8238603820526370208, applied_migrations[1].checksum());
}
#[tokio::test]
async fn doesnt_update_to_last_working_if_grouped() {
let mut conn = in_memory_conn().await;
let result = broken::migrations::runner()
.set_grouped(true)
.run_async(&mut conn)
.await;
assert!(result.is_err());
let row = query_one(&mut conn, "SELECT version FROM refinery_schema_history").await;
assert!(row.is_none());
}
#[tokio::test]
async fn gets_applied_migrations() {
let mut conn = in_memory_conn().await;
embedded::migrations::runner()
.run_async(&mut conn)
.await
.unwrap();
let migrations = get_migrations();
let applied_migrations = conn
.get_applied_migrations(DEFAULT_TABLE_NAME)
.await
.unwrap();
assert_eq!(4, applied_migrations.len());
assert_eq!(migrations[0].version(), applied_migrations[0].version());
assert_eq!(migrations[1].version(), applied_migrations[1].version());
assert_eq!(migrations[2].version(), applied_migrations[2].version());
assert_eq!(migrations[3].version(), applied_migrations[3].version());
assert_eq!(migrations[0].name(), migrations[0].name());
assert_eq!(migrations[1].name(), applied_migrations[1].name());
assert_eq!(migrations[2].name(), applied_migrations[2].name());
assert_eq!(migrations[3].name(), applied_migrations[3].name());
assert_eq!(migrations[0].checksum(), applied_migrations[0].checksum());
assert_eq!(migrations[1].checksum(), applied_migrations[1].checksum());
assert_eq!(migrations[2].checksum(), applied_migrations[2].checksum());
assert_eq!(migrations[3].checksum(), applied_migrations[3].checksum());
}
#[tokio::test]
async fn applies_new_migration() {
let mut conn = in_memory_conn().await;
embedded::migrations::runner()
.run_async(&mut conn)
.await
.unwrap();
let migrations = get_migrations();
let mchecksum = migrations[4].checksum();
conn
.migrate(
&migrations,
true,
true,
false,
Target::Latest,
DEFAULT_TABLE_NAME,
)
.await
.unwrap();
let current = conn
.get_last_applied_migration(DEFAULT_TABLE_NAME)
.await
.unwrap()
.unwrap();
assert_eq!(5, current.version());
assert_eq!(mchecksum, current.checksum());
}
#[tokio::test]
async fn migrates_to_target_migration() {
let mut conn = in_memory_conn().await;
let report = embedded::migrations::runner()
.set_target(Target::Version(3))
.run_async(&mut conn)
.await
.unwrap();
let current = conn
.get_last_applied_migration(DEFAULT_TABLE_NAME)
.await
.unwrap()
.unwrap();
let applied_migrations = report.applied_migrations();
let migrations = get_migrations();
assert_eq!(3, current.version());
assert_eq!(3, applied_migrations.len());
assert_eq!(migrations[0].version(), applied_migrations[0].version());
assert_eq!(migrations[1].version(), applied_migrations[1].version());
assert_eq!(migrations[2].version(), applied_migrations[2].version());
assert_eq!(migrations[0].name(), migrations[0].name());
assert_eq!(migrations[1].name(), applied_migrations[1].name());
assert_eq!(migrations[2].name(), applied_migrations[2].name());
assert_eq!(migrations[0].checksum(), applied_migrations[0].checksum());
assert_eq!(migrations[1].checksum(), applied_migrations[1].checksum());
assert_eq!(migrations[2].checksum(), applied_migrations[2].checksum());
}
#[tokio::test]
async fn migrates_to_target_migration_grouped() {
let mut conn = in_memory_conn().await;
let report = embedded::migrations::runner()
.set_target(Target::Version(3))
.set_grouped(true)
.run_async(&mut conn)
.await
.unwrap();
let current = conn
.get_last_applied_migration(DEFAULT_TABLE_NAME)
.await
.unwrap()
.unwrap();
let applied_migrations = report.applied_migrations();
let migrations = get_migrations();
assert_eq!(3, current.version());
assert_eq!(3, applied_migrations.len());
assert_eq!(migrations[0].version(), applied_migrations[0].version());
assert_eq!(migrations[1].version(), applied_migrations[1].version());
assert_eq!(migrations[2].version(), applied_migrations[2].version());
assert_eq!(migrations[0].name(), migrations[0].name());
assert_eq!(migrations[1].name(), applied_migrations[1].name());
assert_eq!(migrations[2].name(), applied_migrations[2].name());
assert_eq!(migrations[0].checksum(), applied_migrations[0].checksum());
assert_eq!(migrations[1].checksum(), applied_migrations[1].checksum());
assert_eq!(migrations[2].checksum(), applied_migrations[2].checksum());
}
#[tokio::test]
async fn aborts_on_missing_migration_on_filesystem() {
let mut conn = in_memory_conn().await;
embedded::migrations::runner()
.run_async(&mut conn)
.await
.unwrap();
let migration = Migration::unapplied(
"V4__add_year_field_to_cars",
"ALTER TABLE cars ADD year INTEGER;",
)
.unwrap();
let err = conn
.migrate(
&[migration],
true,
true,
false,
Target::Latest,
DEFAULT_TABLE_NAME,
)
.await
.unwrap_err();
match err.kind() {
Kind::MissingVersion(missing) => {
assert_eq!(1, missing.version());
assert_eq!("initial", missing.name());
}
_ => panic!("failed test"),
}
}
#[tokio::test]
async fn aborts_on_divergent_migration() {
let mut conn = in_memory_conn().await;
embedded::migrations::runner()
.run_async(&mut conn)
.await
.unwrap();
let migration = Migration::unapplied(
"V2__add_year_field_to_cars",
"ALTER TABLE cars ADD year INTEGER;",
)
.unwrap();
let err = conn
.migrate(
&[migration.clone()],
true,
false,
false,
Target::Latest,
DEFAULT_TABLE_NAME,
)
.await
.unwrap_err();
match err.kind() {
Kind::DivergentVersion(applied, divergent) => {
assert_eq!(&migration, divergent);
assert_eq!(2, applied.version());
assert_eq!("add_cars_and_motos_table", applied.name());
}
_ => panic!("failed test"),
}
}
#[tokio::test]
async fn aborts_on_missing_migration_on_database() {
let mut conn = in_memory_conn().await;
missing::migrations::runner()
.run_async(&mut conn)
.await
.unwrap();
let migration1 = Migration::unapplied(
"V1__initial",
concat!(
"CREATE TABLE persons (",
"id int,",
"name varchar(255),",
"city varchar(255)",
");"
),
)
.unwrap();
let migration2 = Migration::unapplied(
"V2__add_cars_table",
include_str!("./migrations_missing/V2__add_cars_table.sql"),
)
.unwrap();
let err = conn
.migrate(
&[migration1, migration2],
true,
true,
false,
Target::Latest,
DEFAULT_TABLE_NAME,
)
.await
.unwrap_err();
match err.kind() {
Kind::MissingVersion(missing) => {
assert_eq!(1, missing.version());
assert_eq!("initial", missing.name());
}
_ => panic!("failed test"),
}
}
#[tokio::test]
async fn doesnt_run_migrations_if_fake_version() {
let mut conn = in_memory_conn().await;
let report = embedded::migrations::runner()
.set_target(Target::FakeVersion(2))
.run_async(&mut conn)
.await
.unwrap();
let applied_migrations = report.applied_migrations();
assert!(applied_migrations.is_empty());
let current = conn
.get_last_applied_migration(DEFAULT_TABLE_NAME)
.await
.unwrap()
.unwrap();
let migrations = get_migrations();
let mchecksum = migrations[1].checksum();
assert_eq!(2, current.version());
assert_eq!(mchecksum, current.checksum());
let row: Option<Row> = query_one(
&mut conn,
"SELECT name FROM sqlite_master WHERE type='table' AND name='persons'",
)
.await;
assert!(matches!(row, None));
}
#[tokio::test]
async fn doesnt_run_migrations_if_fake() {
let mut conn = in_memory_conn().await;
let report = embedded::migrations::runner()
.set_target(Target::Fake)
.run_async(&mut conn)
.await
.unwrap();
let applied_migrations = report.applied_migrations();
assert!(applied_migrations.is_empty());
let current = conn
.get_last_applied_migration(DEFAULT_TABLE_NAME)
.await
.unwrap()
.unwrap();
let migrations = get_migrations();
let mchecksum = migrations[3].checksum();
assert_eq!(4, current.version());
assert_eq!(mchecksum, current.checksum());
let row: Option<Row> = query_one(
&mut conn,
"SELECT name FROM sqlite_master WHERE type='table' AND name='persons'",
)
.await;
assert!(matches!(row, None));
}
// #[tokio::test]
// fn migrates_from_cli() {
// run_test(|| {
// Command::new("refinery")
// .args([
// "migrate",
// "-c",
// "tests/sqlite_refinery.toml",
// "-p",
// "tests/migrations",
// ])
// .unwrap()
// .assert()
// .stdout(contains("applying migration: V2__add_cars_and_motos_table"))
// .stdout(contains("applying migration: V3__add_brand_to_cars_table"));
// })
// }

View File

@@ -1,15 +0,0 @@
use barrel::{types, Migration};
use crate::Sql;
pub fn migration() -> String {
let mut m = Migration::new();
m.create_table("persons", |t| {
t.add_column("id", types::primary());
t.add_column("name", types::varchar(255));
t.add_column("city", types::varchar(255));
});
m.make::<Sql>()
}

View File

@@ -1,8 +0,0 @@
CREATE TABLE cars (
id int,
name varchar(255)
);
CREATE TABLE motos (
id int,
name varchar(255)
);

View File

@@ -1,2 +0,0 @@
ALTER TABLE cars
ADD brand varchar(255);

View File

@@ -1,13 +0,0 @@
use barrel::{types, Migration};
use crate::Sql;
pub fn migration() -> String {
let mut m = Migration::new();
m.change_table("motos", |t| {
t.add_column("brand", types::varchar(255).nullable(true));
});
m.make::<Sql>()
}

View File

@@ -1,5 +0,0 @@
CREATE TABLE persons (
id int,
name varchar(255),
city varchar(255)
);

View File

@@ -1,4 +0,0 @@
CREATE TABLE cars (
id int,
name varchar(255)
);

View File

@@ -1,2 +0,0 @@
ALTER TABLE non_existent
ADD brand varchar(255);

View File

@@ -1,4 +0,0 @@
CREATE TABLE cars (
id int,
name varchar(255)
);

View File

@@ -1,5 +0,0 @@
CREATE TABLE person_finance (
id int,
person_id int,
amount int
);

View File

@@ -4,8 +4,11 @@ version = "0.0.1"
edition = "2021"
[dependencies]
libsql-ffi = "0.5.0"
libsqlite3-sys = "0.30.1"
[build-dependencies]
bindgen = "0.70.1"
cc = "1.1.28"
[dev-dependencies]
rusqlite = { workspace = true }

View File

@@ -29,9 +29,6 @@ fn build_object() {
let mut cfg = cc::Build::new();
// Try to be consistent with:
// https://github.com/tursodatabase/libsql/blob/7521bc0d91f34a8a3b8776efe32aa0d20f20bf55/libsql-ffi/build.rs#L111
//
// Most importantly, define SQLITE_CORE to avoid dyn sqlite3_api symbol dep.
cfg
.flag("-std=c11")
@@ -68,8 +65,8 @@ fn build_object() {
println!("cargo:rustc-link-search={PATH}/define");
println!("cargo:rustc-link-lib=define");
// Link sqlite lib from libsql-ffi.
println!("cargo:rustc-link-lib=libsql");
// Link sqlite.
println!("cargo:rustc-link-lib=sqlite3");
}
fn main() {

View File

@@ -3,3 +3,36 @@
#![allow(non_snake_case)]
include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
#[no_mangle]
unsafe extern "C" fn init_sqlean_extension(
db: *mut libsqlite3_sys::sqlite3,
_pzErrMrg: *mut *mut ::std::os::raw::c_char,
_pThunk: *const libsqlite3_sys::sqlite3_api_routines,
) -> ::std::os::raw::c_int {
define_init(db as *mut sqlite3)
}
#[cfg(test)]
mod tests {
use rusqlite::Connection;
#[test]
fn load_test() {
unsafe {
libsqlite3_sys::sqlite3_auto_extension(Some(super::init_sqlean_extension));
};
let conn = Connection::open_in_memory().unwrap();
conn
.query_row("SELECT define('sumn', ':n * (:n + 1) / 2')", (), |_row| {
Ok(())
})
.unwrap();
let sum: i64 = conn
.query_row("SELECT sumn(5)", (), |row| row.get(0))
.unwrap();
assert_eq!(15, sum);
}
}

1
vendor/tokio-rusqlite vendored Submodule

Submodule vendor/tokio-rusqlite added at 74e7d44c42