mirror of
https://github.com/trailbaseio/trailbase.git
synced 2026-05-04 15:40:01 -05:00
Make config validation rely only on SQLite schemas, rather than metadata. This avoids an initialization-order issue, where config loading depends on JSON Schema metadata and loading custom JSON schemas depends on the config. Now we can avoid double metadata init and, more importantly, propagate errors when JSON schemas referenced by the SQLite schemas are missing.
This commit is contained in:
@@ -115,7 +115,7 @@ async fn setup_app() -> Result<Setup, anyhow::Error> {
|
||||
let conn = app.state.conn();
|
||||
|
||||
create_chat_message_app_tables(conn).await?;
|
||||
app.state.rebuild_schema_cache().await?;
|
||||
app.state.rebuild_connection_metadata().await?;
|
||||
|
||||
let room = add_room(conn, "room0").await?;
|
||||
let password = "Secret!1!!";
|
||||
|
||||
@@ -157,7 +157,7 @@ pub async fn list_logs_handler(
|
||||
&trailbase_extension::jsonschema::JsonSchemaRegistry::default(),
|
||||
table.clone(),
|
||||
&[table],
|
||||
);
|
||||
)?;
|
||||
let filter_where_clause =
|
||||
build_filter_where_clause("log", &table_metadata.schema.columns, filter_params)?;
|
||||
|
||||
|
||||
@@ -74,7 +74,7 @@ pub async fn query_handler(
|
||||
|
||||
// In the fallback case we always need to invalidate the cache.
|
||||
if must_invalidate_schema_cache {
|
||||
state.rebuild_schema_cache().await?;
|
||||
state.rebuild_connection_metadata().await?;
|
||||
}
|
||||
|
||||
let batched_rows = batched_rows_result.map_err(|err| Error::BadRequest(err.into()))?;
|
||||
|
||||
@@ -285,7 +285,7 @@ mod tests {
|
||||
.unwrap();
|
||||
assert_eq!(cnt, 1);
|
||||
|
||||
state.rebuild_schema_cache().await.unwrap();
|
||||
state.rebuild_connection_metadata().await.unwrap();
|
||||
|
||||
let (rows, cols) = fetch_rows(
|
||||
conn,
|
||||
|
||||
@@ -141,7 +141,7 @@ pub async fn alter_table_handler(
|
||||
debug!("Migration report: {report:?}");
|
||||
}
|
||||
|
||||
state.rebuild_schema_cache().await?;
|
||||
state.rebuild_connection_metadata().await?;
|
||||
|
||||
// Fix configuration: update all table references by existing APIs.
|
||||
if let Some(rename) = ephemeral_table_rename
|
||||
|
||||
@@ -57,7 +57,7 @@ pub async fn create_table_handler(
|
||||
.await?;
|
||||
}
|
||||
|
||||
state.rebuild_schema_cache().await?;
|
||||
state.rebuild_connection_metadata().await?;
|
||||
}
|
||||
|
||||
return Ok(Json(CreateTableResponse {
|
||||
|
||||
@@ -74,7 +74,7 @@ pub async fn drop_table_handler(
|
||||
.await?;
|
||||
}
|
||||
|
||||
state.rebuild_schema_cache().await?;
|
||||
state.rebuild_connection_metadata().await?;
|
||||
|
||||
// Fix configuration: remove all APIs reference the no longer existing table.
|
||||
let mut config = state.get_config();
|
||||
|
||||
@@ -18,6 +18,7 @@ use crate::records::subscribe::SubscriptionManager;
|
||||
use crate::scheduler::{JobRegistry, build_job_registry_from_config};
|
||||
use crate::schema_metadata::{
|
||||
ConnectionMetadata, build_connection_metadata_and_install_file_deletion_triggers,
|
||||
lookup_and_parse_all_table_schemas, lookup_and_parse_all_view_schemas,
|
||||
};
|
||||
use crate::wasm::Runtime;
|
||||
|
||||
@@ -264,14 +265,11 @@ impl AppState {
|
||||
return &self.state.subscription_manager;
|
||||
}
|
||||
|
||||
pub async fn rebuild_schema_cache(
|
||||
pub async fn rebuild_connection_metadata(
|
||||
&self,
|
||||
) -> Result<(), crate::schema_metadata::SchemaLookupError> {
|
||||
let registry = trailbase_extension::jsonschema::json_schema_registry_snapshot();
|
||||
|
||||
let connection_metadata =
|
||||
build_connection_metadata_and_install_file_deletion_triggers(&self.state.conn, ®istry)
|
||||
.await?;
|
||||
let tables = lookup_and_parse_all_table_schemas(self.conn()).await?;
|
||||
let views = lookup_and_parse_all_view_schemas(self.conn(), &tables).await?;
|
||||
|
||||
// We typically rebuild the schema representations when the DB schemas change, which in turn
|
||||
// can invalidate the config, e.g. an API may reference a deleted table. Let's make sure to
|
||||
@@ -279,11 +277,20 @@ impl AppState {
|
||||
// happened rendering the current config invalid. Unlike a config update, it's too late to
|
||||
// reject anything.
|
||||
let config = self.get_config();
|
||||
validate_config(&connection_metadata, &config).map_err(|err| {
|
||||
validate_config(&tables, &views, &config).map_err(|err| {
|
||||
log::error!("Schema change invalidated config: {err}");
|
||||
return crate::schema_metadata::SchemaLookupError::Other(err.into());
|
||||
})?;
|
||||
|
||||
let registry = trailbase_extension::jsonschema::json_schema_registry_snapshot();
|
||||
let connection_metadata = build_connection_metadata_and_install_file_deletion_triggers(
|
||||
self.conn(),
|
||||
tables,
|
||||
views,
|
||||
®istry,
|
||||
)
|
||||
.await?;
|
||||
|
||||
self
|
||||
.state
|
||||
.connection_metadata
|
||||
@@ -346,7 +353,8 @@ impl AppState {
|
||||
config: Config,
|
||||
hash: Option<String>,
|
||||
) -> Result<(), ConfigError> {
|
||||
validate_config(&self.connection_metadata(), &config)?;
|
||||
let metadata = self.connection_metadata();
|
||||
validate_config(&metadata.tables(), &metadata.views(), &config)?;
|
||||
|
||||
match hash {
|
||||
Some(hash) => {
|
||||
@@ -376,15 +384,17 @@ impl AppState {
|
||||
let new_config = self.get_config();
|
||||
|
||||
if update_json_schema_registry(&new_config).unwrap_or(true)
|
||||
&& let Err(err) = self.rebuild_schema_cache().await
|
||||
&& let Err(err) = self.rebuild_connection_metadata().await
|
||||
{
|
||||
log::warn!("reloading JSON schema cache failed: {err}");
|
||||
}
|
||||
|
||||
// Write new config to the file system.
|
||||
let metadata = self.connection_metadata();
|
||||
return write_config_and_vault_textproto(
|
||||
self.data_dir(),
|
||||
&self.connection_metadata(),
|
||||
&metadata.tables(),
|
||||
&metadata.views(),
|
||||
&new_config,
|
||||
)
|
||||
.await;
|
||||
@@ -527,15 +537,14 @@ pub async fn test_state(options: Option<TestStateOptions>) -> anyhow::Result<App
|
||||
assert!(new);
|
||||
let logs_conn = crate::connection::init_logs_db(None)?;
|
||||
|
||||
let registry = trailbase_extension::jsonschema::json_schema_registry_snapshot();
|
||||
let mut connection_metadata =
|
||||
build_connection_metadata_and_install_file_deletion_triggers(&conn, ®istry).await?;
|
||||
let tables = lookup_and_parse_all_table_schemas(&conn).await?;
|
||||
let views = lookup_and_parse_all_view_schemas(&conn, &tables).await?;
|
||||
|
||||
let TestStateOptions { config, mailer } = options.unwrap_or_default();
|
||||
let config = {
|
||||
let config = config.unwrap_or_else(test_config);
|
||||
|
||||
validate_config(&connection_metadata, &config).unwrap();
|
||||
validate_config(&tables, &views, &config).unwrap();
|
||||
|
||||
// NOTE: The below "append" semantics are different from prod's override behavior, to avoid
|
||||
// races between concurrent tests. The registry needs to be global for the sqlite extensions
|
||||
@@ -552,14 +561,16 @@ pub async fn test_state(options: Option<TestStateOptions>) -> anyhow::Result<App
|
||||
Some(trailbase_extension::jsonschema::Schema::from(schema_json, None, false).unwrap()),
|
||||
);
|
||||
}
|
||||
|
||||
connection_metadata =
|
||||
build_connection_metadata_and_install_file_deletion_triggers(&conn, ®istry).await?;
|
||||
}
|
||||
|
||||
Reactive::new(config)
|
||||
};
|
||||
|
||||
let registry = trailbase_extension::jsonschema::json_schema_registry_snapshot();
|
||||
let connection_metadata =
|
||||
build_connection_metadata_and_install_file_deletion_triggers(&conn, tables, views, ®istry)
|
||||
.await?;
|
||||
|
||||
let data_dir = DataDir(temp_dir.path().to_path_buf());
|
||||
|
||||
let object_store = if std::env::var("TEST_S3_OBJECT_STORE").map_or(false, |v| v == "TRUE") {
|
||||
|
||||
+17
-13
@@ -4,18 +4,19 @@ use prost_reflect::{
|
||||
DynamicMessage, ExtensionDescriptor, FieldDescriptor, Kind, MapKey, ReflectMessage, Value,
|
||||
};
|
||||
use proto::{EmailTemplate, OAuthProviderId, SmtpEncryption};
|
||||
use std::borrow::Borrow;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::convert::TryFrom;
|
||||
use std::str::FromStr;
|
||||
use thiserror::Error;
|
||||
use tokio::fs;
|
||||
use trailbase_schema::sqlite::{Table, View};
|
||||
use validator::{ValidateEmail, ValidateUrl};
|
||||
|
||||
use crate::DESCRIPTOR_POOL;
|
||||
use crate::auth::oauth::providers::oauth_provider_registry;
|
||||
use crate::data_dir::DataDir;
|
||||
use crate::records::validate_record_api_config;
|
||||
use crate::schema_metadata::ConnectionMetadata;
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum ConfigError {
|
||||
@@ -403,9 +404,10 @@ async fn load_vault_textproto_or_default(data_dir: &DataDir) -> Result<proto::Va
|
||||
//
|
||||
// Right now this leads to a warning log on first load when SchemaMatadataCache is first built
|
||||
// but custom schemas are not yet registered :/.
|
||||
pub async fn load_or_init_config_textproto(
|
||||
pub async fn load_or_init_config_textproto<T: Borrow<Table>, V: Borrow<View>>(
|
||||
data_dir: &DataDir,
|
||||
connection_metadata: &ConnectionMetadata,
|
||||
tables: &[T],
|
||||
views: &[V],
|
||||
) -> Result<proto::Config, ConfigError> {
|
||||
let merged_config = {
|
||||
let config = match fs::read_to_string(data_dir.config_path().join(CONFIG_FILENAME)).await {
|
||||
@@ -414,7 +416,7 @@ pub async fn load_or_init_config_textproto(
|
||||
warn!("`config.textproto` not found, initializing new default.");
|
||||
|
||||
let config = proto::Config::new_with_custom_defaults();
|
||||
write_config_and_vault_textproto(data_dir, connection_metadata, &config).await?;
|
||||
write_config_and_vault_textproto(data_dir, tables, views, &config).await?;
|
||||
config
|
||||
}
|
||||
Err(err) => {
|
||||
@@ -426,7 +428,7 @@ pub async fn load_or_init_config_textproto(
|
||||
merge_vault_and_env(config, vault)?
|
||||
};
|
||||
|
||||
validate_config(connection_metadata, &merged_config)?;
|
||||
validate_config(tables, views, &merged_config)?;
|
||||
|
||||
return Ok(merged_config);
|
||||
}
|
||||
@@ -442,12 +444,13 @@ fn split_config(config: &proto::Config) -> Result<(proto::Config, proto::Vault),
|
||||
return Ok((stripped_config, new_vault));
|
||||
}
|
||||
|
||||
pub async fn write_config_and_vault_textproto(
|
||||
pub async fn write_config_and_vault_textproto<T: Borrow<Table>, V: Borrow<View>>(
|
||||
data_dir: &DataDir,
|
||||
connection_metadata: &ConnectionMetadata,
|
||||
tables: &[T],
|
||||
views: &[V],
|
||||
config: &proto::Config,
|
||||
) -> Result<(), ConfigError> {
|
||||
validate_config(connection_metadata, config)?;
|
||||
validate_config(tables, views, config)?;
|
||||
|
||||
let (stripped_config, vault) = split_config(config)?;
|
||||
|
||||
@@ -483,8 +486,9 @@ fn validate_application_name(name: &str) -> Result<(), ConfigError> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn validate_config(
|
||||
tables: &ConnectionMetadata,
|
||||
pub fn validate_config<T: Borrow<Table>, V: Borrow<View>>(
|
||||
tables: &[T],
|
||||
views: &[V],
|
||||
config: &proto::Config,
|
||||
) -> Result<(), ConfigError> {
|
||||
// Check server settings.
|
||||
@@ -506,7 +510,7 @@ pub(crate) fn validate_config(
|
||||
// table, however it's not valid to have conflicting api names.
|
||||
let mut api_names = HashSet::<String>::new();
|
||||
for api in &config.record_apis {
|
||||
let api_name = validate_record_api_config(tables, api)?;
|
||||
let api_name = validate_record_api_config(tables, views, api)?;
|
||||
|
||||
if !api_names.insert(api_name.clone()) {
|
||||
return ierr(format!(
|
||||
@@ -767,10 +771,10 @@ mod test {
|
||||
|
||||
async fn test_default_config_is_valid() {
|
||||
let state = test_state(None).await.unwrap();
|
||||
let connection_metadata = state.connection_metadata();
|
||||
let metadata = state.connection_metadata();
|
||||
|
||||
let config = Config::new_with_custom_defaults();
|
||||
validate_config(&connection_metadata, &config).unwrap();
|
||||
validate_config(&metadata.tables(), &metadata.views(), &config).unwrap();
|
||||
}
|
||||
|
||||
fn test_config_merging() {
|
||||
|
||||
@@ -229,7 +229,7 @@ mod test {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
state.rebuild_schema_cache().await.unwrap();
|
||||
state.rebuild_connection_metadata().await.unwrap();
|
||||
|
||||
add_record_api_config(
|
||||
&state,
|
||||
|
||||
@@ -260,7 +260,7 @@ mod tests {
|
||||
let table = lookup_and_parse_table_schema(conn, "test_table", Some("main"))
|
||||
.await
|
||||
.unwrap();
|
||||
let metadata = TableMetadata::new(®istry, table.clone(), &[table]);
|
||||
let metadata = TableMetadata::new(®istry, table.clone(), &[table]).unwrap();
|
||||
|
||||
let insert = |json: serde_json::Value| async move {
|
||||
conn
|
||||
|
||||
@@ -476,7 +476,7 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
state.rebuild_schema_cache().await.unwrap();
|
||||
state.rebuild_connection_metadata().await.unwrap();
|
||||
|
||||
add_record_api_config(
|
||||
&state,
|
||||
@@ -568,7 +568,7 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
state.rebuild_schema_cache().await.unwrap();
|
||||
state.rebuild_connection_metadata().await.unwrap();
|
||||
|
||||
add_record_api_config(
|
||||
&state,
|
||||
@@ -1047,7 +1047,7 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
state.rebuild_schema_cache().await.unwrap();
|
||||
state.rebuild_connection_metadata().await.unwrap();
|
||||
|
||||
add_record_api_config(
|
||||
&state,
|
||||
|
||||
@@ -616,7 +616,7 @@ mod tests {
|
||||
.try_into()
|
||||
.unwrap();
|
||||
|
||||
let metadata = TableMetadata::new(®istry, table.clone(), &[table]);
|
||||
let metadata = TableMetadata::new(®istry, table.clone(), &[table]).unwrap();
|
||||
|
||||
let id: [u8; 16] = uuid::Uuid::now_v7().as_bytes().clone();
|
||||
let blob: Vec<u8> = [0; 128].to_vec();
|
||||
|
||||
@@ -449,7 +449,7 @@ mod test {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
state.rebuild_schema_cache().await.unwrap();
|
||||
state.rebuild_connection_metadata().await.unwrap();
|
||||
|
||||
add_record_api_config(
|
||||
&state,
|
||||
@@ -877,7 +877,7 @@ mod test {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
state.rebuild_schema_cache().await.unwrap();
|
||||
state.rebuild_connection_metadata().await.unwrap();
|
||||
|
||||
let room0 = add_room(conn, "room0").await.unwrap();
|
||||
let room1 = add_room(conn, "room1").await.unwrap();
|
||||
@@ -945,7 +945,7 @@ mod test {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
state.rebuild_schema_cache().await.unwrap();
|
||||
state.rebuild_connection_metadata().await.unwrap();
|
||||
|
||||
add_record_api_config(
|
||||
&state,
|
||||
@@ -1042,7 +1042,7 @@ mod test {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
state.rebuild_schema_cache().await.unwrap();
|
||||
state.rebuild_connection_metadata().await.unwrap();
|
||||
|
||||
add_record_api_config(
|
||||
&state,
|
||||
@@ -1125,7 +1125,7 @@ mod test {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
state.rebuild_schema_cache().await.unwrap();
|
||||
state.rebuild_connection_metadata().await.unwrap();
|
||||
|
||||
add_record_api_config(
|
||||
&state,
|
||||
@@ -1231,7 +1231,7 @@ mod test {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
state.rebuild_schema_cache().await.unwrap();
|
||||
state.rebuild_connection_metadata().await.unwrap();
|
||||
|
||||
add_record_api_config(
|
||||
&state,
|
||||
|
||||
@@ -772,7 +772,7 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
state.rebuild_schema_cache().await.unwrap();
|
||||
state.rebuild_connection_metadata().await.unwrap();
|
||||
|
||||
// Register message table as record api with moderator read access.
|
||||
add_record_api_config(
|
||||
@@ -1029,7 +1029,7 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
state.rebuild_schema_cache().await.unwrap();
|
||||
state.rebuild_connection_metadata().await.unwrap();
|
||||
|
||||
// Register message table as record api with moderator read access.
|
||||
add_record_api_config(
|
||||
|
||||
@@ -107,7 +107,7 @@ mod tests {
|
||||
)
|
||||
.await?;
|
||||
|
||||
state.rebuild_schema_cache().await.unwrap();
|
||||
state.rebuild_connection_metadata().await.unwrap();
|
||||
|
||||
return Ok(());
|
||||
}
|
||||
@@ -148,7 +148,7 @@ mod tests {
|
||||
)
|
||||
.await?;
|
||||
|
||||
state.rebuild_schema_cache().await.unwrap();
|
||||
state.rebuild_connection_metadata().await.unwrap();
|
||||
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
@@ -287,7 +287,7 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
state.rebuild_schema_cache().await.unwrap();
|
||||
state.rebuild_connection_metadata().await.unwrap();
|
||||
|
||||
add_record_api_config(
|
||||
&state,
|
||||
|
||||
@@ -111,7 +111,7 @@ mod test {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
state.rebuild_schema_cache().await.unwrap();
|
||||
state.rebuild_connection_metadata().await.unwrap();
|
||||
|
||||
add_record_api_config(
|
||||
&state,
|
||||
@@ -349,7 +349,7 @@ mod test {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
state.rebuild_schema_cache().await.unwrap();
|
||||
state.rebuild_connection_metadata().await.unwrap();
|
||||
|
||||
add_record_api_config(
|
||||
&state,
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
use std::borrow::Borrow;
|
||||
|
||||
use itertools::Itertools;
|
||||
use trailbase_schema::QualifiedName;
|
||||
use trailbase_schema::metadata::TableOrView;
|
||||
use trailbase_schema::parse::parse_into_statement;
|
||||
use trailbase_schema::sqlite::ColumnOption;
|
||||
use trailbase_schema::sqlite::{ColumnOption, Table, View};
|
||||
|
||||
use crate::config::{ConfigError, proto};
|
||||
use crate::schema_metadata::ConnectionMetadata;
|
||||
|
||||
fn validate_record_api_name(name: &str) -> Result<(), ConfigError> {
|
||||
if name.is_empty() {
|
||||
@@ -21,8 +22,9 @@ fn validate_record_api_name(name: &str) -> Result<(), ConfigError> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn validate_record_api_config(
|
||||
schemas: &ConnectionMetadata,
|
||||
pub(crate) fn validate_record_api_config<T: Borrow<Table>, V: Borrow<View>>(
|
||||
tables: &[T],
|
||||
views: &[V],
|
||||
api_config: &proto::RecordApiConfig,
|
||||
) -> Result<String, ConfigError> {
|
||||
let Some(ref api_name) = api_config.name else {
|
||||
@@ -37,18 +39,18 @@ pub(crate) fn validate_record_api_config(
|
||||
let metadata: TableOrView = {
|
||||
let table_name = QualifiedName::parse(table_name)?;
|
||||
|
||||
if let Some(table_metadata) = schemas.get_table(&table_name) {
|
||||
if table_metadata.schema.temporary {
|
||||
if let Some(table) = tables.iter().find(|t| (*t).borrow().name == table_name) {
|
||||
if table.borrow().temporary {
|
||||
return Err(invalid("Record APIs must not reference TEMPORARY tables"));
|
||||
}
|
||||
|
||||
TableOrView::Table(table_metadata)
|
||||
} else if let Some(view_metadata) = schemas.get_view(&table_name) {
|
||||
if view_metadata.schema.temporary {
|
||||
TableOrView::Table(table.borrow())
|
||||
} else if let Some(view) = views.iter().find(|v| (*v).borrow().name == table_name) {
|
||||
if view.borrow().temporary {
|
||||
return Err(invalid("Record APIs must not reference TEMPORARY views"));
|
||||
}
|
||||
|
||||
TableOrView::View(view_metadata)
|
||||
TableOrView::View(view.borrow())
|
||||
} else {
|
||||
return Err(invalid(format!(
|
||||
"Missing table or view for API: {api_name}"
|
||||
@@ -56,7 +58,7 @@ pub(crate) fn validate_record_api_config(
|
||||
}
|
||||
};
|
||||
|
||||
let Some((pk_index, _)) = metadata.record_pk_column() else {
|
||||
let Some((pk_index, _)) = metadata.record_pk_column(tables) else {
|
||||
return Err(invalid(format!(
|
||||
"Table for api '{api_name}' is missing valid integer/UUID primary key column."
|
||||
)));
|
||||
@@ -125,13 +127,19 @@ pub(crate) fn validate_record_api_config(
|
||||
)));
|
||||
}
|
||||
|
||||
let Some(foreign_table) = schemas.get_table(&QualifiedName::parse(foreign_table_name)?) else {
|
||||
let fq_foreign_table_name = QualifiedName::parse(foreign_table_name)?;
|
||||
let Some(foreign_table) = tables
|
||||
.iter()
|
||||
.find(|t| (*t).borrow().name == fq_foreign_table_name)
|
||||
else {
|
||||
return Err(invalid(format!(
|
||||
"{api_name} reference missing table: {foreign_table_name}"
|
||||
)));
|
||||
};
|
||||
|
||||
let Some((_idx, foreign_pk_column)) = foreign_table.record_pk_column() else {
|
||||
let Some((_idx, foreign_pk_column)) =
|
||||
TableOrView::Table(foreign_table.borrow()).record_pk_column(tables)
|
||||
else {
|
||||
return Err(invalid(format!(
|
||||
"{api_name} references pk-less table: {foreign_table_name}"
|
||||
)));
|
||||
|
||||
@@ -7,22 +7,11 @@ use trailbase_schema::sqlite::{SchemaError, Table, View};
|
||||
use trailbase_sqlite::{Connection, params};
|
||||
|
||||
pub use trailbase_schema::metadata::{
|
||||
ConnectionMetadata, ConnectionSchemas, JsonColumnMetadata, JsonSchemaError, TableMetadata,
|
||||
ConnectionMetadata, JsonColumnMetadata, JsonSchemaError, TableMetadata,
|
||||
};
|
||||
|
||||
use crate::constants::SQLITE_SCHEMA_TABLE;
|
||||
|
||||
pub(crate) async fn build_connection_schemas(
|
||||
conn: &Connection,
|
||||
) -> Result<ConnectionSchemas, SchemaLookupError> {
|
||||
let tables = lookup_and_parse_all_table_schemas(conn).await?;
|
||||
|
||||
return Ok(ConnectionSchemas {
|
||||
views: lookup_and_parse_all_view_schemas(conn, &tables).await?,
|
||||
tables,
|
||||
});
|
||||
}
|
||||
|
||||
/// (Re-)build the connections schema representation *with* the side-effect of (re-)installing file
|
||||
/// deletion triggers.
|
||||
///
|
||||
@@ -31,10 +20,11 @@ pub(crate) async fn build_connection_schemas(
|
||||
/// column is added, we need to rebuild the metadata and update or install missing triggers.
|
||||
pub(crate) async fn build_connection_metadata_and_install_file_deletion_triggers(
|
||||
conn: &Connection,
|
||||
tables: Vec<Table>,
|
||||
views: Vec<View>,
|
||||
registry: &JsonSchemaRegistry,
|
||||
) -> Result<ConnectionMetadata, SchemaLookupError> {
|
||||
let schemas = build_connection_schemas(conn).await?;
|
||||
let metadata = ConnectionMetadata::from_schemas(schemas, registry);
|
||||
let metadata = ConnectionMetadata::from_schemas(tables, views, registry)?;
|
||||
|
||||
setup_file_deletion_triggers(conn, &metadata).await?;
|
||||
|
||||
@@ -53,6 +43,8 @@ pub enum SchemaLookupError {
|
||||
Missing,
|
||||
#[error("Sql parse error: {0}")]
|
||||
SqlParse(#[from] sqlite3_parser::lexer::sql::Error),
|
||||
#[error("Json Schema error: {0}")]
|
||||
JsonSchema(#[from] trailbase_schema::metadata::JsonSchemaError),
|
||||
#[error("Other error: {0}")]
|
||||
Other(Box<dyn std::error::Error + Send + Sync>),
|
||||
}
|
||||
@@ -169,7 +161,7 @@ pub async fn lookup_and_parse_all_view_schemas(
|
||||
|
||||
// Install file column triggers. This ain't pretty, this might be better on construction and
|
||||
// schema changes.
|
||||
async fn setup_file_deletion_triggers(
|
||||
pub(crate) async fn setup_file_deletion_triggers(
|
||||
conn: &trailbase_sqlite::Connection,
|
||||
metadata: &ConnectionMetadata,
|
||||
) -> Result<(), SchemaLookupError> {
|
||||
@@ -258,7 +250,7 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
state.rebuild_schema_cache().await.unwrap();
|
||||
state.rebuild_connection_metadata().await.unwrap();
|
||||
|
||||
let metadata = state.connection_metadata();
|
||||
let test_table = metadata
|
||||
@@ -333,7 +325,7 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
state.rebuild_schema_cache().await.unwrap();
|
||||
state.rebuild_connection_metadata().await.unwrap();
|
||||
|
||||
add_record_api_config(
|
||||
&state,
|
||||
@@ -552,7 +544,7 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
state.rebuild_schema_cache().await.unwrap();
|
||||
state.rebuild_connection_metadata().await.unwrap();
|
||||
|
||||
add_record_api_config(
|
||||
&state,
|
||||
|
||||
@@ -8,7 +8,10 @@ use crate::config::load_or_init_config_textproto;
|
||||
use crate::constants::USER_TABLE;
|
||||
use crate::metadata::load_or_init_metadata_textproto;
|
||||
use crate::rand::generate_random_string;
|
||||
use crate::schema_metadata::build_connection_metadata_and_install_file_deletion_triggers;
|
||||
use crate::schema_metadata::{
|
||||
build_connection_metadata_and_install_file_deletion_triggers, lookup_and_parse_all_table_schemas,
|
||||
lookup_and_parse_all_view_schemas,
|
||||
};
|
||||
use crate::server::DataDir;
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
@@ -86,27 +89,21 @@ pub async fn init_app_state(args: InitArgs) -> Result<(bool, AppState), InitErro
|
||||
let (conn, new_db) =
|
||||
crate::connection::init_main_db(Some(&args.data_dir), Some(extra_databases))?;
|
||||
|
||||
let registry = trailbase_extension::jsonschema::json_schema_registry_snapshot();
|
||||
let mut connection_metadata =
|
||||
build_connection_metadata_and_install_file_deletion_triggers(&conn, ®istry).await?;
|
||||
let tables = lookup_and_parse_all_table_schemas(&conn).await?;
|
||||
let views = lookup_and_parse_all_view_schemas(&conn, &tables).await?;
|
||||
|
||||
// Read config or write default one. Ensures config is validated.
|
||||
let config = load_or_init_config_textproto(&args.data_dir, &connection_metadata).await?;
|
||||
|
||||
if update_json_schema_registry(&config)? {
|
||||
// NOTE: We must reload the table schema metadata after registering new schemas. This is a
|
||||
// work-around because config validation currently depends on ConnectionMetadata and thus the
|
||||
// JSON schema registry. It would be cleaner to build SchemaMatadataCache only after
|
||||
// registering custom schemas and validating the config only against plain TABLE/VIEW
|
||||
// metadata.
|
||||
let registry = trailbase_extension::jsonschema::json_schema_registry_snapshot();
|
||||
connection_metadata =
|
||||
build_connection_metadata_and_install_file_deletion_triggers(&conn, ®istry).await?;
|
||||
}
|
||||
let config = load_or_init_config_textproto(&args.data_dir, &tables, &views).await?;
|
||||
update_json_schema_registry(&config)?;
|
||||
|
||||
// Load the `<depot>/metadata.textproto`.
|
||||
let _metadata = load_or_init_metadata_textproto(&args.data_dir).await?;
|
||||
|
||||
let registry = trailbase_extension::jsonschema::json_schema_registry_snapshot();
|
||||
let connection_metadata =
|
||||
build_connection_metadata_and_install_file_deletion_triggers(&conn, tables, views, ®istry)
|
||||
.await?;
|
||||
|
||||
let jwt = JwtHelper::init_from_path(&args.data_dir).await?;
|
||||
|
||||
// Init geoip if present.
|
||||
|
||||
@@ -250,23 +250,23 @@ impl Server {
|
||||
error!("Failed to apply migrations: {err}");
|
||||
}
|
||||
Ok(_new_db) => {
|
||||
info!(
|
||||
"Migrations applied: {:?}",
|
||||
state.data_dir().migrations_path()
|
||||
);
|
||||
|
||||
// NOTE: we're always invalidating: simple & safe. We could also avoid invalidation
|
||||
// when no new migrations were applied :shrug:.
|
||||
if let Err(err) = state.rebuild_schema_cache().await {
|
||||
error!("Failed to invalidate schema cache: {err}");
|
||||
}
|
||||
let user_migrations_path = state.data_dir().migrations_path();
|
||||
info!("Migrations applied: {user_migrations_path:?}");
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE: we're always invalidating: simple & safe. We could also avoid invalidation
|
||||
// when no new migrations were applied :shrug:.
|
||||
if let Err(err) = state.rebuild_connection_metadata().await {
|
||||
error!("Failed to invalidate schema cache: {err}");
|
||||
}
|
||||
let metadata = state.connection_metadata();
|
||||
|
||||
// Reload config:
|
||||
match crate::config::load_or_init_config_textproto(
|
||||
state.data_dir(),
|
||||
&state.connection_metadata(),
|
||||
&metadata.tables(),
|
||||
&metadata.views(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
|
||||
@@ -60,7 +60,7 @@ async fn test_record_apis() {
|
||||
let logs_conn = state.logs_conn();
|
||||
|
||||
create_chat_message_app_tables(conn).await.unwrap();
|
||||
state.rebuild_schema_cache().await.unwrap();
|
||||
state.rebuild_connection_metadata().await.unwrap();
|
||||
|
||||
let room = add_room(conn, "room0").await.unwrap();
|
||||
let password = "Secret!1!!";
|
||||
|
||||
@@ -426,7 +426,7 @@ mod tests {
|
||||
let table = lookup_and_parse_table_schema(conn, table_name).unwrap();
|
||||
|
||||
let registry = trailbase_extension::jsonschema::json_schema_registry_snapshot();
|
||||
let table_metadata = TableMetadata::new(®istry, table.clone(), &[table.clone()]);
|
||||
let table_metadata = TableMetadata::new(®istry, table.clone(), &[table.clone()]).unwrap();
|
||||
let (schema, _) = build_json_schema(
|
||||
®istry,
|
||||
&table_metadata.name().name,
|
||||
|
||||
+142
-88
@@ -3,6 +3,7 @@ use lazy_static::lazy_static;
|
||||
use log::*;
|
||||
use regex::Regex;
|
||||
use sqlite3_parser::ast::JoinType;
|
||||
use std::borrow::Borrow;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use thiserror::Error;
|
||||
@@ -73,16 +74,19 @@ pub struct JsonMetadata {
|
||||
}
|
||||
|
||||
impl JsonMetadata {
|
||||
fn from_columns(registry: &JsonSchemaRegistry, columns: &[Column]) -> Self {
|
||||
let columns: Vec<_> = columns
|
||||
fn from_columns(
|
||||
registry: &JsonSchemaRegistry,
|
||||
columns: &[Column],
|
||||
) -> Result<Self, JsonSchemaError> {
|
||||
let columns = columns
|
||||
.iter()
|
||||
.map(|c| build_json_metadata(registry, c))
|
||||
.collect();
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
return Self {
|
||||
return Ok(Self {
|
||||
file_column_indexes: find_file_column_indexes(&columns),
|
||||
columns,
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
pub fn has_file_columns(&self) -> bool {
|
||||
@@ -116,10 +120,14 @@ impl TableMetadata {
|
||||
///
|
||||
/// NOTE: The list of all tables is needed only to extract interger/UUIDv7 pk columns for foreign
|
||||
/// key relationships.
|
||||
pub fn new(registry: &JsonSchemaRegistry, table: Table, tables: &[Table]) -> Self {
|
||||
return TableMetadata {
|
||||
pub fn new(
|
||||
registry: &JsonSchemaRegistry,
|
||||
table: Table,
|
||||
tables: &[Table],
|
||||
) -> Result<Self, JsonSchemaError> {
|
||||
return Ok(TableMetadata {
|
||||
record_pk_column: find_record_pk_column_index_for_table(&table, tables),
|
||||
json_metadata: JsonMetadata::from_columns(registry, &table.columns),
|
||||
json_metadata: JsonMetadata::from_columns(registry, &table.columns)?,
|
||||
name_to_index: HashMap::<String, usize>::from_iter(
|
||||
table
|
||||
.columns
|
||||
@@ -128,7 +136,7 @@ impl TableMetadata {
|
||||
.map(|(index, col)| (col.name.clone(), index)),
|
||||
),
|
||||
schema: table,
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
#[inline]
|
||||
@@ -171,38 +179,41 @@ impl ViewMetadata {
|
||||
///
|
||||
/// NOTE: The list of all tables is needed only to extract interger/UUIDv7 pk columns for foreign
|
||||
/// key relationships.
|
||||
pub fn new(registry: &JsonSchemaRegistry, view: View, tables: &[Table]) -> Self {
|
||||
return match view.column_mapping {
|
||||
Some(ref column_mapping) => {
|
||||
let columns: Vec<Column> = column_mapping
|
||||
.columns
|
||||
.iter()
|
||||
.map(|m| m.column.clone())
|
||||
.collect();
|
||||
|
||||
let name_to_index = HashMap::<String, usize>::from_iter(
|
||||
columns
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(index, col)| (col.name.clone(), index)),
|
||||
);
|
||||
|
||||
ViewMetadata {
|
||||
name_to_index,
|
||||
json_metadata: Some(JsonMetadata::from_columns(registry, &columns)),
|
||||
columns: Some(columns),
|
||||
record_pk_column: find_record_pk_column_index_for_view(column_mapping, tables),
|
||||
schema: view,
|
||||
}
|
||||
}
|
||||
None => ViewMetadata {
|
||||
pub fn new(
|
||||
registry: &JsonSchemaRegistry,
|
||||
view: View,
|
||||
tables: &[Table],
|
||||
) -> Result<Self, JsonSchemaError> {
|
||||
let Some(column_mapping) = &view.column_mapping else {
|
||||
return Ok(ViewMetadata {
|
||||
name_to_index: HashMap::<String, usize>::default(),
|
||||
columns: None,
|
||||
record_pk_column: None,
|
||||
json_metadata: None,
|
||||
schema: view,
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
let columns: Vec<Column> = column_mapping
|
||||
.columns
|
||||
.iter()
|
||||
.map(|m| m.column.clone())
|
||||
.collect();
|
||||
|
||||
let name_to_index = HashMap::<String, usize>::from_iter(
|
||||
columns
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(index, col)| (col.name.clone(), index)),
|
||||
);
|
||||
|
||||
return Ok(ViewMetadata {
|
||||
name_to_index,
|
||||
json_metadata: Some(JsonMetadata::from_columns(registry, &columns)?),
|
||||
columns: Some(columns),
|
||||
record_pk_column: find_record_pk_column_index_for_view(column_mapping, tables),
|
||||
schema: view,
|
||||
});
|
||||
}
|
||||
|
||||
#[inline]
|
||||
@@ -233,11 +244,51 @@ impl ViewMetadata {
|
||||
}
|
||||
|
||||
pub enum TableOrView<'a> {
|
||||
Table(&'a Table),
|
||||
View(&'a View),
|
||||
}
|
||||
|
||||
impl<'a> TableOrView<'a> {
|
||||
pub fn qualified_name(&self) -> &'a QualifiedName {
|
||||
return match self {
|
||||
Self::Table(t) => &t.name,
|
||||
Self::View(v) => &v.name,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn columns(&self) -> Option<Vec<Column>> {
|
||||
return match self {
|
||||
Self::Table(t) => Some(t.columns.clone()),
|
||||
Self::View(v) => v
|
||||
.column_mapping
|
||||
.as_ref()
|
||||
.map(|m| m.columns.iter().map(|m| m.column.clone()).collect()),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn record_pk_column<T: Borrow<Table>>(&self, tables: &[T]) -> Option<(usize, &'a Column)> {
|
||||
return match self {
|
||||
Self::Table(t) => {
|
||||
find_record_pk_column_index_for_table(t, tables).map(|i| (i, &t.columns[i]))
|
||||
}
|
||||
Self::View(v) => {
|
||||
if let Some(ref mapping) = v.column_mapping {
|
||||
find_record_pk_column_index_for_view(mapping, tables)
|
||||
.map(|i| (i, &mapping.columns[i].column))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
pub enum TableOrViewMetadata<'a> {
|
||||
Table(&'a TableMetadata),
|
||||
View(&'a ViewMetadata),
|
||||
}
|
||||
|
||||
impl<'a> TableOrView<'a> {
|
||||
impl<'a> TableOrViewMetadata<'a> {
|
||||
pub fn qualified_name(&self) -> &'a QualifiedName {
|
||||
return match self {
|
||||
Self::Table(t) => &t.schema.name,
|
||||
@@ -260,17 +311,6 @@ impl<'a> TableOrView<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
/// A bunch of TABLEs and VIEWs as parsed from their SQLite definitions (i.e. CREATE TABLE/VIEW).
|
||||
///
|
||||
/// Unlike "Metadata" below, this is raw SQLite information doesn't contain any additional
|
||||
/// contextual information such as associated JSON schemas, file columns or optimize certain
|
||||
/// look-up patterns..
|
||||
#[derive(Default)]
|
||||
pub struct ConnectionSchemas {
|
||||
pub tables: Vec<Table>,
|
||||
pub views: Vec<View>,
|
||||
}
|
||||
|
||||
/// Contains schema metadata for a bunch of TABLEs and VIEWs, which may belong to different
|
||||
/// databases, e.g. all the TABLEs and VIEWs attached to a connection. Each can be uniquely
|
||||
/// identified by their fully qualified name.
|
||||
@@ -288,20 +328,22 @@ impl ConnectionMetadata {
|
||||
};
|
||||
}
|
||||
|
||||
pub fn from_schemas(schemas: ConnectionSchemas, registry: &JsonSchemaRegistry) -> Self {
|
||||
let table_metadata: Vec<TableMetadata> = schemas
|
||||
.tables
|
||||
pub fn from_schemas(
|
||||
tables: Vec<Table>,
|
||||
views: Vec<View>,
|
||||
registry: &JsonSchemaRegistry,
|
||||
) -> Result<Self, JsonSchemaError> {
|
||||
let table_metadata = tables
|
||||
.iter()
|
||||
.map(|t: &Table| TableMetadata::new(registry, t.clone(), &schemas.tables))
|
||||
.collect();
|
||||
.map(|t: &Table| TableMetadata::new(registry, t.clone(), &tables))
|
||||
.collect::<Result<Vec<TableMetadata>, _>>()?;
|
||||
|
||||
let view_metadata: Vec<ViewMetadata> = schemas
|
||||
.views
|
||||
let view_metadata = views
|
||||
.into_iter()
|
||||
.map(|view: View| ViewMetadata::new(registry, view, &schemas.tables))
|
||||
.collect();
|
||||
.map(|view: View| ViewMetadata::new(registry, view, &tables))
|
||||
.collect::<Result<Vec<ViewMetadata>, _>>()?;
|
||||
|
||||
return ConnectionMetadata::from(table_metadata, view_metadata);
|
||||
return Ok(ConnectionMetadata::from(table_metadata, view_metadata));
|
||||
}
|
||||
|
||||
pub fn get_table(&self, name: &QualifiedName) -> Option<&TableMetadata> {
|
||||
@@ -312,31 +354,36 @@ impl ConnectionMetadata {
|
||||
return self.views.get(name);
|
||||
}
|
||||
|
||||
pub fn get_table_or_view(&self, name: &QualifiedName) -> Option<TableOrView<'_>> {
|
||||
pub fn get_table_or_view(&self, name: &QualifiedName) -> Option<TableOrViewMetadata<'_>> {
|
||||
if let Some(table) = self.tables.get(name) {
|
||||
return Some(TableOrView::Table(table));
|
||||
return Some(TableOrViewMetadata::Table(table));
|
||||
}
|
||||
if let Some(view) = self.views.get(name) {
|
||||
return Some(TableOrView::View(view));
|
||||
return Some(TableOrViewMetadata::View(view));
|
||||
}
|
||||
return None;
|
||||
}
|
||||
|
||||
pub fn tables(&self) -> Vec<&Table> {
|
||||
return self.tables.values().map(|t| &t.schema).collect();
|
||||
}
|
||||
|
||||
pub fn views(&self) -> Vec<&View> {
|
||||
return self.views.values().map(|v| &v.schema).collect();
|
||||
}
|
||||
}
|
||||
|
||||
fn build_json_metadata(registry: &JsonSchemaRegistry, col: &Column) -> Option<JsonColumnMetadata> {
|
||||
fn build_json_metadata(
|
||||
registry: &JsonSchemaRegistry,
|
||||
col: &Column,
|
||||
) -> Result<Option<JsonColumnMetadata>, JsonSchemaError> {
|
||||
for opt in &col.options {
|
||||
match extract_json_metadata(registry, opt) {
|
||||
Ok(Some(metadata)) => {
|
||||
return Some(metadata);
|
||||
}
|
||||
Ok(None) => {}
|
||||
Err(err) => {
|
||||
// TODO: We should propagate the error.
|
||||
warn!("Failed to get JSON schema: {err}");
|
||||
}
|
||||
if let Some(metadata) = extract_json_metadata(registry, opt)? {
|
||||
return Ok(Some(metadata));
|
||||
}
|
||||
}
|
||||
None
|
||||
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
pub(crate) fn extract_json_metadata(
|
||||
@@ -429,7 +476,7 @@ pub(crate) fn is_pk_column(column: &Column) -> bool {
|
||||
return false;
|
||||
}
|
||||
|
||||
fn is_suitable_record_pk_column(column: &Column, tables: &[Table]) -> bool {
|
||||
fn is_suitable_record_pk_column<T: Borrow<Table>>(column: &Column, tables: &[T]) -> bool {
|
||||
if !is_pk_column(column) {
|
||||
return false;
|
||||
}
|
||||
@@ -465,12 +512,16 @@ fn is_suitable_record_pk_column(column: &Column, tables: &[Table]) -> bool {
|
||||
|
||||
// NOTE: Foreign keys cannot cross database boundaries, we can therefore compare by
|
||||
// unqualified name.
|
||||
let Some(referred_table) = tables.iter().find(|t| t.name.name == *foreign_table) else {
|
||||
let Some(referred_table) = tables
|
||||
.iter()
|
||||
.find(|t| (*t).borrow().name.name == *foreign_table)
|
||||
else {
|
||||
warn!("Failed to get foreign key schema for {foreign_table}");
|
||||
return false;
|
||||
};
|
||||
|
||||
let Some(foreign_column) = referred_table
|
||||
.borrow()
|
||||
.columns
|
||||
.iter()
|
||||
.find(|c| c.name == *referred_column)
|
||||
@@ -498,7 +549,10 @@ fn is_suitable_record_pk_column(column: &Column, tables: &[Table]) -> bool {
|
||||
/// Finds suitable Integer or UUIDv7/UUIDv4 primary key columns, if present.
|
||||
///
|
||||
/// Cursors require certain properties like a stable, time-sortable primary key.
|
||||
fn find_record_pk_column_index_for_table(table: &Table, tables: &[Table]) -> Option<usize> {
|
||||
fn find_record_pk_column_index_for_table<T: Borrow<Table>>(
|
||||
table: &Table,
|
||||
tables: &[T],
|
||||
) -> Option<usize> {
|
||||
if table.strict {
|
||||
for (index, column) in table.columns.iter().enumerate() {
|
||||
if is_suitable_record_pk_column(column, tables) {
|
||||
@@ -509,9 +563,9 @@ fn find_record_pk_column_index_for_table(table: &Table, tables: &[Table]) -> Opt
|
||||
return None;
|
||||
}
|
||||
|
||||
fn find_record_pk_column_index_for_view(
|
||||
fn find_record_pk_column_index_for_view<T: Borrow<Table>>(
|
||||
column_mapping: &ColumnMapping,
|
||||
tables: &[Table],
|
||||
tables: &[T],
|
||||
) -> Option<usize> {
|
||||
if let Some(group_by_index) = column_mapping.group_by {
|
||||
let column = &column_mapping.columns[group_by_index];
|
||||
@@ -609,7 +663,7 @@ mod tests {
|
||||
|
||||
let tables = [table.clone()];
|
||||
let registry = JsonSchemaRegistry::default();
|
||||
let metadata = TableMetadata::new(®istry, table, &tables);
|
||||
let metadata = TableMetadata::new(®istry, table, &tables).unwrap();
|
||||
|
||||
assert_eq!("table0", metadata.name().name);
|
||||
assert_eq!("col1", metadata.schema.columns[2].name);
|
||||
@@ -634,7 +688,7 @@ mod tests {
|
||||
assert_eq!(view_columns[1].column.name, "col1");
|
||||
assert_eq!(view_columns[1].column.data_type, ColumnDataType::Blob);
|
||||
|
||||
let view_metadata = ViewMetadata::new(®istry, table_view, &tables);
|
||||
let view_metadata = ViewMetadata::new(®istry, table_view, &tables).unwrap();
|
||||
|
||||
assert!(view_metadata.record_pk_column().is_none());
|
||||
assert_eq!(view_metadata.columns().as_ref().unwrap().len(), 2);
|
||||
@@ -649,7 +703,7 @@ mod tests {
|
||||
assert_eq!(table_view.query, query);
|
||||
assert_eq!(table_view.temporary, false);
|
||||
|
||||
let view_metadata = ViewMetadata::new(®istry, table_view, &tables);
|
||||
let view_metadata = ViewMetadata::new(®istry, table_view, &tables).unwrap();
|
||||
|
||||
let uuidv7_col = view_metadata.record_pk_column().unwrap();
|
||||
let columns = view_metadata.columns().unwrap();
|
||||
@@ -683,7 +737,7 @@ mod tests {
|
||||
assert_eq!(view_columns[1].column.name, "data");
|
||||
assert_eq!(view_columns[1].column.data_type, ColumnDataType::Text);
|
||||
|
||||
let metadata = ViewMetadata::new(®istry, view, &tables);
|
||||
let metadata = ViewMetadata::new(®istry, view, &tables).unwrap();
|
||||
let (pk_index, pk_col) = metadata.record_pk_column().unwrap();
|
||||
assert_eq!(pk_index, 0);
|
||||
assert_eq!(pk_col.name, "id");
|
||||
@@ -700,7 +754,7 @@ mod tests {
|
||||
assert_eq!(view_columns[0].column.name, "id");
|
||||
assert_eq!(view_columns[0].column.data_type, ColumnDataType::Integer);
|
||||
|
||||
let metadata = ViewMetadata::new(®istry, view, &tables);
|
||||
let metadata = ViewMetadata::new(®istry, view, &tables).unwrap();
|
||||
let (pk_index, pk_col) = metadata.record_pk_column().unwrap();
|
||||
assert_eq!(pk_index, 0);
|
||||
assert_eq!(pk_col.name, "id");
|
||||
@@ -717,7 +771,7 @@ mod tests {
|
||||
assert_eq!(view_columns[0].column.name, "id");
|
||||
assert_eq!(view_columns[0].column.data_type, ColumnDataType::Integer);
|
||||
|
||||
let metadata = ViewMetadata::new(®istry, view, &tables);
|
||||
let metadata = ViewMetadata::new(®istry, view, &tables).unwrap();
|
||||
let (pk_index, pk_col) = metadata.record_pk_column().unwrap();
|
||||
assert_eq!(pk_index, 0);
|
||||
assert_eq!(pk_col.name, "id");
|
||||
@@ -768,7 +822,7 @@ mod tests {
|
||||
assert_eq!(view_columns[1].column.name, "fk");
|
||||
assert_eq!(view_columns[1].column.data_type, ColumnDataType::Integer);
|
||||
|
||||
let metadata = ViewMetadata::new(®istry, view, &tables);
|
||||
let metadata = ViewMetadata::new(®istry, view, &tables).unwrap();
|
||||
let (pk_index, pk_col) = metadata.record_pk_column().unwrap();
|
||||
assert_eq!(pk_index, 2);
|
||||
assert_eq!(pk_col.name, "id");
|
||||
@@ -790,7 +844,7 @@ mod tests {
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let metadata = ViewMetadata::new(®istry, view, &tables);
|
||||
let metadata = ViewMetadata::new(®istry, view, &tables).unwrap();
|
||||
assert_eq!(
|
||||
expected,
|
||||
metadata.record_pk_column().map(|c| c.0),
|
||||
@@ -828,7 +882,7 @@ mod tests {
|
||||
let view = parse_create_view(sql, &tables).unwrap();
|
||||
assert!(view.column_mapping.is_some(), "{i}: {sql}");
|
||||
|
||||
let metadata = ViewMetadata::new(®istry, view, &tables);
|
||||
let metadata = ViewMetadata::new(®istry, view, &tables).unwrap();
|
||||
assert_eq!(Some(1), metadata.record_pk_column().map(|c| c.0));
|
||||
}
|
||||
}
|
||||
@@ -840,7 +894,7 @@ mod tests {
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let metadata = ViewMetadata::new(®istry, view, &tables);
|
||||
let metadata = ViewMetadata::new(®istry, view, &tables).unwrap();
|
||||
assert_eq!(Some(1), metadata.record_pk_column().map(|c| c.0));
|
||||
}
|
||||
}
|
||||
@@ -883,7 +937,7 @@ mod tests {
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let metadata = ViewMetadata::new(®istry, view, &tables);
|
||||
let metadata = ViewMetadata::new(®istry, view, &tables).unwrap();
|
||||
assert_eq!(Some(0), metadata.record_pk_column().map(|c| c.0));
|
||||
}
|
||||
|
||||
@@ -900,7 +954,7 @@ mod tests {
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let metadata = ViewMetadata::new(®istry, view, &tables);
|
||||
let metadata = ViewMetadata::new(®istry, view, &tables).unwrap();
|
||||
assert_eq!(Some(0), metadata.record_pk_column().map(|c| c.0));
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user