Add dry-run to all the schema-mutating admin handlers. #89

Sebastian Jeltsch
2025-07-18 12:12:45 +02:00
parent ed7aacd759
commit 500760fd13
19 changed files with 281 additions and 206 deletions
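
In short: every schema-mutating admin endpoint now takes an optional `dry_run` flag and answers with the SQL it would execute, instead of a plain-text status. A minimal sketch of the resulting contract, using the drop-table shapes exactly as the bindings below export them (the fetch plumbing and helper name are assumptions):

```ts
// Shapes as exported by the ts-rs bindings in this commit.
type DropTableRequest = { name: string; dry_run: boolean | null };
type DropTableResponse = { sql: string };

// Hypothetical preview helper: a dry run records the statements in a
// rolled-back transaction and returns them without applying anything.
async function previewDropTable(
  post: (req: DropTableRequest) => Promise<DropTableResponse>,
  name: string,
): Promise<string> {
  const { sql } = await post({ name, dry_run: true });
  return sql; // formatted SQL, or "" for a no-op
}
```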

View File

@@ -59,8 +59,13 @@ export function CreateAlterIndexForm(props: {
const response = await alterIndex({
source_schema: o,
target_schema: value,
dry_run: dryRun,
});
console.debug("AlterIndexResponse:", response);
if (dryRun) {
setSql(response.sql);
}
} else {
const response = await createIndex({ schema: value, dry_run: dryRun });
console.debug(`CreateIndexResponse [dry: ${dryRun}]:`, response);
@@ -71,7 +76,10 @@ export function CreateAlterIndexForm(props: {
}
if (!dryRun) {
// Reload schemas.
props.schemaRefetch();
// Close dialog/sheet.
props.close();
}
} catch (err) {

View File

@@ -66,8 +66,13 @@ export function CreateAlterTableForm(props: {
const response = await alterTable({
source_schema: o,
target_schema: value,
dry_run: dryRun,
});
console.debug("AlterTableResponse:", response);
if (dryRun) {
setSql(response.sql);
}
} else {
const response = await createTable({ schema: value, dry_run: dryRun });
console.debug(`CreateTableResponse [dry: ${dryRun}]:`, response);
@@ -78,10 +83,15 @@ export function CreateAlterTableForm(props: {
}
if (!dryRun) {
// Trigger config reload
invalidateConfig(queryClient);
// Reload schemas.
props.schemaRefetch().then(() => {
props.setSelected(value.name);
});
// Close dialog/sheet.
props.close();
}
} catch (err) {

View File

@@ -253,6 +253,7 @@ function TableHeaderRightHandButtons(props: {
(async () => {
await dropTable({
name: table().name.name,
dry_run: null,
});
invalidateConfig(queryClient);
@@ -927,7 +928,7 @@ export function TablePane(props: {
const deleteIndexes = async () => {
for (const name of names) {
await dropIndex({ name });
await dropIndex({ name, dry_run: null });
}
setSelectedIndexes(new Set<string>());

View File

@@ -2,13 +2,17 @@ import { adminFetch } from "@/lib/fetch";
import { useQuery } from "@tanstack/solid-query";
import type { AlterIndexRequest } from "@bindings/AlterIndexRequest";
import type { AlterIndexResponse } from "@bindings/AlterIndexResponse";
import type { AlterTableRequest } from "@bindings/AlterTableRequest";
import type { AlterTableResponse } from "@bindings/AlterTableResponse";
import type { CreateIndexRequest } from "@bindings/CreateIndexRequest";
import type { CreateIndexResponse } from "@bindings/CreateIndexResponse";
import type { CreateTableRequest } from "@bindings/CreateTableRequest";
import type { CreateTableResponse } from "@bindings/CreateTableResponse";
import type { DropIndexRequest } from "@bindings/DropIndexRequest";
import type { DropIndexResponse } from "@bindings/DropIndexResponse";
import type { DropTableRequest } from "@bindings/DropTableRequest";
import type { DropTableResponse } from "@bindings/DropTableResponse";
import type { ListSchemasResponse } from "@bindings/ListSchemasResponse";
export function createTableSchemaQuery() {
@@ -45,34 +49,42 @@ export async function createTable(
return await response.json();
}
export async function alterIndex(request: AlterIndexRequest) {
export async function alterIndex(
request: AlterIndexRequest,
): Promise<AlterIndexResponse> {
const response = await adminFetch("/index", {
method: "PATCH",
body: JSON.stringify(request),
});
return await response.text();
return await response.json();
}
export async function alterTable(request: AlterTableRequest) {
export async function alterTable(
request: AlterTableRequest,
): Promise<AlterTableResponse> {
const response = await adminFetch("/table", {
method: "PATCH",
body: JSON.stringify(request),
});
return await response.text();
return await response.json();
}
export async function dropIndex(request: DropIndexRequest) {
export async function dropIndex(
request: DropIndexRequest,
): Promise<DropIndexResponse> {
const response = await adminFetch("/index", {
method: "DELETE",
body: JSON.stringify(request),
});
return await response.text();
return await response.json();
}
export async function dropTable(request: DropTableRequest) {
export async function dropTable(
request: DropTableRequest,
): Promise<DropTableResponse> {
const response = await adminFetch("/table", {
method: "DELETE",
body: JSON.stringify(request),
});
return await response.text();
return await response.json();
}
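
With the wrappers above returning typed JSON instead of `response.text()`, a preview-then-apply flow falls out naturally. A sketch under the assumption that `alterTable` is imported from the module above (its path isn't visible in this diff):

```ts
import type { Table } from "@bindings/Table";
import type { AlterTableResponse } from "@bindings/AlterTableResponse";

// `alterTable` as defined above; the import path is hypothetical.
import { alterTable } from "@/lib/api";

async function alterWithPreview(source: Table, target: Table): Promise<void> {
  // First pass: a dry run records the SQL inside a rolled-back transaction.
  const { sql }: AlterTableResponse = await alterTable({
    source_schema: source,
    target_schema: target,
    dry_run: true,
  });
  if (sql === "") return; // identical schemas short-circuit server-side

  if (confirm(`Apply the following migration?\n\n${sql}`)) {
    // Second pass: actually write the migration file and apply it.
    await alterTable({ source_schema: source, target_schema: target, dry_run: false });
  }
}
```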

View File

@@ -1,4 +1,4 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { TableIndex } from "./TableIndex";
export type AlterIndexRequest = { source_schema: TableIndex, target_schema: TableIndex, };
export type AlterIndexRequest = { source_schema: TableIndex, target_schema: TableIndex, dry_run: boolean | null, };

View File

@@ -0,0 +1,3 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type AlterIndexResponse = { sql: string, };

View File

@@ -1,4 +1,4 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { Table } from "./Table";
export type AlterTableRequest = { source_schema: Table, target_schema: Table, };
export type AlterTableRequest = { source_schema: Table, target_schema: Table, dry_run: boolean | null, };

View File

@@ -0,0 +1,3 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type AlterTableResponse = { sql: string, };

View File

@@ -1,3 +1,3 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type DropIndexRequest = { name: string, };
export type DropIndexRequest = { name: string, dry_run: boolean | null, };

View File

@@ -0,0 +1,3 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type DropIndexResponse = { sql: string, };

View File

@@ -1,3 +1,3 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type DropTableRequest = { name: string, };
export type DropTableRequest = { name: string, dry_run: boolean | null, };

View File

@@ -0,0 +1,3 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type DropTableResponse = { sql: string, };
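
One consequence of `Option<bool>` exporting as `boolean | null` rather than as an optional field: callers cannot simply omit `dry_run`, which is why the UI changes earlier in this diff pass `dry_run: null` explicitly when the mutation should be applied right away. For illustration (the index name is hypothetical):

```ts
import type { DropIndexRequest } from "@bindings/DropIndexRequest";

// Pass null (or false) to apply immediately; true to only preview the SQL.
const applyNow: DropIndexRequest = { name: "idx_example", dry_run: null };
const previewOnly: DropIndexRequest = { name: "idx_example", dry_run: true };
```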

View File

@@ -1,11 +1,6 @@
use axum::{
Json,
extract::State,
http::StatusCode,
response::{IntoResponse, Response},
};
use axum::extract::{Json, State};
use log::*;
use serde::Deserialize;
use serde::{Deserialize, Serialize};
use trailbase_schema::sqlite::TableIndex;
use ts_rs::TS;
@@ -18,6 +13,13 @@ use crate::transaction::TransactionRecorder;
pub struct AlterIndexRequest {
pub source_schema: TableIndex,
pub target_schema: TableIndex,
pub dry_run: Option<bool>,
}
#[derive(Clone, Debug, Serialize, TS)]
#[ts(export)]
pub struct AlterIndexResponse {
pub sql: String,
}
// NOTE: sqlite has very limited alter table support, thus we're always recreating the table and
@@ -26,11 +28,12 @@ pub struct AlterIndexRequest {
pub async fn alter_index_handler(
State(state): State<AppState>,
Json(request): Json<AlterIndexRequest>,
) -> Result<Response, Error> {
) -> Result<Json<AlterIndexResponse>, Error> {
if state.demo_mode() && request.source_schema.name.name.starts_with("_") {
return Err(Error::Precondition("Disallowed in demo".into()));
}
let dry_run = request.dry_run.unwrap_or(false);
let source_schema = request.source_schema;
let source_index_name = source_schema.name.clone();
let target_schema = request.target_schema;
@@ -38,8 +41,14 @@ pub async fn alter_index_handler(
debug!("Alter index:\nsource: {source_schema:?}\ntarget: {target_schema:?}",);
let conn = state.conn();
let log = conn
if source_schema == target_schema {
return Ok(Json(AlterIndexResponse {
sql: "".to_string(),
}));
}
let tx_log = state
.conn()
.call(move |conn| {
let mut tx = TransactionRecorder::new(conn)?;
@@ -62,14 +71,18 @@ pub async fn alter_index_handler(
})
.await?;
// Write to migration file.
if let Some(log) = log {
let migration_path = state.data_dir().migrations_path();
let report = log
.apply_as_migration(conn, migration_path, &filename)
.await?;
debug!("Migration report: {report:?}");
if !dry_run {
// Take transaction log, write a migration file and apply.
if let Some(ref log) = tx_log {
let migration_path = state.data_dir().migrations_path();
let report = log
.apply_as_migration(state.conn(), migration_path, &filename)
.await?;
debug!("Migration report: {report:?}");
}
}
return Ok((StatusCode::OK, "altered index").into_response());
return Ok(Json(AlterIndexResponse {
sql: tx_log.map(|l| l.build_sql()).unwrap_or_default(),
}));
}
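
Two client-observable behaviors of the handler above are worth spelling out: identical source and target schemas short-circuit to `sql: ""` before any transaction is recorded, and a dry run never writes a migration file. A small sketch (the fetch wrapper is assumed):

```ts
import type { AlterIndexRequest } from "@bindings/AlterIndexRequest";
import type { AlterIndexResponse } from "@bindings/AlterIndexResponse";

async function isNoopAlter(
  alterIndex: (r: AlterIndexRequest) => Promise<AlterIndexResponse>,
  request: Omit<AlterIndexRequest, "dry_run">,
): Promise<boolean> {
  // Dry run: statements are recorded and rolled back, nothing is applied.
  const { sql } = await alterIndex({ ...request, dry_run: true });
  return sql === "";
}
```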

View File

@@ -1,13 +1,8 @@
use std::collections::HashSet;
use axum::{
Json,
extract::State,
http::StatusCode,
response::{IntoResponse, Response},
};
use axum::extract::{Json, State};
use log::*;
use serde::Deserialize;
use serde::{Deserialize, Serialize};
use trailbase_schema::sqlite::{QualifiedName, Table};
use ts_rs::TS;
@@ -21,6 +16,13 @@ use crate::transaction::{TransactionLog, TransactionRecorder};
pub struct AlterTableRequest {
pub source_schema: Table,
pub target_schema: Table,
pub dry_run: Option<bool>,
}
#[derive(Clone, Debug, Serialize, TS)]
#[ts(export)]
pub struct AlterTableResponse {
pub sql: String,
}
/// Admin-only handler for altering `TABLE` schemas.
@@ -30,7 +32,7 @@ pub struct AlterTableRequest {
pub async fn alter_table_handler(
State(state): State<AppState>,
Json(request): Json<AlterTableRequest>,
) -> Result<Response, Error> {
) -> Result<Json<AlterTableResponse>, Error> {
if state.demo_mode() && request.source_schema.name.name.starts_with("_") {
return Err(Error::Precondition("Disallowed in demo".into()));
}
@@ -38,6 +40,7 @@ pub async fn alter_table_handler(
return Err(Error::Precondition("Cannot move between databases".into()));
}
let dry_run = request.dry_run.unwrap_or(false);
let source_schema = request.source_schema;
let source_table_name = source_schema.name.clone();
let filename = source_table_name.migration_filename("alter_table");
@@ -53,6 +56,12 @@ pub async fn alter_table_handler(
debug!("Alter table:\nsource: {source_schema:?}\ntarget: {target_schema:?}",);
if source_schema == target_schema {
return Ok(Json(AlterTableResponse {
sql: "".to_string(),
}));
}
// Check that removing columns won't break record API configuration. Note that table renames will
// be fixed up automatically later.
check_column_removals_invalidating_config(&state, &source_schema, &target_schema)?;
@@ -90,8 +99,8 @@ pub async fn alter_table_handler(
let mut target_schema_copy = target_schema.clone();
target_schema_copy.name = temp_table_name.clone();
let conn = state.conn();
let log = conn
let tx_log = state
.conn()
.call(
move |conn| -> Result<Option<TransactionLog>, trailbase_sqlite::Error> {
let mut tx = TransactionRecorder::new(conn)
@@ -149,41 +158,45 @@ pub async fn alter_table_handler(
)
.await?;
// Write migration file and apply it right away.
if let Some(log) = log {
let migration_path = state.data_dir().migrations_path();
let report = log
.apply_as_migration(state.conn(), migration_path, &filename)
.await?;
debug!("Migration report: {report:?}");
}
state.schema_metadata().invalidate_all().await?;
// Fix configuration: update all references to the table held by existing APIs.
if is_table_rename
&& matches!(
source_schema.name.database_schema.as_deref(),
Some("main") | None
)
{
let mut config = state.get_config();
let old_config_hash = hash_config(&config);
for api in &mut config.record_apis {
if let Some(ref name) = api.table_name {
if *name == source_schema.name.name {
api.table_name = Some(target_schema.name.name.clone());
}
}
if !dry_run {
// Take transaction log, write a migration file and apply.
if let Some(ref log) = tx_log {
let migration_path = state.data_dir().migrations_path();
let report = log
.apply_as_migration(state.conn(), migration_path, &filename)
.await?;
debug!("Migration report: {report:?}");
}
state
.validate_and_update_config(config, Some(old_config_hash))
.await?;
state.schema_metadata().invalidate_all().await?;
// Fix configuration: update all references to the table held by existing APIs.
if is_table_rename
&& matches!(
source_schema.name.database_schema.as_deref(),
Some("main") | None
)
{
let mut config = state.get_config();
let old_config_hash = hash_config(&config);
for api in &mut config.record_apis {
if let Some(ref name) = api.table_name {
if *name == source_schema.name.name {
api.table_name = Some(target_schema.name.name.clone());
}
}
}
state
.validate_and_update_config(config, Some(old_config_hash))
.await?;
}
}
return Ok((StatusCode::OK, "altered table").into_response());
return Ok(Json(AlterTableResponse {
sql: tx_log.map(|l| l.build_sql()).unwrap_or_default(),
}));
}
fn check_column_removals_invalidating_config(
@@ -322,11 +335,14 @@ mod tests {
let alter_table_request = AlterTableRequest {
source_schema: create_table_request.schema.clone(),
target_schema: create_table_request.schema.clone(),
dry_run: None,
};
alter_table_handler(State(state.clone()), Json(alter_table_request.clone()))
.await
.unwrap();
let Json(response) =
alter_table_handler(State(state.clone()), Json(alter_table_request.clone()))
.await
.unwrap();
assert_eq!(response.sql, "");
conn
.read_query_rows(format!("SELECT {pk_col} FROM foo"), ())
@@ -351,11 +367,14 @@ mod tests {
let alter_table_request = AlterTableRequest {
source_schema: create_table_request.schema.clone(),
target_schema,
dry_run: None,
};
alter_table_handler(State(state.clone()), Json(alter_table_request.clone()))
.await
.unwrap();
let Json(response) =
alter_table_handler(State(state.clone()), Json(alter_table_request.clone()))
.await
.unwrap();
assert!(response.sql.contains("new"));
conn
.read_query_rows(format!("SELECT {pk_col}, new FROM foo"), ())
@@ -373,11 +392,14 @@ mod tests {
let alter_table_request = AlterTableRequest {
source_schema: create_table_request.schema.clone(),
target_schema,
dry_run: None,
};
alter_table_handler(State(state.clone()), Json(alter_table_request.clone()))
.await
.unwrap();
let Json(response) =
alter_table_handler(State(state.clone()), Json(alter_table_request.clone()))
.await
.unwrap();
assert!(response.sql.contains("bar"));
assert!(conn.read_query_rows("SELECT * FROM foo", ()).await.is_err());
conn

View File

@@ -30,40 +30,30 @@ pub async fn create_index_handler(
let create_index_query = request.schema.create_index_statement();
let tx_log = state
.conn()
.call(move |conn| {
let mut tx = TransactionRecorder::new(conn)?;
tx.execute(&create_index_query, ())?;
return tx
.rollback()
.map_err(|err| trailbase_sqlite::Error::Other(err.into()));
})
.await?;
if !dry_run {
let create_index_query = create_index_query.clone();
let conn = state.conn();
let log = conn
.call(move |conn| {
let mut tx = TransactionRecorder::new(conn)?;
tx.execute(&create_index_query, ())?;
return tx
.rollback()
.map_err(|err| trailbase_sqlite::Error::Other(err.into()));
})
.await?;
// Write to migration file.
if let Some(log) = log {
// Take transaction log, write a migration file and apply.
if let Some(ref log) = tx_log {
let migration_path = state.data_dir().migrations_path();
log
.apply_as_migration(conn, migration_path, &filename)
.apply_as_migration(state.conn(), migration_path, &filename)
.await?;
}
}
return Ok(Json(CreateIndexResponse {
sql: sqlformat::format(
&format!("{create_index_query};"),
&sqlformat::QueryParams::None,
&sqlformat::FormatOptions {
ignore_case_convert: None,
indent: sqlformat::Indent::Spaces(2),
uppercase: Some(true),
lines_between_queries: 1,
},
),
sql: tx_log.map(|l| l.build_sql()).unwrap_or_default(),
}));
}

View File

@@ -35,23 +35,22 @@ pub async fn create_table_handler(
// This contains the create table statement and may also contain indexes and triggers.
let create_table_query = request.schema.create_table_statement();
let conn = state.conn();
let tx_log = conn
.call(move |conn| {
let mut tx = TransactionRecorder::new(conn)?;
tx.execute(&create_table_query, ())?;
return tx
.rollback()
.map_err(|err| trailbase_sqlite::Error::Other(err.into()));
})
.await?;
if !dry_run {
let create_table_query = create_table_query.clone();
let conn = state.conn();
let log = conn
.call(move |conn| {
let mut tx = TransactionRecorder::new(conn)?;
tx.execute(&create_table_query, ())?;
return tx
.rollback()
.map_err(|err| trailbase_sqlite::Error::Other(err.into()));
})
.await?;
// Write to migration file.
if let Some(log) = log {
// Take transaction log, write a migration file and apply.
if let Some(ref log) = tx_log {
let migration_path = state.data_dir().migrations_path();
let _report = log
.apply_as_migration(conn, migration_path, &filename)
@@ -62,15 +61,6 @@ pub async fn create_table_handler(
}
return Ok(Json(CreateTableResponse {
sql: sqlformat::format(
format!("{create_table_query};").as_str(),
&sqlformat::QueryParams::None,
&sqlformat::FormatOptions {
ignore_case_convert: None,
indent: sqlformat::Indent::Spaces(2),
uppercase: Some(true),
lines_between_queries: 1,
},
),
sql: tx_log.map(|l| l.build_sql()).unwrap_or_default(),
}));
}

View File

@@ -1,11 +1,6 @@
use axum::{
Json,
extract::State,
http::StatusCode,
response::{IntoResponse, Response},
};
use axum::extract::{Json, State};
use log::*;
use serde::Deserialize;
use serde::{Deserialize, Serialize};
use trailbase_schema::sqlite::QualifiedName;
use ts_rs::TS;
@@ -17,20 +12,29 @@ use crate::transaction::TransactionRecorder;
#[ts(export)]
pub struct DropIndexRequest {
pub name: String,
pub dry_run: Option<bool>,
}
#[derive(Clone, Debug, Serialize, TS)]
#[ts(export)]
pub struct DropIndexResponse {
pub sql: String,
}
pub async fn drop_index_handler(
State(state): State<AppState>,
Json(request): Json<DropIndexRequest>,
) -> Result<Response, Error> {
) -> Result<Json<DropIndexResponse>, Error> {
let index_name = QualifiedName::parse(&request.name)?;
if state.demo_mode() && index_name.name.starts_with("_") {
return Err(Error::Precondition("Disallowed in demo".into()));
}
let dry_run = request.dry_run.unwrap_or(false);
let filename = index_name.migration_filename("drop_index");
let conn = state.conn();
let log = conn
let tx_log = state
.conn()
.call(move |conn| {
let mut tx = TransactionRecorder::new(conn)?;
@@ -47,13 +51,17 @@ pub async fn drop_index_handler(
})
.await?;
// Write to migration file.
if let Some(log) = log {
let migration_path = state.data_dir().migrations_path();
let _report = log
.apply_as_migration(conn, migration_path, &filename)
.await?;
if !dry_run {
// Take transaction log, write a migration file and apply.
if let Some(ref log) = tx_log {
let migration_path = state.data_dir().migrations_path();
let _report = log
.apply_as_migration(state.conn(), migration_path, &filename)
.await?;
}
}
return Ok((StatusCode::OK, "").into_response());
return Ok(Json(DropIndexResponse {
sql: tx_log.map(|l| l.build_sql()).unwrap_or_default(),
}));
}

View File

@@ -1,11 +1,6 @@
use axum::{
Json,
extract::State,
http::StatusCode,
response::{IntoResponse, Response},
};
use axum::extract::{Json, State};
use log::*;
use serde::Deserialize;
use serde::{Deserialize, Serialize};
use trailbase_schema::QualifiedName;
use ts_rs::TS;
@@ -17,18 +12,27 @@ use crate::transaction::TransactionRecorder;
#[derive(Clone, Debug, Deserialize, TS)]
#[ts(export)]
pub struct DropTableRequest {
// TODO: Should be fully qualified.
pub name: String,
pub dry_run: Option<bool>,
}
#[derive(Clone, Debug, Serialize, TS)]
#[ts(export)]
pub struct DropTableResponse {
pub sql: String,
}
pub async fn drop_table_handler(
State(state): State<AppState>,
Json(request): Json<DropTableRequest>,
) -> Result<Response, Error> {
) -> Result<Json<DropTableResponse>, Error> {
let unqualified_table_name = request.name.to_string();
if state.demo_mode() && unqualified_table_name.starts_with("_") {
return Err(Error::Precondition("Disallowed in demo".into()));
}
let dry_run = request.dry_run.unwrap_or(false);
let table_name = QualifiedName::parse(&request.name)?;
let entity_type: &str;
@@ -43,7 +47,7 @@ pub async fn drop_table_handler(
}
let filename = table_name.migration_filename(&format!("drop_{}", entity_type.to_lowercase()));
let log = state
let tx_log = state
.conn()
.call(move |conn| {
let mut tx = TransactionRecorder::new(conn)?;
@@ -61,29 +65,33 @@ pub async fn drop_table_handler(
})
.await?;
// Write migration file and apply it right away.
if let Some(log) = log {
let migration_path = state.data_dir().migrations_path();
let _report = log
.apply_as_migration(state.conn(), migration_path, &filename)
if !dry_run {
// Write migration file and apply it right away.
if let Some(ref log) = tx_log {
let migration_path = state.data_dir().migrations_path();
let _report = log
.apply_as_migration(state.conn(), migration_path, &filename)
.await?;
}
state.schema_metadata().invalidate_all().await?;
// Fix configuration: remove all APIs referencing the no-longer-existing table.
let mut config = state.get_config();
let old_config_hash = hash_config(&config);
config.record_apis.retain(|c| {
if let Some(ref name) = c.table_name {
return *name != unqualified_table_name;
}
return true;
});
state
.validate_and_update_config(config, Some(old_config_hash))
.await?;
}
state.schema_metadata().invalidate_all().await?;
// Fix configuration: remove all APIs referencing the no-longer-existing table.
let mut config = state.get_config();
let old_config_hash = hash_config(&config);
config.record_apis.retain(|c| {
if let Some(ref name) = c.table_name {
return *name != unqualified_table_name;
}
return true;
});
state
.validate_and_update_config(config, Some(old_config_hash))
.await?;
return Ok((StatusCode::OK, "").into_response());
return Ok(Json(DropTableResponse {
sql: tx_log.map(|l| l.build_sql()).unwrap_or_default(),
}));
}
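
Note that the record-API cleanup is now gated behind `!dry_run` as well, so a preview leaves the configuration untouched. A TypeScript paraphrase of the `retain` predicate, purely for illustration:

```ts
type RecordApiConfig = { table_name?: string };

// Keep every API that names no table, or a different table than the one dropped.
function pruneRecordApis(apis: RecordApiConfig[], dropped: string): RecordApiConfig[] {
  return apis.filter((api) => api.table_name !== dropped);
}
```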

View File

@@ -29,6 +29,30 @@ pub struct TransactionLog {
}
impl TransactionLog {
pub(crate) fn build_sql(&self) -> String {
let sql_string: String = self
.log
.iter()
.filter_map(|(_, stmt)| match stmt.as_str() {
"" => None,
x if x.ends_with(";") => Some(stmt.clone()),
x => Some(format!("{x};")),
})
.collect::<Vec<String>>()
.join("\n");
return sqlformat::format(
&sql_string,
&sqlformat::QueryParams::None,
&sqlformat::FormatOptions {
ignore_case_convert: None,
indent: sqlformat::Indent::Spaces(4),
uppercase: Some(true),
lines_between_queries: 2,
},
);
}
/// Commit previously recorded transaction log on provided connection.
pub(crate) async fn apply_as_migration(
&self,
@@ -44,30 +68,7 @@ impl TransactionLog {
.to_string();
let path = migration_path.as_ref().join(filename);
let sql = {
let sql_string: String = self
.log
.iter()
.filter_map(|(_, stmt)| match stmt.as_str() {
"" => None,
x if x.ends_with(";") => Some(stmt.clone()),
x => Some(format!("{x};")),
})
.collect::<Vec<String>>()
.join("\n");
sqlformat::format(
&sql_string,
&sqlformat::QueryParams::None,
&sqlformat::FormatOptions {
ignore_case_convert: None,
indent: sqlformat::Indent::Spaces(4),
uppercase: Some(true),
lines_between_queries: 2,
},
)
};
let sql = self.build_sql();
let migrations = vec![trailbase_refinery::Migration::unapplied(&stem, &sql)?];
let runner = migrations::new_migration_runner(&migrations).set_abort_missing(false);
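
For reference, the statement-joining behavior that the new `build_sql` factors out of `apply_as_migration`, paraphrased in TypeScript (the trailing `sqlformat` pretty-printing pass is elided):

```ts
// Empty statements are dropped, each remaining statement gets a terminating
// ";" unless it already has one, and the results are newline-joined.
function buildSql(log: Array<[unknown, string]>): string {
  return log
    .map(([, stmt]) => stmt)
    .filter((stmt) => stmt !== "")
    .map((stmt) => (stmt.endsWith(";") ? stmt : `${stmt};`))
    .join("\n");
}
```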