Merge branch 'develop' into feat/llm-integration-part2
@@ -87,8 +87,8 @@
         "mime-types": "3.0.1",
         "multer": "2.0.0",
         "normalize-strings": "1.1.1",
-        "ollama": "0.5.15",
-        "openai": "4.103.0",
+        "ollama": "0.5.16",
+        "openai": "4.104.0",
         "rand-token": "1.0.1",
         "safe-compare": "1.1.4",
         "sanitize-filename": "1.6.3",
@@ -21,7 +21,7 @@ fi
 echo "Selected Arch: $ARCH"
 
 # Set Node.js version and architecture-specific filename
-NODE_VERSION=22.14.0
+NODE_VERSION=22.16.0
 
 script_dir=$(realpath $(dirname $0))
 BUILD_DIR="$script_dir/../dist"
@@ -43,7 +43,7 @@ rm -rf $BUILD_DIR/node/lib/node_modules/{npm,corepack} \
     $BUILD_DIR/node_modules/electron* \
     $BUILD_DIR/electron*.{js,map}
 
-printf "#!/bin/sh\n./node/bin/node src/main\n" > $BUILD_DIR/trilium.sh
+printf "#!/bin/sh\n./node/bin/node main.cjs\n" > $BUILD_DIR/trilium.sh
 chmod 755 $BUILD_DIR/trilium.sh
 
 VERSION=`jq -r ".version" package.json`
@@ -1,13 +0,0 @@
-CREATE TABLE IF NOT EXISTS "blobs" (
-    `blobId` TEXT NOT NULL,
-    `content` TEXT NULL DEFAULT NULL,
-    `dateModified` TEXT NOT NULL,
-    `utcDateModified` TEXT NOT NULL,
-    PRIMARY KEY(`blobId`)
-);
-
-ALTER TABLE notes ADD blobId TEXT DEFAULT NULL;
-ALTER TABLE note_revisions ADD blobId TEXT DEFAULT NULL;
-
-CREATE INDEX IF NOT EXISTS IDX_notes_blobId on notes (blobId);
-CREATE INDEX IF NOT EXISTS IDX_note_revisions_blobId on note_revisions (blobId);
@@ -1,4 +0,0 @@
-DROP TABLE note_contents;
-DROP TABLE note_revision_contents;
-
-DELETE FROM entity_changes WHERE entityName IN ('note_contents', 'note_revision_contents');
@@ -1,26 +0,0 @@
-CREATE TABLE IF NOT EXISTS "revisions" (`revisionId` TEXT NOT NULL PRIMARY KEY,
-    `noteId` TEXT NOT NULL,
-    type TEXT DEFAULT '' NOT NULL,
-    mime TEXT DEFAULT '' NOT NULL,
-    `title` TEXT NOT NULL,
-    `isProtected` INT NOT NULL DEFAULT 0,
-    blobId TEXT DEFAULT NULL,
-    `utcDateLastEdited` TEXT NOT NULL,
-    `utcDateCreated` TEXT NOT NULL,
-    `utcDateModified` TEXT NOT NULL,
-    `dateLastEdited` TEXT NOT NULL,
-    `dateCreated` TEXT NOT NULL);
-
-INSERT INTO revisions (revisionId, noteId, type, mime, title, isProtected, utcDateLastEdited, utcDateCreated, utcDateModified, dateLastEdited, dateCreated, blobId)
-SELECT noteRevisionId, noteId, type, mime, title, isProtected, utcDateLastEdited, utcDateCreated, utcDateModified, dateLastEdited, dateCreated, blobId FROM note_revisions;
-
-DROP TABLE note_revisions;
-
-CREATE INDEX `IDX_revisions_noteId` ON `revisions` (`noteId`);
-CREATE INDEX `IDX_revisions_utcDateCreated` ON `revisions` (`utcDateCreated`);
-CREATE INDEX `IDX_revisions_utcDateLastEdited` ON `revisions` (`utcDateLastEdited`);
-CREATE INDEX `IDX_revisions_dateCreated` ON `revisions` (`dateCreated`);
-CREATE INDEX `IDX_revisions_dateLastEdited` ON `revisions` (`dateLastEdited`);
-CREATE INDEX IF NOT EXISTS IDX_revisions_blobId on revisions (blobId);
-
-UPDATE entity_changes SET entityName = 'revisions' WHERE entityName = 'note_revisions';
@@ -1,23 +0,0 @@
-CREATE TABLE IF NOT EXISTS "attachments"
-(
-    attachmentId TEXT not null primary key,
-    ownerId TEXT not null,
-    role TEXT not null,
-    mime TEXT not null,
-    title TEXT not null,
-    isProtected INT not null DEFAULT 0,
-    position INT default 0 not null,
-    blobId TEXT DEFAULT null,
-    dateModified TEXT NOT NULL,
-    utcDateModified TEXT not null,
-    utcDateScheduledForErasureSince TEXT DEFAULT NULL,
-    isDeleted INT not null,
-    deleteId TEXT DEFAULT NULL);
-
-CREATE INDEX IDX_attachments_ownerId_role
-    on attachments (ownerId, role);
-
-CREATE INDEX IDX_attachments_utcDateScheduledForErasureSince
-    on attachments (utcDateScheduledForErasureSince);
-
-CREATE INDEX IF NOT EXISTS IDX_attachments_blobId on attachments (blobId);
@@ -1,2 +0,0 @@
-DELETE FROM options WHERE name = 'hideIncludedImages_main';
-DELETE FROM entity_changes WHERE entityName = 'options' AND entityId = 'hideIncludedImages_main';
@@ -1,2 +0,0 @@
-UPDATE options SET name = 'openNoteContexts' WHERE name = 'openTabs';
-UPDATE entity_changes SET entityId = 'openNoteContexts' WHERE entityName = 'options' AND entityId = 'openTabs';
@@ -1 +0,0 @@
-SELECT 1;
@@ -1,14 +0,0 @@
-UPDATE blobs SET blobId = REPLACE(blobId, '+', 'X');
-UPDATE blobs SET blobId = REPLACE(blobId, '/', 'Y');
-
-UPDATE notes SET blobId = REPLACE(blobId, '+', 'X');
-UPDATE notes SET blobId = REPLACE(blobId, '/', 'Y');
-
-UPDATE attachments SET blobId = REPLACE(blobId, '+', 'X');
-UPDATE attachments SET blobId = REPLACE(blobId, '/', 'Y');
-
-UPDATE revisions SET blobId = REPLACE(blobId, '+', 'X');
-UPDATE revisions SET blobId = REPLACE(blobId, '/', 'Y');
-
-UPDATE entity_changes SET entityId = REPLACE(entityId, '+', 'X') WHERE entityName = 'blobs';
-UPDATE entity_changes SET entityId = REPLACE(entityId, '/', 'Y') WHERE entityName = 'blobs';
@@ -1,3 +0,0 @@
-CREATE INDEX IF NOT EXISTS IDX_notes_blobId on notes (blobId);
-CREATE INDEX IF NOT EXISTS IDX_revisions_blobId on revisions (blobId);
-CREATE INDEX IF NOT EXISTS IDX_attachments_blobId on attachments (blobId);
@@ -1 +0,0 @@
-UPDATE attributes SET value = 'contentAndAttachmentsAndRevisionsSize' WHERE name = 'orderBy' AND value = 'noteSize';
@@ -1,2 +0,0 @@
--- emergency disabling of image compression since it appears to make problems in migration to 0.61
-UPDATE options SET value = 'false' WHERE name = 'compressImages';
@@ -1,17 +0,0 @@
--- + is normally replaced by X and / by Y, but this can temporarily cause UNIQUE key exception
--- this might create blob duplicates, but cleanup will eventually take care of it
-
-UPDATE blobs SET blobId = REPLACE(blobId, '+', 'A');
-UPDATE blobs SET blobId = REPLACE(blobId, '/', 'B');
-
-UPDATE notes SET blobId = REPLACE(blobId, '+', 'A');
-UPDATE notes SET blobId = REPLACE(blobId, '/', 'B');
-
-UPDATE attachments SET blobId = REPLACE(blobId, '+', 'A');
-UPDATE attachments SET blobId = REPLACE(blobId, '/', 'B');
-
-UPDATE revisions SET blobId = REPLACE(blobId, '+', 'A');
-UPDATE revisions SET blobId = REPLACE(blobId, '/', 'B');
-
-UPDATE entity_changes SET entityId = REPLACE(entityId, '+', 'A') WHERE entityName = 'blobs';
-UPDATE entity_changes SET entityId = REPLACE(entityId, '/', 'B') WHERE entityName = 'blobs';
@@ -1,14 +0,0 @@
--- Add the oauth user data table
-CREATE TABLE IF NOT EXISTS "user_data"
-(
-    tmpID INT,
-    username TEXT,
-    email TEXT,
-    userIDEncryptedDataKey TEXT,
-    userIDVerificationHash TEXT,
-    salt TEXT,
-    derivedKey TEXT,
-    isSetup TEXT DEFAULT "false",
-    UNIQUE (tmpID),
-    PRIMARY KEY (tmpID)
-);
@@ -1,46 +0,0 @@
--- Add tables for vector embeddings storage and management
--- This migration adds embedding support to the main document.db database
-
--- Store embeddings for notes
-CREATE TABLE IF NOT EXISTS "note_embeddings" (
-    "embedId" TEXT NOT NULL PRIMARY KEY,
-    "noteId" TEXT NOT NULL,
-    "providerId" TEXT NOT NULL,
-    "modelId" TEXT NOT NULL,
-    "dimension" INTEGER NOT NULL,
-    "embedding" BLOB NOT NULL,
-    "version" INTEGER NOT NULL DEFAULT 1,
-    "dateCreated" TEXT NOT NULL,
-    "utcDateCreated" TEXT NOT NULL,
-    "dateModified" TEXT NOT NULL,
-    "utcDateModified" TEXT NOT NULL
-);
-
-CREATE INDEX "IDX_note_embeddings_noteId" ON "note_embeddings" ("noteId");
-CREATE INDEX "IDX_note_embeddings_providerId_modelId" ON "note_embeddings" ("providerId", "modelId");
-
--- Table to track which notes need embedding updates
-CREATE TABLE IF NOT EXISTS "embedding_queue" (
-    "noteId" TEXT NOT NULL PRIMARY KEY,
-    "operation" TEXT NOT NULL, -- CREATE, UPDATE, DELETE
-    "dateQueued" TEXT NOT NULL,
-    "utcDateQueued" TEXT NOT NULL,
-    "priority" INTEGER NOT NULL DEFAULT 0,
-    "attempts" INTEGER NOT NULL DEFAULT 0,
-    "lastAttempt" TEXT NULL,
-    "error" TEXT NULL,
-    "failed" INTEGER NOT NULL DEFAULT 0,
-    "isProcessing" INTEGER NOT NULL DEFAULT 0
-);
-
--- Table to store embedding provider configurations
-CREATE TABLE IF NOT EXISTS "embedding_providers" (
-    "providerId" TEXT NOT NULL PRIMARY KEY,
-    "name" TEXT NOT NULL,
-    "priority" INTEGER NOT NULL DEFAULT 0,
-    "config" TEXT NOT NULL, -- JSON config object
-    "dateCreated" TEXT NOT NULL,
-    "utcDateCreated" TEXT NOT NULL,
-    "dateModified" TEXT NOT NULL,
-    "utcDateModified" TEXT NOT NULL
-);
@@ -1,5 +0,0 @@
-CREATE TABLE IF NOT EXISTS sessions (
-    id TEXT PRIMARY KEY,
-    data TEXT,
-    expires INTEGER
-);
@@ -3,8 +3,9 @@
   <h2>Steps</h2>
   <ul>
     <li>SSH into your server</li>
-    <li>use <code>wget</code> (or <code>curl</code>) to download latest <code>TriliumNextNotes-Server-[VERSION]-linux-x64.tar.xz</code> (notice <code>-Server</code> suffix)
-      on your server.</li>
+    <li>use <code>wget</code> (or <code>curl</code>) to download latest <code>TriliumNextNotes-Server-[VERSION]-linux-x64.tar.xz</code> (copy
+      link from <a href="https://github.com/TriliumNext/Notes/releases">release page</a>,
+      notice <code>-Server</code> suffix) on your server.</li>
     <li>unpack the archive, e.g. using <code>tar -xf -d TriliumNextNotes-Server-[VERSION]-linux-x64.tar.xz</code>
     </li>
     <li><code>cd trilium-linux-x64-server</code>
@@ -17,7 +18,9 @@
 <p>The problem with above steps is that once you close the SSH connection,
   the Trilium process is terminated. To avoid that, you have two options:</p>
 <ul>
-  <li>Kill it (with e.g. <kbd>Ctrl</kbd> + <kbd>C</kbd>) and run again like this: <code>nohup ./trilium &</code>.</li>
+  <li>Kill it (with e.g. <kbd>Ctrl</kbd> + <kbd>C</kbd>) and run again like this: <code>nohup ./trilium.sh &</code>.
+    (<code>nohup</code> keeps the process running after you disconnect, <code>&</code> runs
+    it in the background)</li>
   <li>Configure systemd to automatically run Trilium in the background on every
     boot</li>
 </ul>
@@ -19,7 +19,7 @@
 
         <div id="toast-container" class="d-flex flex-column justify-content-center align-items-center"></div>
 
-        <div class="dropdown-menu dropdown-menu-sm" id="context-menu-container"></div>
+        <div class="dropdown-menu dropdown-menu-sm" id="context-menu-container" style="display: none"></div>
 
         <%- include("./partials/windowGlobal.ejs", locals) %>
 
@@ -1,5 +1,5 @@
-import sql from "../../../services/sql.js";
-import utils from "../../../services/utils.js";
+import sql from "../services/sql.js";
+import utils from "../services/utils.js";
 
 interface NoteContentsRow {
     noteId: string;
@@ -1,8 +1,8 @@
-import becca from "../../../becca/becca.js";
-import becca_loader from "../../../becca/becca_loader.js";
-import cls from "../../../services/cls.js";
-import log from "../../../services/log.js";
-import sql from "../../../services/sql.js";
+import becca from "../becca/becca.js";
+import becca_loader from "../becca/becca_loader.js";
+import cls from "../services/cls.js";
+import log from "../services/log.js";
+import sql from "../services/sql.js";
 
 export default () => {
     cls.init(() => {
apps/server/src/migrations/migrations.ts (new file, 295 lines)
@@ -0,0 +1,295 @@
+/**
+ * @module
+ *
+ * Contains all the migrations that are run on the database.
+ */
+
+// Migrations should be kept in descending order, so the latest migration is first.
+const MIGRATIONS: (SqlMigration | JsMigration)[] = [
+    // Session store
+    {
+        version: 231,
+        sql: /*sql*/`\
+            CREATE TABLE IF NOT EXISTS sessions (
+                id TEXT PRIMARY KEY,
+                data TEXT,
+                expires INTEGER
+            );
+        `
+    },
+    // Add tables for vector embeddings storage and management
+    // This migration adds embedding support to the main document.db database
+    {
+        version: 230,
+        sql: /*sql*/`\
+            -- Store embeddings for notes
+            CREATE TABLE IF NOT EXISTS "note_embeddings" (
+                "embedId" TEXT NOT NULL PRIMARY KEY,
+                "noteId" TEXT NOT NULL,
+                "providerId" TEXT NOT NULL,
+                "modelId" TEXT NOT NULL,
+                "dimension" INTEGER NOT NULL,
+                "embedding" BLOB NOT NULL,
+                "version" INTEGER NOT NULL DEFAULT 1,
+                "dateCreated" TEXT NOT NULL,
+                "utcDateCreated" TEXT NOT NULL,
+                "dateModified" TEXT NOT NULL,
+                "utcDateModified" TEXT NOT NULL
+            );
+
+            CREATE INDEX "IDX_note_embeddings_noteId" ON "note_embeddings" ("noteId");
+            CREATE INDEX "IDX_note_embeddings_providerId_modelId" ON "note_embeddings" ("providerId", "modelId");
+
+            -- Table to track which notes need embedding updates
+            CREATE TABLE IF NOT EXISTS "embedding_queue" (
+                "noteId" TEXT NOT NULL PRIMARY KEY,
+                "operation" TEXT NOT NULL, -- CREATE, UPDATE, DELETE
+                "dateQueued" TEXT NOT NULL,
+                "utcDateQueued" TEXT NOT NULL,
+                "priority" INTEGER NOT NULL DEFAULT 0,
+                "attempts" INTEGER NOT NULL DEFAULT 0,
+                "lastAttempt" TEXT NULL,
+                "error" TEXT NULL,
+                "failed" INTEGER NOT NULL DEFAULT 0,
+                "isProcessing" INTEGER NOT NULL DEFAULT 0
+            );
+
+            -- Table to store embedding provider configurations
+            CREATE TABLE IF NOT EXISTS "embedding_providers" (
+                "providerId" TEXT NOT NULL PRIMARY KEY,
+                "name" TEXT NOT NULL,
+                "priority" INTEGER NOT NULL DEFAULT 0,
+                "config" TEXT NOT NULL, -- JSON config object
+                "dateCreated" TEXT NOT NULL,
+                "utcDateCreated" TEXT NOT NULL,
+                "dateModified" TEXT NOT NULL,
+                "utcDateModified" TEXT NOT NULL
+            );
+        `
+    },
+
+    // add the oauth user data table
+    {
+        version: 229,
+        sql: /*sql*/`\
+            CREATE TABLE IF NOT EXISTS "user_data"
+            (
+                tmpID INT,
+                username TEXT,
+                email TEXT,
+                userIDEncryptedDataKey TEXT,
+                userIDVerificationHash TEXT,
+                salt TEXT,
+                derivedKey TEXT,
+                isSetup TEXT DEFAULT "false",
+                UNIQUE (tmpID),
+                PRIMARY KEY (tmpID)
+            );
+        `
+    },
+    // fix blob IDs
+    {
+        version: 228,
+        sql: /*sql*/`\
+            -- + is normally replaced by X and / by Y, but this can temporarily cause UNIQUE key exception
+            -- this might create blob duplicates, but cleanup will eventually take care of it
+
+            UPDATE blobs SET blobId = REPLACE(blobId, '+', 'A');
+            UPDATE blobs SET blobId = REPLACE(blobId, '/', 'B');
+
+            UPDATE notes SET blobId = REPLACE(blobId, '+', 'A');
+            UPDATE notes SET blobId = REPLACE(blobId, '/', 'B');
+
+            UPDATE attachments SET blobId = REPLACE(blobId, '+', 'A');
+            UPDATE attachments SET blobId = REPLACE(blobId, '/', 'B');
+
+            UPDATE revisions SET blobId = REPLACE(blobId, '+', 'A');
+            UPDATE revisions SET blobId = REPLACE(blobId, '/', 'B');
+
+            UPDATE entity_changes SET entityId = REPLACE(entityId, '+', 'A') WHERE entityName = 'blobs';
+            UPDATE entity_changes SET entityId = REPLACE(entityId, '/', 'B') WHERE entityName = 'blobs';
+        `
+    },
+    // disable image compression
+    {
+        version: 227,
+        sql: /*sql*/`\
+            -- emergency disabling of image compression since it appears to make problems in migration to 0.61
+            UPDATE options SET value = 'false' WHERE name = 'compressImages';
+        `
+    },
+    // rename note size label
+    {
+        version: 226,
+        sql: /*sql*/`\
+            UPDATE attributes SET value = 'contentAndAttachmentsAndRevisionsSize' WHERE name = 'orderBy' AND value = 'noteSize';
+        `
+    },
+    // create blob ID indices
+    {
+        version: 225,
+        sql: /*sql*/`\
+            CREATE INDEX IF NOT EXISTS IDX_notes_blobId on notes (blobId);
+            CREATE INDEX IF NOT EXISTS IDX_revisions_blobId on revisions (blobId);
+            CREATE INDEX IF NOT EXISTS IDX_attachments_blobId on attachments (blobId);
+        `
+    },
+    // fix blob IDs
+    {
+        version: 224,
+        sql: /*sql*/`\
+            UPDATE blobs SET blobId = REPLACE(blobId, '+', 'X');
+            UPDATE blobs SET blobId = REPLACE(blobId, '/', 'Y');
+
+            UPDATE notes SET blobId = REPLACE(blobId, '+', 'X');
+            UPDATE notes SET blobId = REPLACE(blobId, '/', 'Y');
+
+            UPDATE attachments SET blobId = REPLACE(blobId, '+', 'X');
+            UPDATE attachments SET blobId = REPLACE(blobId, '/', 'Y');
+
+            UPDATE revisions SET blobId = REPLACE(blobId, '+', 'X');
+            UPDATE revisions SET blobId = REPLACE(blobId, '/', 'Y');
+
+            UPDATE entity_changes SET entityId = REPLACE(entityId, '+', 'X') WHERE entityName = 'blobs';
+            UPDATE entity_changes SET entityId = REPLACE(entityId, '/', 'Y') WHERE entityName = 'blobs';
+        `
+    },
+    // no operation
+    {
+        version: 223,
+        sql: /*sql*/`\
+            SELECT 1;
+        `
+    },
+    // rename open tabs to open note contexts
+    {
+        version: 222,
+        sql: /*sql*/`\
+            UPDATE options SET name = 'openNoteContexts' WHERE name = 'openTabs';
+            UPDATE entity_changes SET entityId = 'openNoteContexts' WHERE entityName = 'options' AND entityId = 'openTabs';
+        `
+    },
+    // remove hide included images option
+    {
+        version: 221,
+        sql: /*sql*/`\
+            DELETE FROM options WHERE name = 'hideIncludedImages_main';
+            DELETE FROM entity_changes WHERE entityName = 'options' AND entityId = 'hideIncludedImages_main';
+        `
+    },
+    // migrate images to attachments
+    {
+        version: 220,
+        module: () => import("./0220__migrate_images_to_attachments.js")
+    },
+    // attachments
+    {
+        version: 219,
+        sql: /*sql*/`\
+            CREATE TABLE IF NOT EXISTS "attachments"
+            (
+                attachmentId TEXT not null primary key,
+                ownerId TEXT not null,
+                role TEXT not null,
+                mime TEXT not null,
+                title TEXT not null,
+                isProtected INT not null DEFAULT 0,
+                position INT default 0 not null,
+                blobId TEXT DEFAULT null,
+                dateModified TEXT NOT NULL,
+                utcDateModified TEXT not null,
+                utcDateScheduledForErasureSince TEXT DEFAULT NULL,
+                isDeleted INT not null,
+                deleteId TEXT DEFAULT NULL);
+
+            CREATE INDEX IDX_attachments_ownerId_role
+                on attachments (ownerId, role);
+
+            CREATE INDEX IDX_attachments_utcDateScheduledForErasureSince
+                on attachments (utcDateScheduledForErasureSince);
+
+            CREATE INDEX IF NOT EXISTS IDX_attachments_blobId on attachments (blobId);
+        `
+    },
+    // rename note revision to revision
+    {
+        version: 218,
+        sql: /*sql*/`\
+            CREATE TABLE IF NOT EXISTS "revisions" (
+                revisionId TEXT NOT NULL PRIMARY KEY,
+                noteId TEXT NOT NULL,
+                type TEXT DEFAULT '' NOT NULL,
+                mime TEXT DEFAULT '' NOT NULL,
+                title TEXT NOT NULL,
+                isProtected INT NOT NULL DEFAULT 0,
+                blobId TEXT DEFAULT NULL,
+                utcDateLastEdited TEXT NOT NULL,
+                utcDateCreated TEXT NOT NULL,
+                utcDateModified TEXT NOT NULL,
+                dateLastEdited TEXT NOT NULL,
+                dateCreated TEXT NOT NULL
+            );
+
+            INSERT INTO revisions (revisionId, noteId, type, mime, title, isProtected, utcDateLastEdited, utcDateCreated, utcDateModified, dateLastEdited, dateCreated, blobId)
+                SELECT noteRevisionId, noteId, type, mime, title, isProtected, utcDateLastEdited, utcDateCreated, utcDateModified, dateLastEdited, dateCreated, blobId FROM note_revisions;
+
+            DROP TABLE note_revisions;
+
+            CREATE INDEX IDX_revisions_noteId ON revisions (noteId);
+            CREATE INDEX IDX_revisions_utcDateCreated ON revisions (utcDateCreated);
+            CREATE INDEX IDX_revisions_utcDateLastEdited ON revisions (utcDateLastEdited);
+            CREATE INDEX IDX_revisions_dateCreated ON revisions (dateCreated);
+            CREATE INDEX IDX_revisions_dateLastEdited ON revisions (dateLastEdited);
+            CREATE INDEX IF NOT EXISTS IDX_revisions_blobId on revisions (blobId);
+
+            UPDATE entity_changes SET entityName = 'revisions' WHERE entityName = 'note_revisions';
+        `
+    },
+    // drop content tables
+    {
+        version: 217,
+        sql: /*sql*/`\
+            DROP TABLE note_contents;
+            DROP TABLE note_revision_contents;
+
+            DELETE FROM entity_changes WHERE entityName IN ('note_contents', 'note_revision_contents');
+        `
+    },
+    {
+        version: 216,
+        module: async () => import("./0216__move_content_into_blobs.js")
+    },
+    // content structure
+    {
+        version: 215,
+        sql: /*sql*/`\
+            CREATE TABLE IF NOT EXISTS "blobs" (
+                blobId TEXT NOT NULL,
+                content TEXT NULL DEFAULT NULL,
+                dateModified TEXT NOT NULL,
+                utcDateModified TEXT NOT NULL,
+                PRIMARY KEY (blobId)
+            );
+
+            ALTER TABLE notes ADD blobId TEXT DEFAULT NULL;
+            ALTER TABLE note_revisions ADD blobId TEXT DEFAULT NULL;
+
+            CREATE INDEX IF NOT EXISTS IDX_notes_blobId on notes (blobId);
+            CREATE INDEX IF NOT EXISTS IDX_note_revisions_blobId on note_revisions (blobId);
+        `
+    }
+];
+
+export default MIGRATIONS;
+
+interface Migration {
+    version: number;
+}
+
+interface SqlMigration extends Migration {
+    sql: string;
+}
+
+interface JsMigration extends Migration {
+    module: () => Promise<{ default: () => void }>;
+}
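Note: with the directory scan gone, registering a migration now means prepending an entry to the MIGRATIONS array, which is kept in descending version order. A minimal sketch of the two supported shapes follows; the version number, SQL statement and module file name are illustrative assumptions, not part of this diff:

    // Hypothetical SqlMigration entry: inline SQL, executed as a script.
    {
        version: 232,
        sql: /*sql*/`\
            ALTER TABLE embedding_providers ADD COLUMN isEnabled INT NOT NULL DEFAULT 1;
        `
    },
    // Hypothetical JsMigration entry: the module is imported ahead of time so
    // that its default export can later run synchronously.
    {
        version: 232,
        module: () => import("./0232__example_migration.js")
    },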
@@ -72,7 +72,7 @@ export default function buildLaunchBarConfig() {
             id: "_lbLlmChat",
             title: t("hidden-subtree.llm-chat-title"),
             type: "launcher",
-            command: "createAiChat",
+            builtinWidget: "aiChatLauncher",
             icon: "bx bx-bot",
             attributes: [
                 { type: "label", name: "desktopOnly" }
@@ -1,25 +1,19 @@
 import backupService from "./backup.js";
 import sql from "./sql.js";
-import fs from "fs";
 import log from "./log.js";
 import { crash } from "./utils.js";
-import resourceDir from "./resource_dir.js";
 import appInfo from "./app_info.js";
 import cls from "./cls.js";
 import { t } from "i18next";
-import { join } from "path";
+import MIGRATIONS from "../migrations/migrations.js";
 
 interface MigrationInfo {
     dbVersion: number;
-    name: string;
-    file: string;
-    type: "sql" | "js" | "ts" | string;
     /**
-     * Contains the JavaScript/TypeScript migration as a callback method that must be called to trigger the migration.
-     * The method cannot be async since it runs in an SQL transaction.
-     * For SQL migrations, this value is falsy.
+     * If string, then the migration is an SQL script that will be executed.
+     * If a function, then the migration is a JavaScript/TypeScript module that will be executed.
      */
-    module?: () => void;
+    migration: string | (() => void);
 }
 
 async function migrate() {
@@ -37,7 +31,6 @@ async function migrate() {
     );
 
     const migrations = await prepareMigrations(currentDbVersion);
-    migrations.sort((a, b) => a.dbVersion - b.dbVersion);
 
     // all migrations are executed in one transaction - upgrade either succeeds, or the user can stay at the old version
     // otherwise if half of the migrations succeed, user can't use any version - DB is too "new" for the old app,
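The comment above captures the design rationale: all pending migrations run inside one transaction, so an upgrade either succeeds completely or leaves the database usable at the old version. A rough sketch of that shape, assuming the transactional wrapper and execute helper exposed by services/sql.js and the dbVersion option used elsewhere in the codebase:

    // Sketch only: any throw inside the callback rolls back every migration.
    function executeMigrations(migrations: MigrationInfo[]) {
        sql.transactional(() => {
            for (const mig of migrations) {
                executeMigration(mig);
                // Record progress so the stored version reflects the last applied migration.
                sql.execute("UPDATE options SET value = ? WHERE name = 'dbVersion'", [mig.dbVersion.toString()]);
            }
        });
    }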
@@ -76,53 +69,37 @@
 }
 
 async function prepareMigrations(currentDbVersion: number): Promise<MigrationInfo[]> {
-    const migrationFiles = fs.readdirSync(resourceDir.MIGRATIONS_DIR) ?? [];
+    MIGRATIONS.sort((a, b) => a.version - b.version);
     const migrations: MigrationInfo[] = [];
 
-    for (const file of migrationFiles) {
-        const match = file.match(/^([0-9]{4})__([a-zA-Z0-9_ ]+)\.(sql|js|ts)$/);
-        if (!match) {
-            continue;
-        }
-
-        const dbVersion = parseInt(match[1]);
+    for (const migration of MIGRATIONS) {
+        const dbVersion = migration.version;
         if (dbVersion > currentDbVersion) {
-            const name = match[2];
-            const type = match[3];
-
-            const migration: MigrationInfo = {
-                dbVersion: dbVersion,
-                name: name,
-                file: file,
-                type: type
-            };
-
-            if (type === "js" || type === "ts") {
+            if ("sql" in migration) {
+                migrations.push({
+                    dbVersion,
+                    migration: migration.sql
+                });
+            } else {
                 // Due to ESM imports, the migration file needs to be imported asynchronously and thus cannot be loaded at migration time (since migration is not asynchronous).
                 // As such we have to preload the ESM.
-                // Going back to the original approach but making it webpack-compatible
-                const importPath = join(resourceDir.MIGRATIONS_DIR, file);
-                migration.module = (await import(importPath)).default;
+                migrations.push({
+                    dbVersion,
+                    migration: (await migration.module()).default
+                });
             }
-
-            migrations.push(migration);
         }
     }
     return migrations;
 }
 
-function executeMigration(mig: MigrationInfo) {
-    if (mig.module) {
-        console.log("Migration with JS module");
-        mig.module();
-    } else if (mig.type === "sql") {
-        const migrationSql = fs.readFileSync(`${resourceDir.MIGRATIONS_DIR}/${mig.file}`).toString("utf8");
-
-        console.log(`Migration with SQL script: ${migrationSql}`);
-
-        sql.executeScript(migrationSql);
+function executeMigration({ migration }: MigrationInfo) {
+    if (typeof migration === "string") {
+        console.log(`Migration with SQL script: ${migration}`);
+        sql.executeScript(migration);
     } else {
-        throw new Error(`Unknown migration type '${mig.type}'`);
+        console.log("Migration with JS module");
+        migration();
     }
 }
 
 function getDbVersion() {
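The preload comment in prepareMigrations is the crux of this refactor: executeMigration runs synchronously inside the migration transaction, while a dynamic import() always returns a Promise, so the module has to be resolved during preparation. Compressed to its two phases (names assumed from the diff above):

    // Phase 1 (async, during preparation): resolve the ESM module up front.
    const jsMigration = (await migration.module()).default; // now a plain () => void
    // Phase 2 (sync, inside the transaction): call it without awaiting.
    jsMigration();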
@@ -14,16 +14,8 @@ if (!fs.existsSync(DB_INIT_DIR)) {
     process.exit(1);
 }
 
-const MIGRATIONS_DIR = path.resolve(DB_INIT_DIR, "migrations");
-
-if (!fs.existsSync(MIGRATIONS_DIR)) {
-    log.error(`Could not find migration directory: ${MIGRATIONS_DIR}`);
-    process.exit(1);
-}
-
 export default {
     RESOURCE_DIR,
-    MIGRATIONS_DIR,
     DB_INIT_DIR,
     ELECTRON_APP_ROOT_DIR
 };
@@ -1,4 +1,3 @@
-#!/usr/bin/env node
 import fs from "fs";
 import http from "http";
 import https from "https";