Revert "dev: memory filesystem"

This reverts commit 1f6bbe1672.
Author: KernelDeimos
Date: 2025-08-27 14:26:03 -04:00
parent 1f6bbe1672
commit 6f3bace4c4
22 changed files with 47 additions and 955 deletions

View File

@@ -20,7 +20,6 @@ const CoreModule = require("./src/CoreModule.js");
const { Kernel } = require("./src/Kernel.js");
const DatabaseModule = require("./src/DatabaseModule.js");
const LocalDiskStorageModule = require("./src/LocalDiskStorageModule.js");
const MemoryStorageModule = require("./src/MemoryStorageModule.js");
const SelfHostedModule = require("./src/modules/selfhosted/SelfHostedModule.js");
const { testlaunch } = require("./src/index.js");
const BaseService = require("./src/services/BaseService.js");
@@ -74,7 +73,6 @@ module.exports = {
WebModule,
DatabaseModule,
LocalDiskStorageModule,
MemoryStorageModule,
SelfHostedModule,
TestDriversModule,
PuterAIModule,

View File

@@ -1,27 +0,0 @@
/*
* Copyright (C) 2024-present Puter Technologies Inc.
*
* This file is part of Puter.
*
* Puter is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
class MemoryStorageModule {
async install (context) {
const services = context.get('services');
const MemoryStorageService = require("./services/MemoryStorageService");
services.registerService('memory-storage', MemoryStorageService);
}
}
module.exports = MemoryStorageModule;

View File

@@ -17,7 +17,6 @@
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
const { URLSearchParams } = require("node:url");
const config = require("../config");
const { quot } = require('@heyputer/putility').libs.string;
/**
@@ -519,9 +518,8 @@ module.exports = class APIError {
* is set to null. The first argument is used as the status code.
*
* @static
* @param {number|string} status
* @param {object} source
* @param {string|Error|object} fields one of the following:
* @param {number} status
* @param {string|Error} message_or_source one of the following:
* - a string to use as the error message
* - an Error object to use as the source of the error
* - an object with a message property to use as the error message

View File

@@ -288,7 +288,7 @@ module.exports = class FSNodeContext {
controls,
});
if ( ! entry ) {
if ( entry === null ) {
this.found = false;
this.entry = false;
} else {

View File

@@ -159,8 +159,8 @@ class HLCopy extends HLFilesystemOperation {
throw APIError.create('source_and_dest_are_the_same');
}
if ( await is_ancestor_of(source.uid, parent.uid) ) {
throw APIError.create('cannot_copy_item_into_itself');
if ( await is_ancestor_of(source.mysql_id, parent.mysql_id) ) {
throw APIError('cannot_copy_item_into_itself');
}
let overwritten;

View File

@@ -287,7 +287,7 @@ class HLMkdir extends HLFilesystemOperation {
// "top_parent" is the immediate parent of the target directory
// (e.g: /home/foo/bar -> /home/foo)
const top_parent = values.create_missing_parents
? await this._create_dir(parent_node)
? await this._create_top_parent({ top_parent: parent_node })
: await this._get_existing_top_parent({ top_parent: parent_node })
;
@@ -331,14 +331,12 @@ class HLMkdir extends HLFilesystemOperation {
});
}
else if ( dedupe_name ) {
const fs = context.get('services').get('filesystem');
const parent_selector = parent_node.selector;
const fsEntryFetcher = context.get('services').get('fsEntryFetcher');
for ( let i=1 ;; i++ ) {
let try_new_name = `${target_basename} (${i})`;
const selector = new NodeChildSelector(parent_selector, try_new_name);
const exists = await parent_node.provider.quick_check({
selector,
});
const exists = await fsEntryFetcher.nameExistsUnderParent(
existing.entry.parent_uid, try_new_name
);
if ( ! exists ) {
target_basename = try_new_name;
break;
@@ -470,24 +468,16 @@ class HLMkdir extends HLFilesystemOperation {
return node;
}
/**
* Creates a directory and all its ancestors.
*
* @param {FSNodeContext} dir - The directory to create.
* @returns {Promise<FSNodeContext>} The created directory.
*/
async _create_dir (dir) {
console.log('CREATING DIR', dir.selector.describe());
if ( await dir.exists() ) {
if ( ! dir.entry.is_dir ) {
async _create_top_parent ({ top_parent }) {
if ( await top_parent.exists() ) {
if ( ! top_parent.entry.is_dir ) {
throw APIError.create('dest_is_not_a_directory');
}
return dir;
return top_parent;
}
const maybe_path_selector =
dir.get_selector_of_type(NodePathSelector);
top_parent.get_selector_of_type(NodePathSelector);
if ( ! maybe_path_selector ) {
throw APIError.create('dest_does_not_exist');

View File

@@ -18,7 +18,6 @@
*/
const APIError = require("../../api/APIError");
const { Sequence } = require("../../codex/Sequence");
const { MemoryFSProvider } = require("../../modules/puterfs/customfs/MemoryFSProvider");
const { DB_WRITE } = require("../../services/database/consts");
const { buffer_to_stream } = require("../../util/streamutil");
@@ -116,13 +115,10 @@ class LLRead extends LLFilesystemOperation {
},
async function create_S3_read_stream (a) {
const context = a.iget('context');
const storage = context.get('storage');
const { fsNode, version_id, offset, length, has_range, range } = a.values();
const svc_mountpoint = context.get('services').get('mountpoint');
const provider = await svc_mountpoint.get_provider(fsNode.selector);
const storage = svc_mountpoint.get_storage(provider.constructor);
// Empty object here is in the case of the local filesystem,
// where s3:location will return null.
// TODO: storage interface shouldn't have S3-specific properties.
@@ -134,7 +130,6 @@ class LLRead extends LLFilesystemOperation {
bucket_region: location.bucket_region,
version_id,
key: location.key,
memory_file: fsNode.entry,
...(range? {range} : (has_range ? {
range: `bytes=${offset}-${offset+length-1}`
} : {})),
@@ -149,11 +144,8 @@ class LLRead extends LLFilesystemOperation {
const { fsNode, stream, has_range, range} = a.values();
if ( ! has_range ) {
// only cache for non-memoryfs providers
if ( ! (fsNode.provider instanceof MemoryFSProvider) ) {
const res = await svc_fileCache.maybe_store(fsNode, stream);
if ( res.stream ) a.set('stream', res.stream);
}
const res = await svc_fileCache.maybe_store(fsNode, stream);
if ( res.stream ) a.set('stream', res.stream);
}
},
async function return_stream (a) {

View File

@@ -17,7 +17,6 @@
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
const APIError = require("../../api/APIError");
const { MemoryFSProvider } = require("../../modules/puterfs/customfs/MemoryFSProvider");
const { ParallelTasks } = require("../../util/otelutil");
const FSNodeContext = require("../FSNodeContext");
const { NodeUIDSelector } = require("../node/selectors");
@@ -103,27 +102,14 @@ class LLRmDir extends LLFilesystemOperation {
}
await tasks.awaitAll();
// TODO (xiaochen): consolidate these two branches
if ( target.provider instanceof MemoryFSProvider ) {
await target.provider.rmdir( {
if ( ! descendants_only ) {
await target.provider.rmdir({
context,
node: target,
options: {
recursive,
descendants_only,
ignore_not_empty: true,
},
} );
} else {
if ( ! descendants_only ) {
await target.provider.rmdir( {
context,
node: target,
options: {
ignore_not_empty: true,
},
} );
}
});
}
}
}

View File

@@ -89,11 +89,7 @@ class NodeChildSelector {
setPropertiesKnownBySelector (node) {
node.name = this.name;
try_infer_attributes(this);
if ( this.path ) {
node.path = this.path;
}
// no properties known
}
describe () {
@@ -149,30 +145,6 @@ class NodeRawEntrySelector {
}
}
/**
* Try to infer following attributes for a selector:
* - path
* - uid
*
* @param {NodePathSelector | NodeUIDSelector | NodeChildSelector | RootNodeSelector | NodeRawEntrySelector} selector
*/
function try_infer_attributes (selector) {
if ( selector instanceof NodePathSelector ) {
selector.path = selector.value;
} else if ( selector instanceof NodeUIDSelector ) {
selector.uid = selector.value;
} else if ( selector instanceof NodeChildSelector ) {
try_infer_attributes(selector.parent);
if ( selector.parent.path ) {
selector.path = _path.join(selector.parent.path, selector.name);
}
} else if ( selector instanceof RootNodeSelector ) {
selector.path = '/';
} else {
// give up
}
}
const relativeSelector = (parent, path) => {
if ( path === '.' ) return parent;
if ( path.startsWith('..') ) {
@@ -197,5 +169,4 @@ module.exports = {
RootNodeSelector,
NodeRawEntrySelector,
relativeSelector,
try_infer_attributes,
};

View File

@@ -966,38 +966,7 @@ const body_parser_error_handler = (err, req, res, next) => {
next();
}
/**
* Given a uid, returns a file node.
*
* TODO (xiaochen): It only works for MemoryFSProvider currently.
*
* @param {string} uid - The uid of the file to get.
* @returns {Promise<MemoryFile|null>} The file node, or null if the file does not exist.
*/
async function get_entry(uid) {
const svc_mountpoint = Context.get('services').get('mountpoint');
const uid_selector = new NodeUIDSelector(uid);
const provider = await svc_mountpoint.get_provider(uid_selector);
// NB: We cannot import MemoryFSProvider here because it will cause a circular dependency.
if ( provider.constructor.name !== 'MemoryFSProvider' ) {
return null;
}
return provider.stat({
selector: uid_selector,
});
}
async function is_ancestor_of(ancestor_uid, descendant_uid){
const ancestor = await get_entry(ancestor_uid);
const descendant = await get_entry(descendant_uid);
if ( ancestor && descendant ) {
return descendant.path.startsWith(ancestor.path);
}
/** @type BaseDatabaseAccessService */
const db = services.get('database').get(DB_READ, 'filesystem');

View File

@@ -222,12 +222,4 @@ module.exports = class DatabaseFSEntryFetcher extends BaseService {
);
return !! check_dupe[0];
}
async nameExistsUnderParentID (parent_id, name) {
const parent = await this.findByID(parent_id);
if ( ! parent ) {
return false;
}
return this.nameExistsUnderParent(parent.uuid, name);
}
}

View File

@@ -19,7 +19,7 @@
*/
// const Mountpoint = o => ({ ...o });
const { RootNodeSelector, NodeUIDSelector, NodeChildSelector, NodePathSelector, NodeInternalIDSelector, NodeSelector, try_infer_attributes } = require("../../filesystem/node/selectors");
const { RootNodeSelector, NodeUIDSelector } = require("../../filesystem/node/selectors");
const BaseService = require("../../services/BaseService");
/**
@@ -57,9 +57,8 @@ class MountpointService extends BaseService {
* @returns {Promise<void>}
*/
async _init () {
// key: provider class (e.g: PuterFSProvider, MemoryFSProvider)
// value: storage instance
this.storage_ = {};
// Temporary solution - we'll develop this incrementally
this.storage_ = null;
}
async ['__on_boot.consolidation'] () {
@@ -88,32 +87,12 @@ class MountpointService extends BaseService {
}
async get_provider (selector) {
try_infer_attributes(selector);
if ( selector instanceof RootNodeSelector ) {
return this.mountpoints_['/'].provider;
}
if ( selector instanceof NodeUIDSelector ) {
for ( const [path, { provider }] of Object.entries(this.mountpoints_) ) {
const result = await provider.quick_check({
selector,
});
if ( result ) {
return provider;
}
}
// No provider found, but we shouldn't throw an error here
// because it's a valid case for a node that doesn't exist.
}
if ( selector instanceof NodeChildSelector ) {
if ( selector.path ) {
return this.get_provider(new NodePathSelector(selector.path));
} else {
return this.get_provider(selector.parent);
}
return this.mountpoints_['/'].provider;
}
const probe = {};
@@ -139,16 +118,15 @@ class MountpointService extends BaseService {
}
// Temporary solution - we'll develop this incrementally
set_storage (provider, storage) {
this.storage_[provider] = storage;
set_storage (storage) {
this.storage_ = storage;
}
/**
* Gets the current storage backend instance
* @returns {Object} The storage backend instance
*/
get_storage (provider) {
return this.storage_[provider];
get_storage () {
return this.storage_;
}
}

View File

@@ -40,9 +40,6 @@ class PuterFSModule extends AdvancedBase {
const DatabaseFSEntryFetcher = require("./DatabaseFSEntryFetcher");
services.registerService('fsEntryFetcher', DatabaseFSEntryFetcher);
const { MemoryFSService } = require('./customfs/MemoryFSService');
services.registerService('memoryfs', MemoryFSService);
}
}

View File

@@ -1,603 +0,0 @@
/*
* Copyright (C) 2024-present Puter Technologies Inc.
*
* This file is part of Puter.
*
* Puter is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
const FSNodeContext = require('../../../filesystem/FSNodeContext');
const _path = require('path');
const { Context } = require('../../../util/context');
const { v4: uuidv4 } = require('uuid');
const config = require('../../../config');
const {
NodeChildSelector,
NodePathSelector,
NodeUIDSelector,
NodeRawEntrySelector,
RootNodeSelector,
try_infer_attributes,
} = require('../../../filesystem/node/selectors');
const fsCapabilities = require('../../../filesystem/definitions/capabilities');
const APIError = require('../../../api/APIError');
class MemoryFile {
/**
* @param {Object} param
* @param {string} param.path - Relative path from the mountpoint.
* @param {boolean} param.is_dir
* @param {Buffer|null} param.content - The content of the file, `null` if the file is a directory.
* @param {string|null} [param.parent_uid] - UID of parent directory; null for root.
*/
constructor({ path, is_dir, content, parent_uid = null }) {
this.uuid = uuidv4();
this.is_public = true;
this.path = path;
this.name = _path.basename(path);
this.is_dir = is_dir;
this.content = content;
// parent_uid should reflect the actual parent's uid; null for root
this.parent_uid = parent_uid;
// TODO (xiaochen): return sensible values for "user_id", currently
// it must be 2 (admin) to pass the test.
this.user_id = 2;
// TODO (xiaochen): return sensible values for following fields
this.id = 123;
this.parent_id = 123;
this.immutable = 0;
this.is_shortcut = 0;
this.is_symlink = 0;
this.symlink_path = null;
this.created = Math.floor(Date.now() / 1000);
this.accessed = Math.floor(Date.now() / 1000);
this.modified = Math.floor(Date.now() / 1000);
this.size = is_dir ? 0 : content ? content.length : 0;
}
}
class MemoryFSProvider {
constructor(mountpoint) {
this.mountpoint = mountpoint;
// key: relative path from the mountpoint, always starts with `/`
// value: entry uuid
this.entriesByPath = new Map();
// key: entry uuid
// value: entry (MemoryFile)
//
// We declare 2 maps to support 2 lookup APIs: by-path/by-uuid.
this.entriesByUUID = new Map();
const root = new MemoryFile({
path: '/',
is_dir: true,
content: null,
parent_uid: null,
});
this.entriesByPath.set('/', root.uuid);
this.entriesByUUID.set(root.uuid, root);
}
/**
* Get the capabilities of this filesystem provider.
*
* @returns {Set} - Set of capabilities supported by this provider.
*/
get_capabilities() {
return new Set([
fsCapabilities.READDIR_UUID_MODE,
fsCapabilities.UUID,
fsCapabilities.READ,
fsCapabilities.WRITE,
fsCapabilities.COPY_TREE,
]);
}
/**
* Normalize the path to be relative to the mountpoint. Returns `/` if the path is empty/undefined.
*
* @param {string} path - The path to normalize.
* @returns {string} - The normalized path, always starts with `/`.
*/
_inner_path(path) {
if (!path) {
return '/';
}
if (path.startsWith(this.mountpoint)) {
path = path.slice(this.mountpoint.length);
}
if (!path.startsWith('/')) {
path = '/' + path;
}
return path;
}
/**
* Check the integrity of the whole memory filesystem. Throws error if any violation is found.
*
* @returns {Promise<void>}
*/
_integrity_check() {
if (config.env !== 'dev') {
// only check in dev mode since it's expensive
return;
}
// check the 2 maps are consistent
if (this.entriesByPath.size !== this.entriesByUUID.size) {
throw new Error('Path map and UUID map have different sizes');
}
for (const [inner_path, uuid] of this.entriesByPath) {
const entry = this.entriesByUUID.get(uuid);
// entry should exist
if (!entry) {
throw new Error(`Entry ${uuid} does not exist`);
}
// path should match
if (this._inner_path(entry.path) !== inner_path) {
throw new Error(`Path ${inner_path} does not match entry ${uuid}`);
}
// uuid should match
if (entry.uuid !== uuid) {
throw new Error(`UUID ${uuid} does not match entry ${entry.uuid}`);
}
// parent should exist
if (entry.parent_uid) {
const parent_entry = this.entriesByUUID.get(entry.parent_uid);
if (!parent_entry) {
throw new Error(`Parent ${entry.parent_uid} does not exist`);
}
}
// parent's path should be a prefix of the entry's path
if (entry.parent_uid) {
const parent_entry = this.entriesByUUID.get(entry.parent_uid);
if (!entry.path.startsWith(parent_entry.path)) {
throw new Error(
`Parent ${entry.parent_uid} path ${parent_entry.path} is not a prefix of entry ${entry.path}`,
);
}
}
// parent should be a directory
if (entry.parent_uid) {
const parent_entry = this.entriesByUUID.get(entry.parent_uid);
if (!parent_entry.is_dir) {
throw new Error(`Parent ${entry.parent_uid} is not a directory`);
}
}
}
}
/**
* Check if a given node exists.
*
* @param {Object} param
* @param {NodePathSelector | NodeUIDSelector | NodeChildSelector | RootNodeSelector | NodeRawEntrySelector} param.selector - The selector used for checking.
* @returns {Promise<boolean>} - True if the node exists, false otherwise.
*/
async quick_check({ selector }) {
if (selector instanceof NodePathSelector) {
const inner_path = this._inner_path(selector.value);
return this.entriesByPath.has(inner_path);
}
if (selector instanceof NodeUIDSelector) {
return this.entriesByUUID.has(selector.value);
}
// fallback to stat
const entry = await this.stat({ selector });
return !!entry;
}
/**
* Performs a stat operation using the given selector.
*
* NB: Some returned fields currently contain placeholder values, and the
* `path` is the absolute path from the root.
*
* @param {Object} param
* @param {NodePathSelector | NodeUIDSelector | NodeChildSelector | RootNodeSelector | NodeRawEntrySelector} param.selector - The selector to stat.
* @returns {Promise<MemoryFile|null>} - The result of the stat operation, or `null` if the node doesn't exist.
*/
async stat({ selector }) {
try_infer_attributes(selector);
let entry_uuid = null;
if (selector instanceof NodePathSelector) {
// stat by path
const inner_path = this._inner_path(selector.value);
entry_uuid = this.entriesByPath.get(inner_path);
} else if (selector instanceof NodeUIDSelector) {
// stat by uid
entry_uuid = selector.value;
} else if (selector instanceof NodeChildSelector) {
if (selector.path) {
// Shouldn't care about the parent when the "path" is present
// since it might have a different provider.
return await this.stat({
selector: new NodePathSelector(selector.path),
});
} else {
// recursively stat the parent and then stat the child
const parent_entry = await this.stat({
selector: selector.parent,
});
if (parent_entry) {
const full_path = _path.join(parent_entry.path, selector.name);
return await this.stat({
selector: new NodePathSelector(full_path),
});
}
}
} else {
// other selectors shouldn't reach here, i.e., it's an internal logic error
throw APIError.create('invalid_node');
}
const entry = this.entriesByUUID.get(entry_uuid);
if (!entry) {
return null;
}
// Return a copied entry with the full path, since external code only cares
// about full paths.
const copied_entry = { ...entry };
copied_entry.path = _path.join(this.mountpoint, entry.path);
return copied_entry;
}
/**
* Read directory contents.
*
* @param {Object} param
* @param {Context} param.context - The context of the operation.
* @param {FSNodeContext} param.node - The directory node to read.
* @returns {Promise<string[]>} - Array of child UUIDs.
*/
async readdir({ context, node }) {
// prerequisites: get required path via stat
const entry = await this.stat({ selector: node.selector });
if (!entry) {
throw APIError.create('invalid_node');
}
const inner_path = this._inner_path(entry.path);
const child_uuids = [];
// Find all entries that are direct children of this directory
for (const [path, uuid] of this.entriesByPath) {
if (path === inner_path) {
continue; // Skip the directory itself
}
const dirname = _path.dirname(path);
if (dirname === inner_path) {
child_uuids.push(uuid);
}
}
return child_uuids;
}
/**
* Create a new directory.
*
* @param {Object} param
* @param {Context} param.context - The context of the operation.
* @param {FSNodeContext} param.parent - The parent node to create the directory in. Must exist and be a directory.
* @param {string} param.name - The name of the new directory.
* @returns {Promise<FSNodeContext>} - The new directory node.
*/
async mkdir({ context, parent, name }) {
// prerequisites: get required path via stat
const parent_entry = await this.stat({ selector: parent.selector });
if (!parent_entry) {
throw APIError.create('invalid_node');
}
const full_path = _path.join(parent_entry.path, name);
const inner_path = this._inner_path(full_path);
let entry = null;
if (this.entriesByPath.has(inner_path)) {
throw APIError.create('item_with_same_name_exists', null, {
entry_name: full_path,
});
} else {
entry = new MemoryFile({
path: inner_path,
is_dir: true,
content: null,
parent_uid: parent_entry.uuid,
});
this.entriesByPath.set(inner_path, entry.uuid);
this.entriesByUUID.set(entry.uuid, entry);
}
// create the node
const fs = context.get('services').get('filesystem');
const node = await fs.node(entry.uuid);
await node.fetchEntry();
this._integrity_check();
return node;
}
/**
* Remove a directory.
*
* @param {Object} param
* @param {Context} param.context
* @param {FSNodeContext} param.node: The directory to remove.
* @param {Object} param.options: The options for the operation.
* @returns {Promise<void>}
*/
async rmdir({ context, node, options = {} }) {
this._integrity_check();
// prerequisites: get required path via stat
const entry = await this.stat({ selector: node.selector });
if (!entry) {
throw APIError.create('invalid_node');
}
const inner_path = this._inner_path(entry.path);
// for mode: non-recursive
if (!options.recursive) {
const children = await this.readdir({ context, node });
if (children.length > 0) {
throw APIError.create('not_empty');
}
}
// remove all descendants
for (const [other_inner_path, other_entry_uuid] of this.entriesByPath) {
if (other_entry_uuid === entry.uuid) {
// skip the directory itself
continue;
}
if (other_inner_path.startsWith(inner_path)) {
this.entriesByPath.delete(other_inner_path);
this.entriesByUUID.delete(other_entry_uuid);
}
}
// for mode: non-descendants-only
if (!options.descendants_only) {
// remove the directory itself
this.entriesByPath.delete(inner_path);
this.entriesByUUID.delete(entry.uuid);
}
this._integrity_check();
}
/**
* Remove a file.
*
* @param {Object} param
* @param {Context} param.context
* @param {FSNodeContext} param.node: The file to remove.
* @returns {Promise<void>}
*/
async unlink({ context, node }) {
// prerequisites: get required path via stat
const entry = await this.stat({ selector: node.selector });
if (!entry) {
throw APIError.create('invalid_node');
}
const inner_path = this._inner_path(entry.path);
this.entriesByPath.delete(inner_path);
this.entriesByUUID.delete(entry.uuid);
}
/**
* Move a file.
*
* @param {Object} param
* @param {Context} param.context
* @param {FSNodeContext} param.node: The file to move.
* @param {FSNodeContext} param.new_parent: The new parent directory of the file.
* @param {string} param.new_name: The new name of the file.
* @param {Object} param.metadata: The metadata of the file.
* @returns {Promise<MemoryFile>}
*/
async move({ context, node, new_parent, new_name, metadata }) {
// prerequisites: get required path via stat
const new_parent_entry = await this.stat({ selector: new_parent.selector });
if (!new_parent_entry) {
throw APIError.create('invalid_node');
}
// create the new entry
const new_full_path = _path.join(new_parent_entry.path, new_name);
const new_inner_path = this._inner_path(new_full_path);
const entry = new MemoryFile({
path: new_inner_path,
is_dir: node.entry.is_dir,
content: node.entry.content,
parent_uid: new_parent_entry.uuid,
});
entry.uuid = node.entry.uuid;
this.entriesByPath.set(new_inner_path, entry.uuid);
this.entriesByUUID.set(entry.uuid, entry);
// remove the old entry
const inner_path = this._inner_path(node.path);
this.entriesByPath.delete(inner_path);
// NB: should not delete the entry by uuid because uuid does not change
// after the move.
this._integrity_check();
return entry;
}
/**
* Copy a tree of files and directories.
*
* @param {Object} param
* @param {Context} param.context
* @param {FSNodeContext} param.source - The source node to copy.
* @param {FSNodeContext} param.parent - The parent directory for the copy.
* @param {string} param.target_name - The name for the copied item.
* @returns {Promise<FSNodeContext>} - The copied node.
*/
async copy_tree({ context, source, parent, target_name }) {
const fs = context.get('services').get('filesystem');
if (source.entry.is_dir) {
// Create the directory
const new_dir = await this.mkdir({ context, parent, name: target_name });
// Copy all children
const children = await this.readdir({ context, node: source });
for (const child_uuid of children) {
const child_node = await fs.node(new NodeUIDSelector(child_uuid));
await child_node.fetchEntry();
const child_name = child_node.entry.name;
await this.copy_tree({
context,
source: child_node,
parent: new_dir,
target_name: child_name,
});
}
return new_dir;
} else {
// Copy the file
const new_file = await this.write_new({
context,
parent,
name: target_name,
file: { stream: { read: () => source.entry.content } },
});
return new_file;
}
}
/**
* Write a new file to the filesystem. Throws an error if the destination
* already exists.
*
* @param {Object} param
* @param {Context} param.context
* @param {FSNodeContext} param.parent: The parent directory of the destination file.
* @param {string} param.name: The name of the destination file.
* @param {Object} param.file: The file to write.
* @returns {Promise<FSNodeContext>}
*/
async write_new({ context, parent, name, file }) {
// prerequisites: get required path via stat
const parent_entry = await this.stat({ selector: parent.selector });
if (!parent_entry) {
throw APIError.create('invalid_node');
}
const full_path = _path.join(parent_entry.path, name);
const inner_path = this._inner_path(full_path);
let entry = null;
if (this.entriesByPath.has(inner_path)) {
throw APIError.create('item_with_same_name_exists', null, {
entry_name: full_path,
});
} else {
entry = new MemoryFile({
path: inner_path,
is_dir: false,
content: file.stream.read(),
parent_uid: parent_entry.uuid,
});
this.entriesByPath.set(inner_path, entry.uuid);
this.entriesByUUID.set(entry.uuid, entry);
}
const fs = context.get('services').get('filesystem');
const node = await fs.node(entry.uuid);
await node.fetchEntry();
this._integrity_check();
return node;
}
/**
* Overwrite an existing file. Throws an error if the destination does not
* exist.
*
* @param {Object} param
* @param {Context} param.context
* @param {FSNodeContext} param.node: The node to write to.
* @param {Object} param.file: The file to write.
* @returns {Promise<FSNodeContext>}
*/
async write_overwrite({ context, node, file }) {
const entry = await this.stat({ selector: node.selector });
if (!entry) {
throw APIError.create('invalid_node');
}
const inner_path = this._inner_path(entry.path);
this.entriesByPath.set(inner_path, entry.uuid);
let original_entry = this.entriesByUUID.get(entry.uuid);
if (!original_entry) {
throw new Error(`File ${entry.path} does not exist`);
} else {
if (original_entry.is_dir) {
throw new Error(`Cannot overwrite a directory`);
}
original_entry.content = file.stream.read();
original_entry.modified = Math.floor(Date.now() / 1000);
original_entry.size = original_entry.content ? original_entry.content.length : 0;
this.entriesByUUID.set(entry.uuid, original_entry);
}
const fs = context.get('services').get('filesystem');
node = await fs.node(original_entry.uuid);
await node.fetchEntry();
this._integrity_check();
return node;
}
}
module.exports = {
MemoryFSProvider,
};

View File

@@ -1,41 +0,0 @@
/*
* Copyright (C) 2024-present Puter Technologies Inc.
*
* This file is part of Puter.
*
* Puter is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
const BaseService = require("../../../services/BaseService");
const { MemoryFSProvider } = require("./MemoryFSProvider");
class MemoryFSService extends BaseService {
async _init () {
const svc_mountpoint = this.services.get('mountpoint');
svc_mountpoint.register_mounter('memoryfs', this.as('mounter'));
}
static IMPLEMENTS = {
mounter: {
async mount ({ path, options }) {
const provider = new MemoryFSProvider(path);
return provider;
}
}
}
}
module.exports = {
MemoryFSService,
};

View File

@@ -1,15 +0,0 @@
# Custom FS Providers
This directory contains custom FS providers that are not part of the core PuterFS.
## MemoryFSProvider
This is a demo FS provider that illustrates how to implement a custom FS provider.
## NullFSProvider
An FS provider that mimics `/dev/null`.
## LinuxFSProvider
Provides the ability to mount a Linux directory as an FS provider.
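For illustration only (not part of this commit): a minimal sketch of the provider interface these classes implement, using the method names and capability constants from the MemoryFSProvider shown above. The `ExampleNullFSProvider` name and its behavior are hypothetical and are not the repository's actual NullFSProvider.

```js
const fsCapabilities = require('../../../filesystem/definitions/capabilities');
const { NodePathSelector } = require('../../../filesystem/node/selectors');

// Illustrative sketch: a provider in the spirit of the NullFSProvider
// described above, which accepts writes and discards them.
class ExampleNullFSProvider {
    constructor (mountpoint) {
        // Absolute path this provider is mounted at, e.g. '/null'.
        this.mountpoint = mountpoint;
    }

    // Advertise which operations this provider supports.
    get_capabilities () {
        return new Set([
            fsCapabilities.READ,
            fsCapabilities.WRITE,
        ]);
    }

    // Cheap existence check; only the mountpoint root "exists" here.
    async quick_check ({ selector }) {
        if ( selector instanceof NodePathSelector ) {
            return selector.value === this.mountpoint;
        }
        return false;
    }

    // Return an entry-like object, or null if the node doesn't exist.
    async stat ({ selector }) {
        if ( ! await this.quick_check({ selector }) ) return null;
        return {
            path: this.mountpoint,
            is_dir: true,
            size: 0,
        };
    }
}

module.exports = { ExampleNullFSProvider };
```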

View File

@@ -22,7 +22,7 @@ const { MultiDetachable } = putility.libs.listener;
const { TDetachable } = putility.traits;
const { TeePromise } = putility.libs.promise;
const { NodeInternalIDSelector, NodeChildSelector, NodeUIDSelector, RootNodeSelector, NodePathSelector, NodeSelector } = require("../../../filesystem/node/selectors");
const { NodeInternalIDSelector, NodeChildSelector, NodeUIDSelector, RootNodeSelector, NodePathSelector } = require("../../../filesystem/node/selectors");
const { Context } = require("../../../util/context");
const fsCapabilities = require('../../../filesystem/definitions/capabilities');
const { UploadProgressTracker } = require('../../../filesystem/storage/UploadProgressTracker');
@@ -66,52 +66,6 @@ class PuterFSProvider extends putility.AdvancedBase {
]);
}
/**
* Check if a given node exists.
*
* @param {Object} param
* @param {NodeSelector} param.selector - The selector used for checking.
* @returns {Promise<boolean>} - True if the node exists, false otherwise.
*/
async quick_check ({
selector,
}) {
// a wrapper that accesses the underlying database directly
const fsEntryFetcher = Context.get('services').get('fsEntryFetcher');
// shortcut: has full path
if ( selector?.path ) {
const entry = await fsEntryFetcher.findByPath(selector.path);
return Boolean(entry);
}
// shortcut: has uid
if ( selector?.uid ) {
const entry = await fsEntryFetcher.findByUID(selector.uid);
return Boolean(entry);
}
// shortcut: parent uid + child name
if ( selector instanceof NodeChildSelector && selector.parent instanceof NodeUIDSelector ) {
return await fsEntryFetcher.nameExistsUnderParent(
selector.parent.uid,
selector.name,
);
}
// shortcut: parent id + child name
if ( selector instanceof NodeChildSelector && selector.parent instanceof NodeInternalIDSelector ) {
return await fsEntryFetcher.nameExistsUnderParentID(
selector.parent.id,
selector.name,
);
}
// TODO (xiaochen): we should fall back to stat, but we cannot at the moment
// since stat requires a valid `FSNodeContext` argument.
return false;
}
async stat ({
selector,
options,
@@ -702,9 +656,9 @@ class PuterFSProvider extends putility.AdvancedBase {
*
* @param {Object} param
* @param {Context} param.context
* @param {FSNodeContext} param.node: The node to write to.
* @param {FSNode} param.node: The node to write to.
* @param {File} param.file: The file to write.
* @returns {Promise<FSNodeContext>}
* @returns {Promise<FSNode>}
*/
async write_overwrite({ context, node, file }) {
const {
@@ -810,7 +764,7 @@ class PuterFSProvider extends putility.AdvancedBase {
const svc_event = svc.get('event');
const svc_mountpoint = svc.get('mountpoint');
const storage = svc_mountpoint.get_storage(this.constructor);
const storage = svc_mountpoint.get_storage();
bucket ??= config.s3_bucket;
bucket_region ??= config.s3_region ?? config.region;

View File

@@ -89,8 +89,6 @@ class DefaultUserService extends BaseService {
);
if ( ! is_default_password ) return;
console.log(`password for admin is: ${tmp_password}`);
// show console widget
this.default_user_widget = ({ is_docker }) => {
if ( is_docker ) {

View File

@@ -24,7 +24,6 @@ const api_error_handler = require('./api_error_handler.js');
const APIError = require('../../../api/APIError.js');
const { Context } = require('../../../util/context.js');
const { subdomain } = require('../../../helpers.js');
const config = require('../../../config.js');
/**
* eggspress() is a factory function for creating express routers.
@@ -170,9 +169,6 @@ module.exports = function eggspress (route, settings, handler) {
return next();
}
}
if ( config.env === 'dev' ) {
console.log(`request url: ${req.url}, body: ${JSON.stringify(req.body)}`);
}
try {
const expected_ctx = res.locals.ctx;
const received_ctx = Context.get(undefined, { allow_fallback: true });
@@ -183,14 +179,18 @@ module.exports = function eggspress (route, settings, handler) {
});
} else await handler(req, res, next);
} catch (e) {
if ( config.env === 'dev' ) {
if (! (e instanceof APIError)) {
// Any non-APIError indicates an unhandled error (i.e. a bug) from the backend.
// We add a dedicated branch to facilitate debugging.
console.error(e);
}
if (e instanceof TypeError || e instanceof ReferenceError) {
// We add a dedicated branch for TypeError/ReferenceError since these usually
// indicate a bug in the backend. It's also convenient for debugging if we
// set a breakpoint here.
//
// Typical TypeError:
// - read properties of undefined
console.error(e);
api_error_handler(e, req, res, next);
} else {
api_error_handler(e, req, res, next);
}
api_error_handler(e, req, res, next);
}
};
if (settings.allowedMethods.includes('GET')) {

View File

@@ -18,7 +18,6 @@
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
const { LocalDiskStorageStrategy } = require("../filesystem/strategies/storage_a/LocalDiskStorageStrategy");
const { PuterFSProvider } = require("../modules/puterfs/lib/PuterFSProvider");
const { TeePromise } = require('@heyputer/putility').libs.promise;
const { progress_stream, size_limit_stream } = require("../util/streamutil");
const BaseService = require("./BaseService");
@@ -53,7 +52,7 @@ class LocalDiskStorageService extends BaseService {
svc_contextInit.register_value('storage', storage);
const svc_mountpoint = this.services.get('mountpoint');
svc_mountpoint.set_storage(PuterFSProvider, storage);
svc_mountpoint.set_storage(storage);
}

View File

@@ -1,42 +0,0 @@
// METADATA // {"ai-commented":{"service":"mistral","model":"mistral-large-latest"}}
/*
* Copyright (C) 2024-present Puter Technologies Inc.
*
* This file is part of Puter.
*
* Puter is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
const BaseService = require("./BaseService");
const { MemoryFSProvider } = require("../modules/puterfs/customfs/MemoryFSProvider");
const { Readable } = require("stream");
class MemoryStorageService extends BaseService {
async _init () {
console.log('MemoryStorageService._init');
const svc_mountpoint = this.services.get('mountpoint');
svc_mountpoint.set_storage(MemoryFSProvider, this);
}
async create_read_stream (uuid, options) {
const memory_file = options?.memory_file;
if ( ! memory_file ) {
throw new Error('MemoryStorageService.create_read_stream: memory_file is required');
}
return Readable.from(memory_file.content);
}
}
module.exports = MemoryStorageService;

View File

@@ -83,7 +83,6 @@ const main = async () => {
EssentialModules,
DatabaseModule,
LocalDiskStorageModule,
MemoryStorageModule,
SelfHostedModule,
BroadcastModule,
TestDriversModule,
@@ -101,7 +100,6 @@ const main = async () => {
}
k.add_module(new DatabaseModule());
k.add_module(new LocalDiskStorageModule());
k.add_module(new MemoryStorageModule());
k.add_module(new SelfHostedModule());
k.add_module(new BroadcastModule());
k.add_module(new TestDriversModule());