test: add puterjs test to api-tester (#1590)

Author: Xiaochen Cui
Date: 2025-09-20 04:27:47 +08:00
Committed by: GitHub
Parent: 9d0199f858
Commit: c8500ba1ab

6 changed files with 165 additions and 16 deletions

tools/api-tester/README.md

@@ -1,6 +1,6 @@
 # API Tester
-A test framework for testing the backend API of puter.
+A test framework for testing the puter HTTP API and puterjs API.
 ## Table of Contents
@@ -37,64 +37,70 @@ All commands below should be run from the root directory of puter.
 - token: The token of the user. (can be obtained by logging in on the web page and typing `puter.authToken` in the Developer Tools console)
 - mountpoints: The mountpoints to test. (the default config includes two mountpoints: `/` for the "puter fs provider" and `/admin/tmp` for the "memory fs provider"; a minimal example config is sketched after these steps)
-3. Run all tests (unit tests and benchmarks):
+3. Run tests against the HTTP API (unit tests and benchmarks):
 ```bash
-node ./tools/api-tester/apitest.js --config=./tools/api-tester/config.yml
+node ./tools/api-tester/apitest.js
 ```
+4. (experimental) Run tests against the puter-js client:
+```bash
+node ./tools/api-tester/apitest.js --puterjs
+```
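For reference, a minimal config might look like the sketch below. Only `token` and `mountpoints` are described above; the per-mountpoint `path` key is an assumption based on the mountpoint object apitest.js builds, not something this commit pins down.

```yaml
# a minimal sketch of ./tools/api-tester/config.yml -- keys other than
# `token` and `mountpoints` (and the exact mountpoint shape) are assumptions
token: "<paste the value of puter.authToken here>"
mountpoints:
  - path: /           # puter fs provider
  - path: /admin/tmp  # memory fs provider
```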
 ### Shorthands
-- Run all tests (unit tests and benchmarks):
+- Run tests against the HTTP API (unit tests and benchmarks):
 ```bash
-node ./tools/api-tester/apitest.js --config=./tools/api-tester/config.yml
+node ./tools/api-tester/apitest.js
 ```
-- Run all unit tests:
+- Run unit tests against the HTTP API:
 ```bash
-node ./tools/api-tester/apitest.js --config=./tools/api-tester/config.yml --unit
+node ./tools/api-tester/apitest.js --unit
 ```
-- Run all benchmarks:
+- Run benchmarks against the HTTP API:
 ```bash
-node ./tools/api-tester/apitest.js --config=./tools/api-tester/config.yml --bench
+node ./tools/api-tester/apitest.js --bench
 ```
 - Filter tests by suite name:
 ```bash
-node ./tools/api-tester/apitest.js --config=./tools/api-tester/config.yml --unit --suite=mkdir
+node ./tools/api-tester/apitest.js --unit --suite=mkdir
 ```
 - Filter benchmarks by name:
 ```bash
-node ./tools/api-tester/apitest.js --config=./tools/api-tester/config.yml --bench --suite=stat_intensive_1
+node ./tools/api-tester/apitest.js --bench --suite=stat_intensive_1
 ```
 - Stop on first failure:
 ```bash
-node ./tools/api-tester/apitest.js --config=./tools/api-tester/config.yml --unit --stop-on-failure
+node ./tools/api-tester/apitest.js --unit --stop-on-failure
 ```
 - (unimplemented) Filter tests by test name:
 ```bash
 # (wildcard matching) Run tests containing "memoryfs" in the name
-node ./tools/api-tester/apitest.js --config=./tools/api-tester/config.yml --unit --test='*memoryfs*'
+node ./tools/api-tester/apitest.js --unit --test='*memoryfs*'
 # (exact matching) Run the test "mkdir in memoryfs"
-node ./tools/api-tester/apitest.js --config=./tools/api-tester/config.yml --unit --test='mkdir in memoryfs'
+node ./tools/api-tester/apitest.js --unit --test='mkdir in memoryfs'
 ```
 - (unimplemented) Rerun failed tests in the last run:
 ```bash
-node ./tools/api-tester/apitest.js --config=./tools/api-tester/config.yml --rerun-failed
+node ./tools/api-tester/apitest.js --rerun-failed
 ```
 ## Basic Concepts

tools/api-tester/apitest.js

@@ -9,7 +9,7 @@ const { parseArgs } = require('node:util');
 const args = process.argv.slice(2);
-let config, report, suiteName, onlycase, bench, unit, stopOnFailure, id;
+let config, report, suiteName, onlycase, bench, unit, stopOnFailure, id, puterjs;
 try {
     const parsed = parseArgs({
@@ -26,6 +26,7 @@ try {
             unit: { type: 'boolean' },
             suite: { type: 'string' },
             'stop-on-failure': { type: 'boolean' },
+            puterjs: { type: 'boolean' },
         },
         allowPositionals: true,
     });
@@ -38,6 +39,7 @@ try {
         unit,
         suite: suiteName,
         'stop-on-failure': stopOnFailure,
+        puterjs,
     }, positionals: [id] } = parsed);
     onlycase = onlycase !== undefined ? Number.parseInt(onlycase) : undefined;
@@ -50,6 +52,7 @@ try {
         '\n' +
         'Options:\n' +
         '  --config=<path>      (required) Path to configuration file\n' +
+        '  --puterjs            (optional) Run tests against the puter-js client\n' +
         '  --report=<path>      (optional) Output file for full test results\n' +
         '  --suite=<name>       (optional) Run only tests with matching suite name\n' +
         '  --stop-on-failure    (optional) Stop execution on first test failure\n' +
@@ -62,6 +65,27 @@ const conf = YAML.parse(fs.readFileSync(config).toString());
 const main = async () => {
+    if (puterjs) {
+        const context = {
+            mountpoint: {
+                path: '/',
+            }
+        };
+        const ts = new TestSDK(conf, context, {});
+        const registry = new TestRegistry(ts);
+        await require('./puter_js/__entry__.js')(registry);
+        await registry.run_all_tests();
+        // await run(conf);
+        ts.printTestResults();
+        ts.printBenchmarkResults();
+        process.exit(0);
+        return;
+    }
     const unit_test_results = [];
     const benchmark_results = [];
     for (const mountpoint of conf.mountpoints) {
@@ -112,6 +136,68 @@ const main = async () => {
    console.log("==================== nightly build results end ====================")
}
/**
 * Run test using the given config, and return the test results
 *
 * @param {Object} options
 * @param {Object} options.mountpoint
 * @returns {Promise<Object>}
 */
async function test({ mountpoint }) {
    const context = {
        options: {
            onlycase,
            suite: suiteName,
        }
    };
    const ts = new TestSDK(conf, context);
    try {
        await ts.delete('api_test', { recursive: true });
    } catch (e) {
    }
    // hard-coded identifier for ci script
    console.log("==================== nightly build results begin ====================")
    // print unit test results
    let tbl = {};
    for ( const result of unit_test_results ) {
        tbl[result.name + ' - ' + result.settings] = {
            passed: result.caseCount - result.failCount,
            failed: result.failCount,
            total: result.caseCount,
            'duration (s)': result.duration ? result.duration.toFixed(2) : 'N/A',
        }
    }
    console.table(tbl);
    // print benchmark results
    if (benchmark_results.length > 0) {
        tbl = {};
        for ( const result of benchmark_results ) {
            const fs_provider = result.fs_provider || 'unknown';
            tbl[result.name + ' - ' + fs_provider] = {
                'duration (s)': result.duration ? (result.duration / 1000).toFixed(2) : 'N/A',
            }
        }
        console.table(tbl);
        // print description of each benchmark since it's too long to fit in the table
        const seen = new Set();
        for ( const result of benchmark_results ) {
            if ( seen.has(result.name) ) continue;
            seen.add(result.name);
            if ( result.description ) {
                console.log(result.name + ': ' + result.description);
            }
        }
    }
    // hard-coded identifier for ci script
    console.log("==================== nightly build results end ====================")
}
/**
 * Run test using the given config, and return the test results
 *

tools/api-tester/puter_js/__entry__.js

@@ -0,0 +1,25 @@
const load_puterjs = require('./load.cjs');

// Kept for the commented-out `await run(conf)` path in apitest.js; the
// registry-based export below is what --puterjs actually uses.
async function run(conf) {
    const puter = await load_puterjs();
    if (conf.token) {
        puter.setAuthToken(conf.token);
    } else {
        throw new Error('No token found in config file. Please add a "token" field to your config.yml');
    }
}

module.exports = async registry => {
    const puter = await load_puterjs();
    if (registry.t?.conf?.token) {
        puter.setAuthToken(registry.t.conf.token);
    } else {
        throw new Error('No token found in config file. Please add a "token" field to your config.yml');
    }
    // expose the loaded client to tests as t.puter
    registry.t.puter = puter;
    require('./auth/__entry__.js')(registry);
};

tools/api-tester/puter_js/auth/__entry__.js

@@ -0,0 +1,3 @@
module.exports = registry => {
    registry.add_test('whoami', require('./whoami.js'));
};
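Additional puterjs suites would hook in the same way; a minimal sketch, assuming the same `add_test(name, module)` registry API used above (the `get_user` module is hypothetical, not part of this commit):

```js
// hypothetical: registering a second test alongside whoami
module.exports = registry => {
    registry.add_test('whoami', require('./whoami.js'));
    registry.add_test('get_user', require('./get_user.js')); // hypothetical module
};
```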

tools/api-tester/puter_js/auth/whoami.js

@@ -0,0 +1,16 @@
const chai = require('chai');
chai.use(require('chai-as-promised'));
const expect = chai.expect;

module.exports = {
    name: 'whoami',
    description: 'a demo test for puterjs',
    do: async t => {
        const puter = t.puter;
        await t.case('demo (whoami)', async () => {
            const result = await puter.auth.whoami();
            expect(result.username).to.equal('admin');
        });
    }
};
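Note that `chai-as-promised` is loaded above but not exercised by this case; it is what would let a case assert on a rejected promise. A hedged sketch of such a case (hypothetical, not part of the commit, and it assumes `puter.auth.whoami()` rejects when the token is invalid):

```js
// hypothetical extra case using chai-as-promised's promise assertions
await t.case('whoami rejects with a bad token (hypothetical)', async () => {
    puter.setAuthToken('not-a-real-token'); // assumption: this invalidates the session
    await expect(puter.auth.whoami()).to.be.rejected;
});
```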

tools/api-tester/puter_js/load.cjs

@@ -0,0 +1,13 @@
const vm = require('vm');

async function load_puterjs() {
    // Copy the host globals into a fresh sandbox object so the fetched
    // script sees a browser-ish global environment.
    const goodContext = {};
    Object.getOwnPropertyNames(globalThis).forEach(name => {
        try { goodContext[name] = globalThis[name]; } catch { }
    });
    goodContext.globalThis = goodContext;
    // Fetch the puter.js bundle from the local dev server and evaluate it
    // inside the sandbox; the script attaches `puter` to its global object.
    const code = await fetch("http://puter.localhost:4100/puter.js/v2").then(res => res.text());
    const context = vm.createContext(goodContext);
    vm.runInContext(code, context);
    return goodContext.puter;
}

module.exports = load_puterjs;
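A standalone usage sketch (hypothetical driver, not part of the commit; it assumes the dev server behind the URL above is running and that a valid token is in the `PUTER_TOKEN` environment variable):

```js
// hypothetical: drive the vm-loaded client directly, outside the test harness
const load_puterjs = require('./load.cjs');

(async () => {
    const puter = await load_puterjs();
    puter.setAuthToken(process.env.PUTER_TOKEN); // the harness reads this from config.yml instead
    console.log(await puter.auth.whoami());
})();
```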