mirror of
https://github.com/azukaar/Cosmos-Server.git
synced 2026-01-05 20:05:02 -06:00
[release] v0.17.0-unstable27
@@ -16,6 +16,8 @@
- Improved accessibility of the menu for screen readers
- Formatter now creates GPT partition tables (instead of MBR, which has a 2TB limit)
- Update to Go 1.23.2
- Fix 2-parity on SnapRAID
- Fix mount/unmount request false error
- Added safeguard to prevent Docker from destroying stack containers' hostnames

## Version 0.16.3

@@ -98,6 +98,7 @@ const ConfigManagement = () => {
GeoBlocking: config.BlockedCountries,
CountryBlacklistIsWhitelist: config.CountryBlacklistIsWhitelist,
AutoUpdate: config.AutoUpdate,
BetaUpdates: config.BetaUpdates,

Licence: config.Licence,
ServerToken: config.ServerToken,
@@ -181,7 +182,8 @@ const ConfigManagement = () => {
},
LoggingLevel: values.LoggingLevel,
RequireMFA: values.RequireMFA,
// AutoUpdate: values.AutoUpdate,
AutoUpdate: values.AutoUpdate,
BetaUpdates: values.BetaUpdates,
BlockedCountries: values.GeoBlocking,
CountryBlacklistIsWhitelist: values.CountryBlacklistIsWhitelist,
MonitoringDisabled: !values.MonitoringEnabled,
@@ -287,6 +289,20 @@ const ConfigManagement = () => {
<Alert severity="info">{t('mgmt.config.general.configFileInfo')}</Alert>
</Grid>

<CosmosCheckbox
label={t('mgmt.config.general.autoupdates')}
name="AutoUpdate"
formik={formik}
helperText={t('mgmt.config.general.autoupdates')}
/>

<CosmosCheckbox
label={t('mgmt.config.general.betaupdate')}
name="BetaUpdates"
formik={formik}
helperText={t('mgmt.config.general.betaupdate')}
/>

<CosmosCheckbox
label={t('mgmt.config.general.forceMfaCheckbox.forceMfaLabel')}
name="RequireMFA"

@@ -122,9 +122,7 @@ const MountDialogInternal = ({ unmount, refresh, open, setOpen, data }) => {
</DialogContent>
<DialogActions>
<Button onClick={() => setOpen(false)}>{t('global.cancelAction')}</Button>
<LoadingButton color="primary" variant="contained" type="submit" onClick={() => {
formik.handleSubmit();
}}>{unmount ? t('global.unmount') : t('global.mount')}</LoadingButton>
<LoadingButton color="primary" variant="contained" type="submit">{unmount ? t('global.unmount') : t('global.mount')}</LoadingButton>
</DialogActions>
</form>
</FormikProvider>

@@ -98,9 +98,7 @@ const MountDiskDialogInternal = ({disk, unmount, refresh, open, setOpen }) => {
</DialogContent>
<DialogActions>
<Button onClick={() => setOpen(false)}>Cancel</Button>
<LoadingButton color="primary" variant="contained" type="submit" onClick={() => {
formik.handleSubmit();
}}>{unmount ? t('global.unmount') : t('global.mount')}</LoadingButton>
<LoadingButton color="primary" variant="contained" type="submit">{unmount ? t('global.unmount') : t('global.mount')}</LoadingButton>
</DialogActions>
</form>
</FormikProvider>

@@ -732,6 +732,8 @@
"mgmt.urls.edit.tunneledHostInput.tunneledHostLabel": "Hostname to tunnel from (what is the user facing hostname of the tunnel)",
"mgmt.config.general.licenceInput.licenceLabel": "Licence Key",
"mgmt.config.general.licenceInput.manageLicenceButton": "Manage Licence",
"mgmt.config.general.autoupdates": "Automatically update Cosmos",
"mgmt.config.general.betaupdate": "Include unstable beta updates",
"language.selectLanguage": "Select Language",
"global.downloadLogs": "Download Logs",
"global.restartCosmos": "Restart Cosmos Cloud",

package-lock.json (generated, 183 changed lines)
@@ -1,12 +1,12 @@
{
"name": "cosmos-server",
"version": "0.17.0-unstable12",
"version": "0.17.0-unstable26",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "cosmos-server",
"version": "0.17.0-unstable12",
"version": "0.17.0-unstable26",
"dependencies": {
"@ant-design/colors": "^6.0.0",
"@ant-design/icons": "^4.7.0",
@@ -97,7 +97,8 @@
"eslint-plugin-prettier": "^4.2.1",
"eslint-plugin-react": "^7.31.8",
"eslint-plugin-react-hooks": "4.6.0",
"prettier": "2.7.1"
"prettier": "2.7.1",
"rollup-plugin-visualizer": "^5.12.0"
}
},
"node_modules/@actions/core": {
@@ -9871,6 +9872,182 @@
"fsevents": "~2.3.2"
}
},
"node_modules/rollup-plugin-visualizer": {
"version": "5.12.0",
"resolved": "https://registry.npmjs.org/rollup-plugin-visualizer/-/rollup-plugin-visualizer-5.12.0.tgz",
"integrity": "sha512-8/NU9jXcHRs7Nnj07PF2o4gjxmm9lXIrZ8r175bT9dK8qoLlvKTwRMArRCMgpMGlq8CTLugRvEmyMeMXIU2pNQ==",
"dev": true,
"dependencies": {
"open": "^8.4.0",
"picomatch": "^2.3.1",
"source-map": "^0.7.4",
"yargs": "^17.5.1"
},
"bin": {
"rollup-plugin-visualizer": "dist/bin/cli.js"
},
"engines": {
"node": ">=14"
},
"peerDependencies": {
"rollup": "2.x || 3.x || 4.x"
},
"peerDependenciesMeta": {
"rollup": {
"optional": true
}
}
},
"node_modules/rollup-plugin-visualizer/node_modules/ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dev": true,
"dependencies": {
"color-convert": "^2.0.1"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/rollup-plugin-visualizer/node_modules/cliui": {
"version": "8.0.1",
"resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
"integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==",
"dev": true,
"dependencies": {
"string-width": "^4.2.0",
"strip-ansi": "^6.0.1",
"wrap-ansi": "^7.0.0"
},
"engines": {
"node": ">=12"
}
},
"node_modules/rollup-plugin-visualizer/node_modules/color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dev": true,
"dependencies": {
"color-name": "~1.1.4"
},
"engines": {
"node": ">=7.0.0"
}
},
"node_modules/rollup-plugin-visualizer/node_modules/color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"dev": true
},
"node_modules/rollup-plugin-visualizer/node_modules/define-lazy-prop": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz",
"integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==",
"dev": true,
"engines": {
"node": ">=8"
}
},
"node_modules/rollup-plugin-visualizer/node_modules/is-docker": {
"version": "2.2.1",
"resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz",
"integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==",
"dev": true,
"bin": {
"is-docker": "cli.js"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/rollup-plugin-visualizer/node_modules/open": {
"version": "8.4.2",
"resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz",
"integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==",
"dev": true,
"dependencies": {
"define-lazy-prop": "^2.0.0",
"is-docker": "^2.1.1",
"is-wsl": "^2.2.0"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/rollup-plugin-visualizer/node_modules/source-map": {
"version": "0.7.4",
"resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.4.tgz",
"integrity": "sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA==",
"dev": true,
"engines": {
"node": ">= 8"
}
},
"node_modules/rollup-plugin-visualizer/node_modules/wrap-ansi": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
"integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
"dev": true,
"dependencies": {
"ansi-styles": "^4.0.0",
"string-width": "^4.1.0",
"strip-ansi": "^6.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
}
},
"node_modules/rollup-plugin-visualizer/node_modules/y18n": {
"version": "5.0.8",
"resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",
"integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==",
"dev": true,
"engines": {
"node": ">=10"
}
},
"node_modules/rollup-plugin-visualizer/node_modules/yargs": {
"version": "17.7.2",
"resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
"integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
"dev": true,
"dependencies": {
"cliui": "^8.0.1",
"escalade": "^3.1.1",
"get-caller-file": "^2.0.5",
"require-directory": "^2.1.1",
"string-width": "^4.2.3",
"y18n": "^5.0.5",
"yargs-parser": "^21.1.1"
},
"engines": {
"node": ">=12"
}
},
"node_modules/rollup-plugin-visualizer/node_modules/yargs-parser": {
"version": "21.1.1",
"resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz",
"integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==",
"dev": true,
"engines": {
"node": ">=12"
}
},
"node_modules/run-applescript": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/run-applescript/-/run-applescript-5.0.0.tgz",

@@ -1,6 +1,6 @@
{
"name": "cosmos-server",
"version": "0.17.0-unstable26",
"version": "0.17.0-unstable27",
"description": "",
"main": "test-server.js",
"bugs": {
@@ -138,6 +138,7 @@
"eslint-plugin-prettier": "^4.2.1",
"eslint-plugin-react": "^7.31.8",
"eslint-plugin-react-hooks": "4.6.0",
"prettier": "2.7.1"
"prettier": "2.7.1",
"rollup-plugin-visualizer": "^5.12.0"
}
}

@@ -8,6 +8,7 @@ import (
"errors"
"runtime"
"sync"
"bufio"
"gopkg.in/yaml.v2"
"strings"
"io/ioutil"
@@ -41,68 +42,128 @@ func startNebulaInBackground() error {
defer ProcessMux.Unlock()

NebulaFailedStarting = false

if process != nil {
return errors.New("nebula is already running")
return errors.New("nebula is already running")
}

// if pid file, kill the process
if _, err := os.Stat(utils.CONFIGFOLDER + "nebula.pid"); err == nil {
// read pid
pid, err := ioutil.ReadFile(utils.CONFIGFOLDER + "nebula.pid")
if err != nil {
utils.Error("Constellation: Error reading pid file", err)
} else {
// kill process
pidInt, _ := strconv.Atoi(string(pid))
processToKill, err := os.FindProcess(pidInt)
if err != nil {
utils.Error("Constellation: Error finding process", err)
} else {
err = processToKill.Kill()
if err != nil {
utils.Error("Constellation: Error killing process", err)
}
// Handle existing PID file
pidFile := utils.CONFIGFOLDER + "nebula.pid"
if _, err := os.Stat(pidFile); err == nil {
if err := killExistingProcess(pidFile); err != nil {
utils.Error("Constellation: Failed to kill existing process", err)
// Continue execution as the process might not exist anymore
}
}
}

// Initialize log buffer
logBuffer = &lumberjack.Logger{
Filename: utils.CONFIGFOLDER+"nebula.log",
MaxSize: 1, // megabytes
MaxBackups: 1,
MaxAge: 15, //days
Compress: false,
Filename: utils.CONFIGFOLDER + "nebula.log",
MaxSize: 1, // megabytes
MaxBackups: 1,
MaxAge: 15, //days
Compress: false,
}

// Create and configure the process
process = exec.Command(binaryToRun(), "-config", utils.CONFIGFOLDER+"nebula.yml")

// Set up multi-writer for stderr
process.Stderr = io.MultiWriter(logBuffer, os.Stderr)

if utils.LoggingLevelLabels[utils.GetMainConfig().LoggingLevel] == utils.DEBUG {
// Set up multi-writer for stdout if in debug mode
process.Stdout = io.MultiWriter(logBuffer, os.Stdout)
} else {
process.Stdout = io.MultiWriter(logBuffer)

// Setup stdout and stderr pipes
stdout, err := process.StdoutPipe()
if err != nil {
return fmt.Errorf("failed to create stdout pipe: %s", err)
}
stderr, err := process.StderrPipe()
if err != nil {
return fmt.Errorf("failed to create stderr pipe: %s", err)
}

// Start the process in the background
// Start the process
if err := process.Start(); err != nil {
return err
return fmt.Errorf("failed to start nebula: %s", err)
}

// Handle process output
go handleProcessOutput(stdout, stderr, logBuffer)

// Set process state
NebulaStarted = true

// Monitor process
go monitorNebulaProcess(process)

// save PID
err := ioutil.WriteFile(utils.CONFIGFOLDER+"nebula.pid", []byte(fmt.Sprintf("%d", process.Process.Pid)), 0644)
if err != nil {
utils.Error("Constellation: Error writing PID file", err)
// Save PID
if err := savePID(process.Process.Pid); err != nil {
utils.Error("Constellation: Error writing PID file", err)
// Don't return error as process is already running
}

utils.Log(fmt.Sprintf("%s started with PID %d\n", binaryToRun(), process.Process.Pid))
utils.Log(fmt.Sprintf("%s started with PID %d", binaryToRun(), process.Process.Pid))
return nil
}

func handleProcessOutput(stdout, stderr io.ReadCloser, logBuffer *lumberjack.Logger) {
// Handle stdout
go func() {
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
line := scanner.Text()
utils.VPN(line)
if _, err := logBuffer.Write([]byte(line + "\n")); err != nil {
utils.Error("Failed to write to log buffer", err)
}
}
}()

// Handle stderr
go func() {
scanner := bufio.NewScanner(stderr)
for scanner.Scan() {
line := scanner.Text()
utils.Error("Nebula error", errors.New(line))
if _, err := logBuffer.Write([]byte("ERROR: " + line + "\n")); err != nil {
utils.Error("Failed to write to log buffer", err)
}
}
}()
}

func killExistingProcess(pidFile string) error {
pidBytes, err := ioutil.ReadFile(pidFile)
if err != nil {
return fmt.Errorf("error reading pid file: %w", err)
}

pidInt, err := strconv.Atoi(strings.TrimSpace(string(pidBytes)))
if err != nil {
return fmt.Errorf("invalid pid format: %w", err)
}

process, err := os.FindProcess(pidInt)
if err != nil {
return fmt.Errorf("error finding process: %w", err)
}

if err := process.Kill(); err != nil {
return fmt.Errorf("error killing process: %w", err)
}

// Clean up PID file
if err := os.Remove(pidFile); err != nil {
utils.Error("Failed to remove old PID file", err)
// Continue as this is not critical
}

return nil
}

func savePID(pid int) error {
pidFile := utils.CONFIGFOLDER + "nebula.pid"
pidContent := []byte(fmt.Sprintf("%d", pid))

if err := ioutil.WriteFile(pidFile, pidContent, 0644); err != nil {
return fmt.Errorf("failed to write PID file: %w", err)
}

return nil
}

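For context on the refactor above: handleProcessOutput replaces the previous io.MultiWriter wiring with explicit stdout/stderr pipes drained by bufio.Scanner goroutines. Below is a minimal, self-contained sketch of that pattern; the `echo` command, the print statements, and the `streamLines` helper name are illustrative stand-ins and not part of the commit.

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"os/exec"
	"sync"
)

// streamLines drains r line by line and hands each line to sink
// (a stand-in for the per-stream goroutines in handleProcessOutput).
func streamLines(r io.Reader, sink func(string)) {
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		sink(scanner.Text())
	}
}

func main() {
	// Hypothetical command; the commit runs the nebula binary with its config file.
	cmd := exec.Command("echo", "hello from the child process")

	stdout, err := cmd.StdoutPipe()
	if err != nil {
		panic(err)
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		panic(err)
	}

	if err := cmd.Start(); err != nil {
		panic(err)
	}

	// Fan each stream out to its own handler, then wait for both readers
	// to finish before calling Wait (Wait closes the pipes).
	var wg sync.WaitGroup
	wg.Add(2)
	go func() { defer wg.Done(); streamLines(stdout, func(l string) { fmt.Println("OUT:", l) }) }()
	go func() { defer wg.Done(); streamLines(stderr, func(l string) { fmt.Println("ERR:", l) }) }()
	wg.Wait()

	if err := cmd.Wait(); err != nil {
		fmt.Println("process exited with error:", err)
	}
}
```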
@@ -688,20 +749,46 @@ func generateNebulaCert(name, ip, PK string, saveToFile bool) (string, string, s
defer os.Remove("./temp.key")
}

utils.Debug(cmd.String())

cmd.Stderr = os.Stderr

if utils.LoggingLevelLabels[utils.GetMainConfig().LoggingLevel] == utils.DEBUG {
cmd.Stdout = os.Stdout
} else {
cmd.Stdout = nil
// Get pipes for stdout and stderr
stdout, err := cmd.StdoutPipe()
if err != nil {
return "", "", "", fmt.Errorf("failed to create stdout pipe: %s", err)
}
stderr, err := cmd.StderrPipe()
if err != nil {
return "", "", "", fmt.Errorf("failed to create stderr pipe: %s", err)
}

// Start command
err = cmd.Start()
if err != nil {
return "", "", "", fmt.Errorf("failed to start nebula-cert: %s", err)
}

// Create scanner for stdout
stdoutScanner := bufio.NewScanner(stdout)
go func() {
for stdoutScanner.Scan() {
utils.VPN(stdoutScanner.Text())
}
}()

// Create scanner for stderr
stderrScanner := bufio.NewScanner(stderr)
go func() {
for stderrScanner.Scan() {
utils.Error("nebula-cert error", errors.New(stderrScanner.Text()))
}
}()

// Wait for command to complete
err = cmd.Wait()
if err != nil {
return "", "", "", fmt.Errorf("nebula-cert failed: %s", err)
}

cmd.Run()

if cmd.ProcessState.ExitCode() != 0 {
return "", "", "", fmt.Errorf("nebula-cert exited with an error, check the Cosmos logs")
return "", "", "", fmt.Errorf("nebula-cert exited with an error, check the Cosmos logs")
}

@@ -744,37 +831,98 @@ func generateNebulaCert(name, ip, PK string, saveToFile bool) (string, string, s
return string(certContent), string(keyContent), fingerprint, nil
}

func generateNebulaCACert(name string) (error) {
// if ca.key exists, delete it
if _, err := os.Stat("./ca.key"); err == nil {
os.Remove("./ca.key")
func generateNebulaCACert(name string) error {
// Clean up existing files
for _, file := range []string{"./ca.key", "./ca.crt"} {
if _, err := os.Stat(file); err == nil {
if err := os.Remove(file); err != nil {
return fmt.Errorf("failed to remove existing %s: %s", file, err)
}
}
}
if _, err := os.Stat("./ca.crt"); err == nil {
os.Remove("./ca.crt")
}

// Run the nebula-cert command to generate CA certificate and key
cmd := exec.Command(binaryToRun() + "-cert", "ca", "-name", "\""+name+"\"")

// Run the nebula-cert command to generate CA certificate and key
cmd := exec.Command(binaryToRun()+"-cert", "ca", "-name", "\""+name+"\"")
utils.Debug(cmd.String())

cmd.Stderr = os.Stderr

if utils.LoggingLevelLabels[utils.GetMainConfig().LoggingLevel] == utils.DEBUG {
cmd.Stdout = os.Stdout
} else {
cmd.Stdout = nil
// Get pipes for stdout and stderr
stdout, err := cmd.StdoutPipe()
if err != nil {
return fmt.Errorf("failed to create stdout pipe: %s", err)
}
stderr, err := cmd.StderrPipe()
if err != nil {
return fmt.Errorf("failed to create stderr pipe: %s", err)
}

if err := cmd.Run(); err != nil {
return fmt.Errorf("nebula-cert error: %s", err)
// Start command
if err := cmd.Start(); err != nil {
return fmt.Errorf("failed to start nebula-cert: %s", err)
}

// Handle stdout based on logging level
go func() {
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
utils.VPN(scanner.Text())
}
}()

// Handle stderr
go func() {
scanner := bufio.NewScanner(stderr)
for scanner.Scan() {
utils.Error("nebula-cert error", errors.New(scanner.Text()))
}
}()

// Wait for command to complete
if err := cmd.Wait(); err != nil {
return fmt.Errorf("nebula-cert failed: %s", err)
}

// Move files to config folder with error handling
for _, moveCmd := range []struct{src, dst string}{
{"./ca.crt", utils.CONFIGFOLDER + "ca.crt"},
{"./ca.key", utils.CONFIGFOLDER + "ca.key"},
} {
cmd := exec.Command("mv", moveCmd.src, moveCmd.dst)

// Get pipes for move command
stdout, err := cmd.StdoutPipe()
if err != nil {
return fmt.Errorf("failed to create stdout pipe for move: %s", err)
}
stderr, err := cmd.StderrPipe()
if err != nil {
return fmt.Errorf("failed to create stderr pipe for move: %s", err)
}

// Start move command
if err := cmd.Start(); err != nil {
return fmt.Errorf("failed to start move command: %s", err)
}

// Handle stdout and stderr for move command
go func() {
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
utils.VPN(scanner.Text())
}
}()

go func() {
scanner := bufio.NewScanner(stderr)
for scanner.Scan() {
utils.Error("move command error", errors.New(scanner.Text()))
}
}()

// Wait for move command to complete
if err := cmd.Wait(); err != nil {
return fmt.Errorf("failed to move %s to %s: %s", moveCmd.src, moveCmd.dst, err)
}
}

// copy to /config/ca.*
cmd = exec.Command("mv", "./ca.crt", utils.CONFIGFOLDER + "ca.crt")
cmd.Run()
cmd = exec.Command("mv", "./ca.key", utils.CONFIGFOLDER + "ca.key")
cmd.Run()

return nil
}

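A side note on the file moves above: the new code shells out to `mv` for ca.crt and ca.key and streams its output like any other command. For comparison only, here is a sketch of the same step done with os.Rename from the standard library; this is an alternative, not what the commit does, the paths are hypothetical, and os.Rename fails when source and destination sit on different filesystems (which an external `mv` handles by copying).

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Hypothetical paths mirroring the ca.crt / ca.key moves in the commit.
	moves := []struct{ src, dst string }{
		{"./ca.crt", "/config/ca.crt"},
		{"./ca.key", "/config/ca.key"},
	}

	for _, m := range moves {
		// os.Rename is atomic on the same filesystem but returns an error
		// (e.g. EXDEV) when crossing mount points, unlike the mv command.
		if err := os.Rename(m.src, m.dst); err != nil {
			fmt.Printf("failed to move %s to %s: %v\n", m.src, m.dst, err)
		}
	}
}
```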
@@ -37,6 +37,8 @@ type ConfigJob struct {
Ctx context.Context `json:"-"`
CancelFunc context.CancelFunc `json:"-"`
Container string
Timeout time.Duration
MaxLogs int
}

var jobsList = map[string]map[string]ConfigJob{}
@@ -73,43 +75,78 @@ func getJobsList() map[string]map[string]ConfigJob {

func JobFromCommand(command string, args ...string) func(OnLog func(string), OnFail func(error), OnSuccess func(), ctx context.Context, cancel context.CancelFunc) {
return func(OnLog func(string), OnFail func(error), OnSuccess func(), ctx context.Context, cancel context.CancelFunc) {
// Create a command that respects the provided context
cmd := exec.CommandContext(ctx, command, args...)
done := make(chan bool, 1)
var cmdErr error

// Getting the pipe for standard output
stdout, err := cmd.StdoutPipe()
if err != nil {
OnFail(err)
return
}
go func() {
defer func() {
if r := recover(); r != nil {
OnFail(fmt.Errorf("panic in command execution: %v", r))
}
done <- true
}()

// Getting the pipe for standard error
stderr, err := cmd.StderrPipe()
if err != nil {
OnFail(err)
return
}
cmd := exec.CommandContext(ctx, command, args...)

stdout, err := cmd.StdoutPipe()
if err != nil {
cmdErr = err
return
}

utils.Debug("Running command: " + cmd.String())

// Start the command
if err := cmd.Start(); err != nil {
OnFail(err)
return
}
stderr, err := cmd.StderrPipe()
if err != nil {
cmdErr = err
return
}

// Concurrently read from stdout and stderr
go streamLogs(stdout, OnLog)
go streamLogs(stderr, OnLog)
if err := cmd.Start(); err != nil {
cmdErr = err
return
}

// Wait for the command to finish
err = cmd.Wait()
if err != nil {
OnFail(err)
return
}
// Use buffered channels for log streaming
logsDone := make(chan bool, 2)

go func() {
streamLogs(stdout, OnLog)
logsDone <- true
}()

go func() {
streamLogs(stderr, OnLog)
logsDone <- true
}()

OnSuccess()
// Wait for both log streams to complete
<-logsDone
<-logsDone

if err := cmd.Wait(); err != nil {
cmdErr = err
return
}
}()

// Set default timeout if none specified
// timeout := 240 * time.Hour
// if job, ok := jobsList[schedulerName][jobName]; ok && job.Timeout > 0 {
// timeout = job.Timeout
// }

select {
case <-done:
if cmdErr != nil {
OnFail(cmdErr)
} else {
OnSuccess()
}
// case <-time.After(timeout):
// cancel() // Cancel the context
// OnFail(errors.New("job timed out after " + timeout.String()))
case <-ctx.Done():
OnFail(ctx.Err())
}
}
}

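The reworked JobFromCommand above runs the command inside a goroutine, reports completion over a buffered `done` channel, and races that against `ctx.Done()`. The stripped-down sketch below shows just that control flow; it omits the log streaming, panic recovery, and callback plumbing, and the `sleep` command and `runWithContext` name are illustrative only.

```go
package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

// runWithContext mirrors the pattern used by JobFromCommand: run the command
// in a goroutine, report completion over a channel, and race it against ctx.Done().
func runWithContext(ctx context.Context, name string, args ...string) error {
	cmd := exec.CommandContext(ctx, name, args...)
	done := make(chan error, 1)

	go func() {
		done <- cmd.Run()
	}()

	select {
	case err := <-done:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	if err := runWithContext(ctx, "sleep", "5"); err != nil {
		fmt.Println("job failed:", err)
	} else {
		fmt.Println("job succeeded")
	}
}
```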
@@ -232,17 +269,32 @@ func InitJobs() {
}

func jobRunner(schedulerName, jobName string) func(OnLog func(string), OnFail func(error), OnSuccess func()) {
return func (OnLog func(string), OnFail func(error), OnSuccess func()) {
CRONLock <- true
if job, ok := jobsList[schedulerName][jobName]; ok {
utils.Log("Starting CRON job: " + job.Name)
return func(OnLog func(string), OnFail func(error), OnSuccess func()) {
RunningLock <- true
defer func() { <-RunningLock }()

if job.Running {
utils.Error("Scheduler: job " + job.Name + " is already running", nil)
<-CRONLock
return
CRONLock <- true
var job ConfigJob
var ok bool

if job, ok = jobsList[schedulerName][jobName]; !ok {
utils.Error("Scheduler: job "+jobName+" not found", nil)
<-CRONLock
return
}

if job.Running {
utils.Error("Scheduler: job "+job.Name+" is already running", nil)
<-CRONLock
return
}

// Create context with timeout
// if job.Timeout == 0 {
// job.Timeout = 240 * time.Hour
// }

// ctx, cancel := context.WithTimeout(context.Background(), )
ctx, cancel := context.WithCancel(context.Background())
job.Ctx = ctx
job.LastStarted = time.Now()
@@ -253,35 +305,45 @@ func jobRunner(schedulerName, jobName string) func(OnLog func(string), OnFail fu
jobsList[job.Scheduler][job.Name] = job
<-CRONLock

// Ensure cleanup happens
defer func() {
CRONLock <- true
if j, ok := jobsList[schedulerName][jobName]; ok {
j.Running = false
j.CancelFunc = nil
jobsList[schedulerName][jobName] = j
}
<-CRONLock
cancel()
}()

triggerJobUpdated("start", job.Name)

select {
case <-ctx.Done():
OnFail(errors.New("Scheduler: job was canceled."))
return
default:
job.Job(OnLog, OnFail, OnSuccess, ctx, cancel)
}
} else {
utils.Error("Scheduler: job " + jobName + " not found", nil)
<-CRONLock
}
job.Job(OnLog, OnFail, OnSuccess, ctx, cancel)
}

return nil
}

func jobRunner_OnLog(schedulerName, jobName string) func(log string) {
return func(log string) {
CRONLock <- true
if job, ok := jobsList[schedulerName][jobName]; ok {
job.Logs = append(job.Logs, log)
jobsList[job.Scheduler][job.Name] = job
utils.Debug(log)
triggerJobUpdated("log", job.Name, log)
}
<-CRONLock
CRONLock <- true
if job, ok := jobsList[schedulerName][jobName]; ok {
// Implement circular buffer for logs
maxlog := job.MaxLogs
if maxlog == 0 {
maxlog = 5000
}
if maxlog != 0 && len(job.Logs) >= maxlog {
job.Logs = job.Logs[1:]
}
job.Logs = append(job.Logs, log)
jobsList[job.Scheduler][job.Name] = job
utils.Debug(log)
triggerJobUpdated("log", job.Name, log)
}
<-CRONLock
}
}

func jobRunner_OnFail(schedulerName, jobName string) func(err error) {
return func(err error) {
CRONLock <- true

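jobRunner_OnLog above now caps each job's log slice (defaulting to 5000 entries) by dropping the oldest line before appending a new one. A small standalone sketch of that capped-append behaviour follows; the `appendCapped` helper name is hypothetical, and a tiny cap is used so the effect is visible.

```go
package main

import "fmt"

// appendCapped mirrors the log cap added to jobRunner_OnLog: once the slice
// reaches max entries, the oldest entry is dropped before the new one is appended.
func appendCapped(logs []string, line string, max int) []string {
	if max == 0 {
		max = 5000 // default used in the commit
	}
	if len(logs) >= max {
		logs = logs[1:]
	}
	return append(logs, line)
}

func main() {
	var logs []string
	for i := 0; i < 7; i++ {
		logs = appendCapped(logs, fmt.Sprintf("line %d", i), 5)
	}
	fmt.Println(logs) // [line 2 line 3 line 4 line 5 line 6]
}
```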
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
"crypto/md5"
"os/exec"
"encoding/hex"
"io"
"archive/zip"
@@ -100,6 +101,11 @@ func unzip(src string, dest string) error {
func main() {
fmt.Println("-- Cosmos Cloud Launcher --")
fmt.Println("Checking for updates to install...")

// killall cosmos processes before updating
cmd := exec.Command("killall", "cosmos")
cmd.Run()

execPath, err := os.Executable()
if err != nil {

@@ -4,6 +4,7 @@ import (
"os"
"errors"
"strings"
"strconv"

"github.com/azukaar/cosmos-server/src/utils"
"github.com/azukaar/cosmos-server/src/cron"
@@ -200,8 +201,13 @@ func InitSnapRAIDConfig() {
defer file.Close()

// write the configuration
for _, d := range raidOptions.Parity {
file.WriteString("parity " + d + "/snapraid.parity\n")
for _di, d := range raidOptions.Parity {
di := strconv.Itoa(_di)
if _di == 0 {
file.WriteString("parity " + d + "/snapraid.parity\n")
} else {
file.WriteString(di + "-parity " + d + "/snapraid."+di+"-parity\n")
}
}

// file.WriteString("content " + utils.CONFIGFOLDER + "snapraid/" + raidOptions.Name + ".conf\n")

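To make the 2-parity fix above concrete, the sketch below reproduces the new parity-line generation from InitSnapRAIDConfig as a standalone function and prints the lines it would write for two hypothetical parity mount points; the `parityLines` helper name and the paths are illustrative only.

```go
package main

import (
	"fmt"
	"strconv"
)

// parityLines mirrors the loop added in InitSnapRAIDConfig: the first parity
// disk gets a plain "parity" line, later disks get numbered "<i>-parity" lines.
func parityLines(parityDisks []string) []string {
	lines := []string{}
	for i, d := range parityDisks {
		if i == 0 {
			lines = append(lines, "parity "+d+"/snapraid.parity")
		} else {
			n := strconv.Itoa(i)
			lines = append(lines, n+"-parity "+d+"/snapraid."+n+"-parity")
		}
	}
	return lines
}

func main() {
	// Hypothetical mount points for two parity disks.
	for _, l := range parityLines([]string{"/mnt/parity1", "/mnt/parity2"}) {
		fmt.Println(l)
	}
	// Output:
	// parity /mnt/parity1/snapraid.parity
	// 1-parity /mnt/parity2/snapraid.1-parity
}
```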
@@ -97,6 +97,10 @@ func Warn(message string) {
RawLogMessage(WARNING, "[WARN] ", bYellow, nYellow, message)
}

func VPN(message string) {
RawLogMessage(INFO, "[VPN] ", bCyan, nCyan, message)
}

func Error(message string, err error) {
errStr := ""
if err != nil {

stats.html (new file, 4842 lines)
File diff suppressed because one or more lines are too long
@@ -1,5 +1,6 @@
import { defineConfig } from 'vite'
import react from '@vitejs/plugin-react'
import visualizer from 'rollup-plugin-visualizer';

// https://vitejs.dev/config/
export default defineConfig({
@@ -7,18 +8,21 @@ export default defineConfig({
root: 'client',
build: {
outDir: '../static',
rollupOptions: {
plugins: [visualizer({ open: true })],
},
},
server: {
proxy: {
'/cosmos/api': {
// target: 'https://localhost:8443',
target: 'https://192.168.1.197',
target: 'http://192.168.1.170:8080',
secure: false,
ws: true,
},
'/cosmos/rclone': {
// target: 'https://localhost:8443',
target: 'https://192.168.1.197',
target: 'http://192.168.1.170:8080',
secure: false,
ws: true,
}