Updated Makefile, added tests, removed the LastUpdate column from the UploadStatus database table

This commit is contained in:
Marc Ole Bulling
2024-06-09 19:33:23 +02:00
parent b14d9f9e51
commit 7fef0450bc
13 changed files with 71 additions and 46 deletions

View File

@@ -11,7 +11,7 @@ import (
)
// CurrentConfigVersion is the version of the configuration structure. Used for upgrading
const CurrentConfigVersion = 19
const CurrentConfigVersion = 20
// DoUpgrade checks if an old version is present and updates it to the current version if required
func DoUpgrade(settings *models.Configuration, env *environment.Environment) bool {
@@ -65,7 +65,7 @@ func updateConfig(settings *models.Configuration, env *environment.Environment)
}
settings.Authentication.OAuthRecheckInterval = 168
}
// < v1.8.5
// < v1.8.5beta
if settings.ConfigVersion < 19 {
if settings.MaxMemory == 40 {
settings.MaxMemory = 50
@@ -73,6 +73,16 @@ func updateConfig(settings *models.Configuration, env *environment.Environment)
settings.ChunkSize = env.ChunkSizeMB
settings.MaxParallelUploads = env.MaxParallelUploads
}
// < v1.8.5
if settings.ConfigVersion < 20 {
err := database.RawSqlite(`DROP TABLE UploadStatus; CREATE TABLE "UploadStatus" (
"ChunkId" TEXT NOT NULL UNIQUE,
"CurrentStatus" INTEGER NOT NULL,
"CreationDate" INTEGER NOT NULL,
PRIMARY KEY("ChunkId")
) WITHOUT ROWID;`)
helper.Check(err)
}
}
// migrateToSqlite copies the content of the old bitcask database to a new sqlite database
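DoUpgrade is the entry point for all of these steps: each block inside updateConfig is gated by the persisted ConfigVersion, so a migration such as the new < 20 step runs exactly once, and CurrentConfigVersion is raised in lockstep. A minimal sketch of how a caller might drive it, with a hypothetical saveSettings callback; only DoUpgrade's signature is taken from the diff above:

```go
// Illustrative wrapper, not part of this commit. saveSettings stands in for
// whatever Gokapi uses to persist the configuration after an upgrade.
func runUpgradeIfNeeded(settings *models.Configuration, env *environment.Environment,
	saveSettings func(*models.Configuration)) {
	if DoUpgrade(settings, env) {
		// Presumably true when any step ran; after the < 20 step the
		// UploadStatus table has been recreated without LastUpdate.
		saveSettings(settings)
	}
}
```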

View File

@@ -146,7 +146,6 @@ func createNewDatabase() {
CREATE TABLE "UploadStatus" (
"ChunkId" TEXT NOT NULL UNIQUE,
"CurrentStatus" INTEGER NOT NULL,
"LastUpdate" INTEGER NOT NULL,
"CreationDate" INTEGER NOT NULL,
PRIMARY KEY("ChunkId")
) WITHOUT ROWID;
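The Go-side row struct used by the Scan calls later in this commit now needs only these three columns. Its definition is not part of the diff, but from the schema above and the rows.Scan calls below it plausibly looks like this:

```go
// Sketch inferred from the SQL schema above and the Scan calls in the
// database package further down; the real schemaUploadStatus definition is
// not shown in this commit.
type schemaUploadStatus struct {
	ChunkId       string // TEXT, primary key of the WITHOUT ROWID table
	CurrentStatus int    // INTEGER, see the processingstatus constants
	CreationDate  int64  // INTEGER, Unix timestamp set by SaveUploadStatus
}
```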

View File

@@ -216,54 +216,44 @@ func TestGarbageCollectionUploads(t *testing.T) {
SaveUploadStatus(models.UploadStatus{
ChunkId: "ctodelete1",
CurrentStatus: 0,
LastUpdate: time.Now().Add(-24 * time.Hour).Unix(),
})
SaveUploadStatus(models.UploadStatus{
ChunkId: "ctodelete2",
CurrentStatus: 1,
LastUpdate: time.Now().Add(-24 * time.Hour).Unix(),
})
SaveUploadStatus(models.UploadStatus{
ChunkId: "ctodelete3",
CurrentStatus: 0,
LastUpdate: 0,
})
SaveUploadStatus(models.UploadStatus{
ChunkId: "ctodelete4",
CurrentStatus: 0,
LastUpdate: time.Now().Add(-20 * time.Hour).Unix(),
})
SaveUploadStatus(models.UploadStatus{
ChunkId: "ctodelete5",
CurrentStatus: 1,
LastUpdate: time.Now().Add(40 * time.Hour).Unix(),
})
currentTime = orgiginalFunc
SaveUploadStatus(models.UploadStatus{
ChunkId: "ctokeep1",
CurrentStatus: 0,
LastUpdate: time.Now().Add(-24 * time.Hour).Unix(),
})
SaveUploadStatus(models.UploadStatus{
ChunkId: "ctokeep2",
CurrentStatus: 1,
LastUpdate: time.Now().Add(-24 * time.Hour).Unix(),
})
SaveUploadStatus(models.UploadStatus{
ChunkId: "ctokeep3",
CurrentStatus: 0,
LastUpdate: 0,
})
SaveUploadStatus(models.UploadStatus{
ChunkId: "ctokeep4",
CurrentStatus: 0,
LastUpdate: time.Now().Add(-20 * time.Hour).Unix(),
})
SaveUploadStatus(models.UploadStatus{
ChunkId: "ctokeep5",
CurrentStatus: 1,
LastUpdate: time.Now().Add(40 * time.Hour).Unix(),
})
for _, item := range []string{"ctodelete1", "ctodelete2", "ctodelete3", "ctodelete4", "ctokeep1", "ctokeep2", "ctokeep3", "ctokeep4"} {
_, result := GetUploadStatus(item)
@@ -463,3 +453,29 @@ func TestParallelConnectionsReading(t *testing.T) {
}
wg.Wait()
}
func TestUploadStatus(t *testing.T) {
allStatus := GetAllUploadStatus()
found := false
test.IsEqualInt(t, len(allStatus), 5)
for _, status := range allStatus {
if status.ChunkId == "ctokeep5" {
found = true
}
}
test.IsEqualBool(t, found, true)
newStatus := models.UploadStatus{
ChunkId: "testid",
CurrentStatus: 1,
}
retrievedStatus, ok := GetUploadStatus("testid")
test.IsEqualBool(t, ok, false)
test.IsEqualBool(t, retrievedStatus == models.UploadStatus{}, true)
SaveUploadStatus(newStatus)
retrievedStatus, ok = GetUploadStatus("testid")
test.IsEqualBool(t, ok, true)
test.IsEqualString(t, retrievedStatus.ChunkId, "testid")
test.IsEqualInt(t, retrievedStatus.CurrentStatus, 1)
allStatus = GetAllUploadStatus()
test.IsEqualInt(t, len(allStatus), 6)
}

View File

@@ -23,12 +23,11 @@ func GetAllUploadStatus() []models.UploadStatus {
defer rows.Close()
for rows.Next() {
rowResult := schemaUploadStatus{}
err = rows.Scan(&rowResult.ChunkId, &rowResult.CurrentStatus, &rowResult.LastUpdate, &rowResult.CreationDate)
err = rows.Scan(&rowResult.ChunkId, &rowResult.CurrentStatus, &rowResult.CreationDate)
helper.Check(err)
result = append(result, models.UploadStatus{
ChunkId: rowResult.ChunkId,
CurrentStatus: rowResult.CurrentStatus,
LastUpdate: rowResult.LastUpdate,
})
}
return result
@@ -39,12 +38,11 @@ func GetUploadStatus(id string) (models.UploadStatus, bool) {
result := models.UploadStatus{
ChunkId: id,
CurrentStatus: 0,
LastUpdate: 0,
}
var rowResult schemaUploadStatus
row := sqliteDb.QueryRow("SELECT * FROM UploadStatus WHERE ChunkId = ?", id)
err := row.Scan(&rowResult.ChunkId, &rowResult.CurrentStatus, &rowResult.LastUpdate, &rowResult.CreationDate)
err := row.Scan(&rowResult.ChunkId, &rowResult.CurrentStatus, &rowResult.CreationDate)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return models.UploadStatus{}, false
@@ -53,7 +51,6 @@ func GetUploadStatus(id string) (models.UploadStatus, bool) {
return models.UploadStatus{}, false
}
result.CurrentStatus = rowResult.CurrentStatus
result.LastUpdate = rowResult.LastUpdate
return result, true
}
@@ -67,12 +64,11 @@ func SaveUploadStatus(status models.UploadStatus) {
newData := schemaUploadStatus{
ChunkId: status.ChunkId,
CurrentStatus: status.CurrentStatus,
LastUpdate: status.LastUpdate,
CreationDate: currentTime().Unix(),
}
_, err := sqliteDb.Exec("INSERT OR REPLACE INTO UploadStatus (ChunkId, CurrentStatus, LastUpdate, CreationDate) VALUES (?, ?, ?, ?)",
newData.ChunkId, newData.CurrentStatus, newData.LastUpdate, newData.CreationDate)
_, err := sqliteDb.Exec("INSERT OR REPLACE INTO UploadStatus (ChunkId, CurrentStatus, CreationDate) VALUES (?, ?, ?)",
newData.ChunkId, newData.CurrentStatus, newData.CreationDate)
helper.Check(err)
}
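With LastUpdate gone, callers set only ChunkId and CurrentStatus; CreationDate is filled in by SaveUploadStatus itself via currentTime(). A short usage sketch mirroring the new TestUploadStatus above, assuming the sqlite database has already been initialised:

```go
// Mirrors TestUploadStatus: save a status, read it back, list everything.
status := models.UploadStatus{
	ChunkId:       "testid",
	CurrentStatus: 1,
}
SaveUploadStatus(status) // CreationDate is set internally via currentTime()

if retrieved, ok := GetUploadStatus("testid"); ok {
	fmt.Println(retrieved.ChunkId, retrieved.CurrentStatus) // testid 1
}
fmt.Println(len(GetAllUploadStatus())) // includes the row written above
```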

View File

@@ -12,8 +12,6 @@ type UploadStatus struct {
// hashing) or being moved/uploaded to the file storage
// See processingstatus for definition
CurrentStatus int `json:"currentstatus"`
// LastUpdate indicates the last status change
LastUpdate int64 `json:"lastupdate"`
// Type is the type of the message and is always "uploadstatus"
Type string `json:"type"`
}
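After the removal, the serialised struct carries three fields. A sketch of the resulting shape; ChunkId and its json tag are inferred from the chunkid key asserted in the ToJson test below, while only CurrentStatus and Type are visible in this hunk:

```go
// Sketch of models.UploadStatus after this commit, doc comments shortened.
// ChunkId and its tag are inferred from the serialised output tested below.
type UploadStatus struct {
	ChunkId       string `json:"chunkid"`
	CurrentStatus int    `json:"currentstatus"` // see processingstatus for values
	Type          string `json:"type"`          // always "uploadstatus"
}
```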

View File

@@ -9,5 +9,5 @@ func TestUploadStatus_ToJson(t *testing.T) {
status := UploadStatus{}
output, err := status.ToJson()
test.IsNil(t, err)
test.IsEqualString(t, string(output), "{\"chunkid\":\"\",\"currentstatus\":0,\"lastupdate\":0,\"type\":\"uploadstatus\"}")
test.IsEqualString(t, string(output), "{\"chunkid\":\"\",\"currentstatus\":0,\"type\":\"uploadstatus\"}")
}

View File

@@ -5,7 +5,6 @@ import (
"github.com/forceu/gokapi/internal/helper"
"github.com/forceu/gokapi/internal/models"
"github.com/forceu/gokapi/internal/webserver/sse"
"time"
)
// StatusHashingOrEncrypting indicates that the file has been completely uploaded, but is now processed by Gokapi
@@ -25,7 +24,6 @@ func Set(id string, status int) {
newStatus := models.UploadStatus{
ChunkId: id,
CurrentStatus: status,
LastUpdate: time.Now().Unix(),
}
oldStatus, ok := database.GetUploadStatus(newStatus.ChunkId)
if ok && oldStatus.CurrentStatus > newStatus.CurrentStatus {
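Call sites are otherwise unaffected: Set still takes only the chunk id and the new status, and the comparison at the end of the hunk suggests it still refuses to move a status backwards. An illustrative call, not part of this diff:

```go
// Illustrative only; the real call sites are outside this commit.
// StatusHashingOrEncrypting is the constant documented at the top of the file;
// chunkId would come from the upload currently being processed.
processingstatus.Set(chunkId, processingstatus.StatusHashingOrEncrypting)
```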

View File

@@ -38,7 +38,6 @@ func TestSetStatus(t *testing.T) {
initialStatus := models.UploadStatus{
ChunkId: chunkID,
CurrentStatus: tc.initialStatus,
LastUpdate: time.Now().Unix(),
}
database.SaveUploadStatus(initialStatus)

View File

@@ -11,6 +11,7 @@ import (
"github.com/forceu/gokapi/internal/storage/filesystem/s3filesystem/aws"
"github.com/johannesboyne/gofakes3"
"github.com/johannesboyne/gofakes3/backend/s3mem"
"log"
"net/http/httptest"
"os"
"strings"
@@ -172,20 +173,20 @@ func writeTestSessions() {
})
}
func writeTestUploadStatus() {
database.SaveUploadStatus(models.UploadStatus{
ChunkId: "expiredstatus",
CurrentStatus: 0,
LastUpdate: 100,
})
err := database.RawSqlite(`INSERT OR REPLACE INTO UploadStatus
("ChunkId", "CurrentStatus", "CreationDate")
VALUES ('expiredstatus', 0, 100);`)
if err != nil {
log.Println(err)
log.Fatal("Could not execute SQL")
}
database.SaveUploadStatus(models.UploadStatus{
ChunkId: "validstatus_0",
CurrentStatus: 0,
LastUpdate: 2065000681,
})
database.SaveUploadStatus(models.UploadStatus{
ChunkId: "validstatus_1",
CurrentStatus: 1,
LastUpdate: 2065000681,
})
}
@@ -342,21 +343,31 @@ var configTestFile = []byte(`{
"Username": "test",
"Password": "10340aece68aa4fb14507ae45b05506026f276cf",
"HeaderKey": "",
"OAuthProvider": "",
"OauthProvider": "",
"OAuthClientId": "",
"OAuthClientSecret": "",
"OauthUserScope": "",
"OauthGroupScope": "",
"OAuthRecheckInterval": 12,
"HeaderUsers": null,
"OAuthUsers": null
"OAuthGroups": [],
"OauthUsers": []
},
"Port":"127.0.0.1:53843",
"Port":"127.0.0.1:53843",
"ServerUrl": "http://127.0.0.1:53843/",
"RedirectUrl": "https://test.com/",
"ConfigVersion": 16,
"PublicName": "Gokapi Test Version",
"ConfigVersion": 20,
"LengthId": 20,
"DataDir": "test/data",
"MaxFileSizeMB": 25,
"MaxMemory": 10,
"ChunkSize": 45,
"MaxParallelUploads": 4,
"UseSsl": false,
"MaxFileSizeMB": 25
"PicturesAlwaysLocal": false,
"SaveIp": false,
"IncludeFilename": false
}`)
var sslCertValid = []byte(`-----BEGIN CERTIFICATE-----

View File

@@ -91,7 +91,7 @@ func TestGetStatusSSE(t *testing.T) {
body, err := io.ReadAll(rr.Body)
test.IsNil(t, err)
test.IsEqualString(t, string(body), "{\"chunkid\":\"expiredstatus\",\"currentstatus\":0,\"lastupdate\":100,\"type\":\"uploadstatus\"}\n{\"chunkid\":\"validstatus_0\",\"currentstatus\":0,\"lastupdate\":2065000681,\"type\":\"uploadstatus\"}\n{\"chunkid\":\"validstatus_1\",\"currentstatus\":1,\"lastupdate\":2065000681,\"type\":\"uploadstatus\"}\n")
test.IsEqualString(t, string(body), "{\"chunkid\":\"expiredstatus\",\"currentstatus\":0,\"type\":\"uploadstatus\"}\n{\"chunkid\":\"validstatus_0\",\"currentstatus\":0,\"type\":\"uploadstatus\"}\n{\"chunkid\":\"validstatus_1\",\"currentstatus\":1,\"type\":\"uploadstatus\"}\n")
// Test ping message
time.Sleep(3 * time.Second)

View File

@@ -473,7 +473,7 @@ function registerChangeHandler() {
console.log("Reconnecting to SSE...");
// Attempt to reconnect after a delay
setTimeout(registerChangeHandler, 1000);
setTimeout(registerChangeHandler, 5000);
};
}

File diff suppressed because one or more lines are too long

View File

@@ -9,17 +9,15 @@ all: build
# Build Gokapi binary
build:
@echo "Generating code..."
@echo
go generate ./...
@echo "Building binary..."
@echo
go generate ./...
CGO_ENABLED=0 go build $(BUILD_FLAGS) -o $(OUTPUT_BIN) $(GOPACKAGE)/cmd/gokapi
coverage:
@echo Generating coverage
@echo
go test ./... -parallel 8 --tags=test,awsmock -coverprofile=/tmp/coverage1.out && go tool cover -html=/tmp/coverage1.out
GOKAPI_AWS_BUCKET="gokapi" GOKAPI_AWS_REGION="eu-central-1" GOKAPI_AWS_KEY="keyid" GOKAPI_AWS_KEY_SECRET="secret" go test ./... -parallel 8 --tags=test,awstest -coverprofile=/tmp/coverage1.out && go tool cover -html=/tmp/coverage1.out
coverage-specific:
@echo Generating coverage for "$(TEST_PACKAGE)"