Migrate working set from existing repo state file on load

This commit is contained in:
Zach Musgrave
2021-06-23 17:33:40 -07:00
parent cb98f65438
commit d8dfd68383
13 changed files with 307 additions and 92 deletions

View File

@@ -18,6 +18,7 @@ import (
"context"
"testing"
filesys2 "github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/stretchr/testify/assert"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
@@ -30,7 +31,7 @@ import (
func TestDocDiff(t *testing.T) {
ctx := context.Background()
ddb, _ := doltdb.LoadDoltDB(ctx, types.Format_Default, doltdb.InMemDoltDB)
ddb, _ := doltdb.LoadDoltDB(ctx, types.Format_Default, doltdb.InMemDoltDB, filesys2.LocalFS)
ddb.WriteEmptyRepo(ctx, "billy bob", "bigbillieb@fake.horse")
cs, _ := doltdb.NewCommitSpec("master")

View File

@@ -19,6 +19,7 @@ import (
"errors"
"fmt"
"math/rand"
"path/filepath"
"strings"
"time"
@@ -72,19 +73,26 @@ func DoltDBFromCS(cs chunks.ChunkStore) *DoltDB {
// LoadDoltDB will acquire a reference to the underlying noms db. If the Location is InMemDoltDB then a reference
// to a newly created in memory database will be used. If the location is LocalDirDoltDB, the directory must exist or
// this returns nil.
func LoadDoltDB(ctx context.Context, nbf *types.NomsBinFormat, urlStr string) (*DoltDB, error) {
return LoadDoltDBWithParams(ctx, nbf, urlStr, nil)
func LoadDoltDB(ctx context.Context, nbf *types.NomsBinFormat, urlStr string, fs filesys.Filesys) (*DoltDB, error) {
return LoadDoltDBWithParams(ctx, nbf, urlStr, fs, nil)
}
func LoadDoltDBWithParams(ctx context.Context, nbf *types.NomsBinFormat, urlStr string, params map[string]string) (*DoltDB, error) {
func LoadDoltDBWithParams(ctx context.Context, nbf *types.NomsBinFormat, urlStr string, fs filesys.Filesys, params map[string]string) (*DoltDB, error) {
if urlStr == LocalDirDoltDB {
exists, isDir := filesys.LocalFS.Exists(dbfactory.DoltDataDir)
exists, isDir := fs.Exists(dbfactory.DoltDataDir)
if !exists {
return nil, errors.New("missing dolt data directory")
} else if !isDir {
return nil, errors.New("file exists where the dolt data directory should be")
}
absPath, err := fs.Abs(dbfactory.DoltDataDir)
if err != nil {
return nil, err
}
urlStr = fmt.Sprintf("file://%s", filepath.ToSlash(absPath))
}
db, err := dbfactory.CreateDB(ctx, nbf, urlStr, params)
@@ -922,6 +930,17 @@ func (ddb *DoltDB) UpdateWorkingSet(ctx context.Context, workingSetRef ref.Worki
return err
}
// DeleteWorkingSet removes the dataset backing the given working set ref from the database.
func (ddb *DoltDB) DeleteWorkingSet(ctx context.Context, workingSetRef ref.WorkingSetRef) error {
	dataset, err := ddb.db.GetDataset(ctx, workingSetRef.String())
	if err != nil {
		return err
	}

	if _, err = ddb.db.Delete(ctx, dataset); err != nil {
		return err
	}
	return nil
}
func (ddb *DoltDB) DeleteTag(ctx context.Context, tag ref.DoltRef) error {
err := ddb.deleteRef(ctx, tag)

View File

@@ -177,7 +177,7 @@ func TestSystemTableTags(t *testing.T) {
}
func TestEmptyInMemoryRepoCreation(t *testing.T) {
ddb, err := LoadDoltDB(context.Background(), types.Format_Default, InMemDoltDB)
ddb, err := LoadDoltDB(context.Background(), types.Format_Default, InMemDoltDB, filesys.LocalFS)
if err != nil {
t.Fatal("Failed to load db")
@@ -213,7 +213,7 @@ func TestLoadNonExistentLocalFSRepo(t *testing.T) {
panic("Couldn't change the working directory to the test directory.")
}
ddb, err := LoadDoltDB(context.Background(), types.Format_Default, LocalDirDoltDB)
ddb, err := LoadDoltDB(context.Background(), types.Format_Default, LocalDirDoltDB, filesys.LocalFS)
assert.Nil(t, ddb, "Should return nil when loading a non-existent data dir")
assert.Error(t, err, "Should see an error here")
}
@@ -228,7 +228,7 @@ func TestLoadBadLocalFSRepo(t *testing.T) {
contents := []byte("not a directory")
ioutil.WriteFile(filepath.Join(testDir, dbfactory.DoltDataDir), contents, 0644)
ddb, err := LoadDoltDB(context.Background(), types.Format_Default, LocalDirDoltDB)
ddb, err := LoadDoltDB(context.Background(), types.Format_Default, LocalDirDoltDB, filesys.LocalFS)
assert.Nil(t, ddb, "Should return nil when loading a non-directory data dir file")
assert.Error(t, err, "Should see an error here")
}
@@ -251,7 +251,7 @@ func TestLDNoms(t *testing.T) {
t.Fatal("Failed to create noms directory")
}
ddb, _ := LoadDoltDB(context.Background(), types.Format_Default, LocalDirDoltDB)
ddb, _ := LoadDoltDB(context.Background(), types.Format_Default, LocalDirDoltDB, filesys.LocalFS)
err = ddb.WriteEmptyRepo(context.Background(), committerName, committerEmail)
if err != nil {
@@ -263,7 +263,7 @@ func TestLDNoms(t *testing.T) {
var valHash hash.Hash
var tbl *Table
{
ddb, _ := LoadDoltDB(context.Background(), types.Format_Default, LocalDirDoltDB)
ddb, _ := LoadDoltDB(context.Background(), types.Format_Default, LocalDirDoltDB, filesys.LocalFS)
cs, _ := NewCommitSpec("master")
commit, err := ddb.Resolve(context.Background(), cs, nil)
@@ -305,7 +305,7 @@ func TestLDNoms(t *testing.T) {
// reopen the db and commit the value. Perform a couple checks for
{
ddb, _ := LoadDoltDB(context.Background(), types.Format_Default, LocalDirDoltDB)
ddb, _ := LoadDoltDB(context.Background(), types.Format_Default, LocalDirDoltDB, filesys.LocalFS)
meta, err := NewCommitMeta(committerName, committerEmail, "Sample data")
if err != nil {
t.Error("Failed to commit")

View File

@@ -35,6 +35,12 @@ type MergeState struct {
preMergeWorking *RootValue
}
// NewMergeState returns a new MergeState for the given commit and pre-merge working root.
// Most clients should not construct MergeState objects directly, but instead use WorkingSet.StartMerge
func NewMergeState(commit *Commit, preMergeWorking *RootValue) *MergeState {
	ms := MergeState{
		commit:          commit,
		preMergeWorking: preMergeWorking,
	}
	return &ms
}
// Commit returns the commit recorded for this merge state.
func (m MergeState) Commit() *Commit {
return m.commit
}
@@ -46,7 +52,7 @@ func (m MergeState) PreMergeWorkingRoot() *RootValue {
type WorkingSet struct {
Name string
format *types.NomsBinFormat
st types.Struct
st *types.Struct
workingRoot *RootValue
stagedRoot *RootValue
mergeState *MergeState
@@ -56,7 +62,6 @@ func EmptyWorkingSet(wsRef ref.WorkingSetRef) *WorkingSet {
return &WorkingSet{
Name: wsRef.GetPath(),
format: types.Format_Default,
st: types.Struct{},
}
}
@@ -70,6 +75,11 @@ func (ws WorkingSet) WithWorkingRoot(workingRoot *RootValue) *WorkingSet {
return &ws
}
// WithMergeState returns a copy of this working set with the given merge state set on it.
// The receiver is unchanged (value receiver); callers must use the returned working set.
func (ws WorkingSet) WithMergeState(mergeState *MergeState) *WorkingSet {
	updated := ws
	updated.mergeState = mergeState
	return &updated
}
func (ws WorkingSet) StartMerge(commit *Commit) *WorkingSet {
ws.mergeState = &MergeState{
commit: commit,
@@ -165,7 +175,7 @@ func NewWorkingSet(ctx context.Context, name string, vrw types.ValueReadWriter,
return &WorkingSet{
Name: name,
format: vrw.Format(),
st: workingSetSt,
st: &workingSetSt,
workingRoot: workingRoot,
stagedRoot: stagedRoot,
}, nil
@@ -179,6 +189,9 @@ func (ws *WorkingSet) RootValue() *RootValue {
// HashOf returns the hash of the workingset struct, which is not the same as the hash of the root value stored in the
// working set. This value is used for optimistic locking when updating a working set for a head ref.
func (ws *WorkingSet) HashOf() (hash.Hash, error) {
	if ws.st != nil {
		return ws.st.Hash(ws.format)
	}
	// A working set with no backing struct (never persisted) reports the zero hash.
	return hash.Hash{}, nil
}

View File

@@ -18,6 +18,7 @@ import (
"context"
"testing"
filesys2 "github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -30,7 +31,7 @@ import (
func TestAddNewerTextAndValueFromTable(t *testing.T) {
ctx := context.Background()
ddb, _ := doltdb.LoadDoltDB(ctx, types.Format_Default, doltdb.InMemDoltDB)
ddb, _ := doltdb.LoadDoltDB(ctx, types.Format_Default, doltdb.InMemDoltDB, filesys2.LocalFS)
ddb.WriteEmptyRepo(ctx, "billy bob", "bigbillieb@fake.horse")
// If no tbl/schema is provided, doc Text and Value should be nil.
@@ -84,7 +85,7 @@ func TestAddNewerTextAndValueFromTable(t *testing.T) {
func TestAddNewerTextAndDocPkFromRow(t *testing.T) {
ctx := context.Background()
ddb, _ := doltdb.LoadDoltDB(ctx, types.Format_Default, doltdb.InMemDoltDB)
ddb, _ := doltdb.LoadDoltDB(ctx, types.Format_Default, doltdb.InMemDoltDB, filesys2.LocalFS)
ddb.WriteEmptyRepo(ctx, "billy bob", "bigbillieb@fake.horse")
sch := createTestDocsSchema()

View File

@@ -22,7 +22,7 @@ const (
)
func TestConfig(t *testing.T) {
dEnv := createTestEnv(true, true)
dEnv, _ := createTestEnv(true, true)
lCfg, _ := dEnv.Config.GetConfig(LocalConfig)
gCfg, _ := dEnv.Config.GetConfig(GlobalConfig)

View File

@@ -82,23 +82,22 @@ func Load(ctx context.Context, hdp HomeDirProvider, fs filesys.Filesys, urlStr,
config, cfgErr := loadDoltCliConfig(hdp, fs)
repoState, rsErr := LoadRepoState(fs)
// TODO: move repo state deprecated fields into working set on Load (if working set isn't present)
docs, docsErr := doltdocs.LoadDocs(fs)
ddb, dbLoadErr := doltdb.LoadDoltDB(ctx, types.Format_Default, urlStr)
ddb, dbLoadErr := doltdb.LoadDoltDB(ctx, types.Format_Default, urlStr, fs)
dEnv := &DoltEnv{
version,
config,
cfgErr,
repoState,
rsErr,
docs,
docsErr,
ddb,
dbLoadErr,
fs,
urlStr,
hdp,
Version: version,
Config: config,
CfgLoadErr: cfgErr,
RepoState: repoState,
RSLoadErr: rsErr,
Docs: docs,
DocsLoadErr: docsErr,
DoltDB: ddb,
DBLoadError: dbLoadErr,
FS: fs,
urlStr: urlStr,
hdp: hdp,
}
if dbLoadErr == nil && dEnv.HasDoltDir() {
@@ -126,9 +125,84 @@ func Load(ctx context.Context, hdp HomeDirProvider, fs filesys.Filesys, urlStr,
dbfactory.InitializeFactories(dEnv)
if rsErr == nil && dbLoadErr == nil {
// If the working set isn't present in the DB, create it from the repo state. This step can be removed post 1.0.
_, err := dEnv.WorkingSet(ctx)
if err == doltdb.ErrWorkingSetNotFound {
err := dEnv.initWorkingSetFromRepoState(ctx)
if err != nil {
dEnv.RSLoadErr = err
}
} else if err != nil {
dEnv.RSLoadErr = err
}
}
return dEnv
}
// initWorkingSetFromRepoState sets the working set for the env's head to mirror the contents of the repo state file.
// This is only necessary to migrate repos written before this method was introduced, and can be removed after 1.0.
func (dEnv *DoltEnv) initWorkingSetFromRepoState(ctx context.Context) error {
	headRef := dEnv.RepoStateReader().CWBHeadRef()
	wsRef, err := ref.WorkingSetRefForHead(headRef)
	if err != nil {
		return err
	}

	// On parse failure report the raw string from the repo state file: the parsed hash is the zero
	// hash whenever MaybeParse fails, so formatting it would hide the actual corrupt value.
	workingHash, ok := hash.MaybeParse(dEnv.RepoState.working)
	if !ok {
		return fmt.Errorf("corrupt repo, invalid working hash %q", dEnv.RepoState.working)
	}

	workingRoot, err := dEnv.DoltDB.ReadRootValue(ctx, workingHash)
	if err != nil {
		return err
	}

	stagedHash, ok := hash.MaybeParse(dEnv.RepoState.staged)
	if !ok {
		return fmt.Errorf("corrupt repo, invalid staged hash %q", dEnv.RepoState.staged)
	}

	stagedRoot, err := dEnv.DoltDB.ReadRootValue(ctx, stagedHash)
	if err != nil {
		return err
	}

	// merge may be nil (no merge in progress); mergeStateToMergeState handles that case.
	mergeState, err := mergeStateToMergeState(ctx, dEnv.RepoState.merge, dEnv.DoltDB)
	if err != nil {
		return err
	}

	ws := doltdb.EmptyWorkingSet(wsRef).
		WithWorkingRoot(workingRoot).
		WithStagedRoot(stagedRoot).
		WithMergeState(mergeState)
	return dEnv.UpdateWorkingSet(ctx, ws)
}
// mergeStateToMergeState converts the legacy repo-state merge representation into a doltdb.MergeState.
// Returns (nil, nil) when there is no merge in progress.
func mergeStateToMergeState(ctx context.Context, mergeState *mergeState, db *doltdb.DoltDB) (*doltdb.MergeState, error) {
	if mergeState == nil {
		return nil, nil
	}

	// Return errors rather than panicking: corrupt repo state is caller data, not a programmer bug,
	// and this function already has an error return that callers handle.
	cs, err := doltdb.NewCommitSpec(mergeState.Commit)
	if err != nil {
		return nil, fmt.Errorf("corrupt repo state, invalid merge commit %q: %w", mergeState.Commit, err)
	}

	commit, err := db.Resolve(ctx, cs, nil)
	if err != nil {
		return nil, err
	}

	// hash.Parse panics on malformed input; MaybeParse lets us surface a proper error instead.
	pmwh, ok := hash.MaybeParse(mergeState.PreMergeWorking)
	if !ok {
		return nil, fmt.Errorf("corrupt repo state, invalid pre-merge working hash %q", mergeState.PreMergeWorking)
	}

	pmwr, err := db.ReadRootValue(ctx, pmwh)
	if err != nil {
		return nil, err
	}

	return doltdb.NewMergeState(commit, pmwr), nil
}
// HasDoltDir returns true if the .dolt directory exists and is a valid directory
func (dEnv *DoltEnv) HasDoltDir() bool {
return dEnv.hasDoltDir("./")
@@ -232,7 +306,7 @@ func (dEnv *DoltEnv) InitRepoWithNoData(ctx context.Context, nbf *types.NomsBinF
return err
}
dEnv.DoltDB, err = doltdb.LoadDoltDB(ctx, nbf, dEnv.urlStr)
dEnv.DoltDB, err = doltdb.LoadDoltDB(ctx, nbf, dEnv.urlStr, filesys.LocalFS)
return err
}
@@ -297,7 +371,7 @@ func (dEnv *DoltEnv) InitDBAndRepoState(ctx context.Context, nbf *types.NomsBinF
// Does not update repo state.
func (dEnv *DoltEnv) InitDBWithTime(ctx context.Context, nbf *types.NomsBinFormat, name, email string, t time.Time) error {
var err error
dEnv.DoltDB, err = doltdb.LoadDoltDB(ctx, nbf, dEnv.urlStr)
dEnv.DoltDB, err = doltdb.LoadDoltDB(ctx, nbf, dEnv.urlStr, dEnv.FS)
if err != nil {
return err

View File

@@ -21,6 +21,8 @@ import (
"strings"
"testing"
"github.com/dolthub/dolt/go/store/hash"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/libraries/doltcore/dbfactory"
@@ -40,7 +42,7 @@ func testHomeDirFunc() (string, error) {
return testHomeDir, nil
}
func createTestEnv(isInitialized bool, hasLocalConfig bool) *DoltEnv {
func createTestEnv(isInitialized bool, hasLocalConfig bool) (*DoltEnv, *filesys.InMemFS) {
initialDirs := []string{testHomeDir, workingDir}
initialFiles := map[string][]byte{}
@@ -70,11 +72,18 @@ func createTestEnv(isInitialized bool, hasLocalConfig bool) *DoltEnv {
fs := filesys.NewInMemFS(initialDirs, initialFiles, workingDir)
dEnv := Load(context.Background(), testHomeDirFunc, fs, doltdb.InMemDoltDB, "test")
return dEnv
return dEnv, fs
}
// createFileTestEnv returns a DoltEnv backed by a local filesystem rooted at the given path.
// Fails the test immediately if the filesystem cannot be created.
func createFileTestEnv(t *testing.T, path string) *DoltEnv {
fs, err := filesys.LocalFilesysWithWorkingDir(filepath.ToSlash(path))
require.NoError(t, err)
return Load(context.Background(), testHomeDirFunc, fs, doltdb.LocalDirDoltDB, "test")
}
func TestNonRepoDir(t *testing.T) {
dEnv := createTestEnv(false, false)
dEnv, _ := createTestEnv(false, false)
if !isCWDEmpty(dEnv) {
t.Error("Should start with a clean wd")
@@ -98,31 +107,26 @@ func TestNonRepoDir(t *testing.T) {
}
func TestRepoDir(t *testing.T) {
dEnv := createTestEnv(true, true)
dEnv, _ := createTestEnv(true, true)
assert.True(t, dEnv.HasDoltDir())
assert.True(t, dEnv.HasLocalConfig())
if !dEnv.HasDoltDir() || !dEnv.HasLocalConfig() {
t.Fatal("local config and .dolt dir should have been created")
}
userName, err := dEnv.Config.GetString("user.name")
require.NoError(t, err)
assert.Equal(t, "bheni", userName)
if dEnv.CfgLoadErr != nil {
t.Error("Only global config load / create error should result in an error")
}
if dEnv.RSLoadErr != nil {
t.Error("Repostate should be valid for an initialized directory")
}
if dEnv.DocsLoadErr != nil {
t.Error("Docs should be valid for an initialized directory")
}
if un, err := dEnv.Config.GetString("user.name"); err != nil || un != "bheni" {
t.Error("Bad local config value.")
}
assert.NoError(t, dEnv.CfgLoadErr)
assert.NoError(t, dEnv.DocsLoadErr)
// RSLoadErr will be set because the above method of creating the repo doesn't initialize a valid working or staged
}
func TestRepoDirNoLocal(t *testing.T) {
dEnv := createTestEnv(true, false)
dEnv, fs := createTestEnv(true, false)
err := dEnv.InitRepo(context.Background(), types.Format_Default, "aoeu aoeu", "aoeu@aoeu.org")
require.NoError(t, err)
// Now that we have initialized the repo, try loading it
dEnv = Load(context.Background(), testHomeDirFunc, fs, doltdb.InMemDoltDB, "test")
if !dEnv.HasDoltDir() {
t.Fatal(".dolt dir should exist.")
@@ -142,7 +146,7 @@ func TestRepoDirNoLocal(t *testing.T) {
t.Error("Files don't exist. There should be an error if the directory doesn't exist.")
}
err := dEnv.Config.CreateLocalConfig(map[string]string{"user.name": "bheni"})
err = dEnv.Config.CreateLocalConfig(map[string]string{"user.name": "bheni"})
require.NoError(t, err)
if !dEnv.HasLocalConfig() {
@@ -155,24 +159,15 @@ func TestRepoDirNoLocal(t *testing.T) {
}
func TestInitRepo(t *testing.T) {
dEnv := createTestEnv(false, false)
dEnv, _ := createTestEnv(false, false)
err := dEnv.InitRepo(context.Background(), types.Format_Default, "aoeu aoeu", "aoeu@aoeu.org")
if err != nil {
t.Error("Failed to init repo.", err.Error())
}
require.NoError(t, err)
_, err = dEnv.WorkingRoot(context.Background())
if err != nil {
t.Error("Failed to get working root value.")
}
require.NoError(t, err)
_, err = dEnv.StagedRoot(context.Background())
if err != nil {
t.Error("Failed to get staged root value.")
}
require.NoError(t, err)
for _, doc := range doltdocs.SupportedDocs {
docPath := doltdocs.GetDocFilePath(doc.File)
@@ -182,6 +177,68 @@ func TestInitRepo(t *testing.T) {
}
}
// TestMigrateWorkingSet tests migrating a repo with the old RepoState fields to a new one
func TestMigrateWorkingSet(t *testing.T) {
	dir := t.TempDir()
	dEnv := createFileTestEnv(t, dir)

	err := dEnv.InitRepo(context.Background(), types.Format_Default, "aoeu aoeu", "aoeu@aoeu.org")
	require.NoError(t, err)

	ws, err := dEnv.WorkingSet(context.Background())
	require.NoError(t, err)

	// Make a new repo with the contents of this one, but with the working set cleared out and the repo state filled in
	// with the legacy values

	// We don't have a merge in progress, so we'll just fake one. We're only interested in seeing the fields loaded and
	// persisted to the working set
	commit, err := dEnv.DoltDB.ResolveCommitRef(context.Background(), dEnv.RepoState.CWBHeadRef())
	require.NoError(t, err)

	// StartMerge has a value receiver and returns a new working set; the result must be
	// assigned back or the merge state is silently dropped.
	ws = ws.StartMerge(commit)

	workingRoot := ws.WorkingRoot()
	stagedRoot := ws.StagedRoot()
	workingHash, err := workingRoot.HashOf()
	require.NoError(t, err)
	stagedHash, err := stagedRoot.HashOf()
	require.NoError(t, err)

	rs := repoStateLegacyFromRepoState(dEnv.RepoState)
	rs.Working = workingHash.String()
	rs.Staged = stagedHash.String()

	commitHash, err := commit.HashOf()
	require.NoError(t, err)
	rs.Merge = &mergeState{
		Commit:          commitHash.String(),
		PreMergeWorking: workingHash.String(),
	}

	// Clear the working set
	require.NoError(t, dEnv.DoltDB.DeleteWorkingSet(context.Background(), ws.Ref()))

	// Make sure it's gone
	_, err = dEnv.WorkingSet(context.Background())
	require.Equal(t, doltdb.ErrWorkingSetNotFound, err)

	// Now write the repo state file to disk and re-load the repo
	require.NoError(t, rs.save(dEnv.FS))
	dEnv = Load(context.Background(), testHomeDirFunc, dEnv.FS, doltdb.LocalDirDoltDB, "test")
	assert.NoError(t, dEnv.RSLoadErr)
	assert.NoError(t, dEnv.CfgLoadErr)
	assert.NoError(t, dEnv.DocsLoadErr)

	ws, err = dEnv.WorkingSet(context.Background())
	require.NoError(t, err)

	assert.Equal(t, mustHash(workingRoot.HashOf()), mustHash(ws.WorkingRoot().HashOf()))
	assert.Equal(t, mustHash(stagedRoot.HashOf()), mustHash(ws.StagedRoot().HashOf()))
	// TODO(review): merge-state assertions were left disabled in the original; re-enable once
	// migrated merge state is confirmed to round-trip.
	//assert.Equal(t, mustHash(commit.HashOf()), mustHash(ws.MergeState().Commit().HashOf()))
	//assert.Equal(t, mustHash(workingRoot.HashOf()), mustHash(ws.MergeState().PreMergeWorkingRoot().HashOf()))
}
func isCWDEmpty(dEnv *DoltEnv) bool {
isEmpty := true
dEnv.FS.Iter("./", true, func(_ string, _ int64, _ bool) bool {
@@ -192,8 +249,15 @@ func isCWDEmpty(dEnv *DoltEnv) bool {
return isEmpty
}
// mustHash unwraps a (hash.Hash, error) pair, panicking on any error. Test helper.
// The parameter is named h (not hash) so it does not shadow the hash package inside the body.
func mustHash(h hash.Hash, err error) hash.Hash {
	if err != nil {
		panic(err)
	}
	return h
}
func TestBestEffortDelete(t *testing.T) {
dEnv := createTestEnv(true, true)
dEnv, _ := createTestEnv(true, true)
if isCWDEmpty(dEnv) {
t.Error("Dir should not be empty before delete.")

View File

@@ -19,6 +19,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/dbfactory"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
filesys2 "github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/store/types"
)
@@ -55,7 +56,7 @@ func (r *Remote) GetParamOrDefault(pName, defVal string) string {
}
func (r *Remote) GetRemoteDB(ctx context.Context, nbf *types.NomsBinFormat) (*doltdb.DoltDB, error) {
return doltdb.LoadDoltDBWithParams(ctx, nbf, r.Url, r.Params)
return doltdb.LoadDoltDBWithParams(ctx, nbf, r.Url, filesys2.LocalFS, r.Params)
}
func (r *Remote) GetRemoteDBWithoutCaching(ctx context.Context, nbf *types.NomsBinFormat) (*doltdb.DoltDB, error) {
@@ -64,5 +65,5 @@ func (r *Remote) GetRemoteDBWithoutCaching(ctx context.Context, nbf *types.NomsB
params[k] = v
}
params[dbfactory.NoCachingParameter] = "true"
return doltdb.LoadDoltDBWithParams(ctx, nbf, r.Url, params)
return doltdb.LoadDoltDBWithParams(ctx, nbf, r.Url, filesys2.LocalFS, params)
}

View File

@@ -65,11 +65,6 @@ type BranchConfig struct {
Remote string `json:"remote"`
}
type MergeState struct {
Commit string `json:"commit"`
PreMergeWorking string `json:"working_pre_merge"`
}
type RepoState struct {
Head ref.MarshalableRef `json:"head"`
Remotes map[string]Remote `json:"remotes"`
@@ -77,11 +72,60 @@ type RepoState struct {
// |staged|, |working|, and |merge| are legacy fields left over from when Dolt repos stored this info in the repo
// state file, not in the DB directly. They're still here so that we can migrate existing repositories forward to the
// new storage format, but they should be used only for this purpose and are no longer written.
staged string `json:"staged,omitempty"`
working string `json:"working,omitempty"`
merge *MergeState `json:"merge,omitempty"`
staged string
working string
merge *mergeState
}
// repoStateLegacy only exists to unmarshal legacy repo state files, since the JSON marshaller can't work with
// unexported fields.
type repoStateLegacy struct {
Head ref.MarshalableRef `json:"head"`
Remotes map[string]Remote `json:"remotes"`
Branches map[string]BranchConfig `json:"branches"`
// Staged, Working, and Merge are the exported counterparts of RepoState's deprecated
// unexported fields; they exist only so the JSON package can read/write them.
Staged string `json:"staged,omitempty"`
Working string `json:"working,omitempty"`
Merge *mergeState `json:"merge,omitempty"`
}
// repoStateLegacyFromRepoState creates a new repoStateLegacy from a RepoState file. Only for testing.
func repoStateLegacyFromRepoState(rs *RepoState) *repoStateLegacy {
	legacy := repoStateLegacy{}
	legacy.Head = rs.Head
	legacy.Remotes = rs.Remotes
	legacy.Branches = rs.Branches
	legacy.Staged = rs.staged
	legacy.Working = rs.working
	legacy.Merge = rs.merge
	return &legacy
}
// mergeState is the JSON representation of an in-progress merge as stored in the
// legacy repo state file: the merging commit and the pre-merge working root hash.
type mergeState struct {
Commit string `json:"commit"`
PreMergeWorking string `json:"working_pre_merge"`
}
// toRepoState converts this legacy repo state into a RepoState, copying the deprecated
// staged/working/merge values into the corresponding unexported fields.
func (rs *repoStateLegacy) toRepoState() *RepoState {
	state := RepoState{
		Head:     rs.Head,
		Remotes:  rs.Remotes,
		Branches: rs.Branches,
	}
	state.staged = rs.Staged
	state.working = rs.Working
	state.merge = rs.Merge
	return &state
}
// save writes this legacy repo state to the repo state file on the filesystem given.
// Used to produce repo state files in the legacy format (e.g. by the migration test).
func (rs *repoStateLegacy) save(fs filesys.ReadWriteFS) error {
data, err := json.MarshalIndent(rs, "", " ")
if err != nil {
return err
}
return fs.WriteFile(getRepoStateFile(), data)
}
// LoadRepoState parses the repo state file from the file system given
func LoadRepoState(fs filesys.ReadWriteFS) (*RepoState, error) {
path := getRepoStateFile()
data, err := fs.ReadFile(path)
@@ -90,14 +134,14 @@ func LoadRepoState(fs filesys.ReadWriteFS) (*RepoState, error) {
return nil, err
}
var repoState RepoState
var repoState repoStateLegacy
err = json.Unmarshal(data, &repoState)
if err != nil {
return nil, err
}
return &repoState, nil
return repoState.toRepoState(), nil
}
func CloneRepoState(fs filesys.ReadWriteFS, r Remote) (*RepoState, error) {
@@ -142,14 +186,9 @@ func CreateRepoState(fs filesys.ReadWriteFS, br string) (*RepoState, error) {
return rs, nil
}
// Save writes this repo state file to disk on the filesystem given
func (rs RepoState) Save(fs filesys.ReadWriteFS) error {
// clear deprecated fields on write
rs.merge = nil
rs.staged = ""
rs.working = ""
data, err := json.MarshalIndent(rs, "", " ")
if err != nil {
return err
}

View File

@@ -19,6 +19,7 @@ import (
"strconv"
"testing"
filesys2 "github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -263,7 +264,7 @@ func init() {
}
func setupMergeTest(t *testing.T) (types.ValueReadWriter, *doltdb.Commit, *doltdb.Commit, types.Map, types.Map) {
ddb, _ := doltdb.LoadDoltDB(context.Background(), types.Format_Default, doltdb.InMemDoltDB)
ddb, _ := doltdb.LoadDoltDB(context.Background(), types.Format_Default, doltdb.InMemDoltDB, filesys2.LocalFS)
vrw := ddb.ValueReadWriter()
err := ddb.WriteEmptyRepo(context.Background(), name, email)

View File

@@ -57,7 +57,7 @@ func createRootAndFS() (*doltdb.DoltDB, *doltdb.RootValue, filesys.Filesys) {
initialDirs := []string{testHomeDir, workingDir}
fs := filesys.NewInMemFS(initialDirs, nil, workingDir)
fs.WriteFile(testSchemaFileName, []byte(testSchema))
ddb, _ := doltdb.LoadDoltDB(context.Background(), types.Format_Default, doltdb.InMemDoltDB)
ddb, _ := doltdb.LoadDoltDB(context.Background(), types.Format_Default, doltdb.InMemDoltDB, filesys.LocalFS)
ddb.WriteEmptyRepo(context.Background(), "billy bob", "bigbillieb@fake.horse")
cs, _ := doltdb.NewCommitSpec("master")

View File

@@ -35,11 +35,13 @@ type localFS struct {
// working directory. Path relative operations occur relative to this directory.
func LocalFilesysWithWorkingDir(cwd string) (Filesys, error) {
absCWD, err := filepath.Abs(cwd)
if err != nil {
return nil, err
}
// We're going to turn this into a URL, so we need to make sure that windows separators are converted to /
absCWD = filepath.ToSlash(absCWD)
stat, err := os.Stat(absCWD)
if err != nil {