Merge pull request #1890 from dolthub/zachmu/tx-enabled

Enable SQL transactions by default, and begin writing working set to database instead of repo_state.json file
This commit is contained in:
Zach Musgrave
2021-07-06 18:12:47 -07:00
committed by GitHub
118 changed files with 4302 additions and 3233 deletions
+8 -3
View File
@@ -73,13 +73,18 @@ func (cmd AddCmd) Exec(ctx context.Context, commandStr string, args []string, dE
allFlag := apr.Contains(cli.AllFlag)
var err error
roots, err := dEnv.Roots(ctx)
if err != nil {
cli.PrintErrln(err.Error())
return 1
}
if apr.NArg() == 0 && !allFlag {
cli.Println("Nothing specified, nothing added.\n Maybe you wanted to say 'dolt add .'?")
} else if allFlag || apr.NArg() == 1 && apr.Arg(0) == "." {
err = actions.StageAllTables(ctx, dEnv.DbData())
err = actions.StageAllTables(ctx, roots, dEnv.DbData())
} else {
err = actions.StageTables(ctx, dEnv.DbData(), apr.Args())
err = actions.StageTables(ctx, roots, dEnv.DbData(), apr.Args())
}
if err != nil {
+3 -3
View File
@@ -147,7 +147,7 @@ func parseCommitSpecAndTableName(dEnv *env.DoltEnv, apr *argparser.ArgParseResul
// if passed a single arg, assume it's a table name and revision is HEAD
if apr.NArg() == 1 {
tableName := apr.Arg(0)
return dEnv.RepoState.CWBHeadSpec(), tableName, nil
return dEnv.RepoStateReader().CWBHeadSpec(), tableName, nil
}
comSpecStr := apr.Arg(0)
@@ -155,7 +155,7 @@ func parseCommitSpecAndTableName(dEnv *env.DoltEnv, apr *argparser.ArgParseResul
// support being passed -- as a revision like git does even though it's a little gross
if comSpecStr == "--" {
return dEnv.RepoState.CWBHeadSpec(), tableName, nil
return dEnv.RepoStateReader().CWBHeadSpec(), tableName, nil
}
cs, err := doltdb.NewCommitSpec(comSpecStr)
@@ -167,7 +167,7 @@ func parseCommitSpecAndTableName(dEnv *env.DoltEnv, apr *argparser.ArgParseResul
}
func runBlame(ctx context.Context, dEnv *env.DoltEnv, cs *doltdb.CommitSpec, tableName string) error {
commit, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoState.CWBHeadRef())
commit, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
return err
}
+4 -4
View File
@@ -149,7 +149,7 @@ func printBranches(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar
return HandleVErrAndExitCode(errhand.BuildDError("error: failed to read refs from db").AddCause(err).Build(), nil)
}
currentBranch := dEnv.RepoState.CWBHeadRef()
currentBranch := dEnv.RepoStateReader().CWBHeadRef()
sort.Slice(branches, func(i, j int) bool {
return branches[i].String() < branches[j].String()
})
@@ -184,7 +184,7 @@ func printBranches(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar
}
if verbose {
cm, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoState.CWBHeadRef())
cm, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
if err == nil {
h, err := cm.HashOf()
@@ -207,7 +207,7 @@ func printBranches(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar
}
func printCurrentBranch(dEnv *env.DoltEnv) int {
cli.Println(dEnv.RepoState.CWBHeadRef().GetPath())
cli.Println(dEnv.RepoStateReader().CWBHeadRef().GetPath())
return 0
}
@@ -220,7 +220,7 @@ func moveBranch(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgParseR
force := apr.Contains(forceFlag)
src := apr.Arg(0)
dest := apr.Arg(1)
err := actions.MoveBranch(ctx, dEnv, src, apr.Arg(1), force)
err := actions.RenameBranch(ctx, dEnv, src, apr.Arg(1), force)
var verr errhand.VerboseError
if err != nil {
+8 -3
View File
@@ -115,11 +115,11 @@ func (cmd CheckoutCmd) Exec(ctx context.Context, commandStr string, args []strin
// Check if the user executed `dolt checkout .`
if apr.NArg() == 1 && name == "." {
working, staged, head, err := getAllRoots(ctx, dEnv)
roots, err := dEnv.Roots(ctx)
if err != nil {
return HandleVErrAndExitCode(errhand.BuildDError(err.Error()).Build(), usagePrt)
}
verr := actions.ResetHard(ctx, dEnv, "HEAD", working, staged, head)
verr := actions.ResetHard(ctx, dEnv, "HEAD", roots)
return handleResetError(verr, usagePrt)
}
@@ -175,7 +175,12 @@ func checkoutNewBranch(ctx context.Context, dEnv *env.DoltEnv, newBranch string,
}
func checkoutTablesAndDocs(ctx context.Context, dEnv *env.DoltEnv, tables []string, docs doltdocs.Docs) errhand.VerboseError {
err := actions.CheckoutTablesAndDocs(ctx, dEnv.DbData(), tables, docs)
roots, err := dEnv.Roots(ctx)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
err = actions.CheckoutTablesAndDocs(ctx, roots, dEnv.DbData(), tables, docs)
if err != nil {
if doltdb.IsRootValUnreachable(err) {
+11 -8
View File
@@ -370,18 +370,21 @@ func cloneRemote(ctx context.Context, srcDB *doltdb.DoltDB, remoteName, branch s
}
}
h, err := dEnv.DoltDB.WriteRootValue(ctx, rootVal)
// TODO: make this interface take a DoltRef and marshal it automatically
err = dEnv.RepoStateWriter().SetCWBHeadRef(ctx, ref.MarshalableRef{Ref: ref.NewBranchRef(branch)})
if err != nil {
return errhand.BuildDError("error: could not write root value").AddCause(err).Build()
return errhand.VerboseErrorFromError(err)
}
dEnv.RepoState.Head = ref.MarshalableRef{Ref: ref.NewBranchRef(branch)}
dEnv.RepoState.Staged = h.String()
dEnv.RepoState.Working = h.String()
err = dEnv.RepoState.Save(dEnv.FS)
wsRef, err := ref.WorkingSetRefForHead(ref.NewBranchRef(branch))
if err != nil {
return errhand.BuildDError("error: failed to write repo state").AddCause(err).Build()
return errhand.VerboseErrorFromError(err)
}
ws := doltdb.EmptyWorkingSet(wsRef)
err = dEnv.UpdateWorkingSet(ctx, ws.WithWorkingRoot(rootVal).WithStagedRoot(rootVal))
if err != nil {
return errhand.VerboseErrorFromError(err)
}
return nil
+22 -7
View File
@@ -76,9 +76,13 @@ func (cmd CommitCmd) Exec(ctx context.Context, commandStr string, args []string,
// Check if the -all param is provided. Stage all tables if so.
allFlag := apr.Contains(cli.AllFlag)
var err error
roots, err := dEnv.Roots(ctx)
if err != nil {
return HandleVErrAndExitCode(errhand.BuildDError("Couldn't get working root").AddCause(err).Build(), usage)
}
if allFlag {
err = actions.StageAllTables(ctx, dEnv.DbData())
err = actions.StageAllTables(ctx, roots, dEnv.DbData())
}
if err != nil {
@@ -112,9 +116,14 @@ func (cmd CommitCmd) Exec(ctx context.Context, commandStr string, args []string,
}
}
// TODO: refactor above stage funcs to modify roots in memory instead of writing to disk
dbData := dEnv.DbData()
roots, err = dEnv.Roots(context.Background())
if err != nil {
return HandleVErrAndExitCode(errhand.BuildDError("Couldn't get working root").AddCause(err).Build(), usage)
}
_, err = actions.CommitStaged(ctx, dbData, actions.CommitStagedProps{
_, err = actions.CommitStaged(ctx, roots, dbData, actions.CommitStagedProps{
Message: msg,
Date: t,
AllowEmpty: apr.Contains(cli.AllowEmptyFlag),
@@ -194,24 +203,30 @@ func getCommitMessageFromEditor(ctx context.Context, dEnv *env.DoltEnv) string {
return finalMsg
}
// TODO: return an error here
func buildInitalCommitMsg(ctx context.Context, dEnv *env.DoltEnv) string {
initialNoColor := color.NoColor
color.NoColor = true
currBranch := dEnv.RepoState.CWBHeadRef()
stagedTblDiffs, notStagedTblDiffs, _ := diff.GetStagedUnstagedTableDeltas(ctx, dEnv.DoltDB, dEnv.RepoStateReader())
roots, err := dEnv.Roots(ctx)
if err != nil {
panic(err)
}
workingTblsInConflict, _, _, err := merge.GetTablesInConflict(ctx, dEnv.DoltDB, dEnv.RepoStateReader())
stagedTblDiffs, notStagedTblDiffs, _ := diff.GetStagedUnstagedTableDeltas(ctx, roots)
workingTblsInConflict, _, _, err := merge.GetTablesInConflict(ctx, roots)
if err != nil {
workingTblsInConflict = []string{}
}
stagedDocDiffs, notStagedDocDiffs, _ := diff.GetDocDiffs(ctx, dEnv.DoltDB, dEnv.RepoStateReader(), dEnv.DocsReadWriter())
stagedDocDiffs, notStagedDocDiffs, _ := diff.GetDocDiffs(ctx, roots, dEnv.DocsReadWriter())
buf := bytes.NewBuffer([]byte{})
n := printStagedDiffs(buf, stagedTblDiffs, stagedDocDiffs, true)
n = printDiffsNotStaged(ctx, dEnv, buf, notStagedTblDiffs, notStagedDocDiffs, true, n, workingTblsInConflict)
currBranch := dEnv.RepoStateReader().CWBHeadRef()
initialCommitMessage := "\n" + "# Please enter the commit message for your changes. Lines starting" + "\n" +
"# with '#' will be ignored, and an empty message aborts the commit." + "\n# On branch " + currBranch.GetPath() + "\n#" + "\n"
+1 -1
View File
@@ -364,7 +364,7 @@ func maybeResolve(ctx context.Context, dEnv *env.DoltEnv, spec string) (*doltdb.
return nil, false
}
cm, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoState.CWBHeadRef())
cm, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
return nil, false
}
+35 -8
View File
@@ -38,6 +38,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/rebase"
dsqle "github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/utils/argparser"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/libraries/utils/tracing"
@@ -142,7 +143,7 @@ func getNerf(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgParseResu
return nil, err
}
cm, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoState.CWBHeadRef())
cm, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
return nil, err
}
@@ -156,7 +157,7 @@ func processFilterQuery(ctx context.Context, dEnv *env.DoltEnv, cm *doltdb.Commi
return nil, err
}
sqlCtx, eng, err := monoSqlEngine(ctx, dEnv, cm)
sqlCtx, eng, err := rebaseSqlEngine(ctx, dEnv, cm)
if err != nil {
return nil, err
}
@@ -228,16 +229,24 @@ func processFilterQuery(ctx context.Context, dEnv *env.DoltEnv, cm *doltdb.Commi
return roots[dbName], nil
}
// monoSqlEngine packages up the context necessary to run sql queries against single root.
func monoSqlEngine(ctx context.Context, dEnv *env.DoltEnv, cm *doltdb.Commit) (*sql.Context, *sqlEngine, error) {
dsess := dsqle.DefaultDoltSession()
// rebaseSqlEngine packages up the context necessary to run sql queries against single root
// The SQL engine returned has transactions disabled. This is to prevent transactions starts from overwriting the root
// we set manually with the one at the working set of the HEAD being rebased.
// Some functionality will not work on this kind of engine, e.g. many DOLT_ functions.
func rebaseSqlEngine(ctx context.Context, dEnv *env.DoltEnv, cm *doltdb.Commit) (*sql.Context, *sqlEngine, error) {
sess := dsess.DefaultSession()
sqlCtx := sql.NewContext(ctx,
sql.WithSession(dsess),
sql.WithSession(sess),
sql.WithIndexRegistry(sql.NewIndexRegistry()),
sql.WithViewRegistry(sql.NewViewRegistry()),
sql.WithTracer(tracing.Tracer(ctx)))
err := sqlCtx.SetSessionVariable(sqlCtx, sql.AutoCommitSessionVar, true)
err := sqlCtx.SetSessionVariable(sqlCtx, sql.AutoCommitSessionVar, false)
if err != nil {
return nil, nil, err
}
err = sqlCtx.SetSessionVariable(sqlCtx, dsess.TransactionsDisabledSysVar, true)
if err != nil {
return nil, nil, err
}
@@ -256,7 +265,25 @@ func monoSqlEngine(ctx context.Context, dEnv *env.DoltEnv, cm *doltdb.Commit) (*
engine := sqle.New(cat, azr, &sqle.Config{Auth: new(auth.None)})
engine.AddDatabase(db)
err = dsess.AddDB(sqlCtx, db, db.DbData())
head := dEnv.RepoStateReader().CWBHeadSpec()
headCommit, err := dEnv.DoltDB.Resolve(ctx, head, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
return nil, nil, err
}
ws, err := dEnv.WorkingSet(ctx)
if err != nil {
return nil, nil, err
}
dbState := dsess.InitialDbState{
Db: db,
HeadCommit: headCommit,
WorkingSet: ws,
DbData: dEnv.DbData(),
}
err = sess.AddDB(sqlCtx, dbState)
if err != nil {
return nil, nil, err
}
+1 -1
View File
@@ -114,7 +114,7 @@ func (cmd GarbageCollectionCmd) Exec(ctx context.Context, commandStr string, arg
return HandleVErrAndExitCode(verr, usage)
}
keepers, err := env.GetGCKeepers(ctx, dEnv.RepoStateReader(), dEnv.DoltDB)
keepers, err := env.GetGCKeepers(ctx, dEnv)
if err != nil {
verr = errhand.BuildDError("an error occurred while saving working set").AddCause(err).Build()
return HandleVErrAndExitCode(verr, usage)
+4 -4
View File
@@ -163,7 +163,7 @@ func logWithLoggerFunc(ctx context.Context, commandStr string, args []string, dE
// Just dolt log
if apr.NArg() == 0 {
return logCommits(ctx, dEnv, dEnv.RepoState.CWBHeadSpec(), opts, loggerFunc)
return logCommits(ctx, dEnv, dEnv.RepoStateReader().CWBHeadSpec(), opts, loggerFunc)
} else if apr.NArg() == 1 { // dolt log <ref/table>
argIsRef := actions.ValidateIsRef(ctx, apr.Arg(0), dEnv.DoltDB, dEnv.RepoStateReader())
@@ -174,7 +174,7 @@ func logWithLoggerFunc(ctx context.Context, commandStr string, args []string, dE
}
return logCommits(ctx, dEnv, cs, opts, loggerFunc)
} else {
return handleErrAndExit(logTableCommits(ctx, dEnv, opts, loggerFunc, dEnv.RepoState.CWBHeadSpec(), apr.Arg(0)))
return handleErrAndExit(logTableCommits(ctx, dEnv, opts, loggerFunc, dEnv.RepoStateReader().CWBHeadSpec(), apr.Arg(0)))
}
} else { // dolt log ref table
cs, err := doltdb.NewCommitSpec(apr.Arg(0))
@@ -186,7 +186,7 @@ func logWithLoggerFunc(ctx context.Context, commandStr string, args []string, dE
}
func logCommits(ctx context.Context, dEnv *env.DoltEnv, cs *doltdb.CommitSpec, opts logOpts, loggerFunc commitLoggerFunc) int {
commit, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoState.CWBHeadRef())
commit, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
cli.PrintErrln(color.HiRedString("Fatal error: cannot get HEAD commit for current branch."))
@@ -258,7 +258,7 @@ func tableExists(ctx context.Context, commit *doltdb.Commit, tableName string) (
}
func logTableCommits(ctx context.Context, dEnv *env.DoltEnv, opts logOpts, loggerFunc commitLoggerFunc, cs *doltdb.CommitSpec, tableName string) error {
commit, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoState.CWBHeadRef())
commit, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
return err
}
+1 -1
View File
@@ -123,7 +123,7 @@ func getRootForCommitSpecStr(ctx context.Context, csStr string, dEnv *env.DoltEn
return "", nil, bdr.AddCause(err).Build()
}
cm, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoState.CWBHeadRef())
cm, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
return "", nil, errhand.BuildDError(`Unable to resolve "%s"`, csStr).AddCause(err).Build()
+64 -40
View File
@@ -92,7 +92,13 @@ func (cmd MergeCmd) Exec(ctx context.Context, commandStr string, args []string,
var verr errhand.VerboseError
if apr.Contains(cli.AbortParam) {
if !dEnv.IsMergeActive() {
mergeActive, err := dEnv.IsMergeActive(ctx)
if err != nil {
cli.PrintErrln("fatal:", err.Error())
return 1
}
if !mergeActive {
cli.PrintErrln("fatal: There is no merge to abort")
return 1
}
@@ -110,6 +116,12 @@ func (cmd MergeCmd) Exec(ctx context.Context, commandStr string, args []string,
root, verr = GetWorkingWithVErr(dEnv)
if verr == nil {
mergeActive, err := dEnv.IsMergeActive(ctx)
if err != nil {
cli.PrintErrln(err.Error())
return 1
}
if has, err := root.HasConflicts(ctx); err != nil {
verr = errhand.BuildDError("error: failed to get conflicts").AddCause(err).Build()
} else if has {
@@ -118,7 +130,7 @@ func (cmd MergeCmd) Exec(ctx context.Context, commandStr string, args []string,
cli.Println("hint: as appropriate to mark resolution and make a commit.")
cli.Println("fatal: Exiting because of an unresolved conflict.")
return 1
} else if dEnv.IsMergeActive() {
} else if mergeActive {
cli.Println("error: Merging is not possible because you have not committed an active merge.")
cli.Println("hint: add affected tables using 'dolt add <table>' and commit using 'dolt commit -m <msg>'")
cli.Println("fatal: Exiting because of active merge")
@@ -135,10 +147,14 @@ func (cmd MergeCmd) Exec(ctx context.Context, commandStr string, args []string,
}
func abortMerge(ctx context.Context, doltEnv *env.DoltEnv) errhand.VerboseError {
err := actions.CheckoutAllTables(ctx, doltEnv.DbData())
roots, err := doltEnv.Roots(ctx)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
err = actions.CheckoutAllTables(ctx, roots, doltEnv.DbData())
if err == nil {
err = doltEnv.RepoState.AbortMerge(doltEnv.FS)
err = doltEnv.AbortMerge(ctx)
if err == nil {
return nil
@@ -185,7 +201,12 @@ func mergeCommitSpec(ctx context.Context, apr *argparser.ArgParseResults, dEnv *
cli.Println("Squash commit -- not updating HEAD")
}
tblNames, workingDiffs, err := env.MergeWouldStompChanges(ctx, cm2, dEnv.DbData())
roots, err := dEnv.Roots(ctx)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
tblNames, workingDiffs, err := env.MergeWouldStompChanges(ctx, roots.Working, cm2, dEnv.DbData())
if err != nil {
return errhand.BuildDError("error: failed to determine mergability.").AddCause(err).Build()
@@ -202,7 +223,7 @@ func mergeCommitSpec(ctx context.Context, apr *argparser.ArgParseResults, dEnv *
if ok, err := cm1.CanFastForwardTo(ctx, cm2); ok {
if apr.Contains(cli.NoFFParam) {
return execNoFFMerge(ctx, apr, dEnv, cm2, verr, workingDiffs)
return execNoFFMerge(ctx, apr, dEnv, roots, cm2, verr, workingDiffs)
} else {
return executeFFMerge(ctx, squash, dEnv, cm2, workingDiffs)
}
@@ -214,7 +235,7 @@ func mergeCommitSpec(ctx context.Context, apr *argparser.ArgParseResults, dEnv *
}
}
func execNoFFMerge(ctx context.Context, apr *argparser.ArgParseResults, dEnv *env.DoltEnv, cm2 *doltdb.Commit, verr errhand.VerboseError, workingDiffs map[string]hash.Hash) errhand.VerboseError {
func execNoFFMerge(ctx context.Context, apr *argparser.ArgParseResults, dEnv *env.DoltEnv, roots doltdb.Roots, cm2 *doltdb.Commit, verr errhand.VerboseError, workingDiffs map[string]hash.Hash) errhand.VerboseError {
mergedRoot, err := cm2.GetRootValue()
if err != nil {
@@ -248,9 +269,13 @@ func execNoFFMerge(ctx context.Context, apr *argparser.ArgParseResults, dEnv *en
return errhand.BuildDError("error: committing").AddCause(err).Build()
}
dbData := dEnv.DbData()
// Reload roots since the above method writes new values to the working set
roots, err = dEnv.Roots(ctx)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
_, err = actions.CommitStaged(ctx, dbData, actions.CommitStagedProps{
_, err = actions.CommitStaged(ctx, roots, dEnv.DbData(), actions.CommitStagedProps{
Message: msg,
Date: t,
AllowEmpty: apr.Contains(cli.AllowEmptyFlag),
@@ -279,52 +304,48 @@ func applyChanges(ctx context.Context, root *doltdb.RootValue, workingDiffs map[
return root, nil
}
func executeFFMerge(ctx context.Context, squash bool, dEnv *env.DoltEnv, cm2 *doltdb.Commit, workingDiffs map[string]hash.Hash) errhand.VerboseError {
func executeFFMerge(
ctx context.Context,
squash bool,
dEnv *env.DoltEnv,
mergeCommit *doltdb.Commit,
workingDiffs map[string]hash.Hash,
) errhand.VerboseError {
cli.Println("Fast-forward")
rv, err := cm2.GetRootValue()
stagedRoot, err := mergeCommit.GetRootValue()
if err != nil {
return errhand.BuildDError("error: failed to get root value").AddCause(err).Build()
}
stagedHash, err := dEnv.DoltDB.WriteRootValue(ctx, rv)
if err != nil {
return errhand.BuildDError("Failed to write database").AddCause(err).Build()
}
workingHash := stagedHash
workingRoot := stagedRoot
if len(workingDiffs) > 0 {
rv, err = applyChanges(ctx, rv, workingDiffs)
workingRoot, err = applyChanges(ctx, stagedRoot, workingDiffs)
if err != nil {
return errhand.BuildDError("Failed to re-apply working changes.").AddCause(err).Build()
}
workingHash, err = dEnv.DoltDB.WriteRootValue(ctx, rv)
if err != nil {
return errhand.BuildDError("Failed to write database").AddCause(err).Build()
}
}
unstagedDocs, err := actions.GetUnstagedDocs(ctx, dEnv.DbData())
unstagedDocs, err := actions.GetUnstagedDocs(ctx, dEnv)
if err != nil {
return errhand.BuildDError("error: unable to determine unstaged docs").AddCause(err).Build()
}
if !squash {
err = dEnv.DoltDB.FastForward(ctx, dEnv.RepoState.CWBHeadRef(), cm2)
err = dEnv.DoltDB.FastForward(ctx, dEnv.RepoStateReader().CWBHeadRef(), mergeCommit)
if err != nil {
return errhand.BuildDError("Failed to write database").AddCause(err).Build()
}
}
dEnv.RepoState.Working = workingHash.String()
dEnv.RepoState.Staged = stagedHash.String()
workingSet, err := dEnv.WorkingSet(ctx)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
err = dEnv.RepoState.Save(dEnv.FS)
err = dEnv.UpdateWorkingSet(ctx, workingSet.WithWorkingRoot(workingRoot).WithStagedRoot(stagedRoot))
if err != nil {
return errhand.BuildDError("unable to execute repo state update.").
AddDetails(`As a result your .dolt/repo_state.json file may have invalid values for "staged" and "working".
@@ -459,7 +480,16 @@ func fkConstraintWarning(ctx context.Context, cm1, cm2 *doltdb.Commit) errhand.V
return nil
}
func mergedRootToWorking(ctx context.Context, squash bool, dEnv *env.DoltEnv, mergedRoot *doltdb.RootValue, workingDiffs map[string]hash.Hash, cm2 *doltdb.Commit, tblToStats map[string]*merge.MergeStats) errhand.VerboseError {
// TODO: change this to be functional and not write to repo state
func mergedRootToWorking(
ctx context.Context,
squash bool,
dEnv *env.DoltEnv,
mergedRoot *doltdb.RootValue,
workingDiffs map[string]hash.Hash,
cm2 *doltdb.Commit,
tblToStats map[string]*merge.MergeStats,
) errhand.VerboseError {
var err error
workingRoot := mergedRoot
@@ -471,21 +501,15 @@ func mergedRootToWorking(ctx context.Context, squash bool, dEnv *env.DoltEnv, me
}
}
h2, err := cm2.HashOf()
if err != nil {
return errhand.BuildDError("error: failed to hash commit").AddCause(err).Build()
}
if !squash {
err = dEnv.RepoState.StartMerge(h2.String(), dEnv.FS)
err = dEnv.StartMerge(ctx, cm2)
if err != nil {
return errhand.BuildDError("Unable to update the repo state").AddCause(err).Build()
}
}
unstagedDocs, err := actions.GetUnstagedDocs(ctx, dEnv.DbData())
unstagedDocs, err := actions.GetUnstagedDocs(ctx, dEnv)
if err != nil {
return errhand.BuildDError("error: failed to determine unstaged docs").AddCause(err).Build()
}
@@ -502,7 +526,7 @@ func mergedRootToWorking(ctx context.Context, squash bool, dEnv *env.DoltEnv, me
if err != nil {
return errhand.BuildDError("error: failed to update docs to the new working root").AddCause(err).Build()
}
verr = UpdateStagedWithVErr(dEnv.DoltDB, dEnv.RepoStateWriter(), mergedRoot)
verr = UpdateStagedWithVErr(dEnv, mergedRoot)
if verr != nil {
// Log a new message here to indicate that merge was successful, only staging failed.
cli.Println("Unable to stage changes: add and commit to finish merge")
+1 -1
View File
@@ -82,7 +82,7 @@ func pullFromRemote(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPa
return errhand.BuildDError("dolt pull takes at most one arg").SetPrintUsage().Build()
}
branch := dEnv.RepoState.CWBHeadRef()
branch := dEnv.RepoStateReader().CWBHeadRef()
var remoteName string
if apr.NArg() == 1 {
+2 -2
View File
@@ -134,7 +134,7 @@ func parsePushArgs(ctx context.Context, apr *argparser.ArgParseResults, dEnv *en
}
remote, remoteOK := remotes[remoteName]
currentBranch := dEnv.RepoState.CWBHeadRef()
currentBranch := dEnv.RepoStateReader().CWBHeadRef()
upstream, hasUpstream := dEnv.RepoState.Branches[currentBranch.GetPath()]
var refSpec ref.RefSpec
@@ -373,7 +373,7 @@ func pushToRemoteBranch(ctx context.Context, dEnv *env.DoltEnv, mode ref.UpdateM
}
cs, _ := doltdb.NewCommitSpec(srcRef.GetPath())
cm, err := localDB.Resolve(ctx, cs, dEnv.RepoState.CWBHeadRef())
cm, err := localDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
return errhand.BuildDError("error: refspec '%v' not found.", srcRef.GetPath()).Build()
+33 -33
View File
@@ -84,46 +84,46 @@ func (cmd ResetCmd) Exec(ctx context.Context, commandStr string, args []string,
return HandleDocTableVErrAndExitCode()
}
workingRoot, stagedRoot, headRoot, verr := getAllRoots(ctx, dEnv)
roots, err := dEnv.Roots(ctx)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
var err error
if verr == nil {
if apr.ContainsAll(HardResetParam, SoftResetParam) {
verr = errhand.BuildDError("error: --%s and --%s are mutually exclusive options.", HardResetParam, SoftResetParam).Build()
HandleVErrAndExitCode(verr, usage)
} else if apr.Contains(HardResetParam) {
arg := ""
if apr.NArg() > 1 {
return handleResetError(fmt.Errorf("--hard supports at most one additional param"), usage)
} else if apr.NArg() == 1 {
arg = apr.Arg(0)
}
if apr.ContainsAll(HardResetParam, SoftResetParam) {
verr := errhand.BuildDError("error: --%s and --%s are mutually exclusive options.", HardResetParam, SoftResetParam).Build()
HandleVErrAndExitCode(verr, usage)
} else if apr.Contains(HardResetParam) {
arg := ""
if apr.NArg() > 1 {
return handleResetError(fmt.Errorf("--hard supports at most one additional param"), usage)
} else if apr.NArg() == 1 {
arg = apr.Arg(0)
}
err = actions.ResetHard(ctx, dEnv, arg, workingRoot, stagedRoot, headRoot)
} else {
// Check whether the input argument is a ref.
if apr.NArg() == 1 {
argToCheck := apr.Arg(0)
err = actions.ResetHard(ctx, dEnv, arg, roots)
} else {
// Check whether the input argument is a ref.
if apr.NArg() == 1 {
argToCheck := apr.Arg(0)
ok := actions.ValidateIsRef(ctx, argToCheck, dEnv.DoltDB, dEnv.RepoStateReader())
ok := actions.ValidateIsRef(ctx, argToCheck, dEnv.DoltDB, dEnv.RepoStateReader())
// This is a valid ref
if ok {
err = actions.ResetSoftToRef(ctx, dEnv.DbData(), apr.Arg(0))
return handleResetError(err, usage)
}
}
tables := apr.Args()
stagedRoot, err = actions.ResetSoft(ctx, dEnv.DbData(), tables, stagedRoot, headRoot)
if err != nil {
// This is a valid ref
if ok {
err = actions.ResetSoftToRef(ctx, dEnv.DbData(), apr.Arg(0))
return handleResetError(err, usage)
}
printNotStaged(ctx, dEnv, stagedRoot)
}
tables := apr.Args()
roots.Staged, err = actions.ResetSoft(ctx, dEnv.DbData(), tables, roots)
if err != nil {
return handleResetError(err, usage)
}
printNotStaged(ctx, dEnv, roots.Staged)
}
return handleResetError(err, usage)
+179 -152
View File
@@ -49,6 +49,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
dsqle "github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dtables"
"github.com/dolthub/dolt/go/libraries/utils/argparser"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
@@ -73,12 +74,7 @@ var sqlDocs = cli.CommandDocumentationContent{
By default, {{.EmphasisLeft}}-q{{.EmphasisRight}} executes a single statement. To execute multiple SQL statements separated by semicolons, use {{.EmphasisLeft}}-b{{.EmphasisRight}} to enable batch mode. Queries can be saved with {{.EmphasisLeft}}-s{{.EmphasisRight}}. Alternatively {{.EmphasisLeft}}-x{{.EmphasisRight}} can be used to execute a saved query by name. Pipe SQL statements to dolt sql (no {{.EmphasisLeft}}-q{{.EmphasisRight}}) to execute a SQL import or update script.
By default this command uses the dolt data repository in the current working directory as the one and only database. Running with {{.EmphasisLeft}}--multi-db-dir <directory>{{.EmphasisRight}} uses each of the subdirectories of the supplied directory (each subdirectory must be a valid dolt data repository) as databases. Subdirectories starting with '.' are ignored. Known limitations:
- No support for creating indexes
- No support for foreign keys
- No support for column constraints besides NOT NULL
- No support for default values
- Joins can only use indexes for two table joins. Three or more tables in a join query will use a non-indexed join, which is very slow.`,
By default this command uses the dolt data repository in the current working directory as the one and only database. Running with {{.EmphasisLeft}}--multi-db-dir <directory>{{.EmphasisRight}} uses each of the subdirectories of the supplied directory (each subdirectory must be a valid dolt data repository) as databases. Subdirectories starting with '.' are ignored.`,
Synopsis: []string{
"[--multi-db-dir {{.LessThan}}directory{{.GreaterThan}}] [-r {{.LessThan}}result format{{.GreaterThan}}]",
@@ -90,16 +86,17 @@ By default this command uses the dolt data repository in the current working dir
}
const (
QueryFlag = "query"
FormatFlag = "result-format"
saveFlag = "save"
executeFlag = "execute"
listSavedFlag = "list-saved"
messageFlag = "message"
BatchFlag = "batch"
multiDBDirFlag = "multi-db-dir"
continueFlag = "continue"
welcomeMsg = `# Welcome to the DoltSQL shell.
QueryFlag = "query"
FormatFlag = "result-format"
saveFlag = "save"
executeFlag = "execute"
listSavedFlag = "list-saved"
messageFlag = "message"
BatchFlag = "batch"
disableBatchFlag = "disable-batch"
multiDBDirFlag = "multi-db-dir"
continueFlag = "continue"
welcomeMsg = `# Welcome to the DoltSQL shell.
# Statements must be terminated with ';'.
# "exit" or "quit" (or Ctrl-D) to exit.`
)
@@ -123,7 +120,7 @@ type SqlCmd struct {
VersionStr string
}
// Name is returns the name of the Dolt cli command. This is what is used on the command line to invoke the command
// Name returns the name of the Dolt cli command. This is what is used on the command line to invoke the command
func (cmd SqlCmd) Name() string {
return "sql"
}
@@ -149,6 +146,7 @@ func (cmd SqlCmd) createArgParser() *argparser.ArgParser {
ap.SupportsFlag(listSavedFlag, "l", "Lists all saved queries")
ap.SupportsString(messageFlag, "m", "saved query description", "Used with --query and --save, saves the query with the descriptive message given. See also --name")
ap.SupportsFlag(BatchFlag, "b", "batch mode, to run more than one query with --query, separated by ';'. Piping input to sql with no arguments also uses batch mode")
ap.SupportsFlag(disableBatchFlag, "", "When issuing multiple statements, used to override more efficient batch processing to give finer control over session")
ap.SupportsString(multiDBDirFlag, "", "directory", "Defines a directory whose subdirectories should all be dolt data repositories accessible as independent databases within ")
ap.SupportsFlag(continueFlag, "c", "continue running queries on an error. Used for batch mode only.")
return ap
@@ -168,6 +166,8 @@ func (cmd SqlCmd) RequiresRepo() bool {
}
// Exec executes the command
// Unlike other commands, sql doesn't set a new working root directly, as the SQL layer updates the working set as
// necessary when committing work.
func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv) int {
ap := cmd.createArgParser()
help, usage := cli.HelpAndUsagePrinters(cli.GetCommandDocumentation(commandStr, sqlDocs, ap))
@@ -189,7 +189,10 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
}
}
dsess := dsqle.DefaultDoltSession()
sess := dsess.DefaultSession()
// TODO: not having user and email for this command should probably be an error or warning, it disables certain functionality
sess.Username = *dEnv.Config.GetStringOrDefault(env.UserNameKey, "")
sess.Email = *dEnv.Config.GetStringOrDefault(env.UserEmailKey, "")
var mrEnv env.MultiRepoEnv
var initialRoots map[string]*doltdb.RootValue
@@ -211,7 +214,7 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
return HandleVErrAndExitCode(errhand.BuildDError("Invalid commit %s", apr.Arg(0)).SetPrintUsage().Build(), usage)
}
cm, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoState.CWBHeadRef())
cm, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
return HandleVErrAndExitCode(errhand.BuildDError("Invalid commit %s", apr.Arg(0)).SetPrintUsage().Build(), usage)
@@ -235,9 +238,6 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
}
dsess.Username = *dEnv.Config.GetStringOrDefault(env.UserNameKey, "")
dsess.Email = *dEnv.Config.GetStringOrDefault(env.UserEmailKey, "")
} else {
if apr.NArg() > 0 {
return HandleVErrAndExitCode(errhand.BuildDError("Specifying a commit is not compatible with the --multi-db-dir flag.").SetPrintUsage().Build(), usage)
@@ -257,7 +257,7 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
}
sqlCtx := sql.NewContext(ctx,
sql.WithSession(dsess),
sql.WithSession(sess),
sql.WithIndexRegistry(sql.NewIndexRegistry()),
sql.WithViewRegistry(sql.NewViewRegistry()),
sql.WithTracer(tracing.Tracer(ctx)))
@@ -283,8 +283,12 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
_, continueOnError := apr.GetValue(continueFlag)
if query, queryOK := apr.GetValue(QueryFlag); queryOK {
batchMode := apr.Contains(BatchFlag)
multiStatementMode := apr.Contains(disableBatchFlag)
if batchMode {
if multiStatementMode {
batchInput := strings.NewReader(query)
verr = execMultiStatements(sqlCtx, readOnly, continueOnError, mrEnv, roots, batchInput, format)
} else if batchMode {
batchInput := strings.NewReader(query)
verr = execBatch(sqlCtx, readOnly, continueOnError, mrEnv, roots, batchInput, format)
} else {
@@ -328,6 +332,7 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
} else {
// Run in either batch mode for piped input, or shell mode for interactive
runInBatchMode := true
multiStatementMode := apr.Contains(disableBatchFlag)
fi, err := os.Stdin.Stat()
if err != nil {
@@ -338,7 +343,9 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
runInBatchMode = fi.Mode()&os.ModeCharDevice == 0
}
if runInBatchMode {
if multiStatementMode {
verr = execMultiStatements(sqlCtx, readOnly, continueOnError, mrEnv, roots, os.Stdin, format)
} else if runInBatchMode {
verr = execBatch(sqlCtx, readOnly, continueOnError, mrEnv, roots, os.Stdin, format)
} else {
verr = execShell(sqlCtx, readOnly, mrEnv, roots, format)
@@ -354,7 +361,7 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE
func parseCommitSpec(dEnv *env.DoltEnv, apr *argparser.ArgParseResults) (*doltdb.CommitSpec, error) {
if apr.NArg() == 0 || apr.Arg(0) == "--" {
return dEnv.RepoState.CWBHeadSpec(), nil
return dEnv.RepoStateReader().CWBHeadSpec(), nil
}
comSpecStr := apr.Arg(0)
@@ -388,37 +395,63 @@ func execBatch(sqlCtx *sql.Context, readOnly bool, continueOnErr bool, mrEnv env
return errhand.VerboseErrorFromError(err)
}
// In batch mode, we need to set a couple flags on the session to prevent flushes to disk after every commit
dsqle.DSessFromSess(sqlCtx.Session).EnableBatchedMode()
err = sqlCtx.Session.SetSessionVariable(sqlCtx, sql.AutoCommitSessionVar, true)
// In batch mode, we need to set a couple flags on the session to prevent constant flushes to disk
dsess.DSessFromSess(sqlCtx.Session).EnableBatchedMode()
err = sqlCtx.Session.SetSessionVariable(sqlCtx, sql.AutoCommitSessionVar, false)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
err = runBatchMode(sqlCtx, se, batchInput, continueOnErr)
if err != nil {
// If we encounter an error, flush what we have so far to disk before exiting, except in the case of merge
// errors, which have already updated the repo state all they're going to (and writing session root on top of
// them would overwrite these changes)
// TODO: this is a mess, merge conflicts need to follow the same code path as everything else
if err == doltdb.ErrUnresolvedConflicts || err == doltdb.ErrMergeActive {
return errhand.BuildDError("Error processing batch").Build()
// If we encounter an error, attempt to flush what we have so far to disk before exiting
flushErr := flushBatchedEdits(sqlCtx, se)
if flushErr != nil {
cli.PrintErrf("Could not flush batch: %s", err.Error())
}
_ = flushBatchedEdits(sqlCtx, se)
_ = writeRoots(sqlCtx, se, mrEnv, roots)
return errhand.BuildDError("Error processing batch").Build()
return errhand.BuildDError("Error processing batch").AddCause(err).Build()
}
return writeRoots(sqlCtx, se, mrEnv, roots)
return nil
}
func execMultiStatements(
sqlCtx *sql.Context,
readOnly bool,
continueOnErr bool,
mrEnv env.MultiRepoEnv,
roots map[string]*doltdb.RootValue,
batchInput io.Reader,
format resultFormat,
) errhand.VerboseError {
dbs := CollectDBs(mrEnv)
se, err := newSqlEngine(sqlCtx, readOnly, mrEnv, roots, format, dbs...)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
err = runMultiStatementMode(sqlCtx, se, batchInput, continueOnErr)
if err != nil {
// If we encounter an error, attempt to flush what we have so far to disk before exiting
return errhand.BuildDError("Error processing batch").AddCause(err).Build()
}
return errhand.VerboseErrorFromError(err)
}
func newDatabase(name string, dEnv *env.DoltEnv) dsqle.Database {
return dsqle.NewDatabase(name, dEnv.DbData())
}
func execQuery(sqlCtx *sql.Context, readOnly bool, mrEnv env.MultiRepoEnv, roots map[string]*doltdb.RootValue, query string, format resultFormat) errhand.VerboseError {
func execQuery(
sqlCtx *sql.Context,
readOnly bool,
mrEnv env.MultiRepoEnv,
roots map[string]*doltdb.RootValue,
query string,
format resultFormat,
) errhand.VerboseError {
dbs := CollectDBs(mrEnv)
se, err := newSqlEngine(sqlCtx, readOnly, mrEnv, roots, format, dbs...)
if err != nil {
@@ -437,7 +470,7 @@ func execQuery(sqlCtx *sql.Context, readOnly bool, mrEnv env.MultiRepoEnv, roots
}
}
return writeRoots(sqlCtx, se, mrEnv, roots)
return nil
}
// CollectDBs takes a MultiRepoEnv and creates Database objects from each environment and returns a slice of these
@@ -506,7 +539,7 @@ func formatQueryError(message string, err error) errhand.VerboseError {
return verrBuilder.Build()
} else {
if len(message) > 0 {
err = fmt.Errorf("%s: %s", message, err.Error())
err = fmt.Errorf("%s: %+v", message, err)
}
return errhand.VerboseErrorFromError(err)
}
@@ -607,6 +640,59 @@ func saveQuery(ctx context.Context, root *doltdb.RootValue, query string, name s
return newRoot, nil
}
// runMultiStatementMode alows for the execution of more than one query, but it doesn't attempt any batch optimizations
func runMultiStatementMode(ctx *sql.Context, se *sqlEngine, input io.Reader, continueOnErr bool) error {
scanner := NewSqlStatementScanner(input)
var query string
for scanner.Scan() {
query += scanner.Text()
if len(query) == 0 || query == "\n" {
continue
}
shouldProcessQuery := true
if matches := delimiterRegex.FindStringSubmatch(query); len(matches) == 3 {
// If we don't match from anything, then we just pass to the SQL engine and let it complain.
scanner.Delimiter = matches[1]
shouldProcessQuery = false
}
if shouldProcessQuery {
sqlSch, rowIter, err := processQuery(ctx, query, se)
if err != nil {
verr := formatQueryError(fmt.Sprintf("error on line %d for query %s", scanner.statementStartLine, query), err)
cli.PrintErrln(verr.Verbose())
// If continueOnErr is set keep executing the remaining queries but print the error out anyway.
if !continueOnErr {
return err
}
}
if rowIter != nil {
err = PrettyPrintResults(ctx, se.resultFormat, sqlSch, rowIter, HasTopLevelOrderByClause(query))
if err != nil {
return errhand.VerboseErrorFromError(err)
}
if err != nil {
verr := formatQueryError(fmt.Sprintf("error on line %d for query %s", scanner.statementStartLine, query), err)
cli.PrintErrln(verr.Verbose())
// If continueOnErr is set keep executing the remaining queries but print the error out anyway.
if !continueOnErr {
return err
}
}
}
}
query = ""
}
cli.Println() // need a newline after all statements are executed
if err := scanner.Err(); err != nil {
cli.Println(err.Error())
}
return nil
}
// runBatchMode processes queries until EOF. The Root of the sqlEngine may be updated.
func runBatchMode(ctx *sql.Context, se *sqlEngine, input io.Reader, continueOnErr bool) error {
scanner := NewSqlStatementScanner(input)
@@ -627,9 +713,9 @@ func runBatchMode(ctx *sql.Context, se *sqlEngine, input io.Reader, continueOnEr
if err := processBatchQuery(ctx, query, se); err != nil {
// TODO: this line number will not be accurate for errors that occur when flushing a batch of inserts (as opposed
// to processing the query)
// If continueOnErr is set keep executing the remaining queries but print the error out anyway.
verr := formatQueryError(fmt.Sprintf("error on line %d for query %s", scanner.statementStartLine, query), err)
cli.PrintErrln(verr.Verbose())
// If continueOnErr is set keep executing the remaining queries but print the error out anyway.
if !continueOnErr {
return err
}
@@ -744,13 +830,6 @@ func runShell(ctx *sql.Context, se *sqlEngine, mrEnv env.MultiRepoEnv, initialRo
shell.Println(color.RedString(err.Error()))
}
}
if err == nil {
returnedVerr = writeRoots(ctx, se, mrEnv, initialRoots)
if returnedVerr != nil {
return
}
}
}
currPrompt := fmt.Sprintf("%s> ", ctx.GetCurrentDatabase())
@@ -764,31 +843,6 @@ func runShell(ctx *sql.Context, se *sqlEngine, mrEnv env.MultiRepoEnv, initialRo
return returnedVerr
}
// writeRoots updates the working root values using the sql context, the sql engine, a multi repo env and a root_val map.
func writeRoots(ctx *sql.Context, se *sqlEngine, mrEnv env.MultiRepoEnv, initialRoots map[string]*doltdb.RootValue) errhand.VerboseError {
roots, err := se.getRoots(ctx)
if err != nil {
return errhand.BuildDError("failed to get roots").AddCause(err).Build()
}
// If the SQL session wrote a new root value, update the working set with it
var verr errhand.VerboseError
for name, origRoot := range initialRoots {
root := roots[name]
if origRoot != root {
currEnv := mrEnv[name]
verr = UpdateWorkingWithVErr(currEnv, root)
if verr != nil {
return verr
}
}
}
return verr
}
// Returns a new auto completer with table names, column names, and SQL keywords.
func newCompleter(ctx context.Context, dEnv *env.DoltEnv) (*sqlCompleter, error) {
// TODO: change the sqlCompleter based on the current database and change it when the database changes.
@@ -985,45 +1039,19 @@ func (s *stats) shouldFlush() bool {
return s.unflushedEdits >= maxBatchSize
}
// updateRepoState takes in a context and database and updates repo state.
func updateRepoState(ctx *sql.Context, se *sqlEngine) error {
err := se.iterDBs(func(_ string, db dsqle.Database) (bool, error) {
root, err := db.GetRoot(ctx)
if err != nil {
return false, err
}
h, err := root.HashOf()
if err != nil {
return false, err
}
dsess := dsqle.DSessFromSess(ctx.Session)
rsw, ok := dsess.GetDoltDBRepoStateWriter(db.Name())
if ok {
err = rsw.SetWorkingHash(ctx, h)
if err != nil {
return false, err
}
}
ddb, ok := dsess.GetDoltDB(db.Name())
if ok {
_, err = ddb.WriteRootValue(ctx, root)
if err != nil {
return false, err
}
}
return false, nil
})
return err
}
func flushBatchedEdits(ctx *sql.Context, se *sqlEngine) error {
err := se.iterDBs(func(_ string, db dsqle.Database) (bool, error) {
err := db.Flush(ctx)
_, rowIter, err := se.engine.Query(ctx, "COMMIT;")
if err != nil {
return false, err
}
err = rowIter.Close(ctx)
if err != nil {
return false, err
}
err = db.Flush(ctx)
if err != nil {
return false, err
}
@@ -1092,19 +1120,6 @@ func processBatchQuery(ctx *sql.Context, query string, se *sqlEngine) error {
}
func processNonBatchableQuery(ctx *sql.Context, se *sqlEngine, query string, sqlStatement sqlparser.Statement) (returnErr error) {
foundDoltSQLFunc, err := checkForDoltSQLFunction(sqlStatement)
if err != nil {
return err
}
// DOLT SQL functions like DOLT_COMMIT require an updated repo state to work correctly.
if foundDoltSQLFunc {
err = updateRepoState(ctx, se)
if err != nil {
return err
}
}
sqlSch, rowIter, err := processQuery(ctx, query, se)
if err != nil {
return err
@@ -1238,30 +1253,6 @@ func foundSubquery(node sqlparser.SQLNode) bool {
return has
}
func checkForDoltSQLFunction(statement sqlparser.Statement) (bool, error) {
switch node := statement.(type) {
default:
return hasDoltSQLFunction(node), nil
}
}
// hasDoltSQLFunction checks if a function is a dolt SQL function as defined in the dfunc package.
func hasDoltSQLFunction(node sqlparser.SQLNode) bool {
has := false
_ = sqlparser.Walk(func(node sqlparser.SQLNode) (keepGoing bool, err error) {
if f, ok := node.(*sqlparser.FuncExpr); ok {
name := strings.ToLower(f.Name.String())
if strings.HasPrefix(name, "dolt_") {
has = true
}
return false, nil
}
return true, nil
}, node)
return has
}
func HasTopLevelOrderByClause(query string) bool {
st, _ := sqlparser.Parse(query)
@@ -1388,20 +1379,23 @@ func newSqlEngine(sqlCtx *sql.Context, readOnly bool, mrEnv env.MultiRepoEnv, ro
}
}
dsess := dsqle.DSessFromSess(sqlCtx.Session)
sess := dsess.DSessFromSess(sqlCtx.Session)
nameToDB := make(map[string]dsqle.Database)
for _, db := range dbs {
nameToDB[db.Name()] = db
root := roots[db.Name()]
engine.AddDatabase(db)
err := dsess.AddDB(sqlCtx, db, db.DbData())
// TODO: this doesn't consider the root above, which may not be the HEAD of the branch
// To fix this, we need to pass a commit here as a separate param, and install a read-only database on it
// since it isn't a current HEAD.
dbState, err := getDbState(sqlCtx, db, mrEnv)
if err != nil {
return nil, err
}
err = db.SetRoot(sqlCtx, root)
err = sess.AddDB(sqlCtx, dbState)
if err != nil {
return nil, err
}
@@ -1416,6 +1410,39 @@ func newSqlEngine(sqlCtx *sql.Context, readOnly bool, mrEnv env.MultiRepoEnv, ro
return &sqlEngine{nameToDB, mrEnv, engine, format}, nil
}
func getDbState(ctx context.Context, db dsqle.Database, mrEnv env.MultiRepoEnv) (dsess.InitialDbState, error) {
var dEnv *env.DoltEnv
mrEnv.Iter(func(name string, de *env.DoltEnv) (stop bool, err error) {
if name == db.Name() {
dEnv = de
return true, nil
}
return false, nil
})
if dEnv == nil {
return dsess.InitialDbState{}, fmt.Errorf("Couldn't find environment for database %s", db.Name())
}
head := dEnv.RepoStateReader().CWBHeadSpec()
headCommit, err := dEnv.DoltDB.Resolve(ctx, head, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
return dsess.InitialDbState{}, err
}
ws, err := dEnv.WorkingSet(ctx)
if err != nil {
return dsess.InitialDbState{}, err
}
return dsess.InitialDbState{
Db: db,
HeadCommit: headCommit,
WorkingSet: ws,
DbData: dEnv.DbData(),
}, nil
}
func (se *sqlEngine) getDB(name string) (dsqle.Database, error) {
db, ok := se.dbs[name]
+50 -8
View File
@@ -36,6 +36,7 @@ import (
dsqle "github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
_ "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/utils/tracing"
)
@@ -139,7 +140,7 @@ func Serve(ctx context.Context, version string, serverConfig ServerConfig, serve
// to the value of mysql that we support.
},
sqlEngine,
newSessionBuilder(sqlEngine, username, email, serverConfig.AutoCommit()),
newSessionBuilder(sqlEngine, username, email, mrEnv, serverConfig.AutoCommit()),
)
if startError != nil {
@@ -166,11 +167,18 @@ func portInUse(hostPort string) bool {
return false
}
func newSessionBuilder(sqlEngine *sqle.Engine, username, email string, autocommit bool) server.SessionBuilder {
func newSessionBuilder(sqlEngine *sqle.Engine, username, email string, mrEnv env.MultiRepoEnv, autocommit bool) server.SessionBuilder {
return func(ctx context.Context, conn *mysql.Conn, host string) (sql.Session, *sql.IndexRegistry, *sql.ViewRegistry, error) {
tmpSqlCtx := sql.NewEmptyContext()
mysqlSess := sql.NewSession(host, conn.RemoteAddr().String(), conn.User, conn.ConnectionID)
doltSess, err := dsqle.NewDoltSession(tmpSqlCtx, mysqlSess, username, email, dbsAsDSQLDBs(sqlEngine.Catalog.AllDatabases())...)
doltDbs := dbsAsDSQLDBs(sqlEngine.Catalog.AllDatabases())
dbStates, err := getDbStates(ctx, mrEnv, doltDbs)
if err != nil {
return nil, nil, nil, err
}
doltSess, err := dsess.NewSession(tmpSqlCtx, mysqlSess, username, email, dbStates...)
if err != nil {
return nil, nil, nil, err
@@ -193,11 +201,6 @@ func newSessionBuilder(sqlEngine *sqle.Engine, username, email string, autocommi
dbs := dbsAsDSQLDBs(sqlEngine.Catalog.AllDatabases())
for _, db := range dbs {
err := db.LoadRootFromRepoState(sqlCtx)
if err != nil {
return nil, nil, nil, err
}
root, err := db.GetRoot(sqlCtx)
if err != err {
cli.PrintErrln(err)
@@ -228,3 +231,42 @@ func dbsAsDSQLDBs(dbs []sql.Database) []dsqle.Database {
return dsqlDBs
}
func getDbStates(ctx context.Context, mrEnv env.MultiRepoEnv, dbs []dsqle.Database) ([]dsess.InitialDbState, error) {
var dbStates []dsess.InitialDbState
for _, db := range dbs {
var dEnv *env.DoltEnv
mrEnv.Iter(func(name string, de *env.DoltEnv) (stop bool, err error) {
if name == db.Name() {
dEnv = de
return true, nil
}
return false, nil
})
if dEnv == nil {
return nil, fmt.Errorf("couldn't find environment for database %s", db.Name())
}
head := dEnv.RepoStateReader().CWBHeadSpec()
headCommit, err := dEnv.DoltDB.Resolve(ctx, head, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
return nil, err
}
ws, err := dEnv.WorkingSet(ctx)
if err != nil {
return nil, err
}
dbStates = append(dbStates, dsess.InitialDbState{
Db: db,
HeadCommit: headCommit,
WorkingSet: ws,
DbData: dEnv.DbData(),
})
}
return dbStates, nil
}
+34 -10
View File
@@ -66,37 +66,53 @@ func (cmd StatusCmd) createArgParser() *argparser.ArgParser {
// Exec executes the command
func (cmd StatusCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv) int {
ap := cmd.createArgParser()
help, _ := cli.HelpAndUsagePrinters(cli.GetCommandDocumentation(commandStr, statusDocs, ap))
help, usage := cli.HelpAndUsagePrinters(cli.GetCommandDocumentation(commandStr, statusDocs, ap))
cli.ParseArgsOrDie(ap, args, help)
staged, notStaged, err := diff.GetStagedUnstagedTableDeltas(ctx, dEnv.DoltDB, dEnv.RepoStateReader())
workingRoot, err := dEnv.WorkingRoot(ctx)
if err != nil {
return HandleVErrAndExitCode(errhand.BuildDError("Couldn't get working root").AddCause(err).Build(), usage)
}
roots, err := dEnv.Roots(ctx)
if err != nil {
cli.PrintErrln(toStatusVErr(err).Verbose())
return 1
}
staged, notStaged, err := diff.GetStagedUnstagedTableDeltas(ctx, roots)
if err != nil {
cli.PrintErrln(toStatusVErr(err).Verbose())
return 1
}
workingTblsInConflict, _, _, err := merge.GetTablesInConflict(ctx, dEnv.DoltDB, dEnv.RepoStateReader())
workingTblsInConflict, _, _, err := merge.GetTablesInConflict(ctx, roots)
if err != nil {
cli.PrintErrln(toStatusVErr(err).Verbose())
return 1
}
stagedDocDiffs, notStagedDocDiffs, err := diff.GetDocDiffs(ctx, dEnv.DoltDB, dEnv.RepoStateReader(), dEnv.DocsReadWriter())
stagedDocDiffs, notStagedDocDiffs, err := diff.GetDocDiffs(ctx, roots, dEnv.DocsReadWriter())
if err != nil {
cli.PrintErrln(toStatusVErr(err).Verbose())
return 1
}
workingDocsInConflict, err := merge.GetDocsInConflict(ctx, dEnv.DoltDB, dEnv.RepoStateReader(), dEnv.DocsReadWriter())
workingDocsInConflict, err := merge.GetDocsInConflict(ctx, workingRoot, dEnv.DocsReadWriter())
if err != nil {
cli.PrintErrln(toStatusVErr(err).Verbose())
return 1
}
printStatus(ctx, dEnv, staged, notStaged, workingTblsInConflict, workingDocsInConflict, stagedDocDiffs, notStagedDocDiffs)
err = printStatus(ctx, dEnv, staged, notStaged, workingTblsInConflict, workingDocsInConflict, stagedDocDiffs, notStagedDocDiffs)
if err != nil {
cli.PrintErrln(toStatusVErr(err).Verbose())
return 1
}
return 0
}
@@ -325,10 +341,16 @@ func getAddedNotStaged(notStagedTbls []diff.TableDelta, notStagedDocs *diff.DocD
return lines
}
func printStatus(ctx context.Context, dEnv *env.DoltEnv, stagedTbls, notStagedTbls []diff.TableDelta, workingTblsInConflict []string, workingDocsInConflict *diff.DocDiffs, stagedDocs, notStagedDocs *diff.DocDiffs) {
cli.Printf(branchHeader, dEnv.RepoState.CWBHeadRef().GetPath())
// TODO: working docs in conflict param not used here
func printStatus(ctx context.Context, dEnv *env.DoltEnv, stagedTbls, notStagedTbls []diff.TableDelta, workingTblsInConflict []string, workingDocsInConflict *diff.DocDiffs, stagedDocs, notStagedDocs *diff.DocDiffs) error {
cli.Printf(branchHeader, dEnv.RepoStateReader().CWBHeadRef().GetPath())
if dEnv.RepoState.Merge != nil {
mergeActive, err := dEnv.IsMergeActive(ctx)
if err != nil {
return err
}
if mergeActive {
if len(workingTblsInConflict) > 0 {
cli.Println(unmergedTablesHeader)
} else {
@@ -339,9 +361,11 @@ func printStatus(ctx context.Context, dEnv *env.DoltEnv, stagedTbls, notStagedTb
n := printStagedDiffs(cli.CliOut, stagedTbls, stagedDocs, true)
n = printDiffsNotStaged(ctx, dEnv, cli.CliOut, notStagedTbls, notStagedDocs, true, n, workingTblsInConflict)
if dEnv.RepoState.Merge == nil && n == 0 {
if !mergeActive && n == 0 {
cli.Println("nothing to commit, working tree clean")
}
return nil
}
func toStatusVErr(err error) errhand.VerboseError {
+3 -3
View File
@@ -58,8 +58,8 @@ func UpdateWorkingWithVErr(dEnv *env.DoltEnv, updatedRoot *doltdb.RootValue) err
return nil
}
func UpdateStagedWithVErr(ddb *doltdb.DoltDB, rsw env.RepoStateWriter, updatedRoot *doltdb.RootValue) errhand.VerboseError {
_, err := env.UpdateStagedRoot(context.Background(), ddb, rsw, updatedRoot)
func UpdateStagedWithVErr(doltEnv *env.DoltEnv, updatedRoot *doltdb.RootValue) errhand.VerboseError {
err := doltEnv.UpdateStagedRoot(context.Background(), updatedRoot)
switch err {
case doltdb.ErrNomsIO:
@@ -78,7 +78,7 @@ func ResolveCommitWithVErr(dEnv *env.DoltEnv, cSpecStr string) (*doltdb.Commit,
return nil, errhand.BuildDError("'%s' is not a valid commit", cSpecStr).Build()
}
cm, err := dEnv.DoltDB.Resolve(context.TODO(), cs, dEnv.RepoState.CWBHeadRef())
cm, err := dEnv.DoltDB.Resolve(context.TODO(), cs, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
if err == doltdb.ErrInvalidAncestorSpec {
+2
View File
@@ -140,6 +140,8 @@ github.com/denisbrodbeck/machineid v1.0.1 h1:geKr9qtkB876mXguW2X6TU4ZynleN6ezuMS
github.com/denisbrodbeck/machineid v1.0.1/go.mod h1:dJUwb7PTidGDeYyUBmXZ2GphQBbjJCrnectwCyxcUSI=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dolthub/dolt/go/gen/proto/dolt/services/eventsapi v0.0.0-20201005193433-3ee972b1d078 h1:nrkoh/RcgTq5EsWTcbSBF8KQghCtM+1dhyslghbBoj8=
github.com/dolthub/dolt/go/gen/proto/dolt/services/eventsapi v0.0.0-20201005193433-3ee972b1d078/go.mod h1:8Jdiq6CVg8HM4n9fF17sGgXUpFa98zDyscW0A7OQmuM=
github.com/dolthub/fslock v0.0.2 h1:8vUh47iKovgrtXNrXVIzsIoWLlspoXg+3nslhUzgKSw=
github.com/dolthub/fslock v0.0.2/go.mod h1:0i7bsNkK+XHwFL3dIsSWeXSV7sykVzzVr6+jq8oeEo0=
github.com/dolthub/go-mysql-server v0.10.1-0.20210706210037-774723f8dd7a h1:iirS4XzDfhkN0osCqmc6X8ihxT4JUBWiMqhbivl65LQ=
+2 -1
View File
@@ -25,12 +25,13 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/encoding"
filesys2 "github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/store/types"
)
func TestDocDiff(t *testing.T) {
ctx := context.Background()
ddb, _ := doltdb.LoadDoltDB(ctx, types.Format_Default, doltdb.InMemDoltDB)
ddb, _ := doltdb.LoadDoltDB(ctx, types.Format_Default, doltdb.InMemDoltDB, filesys2.LocalFS)
ddb.WriteEmptyRepo(ctx, "billy bob", "bigbillieb@fake.horse")
cs, _ := doltdb.NewCommitSpec("master")
+10 -36
View File
@@ -106,33 +106,22 @@ func (nd *DocDiffs) Len() int {
}
// GetDocDiffs retrieves staged and unstaged DocDiffs.
func GetDocDiffs(ctx context.Context, ddb *doltdb.DoltDB, rsr env.RepoStateReader, drw env.DocsReadWriter) (*DocDiffs, *DocDiffs, error) {
func GetDocDiffs(
ctx context.Context,
roots doltdb.Roots,
drw env.DocsReadWriter,
) (*DocDiffs, *DocDiffs, error) {
docsOnDisk, err := drw.GetDocsOnDisk()
if err != nil {
return nil, nil, err
}
workingRoot, err := env.WorkingRoot(ctx, ddb, rsr)
notStagedDocDiffs, err := NewDocDiffs(ctx, roots.Working, nil, docsOnDisk)
if err != nil {
return nil, nil, err
}
notStagedDocDiffs, err := NewDocDiffs(ctx, workingRoot, nil, docsOnDisk)
if err != nil {
return nil, nil, err
}
headRoot, err := env.HeadRoot(ctx, ddb, rsr)
if err != nil {
return nil, nil, err
}
stagedRoot, err := env.StagedRoot(ctx, ddb, rsr)
if err != nil {
return nil, nil, err
}
stagedDocDiffs, err := NewDocDiffs(ctx, headRoot, stagedRoot, docsOnDisk)
stagedDocDiffs, err := NewDocDiffs(ctx, roots.Head, roots.Staged, docsOnDisk)
if err != nil {
return nil, nil, err
}
@@ -260,28 +249,13 @@ func GetTableDeltas(ctx context.Context, fromRoot, toRoot *doltdb.RootValue) (de
return deltas, nil
}
func GetStagedUnstagedTableDeltas(ctx context.Context, ddb *doltdb.DoltDB, rsr env.RepoStateReader) (staged, unstaged []TableDelta, err error) {
headRoot, err := env.HeadRoot(ctx, ddb, rsr)
if err != nil {
return nil, nil, doltdb.RootValueUnreadable{RootType: doltdb.HeadRoot, Cause: err}
}
stagedRoot, err := env.StagedRoot(ctx, ddb, rsr)
if err != nil {
return nil, nil, doltdb.RootValueUnreadable{RootType: doltdb.StagedRoot, Cause: err}
}
workingRoot, err := env.WorkingRoot(ctx, ddb, rsr)
if err != nil {
return nil, nil, doltdb.RootValueUnreadable{RootType: doltdb.WorkingRoot, Cause: err}
}
staged, err = GetTableDeltas(ctx, headRoot, stagedRoot)
func GetStagedUnstagedTableDeltas(ctx context.Context, roots doltdb.Roots) (staged, unstaged []TableDelta, err error) {
staged, err = GetTableDeltas(ctx, roots.Head, roots.Staged)
if err != nil {
return nil, nil, err
}
unstaged, err = GetTableDeltas(ctx, stagedRoot, workingRoot)
unstaged, err = GetTableDeltas(ctx, roots.Staged, roots.Working)
if err != nil {
return nil, nil, err
}
+10
View File
@@ -15,6 +15,7 @@
package doltdb
import (
"bytes"
"context"
"errors"
@@ -276,3 +277,12 @@ func (c *Commit) GetAncestor(ctx context.Context, as *AncestorSpec) (*Commit, er
return NewCommit(c.vrw, ancestorSt), nil
}
func (c *Commit) DebugString(ctx context.Context) string {
var buf bytes.Buffer
err := types.WriteEncodedValue(ctx, &buf, c.commitSt)
if err != nil {
panic(err)
}
return buf.String()
}
+116 -40
View File
@@ -19,6 +19,7 @@ import (
"errors"
"fmt"
"math/rand"
"path/filepath"
"strings"
"time"
@@ -73,19 +74,26 @@ func DoltDBFromCS(cs chunks.ChunkStore) *DoltDB {
// LoadDoltDB will acquire a reference to the underlying noms db. If the Location is InMemDoltDB then a reference
// to a newly created in memory database will be used. If the location is LocalDirDoltDB, the directory must exist or
// this returns nil.
func LoadDoltDB(ctx context.Context, nbf *types.NomsBinFormat, urlStr string) (*DoltDB, error) {
return LoadDoltDBWithParams(ctx, nbf, urlStr, nil)
func LoadDoltDB(ctx context.Context, nbf *types.NomsBinFormat, urlStr string, fs filesys.Filesys) (*DoltDB, error) {
return LoadDoltDBWithParams(ctx, nbf, urlStr, fs, nil)
}
func LoadDoltDBWithParams(ctx context.Context, nbf *types.NomsBinFormat, urlStr string, params map[string]string) (*DoltDB, error) {
func LoadDoltDBWithParams(ctx context.Context, nbf *types.NomsBinFormat, urlStr string, fs filesys.Filesys, params map[string]string) (*DoltDB, error) {
if urlStr == LocalDirDoltDB {
exists, isDir := filesys.LocalFS.Exists(dbfactory.DoltDataDir)
exists, isDir := fs.Exists(dbfactory.DoltDataDir)
if !exists {
return nil, errors.New("missing dolt data directory")
} else if !isDir {
return nil, errors.New("file exists where the dolt data directory should be")
}
absPath, err := fs.Abs(dbfactory.DoltDataDir)
if err != nil {
return nil, err
}
urlStr = fmt.Sprintf("file://%s", filepath.ToSlash(absPath))
}
db, err := dbfactory.CreateDB(ctx, nbf, urlStr, params)
@@ -293,6 +301,22 @@ func getAncestor(ctx context.Context, vrw types.ValueReadWriter, commitSt types.
return commitSt, nil
}
// Roots is a convenience struct to package up the three roots that most library functions will need to inspect and
// modify the working set. This struct is designed to be passed by value always: functions should take a Roots as a
// param and return a modified one.
//
// It contains three root values:
// Head: The root of the head of the current working branch
// Working: The root of the current working set
// Staged: The root of the staged value
//
// See doltEnvironment.Roots(context.Context)
type Roots struct {
Head *RootValue
Working *RootValue
Staged *RootValue
}
// Resolve takes a CommitSpec and returns a Commit, or an error if the commit cannot be found.
// If the CommitSpec is HEAD, Resolve also needs the DoltRef of the current working branch.
func (ddb *DoltDB) Resolve(ctx context.Context, cs *CommitSpec, cwb ref.DoltRef) (*Commit, error) {
@@ -336,6 +360,9 @@ func (ddb *DoltDB) Resolve(ctx context.Context, cs *CommitSpec, cwb ref.DoltRef)
}
}
case headCommitSpec:
if cwb == nil {
return nil, fmt.Errorf("cannot use a nil current working branch with a HEAD commit spec")
}
commitSt, err = getCommitStForRefStr(ctx, ddb.db, cwb.String())
default:
panic("unrecognized commit spec csType: " + cs.csType)
@@ -410,28 +437,34 @@ func (ddb *DoltDB) ResolveWorkingSet(ctx context.Context, workingSetRef ref.Work
// WriteRootValue will write a doltdb.RootValue instance to the database. This value will not be associated with a commit
// and can be committed by hash at a later time. Returns the hash of the value written.
// This method is the primary place in doltcore that handles setting the FeatureVersion of root values to the current
// value, so all writes of RootValues should happen here.
func (ddb *DoltDB) WriteRootValue(ctx context.Context, rv *RootValue) (hash.Hash, error) {
var err error
rv.valueSt, err = rv.valueSt.Set(featureVersKey, types.Int(DoltFeatureVersion))
if err != nil {
return hash.Hash{}, err
}
valRef, err := ddb.db.WriteValue(ctx, rv.valueSt)
valRef, err := ddb.writeRootValue(ctx, rv)
if err != nil {
return hash.Hash{}, err
}
err = ddb.db.Flush(ctx)
if err != nil {
return hash.Hash{}, err
}
valHash := valRef.TargetHash()
return valRef.TargetHash(), nil
}
return valHash, err
// writeRootValue writes the root value given to the DB and returns a ref to it. Unlike WriteRootValue, this method
// does not flush the DB to disk afterward.
// This method is the primary place in doltcore that handles setting the FeatureVersion of root values to the current
// value, so all writes of RootValues should happen here or via WriteRootValue.
func (ddb *DoltDB) writeRootValue(ctx context.Context, rv *RootValue) (types.Ref, error) {
var err error
rv.valueSt, err = rv.valueSt.Set(featureVersKey, types.Int(DoltFeatureVersion))
if err != nil {
return types.Ref{}, err
}
return ddb.db.WriteValue(ctx, rv.valueSt)
}
// ReadRootValue reads the RootValue associated with the hash given and returns it. Returns an error if the value cannot
@@ -793,26 +826,48 @@ func (ddb *DoltDB) GetRefsOfType(ctx context.Context, refTypeFilter map[ref.RefT
}
// NewBranchAtCommit creates a new branch with HEAD at the commit given. Branch names must pass IsValidUserBranchName.
func (ddb *DoltDB) NewBranchAtCommit(ctx context.Context, dref ref.DoltRef, commit *Commit) error {
if !IsValidBranchRef(dref) {
panic(fmt.Sprintf("invalid branch name %s, use IsValidUserBranchName check", dref.String()))
func (ddb *DoltDB) NewBranchAtCommit(ctx context.Context, branchRef ref.DoltRef, commit *Commit) error {
if !IsValidBranchRef(branchRef) {
panic(fmt.Sprintf("invalid branch name %s, use IsValidUserBranchName check", branchRef.String()))
}
ds, err := ddb.db.GetDataset(ctx, dref.String())
ds, err := ddb.db.GetDataset(ctx, branchRef.String())
if err != nil {
return err
}
rf, err := types.NewRef(commit.commitSt, ddb.db.Format())
if err != nil {
return err
}
_, err = ddb.db.SetHead(ctx, ds, rf)
if err != nil {
return err
}
return err
// Update the corresponding working set at the same time, either by updating it or creating a new one
// TODO: find all the places HEAD can change, update working set too. This is only necessary when we don't already
// update the working set when the head changes.
commitRoot, err := commit.GetRootValue()
wsRef, _ := ref.WorkingSetRefForHead(branchRef)
var ws *WorkingSet
var currWsHash hash.Hash
ws, err = ddb.ResolveWorkingSet(ctx, wsRef)
if err == ErrWorkingSetNotFound {
ws = EmptyWorkingSet(wsRef)
} else if err != nil {
return err
} else {
currWsHash, err = ws.HashOf()
if err != nil {
return err
}
}
ws = ws.WithWorkingRoot(commitRoot).WithStagedRoot(commitRoot)
return ddb.UpdateWorkingSet(ctx, wsRef, ws, currWsHash, nil)
}
// DeleteBranch deletes the branch given, returning an error if it doesn't exist.
@@ -887,36 +942,57 @@ func (ddb *DoltDB) NewTagAtCommit(ctx context.Context, tagRef ref.DoltRef, c *Co
// UpdateWorkingSet updates the working set with the ref given to the root value given
// |prevHash| is the hash of the expected WorkingSet struct stored in the ref, not the hash of the RootValue there.
func (ddb *DoltDB) UpdateWorkingSet(ctx context.Context, workingSetRef ref.WorkingSetRef, rootVal *RootValue, prevHash hash.Hash) error {
func (ddb *DoltDB) UpdateWorkingSet(
ctx context.Context,
workingSetRef ref.WorkingSetRef,
workingSet *WorkingSet,
prevHash hash.Hash,
meta *WorkingSetMeta,
) error {
ds, err := ddb.db.GetDataset(ctx, workingSetRef.String())
if err != nil {
return err
}
// st, err := NewWorkingSetMeta().toNomsStruct(ddb.Format())
// if err != nil {
// return err
// }
// logrus.Tracef("Updating working set with root %s", workingSet.RootValue().DebugString(ctx, true))
rootRef, err := ddb.db.WriteValue(ctx, rootVal.valueSt)
workingRootRef, stagedRef, mergeStateRef, err := workingSet.writeValues(ctx, ddb)
if err != nil {
return err
}
// workspaceStruct, err := datas.NewWorkspace(ctx, rootRef, st)
// if err != nil {
// return err
// }
//
// wsRef, err := types.NewRef(workspaceStruct, ddb.Format())
// if err != nil {
// return err
// }
// While we still have places that need user info threaded through, we're lenient on providing the meta
var metaSt types.Struct
if meta != nil {
metaSt, err = meta.toNomsStruct(types.Format_Default)
if err != nil {
return err
}
} else {
metaSt, err = datas.NewWorkingSetMeta(types.Format_Default, "incomplete", "incomplete", uint64(time.Now().Unix()), "incomplete")
if err != nil {
return err
}
}
// h, err = wsRef.Hash(wsRef.Format())
// fmt.Sprintf("%v", h)
_, err = ddb.db.UpdateWorkingSet(ctx, ds, datas.WorkingSetSpec{
Meta: datas.WorkingSetMeta{Meta: metaSt},
WorkingRoot: workingRootRef,
StagedRoot: stagedRef,
MergeState: mergeStateRef,
}, prevHash)
_, err = ddb.db.UpdateWorkingSet(ctx, ds, rootRef, datas.WorkingSetMeta{}, prevHash)
return err
}
// DeleteWorkingSet deletes the working set given from the database.
func (ddb *DoltDB) DeleteWorkingSet(ctx context.Context, workingSetRef ref.WorkingSetRef) error {
	dataset, err := ddb.db.GetDataset(ctx, workingSetRef.String())
	if err != nil {
		return err
	}
	if _, err = ddb.db.Delete(ctx, dataset); err != nil {
		return err
	}
	return nil
}
+6 -6
View File
@@ -177,7 +177,7 @@ func TestSystemTableTags(t *testing.T) {
}
func TestEmptyInMemoryRepoCreation(t *testing.T) {
ddb, err := LoadDoltDB(context.Background(), types.Format_Default, InMemDoltDB)
ddb, err := LoadDoltDB(context.Background(), types.Format_Default, InMemDoltDB, filesys.LocalFS)
if err != nil {
t.Fatal("Failed to load db")
@@ -213,7 +213,7 @@ func TestLoadNonExistentLocalFSRepo(t *testing.T) {
panic("Couldn't change the working directory to the test directory.")
}
ddb, err := LoadDoltDB(context.Background(), types.Format_Default, LocalDirDoltDB)
ddb, err := LoadDoltDB(context.Background(), types.Format_Default, LocalDirDoltDB, filesys.LocalFS)
assert.Nil(t, ddb, "Should return nil when loading a non-existent data dir")
assert.Error(t, err, "Should see an error here")
}
@@ -228,7 +228,7 @@ func TestLoadBadLocalFSRepo(t *testing.T) {
contents := []byte("not a directory")
ioutil.WriteFile(filepath.Join(testDir, dbfactory.DoltDataDir), contents, 0644)
ddb, err := LoadDoltDB(context.Background(), types.Format_Default, LocalDirDoltDB)
ddb, err := LoadDoltDB(context.Background(), types.Format_Default, LocalDirDoltDB, filesys.LocalFS)
assert.Nil(t, ddb, "Should return nil when loading a non-directory data dir file")
assert.Error(t, err, "Should see an error here")
}
@@ -251,7 +251,7 @@ func TestLDNoms(t *testing.T) {
t.Fatal("Failed to create noms directory")
}
ddb, _ := LoadDoltDB(context.Background(), types.Format_Default, LocalDirDoltDB)
ddb, _ := LoadDoltDB(context.Background(), types.Format_Default, LocalDirDoltDB, filesys.LocalFS)
err = ddb.WriteEmptyRepo(context.Background(), committerName, committerEmail)
if err != nil {
@@ -263,7 +263,7 @@ func TestLDNoms(t *testing.T) {
var valHash hash.Hash
var tbl *Table
{
ddb, _ := LoadDoltDB(context.Background(), types.Format_Default, LocalDirDoltDB)
ddb, _ := LoadDoltDB(context.Background(), types.Format_Default, LocalDirDoltDB, filesys.LocalFS)
cs, _ := NewCommitSpec("master")
commit, err := ddb.Resolve(context.Background(), cs, nil)
@@ -305,7 +305,7 @@ func TestLDNoms(t *testing.T) {
// reopen the db and commit the value. Perform a couple checks for
{
ddb, _ := LoadDoltDB(context.Background(), types.Format_Default, LocalDirDoltDB)
ddb, _ := LoadDoltDB(context.Background(), types.Format_Default, LocalDirDoltDB, filesys.LocalFS)
meta, err := NewCommitMeta(committerName, committerEmail, "Sample data")
if err != nil {
t.Error("Failed to commit")
+1 -1
View File
@@ -92,7 +92,7 @@ func testGarbageCollection(t *testing.T, test gcTest) {
working, err = dEnv.WorkingRoot(ctx)
require.NoError(t, err)
// assert all out rows are present after gc
actual, err := sqle.ExecuteSelect(dEnv, dEnv.DoltDB, working, test.query)
actual, err := sqle.ExecuteSelect(t, dEnv, dEnv.DoltDB, working, test.query)
require.NoError(t, err)
assert.Equal(t, test.expected, actual)
+16 -5
View File
@@ -403,7 +403,6 @@ func (t *Table) GetRowData(ctx context.Context) (types.Map, error) {
func (t *Table) ResolveConflicts(ctx context.Context, pkTuples []types.Value) (invalid, notFound []types.Value, tbl *Table, err error) {
removed := 0
_, confData, err := t.GetConflicts(ctx)
if err != nil {
return nil, nil, nil, err
}
@@ -425,24 +424,36 @@ func (t *Table) ResolveConflicts(ctx context.Context, pkTuples []types.Value) (i
}
conflicts, err := confEdit.Map(ctx)
if err != nil {
return nil, nil, nil, err
}
conflictsRef, err := WriteValAndGetRef(ctx, t.vrw, conflicts)
if err != nil {
return nil, nil, nil, err
}
updatedSt, err := t.tableStruct.Set(conflictsKey, conflictsRef)
if err != nil {
return nil, nil, nil, err
}
return invalid, notFound, &Table{t.vrw, updatedSt}, nil
newTbl := &Table{t.vrw, updatedSt}
// If we resolved the last conflict, mark the table conflict free
numRowsInConflict, err := newTbl.NumRowsInConflict(ctx)
if err != nil {
return nil, nil, nil, err
}
if numRowsInConflict == 0 {
newTbl, err = newTbl.ClearConflicts()
if err != nil {
return nil, nil, nil, err
}
}
return invalid, notFound, newTbl, nil
}
// GetIndexData returns the internal index map which goes from index name to a ref of the row data map.
+336 -77
View File
@@ -20,94 +20,353 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/types"
)
const (
workingMetaStName = "workingset"
workingMetaVersionStName = "version"
workingMetaVersion = "1.0"
)
type WorkingSet struct {
Name string
format *types.NomsBinFormat
st types.Struct
rootValue *RootValue
}
// NewWorkingSet creates a new WorkingSet object.
func NewWorkingSet(ctx context.Context, name string, vrw types.ValueReadWriter, workingSetSt types.Struct) (*WorkingSet, error) {
// TODO: meta struct
// metaSt, ok, err := workingSetSt.MaybeGet(datas.TagMetaField)
//
// if err != nil {
// return nil, err
// }
// if !ok {
// return nil, fmt.Errorf("tag struct does not have field %s", datas.TagMetaField)
// }
//
// meta, err := tagMetaFromNomsSt(metaSt.(types.Struct))
//
// if err != nil {
// return nil, err
// }
rootRef, ok, err := workingSetSt.MaybeGet(datas.WorkingSetRefField)
if err != nil {
return nil, err
}
if !ok {
return nil, fmt.Errorf("workingset struct does not have field %s", datas.WorkingSetRefField)
}
rootValSt, err := rootRef.(types.Ref).TargetValue(ctx, vrw)
if err != nil {
return nil, err
}
rootVal, err := newRootValue(vrw, rootValSt.(types.Struct))
if err != nil {
return nil, err
}
return &WorkingSet{
Name: name,
format: vrw.Format(),
st: workingSetSt,
rootValue: rootVal,
}, nil
}
// RootValue returns the root value stored by this workingset
func (t *WorkingSet) RootValue() *RootValue {
return t.rootValue
}
// Struct returns the struct used to construct this WorkingSet.
func (t *WorkingSet) Struct() types.Struct {
return t.st
}
// Ref returns a WorkingSetRef for this WorkingSet.
func (t *WorkingSet) Ref() ref.WorkingSetRef {
return ref.NewWorkingSetRef(t.Name)
// MergeState records an in-progress merge: the commit being merged in and the
// working root as it was before the merge began (restored by AbortMerge).
type MergeState struct {
	commit *Commit          // the commit being merged into the working set
	preMergeWorking *RootValue // snapshot of the working root taken when the merge started
}
// WorkingSetMeta contains all the metadata that is associated with a working set
type WorkingSetMeta struct {
	User string        // name of the user who last updated the working set
	Email string       // email of the user who last updated the working set
	Timestamp uint64   // unix timestamp of the last update
	Description string // free-form description of the update
	Version string     // format version of the working set metadata
}
func NewWorkingSetMeta() *WorkingSetMeta {
return &WorkingSetMeta{}
// toNomsStruct serializes this WorkingSetMeta into a noms struct using the format given.
// NOTE(review): wsm.Version is not passed through here — presumably the datas layer
// stamps its own version; confirm.
func (wsm *WorkingSetMeta) toNomsStruct(nbf *types.NomsBinFormat) (types.Struct, error) {
	return datas.NewWorkingSetMeta(nbf, wsm.User, wsm.Email, wsm.Timestamp, wsm.Description)
}
func (tm *WorkingSetMeta) toNomsStruct(nbf *types.NomsBinFormat) (types.Struct, error) {
metadata := types.StructData{
workingMetaVersionStName: types.String(workingMetaVersion),
// MergeStateFromCommitAndWorking returns a new MergeState.
// Most clients should not construct MergeState objects directly, but instead use WorkingSet.StartMerge
func MergeStateFromCommitAndWorking(commit *Commit, preMergeWorking *RootValue) *MergeState {
	return &MergeState{commit: commit, preMergeWorking: preMergeWorking}
}
// newMergeState deserializes a MergeState from the noms struct given. Returns an
// error if either required field (the merge commit or the pre-merge working root)
// is missing or unreadable.
func newMergeState(ctx context.Context, vrw types.ValueReadWriter, mergeState types.Struct) (*MergeState, error) {
	commitSt, ok, err := mergeState.MaybeGet(datas.MergeStateCommitField)
	if err != nil {
		return nil, err
	}
	if !ok {
		return nil, fmt.Errorf("corrupted MergeState struct")
	}
	commit := NewCommit(vrw, commitSt.(types.Struct))

	workingRootRef, ok, err := mergeState.MaybeGet(datas.MergeStateWorkingPreMergeField)
	if err != nil {
		return nil, err
	}
	if !ok {
		return nil, fmt.Errorf("corrupted MergeState struct")
	}

	// The field stores a ref to the pre-merge root value; dereference and decode it.
	workingRootValSt, err := workingRootRef.(types.Ref).TargetValue(ctx, vrw)
	if err != nil {
		return nil, err
	}

	workingRoot, err := newRootValue(vrw, workingRootValSt.(types.Struct))
	if err != nil {
		return nil, err
	}

	return &MergeState{
		commit: commit,
		preMergeWorking: workingRoot,
	}, nil
}
// Commit returns the commit being merged in.
func (m MergeState) Commit() *Commit {
	return m.commit
}
// PreMergeWorkingRoot returns the working root as it was before the merge started.
func (m MergeState) PreMergeWorkingRoot() *RootValue {
	return m.preMergeWorking
}
// WorkingSet holds the uncommitted state for a head ref: the working and staged
// root values and, when a merge is in progress, its MergeState.
type WorkingSet struct {
	Name string               // name of the working set (path of its ref)
	meta WorkingSetMeta       // metadata of the most recent update
	format *types.NomsBinFormat
	st *types.Struct          // backing noms struct; nil for a set never read from storage (see HashOf)
	workingRoot *RootValue    // current working root
	stagedRoot *RootValue     // current staged root; may be nil when absent in storage
	mergeState *MergeState    // non-nil only while a merge is in progress
}
// EmptyWorkingSet returns a WorkingSet named for the ref given with no roots set.
// TODO: remove this, require working and staged
func EmptyWorkingSet(wsRef ref.WorkingSetRef) *WorkingSet {
	return &WorkingSet{
		Name: wsRef.GetPath(),
		format: types.Format_Default,
	}
}
// WithStagedRoot returns a copy of this working set with the staged root given.
// The value receiver makes |ws| a copy, so the original is left unchanged.
func (ws WorkingSet) WithStagedRoot(stagedRoot *RootValue) *WorkingSet {
	ws.stagedRoot = stagedRoot
	return &ws
}
// WithWorkingRoot returns a copy of this working set with the working root given.
// The value receiver makes |ws| a copy, so the original is left unchanged.
func (ws WorkingSet) WithWorkingRoot(workingRoot *RootValue) *WorkingSet {
	ws.workingRoot = workingRoot
	return &ws
}
// WithMergeState returns a copy of this working set with the merge state given.
// The value receiver makes |ws| a copy, so the original is left unchanged.
func (ws WorkingSet) WithMergeState(mergeState *MergeState) *WorkingSet {
	ws.mergeState = mergeState
	return &ws
}
// StartMerge returns a copy of this working set with a merge state recording
// |commit| as the merge source and the current working root as the pre-merge
// snapshot (so an aborted merge can restore it).
func (ws WorkingSet) StartMerge(commit *Commit) *WorkingSet {
	ws.mergeState = MergeStateFromCommitAndWorking(commit, ws.workingRoot)
	return &ws
}
// AbortMerge returns a copy of this working set with the in-progress merge
// discarded: the working root is restored to its pre-merge snapshot and the
// merge state is cleared.
func (ws WorkingSet) AbortMerge() *WorkingSet {
	ws.workingRoot = ws.mergeState.PreMergeWorkingRoot()
	ws.mergeState = nil
	return &ws
}
// ClearMerge returns a copy of this working set with the merge state removed,
// leaving the working root as-is (unlike AbortMerge, which restores it).
func (ws WorkingSet) ClearMerge() *WorkingSet {
	ws.mergeState = nil
	return &ws
}
// WorkingRoot returns the working root of this working set.
func (ws *WorkingSet) WorkingRoot() *RootValue {
	return ws.workingRoot
}
// StagedRoot returns the staged root of this working set; may be nil if it was
// absent from storage.
func (ws *WorkingSet) StagedRoot() *RootValue {
	return ws.stagedRoot
}
// MergeState returns the merge state of this working set, or nil if no merge is
// in progress.
func (ws *WorkingSet) MergeState() *MergeState {
	return ws.mergeState
}
// MergeActive returns true if a merge is currently in progress for this working set.
func (ws *WorkingSet) MergeActive() bool {
	return ws.mergeState != nil
}
// Meta returns the metadata recorded with the most recent update of this working set.
func (ws WorkingSet) Meta() WorkingSetMeta {
	return ws.meta
}
// NewWorkingSet creates a new WorkingSet object by deserializing the noms struct
// given. |name| is the name recorded on the returned set (typically the working
// set ref's path). The working root is required; the staged root, merge state,
// and metadata are all optional and left nil / defaulted when absent.
func NewWorkingSet(ctx context.Context, name string, vrw types.ValueReadWriter, workingSetSt types.Struct) (*WorkingSet, error) {
	metaSt, ok, err := workingSetSt.MaybeGet(datas.WorkingSetMetaField)
	if err != nil {
		return nil, err
	}
	// We're very lenient about the working set meta being here, in expectation of a time when we change how the
	// working set info is stored and this field changes in a backwards incompatible way
	var meta WorkingSetMeta
	if ok {
		meta, err = workingSetMetaFromNomsSt(metaSt.(types.Struct))
		if err != nil {
			return nil, err
		}
	} else {
		meta = WorkingSetMeta{
			User: "not present",
			Email: "not present",
			Timestamp: 0,
			Description: "not present",
			Version: "not present",
		}
	}
	// The working root is mandatory: resolve its ref and decode the root value.
	workingRootRef, ok, err := workingSetSt.MaybeGet(datas.WorkingRootRefField)
	if err != nil {
		return nil, err
	}
	if !ok {
		return nil, fmt.Errorf("workingset struct does not have field %s", datas.WorkingRootRefField)
	}
	workingRootValSt, err := workingRootRef.(types.Ref).TargetValue(ctx, vrw)
	if err != nil {
		return nil, err
	}
	workingRoot, err := newRootValue(vrw, workingRootValSt.(types.Struct))
	if err != nil {
		return nil, err
	}
	// The staged root is optional; stagedRoot stays nil when the field is absent.
	stagedRootRef, ok, err := workingSetSt.MaybeGet(datas.StagedRootRefField)
	if err != nil {
		return nil, err
	}
	var stagedRoot *RootValue
	if ok {
		stagedRootValSt, err := stagedRootRef.(types.Ref).TargetValue(ctx, vrw)
		if err != nil {
			return nil, err
		}
		stagedRoot, err = newRootValue(vrw, stagedRootValSt.(types.Struct))
		if err != nil {
			return nil, err
		}
	}
	// Merge state is present only while a merge is in progress.
	var mergeState *MergeState
	mergeStateRef, ok, err := workingSetSt.MaybeGet(datas.MergeStateField)
	if err != nil {
		return nil, err
	}
	if ok {
		mergeStateValSt, err := mergeStateRef.(types.Ref).TargetValue(ctx, vrw)
		if err != nil {
			return nil, err
		}
		mergeState, err = newMergeState(ctx, vrw, mergeStateValSt.(types.Struct))
		if err != nil {
			return nil, err
		}
	}
	return &WorkingSet{
		Name: name,
		meta: meta,
		format: vrw.Format(),
		st: &workingSetSt,
		workingRoot: workingRoot,
		stagedRoot: stagedRoot,
		mergeState: mergeState,
	}, nil
}
// workingSetMetaFromNomsSt deserializes a WorkingSetMeta from its noms struct
// representation. Like other places that deal with working set meta, we err on
// the side of leniency: any absent field is filled with a default rather than
// treated as an error.
func workingSetMetaFromNomsSt(st types.Struct) (WorkingSetMeta, error) {
	userVal, hasUser, err := st.MaybeGet(datas.WorkingSetMetaNameField)
	if err != nil {
		return WorkingSetMeta{}, err
	}
	if !hasUser {
		userVal = types.String("not present")
	}

	emailVal, hasEmail, err := st.MaybeGet(datas.WorkingSetMetaEmailField)
	if err != nil {
		return WorkingSetMeta{}, err
	}
	if !hasEmail {
		emailVal = types.String("not present")
	}

	timestampVal, hasTimestamp, err := st.MaybeGet(datas.WorkingSetMetaTimestampField)
	if err != nil {
		return WorkingSetMeta{}, err
	}
	if !hasTimestamp {
		timestampVal = types.Uint(0)
	}

	descVal, hasDesc, err := st.MaybeGet(datas.WorkingSetMetaDescriptionField)
	if err != nil {
		return WorkingSetMeta{}, err
	}
	if !hasDesc {
		descVal = types.String("not present")
	}

	versionVal, hasVersion, err := st.MaybeGet(datas.WorkingSetMetaVersionField)
	if err != nil {
		return WorkingSetMeta{}, err
	}
	if !hasVersion {
		versionVal = types.String("not present")
	}

	meta := WorkingSetMeta{
		User: string(userVal.(types.String)),
		Email: string(emailVal.(types.String)),
		Timestamp: uint64(timestampVal.(types.Uint)),
		Description: string(descVal.(types.String)),
		Version: string(versionVal.(types.String)),
	}
	return meta, nil
}
// RootValue returns the root value stored by this workingset
// TODO: replace references with calls to WorkingRoot
func (ws *WorkingSet) RootValue() *RootValue {
	return ws.workingRoot
}
// HashOf returns the hash of the workingset struct, which is not the same as the hash of the root value stored in the
// working set. This value is used for optimistic locking when updating a working set for a head ref.
func (ws *WorkingSet) HashOf() (hash.Hash, error) {
	if ws.st != nil {
		return ws.st.Hash(ws.format)
	}
	// A working set never read from storage has no backing struct; it hashes to the zero hash.
	return hash.Hash{}, nil
}
// Ref returns a WorkingSetRef for this WorkingSet, derived from its Name.
func (ws *WorkingSet) Ref() ref.WorkingSetRef {
	return ref.NewWorkingSetRef(ws.Name)
}
// writeValues writes this working set's root values (working, staged, and, when
// a merge is in progress, the merge state) to the database and returns refs to
// them. |mergeState| is nil when no merge is active. Both the working and
// staged roots must be set; a nil either way indicates a programming error.
func (ws *WorkingSet) writeValues(ctx context.Context, db *DoltDB) (
	workingRoot types.Ref,
	stagedRoot types.Ref,
	mergeState *types.Ref,
	err error,
) {
	if ws.stagedRoot == nil || ws.workingRoot == nil {
		// Error string follows Go convention: lowercase, no trailing punctuation.
		return types.Ref{}, types.Ref{}, nil, fmt.Errorf("working set is missing a staged or working root; this is a bug")
	}

	workingRoot, err = db.writeRootValue(ctx, ws.workingRoot)
	if err != nil {
		return types.Ref{}, types.Ref{}, nil, err
	}

	stagedRoot, err = db.writeRootValue(ctx, ws.stagedRoot)
	if err != nil {
		return types.Ref{}, types.Ref{}, nil, err
	}

	if ws.mergeState != nil {
		// The merge state references the pre-merge working root and the commit being merged.
		preMergeWorking, err := db.writeRootValue(ctx, ws.mergeState.preMergeWorking)
		if err != nil {
			return types.Ref{}, types.Ref{}, nil, err
		}

		mergeStateSt, err := datas.NewMergeState(ctx, preMergeWorking, ws.mergeState.commit.commitSt)
		if err != nil {
			return types.Ref{}, types.Ref{}, nil, err
		}

		mergeStateRef, err := db.db.WriteValue(ctx, mergeStateSt)
		if err != nil {
			return types.Ref{}, types.Ref{}, nil, err
		}
		mergeState = &mergeStateRef
	}

	return workingRoot, stagedRoot, mergeState, nil
}
+3 -2
View File
@@ -25,12 +25,13 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/encoding"
filesys2 "github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/store/types"
)
func TestAddNewerTextAndValueFromTable(t *testing.T) {
ctx := context.Background()
ddb, _ := doltdb.LoadDoltDB(ctx, types.Format_Default, doltdb.InMemDoltDB)
ddb, _ := doltdb.LoadDoltDB(ctx, types.Format_Default, doltdb.InMemDoltDB, filesys2.LocalFS)
ddb.WriteEmptyRepo(ctx, "billy bob", "bigbillieb@fake.horse")
// If no tbl/schema is provided, doc Text and Value should be nil.
@@ -84,7 +85,7 @@ func TestAddNewerTextAndValueFromTable(t *testing.T) {
func TestAddNewerTextAndDocPkFromRow(t *testing.T) {
ctx := context.Background()
ddb, _ := doltdb.LoadDoltDB(ctx, types.Format_Default, doltdb.InMemDoltDB)
ddb, _ := doltdb.LoadDoltDB(ctx, types.Format_Default, doltdb.InMemDoltDB, filesys2.LocalFS)
ddb.WriteEmptyRepo(ctx, "billy bob", "bigbillieb@fake.horse")
sch := createTestDocsSchema()
@@ -45,7 +45,10 @@ func (a StageAll) CommandString() string { return "stage_all" }
// Exec executes a StageAll command on a test dolt environment.
func (a StageAll) Exec(t *testing.T, dEnv *env.DoltEnv) error {
return actions.StageAllTables(context.Background(), dEnv.DbData())
roots, err := dEnv.Roots(context.Background())
require.NoError(t, err)
return actions.StageAllTables(context.Background(), roots, dEnv.DbData())
}
type CommitStaged struct {
@@ -57,6 +60,9 @@ func (c CommitStaged) CommandString() string { return fmt.Sprintf("commit_staged
// Exec executes a CommitStaged command on a test dolt environment.
func (c CommitStaged) Exec(t *testing.T, dEnv *env.DoltEnv) error {
roots, err := dEnv.Roots(context.Background())
require.NoError(t, err)
name, email, err := actions.GetNameAndEmail(dEnv.Config)
if err != nil {
@@ -65,7 +71,7 @@ func (c CommitStaged) Exec(t *testing.T, dEnv *env.DoltEnv) error {
dbData := dEnv.DbData()
_, err = actions.CommitStaged(context.Background(), dbData, actions.CommitStagedProps{
_, err = actions.CommitStaged(context.Background(), roots, dbData, actions.CommitStagedProps{
Message: c.Message,
Date: time.Now(),
AllowEmpty: false,
@@ -86,7 +92,10 @@ func (c CommitAll) CommandString() string { return fmt.Sprintf("commit: %s", c.M
// Exec executes a CommitAll command on a test dolt environment.
func (c CommitAll) Exec(t *testing.T, dEnv *env.DoltEnv) error {
err := actions.StageAllTables(context.Background(), dEnv.DbData())
roots, err := dEnv.Roots(context.Background())
require.NoError(t, err)
err = actions.StageAllTables(context.Background(), roots, dEnv.DbData())
require.NoError(t, err)
name, email, err := actions.GetNameAndEmail(dEnv.Config)
@@ -96,8 +105,11 @@ func (c CommitAll) Exec(t *testing.T, dEnv *env.DoltEnv) error {
}
dbData := dEnv.DbData()
// TODO: refactor StageAllTables to just modify roots in memory, not write to disk
roots, err = dEnv.Roots(context.Background())
require.NoError(t, err)
_, err = actions.CommitStaged(context.Background(), dbData, actions.CommitStagedProps{
_, err = actions.CommitStaged(context.Background(), roots, dbData, actions.CommitStagedProps{
Message: c.Message,
Date: time.Now(),
AllowEmpty: false,
@@ -126,7 +138,7 @@ func (r ResetHard) Exec(t *testing.T, dEnv *env.DoltEnv) error {
return err
}
_, err = dEnv.UpdateStagedRoot(context.Background(), headRoot)
err = dEnv.UpdateStagedRoot(context.Background(), headRoot)
if err != nil {
return err
}
@@ -147,7 +159,7 @@ func (q Query) Exec(t *testing.T, dEnv *env.DoltEnv) error {
root, err := dEnv.WorkingRoot(context.Background())
require.NoError(t, err)
sqlDb := dsqle.NewDatabase("dolt", dEnv.DbData())
engine, sqlCtx, err := dsqle.NewTestEngine(context.Background(), sqlDb, root)
engine, sqlCtx, err := dsqle.NewTestEngine(t, dEnv, context.Background(), sqlDb, root)
require.NoError(t, err)
_, iter, err := engine.Query(sqlCtx, q.Query)
@@ -186,7 +198,7 @@ func (b Branch) CommandString() string { return fmt.Sprintf("branch: %s", b.Bran
// Exec executes a Branch command on a test dolt environment.
func (b Branch) Exec(_ *testing.T, dEnv *env.DoltEnv) error {
cwb := dEnv.RepoState.Head.Ref.String()
cwb := dEnv.RepoStateReader().CWBHeadRef().String()
return actions.CreateBranchWithStartPt(context.Background(), dEnv.DbData(), b.BranchName, cwb, false)
}
@@ -225,7 +237,10 @@ func (m Merge) Exec(t *testing.T, dEnv *env.DoltEnv) error {
assert.NoError(t, err)
assert.NotEqual(t, h1, h2)
tblNames, _, err := env.MergeWouldStompChanges(context.Background(), cm2, dEnv.DbData())
workingRoot, err := dEnv.WorkingRoot(context.Background())
require.NoError(t, err)
tblNames, _, err := env.MergeWouldStompChanges(context.Background(), workingRoot, cm2, dEnv.DbData())
if err != nil {
return err
}
@@ -238,22 +253,22 @@ func (m Merge) Exec(t *testing.T, dEnv *env.DoltEnv) error {
return err
}
rv, err := cm2.GetRootValue()
assert.NoError(t, err)
h, err := dEnv.DoltDB.WriteRootValue(context.Background(), rv)
assert.NoError(t, err)
err = dEnv.DoltDB.FastForward(context.Background(), dEnv.RepoState.CWBHeadRef(), cm2)
err = dEnv.DoltDB.FastForward(context.Background(), dEnv.RepoStateReader().CWBHeadRef(), cm2)
if err != nil {
return err
}
dEnv.RepoState.Working = h.String()
dEnv.RepoState.Staged = h.String()
err = dEnv.RepoState.Save(dEnv.FS)
workingSet, err := dEnv.WorkingSet(context.Background())
if err != nil {
return errhand.VerboseErrorFromError(err)
}
rv, err := cm2.GetRootValue()
assert.NoError(t, err)
err = dEnv.UpdateWorkingSet(context.Background(), workingSet.WithWorkingRoot(rv))
require.NoError(t, err)
err = actions.SaveTrackedDocsFromWorking(context.Background(), dEnv)
assert.NoError(t, err)
@@ -264,10 +279,7 @@ func (m Merge) Exec(t *testing.T, dEnv *env.DoltEnv) error {
require.True(t, stats.Conflicts == 0)
}
h2, err := cm2.HashOf()
require.NoError(t, err)
err = dEnv.RepoState.StartMerge(h2.String(), dEnv.FS)
err = dEnv.StartMerge(context.Background(), cm2)
if err != nil {
return err
}
@@ -282,7 +294,7 @@ func (m Merge) Exec(t *testing.T, dEnv *env.DoltEnv) error {
return err
}
_, err = dEnv.UpdateStagedRoot(context.Background(), mergedRoot)
err = dEnv.UpdateStagedRoot(context.Background(), mergedRoot)
if err != nil {
return err
}
@@ -293,7 +305,7 @@ func (m Merge) Exec(t *testing.T, dEnv *env.DoltEnv) error {
func resolveCommit(t *testing.T, cSpecStr string, dEnv *env.DoltEnv) *doltdb.Commit {
cs, err := doltdb.NewCommitSpec(cSpecStr)
require.NoError(t, err)
cm, err := dEnv.DoltDB.Resolve(context.TODO(), cs, dEnv.RepoState.CWBHeadRef())
cm, err := dEnv.DoltDB.Resolve(context.TODO(), cs, dEnv.RepoStateReader().CWBHeadRef())
require.NoError(t, err)
return cm
}
+83 -86
View File
@@ -30,20 +30,17 @@ var ErrAlreadyExists = errors.New("already exists")
var ErrCOBranchDelete = errors.New("attempted to delete checked out branch")
var ErrUnmergedBranchDelete = errors.New("attempted to delete a branch that is not fully merged into master; use `-f` to force")
func MoveBranch(ctx context.Context, dEnv *env.DoltEnv, oldBranch, newBranch string, force bool) error {
func RenameBranch(ctx context.Context, dEnv *env.DoltEnv, oldBranch, newBranch string, force bool) error {
oldRef := ref.NewBranchRef(oldBranch)
newRef := ref.NewBranchRef(newBranch)
err := CopyBranch(ctx, dEnv, oldBranch, newBranch, force)
if err != nil {
return err
}
if ref.Equals(dEnv.RepoState.CWBHeadRef(), oldRef) {
dEnv.RepoState.Head = ref.MarshalableRef{Ref: newRef}
err = dEnv.RepoState.Save(dEnv.FS)
if ref.Equals(dEnv.RepoStateReader().CWBHeadRef(), oldRef) {
err = dEnv.RepoStateWriter().SetCWBHeadRef(ctx, ref.MarshalableRef{Ref: newRef})
if err != nil {
return err
}
@@ -105,7 +102,7 @@ func DeleteBranch(ctx context.Context, dEnv *env.DoltEnv, brName string, opts De
}
} else {
dref = ref.NewBranchRef(brName)
if ref.Equals(dEnv.RepoState.CWBHeadRef(), dref) {
if ref.Equals(dEnv.RepoStateReader().CWBHeadRef(), dref) {
return ErrCOBranchDelete
}
}
@@ -174,9 +171,8 @@ func CreateBranchWithStartPt(ctx context.Context, dbData env.DbData, newBranch,
}
func CreateBranchOnDB(ctx context.Context, ddb *doltdb.DoltDB, newBranch, startingPoint string, force bool, headRef ref.DoltRef) error {
newRef := ref.NewBranchRef(newBranch)
hasRef, err := ddb.HasRef(ctx, newRef)
branchRef := ref.NewBranchRef(newBranch)
hasRef, err := ddb.HasRef(ctx, branchRef)
if err != nil {
return err
}
@@ -190,110 +186,112 @@ func CreateBranchOnDB(ctx context.Context, ddb *doltdb.DoltDB, newBranch, starti
}
cs, err := doltdb.NewCommitSpec(startingPoint)
if err != nil {
return err
}
cm, err := ddb.Resolve(ctx, cs, headRef)
if err != nil {
return err
}
return ddb.NewBranchAtCommit(ctx, newRef, cm)
err = ddb.NewBranchAtCommit(ctx, branchRef, cm)
if err != nil {
return err
}
return nil
}
func createBranch(ctx context.Context, dbData env.DbData, newBranch, startingPoint string, force bool) error {
return CreateBranchOnDB(ctx, dbData.Ddb, newBranch, startingPoint, force, dbData.Rsr.CWBHeadRef())
}
// updateRootsForBranch writes the roots needed for a checkout and returns the updated work and staged hash.
func updateRootsForBranch(ctx context.Context, dbData env.DbData, dref ref.DoltRef, brName string) (wrkHash hash.Hash, stgHash hash.Hash, err error) {
hasRef, err := dbData.Ddb.HasRef(ctx, dref)
if err != nil {
return hash.Hash{}, hash.Hash{}, err
}
if !hasRef {
return hash.Hash{}, hash.Hash{}, doltdb.ErrBranchNotFound
}
if ref.Equals(dbData.Rsr.CWBHeadRef(), dref) {
return hash.Hash{}, hash.Hash{}, doltdb.ErrAlreadyOnBranch
}
currRoots, err := getRoots(ctx, dbData.Ddb, dbData.Rsr, doltdb.HeadRoot, doltdb.WorkingRoot, doltdb.StagedRoot)
if err != nil {
return hash.Hash{}, hash.Hash{}, err
}
cs, err := doltdb.NewCommitSpec(brName)
if err != nil {
return hash.Hash{}, hash.Hash{}, doltdb.RootValueUnreadable{RootType: doltdb.HeadRoot, Cause: err}
}
cm, err := dbData.Ddb.Resolve(ctx, cs, nil)
if err != nil {
return hash.Hash{}, hash.Hash{}, doltdb.RootValueUnreadable{RootType: doltdb.HeadRoot, Cause: err}
}
newRoot, err := cm.GetRootValue()
if err != nil {
return hash.Hash{}, hash.Hash{}, err
}
// UpdateRootsForBranch writes the roots needed for a branch checkout and returns the updated roots. |roots.Head|
// should be the pre-checkout head. The returned roots struct has |Head| set to |branchRoot|.
// Returns CheckoutWouldOverwrite if local working or staged changes would be clobbered by the checkout.
func UpdateRootsForBranch(ctx context.Context, roots doltdb.Roots, branchRoot *doltdb.RootValue) (doltdb.Roots, error) {
	conflicts := set.NewStrSet([]string{})

	// Carry locally-modified tables over to the new branch root, collecting any conflicts.
	wrkTblHashes, err := moveModifiedTables(ctx, roots.Head, branchRoot, roots.Working, conflicts)
	if err != nil {
		return doltdb.Roots{}, err
	}

	stgTblHashes, err := moveModifiedTables(ctx, roots.Head, branchRoot, roots.Staged, conflicts)
	if err != nil {
		return doltdb.Roots{}, err
	}

	if conflicts.Size() > 0 {
		return doltdb.Roots{}, CheckoutWouldOverwrite{conflicts.AsSlice()}
	}

	roots.Working, err = overwriteRoot(ctx, branchRoot, wrkTblHashes)
	if err != nil {
		return doltdb.Roots{}, err
	}

	roots.Staged, err = overwriteRoot(ctx, branchRoot, stgTblHashes)
	if err != nil {
		return doltdb.Roots{}, err
	}

	roots.Head = branchRoot
	return roots, nil
}
// CheckoutBranchNoDocs updates the repo state for a checkout of |branchRef|:
// it moves local modifications onto |branchRoot| via UpdateRootsForBranch, sets
// the current working branch head, and persists the new working and staged
// roots. Unlike CheckoutBranch, it does not touch docs on the filesystem.
func CheckoutBranchNoDocs(ctx context.Context, roots doltdb.Roots, branchRoot *doltdb.RootValue, rsw env.RepoStateWriter, branchRef ref.BranchRef) error {
	roots, err := UpdateRootsForBranch(ctx, roots, branchRoot)
	if err != nil {
		return err
	}
	err = rsw.SetCWBHeadRef(ctx, ref.MarshalableRef{Ref: branchRef})
	if err != nil {
		return err
	}
	// TODO: combine into single update
	err = rsw.UpdateWorkingRoot(ctx, roots.Working)
	if err != nil {
		return err
	}
	return rsw.UpdateStagedRoot(ctx, roots.Staged)
}
func CheckoutBranch(ctx context.Context, dEnv *env.DoltEnv, brName string) error {
dbData := dEnv.DbData()
dref := ref.NewBranchRef(brName)
branchRef := ref.NewBranchRef(brName)
wrkHash, stgHash, err := updateRootsForBranch(ctx, dbData, dref, brName)
db := dEnv.DoltDB
hasRef, err := db.HasRef(ctx, branchRef)
if err != nil {
return err
}
if !hasRef {
return doltdb.ErrBranchNotFound
}
if ref.Equals(dEnv.RepoStateReader().CWBHeadRef(), branchRef) {
return doltdb.ErrAlreadyOnBranch
}
branchRoot, err := BranchRoot(ctx, db, brName)
if err != nil {
return err
}
unstagedDocs, err := GetUnstagedDocs(ctx, dbData)
roots, err := dEnv.Roots(ctx)
if err != nil {
return err
}
err = dbData.Rsw.SetWorkingHash(ctx, wrkHash)
unstagedDocs, err := GetUnstagedDocs(ctx, dEnv)
if err != nil {
return err
}
err = dbData.Rsw.SetStagedHash(ctx, stgHash)
if err != nil {
return err
}
err = dbData.Rsw.SetCWBHeadRef(ctx, ref.MarshalableRef{Ref: dref})
err = CheckoutBranchNoDocs(ctx, roots, branchRoot, dEnv.RepoStateWriter(), branchRef)
if err != nil {
return err
}
@@ -301,27 +299,24 @@ func CheckoutBranch(ctx context.Context, dEnv *env.DoltEnv, brName string) error
return SaveDocsFromWorkingExcludingFSChanges(ctx, dEnv, unstagedDocs)
}
// CheckoutBranchWithoutDocs checkouts a branch without considering any working changes to the local docs. Used
// with DOLT_CHECKOUT.
func CheckoutBranchWithoutDocs(ctx context.Context, dbData env.DbData, brName string) error {
dref := ref.NewBranchRef(brName)
wrkHash, stgHash, err := updateRootsForBranch(ctx, dbData, dref, brName)
// BranchRoot returns the root value at the branch with the name given.
// Resolution failures are wrapped in doltdb.RootValueUnreadable for the head root.
// TODO: this belongs in DoltDB, maybe
func BranchRoot(ctx context.Context, db *doltdb.DoltDB, brName string) (*doltdb.RootValue, error) {
	cs, err := doltdb.NewCommitSpec(brName)
	if err != nil {
		return nil, doltdb.RootValueUnreadable{RootType: doltdb.HeadRoot, Cause: err}
	}

	cm, err := db.Resolve(ctx, cs, nil)
	if err != nil {
		return nil, doltdb.RootValueUnreadable{RootType: doltdb.HeadRoot, Cause: err}
	}

	branchRoot, err := cm.GetRootValue()
	if err != nil {
		return nil, err
	}
	return branchRoot, nil
}
var emptyHash = hash.Hash{}
@@ -392,10 +387,12 @@ func moveModifiedTables(ctx context.Context, oldRoot, newRoot, changedRoot *dolt
return resultMap, nil
}
func writeRoot(ctx context.Context, ddb *doltdb.DoltDB, head *doltdb.RootValue, tblHashes map[string]hash.Hash) (hash.Hash, error) {
// overwriteRoot writes new table hash values for the root given and returns it.
// This is an inexpensive and convenient way of replacing all the tables at once.
func overwriteRoot(ctx context.Context, head *doltdb.RootValue, tblHashes map[string]hash.Hash) (*doltdb.RootValue, error) {
names, err := head.GetTableNames(ctx)
if err != nil {
return hash.Hash{}, err
return nil, err
}
var toDrop []string
@@ -407,7 +404,7 @@ func writeRoot(ctx context.Context, ddb *doltdb.DoltDB, head *doltdb.RootValue,
head, err = head.RemoveTables(ctx, toDrop...)
if err != nil {
return hash.Hash{}, err
return nil, err
}
for k, v := range tblHashes {
@@ -417,11 +414,11 @@ func writeRoot(ctx context.Context, ddb *doltdb.DoltDB, head *doltdb.RootValue,
head, err = head.SetTableHash(ctx, k, v)
if err != nil {
return hash.Hash{}, err
return nil, err
}
}
return ddb.WriteRootValue(ctx, head)
return head, nil
}
func IsBranch(ctx context.Context, ddb *doltdb.DoltDB, str string) (bool, error) {
@@ -437,7 +434,7 @@ func MaybeGetCommit(ctx context.Context, dEnv *env.DoltEnv, str string) (*doltdb
cs, err := doltdb.NewCommitSpec(str)
if err == nil {
cm, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoState.CWBHeadRef())
cm, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
switch err {
case nil:
+38 -66
View File
@@ -22,68 +22,55 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/env"
)
func CheckoutAllTables(ctx context.Context, dbData env.DbData) error {
roots, err := getRoots(ctx, dbData.Ddb, dbData.Rsr, doltdb.WorkingRoot, doltdb.StagedRoot, doltdb.HeadRoot)
if err != nil {
return err
}
tbls, err := doltdb.UnionTableNames(ctx, roots[doltdb.WorkingRoot], roots[doltdb.StagedRoot], roots[doltdb.HeadRoot])
func CheckoutAllTables(ctx context.Context, roots doltdb.Roots, dbData env.DbData) error {
tbls, err := doltdb.UnionTableNames(ctx, roots.Working, roots.Staged, roots.Head)
if err != nil {
return err
}
docs := doltdocs.SupportedDocs
return checkoutTablesAndDocs(ctx, dbData, roots, tbls, docs)
}
func CheckoutTables(ctx context.Context, dbData env.DbData, tables []string) error {
roots, err := getRoots(ctx, dbData.Ddb, dbData.Rsr, doltdb.WorkingRoot, doltdb.StagedRoot, doltdb.HeadRoot)
if err != nil {
return err
}
return checkoutTables(ctx, dbData, roots, tables)
}
// CheckoutTablesAndDocs takes in a set of tables and docs and checks them out to another branch.
func CheckoutTablesAndDocs(ctx context.Context, dbData env.DbData, tables []string, docs doltdocs.Docs) error {
roots, err := getRoots(ctx, dbData.Ddb, dbData.Rsr, doltdb.WorkingRoot, doltdb.StagedRoot, doltdb.HeadRoot)
if err != nil {
return err
}
func CheckoutTablesAndDocs(ctx context.Context, roots doltdb.Roots, dbData env.DbData, tables []string, docs doltdocs.Docs) error {
return checkoutTablesAndDocs(ctx, dbData, roots, tables, docs)
}
func checkoutTables(ctx context.Context, dbData env.DbData, roots map[doltdb.RootType]*doltdb.RootValue, tbls []string) error {
unknownTbls := []string{}
func checkoutTables(ctx context.Context, dbData env.DbData, roots doltdb.Roots, tbls []string) (doltdb.Roots, error) {
roots, err := MoveTablesFromHeadToWorking(ctx, roots, tbls)
if err != nil {
return doltdb.Roots{}, err
}
currRoot := roots[doltdb.WorkingRoot]
staged := roots[doltdb.StagedRoot]
head := roots[doltdb.HeadRoot]
err = env.UpdateWorkingRoot(ctx, dbData.Rsw, roots.Working)
if err != nil {
return doltdb.Roots{}, err
}
return roots, nil
}
// MoveTablesFromHeadToWorking replaces the tables named from the given head to the given working root, overwriting any
// working changes, and returns the new resulting roots
func MoveTablesFromHeadToWorking(ctx context.Context, roots doltdb.Roots, tbls []string) (doltdb.Roots, error) {
var unknownTbls []string
for _, tblName := range tbls {
// TODO: not at all clear why this should be excluded (this code was moved from elsewhere)
if tblName == doltdb.DocTableName {
continue
}
tbl, ok, err := staged.GetTable(ctx, tblName)
tbl, ok, err := roots.Staged.GetTable(ctx, tblName)
if err != nil {
return err
return doltdb.Roots{}, err
}
if !ok {
tbl, ok, err = head.GetTable(ctx, tblName)
tbl, ok, err = roots.Head.GetTable(ctx, tblName)
if err != nil {
return err
return doltdb.Roots{}, err
}
if !ok {
@@ -92,49 +79,40 @@ func checkoutTables(ctx context.Context, dbData env.DbData, roots map[doltdb.Roo
}
}
currRoot, err = currRoot.PutTable(ctx, tblName, tbl)
roots.Working, err = roots.Working.PutTable(ctx, tblName, tbl)
if err != nil {
return err
return doltdb.Roots{}, err
}
}
if len(unknownTbls) > 0 {
// Return table not exist error before RemoveTables, which fails silently if the table is not on the root.
err := validateTablesExist(ctx, currRoot, unknownTbls)
err := validateTablesExist(ctx, roots.Working, unknownTbls)
if err != nil {
return err
return doltdb.Roots{}, err
}
currRoot, err = currRoot.RemoveTables(ctx, unknownTbls...)
roots.Working, err = roots.Working.RemoveTables(ctx, unknownTbls...)
if err != nil {
return err
return doltdb.Roots{}, err
}
}
// update the working root with currRoot
_, err := env.UpdateWorkingRoot(ctx, dbData.Ddb, dbData.Rsw, currRoot)
return err
return roots, nil
}
func checkoutDocs(ctx context.Context, dbData env.DbData, roots map[doltdb.RootType]*doltdb.RootValue, docs doltdocs.Docs) error {
currRoot := roots[doltdb.WorkingRoot]
staged := roots[doltdb.StagedRoot]
head := roots[doltdb.HeadRoot]
func checkoutDocs(ctx context.Context, dbData env.DbData, roots doltdb.Roots, docs doltdocs.Docs) error {
if len(docs) > 0 {
currRootWithDocs, stagedWithDocs, updatedDocs, err := getUpdatedWorkingAndStagedWithDocs(ctx, currRoot, staged, head, docs)
var err error
roots, docs, err = getUpdatedWorkingAndStagedWithDocs(ctx, roots, docs)
if err != nil {
return err
}
currRoot = currRootWithDocs
staged = stagedWithDocs
docs = updatedDocs
}
_, err := env.UpdateWorkingRoot(ctx, dbData.Ddb, dbData.Rsw, currRoot)
err := env.UpdateWorkingRoot(ctx, dbData.Rsw, roots.Working)
if err != nil {
return err
}
@@ -142,15 +120,9 @@ func checkoutDocs(ctx context.Context, dbData env.DbData, roots map[doltdb.RootT
return dbData.Drw.WriteDocsToDisk(docs)
}
func checkoutTablesAndDocs(ctx context.Context, dbData env.DbData, roots map[doltdb.RootType]*doltdb.RootValue, tbls []string, docs doltdocs.Docs) error {
err := checkoutTables(ctx, dbData, roots, tbls)
if err != nil {
return err
}
roots, err = getRoots(ctx, dbData.Ddb, dbData.Rsr, doltdb.WorkingRoot, doltdb.StagedRoot, doltdb.HeadRoot)
func checkoutTablesAndDocs(ctx context.Context, dbData env.DbData, roots doltdb.Roots, tbls []string, docs doltdocs.Docs) error {
var err error
roots, err = checkoutTables(ctx, dbData, roots, tbls)
if err != nil {
return err
}
+50 -73
View File
@@ -20,11 +20,10 @@ import (
"sort"
"time"
"github.com/dolthub/dolt/go/libraries/doltcore/fkconstrain"
"github.com/dolthub/dolt/go/libraries/doltcore/diff"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/fkconstrain"
"github.com/dolthub/dolt/go/libraries/utils/config"
"github.com/dolthub/dolt/go/store/hash"
)
@@ -60,19 +59,19 @@ func GetNameAndEmail(cfg config.ReadableConfig) (string, string, error) {
}
// CommitStaged adds a new commit to HEAD with the given props. Returns the new commit's hash as a string and an error.
func CommitStaged(ctx context.Context, dbData env.DbData, props CommitStagedProps) (string, error) {
func CommitStaged(ctx context.Context, roots doltdb.Roots, dbData env.DbData, props CommitStagedProps) (*doltdb.Commit, error) {
ddb := dbData.Ddb
rsr := dbData.Rsr
rsw := dbData.Rsw
drw := dbData.Drw
if props.Message == "" {
return "", doltdb.ErrEmptyCommitMessage
return nil, doltdb.ErrEmptyCommitMessage
}
staged, notStaged, err := diff.GetStagedUnstagedTableDeltas(ctx, ddb, rsr)
staged, notStaged, err := diff.GetStagedUnstagedTableDeltas(ctx, roots)
if err != nil {
return "", err
return nil, err
}
var stagedTblNames []string
@@ -84,119 +83,97 @@ func CommitStaged(ctx context.Context, dbData env.DbData, props CommitStagedProp
stagedTblNames = append(stagedTblNames, n)
}
if len(staged) == 0 && !rsr.IsMergeActive() && !props.AllowEmpty {
_, notStagedDocs, err := diff.GetDocDiffs(ctx, ddb, rsr, drw)
if err != nil {
return "", err
}
return "", NothingStaged{notStaged, notStagedDocs}
mergeActive, err := rsr.IsMergeActive(ctx)
if err != nil {
return nil, err
}
var mergeCmSpec []*doltdb.CommitSpec
if rsr.IsMergeActive() {
root, err := env.WorkingRoot(ctx, ddb, rsr)
if len(staged) == 0 && !mergeActive && !props.AllowEmpty {
_, notStagedDocs, err := diff.GetDocDiffs(ctx, roots, drw)
if err != nil {
return "", err
return nil, err
}
inConflict, err := root.TablesInConflict(ctx)
return nil, NothingStaged{notStaged, notStagedDocs}
}
var mergeParentCommits []*doltdb.Commit
if mergeActive {
inConflict, err := roots.Working.TablesInConflict(ctx)
if err != nil {
return "", err
return nil, err
}
if len(inConflict) > 0 {
return "", NewTblInConflictError(inConflict)
return nil, NewTblInConflictError(inConflict)
}
spec, err := doltdb.NewCommitSpec(rsr.GetMergeCommit())
commit, err := rsr.GetMergeCommit(ctx)
if err != nil {
panic("Corrupted repostate. Active merge state is not valid.")
return nil, err
}
mergeCmSpec = []*doltdb.CommitSpec{spec}
mergeParentCommits = []*doltdb.Commit{commit}
}
srt, err := env.StagedRoot(ctx, ddb, rsr)
stagedRoot, err := roots.Staged.UpdateSuperSchemasFromOther(ctx, stagedTblNames, roots.Staged)
if err != nil {
return "", err
}
hrt, err := env.HeadRoot(ctx, ddb, rsr)
if err != nil {
return "", err
}
srt, err = srt.UpdateSuperSchemasFromOther(ctx, stagedTblNames, srt)
if err != nil {
return "", err
return nil, err
}
if props.CheckForeignKeys {
srt, err = srt.ValidateForeignKeysOnSchemas(ctx)
stagedRoot, err = stagedRoot.ValidateForeignKeysOnSchemas(ctx)
if err != nil {
return "", err
return nil, err
}
err = fkconstrain.Validate(ctx, hrt, srt)
err = fkconstrain.Validate(ctx, roots.Head, stagedRoot)
if err != nil {
return "", err
return nil, err
}
}
h, err := env.UpdateStagedRoot(ctx, ddb, rsw, srt)
// TODO: combine into a single update
err = env.UpdateStagedRoot(ctx, rsw, stagedRoot)
if err != nil {
return "", err
return nil, err
}
wrt, err := env.WorkingRoot(ctx, ddb, rsr)
workingRoot, err := roots.Working.UpdateSuperSchemasFromOther(ctx, stagedTblNames, stagedRoot)
if err != nil {
return "", err
return nil, err
}
wrt, err = wrt.UpdateSuperSchemasFromOther(ctx, stagedTblNames, srt)
err = env.UpdateWorkingRoot(ctx, rsw, workingRoot)
if err != nil {
return "", err
}
_, err = env.UpdateWorkingRoot(ctx, ddb, rsw, wrt)
if err != nil {
return "", err
return nil, err
}
meta, err := doltdb.NewCommitMetaWithUserTS(props.Name, props.Email, props.Message, props.Date)
if err != nil {
return "", err
return nil, err
}
// TODO: this is only necessary in some contexts (SQL). Come up with a more coherent set of interfaces to
// rationalize where the root value writes happen before a commit is created.
h, err := ddb.WriteRootValue(ctx, stagedRoot)
if err != nil {
return nil, err
}
// logrus.Errorf("staged root is %s", stagedRoot.DebugString(ctx, true))
// DoltDB resolves the current working branch head ref to provide a parent commit.
// Any commit specs in mergeCmSpec are also resolved and added.
c, err := ddb.CommitWithParentSpecs(ctx, h, rsr.CWBHeadRef(), mergeCmSpec, meta)
c, err := ddb.CommitWithParentCommits(ctx, h, rsr.CWBHeadRef(), mergeParentCommits, meta)
if err != nil {
return "", err
return nil, err
}
err = rsw.ClearMerge()
err = rsw.ClearMerge(ctx)
if err != nil {
return "", err
return nil, err
}
h, err = c.HashOf()
if err != nil {
return "", err
}
return h.String(), nil
return c, nil
}
func ValidateForeignKeysOnCommit(ctx context.Context, srt *doltdb.RootValue, stagedTblNames []string) (*doltdb.RootValue, error) {
+21 -15
View File
@@ -124,42 +124,48 @@ func getUntrackedDocs(docs doltdocs.Docs, docDiffs *diff.DocDiffs) []string {
return untracked
}
func getUpdatedWorkingAndStagedWithDocs(ctx context.Context, working, staged, head *doltdb.RootValue, docs doltdocs.Docs) (currRoot, stgRoot *doltdb.RootValue, retDocs doltdocs.Docs, err error) {
root := head
_, ok, err := staged.GetTable(ctx, doltdb.DocTableName)
func getUpdatedWorkingAndStagedWithDocs(ctx context.Context, roots doltdb.Roots, docs doltdocs.Docs) (doltdb.Roots, doltdocs.Docs, error) {
docsRoot := roots.Head
_, ok, err := roots.Staged.GetTable(ctx, doltdb.DocTableName)
if err != nil {
return nil, nil, nil, err
return doltdb.Roots{}, nil, err
} else if ok {
root = staged
docsRoot = roots.Staged
}
docs, err = doltdocs.GetDocsFromRoot(ctx, root, doltdocs.GetDocNamesFromDocs(docs)...)
docs, err = doltdocs.GetDocsFromRoot(ctx, docsRoot, doltdocs.GetDocNamesFromDocs(docs)...)
if err != nil {
return nil, nil, nil, err
return doltdb.Roots{}, nil, err
}
currRoot, err = doltdocs.UpdateRootWithDocs(ctx, working, docs)
roots.Working, err = doltdocs.UpdateRootWithDocs(ctx, roots.Working, docs)
if err != nil {
return nil, nil, nil, err
return doltdb.Roots{}, nil, err
}
stgRoot, err = doltdocs.UpdateRootWithDocs(ctx, staged, docs)
roots.Staged, err = doltdocs.UpdateRootWithDocs(ctx, roots.Staged, docs)
if err != nil {
return nil, nil, nil, err
return doltdb.Roots{}, nil, err
}
return currRoot, stgRoot, docs, nil
return roots, docs, nil
}
// GetUnstagedDocs retrieves the unstaged docs (docs from the filesystem).
func GetUnstagedDocs(ctx context.Context, dbData env.DbData) (doltdocs.Docs, error) {
_, unstagedDocDiffs, err := diff.GetDocDiffs(ctx, dbData.Ddb, dbData.Rsr, dbData.Drw)
func GetUnstagedDocs(ctx context.Context, dEnv *env.DoltEnv) (doltdocs.Docs, error) {
roots, err := dEnv.Roots(ctx)
if err != nil {
return nil, err
}
_, unstagedDocDiffs, err := diff.GetDocDiffs(ctx, roots, dEnv.DocsReadWriter())
if err != nil {
return nil, err
}
unstagedDocs := doltdocs.Docs{}
for _, docName := range unstagedDocDiffs.Docs {
docAr, err := dbData.Drw.GetDocsOnDisk(docName)
docAr, err := dEnv.DocsReadWriter().GetDocsOnDisk(docName)
if err != nil {
return nil, err
}
+62 -90
View File
@@ -24,113 +24,90 @@ import (
"github.com/dolthub/dolt/go/libraries/utils/argparser"
)
func resetHardTables(ctx context.Context, dbData env.DbData, cSpecStr string, workingRoot, stagedRoot, headRoot *doltdb.RootValue) (*doltdb.Commit, error) {
func resetHardTables(ctx context.Context, dbData env.DbData, cSpecStr string, roots doltdb.Roots) (*doltdb.Commit, doltdb.Roots, error) {
ddb := dbData.Ddb
rsr := dbData.Rsr
rsw := dbData.Rsw
var newHead *doltdb.Commit
if cSpecStr != "" {
cs, err := doltdb.NewCommitSpec(cSpecStr)
if err != nil {
return nil, err
return nil, doltdb.Roots{}, err
}
newHead, err = ddb.Resolve(ctx, cs, rsr.CWBHeadRef())
if err != nil {
return nil, err
return nil, doltdb.Roots{}, err
}
headRoot, err = newHead.GetRootValue()
roots.Head, err = newHead.GetRootValue()
if err != nil {
return nil, err
return nil, doltdb.Roots{}, err
}
}
// need to save the state of files that aren't tracked
untrackedTables := make(map[string]*doltdb.Table)
wTblNames, err := workingRoot.GetTableNames(ctx)
wTblNames, err := roots.Working.GetTableNames(ctx)
if err != nil {
return nil, err
return nil, doltdb.Roots{}, err
}
for _, tblName := range wTblNames {
untrackedTables[tblName], _, err = workingRoot.GetTable(ctx, tblName)
untrackedTables[tblName], _, err = roots.Working.GetTable(ctx, tblName)
if err != nil {
return nil, err
return nil, doltdb.Roots{}, err
}
}
headTblNames, err := stagedRoot.GetTableNames(ctx)
headTblNames, err := roots.Staged.GetTableNames(ctx)
if err != nil {
return nil, err
return nil, doltdb.Roots{}, err
}
for _, tblName := range headTblNames {
delete(untrackedTables, tblName)
}
newWkRoot := headRoot
newWkRoot := roots.Head
for tblName, tbl := range untrackedTables {
if tblName != doltdb.DocTableName {
newWkRoot, err = newWkRoot.PutTable(ctx, tblName, tbl)
}
if err != nil {
return nil, errors.New("error: failed to write table back to database")
return nil, doltdb.Roots{}, errors.New("error: failed to write table back to database")
}
}
_, err = env.UpdateWorkingRoot(ctx, ddb, rsw, newWkRoot)
roots.Working = newWkRoot
roots.Staged = roots.Head
if err != nil {
return nil, err
}
_, err = env.UpdateStagedRoot(ctx, ddb, rsw, headRoot)
if err != nil {
return nil, err
}
return newHead, nil
return newHead, roots, nil
}
// ResetHardTables resets the tables in working, staged, and head based on the given parameters. Returns the commit hash
// if head is updated.
func ResetHardTables(ctx context.Context, dbData env.DbData, cSpecStr string, workingRoot, stagedRoot, headRoot *doltdb.RootValue) (string, error) {
newHead, err := resetHardTables(ctx, dbData, cSpecStr, workingRoot, stagedRoot, headRoot)
if err != nil {
return "", err
}
ddb := dbData.Ddb
rsr := dbData.Rsr
if newHead != nil {
if err := ddb.SetHeadToCommit(ctx, rsr.CWBHeadRef(), newHead); err != nil {
return "", err
}
h, err := newHead.HashOf()
if err != nil {
return "", err
}
return h.String(), nil
}
return "", nil
// ResetHardTables resets the tables in working, staged, and head based on the given parameters. Returns the new
// head commit and resulting roots
func ResetHardTables(ctx context.Context, dbData env.DbData, cSpecStr string, roots doltdb.Roots) (*doltdb.Commit, doltdb.Roots, error) {
return resetHardTables(ctx, dbData, cSpecStr, roots)
}
func ResetHard(ctx context.Context, dEnv *env.DoltEnv, cSpecStr string, workingRoot, stagedRoot, headRoot *doltdb.RootValue) error {
func ResetHard(ctx context.Context, dEnv *env.DoltEnv, cSpecStr string, roots doltdb.Roots) error {
dbData := dEnv.DbData()
newHead, err := resetHardTables(ctx, dbData, cSpecStr, workingRoot, stagedRoot, headRoot)
newHead, roots, err := resetHardTables(ctx, dbData, cSpecStr, roots)
if err != nil {
return err
}
ws, err := dEnv.WorkingSet(ctx)
if err != nil {
return err
}
err = dEnv.UpdateWorkingSet(ctx, ws.WithWorkingRoot(roots.Working).WithStagedRoot(roots.Staged))
if err != nil {
return err
}
@@ -140,11 +117,9 @@ func ResetHard(ctx context.Context, dEnv *env.DoltEnv, cSpecStr string, workingR
return err
}
ddb := dbData.Ddb
rsr := dbData.Rsr
if newHead != nil {
if err = ddb.SetHeadToCommit(ctx, rsr.CWBHeadRef(), newHead); err != nil {
err = dEnv.DoltDB.SetHeadToCommit(ctx, dEnv.RepoStateReader().CWBHeadRef(), newHead)
if err != nil {
return err
}
}
@@ -152,31 +127,28 @@ func ResetHard(ctx context.Context, dEnv *env.DoltEnv, cSpecStr string, workingR
return nil
}
func ResetSoftTables(ctx context.Context, dbData env.DbData, apr *argparser.ArgParseResults, stagedRoot, headRoot *doltdb.RootValue) (*doltdb.RootValue, error) {
tables, err := getUnionedTables(ctx, apr.Args(), stagedRoot, headRoot)
func ResetSoftTables(ctx context.Context, dbData env.DbData, apr *argparser.ArgParseResults, roots doltdb.Roots) (doltdb.Roots, error) {
tables, err := getUnionedTables(ctx, apr.Args(), roots.Staged, roots.Head)
tables = RemoveDocsTable(tables)
if err != nil {
return nil, err
return doltdb.Roots{}, err
}
err = ValidateTables(context.TODO(), tables, stagedRoot, headRoot)
err = ValidateTables(context.TODO(), tables, roots.Staged, roots.Head)
if err != nil {
return nil, err
return doltdb.Roots{}, err
}
stagedRoot, err = resetStaged(ctx, dbData.Ddb, dbData.Rsw, tables, stagedRoot, headRoot)
roots.Staged, err = MoveTablesBetweenRoots(ctx, tables, roots.Head, roots.Staged)
if err != nil {
return nil, err
return doltdb.Roots{}, err
}
return stagedRoot, nil
return roots, nil
}
func ResetSoft(ctx context.Context, dbData env.DbData, tables []string, stagedRoot, headRoot *doltdb.RootValue) (*doltdb.RootValue, error) {
tables, err := getUnionedTables(ctx, tables, stagedRoot, headRoot)
func ResetSoft(ctx context.Context, dbData env.DbData, tables []string, roots doltdb.Roots) (*doltdb.RootValue, error) {
tables, err := getUnionedTables(ctx, tables, roots.Staged, roots.Head)
if err != nil {
return nil, err
@@ -191,24 +163,24 @@ func ResetSoft(ctx context.Context, dbData env.DbData, tables []string, stagedRo
tables = RemoveDocsTable(tables)
}
err = ValidateTables(context.TODO(), tables, stagedRoot, headRoot)
err = ValidateTables(context.TODO(), tables, roots.Staged, roots.Head)
if err != nil {
return nil, err
}
stagedRoot, err = resetDocs(ctx, dbData, headRoot, stagedRoot, docs)
roots.Staged, err = resetDocs(ctx, dbData, roots, docs)
if err != nil {
return nil, err
}
stagedRoot, err = resetStaged(ctx, dbData.Ddb, dbData.Rsw, tables, stagedRoot, headRoot)
roots.Staged, err = resetStaged(ctx, roots, dbData.Rsw, tables)
if err != nil {
return nil, err
}
return stagedRoot, nil
return roots.Staged, nil
}
// ResetSoftToRef matches the `git reset --soft <REF>` pattern. It resets both staged and head to the previous ref
@@ -230,7 +202,7 @@ func ResetSoftToRef(ctx context.Context, dbData env.DbData, cSpecStr string) err
}
// Changed the stage to old the root. Leave the working as is.
_, err = env.UpdateStagedRoot(ctx, dbData.Ddb, dbData.Rsw, foundRoot)
err = env.UpdateStagedRoot(ctx, dbData.Rsw, foundRoot)
if err != nil {
return err
}
@@ -257,38 +229,38 @@ func getUnionedTables(ctx context.Context, tables []string, stagedRoot, headRoot
}
// resetDocs resets the working and staged docs with docs from head.
func resetDocs(ctx context.Context, dbData env.DbData, headRoot *doltdb.RootValue, staged *doltdb.RootValue, docs doltdocs.Docs) (newStgRoot *doltdb.RootValue, err error) {
docs, err = doltdocs.GetDocsFromRoot(ctx, headRoot, doltdocs.GetDocNamesFromDocs(docs)...)
func resetDocs(ctx context.Context, dbData env.DbData, roots doltdb.Roots, docs doltdocs.Docs) (newStgRoot *doltdb.RootValue, err error) {
docs, err = doltdocs.GetDocsFromRoot(ctx, roots.Head, doltdocs.GetDocNamesFromDocs(docs)...)
if err != nil {
return nil, err
}
working, err := env.WorkingRoot(ctx, dbData.Ddb, dbData.Rsr)
roots.Working, err = doltdocs.UpdateRootWithDocs(ctx, roots.Working, docs)
if err != nil {
return nil, err
}
working, err = doltdocs.UpdateRootWithDocs(ctx, working, docs)
err = env.UpdateWorkingRoot(ctx, dbData.Rsw, roots.Working)
if err != nil {
return nil, err
}
_, err = env.UpdateWorkingRoot(ctx, dbData.Ddb, dbData.Rsw, working)
if err != nil {
return nil, err
}
return doltdocs.UpdateRootWithDocs(ctx, staged, docs)
return doltdocs.UpdateRootWithDocs(ctx, roots.Staged, docs)
}
func resetStaged(ctx context.Context, ddb *doltdb.DoltDB, rsw env.RepoStateWriter, tbls []string, staged, head *doltdb.RootValue) (*doltdb.RootValue, error) {
updatedRoot, err := MoveTablesBetweenRoots(ctx, tbls, head, staged)
// TODO: this should just work in memory, not write to disk
func resetStaged(ctx context.Context, roots doltdb.Roots, rsw env.RepoStateWriter, tbls []string) (*doltdb.RootValue, error) {
newStaged, err := MoveTablesBetweenRoots(ctx, tbls, roots.Head, roots.Staged)
if err != nil {
return nil, err
}
return updatedRoot, env.UpdateStagedRootWithVErr(ddb, rsw, updatedRoot)
err = rsw.UpdateStagedRoot(ctx, newStaged)
if err != nil {
return nil, err
}
return newStaged, nil
}
// ValidateIsRef validates whether the input parameter is a valid cString
+65 -93
View File
@@ -18,17 +18,14 @@ import (
"context"
"errors"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdocs"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdocs"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
)
var ErrTablesInConflict = errors.New("table is in conflict")
func StageTables(ctx context.Context, dbData env.DbData, tbls []string) error {
ddb := dbData.Ddb
rsr := dbData.Rsr
func StageTables(ctx context.Context, roots doltdb.Roots, dbData env.DbData, tbls []string) error {
rsw := dbData.Rsw
drw := dbData.Drw
@@ -37,97 +34,109 @@ func StageTables(ctx context.Context, dbData env.DbData, tbls []string) error {
return err
}
staged, working, err := getStagedAndWorking(ctx, ddb, rsr)
if err != nil {
return err
}
if len(docs) > 0 {
working, err = doltdocs.UpdateRootWithDocs(ctx, working, docs)
roots.Working, err = doltdocs.UpdateRootWithDocs(ctx, roots.Working, docs)
if err != nil {
return err
}
}
err = stageTables(ctx, ddb, rsw, tables, staged, working)
err = stageTables(ctx, roots, rsw, tables)
if err != nil {
env.ResetWorkingDocsToStagedDocs(ctx, ddb, rsr, rsw)
env.ResetWorkingDocsToStagedDocs(ctx, roots, rsw)
return err
}
return nil
}
func StageAllTables(ctx context.Context, dbData env.DbData) error {
ddb := dbData.Ddb
rsr := dbData.Rsr
func StageTablesNoDocs(ctx context.Context, roots doltdb.Roots, tbls []string) (doltdb.Roots, error) {
return stageTablesNoEnvUpdate(ctx, roots, tbls)
}
func StageAllTables(ctx context.Context, roots doltdb.Roots, dbData env.DbData) error {
rsw := dbData.Rsw
drw := dbData.Drw
staged, err := env.StagedRoot(ctx, ddb, rsr)
if err != nil {
return err
}
working, err := env.WorkingRoot(ctx, ddb, rsr)
if err != nil {
return err
}
docs, err := drw.GetDocsOnDisk()
if err != nil {
return err
}
working, err = doltdocs.UpdateRootWithDocs(ctx, working, docs)
roots.Working, err = doltdocs.UpdateRootWithDocs(ctx, roots.Working, docs)
if err != nil {
return err
}
tbls, err := doltdb.UnionTableNames(ctx, staged, working)
tbls, err := doltdb.UnionTableNames(ctx, roots.Staged, roots.Working)
if err != nil {
return err
}
err = stageTables(ctx, ddb, rsw, tbls, staged, working)
err = stageTables(ctx, roots, rsw, tbls)
if err != nil {
env.ResetWorkingDocsToStagedDocs(ctx, ddb, rsr, rsw)
env.ResetWorkingDocsToStagedDocs(ctx, roots, rsw)
return err
}
return nil
}
func stageTables(ctx context.Context, db *doltdb.DoltDB, rsw env.RepoStateWriter, tbls []string, staged *doltdb.RootValue, working *doltdb.RootValue) error {
err := ValidateTables(ctx, tbls, staged, working)
func StageAllTablesNoDocs(ctx context.Context, roots doltdb.Roots) (doltdb.Roots, error) {
tbls, err := doltdb.UnionTableNames(ctx, roots.Staged, roots.Working)
if err != nil {
return err
return doltdb.Roots{}, err
}
working, err = checkTablesForConflicts(ctx, tbls, working)
if err != nil {
return err
}
staged, err = MoveTablesBetweenRoots(ctx, tbls, working, staged)
if err != nil {
return err
}
if _, err := env.UpdateWorkingRoot(ctx, db, rsw, working); err == nil {
if sh, err := env.UpdateStagedRoot(ctx, db, rsw, staged); err == nil {
err = rsw.SetStagedHash(ctx, sh)
if err != nil {
return env.ErrStateUpdate
}
return nil
}
}
return doltdb.ErrNomsIO
return stageTablesNoEnvUpdate(ctx, roots, tbls)
}
func stageTablesNoEnvUpdate(
ctx context.Context,
roots doltdb.Roots,
tbls []string,
) (doltdb.Roots, error) {
var err error
err = ValidateTables(ctx, tbls, roots.Staged, roots.Working)
if err != nil {
return doltdb.Roots{}, err
}
roots.Working, err = checkTablesForConflicts(ctx, tbls, roots.Working)
if err != nil {
return doltdb.Roots{}, err
}
roots.Staged, err = MoveTablesBetweenRoots(ctx, tbls, roots.Working, roots.Staged)
if err != nil {
return doltdb.Roots{}, err
}
return roots, nil
}
func stageTables(
ctx context.Context,
roots doltdb.Roots,
rsw env.RepoStateWriter,
tbls []string,
) error {
var err error
roots, err = stageTablesNoEnvUpdate(ctx, roots, tbls)
if err != nil {
return err
}
// TODO: combine to single operation
err = env.UpdateWorkingRoot(ctx, rsw, roots.Working)
if err != nil {
return err
}
return env.UpdateStagedRoot(ctx, rsw, roots.Staged)
}
// checkTablesForConflicts clears any 0-row conflicts from the tables named, and returns a new root with those
// conflicts cleared. If any tables named have real conflicts, returns an error.
func checkTablesForConflicts(ctx context.Context, tbls []string, working *doltdb.RootValue) (*doltdb.RootValue, error) {
var inConflict []string
for _, tblName := range tbls {
@@ -196,40 +205,3 @@ func ValidateTables(ctx context.Context, tbls []string, roots ...*doltdb.RootVal
return NewTblNotExistError(missing)
}
func getStagedAndWorking(ctx context.Context, ddb *doltdb.DoltDB, rsr env.RepoStateReader) (*doltdb.RootValue, *doltdb.RootValue, error) {
roots, err := getRoots(ctx, ddb, rsr, doltdb.StagedRoot, doltdb.WorkingRoot)
if err != nil {
return nil, nil, err
}
return roots[doltdb.StagedRoot], roots[doltdb.WorkingRoot], nil
}
// getRoots returns a RootValue object for each root type passed in rootTypes.
func getRoots(ctx context.Context, ddb *doltdb.DoltDB, rsr env.RepoStateReader, rootTypes ...doltdb.RootType) (map[doltdb.RootType]*doltdb.RootValue, error) {
roots := make(map[doltdb.RootType]*doltdb.RootValue)
for _, rt := range rootTypes {
var err error
var root *doltdb.RootValue
switch rt {
case doltdb.StagedRoot:
root, err = env.StagedRoot(ctx, ddb, rsr)
case doltdb.WorkingRoot:
root, err = env.WorkingRoot(ctx, ddb, rsr)
case doltdb.HeadRoot:
root, err = env.HeadRoot(ctx, ddb, rsr)
default:
panic("Method does not support this root type.")
}
if err != nil {
return nil, doltdb.RootValueUnreadable{RootType: rt, Cause: err}
}
roots[rt] = root
}
return roots, nil
}
+1 -1
View File
@@ -53,7 +53,7 @@ func CreateTag(ctx context.Context, dEnv *env.DoltEnv, tagName, startPoint strin
return err
}
cm, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoState.CWBHeadRef())
cm, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
return err
+2 -2
View File
@@ -28,7 +28,7 @@ var ErrCOWorkspaceDelete = errors.New("attempted to delete checked out workspace
var ErrBranchNameExists = errors.New("workspace name must not be existing branch name")
func CreateWorkspace(ctx context.Context, dEnv *env.DoltEnv, name, startPoint string) error {
return CreateWorkspaceOnDB(ctx, dEnv.DoltDB, name, startPoint, dEnv.RepoState.CWBHeadRef())
return CreateWorkspaceOnDB(ctx, dEnv.DoltDB, name, startPoint, dEnv.RepoStateReader().CWBHeadRef())
}
func CreateWorkspaceOnDB(ctx context.Context, ddb *doltdb.DoltDB, name, startPoint string, headRef ref.DoltRef) error {
@@ -86,7 +86,7 @@ func DeleteWorkspace(ctx context.Context, dEnv *env.DoltEnv, workspaceName strin
}
} else {
dref = ref.NewWorkspaceRef(workspaceName)
if ref.Equals(dEnv.RepoState.CWBHeadRef(), dref) {
if ref.Equals(dEnv.RepoStateReader().CWBHeadRef(), dref) {
return ErrCOWorkspaceDelete
}
}
+1 -1
View File
@@ -22,7 +22,7 @@ const (
)
func TestConfig(t *testing.T) {
dEnv := createTestEnv(true, true)
dEnv, _ := createTestEnv(true, true)
lCfg, _ := dEnv.Config.GetConfig(LocalConfig)
gCfg, _ := dEnv.Config.GetConfig(GlobalConfig)
+12 -19
View File
@@ -22,43 +22,36 @@ import (
// ResetWorkingDocsToStagedDocs resets the `dolt_docs` table on the working root to match the staged root.
// If the `dolt_docs` table does not exist on the staged root, it will be removed from the working root.
func ResetWorkingDocsToStagedDocs(ctx context.Context, ddb *doltdb.DoltDB, rsr RepoStateReader, rsw RepoStateWriter) error {
wrkRoot, err := WorkingRoot(ctx, ddb, rsr)
func ResetWorkingDocsToStagedDocs(
ctx context.Context,
roots doltdb.Roots,
rsw RepoStateWriter,
) error {
stgDocTbl, stgDocsFound, err := roots.Staged.GetTable(ctx, doltdb.DocTableName)
if err != nil {
return err
}
stgRoot, err := StagedRoot(ctx, ddb, rsr)
if err != nil {
return err
}
stgDocTbl, stgDocsFound, err := stgRoot.GetTable(ctx, doltdb.DocTableName)
if err != nil {
return err
}
_, wrkDocsFound, err := wrkRoot.GetTable(ctx, doltdb.DocTableName)
_, wrkDocsFound, err := roots.Working.GetTable(ctx, doltdb.DocTableName)
if err != nil {
return err
}
if wrkDocsFound && !stgDocsFound {
newWrkRoot, err := wrkRoot.RemoveTables(ctx, doltdb.DocTableName)
newWrkRoot, err := roots.Working.RemoveTables(ctx, doltdb.DocTableName)
if err != nil {
return err
}
_, err = UpdateWorkingRoot(ctx, ddb, rsw, newWrkRoot)
return err
return UpdateWorkingRoot(ctx, rsw, newWrkRoot)
}
if stgDocsFound {
newWrkRoot, err := wrkRoot.PutTable(ctx, doltdb.DocTableName, stgDocTbl)
newWrkRoot, err := roots.Working.PutTable(ctx, doltdb.DocTableName, stgDocTbl)
if err != nil {
return err
}
_, err = UpdateWorkingRoot(ctx, ddb, rsw, newWrkRoot)
return err
return UpdateWorkingRoot(ctx, rsw, newWrkRoot)
}
return nil
}
+292 -79
View File
@@ -81,22 +81,23 @@ type DoltEnv struct {
func Load(ctx context.Context, hdp HomeDirProvider, fs filesys.Filesys, urlStr, version string) *DoltEnv {
config, cfgErr := loadDoltCliConfig(hdp, fs)
repoState, rsErr := LoadRepoState(fs)
docs, docsErr := doltdocs.LoadDocs(fs)
ddb, dbLoadErr := doltdb.LoadDoltDB(ctx, types.Format_Default, urlStr)
ddb, dbLoadErr := doltdb.LoadDoltDB(ctx, types.Format_Default, urlStr, fs)
dEnv := &DoltEnv{
version,
config,
cfgErr,
repoState,
rsErr,
docs,
docsErr,
ddb,
dbLoadErr,
fs,
urlStr,
hdp,
Version: version,
Config: config,
CfgLoadErr: cfgErr,
RepoState: repoState,
RSLoadErr: rsErr,
Docs: docs,
DocsLoadErr: docsErr,
DoltDB: ddb,
DBLoadError: dbLoadErr,
FS: fs,
urlStr: urlStr,
hdp: hdp,
}
if dbLoadErr == nil && dEnv.HasDoltDir() {
@@ -124,9 +125,84 @@ func Load(ctx context.Context, hdp HomeDirProvider, fs filesys.Filesys, urlStr,
dbfactory.InitializeFactories(dEnv)
if rsErr == nil && dbLoadErr == nil {
// If the working set isn't present in the DB, create it from the repo state. This step can be removed post 1.0.
_, err := dEnv.WorkingSet(ctx)
if err == doltdb.ErrWorkingSetNotFound {
err := dEnv.initWorkingSetFromRepoState(ctx)
if err != nil {
dEnv.RSLoadErr = err
}
} else if err != nil {
dEnv.RSLoadErr = err
}
}
return dEnv
}
// initWorkingSetFromRepoState sets the working set for the env's head to mirror the contents of the repo state file.
// This is only necessary to migrate repos written before this method was introduced, and can be removed after 1.0.
// Returns an error if the repo state hashes are unparseable or the roots they name cannot be read from the DB.
func (dEnv *DoltEnv) initWorkingSetFromRepoState(ctx context.Context) error {
	headRef := dEnv.RepoStateReader().CWBHeadRef()
	wsRef, err := ref.WorkingSetRefForHead(headRef)
	if err != nil {
		return err
	}

	workingHash, ok := hash.MaybeParse(dEnv.RepoState.working)
	if !ok {
		// Report the offending string from the repo state file, not the zero hash left by the failed parse.
		return fmt.Errorf("corrupt repo, invalid working hash %s", dEnv.RepoState.working)
	}

	workingRoot, err := dEnv.DoltDB.ReadRootValue(ctx, workingHash)
	if err != nil {
		return err
	}

	stagedHash, ok := hash.MaybeParse(dEnv.RepoState.staged)
	if !ok {
		// Same as above: the string is the useful diagnostic, not the unparsed hash value.
		return fmt.Errorf("corrupt repo, invalid staged hash %s", dEnv.RepoState.staged)
	}

	stagedRoot, err := dEnv.DoltDB.ReadRootValue(ctx, stagedHash)
	if err != nil {
		return err
	}

	mergeState, err := mergeStateToMergeState(ctx, dEnv.RepoState.merge, dEnv.DoltDB)
	if err != nil {
		return err
	}

	ws := doltdb.EmptyWorkingSet(wsRef).WithWorkingRoot(workingRoot).WithStagedRoot(stagedRoot).WithMergeState(mergeState)
	return dEnv.UpdateWorkingSet(ctx, ws)
}
// mergeStateToMergeState converts legacy repo-state merge info into a doltdb.MergeState for storage in the
// working set. Returns (nil, nil) when no merge was in progress.
func mergeStateToMergeState(ctx context.Context, mergeState *mergeState, db *doltdb.DoltDB) (*doltdb.MergeState, error) {
	if mergeState == nil {
		return nil, nil
	}

	cs, err := doltdb.NewCommitSpec(mergeState.Commit)
	if err != nil {
		// Don't panic on a corrupt repo state file; surface the parse failure (and its cause) to the caller,
		// which already handles an error return from this function.
		return nil, fmt.Errorf("corrupted repostate: active merge commit %q is not valid: %w", mergeState.Commit, err)
	}

	commit, err := db.Resolve(ctx, cs, nil)
	if err != nil {
		return nil, err
	}

	pmwh := hash.Parse(mergeState.PreMergeWorking)
	pmwr, err := db.ReadRootValue(ctx, pmwh)
	if err != nil {
		return nil, err
	}

	return doltdb.MergeStateFromCommitAndWorking(commit, pmwr), nil
}
// HasDoltDir returns true if the .dolt directory exists and is a valid directory
func (dEnv *DoltEnv) HasDoltDir() bool {
return dEnv.hasDoltDir("./")
@@ -230,7 +306,7 @@ func (dEnv *DoltEnv) InitRepoWithNoData(ctx context.Context, nbf *types.NomsBinF
return err
}
dEnv.DoltDB, err = doltdb.LoadDoltDB(ctx, nbf, dEnv.urlStr)
dEnv.DoltDB, err = doltdb.LoadDoltDB(ctx, nbf, dEnv.urlStr, filesys.LocalFS)
return err
}
@@ -295,7 +371,7 @@ func (dEnv *DoltEnv) InitDBAndRepoState(ctx context.Context, nbf *types.NomsBinF
// Does not update repo state.
func (dEnv *DoltEnv) InitDBWithTime(ctx context.Context, nbf *types.NomsBinFormat, name, email string, t time.Time) error {
var err error
dEnv.DoltDB, err = doltdb.LoadDoltDB(ctx, nbf, dEnv.urlStr)
dEnv.DoltDB, err = doltdb.LoadDoltDB(ctx, nbf, dEnv.urlStr, dEnv.FS)
if err != nil {
return err
@@ -322,38 +398,126 @@ func (dEnv *DoltEnv) InitializeRepoState(ctx context.Context) error {
return err
}
rootHash, err := root.HashOf()
dEnv.RepoState, err = CreateRepoState(dEnv.FS, doltdb.MasterBranch)
if err != nil {
return ErrStateUpdate
}
// TODO: combine into one update
err = dEnv.UpdateWorkingRoot(ctx, root)
if err != nil {
return err
}
dEnv.RepoState, err = CreateRepoState(dEnv.FS, doltdb.MasterBranch, rootHash)
err = dEnv.UpdateStagedRoot(ctx, root)
if err != nil {
return ErrStateUpdate
return err
}
dEnv.RSLoadErr = nil
return nil
}
func (dEnv *DoltEnv) WorkingRoot(ctx context.Context) (*doltdb.RootValue, error) {
return dEnv.DoltDB.ReadRootValue(ctx, dEnv.RepoState.WorkingHash())
type RootsProvider interface {
GetRoots(ctx context.Context) (doltdb.Roots, error)
}
func (dEnv *DoltEnv) UpdateWorkingRoot(ctx context.Context, newRoot *doltdb.RootValue) error {
h, err := dEnv.DoltDB.WriteRootValue(ctx, newRoot)
func (dEnv *DoltEnv) Roots(ctx context.Context) (doltdb.Roots, error) {
ws, err := dEnv.WorkingSet(ctx)
if err != nil {
return doltdb.ErrNomsIO
return doltdb.Roots{}, err
}
return dEnv.RepoStateWriter().SetWorkingHash(ctx, h)
headRoot, err := dEnv.HeadRoot(ctx)
if err != nil {
return doltdb.Roots{}, err
}
return doltdb.Roots{
Head: headRoot,
Working: ws.WorkingRoot(),
Staged: ws.StagedRoot(),
}, nil
}
// WorkingRoot returns the working root for the current working branch, as stored in the working set.
func (dEnv *DoltEnv) WorkingRoot(ctx context.Context) (*doltdb.RootValue, error) {
	ws, err := dEnv.WorkingSet(ctx)
	if err != nil {
		return nil, err
	}

	return ws.RootValue(), nil
}
// WorkingSet resolves the working set stored in the DB for the current working branch.
func (dEnv *DoltEnv) WorkingSet(ctx context.Context) (*doltdb.WorkingSet, error) {
	wsRef, err := ref.WorkingSetRefForHead(dEnv.RepoState.CWBHeadRef())
	if err != nil {
		return nil, err
	}

	return dEnv.DoltDB.ResolveWorkingSet(ctx, wsRef)
}
// UpdateWorkingRoot updates the working root for the current working branch to the root value given.
// This method can fail if another client updates the working root at the same time.
func (dEnv *DoltEnv) UpdateWorkingRoot(ctx context.Context, newRoot *doltdb.RootValue) error {
	var prevHash hash.Hash
	var wsRef ref.WorkingSetRef

	ws, err := dEnv.WorkingSet(ctx)
	switch {
	case err == doltdb.ErrWorkingSetNotFound:
		// First time updating the root: create an empty working set at the current head.
		wsRef, err = ref.WorkingSetRefForHead(dEnv.RepoState.CWBHeadRef())
		if err != nil {
			return err
		}
		ws = doltdb.EmptyWorkingSet(wsRef).WithWorkingRoot(newRoot).WithStagedRoot(newRoot)
	case err != nil:
		return err
	default:
		// Existing working set: its current hash guards against concurrent updates.
		prevHash, err = ws.HashOf()
		if err != nil {
			return err
		}
		wsRef = ws.Ref()
	}

	// TODO: add actual trace logging here
	// logrus.Infof("Updating working root to %s", newRoot.DebugString(context.Background(), true))

	return dEnv.DoltDB.UpdateWorkingSet(ctx, wsRef, ws.WithWorkingRoot(newRoot), prevHash, dEnv.workingSetMeta())
}
// UpdateWorkingSet updates the working set for the current working branch to the value given.
// This method can fail if another client updates the working set at the same time.
func (dEnv *DoltEnv) UpdateWorkingSet(ctx context.Context, ws *doltdb.WorkingSet) error {
	prevHash, err := ws.HashOf()
	if err != nil {
		return err
	}

	return dEnv.DoltDB.UpdateWorkingSet(ctx, ws.Ref(), ws, prevHash, dEnv.workingSetMeta())
}
type repoStateReader struct {
dEnv *DoltEnv
}
func (r *repoStateReader) StagedRoot(ctx context.Context) (*doltdb.RootValue, error) {
return r.dEnv.StagedRoot(ctx)
}
func (r *repoStateReader) WorkingRoot(ctx context.Context) (*doltdb.RootValue, error) {
return r.dEnv.WorkingRoot(ctx)
}
func (r *repoStateReader) CWBHeadRef() ref.DoltRef {
return r.dEnv.RepoState.CWBHeadRef()
}
@@ -373,24 +537,21 @@ func (r *repoStateReader) CWBHeadHash(ctx context.Context) (hash.Hash, error) {
return cm.HashOf()
}
func (r *repoStateReader) WorkingHash() hash.Hash {
return r.dEnv.RepoState.WorkingHash()
}
func (r *repoStateReader) StagedHash() hash.Hash {
return hash.Parse(r.dEnv.RepoState.Staged)
return hash.Parse(r.dEnv.RepoState.staged)
}
func (r *repoStateReader) IsMergeActive() bool {
return r.dEnv.RepoState.Merge != nil
// IsMergeActive reports whether a merge is in progress, delegating to the underlying DoltEnv.
func (r *repoStateReader) IsMergeActive(ctx context.Context) (bool, error) {
	return r.dEnv.IsMergeActive(ctx)
}
func (r *repoStateReader) GetMergeCommit() string {
return r.dEnv.RepoState.Merge.Commit
}
func (r *repoStateReader) GetMergeCommit(ctx context.Context) (*doltdb.Commit, error) {
ws, err := r.dEnv.WorkingSet(ctx)
if err != nil {
return nil, err
}
func (r *repoStateReader) GetPreMergeWorking() string {
return r.dEnv.RepoState.Merge.PreMergeWorking
return ws.MergeState().Commit(), nil
}
func (dEnv *DoltEnv) RepoStateReader() RepoStateReader {
@@ -398,34 +559,12 @@ func (dEnv *DoltEnv) RepoStateReader() RepoStateReader {
}
type repoStateWriter struct {
dEnv *DoltEnv
}
func (r *repoStateWriter) SetStagedHash(ctx context.Context, h hash.Hash) error {
r.dEnv.RepoState.Staged = h.String()
err := r.dEnv.RepoState.Save(r.dEnv.FS)
if err != nil {
return ErrStateUpdate
}
return nil
}
func (r *repoStateWriter) SetWorkingHash(ctx context.Context, h hash.Hash) error {
r.dEnv.RepoState.Working = h.String()
err := r.dEnv.RepoState.Save(r.dEnv.FS)
if err != nil {
return ErrStateUpdate
}
return nil
*DoltEnv
}
func (r *repoStateWriter) SetCWBHeadRef(ctx context.Context, marshalableRef ref.MarshalableRef) error {
r.dEnv.RepoState.Head = marshalableRef
err := r.dEnv.RepoState.Save(r.dEnv.FS)
r.RepoState.Head = marshalableRef
err := r.RepoState.Save(r.FS)
if err != nil {
return ErrStateUpdate
@@ -434,16 +573,17 @@ func (r *repoStateWriter) SetCWBHeadRef(ctx context.Context, marshalableRef ref.
return nil
}
func (r *repoStateWriter) AbortMerge() error {
return r.dEnv.RepoState.AbortMerge(r.dEnv.FS)
// TODO: kill merge methods
// AbortMerge aborts the in-progress merge by delegating to the embedded DoltEnv.
func (r *repoStateWriter) AbortMerge(ctx context.Context) error {
	return r.DoltEnv.AbortMerge(ctx)
}
func (r *repoStateWriter) ClearMerge() error {
return r.dEnv.RepoState.ClearMerge(r.dEnv.FS)
// ClearMerge clears merge state by delegating to the embedded DoltEnv.
func (r *repoStateWriter) ClearMerge(ctx context.Context) error {
	return r.DoltEnv.ClearMerge(ctx)
}
func (r *repoStateWriter) StartMerge(commitStr string) error {
return r.dEnv.RepoState.StartMerge(commitStr, r.dEnv.FS)
// StartMerge records the start of a merge with the given commit by delegating to the embedded DoltEnv.
func (r *repoStateWriter) StartMerge(ctx context.Context, commit *doltdb.Commit) error {
	return r.DoltEnv.StartMerge(ctx, commit)
}
func (dEnv *DoltEnv) RepoStateWriter() RepoStateWriter {
@@ -501,25 +641,93 @@ func (dEnv *DoltEnv) DbData() DbData {
}
}
// StagedRoot returns the staged root value in the current working set.
func (dEnv *DoltEnv) StagedRoot(ctx context.Context) (*doltdb.RootValue, error) {
	workingSet, err := dEnv.WorkingSet(ctx)
	if err != nil {
		return nil, err
	}

	return workingSet.StagedRoot(), nil
}
func (dEnv *DoltEnv) UpdateStagedRoot(ctx context.Context, newRoot *doltdb.RootValue) (hash.Hash, error) {
h, err := dEnv.DoltDB.WriteRootValue(ctx, newRoot)
// UpdateStagedRoot updates the staged root for the current working branch. This can fail if multiple clients attempt
// to update at the same time.
func (dEnv *DoltEnv) UpdateStagedRoot(ctx context.Context, newRoot *doltdb.RootValue) error {
var h hash.Hash
var wsRef ref.WorkingSetRef
if err != nil {
return hash.Hash{}, doltdb.ErrNomsIO
ws, err := dEnv.WorkingSet(ctx)
if err == doltdb.ErrWorkingSetNotFound {
// first time updating root
wsRef, err = ref.WorkingSetRefForHead(dEnv.RepoState.CWBHeadRef())
if err != nil {
return err
}
ws = doltdb.EmptyWorkingSet(wsRef).WithWorkingRoot(newRoot).WithStagedRoot(newRoot)
} else if err != nil {
return err
} else {
h, err = ws.HashOf()
if err != nil {
return err
}
wsRef = ws.Ref()
}
dEnv.RepoState.Staged = h.String()
err = dEnv.RepoState.Save(dEnv.FS)
return dEnv.DoltDB.UpdateWorkingSet(ctx, wsRef, ws.WithStagedRoot(newRoot), h, dEnv.workingSetMeta())
}
// AbortMerge writes the working set's AbortMerge result back to the DB, ending the in-progress merge.
// This can fail if another client updates the working set at the same time.
func (dEnv *DoltEnv) AbortMerge(ctx context.Context) error {
	ws, err := dEnv.WorkingSet(ctx)
	if err != nil {
		return err
	}

	// The current hash guards the compare-and-set against concurrent working set updates.
	h, err := ws.HashOf()
	if err != nil {
		return err
	}

	return dEnv.DoltDB.UpdateWorkingSet(ctx, ws.Ref(), ws.AbortMerge(), h, dEnv.workingSetMeta())
}
// workingSetMeta builds the metadata (user, email, timestamp, description) recorded alongside each
// working set update written by this environment.
func (dEnv *DoltEnv) workingSetMeta() *doltdb.WorkingSetMeta {
	return &doltdb.WorkingSetMeta{
		User:        *dEnv.Config.GetStringOrDefault(UserNameKey, ""),
		Email:       *dEnv.Config.GetStringOrDefault(UserEmailKey, ""),
		Timestamp:   uint64(time.Now().Unix()),
		Description: "updated from dolt environment",
	}
}
// ClearMerge writes the working set's ClearMerge result back to the DB, removing merge state.
// This can fail if another client updates the working set at the same time.
func (dEnv *DoltEnv) ClearMerge(ctx context.Context) error {
	ws, err := dEnv.WorkingSet(ctx)
	if err != nil {
		return err
	}

	prevHash, err := ws.HashOf()
	if err != nil {
		return err
	}

	return dEnv.DoltDB.UpdateWorkingSet(ctx, ws.Ref(), ws.ClearMerge(), prevHash, dEnv.workingSetMeta())
}
// StartMerge records the start of a merge with the given commit in the working set.
// This can fail if another client updates the working set at the same time.
func (dEnv *DoltEnv) StartMerge(ctx context.Context, commit *doltdb.Commit) error {
	ws, err := dEnv.WorkingSet(ctx)
	if err != nil {
		return err
	}

	prevHash, err := ws.HashOf()
	if err != nil {
		return err
	}

	return dEnv.DoltDB.UpdateWorkingSet(ctx, ws.Ref(), ws.StartMerge(commit), prevHash, dEnv.workingSetMeta())
}
// todo: move this out of env to actions
@@ -561,8 +769,13 @@ func (dEnv *DoltEnv) PutTableToWorking(ctx context.Context, sch schema.Schema, r
return dEnv.UpdateWorkingRoot(ctx, newRoot)
}
func (dEnv *DoltEnv) IsMergeActive() bool {
return dEnv.RepoState.Merge != nil
// IsMergeActive reports whether the current working set has a merge in progress.
func (dEnv *DoltEnv) IsMergeActive(ctx context.Context) (bool, error) {
	workingSet, err := dEnv.WorkingSet(ctx)
	if err != nil {
		return false, err
	}

	return workingSet.MergeActive(), nil
}
func (dEnv *DoltEnv) GetTablesWithConflicts(ctx context.Context) ([]string, error) {
+113 -50
View File
@@ -17,10 +17,12 @@ package env
import (
"context"
"encoding/json"
"io/ioutil"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/libraries/doltcore/dbfactory"
@@ -41,7 +43,7 @@ func testHomeDirFunc() (string, error) {
return testHomeDir, nil
}
func createTestEnv(isInitialized bool, hasLocalConfig bool) *DoltEnv {
func createTestEnv(isInitialized bool, hasLocalConfig bool) (*DoltEnv, *filesys.InMemFS) {
initialDirs := []string{testHomeDir, workingDir}
initialFiles := map[string][]byte{}
@@ -51,9 +53,8 @@ func createTestEnv(isInitialized bool, hasLocalConfig bool) *DoltEnv {
initialDirs = append(initialDirs, doltDir)
initialDirs = append(initialDirs, doltDataDir)
hashStr := hash.Hash{}.String()
masterRef := ref.NewBranchRef("master")
repoState := &RepoState{ref.MarshalableRef{Ref: masterRef}, hashStr, hashStr, nil, nil, nil}
repoState := &RepoState{Head: ref.MarshalableRef{Ref: masterRef}}
repoStateData, err := json.Marshal(repoState)
if err != nil {
@@ -72,11 +73,20 @@ func createTestEnv(isInitialized bool, hasLocalConfig bool) *DoltEnv {
fs := filesys.NewInMemFS(initialDirs, initialFiles, workingDir)
dEnv := Load(context.Background(), testHomeDirFunc, fs, doltdb.InMemDoltDB, "test")
return dEnv
return dEnv, fs
}
// createFileTestEnv loads a DoltEnv backed by the local filesystem rooted at workingDir,
// with homeDir served as the user's home directory.
func createFileTestEnv(t *testing.T, workingDir, homeDir string) *DoltEnv {
	fs, err := filesys.LocalFilesysWithWorkingDir(filepath.ToSlash(workingDir))
	require.NoError(t, err)

	homeProvider := func() (string, error) {
		return homeDir, nil
	}
	return Load(context.Background(), homeProvider, fs, doltdb.LocalDirDoltDB, "test")
}
func TestNonRepoDir(t *testing.T) {
dEnv := createTestEnv(false, false)
dEnv, _ := createTestEnv(false, false)
if !isCWDEmpty(dEnv) {
t.Error("Should start with a clean wd")
@@ -100,31 +110,21 @@ func TestNonRepoDir(t *testing.T) {
}
func TestRepoDir(t *testing.T) {
dEnv := createTestEnv(true, true)
dEnv, _ := createTestEnv(true, true)
assert.True(t, dEnv.HasDoltDir())
assert.True(t, dEnv.HasLocalConfig())
if !dEnv.HasDoltDir() || !dEnv.HasLocalConfig() {
t.Fatal("local config and .dolt dir should have been created")
}
userName, err := dEnv.Config.GetString("user.name")
require.NoError(t, err)
assert.Equal(t, "bheni", userName)
if dEnv.CfgLoadErr != nil {
t.Error("Only global config load / create error should result in an error")
}
if dEnv.RSLoadErr != nil {
t.Error("Repostate should be valid for an initialized directory")
}
if dEnv.DocsLoadErr != nil {
t.Error("Docs should be valid for an initialized directory")
}
if un, err := dEnv.Config.GetString("user.name"); err != nil || un != "bheni" {
t.Error("Bad local config value.")
}
assert.NoError(t, dEnv.CfgLoadErr)
assert.NoError(t, dEnv.DocsLoadErr)
// RSLoadErr will be set because the above method of creating the repo doesn't initialize a valid working or staged
}
func TestRepoDirNoLocal(t *testing.T) {
dEnv := createTestEnv(true, false)
dEnv, _ := createTestEnv(true, false)
if !dEnv.HasDoltDir() {
t.Fatal(".dolt dir should exist.")
@@ -132,17 +132,9 @@ func TestRepoDirNoLocal(t *testing.T) {
t.Fatal("This should not be here before creation")
}
if dEnv.CfgLoadErr != nil {
t.Error("Only global config load / create error should result in an error")
}
if dEnv.RSLoadErr != nil {
t.Error("File doesn't exist. There should be an error if the directory doesn't exist.")
}
if dEnv.DocsLoadErr != nil {
t.Error("Files don't exist. There should be an error if the directory doesn't exist.")
}
require.NoError(t, dEnv.CfgLoadErr)
require.NoError(t, dEnv.DocsLoadErr)
// RSLoadErr will be set because the above method of creating the repo doesn't initialize a valid working or staged
err := dEnv.Config.CreateLocalConfig(map[string]string{"user.name": "bheni"})
require.NoError(t, err)
@@ -157,24 +149,15 @@ func TestRepoDirNoLocal(t *testing.T) {
}
func TestInitRepo(t *testing.T) {
dEnv := createTestEnv(false, false)
dEnv, _ := createTestEnv(false, false)
err := dEnv.InitRepo(context.Background(), types.Format_Default, "aoeu aoeu", "aoeu@aoeu.org")
if err != nil {
t.Error("Failed to init repo.", err.Error())
}
require.NoError(t, err)
_, err = dEnv.WorkingRoot(context.Background())
if err != nil {
t.Error("Failed to get working root value.")
}
require.NoError(t, err)
_, err = dEnv.StagedRoot(context.Background())
if err != nil {
t.Error("Failed to get staged root value.")
}
require.NoError(t, err)
for _, doc := range doltdocs.SupportedDocs {
docPath := doltdocs.GetDocFilePath(doc.File)
@@ -184,6 +167,79 @@ func TestInitRepo(t *testing.T) {
}
}
// TestMigrateWorkingSet tests migrating a repo with the old RepoState fields to a new one.
// It initializes a repo, fabricates a legacy repo_state.json (working/staged/merge hashes),
// deletes the DB-stored working set, then re-loads the env and verifies the working set was
// reconstructed from the legacy file.
func TestMigrateWorkingSet(t *testing.T) {
	t.Skip("This fails under race on ubuntu / mac")

	// TODO: t.TempDir breaks on windows because of automatic cleanup (files still in use)
	// dir := t.TempDir()
	working, err := ioutil.TempDir("", "TestMigrateWorkingSet*")
	require.NoError(t, err)
	homeDir, err := ioutil.TempDir("", "TestMigrateWorkingSet*")
	require.NoError(t, err)

	dEnv := createFileTestEnv(t, working, homeDir)
	assert.NoError(t, dEnv.CfgLoadErr)

	err = dEnv.InitRepo(context.Background(), types.Format_Default, "aoeu aoeu", "aoeu@aoeu.org")
	require.NoError(t, err)

	ws, err := dEnv.WorkingSet(context.Background())
	require.NoError(t, err)

	// Make a new repo with the contents of this one, but with the working set cleared out and the repo state filled in
	// with the legacy values

	// We don't have a merge in progress, so we'll just fake one. We're only interested in seeing the fields loaded and
	// persisted to the working set
	commit, err := dEnv.DoltDB.ResolveCommitRef(context.Background(), dEnv.RepoState.CWBHeadRef())
	require.NoError(t, err)
	// NOTE(review): StartMerge's return value is discarded here — presumably only the rs.Merge
	// fields written below matter for the migration; confirm this is intentional.
	ws.StartMerge(commit)

	workingRoot := ws.WorkingRoot()
	stagedRoot := ws.StagedRoot()
	workingHash, err := workingRoot.HashOf()
	require.NoError(t, err)
	stagedHash, err := stagedRoot.HashOf()
	require.NoError(t, err)

	// Build the legacy repo state with the old-style working/staged/merge hash strings.
	rs := repoStateLegacyFromRepoState(dEnv.RepoState)
	rs.Working = workingHash.String()
	rs.Staged = stagedHash.String()

	commitHash, err := commit.HashOf()
	require.NoError(t, err)
	rs.Merge = &mergeState{
		Commit:          commitHash.String(),
		PreMergeWorking: workingHash.String(),
	}

	// Clear the working set
	require.NoError(t, dEnv.DoltDB.DeleteWorkingSet(context.Background(), ws.Ref()))

	// Make sure it's gone
	_, err = dEnv.WorkingSet(context.Background())
	require.Equal(t, doltdb.ErrWorkingSetNotFound, err)

	// Now write the repo state file to disk and re-load the repo
	require.NoError(t, rs.save(dEnv.FS))

	dEnv = Load(context.Background(), testHomeDirFunc, dEnv.FS, doltdb.LocalDirDoltDB, "test")
	assert.NoError(t, dEnv.RSLoadErr)
	assert.NoError(t, dEnv.CfgLoadErr)
	assert.NoError(t, dEnv.DocsLoadErr)

	// The migration should have rebuilt the working set from the legacy repo state values.
	ws, err = dEnv.WorkingSet(context.Background())
	require.NoError(t, err)

	assert.Equal(t, mustHash(workingRoot.HashOf()), mustHash(ws.WorkingRoot().HashOf()))
	assert.Equal(t, mustHash(stagedRoot.HashOf()), mustHash(ws.StagedRoot().HashOf()))
	assert.Equal(t, mustHash(commit.HashOf()), mustHash(ws.MergeState().Commit().HashOf()))
	assert.Equal(t, mustHash(workingRoot.HashOf()), mustHash(ws.MergeState().PreMergeWorkingRoot().HashOf()))
}
func isCWDEmpty(dEnv *DoltEnv) bool {
isEmpty := true
dEnv.FS.Iter("./", true, func(_ string, _ int64, _ bool) bool {
@@ -194,8 +250,15 @@ func isCWDEmpty(dEnv *DoltEnv) bool {
return isEmpty
}
// mustHash unwraps a (hash.Hash, error) pair, panicking on error. Test helper only.
// The parameter is named h (not hash) to avoid shadowing the imported hash package.
func mustHash(h hash.Hash, err error) hash.Hash {
	if err != nil {
		panic(err)
	}
	return h
}
func TestBestEffortDelete(t *testing.T) {
dEnv := createTestEnv(true, true)
dEnv, _ := createTestEnv(true, true)
if isCWDEmpty(dEnv) {
t.Error("Dir should not be empty before delete.")
+3 -2
View File
@@ -19,6 +19,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/dbfactory"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
filesys2 "github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/store/types"
)
@@ -55,7 +56,7 @@ func (r *Remote) GetParamOrDefault(pName, defVal string) string {
}
func (r *Remote) GetRemoteDB(ctx context.Context, nbf *types.NomsBinFormat) (*doltdb.DoltDB, error) {
return doltdb.LoadDoltDBWithParams(ctx, nbf, r.Url, r.Params)
return doltdb.LoadDoltDBWithParams(ctx, nbf, r.Url, filesys2.LocalFS, r.Params)
}
func (r *Remote) GetRemoteDBWithoutCaching(ctx context.Context, nbf *types.NomsBinFormat) (*doltdb.DoltDB, error) {
@@ -64,5 +65,5 @@ func (r *Remote) GetRemoteDBWithoutCaching(ctx context.Context, nbf *types.NomsB
params[k] = v
}
params[dbfactory.NoCachingParameter] = "true"
return doltdb.LoadDoltDBWithParams(ctx, nbf, r.Url, params)
return doltdb.LoadDoltDBWithParams(ctx, nbf, r.Url, filesys2.LocalFS, params)
}
+132 -170
View File
@@ -17,9 +17,7 @@ package env
import (
"context"
"encoding/json"
"fmt"
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdocs"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
@@ -30,22 +28,24 @@ import (
type RepoStateReader interface {
CWBHeadRef() ref.DoltRef
CWBHeadSpec() *doltdb.CommitSpec
CWBHeadHash(ctx context.Context) (hash.Hash, error)
WorkingHash() hash.Hash
StagedHash() hash.Hash
IsMergeActive() bool
GetMergeCommit() string
GetPreMergeWorking() string
// TODO: get rid of this
IsMergeActive(ctx context.Context) (bool, error)
// TODO: get rid of this
GetMergeCommit(ctx context.Context) (*doltdb.Commit, error)
}
type RepoStateWriter interface {
// SetCWBHeadSpec(context.Context, *doltdb.CommitSpec) error
SetStagedHash(context.Context, hash.Hash) error
SetWorkingHash(context.Context, hash.Hash) error
// TODO: get rid of this
UpdateStagedRoot(ctx context.Context, newRoot *doltdb.RootValue) error
// TODO: get rid of this
UpdateWorkingRoot(ctx context.Context, newRoot *doltdb.RootValue) error
SetCWBHeadRef(context.Context, ref.MarshalableRef) error
AbortMerge() error
ClearMerge() error
StartMerge(commitStr string) error
// TODO: get rid of this
AbortMerge(ctx context.Context) error
// TODO: get rid of this
ClearMerge(ctx context.Context) error
// TODO: get rid of this
StartMerge(ctx context.Context, commit *doltdb.Commit) error
}
type DocsReadWriter interface {
@@ -67,20 +67,67 @@ type BranchConfig struct {
Remote string `json:"remote"`
}
type MergeState struct {
type RepoState struct {
Head ref.MarshalableRef `json:"head"`
Remotes map[string]Remote `json:"remotes"`
Branches map[string]BranchConfig `json:"branches"`
// |staged|, |working|, and |merge| are legacy fields left over from when Dolt repos stored this info in the repo
// state file, not in the DB directly. They're still here so that we can migrate existing repositories forward to the
// new storage format, but they should be used only for this purpose and are no longer written.
staged string
working string
merge *mergeState
}
// repoStateLegacy only exists to unmarshall legacy repo state files, since the JSON marshaller can't work with
// unexported fields
type repoStateLegacy struct {
Head ref.MarshalableRef `json:"head"`
Remotes map[string]Remote `json:"remotes"`
Branches map[string]BranchConfig `json:"branches"`
Staged string `json:"staged,omitempty"`
Working string `json:"working,omitempty"`
Merge *mergeState `json:"merge,omitempty"`
}
// repoStateLegacyFromRepoState creates a new repoStateLegacy from a RepoState file. Only for testing.
func repoStateLegacyFromRepoState(rs *RepoState) *repoStateLegacy {
	legacy := &repoStateLegacy{
		Head:     rs.Head,
		Remotes:  rs.Remotes,
		Branches: rs.Branches,
		Staged:   rs.staged,
		Working:  rs.working,
		Merge:    rs.merge,
	}
	return legacy
}
type mergeState struct {
Commit string `json:"commit"`
PreMergeWorking string `json:"working_pre_merge"`
}
type RepoState struct {
Head ref.MarshalableRef `json:"head"`
Staged string `json:"staged"`
Working string `json:"working"`
Merge *MergeState `json:"merge"`
Remotes map[string]Remote `json:"remotes"`
Branches map[string]BranchConfig `json:"branches"`
// toRepoState converts a deserialized legacy repo state into the current RepoState form,
// carrying the staged/working/merge values into the unexported legacy fields.
func (rs *repoStateLegacy) toRepoState() *RepoState {
	converted := &RepoState{
		Head:     rs.Head,
		Remotes:  rs.Remotes,
		Branches: rs.Branches,
		staged:   rs.Staged,
		working:  rs.Working,
		merge:    rs.Merge,
	}
	return converted
}
// save marshals this legacy repo state as indented JSON and writes it to the repo state file
// on the filesystem given.
func (rs *repoStateLegacy) save(fs filesys.ReadWriteFS) error {
	data, err := json.MarshalIndent(rs, "", " ")
	if err != nil {
		return err
	}

	return fs.WriteFile(getRepoStateFile(), data)
}
// LoadRepoState parses the repo state file from the file system given
func LoadRepoState(fs filesys.ReadWriteFS) (*RepoState, error) {
path := getRepoStateFile()
data, err := fs.ReadFile(path)
@@ -89,26 +136,25 @@ func LoadRepoState(fs filesys.ReadWriteFS) (*RepoState, error) {
return nil, err
}
var repoState RepoState
var repoState repoStateLegacy
err = json.Unmarshal(data, &repoState)
if err != nil {
return nil, err
}
return &repoState, nil
return repoState.toRepoState(), nil
}
func CloneRepoState(fs filesys.ReadWriteFS, r Remote) (*RepoState, error) {
h := hash.Hash{}
hashStr := h.String()
rs := &RepoState{ref.MarshalableRef{
rs := &RepoState{Head: ref.MarshalableRef{
Ref: ref.NewBranchRef("master")},
hashStr,
hashStr,
nil,
map[string]Remote{r.Name: r},
make(map[string]BranchConfig),
staged: hashStr,
working: hashStr,
Remotes: map[string]Remote{r.Name: r},
Branches: make(map[string]BranchConfig),
}
err := rs.Save(fs)
@@ -120,8 +166,7 @@ func CloneRepoState(fs filesys.ReadWriteFS, r Remote) (*RepoState, error) {
return rs, nil
}
func CreateRepoState(fs filesys.ReadWriteFS, br string, rootHash hash.Hash) (*RepoState, error) {
hashStr := rootHash.String()
func CreateRepoState(fs filesys.ReadWriteFS, br string) (*RepoState, error) {
headRef, err := ref.Parse(br)
if err != nil {
@@ -129,12 +174,9 @@ func CreateRepoState(fs filesys.ReadWriteFS, br string, rootHash hash.Hash) (*Re
}
rs := &RepoState{
ref.MarshalableRef{Ref: headRef},
hashStr,
hashStr,
nil,
make(map[string]Remote),
make(map[string]BranchConfig),
Head: ref.MarshalableRef{Ref: headRef},
Remotes: make(map[string]Remote),
Branches: make(map[string]BranchConfig),
}
err = rs.Save(fs)
@@ -146,16 +188,14 @@ func CreateRepoState(fs filesys.ReadWriteFS, br string, rootHash hash.Hash) (*Re
return rs, nil
}
func (rs *RepoState) Save(fs filesys.ReadWriteFS) error {
// Save writes this repo state file to disk on the filesystem given
func (rs RepoState) Save(fs filesys.ReadWriteFS) error {
data, err := json.MarshalIndent(rs, "", " ")
if err != nil {
return err
}
path := getRepoStateFile()
return fs.WriteFile(path, data)
return fs.WriteFile(getRepoStateFile(), data)
}
func (rs *RepoState) CWBHeadRef() ref.DoltRef {
@@ -167,61 +207,20 @@ func (rs *RepoState) CWBHeadSpec() *doltdb.CommitSpec {
return spec
}
func (rs *RepoState) StartMerge(commit string, fs filesys.Filesys) error {
rs.Merge = &MergeState{commit, rs.Working}
return rs.Save(fs)
}
func (rs *RepoState) AbortMerge(fs filesys.Filesys) error {
rs.Working = rs.Merge.PreMergeWorking
return rs.ClearMerge(fs)
}
func (rs *RepoState) ClearMerge(fs filesys.Filesys) error {
rs.Merge = nil
return rs.Save(fs)
}
func (rs *RepoState) AddRemote(r Remote) {
rs.Remotes[r.Name] = r
}
func (rs *RepoState) WorkingHash() hash.Hash {
return hash.Parse(rs.Working)
}
func (rs *RepoState) StagedHash() hash.Hash {
return hash.Parse(rs.Staged)
}
func (rs *RepoState) IsMergeActive() bool {
return rs.Merge != nil
}
func (rs *RepoState) GetMergeCommit() string {
return rs.Merge.Commit
}
// Returns the working root.
func WorkingRoot(ctx context.Context, ddb *doltdb.DoltDB, rsr RepoStateReader) (*doltdb.RootValue, error) {
return ddb.ReadRootValue(ctx, rsr.WorkingHash())
}
// Updates the working root.
func UpdateWorkingRoot(ctx context.Context, ddb *doltdb.DoltDB, rsw RepoStateWriter, newRoot *doltdb.RootValue) (hash.Hash, error) {
h, err := ddb.WriteRootValue(ctx, newRoot)
func UpdateWorkingRoot(ctx context.Context, rsw RepoStateWriter, newRoot *doltdb.RootValue) error {
// logrus.Infof("Updating working root with value %s", newRoot.DebugString(ctx, true))
err := rsw.UpdateWorkingRoot(ctx, newRoot)
if err != nil {
return hash.Hash{}, doltdb.ErrNomsIO
return ErrStateUpdate
}
err = rsw.SetWorkingHash(ctx, h)
if err != nil {
return hash.Hash{}, ErrStateUpdate
}
return h, nil
return nil
}
// Returns the head root.
@@ -235,106 +234,50 @@ func HeadRoot(ctx context.Context, ddb *doltdb.DoltDB, rsr RepoStateReader) (*do
return commit.GetRootValue()
}
// Returns the staged root.
func StagedRoot(ctx context.Context, ddb *doltdb.DoltDB, rsr RepoStateReader) (*doltdb.RootValue, error) {
return ddb.ReadRootValue(ctx, rsr.StagedHash())
}
// Updates the staged root.
func UpdateStagedRoot(ctx context.Context, ddb *doltdb.DoltDB, rsw RepoStateWriter, newRoot *doltdb.RootValue) (hash.Hash, error) {
h, err := ddb.WriteRootValue(ctx, newRoot)
// TODO: remove this
func UpdateStagedRoot(ctx context.Context, rsw RepoStateWriter, newRoot *doltdb.RootValue) error {
err := rsw.UpdateStagedRoot(ctx, newRoot)
if err != nil {
return hash.Hash{}, doltdb.ErrNomsIO
}
err = rsw.SetStagedHash(ctx, h)
if err != nil {
return hash.Hash{}, ErrStateUpdate
}
return h, nil
}
func UpdateStagedRootWithVErr(ddb *doltdb.DoltDB, rsw RepoStateWriter, updatedRoot *doltdb.RootValue) errhand.VerboseError {
_, err := UpdateStagedRoot(context.Background(), ddb, rsw, updatedRoot)
switch err {
case doltdb.ErrNomsIO:
return errhand.BuildDError("fatal: failed to write value").Build()
case ErrStateUpdate:
return errhand.BuildDError("fatal: failed to update the staged root state").Build()
return ErrStateUpdate
}
return nil
}
func GetRoots(ctx context.Context, ddb *doltdb.DoltDB, rsr RepoStateReader) (working *doltdb.RootValue, staged *doltdb.RootValue, head *doltdb.RootValue, err error) {
working, err = WorkingRoot(ctx, ddb, rsr)
if err != nil {
return nil, nil, nil, err
}
staged, err = StagedRoot(ctx, ddb, rsr)
if err != nil {
return nil, nil, nil, err
}
head, err = HeadRoot(ctx, ddb, rsr)
if err != nil {
return nil, nil, nil, err
}
return working, staged, head, nil
}
func MergeWouldStompChanges(ctx context.Context, mergeCommit *doltdb.Commit, dbData DbData) ([]string, map[string]hash.Hash, error) {
// TODO: this needs to be a function in the merge package, not repo state
func MergeWouldStompChanges(ctx context.Context, workingRoot *doltdb.RootValue, mergeCommit *doltdb.Commit, dbData DbData) ([]string, map[string]hash.Hash, error) {
headRoot, err := HeadRoot(ctx, dbData.Ddb, dbData.Rsr)
if err != nil {
return nil, nil, err
}
workingRoot, err := WorkingRoot(ctx, dbData.Ddb, dbData.Rsr)
if err != nil {
return nil, nil, err
}
mergeRoot, err := mergeCommit.GetRootValue()
if err != nil {
return nil, nil, err
}
headTableHashes, err := mapTableHashes(ctx, headRoot)
if err != nil {
return nil, nil, err
}
workingTableHashes, err := mapTableHashes(ctx, workingRoot)
if err != nil {
return nil, nil, err
}
mergeTableHashes, err := mapTableHashes(ctx, mergeRoot)
if err != nil {
return nil, nil, err
}
headWorkingDiffs := diffTableHashes(headTableHashes, workingTableHashes)
mergeWorkingDiffs := diffTableHashes(headTableHashes, mergeTableHashes)
mergedHeadDiffs := diffTableHashes(headTableHashes, mergeTableHashes)
stompedTables := make([]string, 0, len(headWorkingDiffs))
for tName, _ := range headWorkingDiffs {
if _, ok := mergeWorkingDiffs[tName]; ok {
if _, ok := mergedHeadDiffs[tName]; ok {
// even if the working changes match the merge changes, don't allow (matches git behavior).
stompedTables = append(stompedTables, tName)
}
@@ -344,38 +287,57 @@ func MergeWouldStompChanges(ctx context.Context, mergeCommit *doltdb.Commit, dbD
}
// GetGCKeepers queries |rsr| to find a list of values that need to be temporarily saved during GC.
func GetGCKeepers(ctx context.Context, rsr RepoStateReader, ddb *doltdb.DoltDB) ([]hash.Hash, error) {
keepers := []hash.Hash{
rsr.WorkingHash(),
rsr.StagedHash(),
// TODO: move this out of repo_state.go
func GetGCKeepers(ctx context.Context, env *DoltEnv) ([]hash.Hash, error) {
workingRoot, err := env.WorkingRoot(ctx)
if err != nil {
return nil, err
}
if rsr.IsMergeActive() {
spec, err := doltdb.NewCommitSpec(rsr.GetMergeCommit())
if err != nil {
return nil, err
}
cm, err := ddb.Resolve(ctx, spec, nil)
workingHash, err := workingRoot.HashOf()
if err != nil {
return nil, err
}
stagedRoot, err := env.StagedRoot(ctx)
if err != nil {
return nil, err
}
stagedHash, err := stagedRoot.HashOf()
if err != nil {
return nil, err
}
keepers := []hash.Hash{
workingHash,
stagedHash,
}
mergeActive, err := env.IsMergeActive(ctx)
if err != nil {
return nil, err
}
if mergeActive {
ws, err := env.WorkingSet(ctx)
if err != nil {
return nil, err
}
cm := ws.MergeState().Commit()
ch, err := cm.HashOf()
if err != nil {
return nil, err
}
pmw := hash.Parse(rsr.GetPreMergeWorking())
val, err := ddb.ValueReadWriter().ReadValue(ctx, pmw)
pmw := ws.MergeState().PreMergeWorkingRoot()
pmwh, err := pmw.HashOf()
if err != nil {
return nil, err
}
if val == nil {
return nil, fmt.Errorf("MergeState.PreMergeWorking is a dangling hash")
}
keepers = append(keepers, ch, pmw)
keepers = append(keepers, ch, pmwh)
}
return keepers, nil
@@ -535,7 +535,7 @@ func checkSchema(t *testing.T, r *doltdb.RootValue, tableName string, expectedSc
func checkRows(t *testing.T, dEnv *env.DoltEnv, root *doltdb.RootValue, tableName string, sch schema.Schema, selectQuery string, expectedRows []row.Row) {
sqlDb := dsqle.NewDatabase("dolt", dEnv.DbData())
engine, sqlCtx, err := dsqle.NewTestEngine(context.Background(), sqlDb, root)
engine, sqlCtx, err := dsqle.NewTestEngine(t, dEnv, context.Background(), sqlDb, root)
require.NoError(t, err)
s, rowIter, err := engine.Query(sqlCtx, selectQuery)
@@ -369,7 +369,7 @@ func testSuperSchema(t *testing.T, test SuperSchemaTest) {
if test.ExpectedErrStr != "" {
require.Error(t, ee, test.ExpectedErrStr)
} else {
spec := dEnv.RepoState.CWBHeadRef()
spec := dEnv.RepoStateReader().CWBHeadRef()
require.Equal(t, "refs/heads/"+test.ExpectedBranch, spec.String())
r, err := dEnv.WorkingRoot(context.Background())
@@ -121,7 +121,7 @@ func TestMerge(t *testing.T) {
root, err := dEnv.WorkingRoot(ctx)
require.NoError(t, err)
actRows, err := sqle.ExecuteSelect(dEnv, dEnv.DoltDB, root, test.query)
actRows, err := sqle.ExecuteSelect(t, dEnv, dEnv.DoltDB, root, test.query)
require.NoError(t, err)
require.Equal(t, len(test.expected), len(actRows))
@@ -234,7 +234,7 @@ func TestMergeConflicts(t *testing.T) {
root, err := dEnv.WorkingRoot(ctx)
require.NoError(t, err)
actRows, err := sqle.ExecuteSelect(dEnv, dEnv.DoltDB, root, test.query)
actRows, err := sqle.ExecuteSelect(t, dEnv, dEnv.DoltDB, root, test.query)
require.NoError(t, err)
require.Equal(t, len(test.expected), len(actRows))
+8 -37
View File
@@ -751,25 +751,21 @@ func mergeAutoIncrementValues(ctx context.Context, tbl, otherTbl, resultTbl *dol
func MergeCommits(ctx context.Context, commit, mergeCommit *doltdb.Commit) (*doltdb.RootValue, map[string]*MergeStats, error) {
ancCommit, err := doltdb.GetCommitAncestor(ctx, commit, mergeCommit)
if err != nil {
return nil, nil, err
}
ourRoot, err := commit.GetRootValue()
if err != nil {
return nil, nil, err
}
theirRoot, err := mergeCommit.GetRootValue()
if err != nil {
return nil, nil, err
}
ancRoot, err := ancCommit.GetRootValue()
if err != nil {
return nil, nil, err
}
@@ -860,41 +856,21 @@ func MergeRoots(ctx context.Context, ourRoot, theirRoot, ancRoot *doltdb.RootVal
return newRoot, tblToStats, nil
}
func GetTablesInConflict(ctx context.Context, ddb *doltdb.DoltDB, rsr env.RepoStateReader) (workingInConflict, stagedInConflict, headInConflict []string, err error) {
var headRoot, stagedRoot, workingRoot *doltdb.RootValue
headRoot, err = env.HeadRoot(ctx, ddb, rsr)
func GetTablesInConflict(ctx context.Context, roots doltdb.Roots) (
workingInConflict, stagedInConflict, headInConflict []string,
err error,
) {
headInConflict, err = roots.Head.TablesInConflict(ctx)
if err != nil {
return nil, nil, nil, err
}
stagedRoot, err = env.StagedRoot(ctx, ddb, rsr)
stagedInConflict, err = roots.Staged.TablesInConflict(ctx)
if err != nil {
return nil, nil, nil, err
}
workingRoot, err = env.WorkingRoot(ctx, ddb, rsr)
if err != nil {
return nil, nil, nil, err
}
headInConflict, err = headRoot.TablesInConflict(ctx)
if err != nil {
return nil, nil, nil, err
}
stagedInConflict, err = stagedRoot.TablesInConflict(ctx)
if err != nil {
return nil, nil, nil, err
}
workingInConflict, err = workingRoot.TablesInConflict(ctx)
workingInConflict, err = roots.Working.TablesInConflict(ctx)
if err != nil {
return nil, nil, nil, err
}
@@ -902,16 +878,11 @@ func GetTablesInConflict(ctx context.Context, ddb *doltdb.DoltDB, rsr env.RepoSt
return workingInConflict, stagedInConflict, headInConflict, err
}
func GetDocsInConflict(ctx context.Context, ddb *doltdb.DoltDB, rsr env.RepoStateReader, drw env.DocsReadWriter) (*diff.DocDiffs, error) {
func GetDocsInConflict(ctx context.Context, workingRoot *doltdb.RootValue, drw env.DocsReadWriter) (*diff.DocDiffs, error) {
docs, err := drw.GetDocsOnDisk()
if err != nil {
return nil, err
}
workingRoot, err := env.WorkingRoot(ctx, ddb, rsr)
if err != nil {
return nil, err
}
return diff.NewDocDiffs(ctx, workingRoot, nil, docs)
}
+2 -1
View File
@@ -28,6 +28,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/encoding"
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
filesys2 "github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/store/types"
)
@@ -263,7 +264,7 @@ func init() {
}
func setupMergeTest(t *testing.T) (types.ValueReadWriter, *doltdb.Commit, *doltdb.Commit, types.Map, types.Map) {
ddb, _ := doltdb.LoadDoltDB(context.Background(), types.Format_Default, doltdb.InMemDoltDB)
ddb, _ := doltdb.LoadDoltDB(context.Background(), types.Format_Default, doltdb.InMemDoltDB, filesys2.LocalFS)
vrw := ddb.ValueReadWriter()
err := ddb.WriteEmptyRepo(context.Background(), name, email)
@@ -57,7 +57,7 @@ func createRootAndFS() (*doltdb.DoltDB, *doltdb.RootValue, filesys.Filesys) {
initialDirs := []string{testHomeDir, workingDir}
fs := filesys.NewInMemFS(initialDirs, nil, workingDir)
fs.WriteFile(testSchemaFileName, []byte(testSchema))
ddb, _ := doltdb.LoadDoltDB(context.Background(), types.Format_Default, doltdb.InMemDoltDB)
ddb, _ := doltdb.LoadDoltDB(context.Background(), types.Format_Default, doltdb.InMemDoltDB, filesys.LocalFS)
ddb.WriteEmptyRepo(context.Background(), "billy bob", "bigbillieb@fake.horse")
cs, _ := doltdb.NewCommitSpec("master")
@@ -299,7 +299,7 @@ func (te *tableEditorWriteCloser) GC(ctx context.Context) error {
return err
}
keepers, err := env.GetGCKeepers(ctx, te.dEnv.RepoStateReader(), te.dEnv.DoltDB)
keepers, err := env.GetGCKeepers(ctx, te.dEnv)
if err != nil {
return err
}
+5 -9
View File
@@ -105,7 +105,7 @@ func AllBranches(ctx context.Context, dEnv *env.DoltEnv, replay ReplayCommitFn,
// CurrentBranch rewrites the history of the current branch using the |replay| function.
func CurrentBranch(ctx context.Context, dEnv *env.DoltEnv, replay ReplayCommitFn, nerf NeedsRebaseFn) error {
return rebaseRefs(ctx, dEnv.DbData(), replay, nerf, dEnv.RepoState.CWBHeadRef())
return rebaseRefs(ctx, dEnv.DbData(), replay, nerf, dEnv.RepoStateReader().CWBHeadRef())
}
// AllBranchesByRoots rewrites the history of all branches in the repo using the |replay| function.
@@ -122,7 +122,7 @@ func AllBranchesByRoots(ctx context.Context, dEnv *env.DoltEnv, replay ReplayRoo
// CurrentBranchByRoot rewrites the history of the current branch using the |replay| function.
func CurrentBranchByRoot(ctx context.Context, dEnv *env.DoltEnv, replay ReplayRootFn, nerf NeedsRebaseFn) error {
replayCommit := wrapReplayRootFn(replay)
return rebaseRefs(ctx, dEnv.DbData(), replayCommit, nerf, dEnv.RepoState.CWBHeadRef())
return rebaseRefs(ctx, dEnv.DbData(), replayCommit, nerf, dEnv.RepoStateReader().CWBHeadRef())
}
func rebaseRefs(ctx context.Context, dbData env.DbData, replay ReplayCommitFn, nerf NeedsRebaseFn, refs ...ref.DoltRef) error {
@@ -183,17 +183,13 @@ func rebaseRefs(ctx context.Context, dbData env.DbData, replay ReplayCommitFn, n
return err
}
_, err = env.UpdateStagedRoot(ctx, ddb, rsw, r)
// TODO: this should be a single update to repo state, not two
err = env.UpdateStagedRoot(ctx, rsw, r)
if err != nil {
return err
}
_, err = env.UpdateWorkingRoot(ctx, ddb, rsw, r)
if err != nil {
return err
}
return err
return env.UpdateWorkingRoot(ctx, rsw, r)
}
func rebase(ctx context.Context, ddb *doltdb.DoltDB, replay ReplayCommitFn, nerf NeedsRebaseFn, origins ...*doltdb.Commit) ([]*doltdb.Commit, error) {
+1 -1
View File
@@ -55,7 +55,7 @@ func (r WorkingSetRef) GetPath() string {
}
func (r WorkingSetRef) ToHeadRef() (DoltRef, error) {
return Parse(r.GetPath())
return Parse(refPrefix + r.GetPath())
}
// String returns the fully qualified reference name e.g.
@@ -51,17 +51,17 @@ func RunModifyTypeTests(t *testing.T, tests []ModifyTypeTest) {
dEnv := dtestutils.CreateTestEnv()
root, err := dEnv.WorkingRoot(ctx)
require.NoError(t, err)
root, err = executeModify(ctx, dEnv, root, fmt.Sprintf("CREATE TABLE test(pk BIGINT PRIMARY KEY, v1 %s);", test.FromType))
root, err = executeModify(t, ctx, dEnv, root, fmt.Sprintf("CREATE TABLE test(pk BIGINT PRIMARY KEY, v1 %s);", test.FromType))
require.NoError(t, err)
root, err = executeModify(ctx, dEnv, root, fmt.Sprintf("INSERT INTO test VALUES %s;", test.InsertValues))
root, err = executeModify(t, ctx, dEnv, root, fmt.Sprintf("INSERT INTO test VALUES %s;", test.InsertValues))
require.NoError(t, err)
root, err = executeModify(ctx, dEnv, root, fmt.Sprintf("ALTER TABLE test MODIFY v1 %s;", test.ToType))
root, err = executeModify(t, ctx, dEnv, root, fmt.Sprintf("ALTER TABLE test MODIFY v1 %s;", test.ToType))
if test.ExpectedErr {
assert.Error(t, err)
return
}
require.NoError(t, err)
res, err := executeSelect(ctx, dEnv, root, "SELECT v1 FROM test ORDER BY pk;")
res, err := executeSelect(t, ctx, dEnv, root, "SELECT v1 FROM test ORDER BY pk;")
require.NoError(t, err)
assert.Equal(t, test.SelectRes, res)
})
@@ -113,10 +113,10 @@ func parseTime(timestampLayout bool, value string) time.Time {
return t.UTC()
}
func executeSelect(ctx context.Context, dEnv *env.DoltEnv, root *doltdb.RootValue, query string) ([]interface{}, error) {
func executeSelect(t *testing.T, ctx context.Context, dEnv *env.DoltEnv, root *doltdb.RootValue, query string) ([]interface{}, error) {
var err error
db := sqle.NewDatabase("dolt", dEnv.DbData())
engine, sqlCtx, err := sqle.NewTestEngine(ctx, db, root)
engine, sqlCtx, err := sqle.NewTestEngine(t, dEnv, ctx, db, root)
if err != nil {
return nil, err
}
@@ -142,9 +142,9 @@ func executeSelect(ctx context.Context, dEnv *env.DoltEnv, root *doltdb.RootValu
return vals, nil
}
func executeModify(ctx context.Context, dEnv *env.DoltEnv, root *doltdb.RootValue, query string) (*doltdb.RootValue, error) {
func executeModify(t *testing.T, ctx context.Context, dEnv *env.DoltEnv, root *doltdb.RootValue, query string) (*doltdb.RootValue, error) {
db := sqle.NewDatabase("dolt", dEnv.DbData())
engine, sqlCtx, err := sqle.NewTestEngine(ctx, db, root)
engine, sqlCtx, err := sqle.NewTestEngine(t, dEnv, ctx, db, root)
if err != nil {
return nil, err
}
+4 -4
View File
@@ -40,10 +40,10 @@ type SetupFn func(t *testing.T, dEnv *env.DoltEnv)
// Runs the query given and returns the result. The schema result of the query's execution is currently ignored, and
// the targetSchema given is used to prepare all rows.
func executeSelect(ctx context.Context, dEnv *env.DoltEnv, root *doltdb.RootValue, query string) ([]sql.Row, sql.Schema, error) {
func executeSelect(t *testing.T, ctx context.Context, dEnv *env.DoltEnv, root *doltdb.RootValue, query string) ([]sql.Row, sql.Schema, error) {
var err error
db := NewDatabase("dolt", dEnv.DbData())
engine, sqlCtx, err := NewTestEngine(ctx, db, root)
engine, sqlCtx, err := NewTestEngine(t, dEnv, ctx, db, root)
if err != nil {
return nil, nil, err
}
@@ -67,9 +67,9 @@ func executeSelect(ctx context.Context, dEnv *env.DoltEnv, root *doltdb.RootValu
}
// Runs the query given and returns the error (if any).
func executeModify(ctx context.Context, dEnv *env.DoltEnv, root *doltdb.RootValue, query string) (*doltdb.RootValue, error) {
func executeModify(t *testing.T, ctx context.Context, dEnv *env.DoltEnv, root *doltdb.RootValue, query string) (*doltdb.RootValue, error) {
db := NewDatabase("dolt", dEnv.DbData())
engine, sqlCtx, err := NewTestEngine(ctx, db, root)
engine, sqlCtx, err := NewTestEngine(t, dEnv, ctx, db, root)
if err != nil {
return nil, err
+74 -115
View File
@@ -34,6 +34,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/alterschema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dtables"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlfmt"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
@@ -75,45 +76,9 @@ func (r ReadOnlyDatabase) IsReadOnly() bool {
return true
}
// DisabledTransaction is a no-op transaction type that lets us feature-gate transaction logic changes
type DisabledTransaction struct{}
func (d DisabledTransaction) String() string {
return "Disabled transaction"
}
func (db Database) StartTransaction(ctx *sql.Context) (sql.Transaction, error) {
if !TransactionsEnabled(ctx) {
return DisabledTransaction{}, nil
}
dsession := DSessFromSess(ctx.Session)
// When we begin the transaction, we must synchronize the state of this session with the global state for the
// current head ref. Any pending transaction has already been committed before this happens.
wsRef := dsession.workingSets[ctx.GetCurrentDatabase()]
ws, err := db.ddb.ResolveWorkingSet(ctx, wsRef)
if err != nil {
return nil, err
}
err = db.SetRoot(ctx, ws.RootValue())
if err != nil {
return nil, err
}
root, err := db.GetRoot(ctx)
if err != nil {
return nil, err
}
err = db.setHeadHash(ctx, wsRef)
if err != nil {
return nil, err
}
return NewDoltTransaction(root, wsRef, db.DbData()), nil
dsession := dsess.DSessFromSess(ctx.Session)
return dsession.StartTransaction(ctx, db.Name())
}
func (db Database) setHeadHash(ctx *sql.Context, ref ref.WorkingSetRef) error {
@@ -131,35 +96,35 @@ func (db Database) setHeadHash(ctx *sql.Context, ref ref.WorkingSetRef) error {
if err != nil {
return err
}
if doltSession, ok := ctx.Session.(*DoltSession); ok {
return doltSession.SetSessionVarDirectly(ctx, HeadKey(db.name), headHash.String())
if doltSession, ok := ctx.Session.(*dsess.Session); ok {
return doltSession.SetSessionVarDirectly(ctx, dsess.HeadKey(db.name), headHash.String())
} else {
return ctx.SetSessionVariable(ctx, HeadKey(db.name), headHash.String())
return ctx.SetSessionVariable(ctx, dsess.HeadKey(db.name), headHash.String())
}
}
func (db Database) CommitTransaction(ctx *sql.Context, tx sql.Transaction) error {
dsession := DSessFromSess(ctx.Session)
dsession := dsess.DSessFromSess(ctx.Session)
return dsession.CommitTransaction(ctx, db.name, tx)
}
func (db Database) Rollback(ctx *sql.Context, tx sql.Transaction) error {
dsession := DSessFromSess(ctx.Session)
dsession := dsess.DSessFromSess(ctx.Session)
return dsession.RollbackTransaction(ctx, db.name, tx)
}
func (db Database) CreateSavepoint(ctx *sql.Context, tx sql.Transaction, name string) error {
dsession := DSessFromSess(ctx.Session)
dsession := dsess.DSessFromSess(ctx.Session)
return dsession.CreateSavepoint(ctx, name, db.name, tx)
}
func (db Database) RollbackToSavepoint(ctx *sql.Context, tx sql.Transaction, name string) error {
dsession := DSessFromSess(ctx.Session)
dsession := dsess.DSessFromSess(ctx.Session)
return dsession.RollbackToSavepoint(ctx, name, db.name, tx)
}
func (db Database) ReleaseSavepoint(ctx *sql.Context, tx sql.Transaction, name string) error {
dsession := DSessFromSess(ctx.Session)
dsession := dsess.DSessFromSess(ctx.Session)
return dsession.ReleaseSavepoint(ctx, name, db.name, tx)
}
@@ -243,43 +208,60 @@ func (db Database) GetTableInsensitive(ctx *sql.Context, tblName string) (sql.Ta
return db.GetTableInsensitiveWithRoot(ctx, root, tblName)
}
func (db Database) GetTableInsensitiveWithRoot(ctx *sql.Context, root *doltdb.RootValue, tblName string) (dt sql.Table, found bool, err error) {
func (db Database) GetTableInsensitiveWithRoot(ctx *sql.Context, root *doltdb.RootValue, tblName string) (sql.Table, bool, error) {
lwrName := strings.ToLower(tblName)
head, _, err := DSessFromSess(ctx.Session).GetHeadCommit(ctx, db.name)
if err != nil {
return nil, false, err
}
sess := dsess.DSessFromSess(ctx.Session)
// NOTE: system tables are not suitable for caching
switch {
case strings.HasPrefix(lwrName, doltdb.DoltDiffTablePrefix):
suffix := tblName[len(doltdb.DoltDiffTablePrefix):]
found = true
dt, err = dtables.NewDiffTable(ctx, suffix, db.ddb, root, head)
head, err := sess.GetHeadCommit(ctx, db.name)
if err != nil {
return nil, false, err
}
dt, err := dtables.NewDiffTable(ctx, suffix, db.ddb, root, head)
if err != nil {
return nil, false, err
}
return dt, true, nil
case strings.HasPrefix(lwrName, doltdb.DoltCommitDiffTablePrefix):
suffix := tblName[len(doltdb.DoltCommitDiffTablePrefix):]
found = true
dt, err = dtables.NewCommitDiffTable(ctx, suffix, db.ddb, root)
dt, err := dtables.NewCommitDiffTable(ctx, suffix, db.ddb, root)
if err != nil {
return nil, false, err
}
return dt, true, nil
case strings.HasPrefix(lwrName, doltdb.DoltHistoryTablePrefix):
suffix := tblName[len(doltdb.DoltHistoryTablePrefix):]
found = true
dt, err = dtables.NewHistoryTable(ctx, suffix, db.ddb, root, head)
head, err := sess.GetHeadCommit(ctx, db.name)
if err != nil {
return nil, false, err
}
dt, err := dtables.NewHistoryTable(ctx, suffix, db.ddb, root, head)
if err != nil {
return nil, false, err
}
return dt, true, nil
case strings.HasPrefix(lwrName, doltdb.DoltConfTablePrefix):
suffix := tblName[len(doltdb.DoltConfTablePrefix):]
found = true
dt, err = dtables.NewConflictsTable(ctx, suffix, root, dtables.RootSetter(db))
}
if err != nil {
return nil, false, err
}
if found {
return dt, found, nil
dt, err := dtables.NewConflictsTable(ctx, suffix, root, dtables.RootSetter(db))
if err != nil {
return nil, false, err
}
return dt, true, nil
}
// NOTE: system tables are not suitable for caching
var dt sql.Table
found := false
switch lwrName {
case doltdb.LogTableName:
head, err := sess.GetHeadCommit(ctx, db.name)
if err != nil {
return nil, false, err
}
dt, found = dtables.NewLogTable(ctx, db.ddb, head), true
case doltdb.TableOfTablesInConflictName:
dt, found = dtables.NewTableOfTablesInConflict(ctx, db.ddb, root), true
@@ -290,7 +272,7 @@ func (db Database) GetTableInsensitiveWithRoot(ctx *sql.Context, root *doltdb.Ro
case doltdb.CommitAncestorsTableName:
dt, found = dtables.NewCommitAncestorsTable(ctx, db.ddb), true
case doltdb.StatusTableName:
dt, found = dtables.NewStatusTable(ctx, db.ddb, db.rsr, db.drw), true
dt, found = dtables.NewStatusTable(ctx, db.name, db.ddb, dsess.NewSessionStateAdapter(sess, db.name), db.drw), true
}
if found {
return dt, found, nil
@@ -497,65 +479,41 @@ func filterDoltInternalTables(tblNames []string) []string {
return result
}
func HeadKey(dbName string) string {
return dbName + HeadKeySuffix
}
func HeadRefKey(dbName string) string {
return dbName + HeadRefKeySuffix
}
func WorkingKey(dbName string) string {
return dbName + WorkingKeySuffix
}
var hashType = sql.MustCreateString(query.Type_TEXT, 32, sql.Collation_ascii_bin)
// GetRoot returns the root value for this database session
func (db Database) GetRoot(ctx *sql.Context) (*doltdb.RootValue, error) {
dsess := DSessFromSess(ctx.Session)
currRoot, dbRootOk := dsess.roots[db.name]
sess := dsess.DSessFromSess(ctx.Session)
dbState, dbRootOk := sess.DbStates[db.name]
if !dbRootOk {
return nil, fmt.Errorf("no root value found in session")
}
return currRoot.root, nil
return dbState.WorkingSet.WorkingRoot(), nil
}
func (db Database) GetTemporaryTablesRoot(ctx *sql.Context) (*doltdb.RootValue, bool) {
dsess := DSessFromSess(ctx.Session)
return dsess.GetTempTableRootValue(ctx, db.Name())
sess := dsess.DSessFromSess(ctx.Session)
return sess.GetTempTableRootValue(ctx, db.Name())
}
// SetRoot should typically be called on the Session, which is where this state lives. But it's available here as a
// convenience.
func (db Database) SetRoot(ctx *sql.Context, newRoot *doltdb.RootValue) error {
dsess := DSessFromSess(ctx.Session)
return dsess.SetRoot(ctx, db.name, newRoot)
sess := dsess.DSessFromSess(ctx.Session)
return sess.SetRoot(ctx, db.name, newRoot)
}
// SetTemporaryRoot sets the root value holding temporary tables not persisted to the repo state after the session.
func (db Database) SetTemporaryRoot(ctx *sql.Context, newRoot *doltdb.RootValue) error {
dsess := DSessFromSess(ctx.Session)
return dsess.SetTempTableRoot(ctx, db.name, newRoot)
}
// LoadRootFromRepoState loads the root value from the repo state's working hash, then calls SetRoot with the loaded
// root value.
func (db Database) LoadRootFromRepoState(ctx *sql.Context) error {
workingHash := db.rsr.WorkingHash()
root, err := db.ddb.ReadRootValue(ctx, workingHash)
if err != nil {
return err
}
return db.SetRoot(ctx, root)
sess := dsess.DSessFromSess(ctx.Session)
return sess.SetTempTableRoot(ctx, db.name, newRoot)
}
// GetHeadRoot returns root value for the current session head
func (db Database) GetHeadRoot(ctx *sql.Context) (*doltdb.RootValue, error) {
dsess := DSessFromSess(ctx.Session)
head, _, err := dsess.GetHeadCommit(ctx, db.name)
sess := dsess.DSessFromSess(ctx.Session)
head, err := sess.GetHeadCommit(ctx, db.name)
if err != nil {
return nil, err
}
@@ -696,12 +654,12 @@ func (db Database) CreateTemporaryTable(ctx *sql.Context, tableName string, sch
func (db Database) createTempSQLTable(ctx *sql.Context, tableName string, sch sql.Schema) error {
// Get temporary root value
dsess := DSessFromSess(ctx.Session)
sess := dsess.DSessFromSess(ctx.Session)
tempTableRootValue, exists := db.GetTemporaryTablesRoot(ctx)
// create the root value only when needed.
if !exists {
err := dsess.CreateTemporaryTablesRoot(ctx, db.Name(), db.GetDoltDB())
err := sess.CreateTemporaryTablesRoot(ctx, db.Name(), db.GetDoltDB())
if err != nil {
return err
}
@@ -714,10 +672,10 @@ func (db Database) createTempSQLTable(ctx *sql.Context, tableName string, sch sq
return err
}
return db.createTempDoltTable(ctx, tableName, tempTableRootValue, doltSch, dsess)
return db.createTempDoltTable(ctx, tableName, tempTableRootValue, doltSch, sess)
}
func (db Database) createTempDoltTable(ctx *sql.Context, tableName string, root *doltdb.RootValue, doltSch schema.Schema, dsess *DoltSession) error {
func (db Database) createTempDoltTable(ctx *sql.Context, tableName string, root *doltdb.RootValue, doltSch schema.Schema, dsess *dsess.Session) error {
if exists, err := root.HasTable(ctx, tableName); err != nil {
return err
} else if exists {
@@ -778,8 +736,8 @@ func (db Database) RenameTable(ctx *sql.Context, oldName, newName string) error
// Flush flushes the current batch of outstanding changes and returns any errors.
func (db Database) Flush(ctx *sql.Context) error {
dsess := DSessFromSess(ctx.Session)
editSession := dsess.editSessions[db.name]
sess := dsess.DSessFromSess(ctx.Session)
editSession := sess.DbStates[db.name].EditSession
newRoot, err := editSession.Flush(ctx)
if err != nil {
@@ -793,14 +751,14 @@ func (db Database) Flush(ctx *sql.Context) error {
// Flush any changes made to temporary tables
// TODO: Shouldn't always be updating both roots. Needs to update either both roots or neither of them, atomically
tempTableEditSession, sessionExists := dsess.tempTableEditSessions[db.Name()]
if sessionExists {
tempTableEditSession := sess.DbStates[db.name].TempTableEditSession
if tempTableEditSession != nil {
newTempTableRoot, err := tempTableEditSession.Flush(ctx)
if err != nil {
return nil
}
return dsess.SetTempTableRoot(ctx, db.Name(), newTempTableRoot)
return sess.SetTempTableRoot(ctx, db.Name(), newTempTableRoot)
}
return nil
@@ -1063,18 +1021,19 @@ func (db Database) dropFragFromSchemasTable(ctx *sql.Context, fragType, name str
// TableEditSession returns the TableEditSession for this database from the given context.
func (db Database) TableEditSession(ctx *sql.Context, isTemporary bool) *editor.TableEditSession {
if isTemporary {
return DSessFromSess(ctx.Session).tempTableEditSessions[db.name]
return dsess.DSessFromSess(ctx.Session).DbStates[db.name].TempTableEditSession
}
return DSessFromSess(ctx.Session).editSessions[db.name]
return dsess.DSessFromSess(ctx.Session).DbStates[db.name].EditSession
}
// GetAllTemporaryTables returns all temporary tables
func (db Database) GetAllTemporaryTables(ctx *sql.Context) ([]sql.Table, error) {
dsess := DSessFromSess(ctx.Session)
sess := dsess.DSessFromSess(ctx.Session)
tables := make([]sql.Table, 0)
for _, root := range dsess.tempTableRoots {
root := sess.DbStates[db.name].TempTableRoot
if root != nil {
tNames, err := root.GetTableNames(ctx)
if err != nil {
return nil, err
+8 -6
View File
@@ -17,6 +17,8 @@ package sqle
import (
"testing"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/stretchr/testify/assert"
)
@@ -27,10 +29,10 @@ func testKeyFunc(t *testing.T, keyFunc func(string) (bool, string), testVal stri
}
func TestIsKeyFuncs(t *testing.T) {
testKeyFunc(t, IsHeadKey, "", false, "")
testKeyFunc(t, IsWorkingKey, "", false, "")
testKeyFunc(t, IsHeadKey, "dolt_head", true, "dolt")
testKeyFunc(t, IsWorkingKey, "dolt_head", false, "")
testKeyFunc(t, IsHeadKey, "dolt_working", false, "")
testKeyFunc(t, IsWorkingKey, "dolt_working", true, "dolt")
testKeyFunc(t, dsess.IsHeadKey, "", false, "")
testKeyFunc(t, dsess.IsWorkingKey, "", false, "")
testKeyFunc(t, dsess.IsHeadKey, "dolt_head", true, "dolt")
testKeyFunc(t, dsess.IsWorkingKey, "dolt_head", false, "")
testKeyFunc(t, dsess.IsHeadKey, "dolt_working", false, "")
testKeyFunc(t, dsess.IsWorkingKey, "dolt_working", true, "dolt")
}
@@ -20,7 +20,7 @@ import (
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
)
const ActiveBranchFuncName = "active_branch"
@@ -34,9 +34,9 @@ func NewActiveBranchFunc(ctx *sql.Context) sql.Expression {
}
// Eval implements the Expression interface.
func (cf *ActiveBranchFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
func (ab *ActiveBranchFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
dbName := ctx.GetCurrentDatabase()
dSess := sqle.DSessFromSess(ctx.Session)
dSess := dsess.DSessFromSess(ctx.Session)
ddb, ok := dSess.GetDoltDB(dbName)
@@ -44,22 +44,18 @@ func (cf *ActiveBranchFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, er
return nil, sql.ErrDatabaseNotFound.New(dbName)
}
rsr, ok := dSess.GetDoltDBRepoStateReader(dbName)
if !ok {
return nil, sql.ErrDatabaseNotFound.New(dbName)
currentBranchRef, err := dSess.CWBHeadRef(dbName)
if err != nil {
return nil, err
}
currentBranch := rsr.CWBHeadRef()
branches, err := ddb.GetBranches(ctx)
if err != nil {
return nil, err
}
for _, br := range branches {
if ref.Equals(br, currentBranch) {
if ref.Equals(br, currentBranchRef) {
return br.GetPath(), nil
}
}
@@ -68,12 +64,12 @@ func (cf *ActiveBranchFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, er
}
// String implements the Stringer interface.
func (cf *ActiveBranchFunc) String() string {
func (ab *ActiveBranchFunc) String() string {
return fmt.Sprint("ACTIVE_BRANCH()")
}
// IsNullable implements the Expression interface.
func (cf *ActiveBranchFunc) IsNullable() bool {
func (ab *ActiveBranchFunc) IsNullable() bool {
return false
}
@@ -82,7 +78,7 @@ func (*ActiveBranchFunc) Resolved() bool {
return true
}
func (cf *ActiveBranchFunc) Type() sql.Type {
func (ab *ActiveBranchFunc) Type() sql.Type {
return sql.Text
}
@@ -92,9 +88,9 @@ func (*ActiveBranchFunc) Children() []sql.Expression {
}
// WithChildren implements the Expression interface.
func (v *ActiveBranchFunc) WithChildren(ctx *sql.Context, children ...sql.Expression) (sql.Expression, error) {
func (ab *ActiveBranchFunc) WithChildren(ctx *sql.Context, children ...sql.Expression) (sql.Expression, error) {
if len(children) != 0 {
return nil, sql.ErrInvalidChildrenNumber.New(v, len(children), 0)
return nil, sql.ErrInvalidChildrenNumber.New(ab, len(children), 0)
}
return NewActiveBranchFunc(ctx), nil
}
@@ -22,7 +22,7 @@ import (
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
)
const CommitFuncName = "commit"
@@ -39,7 +39,7 @@ func NewCommitFunc(ctx *sql.Context, args ...sql.Expression) (sql.Expression, er
// Eval implements the Expression interface.
func (cf *CommitFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
dbName := ctx.GetCurrentDatabase()
dSess := sqle.DSessFromSess(ctx.Session)
dSess := dsess.DSessFromSess(ctx.Session)
// Get the params associated with COMMIT.
ap := cli.CreateCommitArgParser()
@@ -70,8 +70,7 @@ func (cf *CommitFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
return nil, fmt.Errorf("Must provide commit message.")
}
parent, _, err := dSess.GetHeadCommit(ctx, dbName)
parent, err := dSess.GetHeadCommit(ctx, dbName)
if err != nil {
return nil, err
}
@@ -22,7 +22,7 @@ import (
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
)
const DoltAddFuncName = "dolt_add"
@@ -38,13 +38,6 @@ func (d DoltAddFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
return 1, fmt.Errorf("Empty database name.")
}
dSess := sqle.DSessFromSess(ctx.Session)
dbData, ok := dSess.GetDbData(dbName)
if !ok {
return 1, fmt.Errorf("Could not load database %s", dbName)
}
ap := cli.CreateAddArgParser()
args, err := getDoltArgs(ctx, row, d.Children())
@@ -59,27 +52,34 @@ func (d DoltAddFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
allFlag := apr.Contains(cli.AllFlag)
dSess := dsess.DSessFromSess(ctx.Session)
roots, ok := dSess.GetRoots(dbName)
if apr.NArg() == 0 && !allFlag {
return 1, fmt.Errorf("Nothing specified, nothing added. Maybe you wanted to say 'dolt add .'?")
} else if allFlag || apr.NArg() == 1 && apr.Arg(0) == "." {
err = actions.StageAllTables(ctx, dbData)
if !ok {
return 1, fmt.Errorf("db session not found")
}
roots, err = actions.StageAllTablesNoDocs(ctx, roots)
if err != nil {
return 1, err
}
hashString := dbData.Rsr.StagedHash().String()
err = dSess.SetRoots(ctx, dbName, roots)
if err != nil {
return 1, err
return nil, err
}
// Sets @@_working to staged.
err = setSessionRootExplicit(ctx, hashString, sqle.WorkingKeySuffix)
} else {
err = actions.StageTables(ctx, dbData, apr.Args())
}
roots, err = actions.StageTablesNoDocs(ctx, roots, apr.Args())
if err != nil {
return 1, err
}
if err != nil {
return 1, err
err = dSess.SetRoots(ctx, dbName, roots)
if err != nil {
return nil, err
}
}
return 0, nil
@@ -26,7 +26,8 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
)
const DoltCheckoutFuncName = "dolt_checkout"
@@ -44,13 +45,6 @@ func (d DoltCheckoutFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, erro
return 1, fmt.Errorf("Empty database name.")
}
dSess := sqle.DSessFromSess(ctx.Session)
dbData, ok := dSess.GetDbData(dbName)
if !ok {
return 1, fmt.Errorf("Could not load database %s", dbName)
}
ap := cli.CreateCheckoutArgParser()
args, err := getDoltArgs(ctx, row, d.Children())
@@ -68,11 +62,22 @@ func (d DoltCheckoutFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, erro
}
// Checking out new branch.
dSess := dsess.DSessFromSess(ctx.Session)
dbData, ok := dSess.GetDbData(dbName)
if !ok {
return 1, fmt.Errorf("Could not load database %s", dbName)
}
roots, ok := dSess.GetRoots(dbName)
if !ok {
return 1, fmt.Errorf("Could not load database %s", dbName)
}
if newBranch, newBranchOk := apr.GetValue(cli.CheckoutCoBranch); newBranchOk {
if len(newBranch) == 0 {
err = errors.New("error: cannot checkout empty string")
} else {
err = checkoutNewBranch(ctx, dbData, newBranch, "")
err = checkoutNewBranch(ctx, dbName, dbData, roots, newBranch, "")
}
if err != nil {
@@ -88,27 +93,16 @@ func (d DoltCheckoutFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, erro
if isBranch, err := actions.IsBranch(ctx, dbData.Ddb, name); err != nil {
return 1, err
} else if isBranch {
err = checkoutBranch(ctx, dbData, name)
err = checkoutBranch(ctx, dbName, roots, dbData, name)
if err != nil {
return 1, err
}
return 0, nil
}
// Check if user want to checkout table or docs.
tbls, docs, err := actions.GetTablesOrDocs(dbData.Drw, args)
if err != nil {
return 1, errors.New("error: unable to parse arguments.")
}
if len(docs) > 0 {
return 1, errors.New("error: docs not supported in sql mode")
}
err = checkoutTables(ctx, dbData, tbls)
err = checkoutTables(ctx, roots, dbName, args)
if err != nil && apr.NArg() == 1 {
err = checkoutRemoteBranch(ctx, dbData, name)
err = checkoutRemoteBranch(ctx, dbName, dbData, roots, name)
}
if err != nil {
@@ -118,7 +112,7 @@ func (d DoltCheckoutFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, erro
return 0, nil
}
func checkoutRemoteBranch(ctx *sql.Context, dbData env.DbData, branchName string) error {
func checkoutRemoteBranch(ctx *sql.Context, dbName string, dbData env.DbData, roots doltdb.Roots, branchName string) error {
if len(branchName) == 0 {
return ErrEmptyBranchName
}
@@ -126,13 +120,13 @@ func checkoutRemoteBranch(ctx *sql.Context, dbData env.DbData, branchName string
if ref, refExists, err := actions.GetRemoteBranchRef(ctx, dbData.Ddb, branchName); err != nil {
return errors.New("fatal: unable to read from data repository")
} else if refExists {
return checkoutNewBranch(ctx, dbData, branchName, ref.String())
return checkoutNewBranch(ctx, dbName, dbData, roots, branchName, ref.String())
} else {
return fmt.Errorf("error: could not find %s", branchName)
}
}
func checkoutNewBranch(ctx *sql.Context, dbData env.DbData, branchName, startPt string) error {
func checkoutNewBranch(ctx *sql.Context, dbName string, dbData env.DbData, roots doltdb.Roots, branchName, startPt string) error {
if len(branchName) == 0 {
return ErrEmptyBranchName
}
@@ -146,41 +140,24 @@ func checkoutNewBranch(ctx *sql.Context, dbData env.DbData, branchName, startPt
return err
}
return checkoutBranch(ctx, dbData, branchName)
return checkoutBranch(ctx, dbName, roots, dbData, branchName)
}
func checkoutBranch(ctx *sql.Context, dbData env.DbData, branchName string) error {
func checkoutBranch(ctx *sql.Context, dbName string, roots doltdb.Roots, dbData env.DbData, branchName string) error {
if len(branchName) == 0 {
return ErrEmptyBranchName
}
err := actions.CheckoutBranchWithoutDocs(ctx, dbData, branchName)
wsRef, err := ref.WorkingSetRefForHead(ref.NewBranchRef(branchName))
if err != nil {
if err == doltdb.ErrBranchNotFound {
return fmt.Errorf("fatal: Branch '%s' not found.", branchName)
} else if doltdb.IsRootValUnreachable(err) {
rt := doltdb.GetUnreachableRootType(err)
return fmt.Errorf("error: unable to read the %s", rt.String())
} else if actions.IsCheckoutWouldOverwrite(err) {
tbls := actions.CheckoutWouldOverwriteTables(err)
msg := "error: Your local changes to the following tables would be overwritten by checkout: \n"
for _, tbl := range tbls {
msg = msg + tbl + "\n"
}
return errors.New(msg)
} else if err == doltdb.ErrAlreadyOnBranch {
return nil // No need to return an error if on the same branch
} else {
return fmt.Errorf("fatal: Unexpected error checking out branch '%s'", branchName)
}
return err
}
return updateHeadAndWorkingSessionVars(ctx, dbData)
dSess := dsess.DSessFromSess(ctx.Session)
return dSess.SwitchWorkingSet(ctx, dbName, wsRef)
}
func checkoutTables(ctx *sql.Context, dbData env.DbData, tables []string) error {
err := actions.CheckoutTables(ctx, dbData, tables)
func checkoutTables(ctx *sql.Context, roots doltdb.Roots, name string, tables []string) error {
roots, err := actions.MoveTablesFromHeadToWorking(ctx, roots, tables)
if err != nil {
if doltdb.IsRootValUnreachable(err) {
@@ -193,37 +170,8 @@ func checkoutTables(ctx *sql.Context, dbData env.DbData, tables []string) error
}
}
return updateHeadAndWorkingSessionVars(ctx, dbData)
}
// updateHeadAndWorkingSessionVars explicitly sets the head and working hash.
func updateHeadAndWorkingSessionVars(ctx *sql.Context, dbData env.DbData) error {
headHash, err := dbData.Rsr.CWBHeadHash(ctx)
if err != nil {
return err
}
hs := headHash.String()
hasWorkingChanges := hasWorkingSetChanges(dbData.Rsr)
hasStagedChanges, err := hasStagedSetChanges(ctx, dbData.Ddb, dbData.Rsr)
if err != nil {
return err
}
workingHash := dbData.Rsr.WorkingHash().String()
// This will update the session table editor's root and clear its cache.
if !hasStagedChanges && !hasWorkingChanges {
return setHeadAndWorkingSessionRoot(ctx, hs)
}
err = setSessionRootExplicit(ctx, hs, sqle.HeadKeySuffix)
if err != nil {
return err
}
return setSessionRootExplicit(ctx, workingHash, sqle.WorkingKeySuffix)
dSess := dsess.DSessFromSess(ctx.Session)
return dSess.SetRoots(ctx, name, roots)
}
func (d DoltCheckoutFunc) String() string {
@@ -16,6 +16,7 @@ package dfunctions
import (
"fmt"
"time"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/vitess/go/vt/proto/query"
@@ -24,7 +25,8 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/utils/argparser"
)
const DoltCommitFuncName = "dolt_commit"
@@ -44,16 +46,6 @@ func NewDoltCommitFunc(ctx *sql.Context, args ...sql.Expression) (sql.Expression
func (d DoltCommitFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
// Get the information for the sql context.
dbName := ctx.GetCurrentDatabase()
dSess := sqle.DSessFromSess(ctx.Session)
dbData, ok := dSess.GetDbData(dbName)
if !ok {
return nil, fmt.Errorf("Could not load database %s", dbName)
}
ddb := dbData.Ddb
rsr := dbData.Rsr
ap := cli.CreateCommitArgParser()
// Get the args for DOLT_COMMIT.
@@ -67,34 +59,20 @@ func (d DoltCommitFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error)
return nil, err
}
allFlag := apr.Contains(cli.AllFlag)
allowEmpty := apr.Contains(cli.AllowEmptyFlag)
dSess := dsess.DSessFromSess(ctx.Session)
// Check if there are no changes in the staged set but the -a flag is false
hasStagedChanges, err := hasStagedSetChanges(ctx, ddb, rsr)
if err != nil {
return nil, err
roots, ok := dSess.GetRoots(dbName)
if !ok {
return nil, fmt.Errorf("Could not load database %s", dbName)
}
if !allFlag && !hasStagedChanges && !allowEmpty {
return nil, fmt.Errorf("Cannot commit an empty commit. See the --allow-empty if you want to.")
if apr.Contains(cli.AllFlag) {
roots, err = actions.StageAllTablesNoDocs(ctx, roots)
if err != nil {
return nil, fmt.Errorf(err.Error())
}
}
// Check if there are no changes in the working set but the -a flag is true.
// The -a flag is fine when a merge is active or there are staged changes as result of a merge or an add.
if allFlag && !hasWorkingSetChanges(rsr) && !allowEmpty && !rsr.IsMergeActive() && !hasStagedChanges {
return nil, fmt.Errorf("Cannot commit an empty commit. See the --allow-empty if you want to.")
}
if allFlag {
err = actions.StageAllTables(ctx, dbData)
}
if err != nil {
return nil, fmt.Errorf(err.Error())
}
// Parse the author flag. Return an error if not.
var name, email string
if authorStr, ok := apr.GetValue(cli.AuthorParam); ok {
name, email, err = cli.ParseAuthor(authorStr)
@@ -112,7 +90,6 @@ func (d DoltCommitFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error)
return nil, fmt.Errorf("Must provide commit message.")
}
// Specify the time if the date parameter is not.
t := ctx.QueryTime()
if commitTimeStr, ok := apr.GetValue(cli.DateParam); ok {
var err error
@@ -123,7 +100,67 @@ func (d DoltCommitFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error)
}
}
h, err := actions.CommitStaged(ctx, dbData, actions.CommitStagedProps{
// Commit any pending transaction before a dolt_commit
tx := ctx.Session.GetTransaction()
_, ok = tx.(*dsess.DoltTransaction)
if !ok {
return nil, fmt.Errorf("expected a DoltTransaction, got %T", tx)
}
err = dSess.SetRoots(ctx, dbName, roots)
if err != nil {
return nil, err
}
err = dSess.CommitTransaction(ctx, dbName, tx)
if err != nil {
return nil, err
}
// Unsetting the transaction here ensures that it won't be re-committed when this statement concludes
ctx.SetTransaction(nil)
// Now do a Dolt commit
commit, err := dSess.CommitToDolt(ctx, roots, dbName, actions.CommitStagedProps{
Message: msg,
Date: t,
AllowEmpty: apr.Contains(cli.AllowEmptyFlag),
CheckForeignKeys: !apr.Contains(cli.ForceFlag),
Name: name,
Email: email,
})
if err != nil {
return 1, err
}
cmHash, err := commit.HashOf()
if err != nil {
return nil, err
}
return cmHash.String(), nil
}
func CommitToDolt(
ctx *sql.Context,
roots doltdb.Roots,
dbData env.DbData,
msg string,
t time.Time,
apr *argparser.ArgParseResults,
name string,
email string,
dSess *dsess.Session,
dbName string,
) (*doltdb.Commit, error) {
// TODO: this does several session state updates, and it really needs to just do one
// It's also not atomic with the above commit. We need a way to set both new HEAD and update the working
// set together, atomically. We can't easily do this in noms right now, because the the data set is the unit of
// atomic update at the API layer. There's a root value which is the unit of atomic updates at the storage layer,
// just no API which allows one to update more than one dataset in the same atomic transaction. We need to write
// one.
// Meanwhile, this is all kinds of thread-unsafe
commit, err := actions.CommitStaged(ctx, roots, dbData, actions.CommitStagedProps{
Message: msg,
Date: t,
AllowEmpty: apr.Contains(cli.AllowEmptyFlag),
@@ -135,38 +172,33 @@ func (d DoltCommitFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error)
return nil, err
}
if allFlag {
err = setHeadAndWorkingSessionRoot(ctx, h)
} else {
err = setSessionRootExplicit(ctx, h, sqle.HeadKeySuffix)
}
// Now we have to do *another* SQL transaction, because CommitStaged currently modifies the super schema of the root
// value before committing what it's given. We need that exact same root in our working set to stay consistent. It
// doesn't happen automatically like outside the SQL context because CommitStaged is writing to a session-based
// repo state writer, so we're never persisting the new working set to disk like in a command line context.
// TODO: fix this mess
ws := dSess.WorkingSet(ctx, dbName)
// StartTransaction sets the working set for the session, and we want the one we previous had, not the one on disk
// Updating the working set like this also updates the head commit and root info for the session
tx, err := dSess.StartTransaction(ctx, dbName)
if err != nil {
return nil, err
}
return h, nil
}
func hasWorkingSetChanges(rsr env.RepoStateReader) bool {
return rsr.WorkingHash() != rsr.StagedHash()
}
// TODO: We should not be dealing with root objects here but commit specs.
func hasStagedSetChanges(ctx *sql.Context, ddb *doltdb.DoltDB, rsr env.RepoStateReader) (bool, error) {
root, err := env.HeadRoot(ctx, ddb, rsr)
err = dSess.SetWorkingSet(ctx, dbName, ws.ClearMerge(), nil)
if err != nil {
return false, err
return nil, err
}
headHash, err := root.HashOf()
err = dSess.CommitTransaction(ctx, dbName, tx)
if err != nil {
return false, err
return nil, err
}
return rsr.StagedHash() != headHash, nil
// Unsetting the transaction here ensures that it won't be re-committed when this statement concludes
ctx.SetTransaction(nil)
return commit, err
}
func getDoltArgs(ctx *sql.Context, row sql.Row, children []sql.Expression) ([]string, error) {
@@ -223,20 +255,3 @@ func (d DoltCommitFunc) Resolved() bool {
func (d DoltCommitFunc) Children() []sql.Expression {
return d.children
}
// setHeadAndWorkingSessionRoot takes in a ctx and the new head hashstring and updates the session head and working hashes.
func setHeadAndWorkingSessionRoot(ctx *sql.Context, headHashStr string) error {
key := ctx.GetCurrentDatabase() + sqle.HeadKeySuffix
dsess := sqle.DSessFromSess(ctx.Session)
return dsess.SetSessionVariable(ctx, key, headHashStr)
}
// setSessionRootExplicit sets a session variable (either HEAD or WORKING) to a hash string. For HEAD, the hash string
// should come from the commit string. For working the commit string needs to come from the root.
func setSessionRootExplicit(ctx *sql.Context, hashString string, suffix string) error {
key := ctx.GetCurrentDatabase() + suffix
dsess := sqle.DSessFromSess(ctx.Session)
return dsess.SetSessionVarDirectly(ctx, key, hashString)
}
@@ -27,7 +27,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
"github.com/dolthub/dolt/go/libraries/doltcore/merge"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/utils/argparser"
)
@@ -44,7 +44,7 @@ func (d DoltMergeFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error)
return 1, fmt.Errorf("Empty database name.")
}
sess := sqle.DSessFromSess(ctx.Session)
sess := dsess.DSessFromSess(ctx.Session)
dbData, ok := sess.GetDbData(dbName)
if !ok {
@@ -67,34 +67,39 @@ func (d DoltMergeFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error)
return 1, fmt.Errorf("error: Flags '--%s' and '--%s' cannot be used together.\n", cli.SquashParam, cli.NoFFParam)
}
ws := sess.WorkingSet(ctx, dbName)
roots, ok := sess.GetRoots(dbName)
// logrus.Errorf("heads are working: %s\nhead: %s", roots.Working.DebugString(ctx, true), roots.Head.DebugString(ctx, true))
if !ok {
return 1, fmt.Errorf("Could not load database %s", dbName)
}
if apr.Contains(cli.AbortParam) {
if !dbData.Rsr.IsMergeActive() {
if !ws.MergeActive() {
return 1, fmt.Errorf("fatal: There is no merge to abort")
}
err = abortMerge(ctx, dbData)
ws, err = abortMerge(ctx, ws, roots)
if err != nil {
return 1, err
}
err := sess.SetWorkingSet(ctx, dbName, ws, nil)
if err != nil {
return nil, err
}
return "Merge aborted", nil
}
// The first argument should be the branch name.
branchName := apr.Arg(0)
ddb, ok := sess.GetDoltDB(dbName)
if !ok {
return nil, sql.ErrDatabaseNotFound.New(dbName)
}
root, ok := sess.GetRoot(dbName)
if !ok {
return nil, sql.ErrDatabaseNotFound.New(dbName)
}
hasConflicts, err := root.HasConflicts(ctx)
hasConflicts, err := roots.Working.HasConflicts(ctx)
if err != nil {
return 1, err
}
@@ -103,36 +108,46 @@ func (d DoltMergeFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error)
return 1, doltdb.ErrUnresolvedConflicts
}
if dbData.Rsr.IsMergeActive() {
if ws.MergeActive() {
return 1, doltdb.ErrMergeActive
}
head, hh, headRoot, err := getHead(ctx, sess, dbName)
err = checkForUncommittedChanges(roots.Working, roots.Head)
if err != nil {
return nil, err
}
err = checkForUncommittedChanges(root, headRoot)
branchName := apr.Arg(0)
mergeCommit, cmh, err := getBranchCommit(ctx, branchName, ddb)
if err != nil {
return nil, err
}
cm, cmh, err := getBranchCommit(ctx, branchName, ddb)
headCommit, err := sess.GetHeadCommit(ctx, dbName)
if err != nil {
return nil, err
}
// No need to write a merge commit, if the head can ffw to the commit coming from the branch.
canFF, err := head.CanFastForwardTo(ctx, cm)
canFF, err := headCommit.CanFastForwardTo(ctx, mergeCommit)
if err != nil {
return nil, err
}
if canFF {
if apr.Contains(cli.NoFFParam) {
err = executeNoFFMerge(ctx, sess, apr, dbName, dbData, head, cm)
ws, err = executeNoFFMerge(ctx, sess, apr, dbName, ws, dbData, headCommit, mergeCommit)
if err == doltdb.ErrUnresolvedConflicts {
// if there are unresolved conflicts, write the resulting working set back to the session and return an
// error message
wsErr := sess.SetWorkingSet(ctx, dbName, ws, nil)
if wsErr != nil {
return nil, wsErr
}
return err.Error(), nil
}
} else {
err = executeFFMerge(ctx, apr.Contains(cli.SquashParam), dbName, dbData, cm)
err = executeFFMerge(ctx, sess, apr.Contains(cli.SquashParam), dbName, ws, dbData, mergeCommit)
}
if err != nil {
@@ -141,152 +156,156 @@ func (d DoltMergeFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error)
return cmh.String(), err
}
err = executeMerge(ctx, apr.Contains(cli.SquashParam), head, cm, dbName, dbData)
ws, err = executeMerge(ctx, apr.Contains(cli.SquashParam), headCommit, mergeCommit, ws)
if err == doltdb.ErrUnresolvedConflicts {
// if there are unresolved conflicts, write the resulting working set back to the session and return an
// error message
wsErr := sess.SetWorkingSet(ctx, dbName, ws, nil)
if wsErr != nil {
return nil, wsErr
}
return err.Error(), nil
} else if err != nil {
return nil, err
}
err = sess.SetWorkingSet(ctx, dbName, ws, nil)
if err != nil {
return nil, err
}
returnMsg := fmt.Sprintf("Updating %s..%s", cmh.String(), hh.String())
hch, err := headCommit.HashOf()
if err != nil {
return nil, err
}
returnMsg := fmt.Sprintf("Updating %s..%s", hch.String(), cmh.String())
return returnMsg, nil
}
func abortMerge(ctx *sql.Context, dbData env.DbData) error {
err := actions.CheckoutAllTables(ctx, dbData)
func abortMerge(ctx *sql.Context, workingSet *doltdb.WorkingSet, roots doltdb.Roots) (*doltdb.WorkingSet, error) {
tbls, err := doltdb.UnionTableNames(ctx, roots.Working, roots.Staged, roots.Head)
if err != nil {
return err
return nil, err
}
err = dbData.Rsw.AbortMerge()
roots, err = actions.MoveTablesFromHeadToWorking(ctx, roots, tbls)
if err != nil {
return err
return nil, err
}
hh, err := dbData.Rsr.CWBHeadHash(ctx)
if err != nil {
return err
}
return setHeadAndWorkingSessionRoot(ctx, hh.String())
// TODO: this doesn't seem right, it sets the root that we already edited above
workingSet = workingSet.AbortMerge()
return workingSet, nil
}
func executeMerge(ctx *sql.Context, squash bool, head, cm *doltdb.Commit, name string, dbData env.DbData) error {
func executeMerge(ctx *sql.Context, squash bool, head, cm *doltdb.Commit, ws *doltdb.WorkingSet) (*doltdb.WorkingSet, error) {
mergeRoot, mergeStats, err := merge.MergeCommits(ctx, head, cm)
if err != nil {
switch err {
case doltdb.ErrUpToDate:
return errors.New("Already up to date.")
return nil, errors.New("Already up to date.")
case merge.ErrFastForward:
panic("fast forward merge")
default:
return errors.New("Bad merge")
return nil, errors.New("Bad merge")
}
}
return mergeRootToWorking(ctx, squash, name, dbData, mergeRoot, cm, mergeStats)
return mergeRootToWorking(squash, ws, mergeRoot, cm, mergeStats)
}
func executeFFMerge(ctx *sql.Context, squash bool, dbName string, dbData env.DbData, cm2 *doltdb.Commit) error {
func executeFFMerge(ctx *sql.Context, sess *dsess.Session, squash bool, dbName string, ws *doltdb.WorkingSet, dbData env.DbData, cm2 *doltdb.Commit) error {
rv, err := cm2.GetRootValue()
if err != nil {
return errors.New("Failed to return root value.")
}
stagedHash, err := dbData.Ddb.WriteRootValue(ctx, rv)
if err != nil {
return err
}
workingHash := stagedHash
// TODO: This is all incredibly suspect, needs to be replaced with library code that is functional instead of
// altering global state
if !squash {
err = dbData.Ddb.FastForward(ctx, dbData.Rsr.CWBHeadRef(), cm2)
if err != nil {
return err
}
}
err = dbData.Rsw.SetWorkingHash(ctx, workingHash)
if err != nil {
return err
}
ws = ws.WithWorkingRoot(rv).WithStagedRoot(rv)
err = dbData.Rsw.SetStagedHash(ctx, stagedHash)
if err != nil {
return err
}
hh, err := dbData.Rsr.CWBHeadHash(ctx)
if err != nil {
return err
}
if squash {
return ctx.SetSessionVariable(ctx, sqle.WorkingKey(dbName), workingHash.String())
} else {
return setHeadAndWorkingSessionRoot(ctx, hh.String())
}
return sess.SetWorkingSet(ctx, dbName, ws, nil)
}
func executeNoFFMerge(
ctx *sql.Context,
dSess *sqle.DoltSession,
dSess *dsess.Session,
apr *argparser.ArgParseResults,
dbName string,
ws *doltdb.WorkingSet,
dbData env.DbData,
pr, cm2 *doltdb.Commit,
) error {
mergedRoot, err := cm2.GetRootValue()
headCommit, mergeCommit *doltdb.Commit,
) (*doltdb.WorkingSet, error) {
mergeRoot, err := mergeCommit.GetRootValue()
if err != nil {
return errors.New("Failed to return root value.")
return nil, err
}
err = mergeRootToWorking(ctx, false, dbName, dbData, mergedRoot, cm2, map[string]*merge.MergeStats{})
ws, err = mergeRootToWorking(false, ws, mergeRoot, mergeCommit, map[string]*merge.MergeStats{})
if err != nil {
return err
// This error is recoverable, so we return a working set value along with the error
return ws, err
}
msg, msgOk := apr.GetValue(cli.CommitMessageArg)
if !msgOk {
hh, err := pr.HashOf()
hh, err := headCommit.HashOf()
if err != nil {
return err
return nil, err
}
cmh, err := cm2.HashOf()
cmh, err := mergeCommit.HashOf()
if err != nil {
return err
return nil, err
}
msg = fmt.Sprintf("SQL Generated commit merging %s into %s", hh.String(), cmh.String())
}
// TODO: refactor, redundant
var name, email string
if authorStr, ok := apr.GetValue(cli.AuthorParam); ok {
name, email, err = cli.ParseAuthor(authorStr)
if err != nil {
return err
return nil, err
}
} else {
name = dSess.Username
email = dSess.Email
}
// Specify the time if the date parameter is not.
t := ctx.QueryTime()
if commitTimeStr, ok := apr.GetValue(cli.DateParam); ok {
var err error
t, err = cli.ParseDate(commitTimeStr)
if err != nil {
return err
return nil, err
}
}
h, err := actions.CommitStaged(ctx, dbData, actions.CommitStagedProps{
// Save our work so far in the session, as it will be referenced by the commit call below (badly in need of a
// refactoring)
err = dSess.SetWorkingSet(ctx, dbName, ws, nil)
if err != nil {
return nil, err
}
// The roots need refreshing after the above
roots, _ := dSess.GetRoots(dbName)
// TODO: this does several session state updates, and it really needs to just do one
// We also need to commit any pending transaction before we do this.
_, err = actions.CommitStaged(ctx, roots, dbData, actions.CommitStagedProps{
Message: msg,
Date: t,
AllowEmpty: apr.Contains(cli.AllowEmptyFlag),
@@ -294,61 +313,34 @@ func executeNoFFMerge(
Name: name,
Email: email,
})
if err != nil {
return err
return nil, err
}
return setHeadAndWorkingSessionRoot(ctx, h)
return ws, dSess.SetWorkingSet(ctx, dbName, ws, nil)
}
// TODO: this copied from commands/merge.go because the latter isn't reusable. Fix that.
func mergeRootToWorking(
ctx *sql.Context,
squash bool,
dbName string,
dbData env.DbData,
ws *doltdb.WorkingSet,
mergedRoot *doltdb.RootValue,
cm2 *doltdb.Commit,
mergeStats map[string]*merge.MergeStats,
) error {
h2, err := cm2.HashOf()
if err != nil {
return err
}
) (*doltdb.WorkingSet, error) {
workingRoot := mergedRoot
if !squash {
err = dbData.Rsw.StartMerge(h2.String())
if err != nil {
return err
}
ws = ws.StartMerge(cm2)
}
workingHash, err := env.UpdateWorkingRoot(ctx, dbData.Ddb, dbData.Rsw, workingRoot)
if err != nil {
return err
ws = ws.WithWorkingRoot(workingRoot).WithStagedRoot(workingRoot)
if checkForConflicts(mergeStats) {
// this error is recoverable in-session, so we return the new ws along with the error
return ws, doltdb.ErrUnresolvedConflicts
}
hasConflicts := checkForConflicts(mergeStats)
if hasConflicts {
// If there are conflicts write them to the working root anyway too allow for merge resolution via the dolt_conflicts
// table.
err := ctx.SetSessionVariable(ctx, sqle.WorkingKey(dbName), workingHash.String())
if err != nil {
return err
}
return doltdb.ErrUnresolvedConflicts
}
_, err = env.UpdateStagedRoot(ctx, dbData.Ddb, dbData.Rsw, workingRoot)
if err != nil {
return err
}
return ctx.SetSessionVariable(ctx, sqle.WorkingKey(dbName), workingHash.String())
return ws, nil
}
func checkForConflicts(tblToStats map[string]*merge.MergeStats) bool {
@@ -22,7 +22,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/merge"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
)
const DoltMergeBaseFuncName = "dolt_merge_base"
@@ -81,7 +81,7 @@ func resolveRefSpecs(ctx *sql.Context, leftSpec, rightSpec string) (left, right
return nil, nil, err
}
sess := sqle.DSessFromSess(ctx.Session)
sess := dsess.DSessFromSess(ctx.Session)
dbName := ctx.GetCurrentDatabase()
dbData, ok := sess.GetDbData(dbName)
@@ -21,9 +21,9 @@ import (
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
)
const DoltResetFuncName = "dolt_reset"
@@ -39,7 +39,7 @@ func (d DoltResetFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error)
return 1, fmt.Errorf("Empty database name.")
}
dSess := sqle.DSessFromSess(ctx.Session)
dSess := dsess.DSessFromSess(ctx.Session)
dbData, ok := dSess.GetDbData(dbName)
if !ok {
@@ -64,9 +64,9 @@ func (d DoltResetFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error)
}
// Get all the needed roots.
working, staged, head, err := env.GetRoots(ctx, dbData.Ddb, dbData.Rsr)
if err != nil {
return 1, err
roots, ok := dSess.GetRoots(dbName)
if !ok {
return 1, fmt.Errorf("Could not load database %s", dbName)
}
if apr.Contains(cli.HardResetParam) {
@@ -78,34 +78,31 @@ func (d DoltResetFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error)
arg = apr.Arg(0)
}
h, err := actions.ResetHardTables(ctx, dbData, arg, working, staged, head)
var newHead *doltdb.Commit
newHead, roots, err = actions.ResetHardTables(ctx, dbData, arg, roots)
if err != nil {
return 1, err
}
// In this case we preserve untracked tables.
if h == "" {
headHash, err := dbData.Rsr.CWBHeadHash(ctx)
if err != nil {
return 1, err
}
h = headHash.String()
if err := ctx.SetSessionVariable(ctx, sqle.HeadKey(dbName), h); err != nil {
return 1, err
}
workingHash := dbData.Rsr.WorkingHash()
if err := ctx.SetSessionVariable(ctx, sqle.WorkingKey(dbName), workingHash.String()); err != nil {
return 1, err
}
} else {
if err := setHeadAndWorkingSessionRoot(ctx, h); err != nil {
// TODO: this overrides the transaction setting, needs to happen at commit, not here
if newHead != nil {
if err := dbData.Ddb.SetHeadToCommit(ctx, dbData.Rsr.CWBHeadRef(), newHead); err != nil {
return 1, err
}
}
ws := dSess.WorkingSet(ctx, dbName)
err = dSess.SetWorkingSet(ctx, dbName, ws.WithWorkingRoot(roots.Working).WithStagedRoot(roots.Staged), nil)
if err != nil {
return 1, err
}
} else {
_, err = actions.ResetSoftTables(ctx, dbData, apr, staged, head)
roots, err = actions.ResetSoftTables(ctx, dbData, apr, roots)
if err != nil {
return 1, err
}
err = dSess.SetRoots(ctx, dbName, roots)
if err != nil {
return 1, err
}
@@ -25,7 +25,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
)
const HashOfFuncName = "hashof"
@@ -63,16 +63,16 @@ func (t *HashOf) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
}
dbName := ctx.GetCurrentDatabase()
ddb, ok := sqle.DSessFromSess(ctx.Session).GetDoltDB(dbName)
ddb, ok := dsess.DSessFromSess(ctx.Session).GetDoltDB(dbName)
if !ok {
return nil, sql.ErrDatabaseNotFound.New(dbName)
}
var cm *doltdb.Commit
if strings.ToUpper(name) == "HEAD" {
sess := sqle.DSessFromSess(ctx.Session)
sess := dsess.DSessFromSess(ctx.Session)
cm, _, err = sess.GetHeadCommit(ctx, dbName)
cm, err = sess.GetHeadCommit(ctx, dbName)
if err != nil {
return nil, err
}
@@ -24,7 +24,7 @@ import (
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/merge"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/store/hash"
)
@@ -41,7 +41,7 @@ func NewMergeFunc(ctx *sql.Context, args ...sql.Expression) (sql.Expression, err
// Eval implements the Expression interface.
func (cf *MergeFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
sess := sqle.DSessFromSess(ctx.Session)
sess := dsess.DSessFromSess(ctx.Session)
// TODO: Move to a separate MERGE argparser.
ap := cli.CreateCommitArgParser()
@@ -152,7 +152,6 @@ func checkForUncommittedChanges(root *doltdb.RootValue, headRoot *doltdb.RootVal
if rh != hrh {
return errors.New("cannot merge with uncommitted changes")
}
return nil
}
@@ -184,8 +183,13 @@ func getBranchCommit(ctx *sql.Context, val interface{}, ddb *doltdb.DoltDB) (*do
return cm, cmh, nil
}
func getHead(ctx *sql.Context, sess *sqle.DoltSession, dbName string) (*doltdb.Commit, hash.Hash, *doltdb.RootValue, error) {
head, hh, err := sess.GetHeadCommit(ctx, dbName)
func getHead(ctx *sql.Context, sess *dsess.Session, dbName string) (*doltdb.Commit, hash.Hash, *doltdb.RootValue, error) {
head, err := sess.GetHeadCommit(ctx, dbName)
if err != nil {
return nil, hash.Hash{}, nil, err
}
hh, err := head.HashOf()
if err != nil {
return nil, hash.Hash{}, nil, err
}
@@ -21,7 +21,7 @@ import (
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/go-mysql-server/sql/expression"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/store/hash"
)
@@ -41,6 +41,7 @@ func NewResetFunc(ctx *sql.Context, e sql.Expression) sql.Expression {
}
// Eval implements the Expression interface.
// TODO: this doesn't seem to do anything
func (rf ResetFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
val, err := rf.Child.Eval(ctx, row)
if err != nil {
@@ -52,14 +53,14 @@ func (rf ResetFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
}
dbName := ctx.GetCurrentDatabase()
dSess := sqle.DSessFromSess(ctx.Session)
dSess := dsess.DSessFromSess(ctx.Session)
var h hash.Hash
if strings.ToLower(arg) != resetHardParameter {
return nil, fmt.Errorf("invalid arugument to %s(): %s", resetFuncName, arg)
}
parent, _, err := dSess.GetHeadCommit(ctx, dbName)
parent, err := dSess.GetHeadCommit(ctx, dbName)
if err != nil {
return nil, err
}
@@ -21,7 +21,7 @@ import (
"github.com/dolthub/go-mysql-server/sql/expression"
"github.com/dolthub/dolt/go/libraries/doltcore/merge"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
)
const SquashFuncName = "squash"
@@ -35,7 +35,7 @@ func NewSquashFunc(ctx *sql.Context, child sql.Expression) sql.Expression {
}
func (s SquashFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
sess := sqle.DSessFromSess(ctx.Session)
sess := dsess.DSessFromSess(ctx.Session)
branchVal, err := s.Child.Eval(ctx, row)
if err != nil {
@@ -1227,7 +1227,7 @@ func doltIndexSetup(t *testing.T) map[string]DoltIndex {
if err != nil {
panic(err)
}
root, err = ExecuteSql(dEnv, root, `
root, err = ExecuteSql(t, dEnv, root, `
CREATE TABLE onepk (
pk1 BIGINT PRIMARY KEY,
v1 BIGINT,
-821
View File
@@ -1,821 +0,0 @@
// Copyright 2020 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sqle
import (
"errors"
"fmt"
"os"
"strings"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
"github.com/dolthub/dolt/go/store/hash"
)
// dbRoot pairs a root value with the string form of its hash, so the hash
// string doesn't need to be recomputed each time it's published as the
// "<db>_working" session variable.
type dbRoot struct {
	hashStr string
	root    *doltdb.RootValue
}
const (
	// Suffixes for the per-database session variables, e.g. "mydb_head".
	HeadKeySuffix    = "_head"
	HeadRefKeySuffix = "_head_ref"
	WorkingKeySuffix = "_working"
)

const (
	// EnableTransactionsEnvKey is read once at init time to choose the default
	// for the transactions-enabled session variable.
	EnableTransactionsEnvKey = "DOLT_ENABLE_TRANSACTIONS"
	// DoltCommitOnTransactionCommit names the session variable that, when set,
	// makes every SQL transaction commit also create a Dolt commit.
	DoltCommitOnTransactionCommit = "dolt_transaction_commit"
)

// batchMode selects whether edit buffers are flushed per statement (single)
// or only on explicit Flush / transaction commit (batched).
type batchMode int8

const (
	single batchMode = iota
	batched
)

// TransactionsEnabledSysVar names the session variable gating the
// transaction-aware commit path.
const TransactionsEnabledSysVar = "dolt_transactions_enabled"
// init registers the session variables this package defines, choosing the
// default for dolt_transactions_enabled from the DOLT_ENABLE_TRANSACTIONS
// environment variable (only the exact value "true", case-insensitive,
// enables it).
func init() {
	txEnabledSessionVar := int8(0)
	enableTx, ok := os.LookupEnv(EnableTransactionsEnvKey)
	if ok {
		if strings.ToLower(enableTx) == "true" {
			txEnabledSessionVar = int8(1)
		}
	}
	sql.SystemVariables.AddSystemVariables([]sql.SystemVariable{
		{
			Name:              DoltCommitOnTransactionCommit,
			Scope:             sql.SystemVariableScope_Session,
			Dynamic:           true,
			SetVarHintApplies: false,
			Type:              sql.NewSystemBoolType(DoltCommitOnTransactionCommit),
			Default:           int8(0), // off unless a session turns it on
		},
		{
			Name:              TransactionsEnabledSysVar,
			Scope:             sql.SystemVariableScope_Session,
			Dynamic:           true,
			SetVarHintApplies: false,
			Type:              sql.NewSystemBoolType(TransactionsEnabledSysVar),
			Default:           txEnabledSessionVar,
		},
	})
}
// TransactionsEnabled reports whether the transaction-aware commit path is on
// for this session. Panics if the session variable is missing or holds a value
// other than 0 or 1, since either indicates a programming error.
func TransactionsEnabled(ctx *sql.Context) bool {
	enabled, err := ctx.GetSessionVariable(ctx, TransactionsEnabledSysVar)
	if err != nil {
		panic(err)
	}

	v := enabled.(int8)
	if v != 0 && v != 1 {
		panic(fmt.Sprintf("Unexpected value %v", enabled))
	}
	return v == 1
}
// IsHeadKey reports whether key is a per-database head session variable
// ("<db>_head"), and if so returns the database name portion.
func IsHeadKey(key string) (bool, string) {
	if !strings.HasSuffix(key, HeadKeySuffix) {
		return false, ""
	}
	return true, strings.TrimSuffix(key, HeadKeySuffix)
}
// IsWorkingKey reports whether key is a per-database working-root session
// variable ("<db>_working"), and if so returns the database name portion.
func IsWorkingKey(key string) (bool, string) {
	if !strings.HasSuffix(key, WorkingKeySuffix) {
		return false, ""
	}
	return true, strings.TrimSuffix(key, WorkingKeySuffix)
}
// DoltSession is the sql.Session implementation used by dolt. It is accessible through a *sql.Context instance
type DoltSession struct {
	sql.Session
	roots                 map[string]dbRoot                   // current working root per database
	workingSets           map[string]ref.WorkingSetRef        // working set ref per database (transactions path)
	dbDatas               map[string]env.DbData               // repo state access per database
	editSessions          map[string]*editor.TableEditSession // pending table edits per database
	dirty                 map[string]bool                     // true when a database has uncommitted root changes
	batchMode             batchMode                           // single (default) or batched flushing
	Username              string                              // author name used for dolt commits
	Email                 string                              // author email used for dolt commits
	tempTableRoots        map[string]*doltdb.RootValue        // roots holding temporary tables, created on demand
	tempTableEditSessions map[string]*editor.TableEditSession // edit sessions for temporary tables
}
var _ sql.Session = &DoltSession{}
// DefaultDoltSession creates a DoltSession object with default values
// (empty identity, no databases, single-statement flush mode).
func DefaultDoltSession() *DoltSession {
	sess := &DoltSession{
		Session:               sql.NewBaseSession(),
		roots:                 make(map[string]dbRoot),
		dbDatas:               make(map[string]env.DbData),
		editSessions:          make(map[string]*editor.TableEditSession),
		dirty:                 make(map[string]bool),
		workingSets:           make(map[string]ref.WorkingSetRef),
		Username:              "",
		Email:                 "",
		tempTableRoots:        make(map[string]*doltdb.RootValue),
		tempTableEditSessions: make(map[string]*editor.TableEditSession),
	}
	return sess
}
// NewDoltSession creates a DoltSession object from a standard sql.Session and 0 or more Database objects.
// Each database is registered via AddDB, which establishes its starting root value and session variables.
func NewDoltSession(ctx *sql.Context, sqlSess sql.Session, username, email string, dbs ...Database) (*DoltSession, error) {
	dbDatas := make(map[string]env.DbData)
	editSessions := make(map[string]*editor.TableEditSession)

	// NOTE(review): AddDB below also populates dbDatas/editSessions for each db;
	// this pre-population appears redundant — confirm before removing.
	for _, db := range dbs {
		dbDatas[db.Name()] = env.DbData{Rsw: db.rsw, Ddb: db.ddb, Rsr: db.rsr, Drw: db.drw}
		editSessions[db.Name()] = editor.CreateTableEditSession(nil, editor.TableEditSessionProps{})
	}

	sess := &DoltSession{
		Session:               sqlSess,
		dbDatas:               dbDatas,
		editSessions:          editSessions,
		dirty:                 make(map[string]bool),
		roots:                 make(map[string]dbRoot),
		workingSets:           make(map[string]ref.WorkingSetRef),
		Username:              username,
		Email:                 email,
		tempTableRoots:        make(map[string]*doltdb.RootValue),
		tempTableEditSessions: make(map[string]*editor.TableEditSession),
	}

	for _, db := range dbs {
		err := sess.AddDB(ctx, db, db.DbData())
		if err != nil {
			return nil, err
		}
	}

	return sess, nil
}
// EnableBatchedMode enables batched mode for this session. This is only safe to do during initialization.
// Sessions operating in batched mode don't flush any edit buffers except when told to do so explicitly, or when a
// transaction commits. Disable @@autocommit to prevent edit buffers from being flushed prematurely in this mode.
// There is no corresponding disable: batched mode is a one-way switch for the session's lifetime.
func (sess *DoltSession) EnableBatchedMode() {
	sess.batchMode = batched
}
// DSessFromSess retrieves a dolt session from a standard sql.Session.
// Panics if sess is not a *DoltSession; callers rely on the engine always
// constructing sessions through this package.
func DSessFromSess(sess sql.Session) *DoltSession {
	return sess.(*DoltSession)
}
// Flush flushes all changes sitting in edit sessions to the session root for the database named. This normally
// happens automatically as part of statement execution, and is only necessary when the session is manually batched (as
// for bulk SQL import)
// NOTE(review): panics (nil map entry) if dbName was never registered via AddDB — confirm callers guarantee this.
func (sess *DoltSession) Flush(ctx *sql.Context, dbName string) error {
	editSession := sess.editSessions[dbName]
	newRoot, err := editSession.Flush(ctx)
	if err != nil {
		return err
	}

	return sess.SetRoot(ctx, dbName, newRoot)
}
// CommitTransaction commits the in-progress transaction for the database named.
// In batched mode, pending edits are flushed first. If the session isn't dirty
// for this database, or no database is selected, this is a no-op. With
// transactions disabled it writes the session root directly (no concurrency
// control); with transactions enabled it merges the root against concurrent
// writers via the DoltTransaction.
func (sess *DoltSession) CommitTransaction(ctx *sql.Context, dbName string, tx sql.Transaction) error {
	if sess.batchMode == batched {
		err := sess.Flush(ctx, dbName)
		if err != nil {
			return err
		}
	}

	if !sess.dirty[dbName] {
		return nil
	}

	// This is triggered when certain commands are sent to the server (ex. commit) when a database is not selected.
	// These commands should not error.
	if dbName == "" {
		return nil
	}

	dbRoot, ok := sess.roots[dbName]
	// It's possible that this returns false if the user has created an in-Memory database. Moreover,
	// the analyzer will check for us whether a db exists or not.
	if !ok {
		return nil
	}

	// Old "commit" path, which just writes whatever the root for this session is to the repo state file with no care
	// for concurrency. Over time we will disable this path.
	if !TransactionsEnabled(ctx) {
		dbData := sess.dbDatas[dbName]

		h, err := dbData.Ddb.WriteRootValue(ctx, dbRoot.root)
		if err != nil {
			return err
		}

		sess.dirty[dbName] = false
		return dbData.Rsw.SetWorkingHash(ctx, h)
	}

	// Newer commit path does a concurrent merge of the current root with the one other clients are editing, then
	// updates the session with this new root.
	// TODO: validate that the transaction belongs to the DB named
	dtx, ok := tx.(*DoltTransaction)
	if !ok {
		return fmt.Errorf("expected a DoltTransaction")
	}

	mergedRoot, err := dtx.Commit(ctx, dbRoot.root)
	if err != nil {
		return err
	}

	// Adopt the merged root before creating any dolt commit so the session and
	// the committed state agree.
	err = sess.SetRoot(ctx, dbName, mergedRoot)
	if err != nil {
		return err
	}

	err = sess.CommitWorkingSetToDolt(ctx, dtx.dbData, dbName)
	if err != nil {
		return err
	}

	sess.dirty[dbName] = false
	return nil
}
// CommitWorkingSetToDolt stages the working set and then immediately commits the staged changes. This is a Dolt commit
// rather than a transaction commit. If there are no changes to be staged, then no commit is created.
// Only acts when the dolt_transaction_commit session variable is set to true.
func (sess *DoltSession) CommitWorkingSetToDolt(ctx *sql.Context, dbData env.DbData, dbName string) error {
	if commitBool, err := sess.Session.GetSessionVariable(ctx, DoltCommitOnTransactionCommit); err != nil {
		return err
	} else if commitBool.(int8) == 1 {
		fkChecks, err := sess.Session.GetSessionVariable(ctx, "foreign_key_checks")
		if err != nil {
			return err
		}
		err = actions.StageAllTables(ctx, dbData)
		if err != nil {
			return err
		}
		queryTime := ctx.QueryTime()
		// Commit message is auto-generated from the query timestamp.
		_, err = actions.CommitStaged(ctx, dbData, actions.CommitStagedProps{
			Message:          fmt.Sprintf("Transaction commit at %s", queryTime.UTC().Format("2006-01-02T15:04:05Z")),
			Date:             queryTime,
			AllowEmpty:       false,
			CheckForeignKeys: fkChecks.(int8) == 1,
			Name:             sess.Username,
			Email:            sess.Email,
		})
		// A NothingStaged error means the transaction made no table changes;
		// that's expected and not fatal. Any other error aborts.
		if _, ok := err.(actions.NothingStaged); err != nil && !ok {
			return err
		}

		// Refresh the session's head variable to point at the new commit.
		headCommit, err := dbData.Ddb.Resolve(ctx, dbData.Rsr.CWBHeadSpec(), dbData.Rsr.CWBHeadRef())
		if err != nil {
			return err
		}
		headHash, err := headCommit.HashOf()
		if err != nil {
			return err
		}
		err = sess.Session.SetSessionVariable(ctx, HeadKey(dbName), headHash.String())
		if err != nil {
			return err
		}
	}
	return nil
}
// RollbackTransaction rolls the given transaction back, restoring the session
// root to the transaction's start state. A no-op when transactions are
// disabled, no database is selected, or the session has no changes.
func (sess *DoltSession) RollbackTransaction(ctx *sql.Context, dbName string, tx sql.Transaction) error {
	if !TransactionsEnabled(ctx) || dbName == "" || !sess.dirty[dbName] {
		return nil
	}

	doltTx, isDoltTx := tx.(*DoltTransaction)
	if !isDoltTx {
		return fmt.Errorf("expected a DoltTransaction")
	}

	if err := sess.SetRoot(ctx, dbName, doltTx.startRoot); err != nil {
		return err
	}

	sess.dirty[dbName] = false
	return nil
}
// CreateSavepoint creates a new savepoint for this transaction with the name given. A previously created savepoint
// with the same name will be overwritten.
func (sess *DoltSession) CreateSavepoint(ctx *sql.Context, savepointName, dbName string, tx sql.Transaction) error {
	if !TransactionsEnabled(ctx) || dbName == "" {
		return nil
	}

	doltTx, isDoltTx := tx.(*DoltTransaction)
	if !isDoltTx {
		return fmt.Errorf("expected a DoltTransaction")
	}

	// Snapshot the current session root under the savepoint name.
	doltTx.CreateSavepoint(savepointName, sess.roots[dbName].root)
	return nil
}
// RollbackToSavepoint sets this session's root to the one saved in the savepoint name. It's an error if no savepoint
// with that name exists.
func (sess *DoltSession) RollbackToSavepoint(ctx *sql.Context, savepointName, dbName string, tx sql.Transaction) error {
	if !TransactionsEnabled(ctx) || dbName == "" {
		return nil
	}

	doltTx, isDoltTx := tx.(*DoltTransaction)
	if !isDoltTx {
		return fmt.Errorf("expected a DoltTransaction")
	}

	savedRoot := doltTx.RollbackToSavepoint(savepointName)
	if savedRoot == nil {
		return sql.ErrSavepointDoesNotExist.New(savepointName)
	}

	return sess.SetRoot(ctx, dbName, savedRoot)
}
// ReleaseSavepoint removes the savepoint name from the transaction. It's an error if no savepoint with that name
// exists.
func (sess *DoltSession) ReleaseSavepoint(ctx *sql.Context, savepointName, dbName string, tx sql.Transaction) error {
	if !TransactionsEnabled(ctx) || dbName == "" {
		return nil
	}

	doltTx, isDoltTx := tx.(*DoltTransaction)
	if !isDoltTx {
		return fmt.Errorf("expected a DoltTransaction")
	}

	// ClearSavepoint returns nil when no such savepoint was registered.
	if doltTx.ClearSavepoint(savepointName) == nil {
		return sql.ErrSavepointDoesNotExist.New(savepointName)
	}

	return nil
}
// GetDoltDB returns the *DoltDB for a given database by name, and false if
// no such database is registered with this session.
func (sess *DoltSession) GetDoltDB(dbName string) (*doltdb.DoltDB, bool) {
	if data, found := sess.dbDatas[dbName]; found {
		return data.Ddb, true
	}
	return nil, false
}
// GetDoltDBRepoStateWriter returns the RepoStateWriter for the database named,
// and false if no such database is registered with this session.
func (sess *DoltSession) GetDoltDBRepoStateWriter(dbName string) (env.RepoStateWriter, bool) {
	if data, found := sess.dbDatas[dbName]; found {
		return data.Rsw, true
	}
	return nil, false
}
// GetDoltDBRepoStateReader returns the RepoStateReader for the database named,
// and false if no such database is registered with this session.
func (sess *DoltSession) GetDoltDBRepoStateReader(dbName string) (env.RepoStateReader, bool) {
	if data, found := sess.dbDatas[dbName]; found {
		return data.Rsr, true
	}
	return nil, false
}
// GetDoltDBDocsReadWriter returns the DocsReadWriter for the database named,
// and false if no such database is registered with this session.
func (sess *DoltSession) GetDoltDBDocsReadWriter(dbName string) (env.DocsReadWriter, bool) {
	if data, found := sess.dbDatas[dbName]; found {
		return data.Drw, true
	}
	return nil, false
}
// GetDbData returns the env.DbData (DoltDB plus repo state reader/writer and
// docs read-writer) for the database named, and false if no such database is
// registered with this session.
func (sess *DoltSession) GetDbData(dbName string) (env.DbData, bool) {
	// All four components live in the same dbDatas entry, so a single map
	// lookup replaces the four per-component helper calls (each of which
	// repeated the same lookup) the previous version made.
	d, ok := sess.dbDatas[dbName]
	if !ok {
		return env.DbData{}, false
	}
	return d, true
}
// GetRoot returns the current *RootValue for a given database associated with the session,
// and false if the database has no root tracked by this session.
func (sess *DoltSession) GetRoot(dbName string) (*doltdb.RootValue, bool) {
	if entry, found := sess.roots[dbName]; found {
		return entry.root, true
	}
	return nil, false
}
// SetRoot sets a new root value for the session for the database named. This is the primary mechanism by which data
// changes are communicated to the engine and persisted back to disk. All data changes should be followed by a call to
// update the session's root value via this method.
// Data changes contained in the |newRoot| aren't persisted until this session is committed.
// Side effects, in order: updates the "<db>_working" session variable, the roots
// map, the edit session's root, and finally marks the database dirty.
func (sess *DoltSession) SetRoot(ctx *sql.Context, dbName string, newRoot *doltdb.RootValue) error {
	// Setting the same root twice is a no-op; avoids marking the session dirty.
	if rootsEqual(sess.roots[dbName].root, newRoot) {
		return nil
	}

	h, err := newRoot.HashOf()
	if err != nil {
		return err
	}

	hashStr := h.String()
	err = sess.Session.SetSessionVariable(ctx, WorkingKey(dbName), hashStr)
	if err != nil {
		return err
	}

	sess.roots[dbName] = dbRoot{hashStr, newRoot}

	err = sess.editSessions[dbName].SetRoot(ctx, newRoot)
	if err != nil {
		return err
	}

	sess.dirty[dbName] = true
	return nil
}
// GetTempTableRootValue returns the root value holding this database's temporary
// tables, and false if no temporary-table root has been created for it.
func (sess *DoltSession) GetTempTableRootValue(ctx *sql.Context, dbName string) (*doltdb.RootValue, bool) {
	if root, found := sess.tempTableRoots[dbName]; found {
		return root, true
	}
	return nil, false
}
// SetTempTableRoot replaces the temporary-table root for the database named and
// propagates it to the temporary-table edit session.
// NOTE(review): panics (nil map entry) if CreateTemporaryTablesRoot was never
// called for dbName — confirm callers guarantee this.
func (sess *DoltSession) SetTempTableRoot(ctx *sql.Context, dbName string, newRoot *doltdb.RootValue) error {
	sess.tempTableRoots[dbName] = newRoot
	return sess.tempTableEditSessions[dbName].SetRoot(ctx, newRoot)
}
// GetHeadCommit returns the parent commit of the current session.
// The head is read from the "<db>_head" session variable, which must hold a
// valid commit hash; the commit is then resolved against the database's DoltDB.
func (sess *DoltSession) GetHeadCommit(ctx *sql.Context, dbName string) (*doltdb.Commit, hash.Hash, error) {
	dbd, dbFound := sess.dbDatas[dbName]
	if !dbFound {
		return nil, hash.Hash{}, sql.ErrDatabaseNotFound.New(dbName)
	}

	value, err := sess.Session.GetSessionVariable(ctx, dbName+HeadKeySuffix)
	if err != nil {
		return nil, hash.Hash{}, err
	}

	valStr, isStr := value.(string)
	if !isStr || !hash.IsValid(valStr) {
		return nil, hash.Hash{}, doltdb.ErrInvalidHash
	}

	h := hash.Parse(valStr)
	cs, err := doltdb.NewCommitSpec(valStr)
	if err != nil {
		return nil, hash.Hash{}, err
	}

	// nil head ref: the spec is a raw hash, so no branch resolution is needed.
	cm, err := dbd.Ddb.Resolve(ctx, cs, nil)
	if err != nil {
		return nil, hash.Hash{}, err
	}

	return cm, h, nil
}
// SetSessionVariable is defined on sql.Session. We intercept it here to interpret the special semantics of the system
// vars that we define. Otherwise we pass it on to the base implementation.
func (sess *DoltSession) SetSessionVariable(ctx *sql.Context, key string, value interface{}) error {
	// TODO: is working head ref
	isHead, headDb := IsHeadKey(key)
	if isHead {
		return sess.setHeadSessionVar(ctx, key, value, headDb)
	}

	isWorking, workingDb := IsWorkingKey(key)
	if isWorking {
		return sess.setWorkingSessionVar(ctx, value, workingDb)
	}

	if strings.ToLower(key) == "foreign_key_checks" {
		return sess.setForeignKeyChecksSessionVar(ctx, key, value)
	}

	// Anything else is an ordinary session variable.
	return sess.Session.SetSessionVariable(ctx, key, value)
}
// setForeignKeyChecksSessionVar mirrors the MySQL foreign_key_checks variable
// onto every table edit session in this dolt session, then stores the value.
// Only 0 and 1 are accepted.
func (sess *DoltSession) setForeignKeyChecksSessionVar(ctx *sql.Context, key string, value interface{}) error {
	converted, err := sql.Int64.Convert(value)
	if err != nil {
		return err
	}

	var fkChecks int64
	if converted != nil {
		fkChecks = converted.(int64)
	}

	switch fkChecks {
	case 0:
		for _, es := range sess.editSessions {
			es.Props.ForeignKeyChecksDisabled = true
		}
	case 1:
		for _, es := range sess.editSessions {
			es.Props.ForeignKeyChecksDisabled = false
		}
	default:
		return fmt.Errorf("variable 'foreign_key_checks' can't be set to the value of '%d'", fkChecks)
	}

	return sess.Session.SetSessionVariable(ctx, key, value)
}
// setWorkingSessionVar handles an assignment to a "<db>_working" session
// variable by loading the root value named by the hash and installing it as
// the session's working root. An unknown hash is silently ignored.
func (sess *DoltSession) setWorkingSessionVar(ctx *sql.Context, value interface{}, dbName string) error {
	valStr, isStr := value.(string) // valStr represents a root val hash
	if !isStr || !hash.IsValid(valStr) {
		return doltdb.ErrInvalidHash
	}

	// If there's a Root Value that's associated with this hash update dbRoots to include it
	dbd, dbFound := sess.dbDatas[dbName]
	if !dbFound {
		return sql.ErrDatabaseNotFound.New(dbName)
	}

	root, err := dbd.Ddb.ReadRootValue(ctx, hash.Parse(valStr))
	// BUGFIX: errors.Is(err, target) takes the error under test first and the
	// sentinel second. The previous call had the arguments reversed, which only
	// matched when the sentinel was returned directly, not when wrapped.
	if errors.Is(err, doltdb.ErrNoRootValAtHash) {
		return nil
	} else if err != nil {
		return err
	}

	return sess.SetRoot(ctx, dbName, root)
}
// setHeadSessionVar handles an assignment to a "<db>_head" session variable:
// it resolves the commit named by the hash, stores the variable, and resets
// the session's working root to that commit's root.
func (sess *DoltSession) setHeadSessionVar(ctx *sql.Context, key string, value interface{}, dbName string) error {
	dbd, dbFound := sess.dbDatas[dbName]

	if !dbFound {
		return sql.ErrDatabaseNotFound.New(dbName)
	}

	valStr, isStr := value.(string)
	if !isStr || !hash.IsValid(valStr) {
		return doltdb.ErrInvalidHash
	}

	cs, err := doltdb.NewCommitSpec(valStr)
	if err != nil {
		return err
	}

	// nil head ref: the spec is a raw hash, so no branch resolution is needed.
	cm, err := dbd.Ddb.Resolve(ctx, cs, nil)
	if err != nil {
		return err
	}

	root, err := cm.GetRootValue()
	if err != nil {
		return err
	}

	err = sess.Session.SetSessionVariable(ctx, HeadKey(dbName), value)
	if err != nil {
		return err
	}

	// TODO: preserve working set changes?
	return sess.SetRoot(ctx, dbName, root)
}
// SetSessionVarDirectly directly updates sess.Session. This is useful in the context of the sql shell where
// the working and head session variable may be updated at different times.
// It bypasses the special head/working interception performed by SetSessionVariable.
func (sess *DoltSession) SetSessionVarDirectly(ctx *sql.Context, key string, value interface{}) error {
	return sess.Session.SetSessionVariable(ctx, key, value)
}
// AddDB adds the database given to this session. This establishes a starting root value for this session, as well as
// other state tracking metadata.
// When transactions are enabled it also establishes (and updates) the database's
// working set ref from the repo state, and publishes the head ref, head hash and
// working hash as session variables.
func (sess *DoltSession) AddDB(ctx *sql.Context, db sql.Database, dbData env.DbData) error {
	defineSystemVariables(db.Name())

	rsr := dbData.Rsr
	ddb := dbData.Ddb

	sess.dbDatas[db.Name()] = dbData
	sess.editSessions[db.Name()] = editor.CreateTableEditSession(nil, editor.TableEditSessionProps{})

	cs := rsr.CWBHeadSpec()
	headRef := rsr.CWBHeadRef()

	workingHashInRepoState := rsr.WorkingHash()
	workingHashInWsRef := hash.Hash{}

	// TODO: this resolve isn't necessary in all cases and slows things down
	cm, err := ddb.Resolve(ctx, cs, headRef)
	if err != nil {
		return err
	}

	headCommitHash, err := cm.HashOf()
	if err != nil {
		return err
	}

	var workingRoot *doltdb.RootValue
	// Get a working root to use for this session. This could come from the an independent working set not associated
	// with any commit, or from the head commit itself in some use cases. Some implementors of RepoStateReader use the
	// current HEAD hash as the working set hash, and in fact they have to -- there's not always an independently
	// addressable root value available, only one persisted as a value in a Commit object.
	if headCommitHash == workingHashInRepoState {
		workingRoot, err = cm.GetRootValue()
		if err != nil {
			return err
		}
	}

	if workingRoot == nil {
		// If the root isn't a head commit value, assume it's a standalone value and look it up
		workingRoot, err = ddb.ReadRootValue(ctx, workingHashInRepoState)
		if err != nil {
			return err
		}
	}

	if TransactionsEnabled(ctx) {
		// Not all dolt commands update the working set ref yet. So until that's true, we update it here with the contents
		// of the repo_state.json file
		workingSetRef, err := ref.WorkingSetRefForHead(headRef)
		if err != nil {
			return err
		}
		sess.workingSets[db.Name()] = workingSetRef

		workingSet, err := ddb.ResolveWorkingSet(ctx, workingSetRef)
		if err == doltdb.ErrWorkingSetNotFound {
			// no working set ref established yet
		} else if err != nil {
			return err
		} else {
			// Hash of the existing working set, used as the expected value for the
			// optimistic-locking update below.
			workingHashInWsRef, err = workingSet.Struct().Hash(ddb.Format())
			if err != nil {
				return err
			}
		}

		// TODO: there's a race here if more than one client connects at the same time. We need a retry
		err = ddb.UpdateWorkingSet(ctx, workingSetRef, workingRoot, workingHashInWsRef)
		if err != nil {
			return err
		}

		err = sess.Session.SetSessionVariable(ctx, HeadRefKey(db.Name()), workingSetRef.GetPath())
		if err != nil {
			return err
		}
	}

	err = sess.SetRoot(ctx, db.Name(), workingRoot)
	if err != nil {
		return err
	}

	err = sess.Session.SetSessionVariable(ctx, HeadKey(db.Name()), headCommitHash.String())
	if err != nil {
		return err
	}

	// After setting the initial root we have no state to commit
	sess.dirty[db.Name()] = false

	return nil
}
// CreateTemporaryTablesRoot creates an empty root value and a table edit session for the purposes of storing
// temporary tables. This should only be used on demand. That is only when a temporary table is created should we
// create the root map and edit session map.
func (sess *DoltSession) CreateTemporaryTablesRoot(ctx *sql.Context, dbName string, ddb *doltdb.DoltDB) error {
	newRoot, err := doltdb.EmptyRootValue(ctx, ddb.ValueReadWriter())
	if err != nil {
		return err
	}

	// The edit session must exist before SetTempTableRoot, which propagates the
	// root to it.
	sess.tempTableEditSessions[dbName] = editor.CreateTableEditSession(newRoot, editor.TableEditSessionProps{})
	return sess.SetTempTableRoot(ctx, dbName, newRoot)
}
// defineSystemVariables defines dolt-session variables in the engine as necessary.
// The "<name>_head" variable's existence is used as a sentinel: if it's already
// registered, all three variables for this database are assumed present.
func defineSystemVariables(name string) {
	if _, _, ok := sql.SystemVariables.GetGlobal(name + HeadKeySuffix); !ok {
		sql.SystemVariables.AddSystemVariables([]sql.SystemVariable{
			{
				Name:              HeadRefKey(name),
				Scope:             sql.SystemVariableScope_Session,
				Dynamic:           true,
				SetVarHintApplies: false,
				Type:              sql.NewSystemStringType(HeadRefKey(name)),
				Default:           "",
			},
			{
				Name:              HeadKey(name),
				Scope:             sql.SystemVariableScope_Session,
				Dynamic:           true,
				SetVarHintApplies: false,
				Type:              sql.NewSystemStringType(HeadKey(name)),
				Default:           "",
			},
			{
				Name:              WorkingKey(name),
				Scope:             sql.SystemVariableScope_Session,
				Dynamic:           true,
				SetVarHintApplies: false,
				Type:              sql.NewSystemStringType(WorkingKey(name)),
				Default:           "",
			},
		})
	}
}
File diff suppressed because it is too large Load Diff
+105
View File
@@ -0,0 +1,105 @@
// Copyright 2021 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dsess
import (
"context"
"fmt"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
)
// SessionStateAdapter is an adapter for env.RepoStateReader in SQL contexts, getting information about the repo state
// from the session.
type SessionStateAdapter struct {
	session *Session // the SQL session backing all repo-state queries
	dbName  string   // name of the database this adapter serves
}
// UpdateStagedRoot replaces the staged root in the session's roots for this database.
// NOTE(review): the ok result of GetRoots is discarded, and ctx is assumed to be a
// *sql.Context (the assertion panics otherwise) — confirm callers guarantee both.
func (s SessionStateAdapter) UpdateStagedRoot(ctx context.Context, newRoot *doltdb.RootValue) error {
	roots, _ := s.session.GetRoots(s.dbName)
	roots.Staged = newRoot
	return s.session.SetRoots(ctx.(*sql.Context), s.dbName, roots)
}
// UpdateWorkingRoot replaces the working root in the session's roots for this database.
// NOTE(review): the ok result of GetRoots is discarded, and ctx is assumed to be a
// *sql.Context (the assertion panics otherwise) — confirm callers guarantee both.
func (s SessionStateAdapter) UpdateWorkingRoot(ctx context.Context, newRoot *doltdb.RootValue) error {
	roots, _ := s.session.GetRoots(s.dbName)
	roots.Working = newRoot
	return s.session.SetRoots(ctx.(*sql.Context), s.dbName, roots)
}
// SetCWBHeadRef is unsupported in a SQL session; the head ref comes from the working set.
func (s SessionStateAdapter) SetCWBHeadRef(ctx context.Context, marshalableRef ref.MarshalableRef) error {
	return fmt.Errorf("Cannot set cwb head ref with a SessionStateAdapter")
}
// AbortMerge is unsupported in a SQL session.
func (s SessionStateAdapter) AbortMerge(ctx context.Context) error {
	return fmt.Errorf("Cannot abort merge with a SessionStateAdapter")
}
// ClearMerge is a no-op for a SQL session, unlike AbortMerge/StartMerge which error.
// NOTE(review): confirm the asymmetry with the sibling merge methods is intentional.
func (s SessionStateAdapter) ClearMerge(ctx context.Context) error {
	return nil
}
// StartMerge is unsupported in a SQL session.
func (s SessionStateAdapter) StartMerge(ctx context.Context, commit *doltdb.Commit) error {
	return fmt.Errorf("Cannot start merge with a SessionStateAdapter")
}
var _ env.RepoStateReader = SessionStateAdapter{}
var _ env.RepoStateWriter = SessionStateAdapter{}
var _ env.RootsProvider = SessionStateAdapter{}
// NewSessionStateAdapter returns a SessionStateAdapter that reads and writes repo
// state for the named database through the given session.
func NewSessionStateAdapter(session *Session, dbName string) SessionStateAdapter {
	return SessionStateAdapter{session: session, dbName: dbName}
}
// GetRoots returns the working/staged/head roots tracked by the session for this database.
// NOTE(review): panics (nil map entry) if the database was never added to the session — confirm.
func (s SessionStateAdapter) GetRoots(ctx context.Context) (doltdb.Roots, error) {
	return s.session.DbStates[s.dbName].GetRoots(), nil
}
// CWBHeadRef returns the head ref of the current working branch, derived from the
// session's working set ref. Panics if the working set ref can't be converted to a
// head ref, since the interface offers no error return.
func (s SessionStateAdapter) CWBHeadRef() ref.DoltRef {
	workingSet := s.session.DbStates[s.dbName].WorkingSet
	headRef, err := workingSet.Ref().ToHeadRef()

	// TODO: fix this interface
	if err != nil {
		panic(err)
	}
	return headRef
}
// CWBHeadSpec returns a commit spec naming the current working branch's head.
// Panics if the spec can't be constructed, since the interface offers no error
// return.
func (s SessionStateAdapter) CWBHeadSpec() *doltdb.CommitSpec {
	// TODO: get rid of this
	// Local renamed from "ref" to headRef: the original shadowed the imported
	// ref package used elsewhere in this file.
	headRef := s.CWBHeadRef()
	spec, err := doltdb.NewCommitSpec(headRef.GetPath())
	if err != nil {
		panic(err)
	}
	return spec
}
// IsMergeActive reports whether the session's working set for this database has a merge in progress.
func (s SessionStateAdapter) IsMergeActive(ctx context.Context) (bool, error) {
	return s.session.DbStates[s.dbName].WorkingSet.MergeActive(), nil
}
// GetMergeCommit returns the commit being merged into the working set.
// NOTE(review): MergeState() is presumably nil when no merge is active — callers
// should check IsMergeActive first; confirm.
func (s SessionStateAdapter) GetMergeCommit(ctx context.Context) (*doltdb.Commit, error) {
	return s.session.DbStates[s.dbName].WorkingSet.MergeState().Commit(), nil
}
// GetPreMergeWorking returns the working root as it was before the active merge began.
// NOTE(review): MergeState() is presumably nil when no merge is active — callers
// should check IsMergeActive first; confirm.
func (s SessionStateAdapter) GetPreMergeWorking(ctx context.Context) (*doltdb.RootValue, error) {
	return s.session.DbStates[s.dbName].WorkingSet.MergeState().PreMergeWorkingRoot(), nil
}
@@ -12,11 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package sqle
package dsess
import (
"fmt"
"strings"
"time"
"github.com/dolthub/go-mysql-server/sql"
@@ -25,7 +26,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/merge"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/hash"
)
const (
@@ -33,10 +33,10 @@ const (
)
type DoltTransaction struct {
startRoot *doltdb.RootValue
workingSet ref.WorkingSetRef
dbData env.DbData
savepoints []savepoint
startState *doltdb.WorkingSet
workingSetRef ref.WorkingSetRef
dbData env.DbData
savepoints []savepoint
}
type savepoint struct {
@@ -44,11 +44,11 @@ type savepoint struct {
root *doltdb.RootValue
}
func NewDoltTransaction(startRoot *doltdb.RootValue, workingSet ref.WorkingSetRef, dbData env.DbData) *DoltTransaction {
func NewDoltTransaction(startState *doltdb.WorkingSet, workingSet ref.WorkingSetRef, dbData env.DbData) *DoltTransaction {
return &DoltTransaction{
startRoot: startRoot,
workingSet: workingSet,
dbData: dbData,
startState: startState,
workingSetRef: workingSet,
dbData: dbData,
}
}
@@ -63,41 +63,54 @@ func (tx DoltTransaction) String() string {
// |newRoot| is the mergeRoot
// |tx.startRoot| is ancRoot
// if working set == ancRoot, attempt a fast-forward merge
func (tx *DoltTransaction) Commit(ctx *sql.Context, newRoot *doltdb.RootValue) (*doltdb.RootValue, error) {
// TODO: Non-working roots aren't merged into the working set and just stomp any changes made there. We need merge
// strategies for staged as well as merge state.
func (tx *DoltTransaction) Commit(ctx *sql.Context, workingSet *doltdb.WorkingSet) (*doltdb.WorkingSet, error) {
// logrus.Errorf("Committing working root %s", workingSet.WorkingRoot().DebugString(ctx, true))
// Don't allow a root value with conflicts to be committed. Later we may open this up via configuration
hasConflicts, err := workingSet.WorkingRoot().HasConflicts(ctx)
if err != nil {
return nil, err
}
if hasConflicts {
return nil, doltdb.ErrUnresolvedConflicts
}
for i := 0; i < maxTxCommitRetries; i++ {
ws, err := tx.dbData.Ddb.ResolveWorkingSet(ctx, tx.workingSet)
newWorkingSet := false
ws, err := tx.dbData.Ddb.ResolveWorkingSet(ctx, tx.workingSetRef)
if err == doltdb.ErrWorkingSetNotFound {
// initial commit
err = tx.dbData.Ddb.UpdateWorkingSet(ctx, tx.workingSet, newRoot, hash.Hash{})
if err == datas.ErrOptimisticLockFailed {
continue
}
// This is to handle the case where an existing DB pre working sets is committing to this HEAD for the
// first time. Can be removed and called an error post 1.0
ws = doltdb.EmptyWorkingSet(tx.workingSetRef)
newWorkingSet = true
} else if err != nil {
return nil, err
}
existingWorkingRoot := ws.RootValue()
hash, err := ws.HashOf()
if err != nil {
return nil, err
}
root := ws.RootValue()
hash, err := ws.Struct().Hash(tx.dbData.Ddb.Format())
if err != nil {
return nil, err
}
if rootsEqual(root, tx.startRoot) {
if newWorkingSet || rootsEqual(existingWorkingRoot, tx.startState.WorkingRoot()) {
// ff merge
err = tx.dbData.Ddb.UpdateWorkingSet(ctx, tx.workingSet, newRoot, hash)
err = tx.dbData.Ddb.UpdateWorkingSet(ctx, tx.workingSetRef, workingSet, hash, tx.getWorkingSetMeta(ctx))
if err == datas.ErrOptimisticLockFailed {
continue
} else if err != nil {
return nil, err
}
return tx.updateRepoStateFile(ctx, newRoot)
return workingSet, nil
}
mergedRoot, stats, err := merge.MergeRoots(ctx, root, newRoot, tx.startRoot)
mergedRoot, stats, err := merge.MergeRoots(ctx, existingWorkingRoot, workingSet.WorkingRoot(), tx.startState.WorkingRoot())
if err != nil {
return nil, err
}
@@ -109,36 +122,21 @@ func (tx *DoltTransaction) Commit(ctx *sql.Context, newRoot *doltdb.RootValue) (
}
}
err = tx.dbData.Ddb.UpdateWorkingSet(ctx, tx.workingSet, mergedRoot, hash)
mergedWorkingSet := workingSet.WithWorkingRoot(mergedRoot)
err = tx.dbData.Ddb.UpdateWorkingSet(ctx, tx.workingSetRef, mergedWorkingSet, hash, tx.getWorkingSetMeta(ctx))
if err == datas.ErrOptimisticLockFailed {
continue
} else if err != nil {
return nil, err
}
// TODO: this is not thread safe, but will not be necessary after migrating all clients away from using the
// working set stored in repo_state.json, so should be good enough for now
return tx.updateRepoStateFile(ctx, mergedRoot)
return mergedWorkingSet, nil
}
// TODO: different error type for retries exhausted
return nil, datas.ErrOptimisticLockFailed
}
func (tx *DoltTransaction) updateRepoStateFile(ctx *sql.Context, mergedRoot *doltdb.RootValue) (*doltdb.RootValue, error) {
hash, err := mergedRoot.HashOf()
if err != nil {
return nil, err
}
err = tx.dbData.Rsw.SetWorkingHash(ctx, hash)
if err != nil {
return nil, err
}
return mergedRoot, err
}
// CreateSavepoint creates a new savepoint with the name and root value given. If a savepoint with the name given
// already exists, it's overwritten.
func (tx *DoltTransaction) CreateSavepoint(name string, root *doltdb.RootValue) {
@@ -183,6 +181,16 @@ func (tx *DoltTransaction) ClearSavepoint(name string) *doltdb.RootValue {
return existingRoot
}
func (tx DoltTransaction) getWorkingSetMeta(ctx *sql.Context) *doltdb.WorkingSetMeta {
sess := DSessFromSess(ctx.Session)
return &doltdb.WorkingSetMeta{
User: sess.Username,
Email: sess.Email,
Timestamp: uint64(time.Now().Unix()),
Description: "sql transaction",
}
}
func rootsEqual(left, right *doltdb.RootValue) bool {
if left == nil || right == nil {
return false
@@ -215,6 +215,9 @@ func (bWr branchWriter) Insert(ctx *sql.Context, r sql.Row) error {
}
branchRef := ref.NewBranchRef(branchName)
// TODO: this isn't safe in a SQL context, since we have to update the working set of the new branch and it's a
// race. It needs to be able to retry the same as committing a transaction.
return ddb.NewBranchAtCommit(ctx, branchRef, cm)
}
@@ -59,7 +59,7 @@ func TestInsertIntoQueryCatalogTable(t *testing.T) {
err = dEnv.UpdateWorkingRoot(ctx, root)
require.NoError(t, err)
rows, err := sqle.ExecuteSelect(dEnv, dEnv.DoltDB, root, "select display_order, query, name, description from "+doltdb.DoltQueryCatalogTableName)
rows, err := sqle.ExecuteSelect(t, dEnv, dEnv.DoltDB, root, "select display_order, query, name, description from "+doltdb.DoltQueryCatalogTableName)
require.NoError(t, err)
expectedRows := []sql.Row{
{uint64(1), "select 1 from dual", "name", "description"},
@@ -82,7 +82,7 @@ func TestInsertIntoQueryCatalogTable(t *testing.T) {
err = dEnv.UpdateWorkingRoot(ctx, root)
require.NoError(t, err)
rows, err = sqle.ExecuteSelect(dEnv, dEnv.DoltDB, root, "select display_order, query, name, description from "+doltdb.DoltQueryCatalogTableName+" order by display_order")
rows, err = sqle.ExecuteSelect(t, dEnv, dEnv.DoltDB, root, "select display_order, query, name, description from "+doltdb.DoltQueryCatalogTableName+" order by display_order")
require.NoError(t, err)
expectedRows = []sql.Row{
{uint64(1), "select 1 from dual", "name", "description"},
@@ -91,7 +91,7 @@ func TestInsertIntoQueryCatalogTable(t *testing.T) {
assert.Equal(t, expectedRows, rows)
rows, err = sqle.ExecuteSelect(dEnv, dEnv.DoltDB, root, "select id from "+doltdb.DoltQueryCatalogTableName)
rows, err = sqle.ExecuteSelect(t, dEnv, dEnv.DoltDB, root, "select id from "+doltdb.DoltQueryCatalogTableName)
require.NoError(t, err)
for _, r := range rows {
assert.NotEmpty(t, r)
@@ -30,9 +30,10 @@ import (
// StatusTable is a sql.Table implementation that implements a system table which shows the dolt branches
type StatusTable struct {
ddb *doltdb.DoltDB
rsr env.RepoStateReader
drw env.DocsReadWriter
ddb *doltdb.DoltDB
rootsProvider env.RootsProvider
drw env.DocsReadWriter
dbName string
}
func (s StatusTable) Name() string {
@@ -60,8 +61,13 @@ func (s StatusTable) PartitionRows(context *sql.Context, _ sql.Partition) (sql.R
}
// NewStatusTable creates a StatusTable
func NewStatusTable(_ *sql.Context, ddb *doltdb.DoltDB, rsr env.RepoStateReader, drw env.DocsReadWriter) sql.Table {
return &StatusTable{ddb: ddb, rsr: rsr, drw: drw}
func NewStatusTable(_ *sql.Context, dbName string, ddb *doltdb.DoltDB, rp env.RootsProvider, drw env.DocsReadWriter) sql.Table {
return &StatusTable{
ddb: ddb,
dbName: dbName,
rootsProvider: rp,
drw: drw,
}
}
// StatusIter is a sql.RowItr implementation which iterates over each commit as if it's a row in the table.
@@ -73,30 +79,30 @@ type StatusItr struct {
}
func newStatusItr(ctx *sql.Context, st *StatusTable) (*StatusItr, error) {
ddb := st.ddb
rsr := st.rsr
rp := st.rootsProvider
drw := st.drw
stagedTables, unstagedTables, err := diff.GetStagedUnstagedTableDeltas(ctx, ddb, rsr)
roots, err := rp.GetRoots(ctx)
if err != nil {
return nil, err
}
stagedTables, unstagedTables, err := diff.GetStagedUnstagedTableDeltas(ctx, roots)
if err != nil {
return &StatusItr{}, err
}
stagedDocDiffs, unStagedDocDiffs, err := diff.GetDocDiffs(ctx, ddb, rsr, drw)
stagedDocDiffs, unStagedDocDiffs, err := diff.GetDocDiffs(ctx, roots, drw)
if err != nil {
return &StatusItr{}, err
}
workingTblsInConflict, _, _, err := merge.GetTablesInConflict(ctx, ddb, rsr)
workingTblsInConflict, _, _, err := merge.GetTablesInConflict(ctx, roots)
if err != nil {
return &StatusItr{}, err
}
workingDocsInConflict, err := merge.GetDocsInConflict(ctx, ddb, rsr, drw)
workingDocsInConflict, err := merge.GetDocsInConflict(ctx, roots.Working, drw)
if err != nil {
return &StatusItr{}, err
}
@@ -28,12 +28,7 @@ func init() {
}
func TestQueries(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestQueries(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestQueries(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestQueries(t, newDoltHarness(t))
}
func TestSingleQuery(t *testing.T) {
@@ -99,12 +94,7 @@ func TestSingleScript(t *testing.T) {
}
func TestVersionedQueries(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestVersionedQueries(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestVersionedQueries(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestVersionedQueries(t, newDoltHarness(t))
}
// Tests of choosing the correct execution plan independent of result correctness. Mostly useful for confirming that
@@ -128,75 +118,35 @@ func TestQueryPlans(t *testing.T) {
}
func TestQueryErrors(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestQueryErrors(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestQueryErrors(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestQueryErrors(t, newDoltHarness(t))
}
func TestInfoSchema(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestInfoSchema(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestInfoSchema(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestInfoSchema(t, newDoltHarness(t))
}
func TestColumnAliases(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestColumnAliases(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestColumnAliases(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestColumnAliases(t, newDoltHarness(t))
}
func TestOrderByGroupBy(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestOrderByGroupBy(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestOrderByGroupBy(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestOrderByGroupBy(t, newDoltHarness(t))
}
func TestAmbiguousColumnResolution(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestAmbiguousColumnResolution(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestAmbiguousColumnResolution(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestAmbiguousColumnResolution(t, newDoltHarness(t))
}
func TestInsertInto(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestInsertInto(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestInsertInto(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestInsertInto(t, newDoltHarness(t))
}
func TestInsertIgnoreInto(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestInsertIgnoreInto(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestInsertIgnoreInto(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestInsertIgnoreInto(t, newDoltHarness(t))
}
func TestInsertIntoErrors(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestInsertIntoErrors(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestInsertIntoErrors(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestInsertIntoErrors(t, newDoltHarness(t))
}
func TestReplaceInto(t *testing.T) {
@@ -205,195 +155,90 @@ func TestReplaceInto(t *testing.T) {
}
func TestReplaceIntoErrors(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestReplaceIntoErrors(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestReplaceIntoErrors(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestReplaceIntoErrors(t, newDoltHarness(t))
}
func TestUpdate(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestUpdate(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestUpdate(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestUpdate(t, newDoltHarness(t))
}
func TestUpdateErrors(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestUpdateErrors(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestUpdateErrors(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestUpdateErrors(t, newDoltHarness(t))
}
func TestDeleteFrom(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestDelete(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestDelete(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestDelete(t, newDoltHarness(t))
}
func TestDeleteFromErrors(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestDeleteErrors(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestDeleteErrors(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestDeleteErrors(t, newDoltHarness(t))
}
func TestTruncate(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestTruncate(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestTruncate(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestTruncate(t, newDoltHarness(t))
}
func TestScripts(t *testing.T) {
skipped := []string{
"create index r_c0 on r (c0);",
}
t.Run("no transactions", func(t *testing.T) {
enginetest.TestScripts(t, newDoltHarness(t).WithSkippedQueries(skipped))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestScripts(t, newDoltHarness(t).withTransactionsEnabled(true).WithSkippedQueries(skipped))
})
enginetest.TestScripts(t, newDoltHarness(t).WithSkippedQueries(skipped))
}
func TestCreateTable(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestCreateTable(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestCreateTable(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestCreateTable(t, newDoltHarness(t))
}
func TestDropTable(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestDropTable(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestDropTable(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestDropTable(t, newDoltHarness(t))
}
func TestRenameTable(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestRenameTable(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestRenameTable(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestRenameTable(t, newDoltHarness(t))
}
func TestRenameColumn(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestRenameColumn(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestRenameColumn(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestRenameColumn(t, newDoltHarness(t))
}
func TestAddColumn(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestAddColumn(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestAddColumn(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestAddColumn(t, newDoltHarness(t))
}
func TestModifyColumn(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestModifyColumn(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestModifyColumn(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestModifyColumn(t, newDoltHarness(t))
}
func TestDropColumn(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestDropColumn(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestDropColumn(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestDropColumn(t, newDoltHarness(t))
}
func TestCreateForeignKeys(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestCreateForeignKeys(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestCreateForeignKeys(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestCreateForeignKeys(t, newDoltHarness(t))
}
func TestDropForeignKeys(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestDropForeignKeys(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestDropForeignKeys(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestDropForeignKeys(t, newDoltHarness(t))
}
func TestCreateCheckConstraints(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestCreateCheckConstraints(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestCreateCheckConstraints(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestCreateCheckConstraints(t, newDoltHarness(t))
}
func TestChecksOnInsert(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestChecksOnInsert(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestChecksOnInsert(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestChecksOnInsert(t, newDoltHarness(t))
}
func TestChecksOnUpdate(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestChecksOnUpdate(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestChecksOnUpdate(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestChecksOnUpdate(t, newDoltHarness(t))
}
func TestDisallowedCheckConstraints(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestDisallowedCheckConstraints(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestDisallowedCheckConstraints(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestDisallowedCheckConstraints(t, newDoltHarness(t))
}
func TestDropCheckConstraints(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestDropCheckConstraints(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestDropCheckConstraints(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestDropCheckConstraints(t, newDoltHarness(t))
}
func TestExplode(t *testing.T) {
@@ -402,108 +247,51 @@ func TestExplode(t *testing.T) {
}
func TestReadOnly(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestReadOnly(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestReadOnly(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestReadOnly(t, newDoltHarness(t))
}
func TestViews(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestViews(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestViews(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestViews(t, newDoltHarness(t))
}
func TestVersionedViews(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestVersionedViews(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestVersionedViews(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestVersionedViews(t, newDoltHarness(t))
}
func TestNaturalJoin(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestNaturalJoin(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestNaturalJoin(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestNaturalJoin(t, newDoltHarness(t))
}
func TestNaturalJoinEqual(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestNaturalJoinEqual(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestNaturalJoinEqual(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestNaturalJoinEqual(t, newDoltHarness(t))
}
func TestNaturalJoinDisjoint(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestNaturalJoinEqual(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestNaturalJoinEqual(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestNaturalJoinEqual(t, newDoltHarness(t))
}
func TestInnerNestedInNaturalJoins(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestInnerNestedInNaturalJoins(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestInnerNestedInNaturalJoins(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestInnerNestedInNaturalJoins(t, newDoltHarness(t))
}
func TestColumnDefaults(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestColumnDefaults(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestColumnDefaults(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestColumnDefaults(t, newDoltHarness(t))
}
// TestVariables runs the engine variable tests exactly once per process.
func TestVariables(t *testing.T) {
	// Can't run these tests more than once because they set and make assertions about global vars, which obviously
	// persist outside sessions.
	enginetest.TestVariables(t, newDoltHarness(t))
}
func TestVariableErrors(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestVariableErrors(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestVariableErrors(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestVariableErrors(t, newDoltHarness(t))
}
func TestJsonScripts(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestJsonScripts(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestJsonScripts(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestJsonScripts(t, newDoltHarness(t))
}
func TestTriggers(t *testing.T) {
t.Run("no transactions", func(t *testing.T) {
enginetest.TestTriggers(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestTriggers(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestTriggers(t, newDoltHarness(t))
}
func TestStoredProcedures(t *testing.T) {
@@ -516,21 +304,102 @@ func TestStoredProcedures(t *testing.T) {
}
enginetest.ProcedureLogicTests = tests
t.Run("no transactions", func(t *testing.T) {
enginetest.TestStoredProcedures(t, newDoltHarness(t))
})
t.Run("with transactions", func(t *testing.T) {
enginetest.TestStoredProcedures(t, newDoltHarness(t).withTransactionsEnabled(true))
})
enginetest.TestStoredProcedures(t, newDoltHarness(t))
}
func TestTransactions(t *testing.T) {
enginetest.TestTransactionScripts(t, newDoltHarness(t).withTransactionsEnabled(true))
enginetest.TestTransactionScripts(t, newDoltHarness(t))
for _, script := range DoltTransactionTests {
enginetest.TestTransactionScript(t, newDoltHarness(t).withTransactionsEnabled(true), script)
enginetest.TestTransactionScript(t, newDoltHarness(t), script)
}
}
// TestSingleTransactionScript is a convenience method for debugging a single transaction test. Unskip and set to the
// desired test.
func TestSingleTransactionScript(t *testing.T) {
//t.Skip()
script := enginetest.TransactionTest{
Name: "rollback",
SetUpScript: []string{
"create table t (x int primary key, y int)",
"insert into t values (1, 1)",
},
Assertions: []enginetest.ScriptTestAssertion{
{
Query: "/* client a */ set autocommit = off",
Expected: []sql.Row{{}},
},
{
Query: "/* client b */ set autocommit = off",
Expected: []sql.Row{{}},
},
{
Query: "/* client a */ start transaction",
Expected: []sql.Row{},
},
{
Query: "/* client b */ start transaction",
Expected: []sql.Row{},
},
{
Query: "/* client a */ insert into t values (2, 2)",
Expected: []sql.Row{{sql.NewOkResult(1)}},
},
{
Query: "/* client b */ insert into t values (3, 3)",
Expected: []sql.Row{{sql.NewOkResult(1)}},
},
{
Query: "/* client a */ select * from t order by x",
Expected: []sql.Row{{1, 1}, {2, 2}},
},
{
Query: "/* client b */ commit",
Expected: []sql.Row{},
},
{
Query: "/* client a */ select * from t order by x",
Expected: []sql.Row{{1, 1}, {2, 2}},
},
{
Query: "/* client a */ rollback",
Expected: []sql.Row{},
},
{
Query: "/* client a */ select * from t order by x",
Expected: []sql.Row{{1, 1}, {3, 3}},
},
{
Query: "/* client a */ insert into t values (2, 2)",
Expected: []sql.Row{{sql.NewOkResult(1)}},
},
{
Query: "/* client b */ select * from t order by x",
Expected: []sql.Row{{1, 1}, {3, 3}},
},
{
Query: "/* client a */ commit",
Expected: []sql.Row{},
},
{
Query: "/* client b */ select * from t order by x",
Expected: []sql.Row{{1, 1}, {3, 3}},
},
{
Query: "/* client b */ rollback",
Expected: []sql.Row{},
},
{
Query: "/* client b */ select * from t order by x",
Expected: []sql.Row{{1, 1}, {2, 2}, {3, 3}},
},
},
}
enginetest.TestTransactionScript(t, newDoltHarness(t), script)
}
// TestSystemTableQueries runs the canned system-table query suite against a fresh dolt harness,
// skipping the queries listed in BrokenSystemTableQueries.
func TestSystemTableQueries(t *testing.T) {
	enginetest.RunQueryTests(t, newDoltHarness(t), BrokenSystemTableQueries)
}
@@ -25,17 +25,19 @@ import (
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/libraries/doltcore/dtestutils"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
)
type DoltHarness struct {
t *testing.T
session *sqle.DoltSession
transactionsEnabled bool
databases []sqle.Database
parallelism int
skippedQueries []string
t *testing.T
env *env.DoltEnv
session *dsess.Session
databases []sqle.Database
parallelism int
skippedQueries []string
}
var _ enginetest.Harness = (*DoltHarness)(nil)
@@ -47,7 +49,7 @@ var _ enginetest.KeylessTableHarness = (*DoltHarness)(nil)
var _ enginetest.ReadOnlyDatabaseHarness = (*DoltHarness)(nil)
func newDoltHarness(t *testing.T) *DoltHarness {
session, err := sqle.NewDoltSession(sql.NewEmptyContext(), enginetest.NewBaseSession(), "test", "email@test.com")
session, err := dsess.NewSession(sql.NewEmptyContext(), enginetest.NewBaseSession(), "test", "email@test.com")
require.NoError(t, err)
return &DoltHarness{
t: t,
@@ -56,18 +58,6 @@ func newDoltHarness(t *testing.T) *DoltHarness {
}
}
// withTransactionsEnabled returns a copy of this harness with transactions enabled or not for all sessions
func (d DoltHarness) withTransactionsEnabled(enabled bool) *DoltHarness {
d.transactionsEnabled = enabled
d.setTransactionSessionVar(d.session, enabled)
return &d
}
func (d DoltHarness) setTransactionSessionVar(session *sqle.DoltSession, enabled bool) {
err := session.SetSessionVariable(sql.NewEmptyContext(), sqle.TransactionsEnabledSysVar, enabled)
require.NoError(d.t, err)
}
var defaultSkippedQueries = []string{
"show variables", // we set extra variables
"show create table fk_tbl", // we create an extra key for the FK that vanilla gms does not
@@ -126,17 +116,16 @@ func (d *DoltHarness) NewContext() *sql.Context {
}
func (d DoltHarness) NewSession() *sql.Context {
session, err := sqle.NewDoltSession(sql.NewEmptyContext(), enginetest.NewBaseSession(), "test", "email@test.com")
session, err := dsess.NewSession(sql.NewEmptyContext(), enginetest.NewBaseSession(), "test", "email@test.com")
require.NoError(d.t, err)
d.setTransactionSessionVar(session, d.transactionsEnabled)
ctx := sql.NewContext(
context.Background(),
sql.WithSession(session))
for _, db := range d.databases {
err := session.AddDB(ctx, db, db.DbData())
dbState := getDbState(d.t, db, d.env)
err := session.AddDB(ctx, dbState)
require.NoError(d.t, err)
}
@@ -161,22 +150,22 @@ func (d *DoltHarness) NewDatabase(name string) sql.Database {
func (d *DoltHarness) NewDatabases(names ...string) []sql.Database {
dEnv := dtestutils.CreateTestEnv()
d.env = dEnv
// TODO: it should be safe to reuse a session with a new database, but it isn't in all cases. Particularly, if you
// have a database that only ever receives read queries, and then you re-use its session for a new database with
// the same name, the first write query will panic on dangling references in the noms layer. Not sure why this is
// happening, but it only happens as a result of this test setup.
var err error
d.session, err = sqle.NewDoltSession(sql.NewEmptyContext(), enginetest.NewBaseSession(), "test", "email@test.com")
d.session, err = dsess.NewSession(sql.NewEmptyContext(), enginetest.NewBaseSession(), "test", "email@test.com")
require.NoError(d.t, err)
d.setTransactionSessionVar(d.session, d.transactionsEnabled)
var dbs []sql.Database
d.databases = nil
for _, name := range names {
db := sqle.NewDatabase(name, dEnv.DbData())
require.NoError(d.t, d.session.AddDB(enginetest.NewContext(d), db, db.DbData()))
dbState := getDbState(d.t, db, dEnv)
require.NoError(d.t, d.session.AddDB(enginetest.NewContext(d), dbState))
dbs = append(dbs, db)
d.databases = append(d.databases, db)
}
@@ -191,6 +180,24 @@ func (d *DoltHarness) NewReadOnlyDatabases(names ...string) (dbs []sql.ReadOnlyD
return
}
// getDbState resolves the head commit and working set for the given environment and packages
// them, along with db and its data access objects, into the initial session state for a new
// database. Any resolution failure fails the test immediately.
func getDbState(t *testing.T, db sql.Database, dEnv *env.DoltEnv) dsess.InitialDbState {
	ctx := context.Background()

	headSpec := dEnv.RepoStateReader().CWBHeadSpec()
	commit, err := dEnv.DoltDB.Resolve(ctx, headSpec, dEnv.RepoStateReader().CWBHeadRef())
	require.NoError(t, err)

	workingSet, err := dEnv.WorkingSet(ctx)
	require.NoError(t, err)

	return dsess.InitialDbState{
		Db:         db,
		HeadCommit: commit,
		WorkingSet: workingSet,
		DbData:     dEnv.DbData(),
	}
}
func (d *DoltHarness) NewTable(db sql.Database, name string, schema sql.Schema) (sql.Table, error) {
var err error
if ro, ok := db.(sqle.ReadOnlyDatabase); ok {
@@ -248,18 +255,18 @@ func (d *DoltHarness) SnapshotTable(db sql.VersionedDatabase, name string, asOf
ctx := enginetest.NewContext(d)
_, iter, err := e.Query(ctx,
"set @@"+sqle.HeadKey(db.Name())+" = COMMIT('-m', 'test commit');")
"set @@"+dsess.HeadKey(db.Name())+" = COMMIT('-m', 'test commit');")
require.NoError(d.t, err)
_, err = sql.RowIterToRows(ctx, iter)
require.NoError(d.t, err)
headHash, err := ctx.GetSessionVariable(ctx, sqle.HeadKey(db.Name()))
headHash, err := ctx.GetSessionVariable(ctx, dsess.HeadKey(db.Name()))
require.NoError(d.t, err)
ctx = enginetest.NewContext(d)
// TODO: there's a bug in test setup with transactions, where the HEAD session var gets overwritten on transaction
// start, so we quote it here instead
// query := "insert into dolt_branches (name, hash) values ('" + asOfString + "', @@" + sqle.HeadKey(ddb.Name()) + ")"
// query := "insert into dolt_branches (name, hash) values ('" + asOfString + "', @@" + dsess.HeadKey(ddb.Name()) + ")"
query := "insert into dolt_branches (name, hash) values ('" + asOfString + "', '" + headHash.(string) + "')"
_, iter, err = e.Query(ctx,
@@ -28,7 +28,7 @@ import (
func TestDoltTransactionCommitOneClient(t *testing.T) {
// In this test, we're setting only one client to match transaction commits to dolt commits.
// Autocommit is disabled for the enabled client, as it's the recommended way to use this feature.
harness := newDoltHarness(t).withTransactionsEnabled(true)
harness := newDoltHarness(t)
enginetest.TestTransactionScript(t, harness, enginetest.TransactionTest{
Name: "dolt commit after transaction commit one client",
SetUpScript: []string{
@@ -134,6 +134,7 @@ func TestDoltTransactionCommitOneClient(t *testing.T) {
},
},
})
db := harness.databases[0].GetDoltDB()
cs, err := doltdb.NewCommitSpec("HEAD")
require.NoError(t, err)
@@ -157,7 +158,7 @@ func TestDoltTransactionCommitOneClient(t *testing.T) {
func TestDoltTransactionCommitTwoClients(t *testing.T) {
// In this test, we're setting both clients to match transaction commits to dolt commits.
// Autocommit is disabled, as it's the recommended way to use this feature.
harness := newDoltHarness(t).withTransactionsEnabled(true)
harness := newDoltHarness(t)
enginetest.TestTransactionScript(t, harness, enginetest.TransactionTest{
Name: "dolt commit after transaction commit two clients",
SetUpScript: []string{
@@ -280,7 +281,7 @@ func TestDoltTransactionCommitTwoClients(t *testing.T) {
func TestDoltTransactionCommitAutocommit(t *testing.T) {
// In this test, each insertion from both clients cause a commit as autocommit is enabled.
// Not the recommended way to use the feature, but it's permitted.
harness := newDoltHarness(t).withTransactionsEnabled(true)
harness := newDoltHarness(t)
enginetest.TestTransactionScript(t, harness, enginetest.TransactionTest{
Name: "dolt commit after transaction commit autocommit",
SetUpScript: []string{
@@ -212,7 +212,7 @@ func setupHistoryTests(t *testing.T) *env.DoltEnv {
// get commit hashes from the log table
q := "select commit_hash, date from dolt_log order by date desc;"
rows, err := sqle.ExecuteSelect(dEnv, dEnv.DoltDB, root, q)
rows, err := sqle.ExecuteSelect(t, dEnv, dEnv.DoltDB, root, q)
require.NoError(t, err)
require.Equal(t, 5, len(rows))
HEAD = rows[0][0].(string)
@@ -234,7 +234,7 @@ func testHistoryTable(t *testing.T, test historyTableTest, dEnv *env.DoltEnv) {
root, err := dEnv.WorkingRoot(ctx)
require.NoError(t, err)
actRows, err := sqle.ExecuteSelect(dEnv, dEnv.DoltDB, root, test.query)
actRows, err := sqle.ExecuteSelect(t, dEnv, dEnv.DoltDB, root, test.query)
require.NoError(t, err)
require.Equal(t, len(test.rows), len(actRows))
@@ -130,7 +130,7 @@ func testJsonValue(t *testing.T, test jsonValueTest, setupCommon []testCommand)
root, err := dEnv.WorkingRoot(ctx)
require.NoError(t, err)
actRows, err := sqle.ExecuteSelect(dEnv, dEnv.DoltDB, root, test.query)
actRows, err := sqle.ExecuteSelect(t, dEnv, dEnv.DoltDB, root, test.query)
require.NoError(t, err)
require.Equal(t, len(test.rows), len(actRows))
@@ -20131,7 +20131,7 @@ func TestCreateTables(t *testing.T) {
root, _ := dEnv.WorkingRoot(ctx)
var err error
root, err = sqle.ExecuteSql(dEnv, root, createTables)
root, err = sqle.ExecuteSql(t, dEnv, root, createTables)
require.NoError(t, err)
table, _, err := root.GetTable(ctx, "daily_summary")
@@ -20150,10 +20150,10 @@ func TestInserts(t *testing.T) {
root, _ := dEnv.WorkingRoot(ctx)
var err error
root, err = sqle.ExecuteSql(dEnv, root, createTables)
root, err = sqle.ExecuteSql(t, dEnv, root, createTables)
require.NoError(t, err)
root, err = sqle.ExecuteSql(dEnv, root, insertRows)
root, err = sqle.ExecuteSql(t, dEnv, root, insertRows)
require.NoError(t, err)
table, _, err := root.GetTable(ctx, "daily_summary")
@@ -20176,13 +20176,13 @@ func TestInsertsWithIndexes(t *testing.T) {
root, _ := dEnv.WorkingRoot(ctx)
var err error
root, err = sqle.ExecuteSql(dEnv, root, createTables)
root, err = sqle.ExecuteSql(t, dEnv, root, createTables)
require.NoError(t, err)
root, err = sqle.ExecuteSql(dEnv, root, createIndexes)
root, err = sqle.ExecuteSql(t, dEnv, root, createIndexes)
require.NoError(t, err)
root, err = sqle.ExecuteSql(dEnv, root, insertRows)
root, err = sqle.ExecuteSql(t, dEnv, root, insertRows)
require.NoError(t, err)
table, _, err := root.GetTable(ctx, "daily_summary")
@@ -20211,20 +20211,18 @@ func TestJoin(t *testing.T) {
root, _ := dEnv.WorkingRoot(ctx)
var err error
root, err = sqle.ExecuteSql(dEnv, root, createTables)
root, err = sqle.ExecuteSql(t, dEnv, root, createTables)
require.NoError(t, err)
root, err = sqle.ExecuteSql(dEnv, root, insertRows)
root, err = sqle.ExecuteSql(t, dEnv, root, insertRows)
require.NoError(t, err)
rows, err := sqle.ExecuteSelect(dEnv, dEnv.DoltDB, root,
`select Type, d.Symbol, Country, TradingDate, Open, High, Low, Close, Volume, OpenInt, Name, Sector, IPOYear
rows, err := sqle.ExecuteSelect(t, dEnv, dEnv.DoltDB, root, `select Type, d.Symbol, Country, TradingDate, Open, High, Low, Close, Volume, OpenInt, Name, Sector, IPOYear
from daily_summary d join symbols t on d.Symbol = t.Symbol order by d.Symbol, Country, TradingDate`)
require.NoError(t, err)
assert.Equal(t, 5210, len(rows))
expectedJoinRows, err := sqle.ExecuteSelect(dEnv, dEnv.DoltDB, root,
`select * from join_result order by symbol, country, TradingDate`)
expectedJoinRows, err := sqle.ExecuteSelect(t, dEnv, dEnv.DoltDB, root, `select * from join_result order by symbol, country, TradingDate`)
require.NoError(t, err)
assertResultRowsEqual(t, expectedJoinRows, rows)
}
@@ -20261,10 +20259,10 @@ func TestExplain(t *testing.T) {
root, _ := dEnv.WorkingRoot(ctx)
var err error
root, err = sqle.ExecuteSql(dEnv, root, createTables)
root, err = sqle.ExecuteSql(t, dEnv, root, createTables)
require.NoError(t, err)
rows, err := sqle.ExecuteSelect(dEnv, dEnv.DoltDB, root, "explain select * from daily_summary d join symbols t on d.Symbol = t.Symbol")
rows, err := sqle.ExecuteSelect(t, dEnv, dEnv.DoltDB, root, "explain select * from daily_summary d join symbols t on d.Symbol = t.Symbol")
require.NoError(t, err)
rowStrings := make([]string, len(rows))
for i, row := range rows {
@@ -34,6 +34,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
dsql "github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/store/types"
)
@@ -49,7 +50,7 @@ type DoltHarness struct {
Version string
engine *sqle.Engine
sess *dsql.DoltSession
sess *dsess.Session
idxReg *sql.IndexRegistry
viewReg *sql.ViewRegistry
}
@@ -138,7 +139,7 @@ func innerInit(h *DoltHarness, dEnv *env.DoltEnv) error {
return err
}
h.sess = dsql.DefaultDoltSession()
h.sess = dsess.DefaultSession()
h.idxReg = sql.NewIndexRegistry()
h.viewReg = sql.NewViewRegistry()
@@ -154,8 +155,8 @@ func innerInit(h *DoltHarness, dEnv *env.DoltEnv) error {
dsqlDB := db.(dsql.Database)
dsqlDBs[i] = dsqlDB
sess := dsql.DSessFromSess(ctx.Session)
err := sess.AddDB(ctx, dsqlDB, dsqlDB.DbData())
sess := dsess.DSessFromSess(ctx.Session)
err := sess.AddDB(ctx, getDbState(db, dEnv))
if err != nil {
return err
@@ -186,6 +187,28 @@ func innerInit(h *DoltHarness, dEnv *env.DoltEnv) error {
return nil
}
func getDbState(db sql.Database, dEnv *env.DoltEnv) dsess.InitialDbState {
ctx := context.Background()
head := dEnv.RepoStateReader().CWBHeadSpec()
headCommit, err := dEnv.DoltDB.Resolve(ctx, head, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
panic(err)
}
ws, err := dEnv.WorkingSet(ctx)
if err != nil {
panic(err)
}
return dsess.InitialDbState{
Db: db,
HeadCommit: headCommit,
WorkingSet: ws,
DbData: dEnv.DbData(),
}
}
// We cheat a little at these tests. A great many of them use tables without primary keys, which we don't currently
// support. Until we do, we just make every column in such tables part of the primary key.
func normalizeStatement(statement string) string {
@@ -28,6 +28,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/dtestutils"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/lookup"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
"github.com/dolthub/dolt/go/store/types"
@@ -38,7 +39,7 @@ func setupMergeableIndexes(t *testing.T, tableName, insertQuery string) (*sqle.E
root, err := dEnv.WorkingRoot(context.Background())
require.NoError(t, err)
db := NewDatabase("dolt", dEnv.DbData())
engine, sqlCtx, err := NewTestEngine(context.Background(), db, root)
engine, sqlCtx, err := NewTestEngine(t, dEnv, context.Background(), db, root)
require.NoError(t, err)
_, iter, err := engine.Query(sqlCtx, fmt.Sprintf(`CREATE TABLE %s (
@@ -99,7 +100,7 @@ func setupMergeableIndexes(t *testing.T, tableName, insertQuery string) (*sqle.E
engine.AddDatabase(mergeableDb)
// Get an updated root to use for the rest of the test
root, _ = DSessFromSess(sqlCtx.Session).GetRoot(mergeableDb.Name())
root, _ = dsess.DSessFromSess(sqlCtx.Session).GetRoot(mergeableDb.Name())
return engine, dEnv, mergeableDb, []*indexTuple{
idxv1ToTuple,
@@ -23,6 +23,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/lookup"
)
@@ -1355,8 +1356,10 @@ func TestMergeableIndexes(t *testing.T) {
ctx := context.Background()
sqlCtx := NewTestSQLCtx(ctx)
session := DSessFromSess(sqlCtx.Session)
err := session.AddDB(sqlCtx, db, denv.DbData())
session := dsess.DSessFromSess(sqlCtx.Session)
dbState := getDbState(t, db, denv)
err := session.AddDB(sqlCtx, dbState)
require.NoError(t, err)
sqlCtx.SetCurrentDatabase(db.Name())
err = session.SetRoot(sqlCtx, db.Name(), initialRoot)
@@ -1561,8 +1564,9 @@ func TestMergeableIndexesNulls(t *testing.T) {
ctx := context.Background()
sqlCtx := NewTestSQLCtx(ctx)
session := DSessFromSess(sqlCtx.Session)
err := session.AddDB(sqlCtx, db, denv.DbData())
session := dsess.DSessFromSess(sqlCtx.Session)
dbState := getDbState(t, db, denv)
err := session.AddDB(sqlCtx, dbState)
require.NoError(t, err)
sqlCtx.SetCurrentDatabase(db.Name())
err = session.SetRoot(sqlCtx, db.Name(), initialRoot)
@@ -25,6 +25,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/dtestutils"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
"github.com/dolthub/dolt/go/store/types"
)
@@ -33,7 +34,8 @@ func TestSchemaTableRecreation(t *testing.T) {
ctx := NewTestSQLCtx(context.Background())
dEnv := dtestutils.CreateTestEnv()
db := NewDatabase("dolt", dEnv.DbData())
err := DSessFromSess(ctx.Session).AddDB(ctx, db, db.DbData())
dbState := getDbState(t, db, dEnv)
err := dsess.DSessFromSess(ctx.Session).AddDB(ctx, dbState)
require.NoError(t, err)
ctx.SetCurrentDatabase(db.Name())
@@ -21,22 +21,23 @@ import (
sqle "github.com/dolthub/go-mysql-server"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/utils/tracing"
)
// These functions cannot be in the sqlfmt package as the reliance on the sqle package creates a circular reference.
func PrepareCreateTableStmt(ctx context.Context, sqlDb sql.Database) (*sql.Context, *sqle.Engine, *DoltSession) {
dsess := DefaultDoltSession()
func PrepareCreateTableStmt(ctx context.Context, sqlDb sql.Database) (*sql.Context, *sqle.Engine, *dsess.Session) {
sess := dsess.DefaultSession()
sqlCtx := sql.NewContext(ctx,
sql.WithSession(dsess),
sql.WithSession(sess),
sql.WithIndexRegistry(sql.NewIndexRegistry()),
sql.WithViewRegistry(sql.NewViewRegistry()),
sql.WithTracer(tracing.Tracer(ctx)))
engine := sqle.NewDefault()
engine.AddDatabase(sqlDb)
dsess.SetCurrentDatabase(sqlDb.Name())
return sqlCtx, engine, dsess
sqlCtx.SetCurrentDatabase(sqlDb.Name())
return sqlCtx, engine, sess
}
func GetCreateTableStmt(ctx *sql.Context, engine *sqle.Engine, tableName string) (string, error) {
+7 -6
View File
@@ -28,6 +28,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/dtestutils"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
. "github.com/dolthub/dolt/go/libraries/doltcore/sql/sqltestutil"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/store/types"
)
@@ -64,9 +65,9 @@ func TestSqlBatchInserts(t *testing.T) {
root, _ := dEnv.WorkingRoot(ctx)
db := NewDatabase("dolt", dEnv.DbData())
engine, sqlCtx, err := NewTestEngine(ctx, db, root)
engine, sqlCtx, err := NewTestEngine(t, dEnv, ctx, db, root)
require.NoError(t, err)
DSessFromSess(sqlCtx.Session).EnableBatchedMode()
dsess.DSessFromSess(sqlCtx.Session).EnableBatchedMode()
for _, stmt := range insertStatements {
_, rowIter, err := engine.Query(sqlCtx, stmt)
@@ -153,9 +154,9 @@ func TestSqlBatchInsertIgnoreReplace(t *testing.T) {
root, _ := dEnv.WorkingRoot(ctx)
db := NewDatabase("dolt", dEnv.DbData())
engine, sqlCtx, err := NewTestEngine(ctx, db, root)
engine, sqlCtx, err := NewTestEngine(t, dEnv, ctx, db, root)
require.NoError(t, err)
DSessFromSess(sqlCtx.Session).EnableBatchedMode()
dsess.DSessFromSess(sqlCtx.Session).EnableBatchedMode()
for _, stmt := range insertStatements {
_, rowIter, err := engine.Query(sqlCtx, stmt)
@@ -192,9 +193,9 @@ func TestSqlBatchInsertErrors(t *testing.T) {
root, _ := dEnv.WorkingRoot(ctx)
db := NewDatabase("dolt", dEnv.DbData())
engine, sqlCtx, err := NewTestEngine(ctx, db, root)
engine, sqlCtx, err := NewTestEngine(t, dEnv, ctx, db, root)
require.NoError(t, err)
DSessFromSess(sqlCtx.Session).EnableBatchedMode()
dsess.DSessFromSess(sqlCtx.Session).EnableBatchedMode()
_, rowIter, err := engine.Query(sqlCtx, `insert into people (id, first_name, last_name, is_married, age, rating, uuid, num_episodes) values
(0, "Maggie", "Simpson", false, 1, 5.1, '00000000-0000-0000-0000-000000000007', 677)`)
+27 -27
View File
@@ -267,7 +267,7 @@ func TestCreateTable(t *testing.T) {
ctx := context.Background()
root, _ := dEnv.WorkingRoot(ctx)
updatedRoot, err := ExecuteSql(dEnv, root, tt.query)
updatedRoot, err := ExecuteSql(t, dEnv, root, tt.query)
if tt.expectedErr == "" {
require.NoError(t, err)
@@ -341,7 +341,7 @@ func TestDropTable(t *testing.T) {
ctx := context.Background()
root, _ := dEnv.WorkingRoot(ctx)
updatedRoot, err := ExecuteSql(dEnv, root, tt.query)
updatedRoot, err := ExecuteSql(t, dEnv, root, tt.query)
if tt.expectedErr == "" {
require.NoError(t, err)
@@ -474,7 +474,7 @@ func TestAddColumn(t *testing.T) {
ctx := context.Background()
root, _ := dEnv.WorkingRoot(ctx)
updatedRoot, err := ExecuteSql(dEnv, root, tt.query)
updatedRoot, err := ExecuteSql(t, dEnv, root, tt.query)
if tt.expectedErr == "" {
require.NoError(t, err)
@@ -628,7 +628,7 @@ func TestModifyAndChangeColumn(t *testing.T) {
ctx := context.Background()
root, _ := dEnv.WorkingRoot(ctx)
updatedRoot, err := ExecuteSql(dEnv, root, tt.query)
updatedRoot, err := ExecuteSql(t, dEnv, root, tt.query)
if tt.expectedErr == "" {
require.NoError(t, err)
@@ -799,10 +799,10 @@ func TestModifyColumnType(t *testing.T) {
var err error
for _, stmt := range test.setupStmts {
root, err = ExecuteSql(dEnv, root, stmt)
root, err = ExecuteSql(t, dEnv, root, stmt)
require.NoError(t, err)
}
root, err = ExecuteSql(dEnv, root, test.alterStmt)
root, err = ExecuteSql(t, dEnv, root, test.alterStmt)
if test.expectedErr == false {
require.NoError(t, err)
} else {
@@ -897,7 +897,7 @@ func TestDropColumn(t *testing.T) {
ctx := context.Background()
root, _ := dEnv.WorkingRoot(ctx)
updatedRoot, err := ExecuteSql(dEnv, root, tt.query)
updatedRoot, err := ExecuteSql(t, dEnv, root, tt.query)
if tt.expectedErr == "" {
require.NoError(t, err)
@@ -1013,7 +1013,7 @@ func TestRenameColumn(t *testing.T) {
ctx := context.Background()
root, _ := dEnv.WorkingRoot(ctx)
updatedRoot, err := ExecuteSql(dEnv, root, tt.query)
updatedRoot, err := ExecuteSql(t, dEnv, root, tt.query)
if tt.expectedErr == "" {
require.NoError(t, err)
@@ -1115,7 +1115,7 @@ func TestRenameTable(t *testing.T) {
ctx := context.Background()
root, _ := dEnv.WorkingRoot(ctx)
updatedRoot, err := ExecuteSql(dEnv, root, tt.query)
updatedRoot, err := ExecuteSql(t, dEnv, root, tt.query)
if len(tt.expectedErr) > 0 {
require.Error(t, err)
assert.Contains(t, err.Error(), tt.expectedErr)
@@ -1445,7 +1445,7 @@ func TestIndexOverwrite(t *testing.T) {
if err != nil {
panic(err)
}
root, err = ExecuteSql(dEnv, root, `
root, err = ExecuteSql(t, dEnv, root, `
CREATE TABLE parent (
pk bigint PRIMARY KEY,
v1 bigint,
@@ -1484,15 +1484,15 @@ INSERT INTO child_non_unq VALUES ('1', 1), ('2', NULL), ('3', 3), ('4', 3), ('5'
`)
// test index creation
require.NoError(t, err)
root, err = ExecuteSql(dEnv, root, "CREATE INDEX abc ON child (parent_value);")
root, err = ExecuteSql(t, dEnv, root, "CREATE INDEX abc ON child (parent_value);")
require.NoError(t, err)
_, err = ExecuteSql(dEnv, root, "CREATE INDEX abc_idx ON child_idx (parent_value);")
_, err = ExecuteSql(t, dEnv, root, "CREATE INDEX abc_idx ON child_idx (parent_value);")
if assert.Error(t, err) {
assert.Contains(t, err.Error(), "duplicate")
}
root, err = ExecuteSql(dEnv, root, "CREATE UNIQUE INDEX abc_unq ON child_unq (parent_value);")
root, err = ExecuteSql(t, dEnv, root, "CREATE UNIQUE INDEX abc_unq ON child_unq (parent_value);")
require.NoError(t, err)
_, err = ExecuteSql(dEnv, root, "CREATE UNIQUE INDEX abc_non_unq ON child_non_unq (parent_value);")
_, err = ExecuteSql(t, dEnv, root, "CREATE UNIQUE INDEX abc_non_unq ON child_non_unq (parent_value);")
if assert.Error(t, err) {
assert.Contains(t, err.Error(), "UNIQUE constraint violation")
}
@@ -1514,31 +1514,31 @@ INSERT INTO child_non_unq VALUES ('1', 1), ('2', NULL), ('3', 3), ('4', 3), ('5'
require.Equal(t, "parent_value", fkChildNonUnq.TableIndex)
// insert tests against index
root, err = ExecuteSql(dEnv, root, "INSERT INTO child VALUES ('6', 5)")
root, err = ExecuteSql(t, dEnv, root, "INSERT INTO child VALUES ('6', 5)")
require.NoError(t, err)
root, err = ExecuteSql(dEnv, root, "INSERT INTO child_idx VALUES ('6', 5)")
root, err = ExecuteSql(t, dEnv, root, "INSERT INTO child_idx VALUES ('6', 5)")
require.NoError(t, err)
_, err = ExecuteSql(dEnv, root, "INSERT INTO child_unq VALUES ('6', 5)")
_, err = ExecuteSql(t, dEnv, root, "INSERT INTO child_unq VALUES ('6', 5)")
if assert.Error(t, err) {
assert.True(t, sql.ErrUniqueKeyViolation.Is(err))
}
root, err = ExecuteSql(dEnv, root, "INSERT INTO child_non_unq VALUES ('6', 5)")
root, err = ExecuteSql(t, dEnv, root, "INSERT INTO child_non_unq VALUES ('6', 5)")
require.NoError(t, err)
// insert tests against foreign key
_, err = ExecuteSql(dEnv, root, "INSERT INTO child VALUES ('9', 9)")
_, err = ExecuteSql(t, dEnv, root, "INSERT INTO child VALUES ('9', 9)")
if assert.Error(t, err) {
assert.Contains(t, err.Error(), "Foreign key violation")
}
_, err = ExecuteSql(dEnv, root, "INSERT INTO child_idx VALUES ('9', 9)")
_, err = ExecuteSql(t, dEnv, root, "INSERT INTO child_idx VALUES ('9', 9)")
if assert.Error(t, err) {
assert.Contains(t, err.Error(), "Foreign key violation")
}
_, err = ExecuteSql(dEnv, root, "INSERT INTO child_unq VALUES ('9', 9)")
_, err = ExecuteSql(t, dEnv, root, "INSERT INTO child_unq VALUES ('9', 9)")
if assert.Error(t, err) {
assert.Contains(t, err.Error(), "Foreign key violation")
}
_, err = ExecuteSql(dEnv, root, "INSERT INTO child_non_unq VALUES ('9', 9)")
_, err = ExecuteSql(t, dEnv, root, "INSERT INTO child_non_unq VALUES ('9', 9)")
if assert.Error(t, err) {
assert.Contains(t, err.Error(), "Foreign key violation")
}
@@ -1550,7 +1550,7 @@ func TestCreateIndexUnique(t *testing.T) {
if err != nil {
panic(err)
}
root, err = ExecuteSql(dEnv, root, `
root, err = ExecuteSql(t, dEnv, root, `
CREATE TABLE pass_unique (
pk1 BIGINT PRIMARY KEY,
v1 BIGINT,
@@ -1565,9 +1565,9 @@ INSERT INTO pass_unique VALUES (1, 1, 1), (2, 2, 2), (3, 3, 3);
INSERT INTO fail_unique VALUES (1, 1, 1), (2, 2, 2), (3, 2, 3);
`)
require.NoError(t, err)
root, err = ExecuteSql(dEnv, root, "CREATE UNIQUE INDEX idx_v1 ON pass_unique(v1)")
root, err = ExecuteSql(t, dEnv, root, "CREATE UNIQUE INDEX idx_v1 ON pass_unique(v1)")
assert.NoError(t, err)
root, err = ExecuteSql(dEnv, root, "CREATE UNIQUE INDEX idx_v1 ON fail_unique(v1)")
root, err = ExecuteSql(t, dEnv, root, "CREATE UNIQUE INDEX idx_v1 ON fail_unique(v1)")
if assert.Error(t, err) {
assert.Contains(t, strings.ToLower(err.Error()), "unique")
}
@@ -1583,7 +1583,7 @@ func schemasTableDoltSchema() schema.Schema {
func assertFails(t *testing.T, dEnv *env.DoltEnv, query, expectedErr string) {
ctx := context.Background()
root, _ := dEnv.WorkingRoot(ctx)
_, err := ExecuteSql(dEnv, root, query)
_, err := ExecuteSql(t, dEnv, root, query)
require.Error(t, err, query)
assert.Contains(t, err.Error(), expectedErr)
}
@@ -1591,6 +1591,6 @@ func assertFails(t *testing.T, dEnv *env.DoltEnv, query, expectedErr string) {
func assertSucceeds(t *testing.T, dEnv *env.DoltEnv, query string) {
ctx := context.Background()
root, _ := dEnv.WorkingRoot(ctx)
_, err := ExecuteSql(dEnv, root, query)
_, err := ExecuteSql(t, dEnv, root, query)
assert.NoError(t, err, query)
}
+2 -2
View File
@@ -241,7 +241,7 @@ func testDeleteQuery(t *testing.T, test DeleteTest) {
var err error
root, _ := dEnv.WorkingRoot(context.Background())
root, err = executeModify(context.Background(), dEnv, root, test.DeleteQuery)
root, err = executeModify(t, context.Background(), dEnv, root, test.DeleteQuery)
if len(test.ExpectedErr) > 0 {
require.Error(t, err)
return
@@ -249,7 +249,7 @@ func testDeleteQuery(t *testing.T, test DeleteTest) {
require.NoError(t, err)
}
actualRows, sch, err := executeSelect(context.Background(), dEnv, root, test.SelectQuery)
actualRows, sch, err := executeSelect(t, context.Background(), dEnv, root, test.SelectQuery)
require.NoError(t, err)
assert.Equal(t, test.ExpectedRows, actualRows)
+2 -2
View File
@@ -481,7 +481,7 @@ func testInsertQuery(t *testing.T, test InsertTest) {
var err error
root, _ := dEnv.WorkingRoot(context.Background())
root, err = executeModify(context.Background(), dEnv, root, test.InsertQuery)
root, err = executeModify(t, context.Background(), dEnv, root, test.InsertQuery)
if len(test.ExpectedErr) > 0 {
require.Error(t, err)
return
@@ -489,7 +489,7 @@ func testInsertQuery(t *testing.T, test InsertTest) {
require.NoError(t, err)
}
actualRows, sch, err := executeSelect(context.Background(), dEnv, root, test.SelectQuery)
actualRows, sch, err := executeSelect(t, context.Background(), dEnv, root, test.SelectQuery)
require.NoError(t, err)
assert.Equal(t, test.ExpectedRows, actualRows)
@@ -315,7 +315,7 @@ func testReplaceQuery(t *testing.T, test ReplaceTest) {
var err error
root, _ := dEnv.WorkingRoot(context.Background())
root, err = executeModify(context.Background(), dEnv, root, test.ReplaceQuery)
root, err = executeModify(t, context.Background(), dEnv, root, test.ReplaceQuery)
if len(test.ExpectedErr) > 0 {
require.Error(t, err)
return
@@ -323,7 +323,7 @@ func testReplaceQuery(t *testing.T, test ReplaceTest) {
require.NoError(t, err)
}
actualRows, sch, err := executeSelect(context.Background(), dEnv, root, test.SelectQuery)
actualRows, sch, err := executeSelect(t, context.Background(), dEnv, root, test.SelectQuery)
require.NoError(t, err)
assert.Equal(t, test.ExpectedRows, actualRows)
+3 -3
View File
@@ -1544,7 +1544,7 @@ func testSelectQuery(t *testing.T, test SelectTest) {
}
root, _ := dEnv.WorkingRoot(context.Background())
actualRows, sch, err := executeSelect(context.Background(), dEnv, root, test.Query)
actualRows, sch, err := executeSelect(t, context.Background(), dEnv, root, test.Query)
if len(test.ExpectedErr) > 0 {
require.Error(t, err)
// Too much work to synchronize error messages between the two implementations, so for now we'll just assert that an error occurred.
@@ -1588,7 +1588,7 @@ func testSelectDiffQuery(t *testing.T, test SelectTest) {
root, err := cm.GetRootValue()
require.NoError(t, err)
_, err = dEnv.UpdateStagedRoot(ctx, root)
err = dEnv.UpdateStagedRoot(ctx, root)
require.NoError(t, err)
err = dEnv.UpdateWorkingRoot(ctx, root)
@@ -1602,7 +1602,7 @@ func testSelectDiffQuery(t *testing.T, test SelectTest) {
err = dEnv.UpdateWorkingRoot(ctx, root)
require.NoError(t, err)
actualRows, sch, err := executeSelect(ctx, dEnv, root, test.Query)
actualRows, sch, err := executeSelect(t, ctx, dEnv, root, test.Query)
if len(test.ExpectedErr) > 0 {
require.Error(t, err)
return
+2 -2
View File
@@ -439,7 +439,7 @@ func testUpdateQuery(t *testing.T, test UpdateTest) {
var err error
root, _ := dEnv.WorkingRoot(context.Background())
root, err = executeModify(context.Background(), dEnv, root, test.UpdateQuery)
root, err = executeModify(t, context.Background(), dEnv, root, test.UpdateQuery)
if len(test.ExpectedErr) > 0 {
require.Error(t, err)
return
@@ -447,7 +447,7 @@ func testUpdateQuery(t *testing.T, test UpdateTest) {
require.NoError(t, err)
}
actualRows, sch, err := executeSelect(context.Background(), dEnv, root, test.SelectQuery)
actualRows, sch, err := executeSelect(t, context.Background(), dEnv, root, test.SelectQuery)
require.NoError(t, err)
assert.Equal(t, test.ExpectedRows, actualRows)
+6 -5
View File
@@ -18,6 +18,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/go-mysql-server/sql"
@@ -145,10 +146,10 @@ func (te *sqlTableEditor) SetAutoIncrementValue(ctx *sql.Context, val interface{
// Close implements Closer
func (te *sqlTableEditor) Close(ctx *sql.Context) error {
sess := DSessFromSess(ctx.Session)
sess := dsess.DSessFromSess(ctx.Session)
// If we're running in batched mode, don't flush the edits until explicitly told to do so
if sess.batchMode == batched {
if sess.BatchMode == dsess.Batched {
return nil
}
return te.flush(ctx)
@@ -179,11 +180,11 @@ func (te *sqlTableEditor) flush(ctx *sql.Context) error {
}
func (te *sqlTableEditor) setRoot(ctx *sql.Context, newRoot *doltdb.RootValue) error {
dSess := DSessFromSess(ctx.Session)
sess := dsess.DSessFromSess(ctx.Session)
if te.temporary {
return dSess.SetTempTableRoot(ctx, te.dbName, newRoot)
return sess.SetTempTableRoot(ctx, te.dbName, newRoot)
}
return dSess.SetRoot(ctx, te.dbName, newRoot)
return sess.SetRoot(ctx, te.dbName, newRoot)
}
@@ -40,7 +40,7 @@ func setupEditorFkTest(t *testing.T) (*env.DoltEnv, *doltdb.RootValue) {
if err != nil {
panic(err)
}
initialRoot, err := ExecuteSql(dEnv, root, `
initialRoot, err := ExecuteSql(t, dEnv, root, `
CREATE TABLE one (
pk BIGINT PRIMARY KEY,
v1 BIGINT,
@@ -156,7 +156,7 @@ func TestTableEditorForeignKeyCascade(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
dEnv, initialRoot := setupEditorFkTest(t)
testRoot, err := ExecuteSql(dEnv, initialRoot, `
testRoot, err := ExecuteSql(t, dEnv, initialRoot, `
ALTER TABLE two ADD FOREIGN KEY (v1) REFERENCES one(v1) ON DELETE CASCADE ON UPDATE CASCADE;
ALTER TABLE three ADD FOREIGN KEY (v1, v2) REFERENCES two(v1, v2) ON DELETE CASCADE ON UPDATE CASCADE;
`)
@@ -165,7 +165,7 @@ ALTER TABLE three ADD FOREIGN KEY (v1, v2) REFERENCES two(v1, v2) ON DELETE CASC
root := testRoot
for _, sqlStatement := range strings.Split(test.sqlStatement, ";") {
var err error
root, err = executeModify(context.Background(), dEnv, root, sqlStatement)
root, err = executeModify(t, context.Background(), dEnv, root, sqlStatement)
require.NoError(t, err)
}
@@ -205,14 +205,14 @@ func TestTableEditorForeignKeySetNull(t *testing.T) {
t.Run(test.sqlStatement, func(t *testing.T) {
dEnv, initialRoot := setupEditorFkTest(t)
testRoot, err := ExecuteSql(dEnv, initialRoot, `
testRoot, err := ExecuteSql(t, dEnv, initialRoot, `
ALTER TABLE two ADD FOREIGN KEY (v1) REFERENCES one(v1) ON DELETE SET NULL ON UPDATE SET NULL;`)
require.NoError(t, err)
root := testRoot
for _, sqlStatement := range strings.Split(test.sqlStatement, ";") {
var err error
root, err = executeModify(context.Background(), dEnv, root, sqlStatement)
root, err = executeModify(t, context.Background(), dEnv, root, sqlStatement)
require.NoError(t, err)
}
@@ -287,7 +287,7 @@ func TestTableEditorForeignKeyRestrict(t *testing.T) {
t.Run(test.setup+test.trigger, func(t *testing.T) {
dEnv, initialRoot := setupEditorFkTest(t)
testRoot, err := ExecuteSql(dEnv, initialRoot, fmt.Sprintf(`
testRoot, err := ExecuteSql(t, dEnv, initialRoot, fmt.Sprintf(`
ALTER TABLE two ADD FOREIGN KEY (v1) REFERENCES one(v1) %s;
INSERT INTO one VALUES (1, 1, 1), (2, 2, 2), (3, 3, 3);
INSERT INTO two VALUES (1, 1, 1), (2, 2, 2), (3, 3, 3);`, referenceOption))
@@ -296,14 +296,14 @@ func TestTableEditorForeignKeyRestrict(t *testing.T) {
root := testRoot
for _, sqlStatement := range strings.Split(test.setup, ";") {
var err error
root, err = executeModify(context.Background(), dEnv, root, sqlStatement)
root, err = executeModify(t, context.Background(), dEnv, root, sqlStatement)
require.NoError(t, err)
}
if test.expectedErr {
root, err = executeModify(context.Background(), dEnv, root, test.trigger)
root, err = executeModify(t, context.Background(), dEnv, root, test.trigger)
assert.Error(t, err)
} else {
root, err = executeModify(context.Background(), dEnv, root, test.trigger)
root, err = executeModify(t, context.Background(), dEnv, root, test.trigger)
assert.NoError(t, err)
}
})
@@ -358,7 +358,7 @@ func TestTableEditorForeignKeyViolations(t *testing.T) {
t.Run(test.setup+test.trigger, func(t *testing.T) {
dEnv, initialRoot := setupEditorFkTest(t)
testRoot, err := ExecuteSql(dEnv, initialRoot, `
testRoot, err := ExecuteSql(t, dEnv, initialRoot, `
ALTER TABLE two ADD FOREIGN KEY (v1) REFERENCES one(v1) ON DELETE CASCADE ON UPDATE CASCADE;
ALTER TABLE three ADD FOREIGN KEY (v1, v2) REFERENCES two(v1, v2) ON DELETE CASCADE ON UPDATE CASCADE;
`)
@@ -367,10 +367,10 @@ ALTER TABLE three ADD FOREIGN KEY (v1, v2) REFERENCES two(v1, v2) ON DELETE CASC
root := testRoot
for _, sqlStatement := range strings.Split(test.setup, ";") {
var err error
root, err = executeModify(context.Background(), dEnv, root, sqlStatement)
root, err = executeModify(t, context.Background(), dEnv, root, sqlStatement)
require.NoError(t, err)
}
root, err = executeModify(context.Background(), dEnv, root, test.trigger)
root, err = executeModify(t, context.Background(), dEnv, root, test.trigger)
assert.Error(t, err)
})
}
@@ -435,7 +435,7 @@ func TestTableEditorSelfReferentialForeignKeyRestrict(t *testing.T) {
}
for _, test := range sequentialTests {
newRoot, err := executeModify(ctx, dEnv, root, test.statement)
newRoot, err := executeModify(t, ctx, dEnv, root, test.statement)
if test.expectedErr {
require.Error(t, err)
continue
@@ -535,7 +535,7 @@ func TestTableEditorSelfReferentialForeignKeyCascade(t *testing.T) {
}
for _, test := range sequentialTests {
newRoot, err := executeModify(ctx, dEnv, root, test.statement)
newRoot, err := executeModify(t, ctx, dEnv, root, test.statement)
if test.expectedErr {
require.Error(t, err)
continue
@@ -635,7 +635,7 @@ func TestTableEditorSelfReferentialForeignKeySetNull(t *testing.T) {
}
for _, test := range sequentialTests {
newRoot, err := executeModify(ctx, dEnv, root, test.statement)
newRoot, err := executeModify(t, ctx, dEnv, root, test.statement)
if test.expectedErr {
require.Error(t, err)
continue
@@ -36,7 +36,7 @@ func setupEditorIndexTest(t *testing.T) (*env.DoltEnv, *doltdb.RootValue) {
root, err := index_dEnv.WorkingRoot(context.Background())
require.NoError(t, err)
index_initialRoot, err := ExecuteSql(index_dEnv, root, `
index_initialRoot, err := ExecuteSql(t, index_dEnv, root, `
CREATE TABLE onepk (
pk1 BIGINT PRIMARY KEY,
v1 BIGINT,
@@ -124,7 +124,7 @@ UPDATE onepk SET pk1 = v1 + pk1 ORDER BY pk1 DESC;
root := initialRoot
for _, sqlStatement := range strings.Split(test.sqlStatement, ";") {
var err error
root, err = executeModify(context.Background(), dEnv, root, sqlStatement)
root, err = executeModify(t, context.Background(), dEnv, root, sqlStatement)
require.NoError(t, err)
}
@@ -282,7 +282,7 @@ UPDATE oneuni SET v1 = v1 + pk1;
root := initialRoot
var err error
for _, sqlStatement := range strings.Split(test.sqlStatement, ";") {
root, err = executeModify(context.Background(), dEnv, root, sqlStatement)
root, err = executeModify(t, context.Background(), dEnv, root, sqlStatement)
if err != nil {
break
}
@@ -26,6 +26,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
. "github.com/dolthub/dolt/go/libraries/doltcore/sql/sqltestutil"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
)
@@ -159,9 +160,11 @@ func TestTableEditor(t *testing.T) {
ctx := NewTestSQLCtx(context.Background())
root, _ := dEnv.WorkingRoot(context.Background())
db := NewDatabase("dolt", dEnv.DbData())
_ = DSessFromSess(ctx.Session).AddDB(ctx, db, db.DbData())
err := dsess.DSessFromSess(ctx.Session).AddDB(ctx, getDbState(t, db, dEnv))
require.NoError(t, err)
ctx.SetCurrentDatabase(db.Name())
err := db.SetRoot(ctx, root)
err = db.SetRoot(ctx, root)
require.NoError(t, err)
peopleTable, _, err := db.GetTableInsensitive(ctx, "people")
require.NoError(t, err)
@@ -181,7 +184,7 @@ func TestTableEditor(t *testing.T) {
root, err = db.GetRoot(ctx)
require.NoError(t, err)
actualRows, _, err := executeSelect(context.Background(), dEnv, root, test.selectQuery)
actualRows, _, err := executeSelect(t, context.Background(), dEnv, root, test.selectQuery)
require.NoError(t, err)
assert.Equal(t, test.expectedRows, actualRows)
+3 -2
View File
@@ -34,6 +34,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/schema/alterschema"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/encoding"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
"github.com/dolthub/dolt/go/libraries/utils/set"
@@ -501,10 +502,10 @@ func (t *WritableDoltTable) Inserter(ctx *sql.Context) sql.RowInserter {
}
func (t *WritableDoltTable) getTableEditor(ctx *sql.Context) (*sqlTableEditor, error) {
sess := DSessFromSess(ctx.Session)
sess := dsess.DSessFromSess(ctx.Session)
// In batched mode, reuse the same table editor. Otherwise, hand out a new one
if sess.batchMode == batched {
if sess.BatchMode == dsess.Batched {
if t.ed != nil {
return t.ed, nil
}

Some files were not shown because too many files have changed in this diff Show More