From 8403e32dc91be7e0a3c79a5b8b9a027c88d5e254 Mon Sep 17 00:00:00 2001 From: Nick Tobey Date: Fri, 28 Apr 2023 16:14:03 -0700 Subject: [PATCH 01/82] Include check for commits in detached head mode. --- go/libraries/doltcore/sqle/dsess/session.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/go/libraries/doltcore/sqle/dsess/session.go b/go/libraries/doltcore/sqle/dsess/session.go index 7f4999a840..6fc4b5075b 100644 --- a/go/libraries/doltcore/sqle/dsess/session.go +++ b/go/libraries/doltcore/sqle/dsess/session.go @@ -558,6 +558,10 @@ func (d *DoltSession) NewPendingCommit(ctx *sql.Context, dbName string, roots do headCommit := sessionState.headCommit headHash, _ := headCommit.HashOf() + if sessionState.WorkingSet == nil { + return nil, fmt.Errorf("cannot commit while not attached to a branch") + } + var mergeParentCommits []*doltdb.Commit if sessionState.WorkingSet.MergeActive() { mergeParentCommits = []*doltdb.Commit{sessionState.WorkingSet.MergeState().Commit()} From f12699ffce6066af45b339ecb4099519e91e6dd2 Mon Sep 17 00:00:00 2001 From: Nick Tobey Date: Tue, 9 May 2023 13:51:52 -0700 Subject: [PATCH 02/82] Refactor `RepoStateReader`'s `CWBHeadRef` and `CWBHeadSpec` to return errors. 
--- go/cmd/dolt/commands/branch.go | 17 ++++++++-- go/cmd/dolt/commands/checkout.go | 23 ++++++++++--- go/cmd/dolt/commands/commit.go | 11 ++++-- go/cmd/dolt/commands/log.go | 22 +++++++++--- go/cmd/dolt/commands/push.go | 22 +++++++----- go/cmd/dolt/commands/reset.go | 5 ++- go/cmd/dolt/commands/show.go | 18 ++++++++-- go/cmd/dolt/commands/stashcmds/stash.go | 5 ++- go/cmd/dolt/commands/status.go | 14 ++++++-- go/go.mod | 4 +++ go/libraries/doltcore/doltdb/errors.go | 3 +- go/libraries/doltcore/env/actions/checkout.go | 12 +++++-- go/libraries/doltcore/env/environment.go | 19 +++++++---- go/libraries/doltcore/env/memory.go | 34 ++++++++++++++----- go/libraries/doltcore/env/remotes.go | 10 ++++-- go/libraries/doltcore/env/repo_state.go | 4 +-- go/libraries/doltcore/sqle/database.go | 7 ++-- .../doltcore/sqle/database_provider.go | 16 ++++++--- .../doltcore/sqle/dprocedures/dolt_tag.go | 5 ++- .../sqle/dsess/session_state_adapter.go | 19 ++++++----- .../index/mergeable_indexes_setup_test.go | 15 +++++--- 21 files changed, 211 insertions(+), 74 deletions(-) diff --git a/go/cmd/dolt/commands/branch.go b/go/cmd/dolt/commands/branch.go index d1ba4b1856..10d8ea5cee 100644 --- a/go/cmd/dolt/commands/branch.go +++ b/go/cmd/dolt/commands/branch.go @@ -138,7 +138,10 @@ func printBranches(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar return HandleVErrAndExitCode(errhand.BuildDError("error: failed to read refs from db").AddCause(err).Build(), nil) } - currentBranch := dEnv.RepoStateReader().CWBHeadRef() + currentBranch, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return HandleVErrAndExitCode(errhand.BuildDError("error: failed to read refs from db").AddCause(err).Build(), nil) + } sort.Slice(branches, func(i, j int) bool { return branches[i].String() < branches[j].String() }) @@ -172,7 +175,11 @@ func printBranches(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar } if verbose { - cm, err := dEnv.DoltDB.Resolve(ctx, cs, 
dEnv.RepoStateReader().CWBHeadRef()) + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return HandleVErrAndExitCode(errhand.BuildDError(err.Error()).Build(), nil) + } + cm, err := dEnv.DoltDB.Resolve(ctx, cs, headRef) if err == nil { h, err := cm.HashOf() @@ -195,7 +202,11 @@ func printBranches(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar } func printCurrentBranch(dEnv *env.DoltEnv) int { - cli.Println(dEnv.RepoStateReader().CWBHeadRef().GetPath()) + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return HandleVErrAndExitCode(errhand.BuildDError(err.Error()).Build(), nil) + } + cli.Println(headRef.GetPath()) return 0 } diff --git a/go/cmd/dolt/commands/checkout.go b/go/cmd/dolt/commands/checkout.go index 59a2d3f206..d4e6990ee6 100644 --- a/go/cmd/dolt/commands/checkout.go +++ b/go/cmd/dolt/commands/checkout.go @@ -119,7 +119,10 @@ func (cmd CheckoutCmd) Exec(ctx context.Context, commandStr string, args []strin if err != nil { return HandleVErrAndExitCode(errhand.BuildDError(err.Error()).Build(), usagePrt) } - headRef := dEnv.RepoStateReader().CWBHeadRef() + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return HandleVErrAndExitCode(errhand.BuildDError(err.Error()).Build(), nil) + } ws, err := dEnv.WorkingSet(ctx) if err != nil { HandleVErrAndExitCode(errhand.BuildDError(err.Error()).Build(), usagePrt) @@ -179,7 +182,11 @@ func checkoutNewBranch(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.Ar // the new branch is checked out at this point if setTrackUpstream { - verr = SetRemoteUpstreamForBranchRef(dEnv, remoteName, remoteBranchName, dEnv.RepoStateReader().CWBHeadRef()) + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return errhand.BuildDError(err.Error()).Build() + } + verr = SetRemoteUpstreamForBranchRef(dEnv, remoteName, remoteBranchName, headRef) if verr != nil { return verr } @@ -195,7 +202,11 @@ func checkoutNewBranch(ctx 
context.Context, dEnv *env.DoltEnv, apr *argparser.Ar if !remoteOk { return nil } - verr = SetRemoteUpstreamForBranchRef(dEnv, remoteName, remoteBranchName, dEnv.RepoStateReader().CWBHeadRef()) + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return errhand.BuildDError(err.Error()).Build() + } + verr = SetRemoteUpstreamForBranchRef(dEnv, remoteName, remoteBranchName, headRef) if verr != nil { return verr } @@ -230,7 +241,11 @@ func checkoutRemoteBranchOrSuggestNew(ctx context.Context, dEnv *env.DoltEnv, na if verr != nil { return verr } - return SetRemoteUpstreamForBranchRef(dEnv, remoteRefs[0].GetRemote(), remoteRefs[0].GetBranch(), dEnv.RepoStateReader().CWBHeadRef()) + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return errhand.BuildDError(err.Error()).Build() + } + return SetRemoteUpstreamForBranchRef(dEnv, remoteRefs[0].GetRemote(), remoteRefs[0].GetBranch(), headRef) } else { // TODO : add hint of using `dolt checkout --track /` when --track flag is supported return errhand.BuildDError("'%s' matched multiple (%v) remote tracking branches", name, len(remoteRefs)).Build() diff --git a/go/cmd/dolt/commands/commit.go b/go/cmd/dolt/commands/commit.go index b0b7a53818..3fe0cb3a1d 100644 --- a/go/cmd/dolt/commands/commit.go +++ b/go/cmd/dolt/commands/commit.go @@ -221,9 +221,13 @@ func performCommit(ctx context.Context, commandStr string, args []string, dEnv * return handleCommitErr(ctx, dEnv, err, usage) } + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return handleCommitErr(ctx, dEnv, err, usage) + } _, err = dEnv.DoltDB.CommitWithWorkingSet( ctx, - dEnv.RepoStateReader().CWBHeadRef(), + headRef, ws.Ref(), pendingCommit, ws.WithStagedRoot(pendingCommit.Roots.Staged).WithWorkingRoot(pendingCommit.Roots.Working).ClearMerge(), @@ -380,7 +384,10 @@ func buildInitalCommitMsg(ctx context.Context, dEnv *env.DoltEnv, suggestedMsg s return "", err } - currBranch := 
dEnv.RepoStateReader().CWBHeadRef() + currBranch, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return "", err + } initialCommitMessage := fmt.Sprintf("%s\n# Please enter the commit message for your changes. Lines starting"+ "\n# with '#' will be ignored, and an empty message aborts the commit."+ "\n# On branch %s\n#\n", suggestedMsg, currBranch) diff --git a/go/cmd/dolt/commands/log.go b/go/cmd/dolt/commands/log.go index 0c54791304..d249469c54 100644 --- a/go/cmd/dolt/commands/log.go +++ b/go/cmd/dolt/commands/log.go @@ -123,7 +123,11 @@ func (cmd LogCmd) logWithLoggerFunc(ctx context.Context, commandStr string, args return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage) } if len(opts.commitSpecs) == 0 { - opts.commitSpecs = append(opts.commitSpecs, dEnv.RepoStateReader().CWBHeadSpec()) + headRef, err := dEnv.RepoStateReader().CWBHeadSpec() + if err != nil { + return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage) + } + opts.commitSpecs = append(opts.commitSpecs, headRef) } if len(opts.tableName) > 0 { return handleErrAndExit(logTableCommits(ctx, dEnv, opts)) @@ -327,8 +331,12 @@ func getHashToRefs(ctx context.Context, dEnv *env.DoltEnv, decorationLevel strin func logCommits(ctx context.Context, dEnv *env.DoltEnv, opts *logOpts) int { hashes := make([]hash.Hash, len(opts.commitSpecs)) + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return handleErrAndExit(err) + } for i, cs := range opts.commitSpecs { - commit, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef()) + commit, err := dEnv.DoltDB.Resolve(ctx, cs, headRef) if err != nil { cli.PrintErrln(color.HiRedString("Fatal error: cannot get HEAD commit for current branch.")) return 1 @@ -360,7 +368,7 @@ func logCommits(ctx context.Context, dEnv *env.DoltEnv, opts *logOpts) int { excludingHashes := make([]hash.Hash, len(opts.excludingCommitSpecs)) for i, excludingSpec := range opts.excludingCommitSpecs { - 
excludingCommit, err := dEnv.DoltDB.Resolve(ctx, excludingSpec, dEnv.RepoStateReader().CWBHeadRef()) + excludingCommit, err := dEnv.DoltDB.Resolve(ctx, excludingSpec, headRef) if err != nil { cli.PrintErrln(color.HiRedString("Fatal error: cannot get excluding commit for current branch.")) return 1 @@ -383,7 +391,6 @@ func logCommits(ctx context.Context, dEnv *env.DoltEnv, opts *logOpts) int { return 1 } - headRef := dEnv.RepoStateReader().CWBHeadRef() cwbHash, err := dEnv.DoltDB.GetHashForRefStr(ctx, headRef.String()) if err != nil { @@ -441,8 +448,13 @@ func tableExists(ctx context.Context, commit *doltdb.Commit, tableName string) ( func logTableCommits(ctx context.Context, dEnv *env.DoltEnv, opts *logOpts) error { hashes := make([]hash.Hash, len(opts.commitSpecs)) + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return err + } + for i, cs := range opts.commitSpecs { - commit, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef()) + commit, err := dEnv.DoltDB.Resolve(ctx, cs, headRef) if err != nil { return err } diff --git a/go/cmd/dolt/commands/push.go b/go/cmd/dolt/commands/push.go index c39650a451..8f425a7c02 100644 --- a/go/cmd/dolt/commands/push.go +++ b/go/cmd/dolt/commands/push.go @@ -96,16 +96,20 @@ func (cmd PushCmd) Exec(ctx context.Context, commandStr string, args []string, d var verr errhand.VerboseError switch err { case env.ErrNoUpstreamForBranch: - currentBranch := dEnv.RepoStateReader().CWBHeadRef() - remoteName := "" - if defRemote, verr := env.GetDefaultRemote(dEnv.RepoStateReader()); verr == nil { - remoteName = defRemote.Name + currentBranch, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + verr = errhand.BuildDError("fatal: The current branch could not be identified").Build() + } else { + remoteName := "" + if defRemote, verr := env.GetDefaultRemote(dEnv.RepoStateReader()); verr == nil { + remoteName = defRemote.Name + } + verr = errhand.BuildDError("fatal: The current branch " + 
currentBranch.GetPath() + " has no upstream branch.\n" + + "To push the current branch and set the remote as upstream, use\n" + + "\tdolt push --set-upstream " + remoteName + " " + currentBranch.GetPath() + "\n" + + "To have this happen automatically for branches without a tracking\n" + + "upstream, see 'push.autoSetupRemote' in 'dolt config --help'.").Build() } - verr = errhand.BuildDError("fatal: The current branch " + currentBranch.GetPath() + " has no upstream branch.\n" + - "To push the current branch and set the remote as upstream, use\n" + - "\tdolt push --set-upstream " + remoteName + " " + currentBranch.GetPath() + "\n" + - "To have this happen automatically for branches without a tracking\n" + - "upstream, see 'push.autoSetupRemote' in 'dolt config --help'.").Build() case env.ErrInvalidSetUpstreamArgs: verr = errhand.BuildDError("error: --set-upstream requires and params.").SetPrintUsage().Build() diff --git a/go/cmd/dolt/commands/reset.go b/go/cmd/dolt/commands/reset.go index f074870ac4..5194d2fa75 100644 --- a/go/cmd/dolt/commands/reset.go +++ b/go/cmd/dolt/commands/reset.go @@ -145,7 +145,10 @@ func handleResetHard(ctx context.Context, apr *argparser.ArgParseResults, usage arg = apr.Arg(0) } - headRef := dEnv.RepoStateReader().CWBHeadRef() + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage) + } ws, err := dEnv.WorkingSet(ctx) if err != nil { return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage) diff --git a/go/cmd/dolt/commands/show.go b/go/cmd/dolt/commands/show.go index c926593933..026889a099 100644 --- a/go/cmd/dolt/commands/show.go +++ b/go/cmd/dolt/commands/show.go @@ -159,7 +159,11 @@ func parseShowArgs(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar func showObjects(ctx context.Context, dEnv *env.DoltEnv, opts *showOpts) error { if len(opts.specRefs) == 0 { - return showCommitSpec(ctx, dEnv, opts, 
dEnv.RepoStateReader().CWBHeadSpec()) + headRef, err := dEnv.RepoStateReader().CWBHeadSpec() + if err != nil { + return err + } + return showCommitSpec(ctx, dEnv, opts, headRef) } for _, specRef := range opts.specRefs { @@ -243,7 +247,12 @@ func showSpecRef(ctx context.Context, dEnv *env.DoltEnv, opts *showOpts, specRef func showCommitSpec(ctx context.Context, dEnv *env.DoltEnv, opts *showOpts, commitSpec *doltdb.CommitSpec) error { - commit, err := dEnv.DoltDB.Resolve(ctx, commitSpec, dEnv.RepoStateReader().CWBHeadRef()) + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return err + } + + commit, err := dEnv.DoltDB.Resolve(ctx, commitSpec, headRef) if err != nil { return err } @@ -283,7 +292,10 @@ func showCommit(ctx context.Context, dEnv *env.DoltEnv, opts *showOpts, comm *do return err } - headRef := dEnv.RepoStateReader().CWBHeadRef() + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return err + } cwbHash, err := dEnv.DoltDB.GetHashForRefStr(ctx, headRef.String()) if err != nil { return err diff --git a/go/cmd/dolt/commands/stashcmds/stash.go b/go/cmd/dolt/commands/stashcmds/stash.go index dae43c3066..d29bfc5ecd 100644 --- a/go/cmd/dolt/commands/stashcmds/stash.go +++ b/go/cmd/dolt/commands/stashcmds/stash.go @@ -222,7 +222,10 @@ func stashChanges(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPars } } - curHeadRef := dEnv.RepoStateReader().CWBHeadRef() + curHeadRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return err + } curBranchName := curHeadRef.String() commitSpec, err := doltdb.NewCommitSpec(curBranchName) if err != nil { diff --git a/go/cmd/dolt/commands/status.go b/go/cmd/dolt/commands/status.go index 02cf648877..14040bb4dc 100644 --- a/go/cmd/dolt/commands/status.go +++ b/go/cmd/dolt/commands/status.go @@ -95,9 +95,14 @@ func (cmd StatusCmd) Exec(ctx context.Context, commandStr string, args []string, } func PrintStatus(ctx context.Context, dEnv *env.DoltEnv, stagedTbls, 
notStagedTbls []diff.TableDelta, showIgnoredTables bool, as merge.ArtifactStatus) error { - cli.Printf(branchHeader, dEnv.RepoStateReader().CWBHeadRef().GetPath()) + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return err + } - err := printRemoteRefTrackingInfo(ctx, dEnv) + cli.Printf(branchHeader, headRef.GetPath()) + + err = printRemoteRefTrackingInfo(ctx, dEnv) if err != nil { return err } @@ -141,7 +146,10 @@ func handleStatusVErr(err error) int { func printRemoteRefTrackingInfo(ctx context.Context, dEnv *env.DoltEnv) error { ddb := dEnv.DoltDB rsr := dEnv.RepoStateReader() - headRef := rsr.CWBHeadRef() + headRef, err := rsr.CWBHeadRef() + if err != nil { + return err + } branches, err := rsr.GetBranches() if err != nil { return err diff --git a/go/go.mod b/go/go.mod index 97d4106738..e2c300ba4d 100644 --- a/go/go.mod +++ b/go/go.mod @@ -148,4 +148,8 @@ require ( replace github.com/dolthub/dolt/go/gen/proto/dolt/services/eventsapi => ./gen/proto/dolt/services/eventsapi +replace github.com/dolthub/vitess => ../../vitess + +replace github.com/dolthub/go-mysql-server => ../../go-mysql-server + go 1.19 diff --git a/go/libraries/doltcore/doltdb/errors.go b/go/libraries/doltcore/doltdb/errors.go index 418af7789e..c09f42b118 100644 --- a/go/libraries/doltcore/doltdb/errors.go +++ b/go/libraries/doltcore/doltdb/errors.go @@ -29,7 +29,6 @@ var ErrInvalidBranchOrHash = errors.New("string is not a valid branch or hash") var ErrInvalidHash = errors.New("string is not a valid hash") var ErrFoundHashNotACommit = errors.New("the value retrieved for this hash is not a commit") - var ErrHashNotFound = errors.New("could not find a value for this hash") var ErrBranchNotFound = errors.New("branch not found") var ErrTagNotFound = errors.New("tag not found") @@ -49,6 +48,8 @@ var ErrIsBehind = errors.New("cannot reverse from b to a. 
b is a is behind a alr var ErrUnresolvedConflictsOrViolations = errors.New("merge has unresolved conflicts or constraint violations") var ErrMergeActive = errors.New("merging is not possible because you have not committed an active merge") +var ErrOperationNotSupportedInDetachedHead = errors.New("this operation is not supported while in a detached head state") + type ErrClientOutOfDate struct { RepoVer FeatureVersion ClientVer FeatureVersion diff --git a/go/libraries/doltcore/env/actions/checkout.go b/go/libraries/doltcore/env/actions/checkout.go index 54b9e07980..abb41d4020 100644 --- a/go/libraries/doltcore/env/actions/checkout.go +++ b/go/libraries/doltcore/env/actions/checkout.go @@ -17,7 +17,6 @@ package actions import ( "context" "errors" - "github.com/dolthub/dolt/go/libraries/doltcore/doltdb" "github.com/dolthub/dolt/go/libraries/doltcore/env" "github.com/dolthub/dolt/go/libraries/doltcore/ref" @@ -159,7 +158,10 @@ func rootsForBranch(ctx context.Context, roots doltdb.Roots, branchRoot *doltdb. 
func CheckoutBranch(ctx context.Context, dEnv *env.DoltEnv, brName string, force bool) error { branchRef := ref.NewBranchRef(brName) - initialHeadRef := dEnv.RepoStateReader().CWBHeadRef() + initialHeadRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return err + } db := dEnv.DoltDB hasRef, err := db.HasRef(ctx, branchRef) @@ -170,7 +172,11 @@ func CheckoutBranch(ctx context.Context, dEnv *env.DoltEnv, brName string, force return doltdb.ErrBranchNotFound } - if ref.Equals(dEnv.RepoStateReader().CWBHeadRef(), branchRef) { + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return err + } + if ref.Equals(headRef, branchRef) { return doltdb.ErrAlreadyOnBranch } diff --git a/go/libraries/doltcore/env/environment.go b/go/libraries/doltcore/env/environment.go index d7cb31a44b..c98bab8b24 100644 --- a/go/libraries/doltcore/env/environment.go +++ b/go/libraries/doltcore/env/environment.go @@ -206,7 +206,10 @@ func (dEnv *DoltEnv) Valid() bool { // initWorkingSetFromRepoState sets the working set for the env's head to mirror the contents of the repo state file. 
// This is only necessary to migrate repos written before this method was introduced, and can be removed after 1.0 func (dEnv *DoltEnv) initWorkingSetFromRepoState(ctx context.Context) error { - headRef := dEnv.RepoStateReader().CWBHeadRef() + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return err + } wsRef, err := ref.WorkingSetRefForHead(headRef) if err != nil { return err @@ -591,7 +594,11 @@ func (dEnv *DoltEnv) WorkingSet(ctx context.Context) (*doltdb.WorkingSet, error) } func WorkingSet(ctx context.Context, ddb *doltdb.DoltDB, rsr RepoStateReader) (*doltdb.WorkingSet, error) { - workingSetRef, err := ref.WorkingSetRefForHead(rsr.CWBHeadRef()) + headRef, err := rsr.CWBHeadRef() + if err != nil { + return nil, err + } + workingSetRef, err := ref.WorkingSetRefForHead(headRef) if err != nil { return nil, err } @@ -655,12 +662,12 @@ type repoStateReader struct { *DoltEnv } -func (r *repoStateReader) CWBHeadRef() ref.DoltRef { - return r.RepoState.CWBHeadRef() +func (r *repoStateReader) CWBHeadRef() (ref.DoltRef, error) { + return r.RepoState.CWBHeadRef(), nil } -func (r *repoStateReader) CWBHeadSpec() *doltdb.CommitSpec { - return r.RepoState.CWBHeadSpec() +func (r *repoStateReader) CWBHeadSpec() (*doltdb.CommitSpec, error) { + return r.RepoState.CWBHeadSpec(), nil } func (dEnv *DoltEnv) RepoStateReader() RepoStateReader { diff --git a/go/libraries/doltcore/env/memory.go b/go/libraries/doltcore/env/memory.go index 7be9bdfadf..3bdb0a640b 100644 --- a/go/libraries/doltcore/env/memory.go +++ b/go/libraries/doltcore/env/memory.go @@ -101,16 +101,20 @@ type MemoryRepoState struct { var _ RepoStateReader = MemoryRepoState{} var _ RepoStateWriter = MemoryRepoState{} -func (m MemoryRepoState) CWBHeadRef() ref.DoltRef { - return m.Head +func (m MemoryRepoState) CWBHeadRef() (ref.DoltRef, error) { + return m.Head, nil } -func (m MemoryRepoState) CWBHeadSpec() *doltdb.CommitSpec { - spec, err := doltdb.NewCommitSpec(m.CWBHeadRef().GetPath()) +func 
(m MemoryRepoState) CWBHeadSpec() (*doltdb.CommitSpec, error) { + headRef, err := m.CWBHeadRef() if err != nil { - panic(err) + return nil, err } - return spec + spec, err := doltdb.NewCommitSpec(headRef.GetPath()) + if err != nil { + return nil, err + } + return spec, nil } func (m MemoryRepoState) UpdateStagedRoot(ctx context.Context, newRoot *doltdb.RootValue) error { @@ -120,7 +124,11 @@ func (m MemoryRepoState) UpdateStagedRoot(ctx context.Context, newRoot *doltdb.R ws, err := m.WorkingSet(ctx) if err == doltdb.ErrWorkingSetNotFound { // first time updating root - wsRef, err = ref.WorkingSetRefForHead(m.CWBHeadRef()) + headRef, err := m.CWBHeadRef() + if err != nil { + return err + } + wsRef, err = ref.WorkingSetRefForHead(headRef) if err != nil { return err } @@ -146,7 +154,11 @@ func (m MemoryRepoState) UpdateWorkingRoot(ctx context.Context, newRoot *doltdb. ws, err := m.WorkingSet(ctx) if err == doltdb.ErrWorkingSetNotFound { // first time updating root - wsRef, err = ref.WorkingSetRefForHead(m.CWBHeadRef()) + headRef, err := m.CWBHeadRef() + if err != nil { + return err + } + wsRef, err = ref.WorkingSetRefForHead(headRef) if err != nil { return err } @@ -166,7 +178,11 @@ func (m MemoryRepoState) UpdateWorkingRoot(ctx context.Context, newRoot *doltdb. 
} func (m MemoryRepoState) WorkingSet(ctx context.Context) (*doltdb.WorkingSet, error) { - workingSetRef, err := ref.WorkingSetRefForHead(m.CWBHeadRef()) + headRef, err := m.CWBHeadRef() + if err != nil { + return nil, err + } + workingSetRef, err := ref.WorkingSetRefForHead(headRef) if err != nil { return nil, err } diff --git a/go/libraries/doltcore/env/remotes.go b/go/libraries/doltcore/env/remotes.go index a5ee0e434a..96c4b4cccf 100644 --- a/go/libraries/doltcore/env/remotes.go +++ b/go/libraries/doltcore/env/remotes.go @@ -141,7 +141,10 @@ func NewPushOpts(ctx context.Context, apr *argparser.ArgParseResults, rsr RepoSt } remote, remoteOK := remotes[remoteName] - currentBranch := rsr.CWBHeadRef() + currentBranch, err := rsr.CWBHeadRef() + if err != nil { + return nil, err + } branches, err := rsr.GetBranches() if err != nil { return nil, err @@ -422,7 +425,10 @@ func NewPullSpec(_ context.Context, rsr RepoStateReader, remoteName, remoteRefNa var remoteRef ref.DoltRef if remoteRefName == "" { - branch := rsr.CWBHeadRef() + branch, err := rsr.CWBHeadRef() + if err != nil { + return nil, err + } trackedBranches, err := rsr.GetBranches() if err != nil { return nil, err diff --git a/go/libraries/doltcore/env/repo_state.go b/go/libraries/doltcore/env/repo_state.go index 80ce0ded1f..f3a265ecf1 100644 --- a/go/libraries/doltcore/env/repo_state.go +++ b/go/libraries/doltcore/env/repo_state.go @@ -27,8 +27,8 @@ import ( // TODO: change name to ClientStateReader, move out of env package type RepoStateReader interface { - CWBHeadRef() ref.DoltRef - CWBHeadSpec() *doltdb.CommitSpec + CWBHeadRef() (ref.DoltRef, error) + CWBHeadSpec() (*doltdb.CommitSpec, error) GetRemotes() (map[string]Remote, error) GetBackups() (map[string]Remote, error) GetBranches() (map[string]BranchConfig, error) diff --git a/go/libraries/doltcore/sqle/database.go b/go/libraries/doltcore/sqle/database.go index a1ea508977..b27b3f5dfa 100644 --- a/go/libraries/doltcore/sqle/database.go +++ 
b/go/libraries/doltcore/sqle/database.go @@ -417,14 +417,17 @@ func (db Database) getTableInsensitive(ctx *sql.Context, head *doltdb.Commit, ds // resolveAsOf resolves given expression to a commit, if one exists. func resolveAsOf(ctx *sql.Context, db Database, asOf interface{}) (*doltdb.Commit, *doltdb.RootValue, error) { - head := db.rsr.CWBHeadRef() + head, err := db.rsr.CWBHeadRef() + if err != nil { + return nil, nil, err + } switch x := asOf.(type) { case time.Time: return resolveAsOfTime(ctx, db.ddb, head, x) case string: return resolveAsOfCommitRef(ctx, db, head, x) default: - panic(fmt.Sprintf("unsupported AS OF type %T", asOf)) + return nil, nil, fmt.Errorf("unsupported AS OF type %T", asOf) } } diff --git a/go/libraries/doltcore/sqle/database_provider.go b/go/libraries/doltcore/sqle/database_provider.go index 811d61dd76..2b3a50d16e 100644 --- a/go/libraries/doltcore/sqle/database_provider.go +++ b/go/libraries/doltcore/sqle/database_provider.go @@ -864,7 +864,11 @@ func initialDbState(ctx context.Context, db dsess.SqlDatabase, branch string) (d if len(branch) > 0 { r = ref.NewBranchRef(branch) } else { - r = rsr.CWBHeadRef() + var err error + r, err = rsr.CWBHeadRef() + if err != nil { + return dsess.InitialDbState{}, err + } } var retainedErr error @@ -1418,7 +1422,11 @@ func initialStateForCommit(ctx context.Context, srcDb ReadOnlyDatabase) (dsess.I return dsess.InitialDbState{}, err } - cm, err := srcDb.DbData().Ddb.Resolve(ctx, spec, srcDb.DbData().Rsr.CWBHeadRef()) + headRef, err := srcDb.DbData().Rsr.CWBHeadRef() + if err != nil { + return dsess.InitialDbState{}, err + } + cm, err := srcDb.DbData().Ddb.Resolve(ctx, spec, headRef) if err != nil { return dsess.InitialDbState{}, err } @@ -1448,8 +1456,8 @@ type staticRepoState struct { env.RepoStateReader } -func (s staticRepoState) CWBHeadRef() ref.DoltRef { - return s.branch +func (s staticRepoState) CWBHeadRef() (ref.DoltRef, error) { + return s.branch, nil } // formatDbMapKeyName returns formatted 
string of database name and/or branch name. Database name is case-insensitive, diff --git a/go/libraries/doltcore/sqle/dprocedures/dolt_tag.go b/go/libraries/doltcore/sqle/dprocedures/dolt_tag.go index 4967481f82..95e54a3f9e 100644 --- a/go/libraries/doltcore/sqle/dprocedures/dolt_tag.go +++ b/go/libraries/doltcore/sqle/dprocedures/dolt_tag.go @@ -97,7 +97,10 @@ func doDoltTag(ctx *sql.Context, args []string) (int, error) { if len(apr.Args) > 1 { startPoint = apr.Arg(1) } - headRef := dbData.Rsr.CWBHeadRef() + headRef, err := dbData.Rsr.CWBHeadRef() + if err != nil { + return 0, err + } err = actions.CreateTagOnDB(ctx, dbData.Ddb, tagName, startPoint, props, headRef) if err != nil { return 1, err diff --git a/go/libraries/doltcore/sqle/dsess/session_state_adapter.go b/go/libraries/doltcore/sqle/dsess/session_state_adapter.go index 5bb8967043..700480d008 100644 --- a/go/libraries/doltcore/sqle/dsess/session_state_adapter.go +++ b/go/libraries/doltcore/sqle/dsess/session_state_adapter.go @@ -61,29 +61,30 @@ func (s SessionStateAdapter) GetRoots(ctx context.Context) (doltdb.Roots, error) return state.GetRoots(), nil } -func (s SessionStateAdapter) CWBHeadRef() ref.DoltRef { +func (s SessionStateAdapter) CWBHeadRef() (ref.DoltRef, error) { workingSet, err := s.session.WorkingSet(sql.NewContext(context.Background()), s.dbName) if err != nil { - // TODO: fix this interface - panic(err) + return nil, err } headRef, err := workingSet.Ref().ToHeadRef() - // TODO: fix this interface if err != nil { - panic(err) + return nil, err } - return headRef + return headRef, nil } -func (s SessionStateAdapter) CWBHeadSpec() *doltdb.CommitSpec { +func (s SessionStateAdapter) CWBHeadSpec() (*doltdb.CommitSpec, error) { // TODO: get rid of this - ref := s.CWBHeadRef() + ref, err := s.CWBHeadRef() + if err != nil { + return nil, err + } spec, err := doltdb.NewCommitSpec(ref.GetPath()) if err != nil { panic(err) } - return spec + return spec, nil } func (s SessionStateAdapter) GetRemotes() 
(map[string]env.Remote, error) { diff --git a/go/libraries/doltcore/sqle/index/mergeable_indexes_setup_test.go b/go/libraries/doltcore/sqle/index/mergeable_indexes_setup_test.go index 18d72fe312..cee830b107 100644 --- a/go/libraries/doltcore/sqle/index/mergeable_indexes_setup_test.go +++ b/go/libraries/doltcore/sqle/index/mergeable_indexes_setup_test.go @@ -169,11 +169,18 @@ func drainIter(ctx *sql.Context, iter sql.RowIter) error { return iter.Close(ctx) } -func getDbState(t *testing.T, db sql.Database, dEnv *env.DoltEnv) dsess.InitialDbState { +func getDbState(t *testing.T, db sql.Database, dEnv *env.DoltEnv) (dsess.InitialDbState, error) { ctx := context.Background() - head := dEnv.RepoStateReader().CWBHeadSpec() - headCommit, err := dEnv.DoltDB.Resolve(ctx, head, dEnv.RepoStateReader().CWBHeadRef()) + headSpec, err := dEnv.RepoStateReader().CWBHeadSpec() + if err != nil { + return dsess.InitialDbState{}, err + } + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return dsess.InitialDbState{}, err + } + headCommit, err := dEnv.DoltDB.Resolve(ctx, headSpec, headRef) require.NoError(t, err) ws, err := dEnv.WorkingSet(ctx) @@ -185,5 +192,5 @@ func getDbState(t *testing.T, db sql.Database, dEnv *env.DoltEnv) dsess.InitialD WorkingSet: ws, DbData: dEnv.DbData(), Remotes: dEnv.RepoState.Remotes, - } + }, nil } From d5584942c5a9386be6e13ad4194928997557c9fd Mon Sep 17 00:00:00 2001 From: Nick Tobey Date: Tue, 9 May 2023 14:12:44 -0700 Subject: [PATCH 03/82] Migrate uses of `CWBHeadRef` and `CWBHeadSpec` in doltcore. 
--- go/libraries/doltcore/diff/diffsplitter.go | 6 +++- go/libraries/doltcore/env/actions/branch.go | 30 +++++++++++++++---- go/libraries/doltcore/env/actions/remotes.go | 6 +++- go/libraries/doltcore/env/actions/reset.go | 30 +++++++++++++------ go/libraries/doltcore/env/actions/tag.go | 6 +++- .../doltcore/env/actions/workspace.go | 12 ++++++-- go/libraries/doltcore/merge/action.go | 15 ++++++++-- go/libraries/doltcore/rebase/rebase.go | 12 ++++++-- 8 files changed, 93 insertions(+), 24 deletions(-) diff --git a/go/libraries/doltcore/diff/diffsplitter.go b/go/libraries/doltcore/diff/diffsplitter.go index 65b162aab0..2bcecdb80f 100644 --- a/go/libraries/doltcore/diff/diffsplitter.go +++ b/go/libraries/doltcore/diff/diffsplitter.go @@ -219,7 +219,11 @@ func MaybeResolveRoot(ctx context.Context, rsr env.RepoStateReader, doltDB *dolt return nil, false } - cm, err := doltDB.Resolve(ctx, cs, rsr.CWBHeadRef()) + headRef, err := rsr.CWBHeadRef() + if err != nil { + return nil, false + } + cm, err := doltDB.Resolve(ctx, cs, headRef) if err != nil { return nil, false } diff --git a/go/libraries/doltcore/env/actions/branch.go b/go/libraries/doltcore/env/actions/branch.go index 313bdf7004..1c47bd9d2b 100644 --- a/go/libraries/doltcore/env/actions/branch.go +++ b/go/libraries/doltcore/env/actions/branch.go @@ -40,7 +40,11 @@ func RenameBranch(ctx context.Context, dbData env.DbData, oldBranch, newBranch s return err } - if ref.Equals(dbData.Rsr.CWBHeadRef(), oldRef) { + headRef, err := dbData.Rsr.CWBHeadRef() + if err != nil { + return err + } + if ref.Equals(headRef, oldRef) { err = dbData.Rsw.SetCWBHeadRef(ctx, ref.MarshalableRef{Ref: newRef}) if err != nil { return err @@ -122,7 +126,11 @@ func DeleteBranch(ctx context.Context, dbData env.DbData, brName string, opts De } } else { branchRef = ref.NewBranchRef(brName) - if ref.Equals(dbData.Rsr.CWBHeadRef(), branchRef) { + headRef, err := dbData.Rsr.CWBHeadRef() + if err != nil { + return err + } + if ref.Equals(headRef, 
branchRef) { return ErrCOBranchDelete } } @@ -193,7 +201,11 @@ func validateBranchMergedIntoCurrentWorkingBranch(ctx context.Context, dbdata en return err } - cwbHead, err := dbdata.Ddb.Resolve(ctx, cwbCs, dbdata.Rsr.CWBHeadRef()) + headRef, err := dbdata.Rsr.CWBHeadRef() + if err != nil { + return err + } + cwbHead, err := dbdata.Ddb.Resolve(ctx, cwbCs, headRef) if err != nil { return err } @@ -323,7 +335,11 @@ func CreateBranchOnDB(ctx context.Context, ddb *doltdb.DoltDB, newBranch, starti } func createBranch(ctx context.Context, dbData env.DbData, newBranch, startingPoint string, force bool) error { - return CreateBranchOnDB(ctx, dbData.Ddb, newBranch, startingPoint, force, dbData.Rsr.CWBHeadRef()) + headRef, err := dbData.Rsr.CWBHeadRef() + if err != nil { + return err + } + return CreateBranchOnDB(ctx, dbData.Ddb, newBranch, startingPoint, force, headRef) } var emptyHash = hash.Hash{} @@ -341,7 +357,11 @@ func MaybeGetCommit(ctx context.Context, dEnv *env.DoltEnv, str string) (*doltdb cs, err := doltdb.NewCommitSpec(str) if err == nil { - cm, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef()) + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return nil, err + } + cm, err := dEnv.DoltDB.Resolve(ctx, cs, headRef) if errors.Is(err, doltdb.ErrBranchNotFound) { return nil, nil diff --git a/go/libraries/doltcore/env/actions/remotes.go b/go/libraries/doltcore/env/actions/remotes.go index c0a67bbf05..d50468789a 100644 --- a/go/libraries/doltcore/env/actions/remotes.go +++ b/go/libraries/doltcore/env/actions/remotes.go @@ -168,7 +168,11 @@ func PushToRemoteBranch(ctx context.Context, rsr env.RepoStateReader, tempTableD } cs, _ := doltdb.NewCommitSpec(srcRef.GetPath()) - cm, err := localDB.Resolve(ctx, cs, rsr.CWBHeadRef()) + headRef, err := rsr.CWBHeadRef() + if err != nil { + return err + } + cm, err := localDB.Resolve(ctx, cs, headRef) if err != nil { return fmt.Errorf("%w; refspec not found: '%s'; %s", 
ref.ErrInvalidRefSpec, srcRef.GetPath(), err.Error()) diff --git a/go/libraries/doltcore/env/actions/reset.go b/go/libraries/doltcore/env/actions/reset.go index 0a601ff365..527c02130a 100644 --- a/go/libraries/doltcore/env/actions/reset.go +++ b/go/libraries/doltcore/env/actions/reset.go @@ -39,7 +39,11 @@ func resetHardTables(ctx context.Context, dbData env.DbData, cSpecStr string, ro return nil, doltdb.Roots{}, err } - newHead, err = ddb.Resolve(ctx, cs, rsr.CWBHeadRef()) + headRef, err := rsr.CWBHeadRef() + if err != nil { + return nil, doltdb.Roots{}, err + } + newHead, err = ddb.Resolve(ctx, cs, headRef) if err != nil { return nil, doltdb.Roots{}, err } @@ -220,7 +224,11 @@ func ResetSoftToRef(ctx context.Context, dbData env.DbData, cSpecStr string) (do return doltdb.Roots{}, err } - newHead, err := dbData.Ddb.Resolve(ctx, cs, dbData.Rsr.CWBHeadRef()) + headRef, err := dbData.Rsr.CWBHeadRef() + if err != nil { + return doltdb.Roots{}, err + } + newHead, err := dbData.Ddb.Resolve(ctx, cs, headRef) if err != nil { return doltdb.Roots{}, err } @@ -231,7 +239,7 @@ func ResetSoftToRef(ctx context.Context, dbData env.DbData, cSpecStr string) (do } // Update the head to this commit - if err = dbData.Ddb.SetHeadToCommit(ctx, dbData.Rsr.CWBHeadRef(), newHead); err != nil { + if err = dbData.Ddb.SetHeadToCommit(ctx, headRef, newHead); err != nil { return doltdb.Roots{}, err } @@ -265,19 +273,23 @@ func resetStaged(ctx context.Context, roots doltdb.Roots, tbls []string) (doltdb } // IsValidRef validates whether the input parameter is a valid cString -// TODO: this doesn't belong int his package -func IsValidRef(ctx context.Context, cSpecStr string, ddb *doltdb.DoltDB, rsr env.RepoStateReader) bool { +// TODO: this doesn't belong in this package +func IsValidRef(ctx context.Context, cSpecStr string, ddb *doltdb.DoltDB, rsr env.RepoStateReader) (bool, error) { cs, err := doltdb.NewCommitSpec(cSpecStr) if err != nil { - return false + return false, err } - _, err = 
ddb.Resolve(ctx, cs, rsr.CWBHeadRef()) + headRef, err := rsr.CWBHeadRef() if err != nil { - return false + return false, err + } + _, err = ddb.Resolve(ctx, cs, headRef) + if err != nil { + return false, err } - return true + return true, nil } // CleanUntracked deletes untracked tables from the working root. diff --git a/go/libraries/doltcore/env/actions/tag.go b/go/libraries/doltcore/env/actions/tag.go index d34859f02b..52a8eb95d8 100644 --- a/go/libraries/doltcore/env/actions/tag.go +++ b/go/libraries/doltcore/env/actions/tag.go @@ -32,7 +32,11 @@ type TagProps struct { } func CreateTag(ctx context.Context, dEnv *env.DoltEnv, tagName, startPoint string, props TagProps) error { - return CreateTagOnDB(ctx, dEnv.DoltDB, tagName, startPoint, props, dEnv.RepoStateReader().CWBHeadRef()) + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return err + } + return CreateTagOnDB(ctx, dEnv.DoltDB, tagName, startPoint, props, headRef) } func CreateTagOnDB(ctx context.Context, ddb *doltdb.DoltDB, tagName, startPoint string, props TagProps, headRef ref.DoltRef) error { diff --git a/go/libraries/doltcore/env/actions/workspace.go b/go/libraries/doltcore/env/actions/workspace.go index 7f40da3a1e..7f525532af 100644 --- a/go/libraries/doltcore/env/actions/workspace.go +++ b/go/libraries/doltcore/env/actions/workspace.go @@ -28,7 +28,11 @@ var ErrCOWorkspaceDelete = errors.New("attempted to delete checked out workspace var ErrBranchNameExists = errors.New("workspace name must not be existing branch name") func CreateWorkspace(ctx context.Context, dEnv *env.DoltEnv, name, startPoint string) error { - return CreateWorkspaceOnDB(ctx, dEnv.DoltDB, name, startPoint, dEnv.RepoStateReader().CWBHeadRef()) + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return nil + } + return CreateWorkspaceOnDB(ctx, dEnv.DoltDB, name, startPoint, headRef) } func CreateWorkspaceOnDB(ctx context.Context, ddb *doltdb.DoltDB, name, startPoint string, headRef 
ref.DoltRef) error { @@ -86,7 +90,11 @@ func DeleteWorkspace(ctx context.Context, dEnv *env.DoltEnv, workspaceName strin } } else { dref = ref.NewWorkspaceRef(workspaceName) - if ref.Equals(dEnv.RepoStateReader().CWBHeadRef(), dref) { + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return err + } + if ref.Equals(headRef, dref) { return ErrCOWorkspaceDelete } } diff --git a/go/libraries/doltcore/merge/action.go b/go/libraries/doltcore/merge/action.go index 8d1ff9b6a8..e4f0303b74 100644 --- a/go/libraries/doltcore/merge/action.go +++ b/go/libraries/doltcore/merge/action.go @@ -62,7 +62,12 @@ func NewMergeSpec(ctx context.Context, rsr env.RepoStateReader, ddb *doltdb.Dolt return nil, err } - headCM, err := ddb.Resolve(context.TODO(), headCS, rsr.CWBHeadRef()) + headRef, err := rsr.CWBHeadRef() + if err != nil { + return nil, err + } + + headCM, err := ddb.Resolve(context.TODO(), headCS, headRef) if err != nil { return nil, err } @@ -72,7 +77,7 @@ func NewMergeSpec(ctx context.Context, rsr env.RepoStateReader, ddb *doltdb.Dolt return nil, err } - mergeCM, err := ddb.Resolve(context.TODO(), mergeCS, rsr.CWBHeadRef()) + mergeCM, err := ddb.Resolve(context.TODO(), mergeCS, headRef) if err != nil { return nil, err } @@ -159,7 +164,11 @@ func ExecuteFFMerge( } if !spec.Squash { - err = dEnv.DoltDB.FastForward(ctx, dEnv.RepoStateReader().CWBHeadRef(), spec.MergeC) + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return err + } + err = dEnv.DoltDB.FastForward(ctx, headRef, spec.MergeC) if err != nil { return err diff --git a/go/libraries/doltcore/rebase/rebase.go b/go/libraries/doltcore/rebase/rebase.go index 5ade41afba..d31bc6ffde 100644 --- a/go/libraries/doltcore/rebase/rebase.go +++ b/go/libraries/doltcore/rebase/rebase.go @@ -113,7 +113,11 @@ func AllBranches(ctx context.Context, dEnv *env.DoltEnv, replay ReplayCommitFn, // CurrentBranch rewrites the history of the current branch using the |replay| function. 
func CurrentBranch(ctx context.Context, dEnv *env.DoltEnv, replay ReplayCommitFn, nerf NeedsRebaseFn) error { - return rebaseRefs(ctx, dEnv.DbData(), replay, nerf, dEnv.RepoStateReader().CWBHeadRef()) + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return nil + } + return rebaseRefs(ctx, dEnv.DbData(), replay, nerf, headRef) } // AllBranchesByRoots rewrites the history of all branches in the repo using the |replay| function. @@ -130,7 +134,11 @@ func AllBranchesByRoots(ctx context.Context, dEnv *env.DoltEnv, replay ReplayRoo // CurrentBranchByRoot rewrites the history of the current branch using the |replay| function. func CurrentBranchByRoot(ctx context.Context, dEnv *env.DoltEnv, replay ReplayRootFn, nerf NeedsRebaseFn) error { replayCommit := wrapReplayRootFn(replay) - return rebaseRefs(ctx, dEnv.DbData(), replayCommit, nerf, dEnv.RepoStateReader().CWBHeadRef()) + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return nil + } + return rebaseRefs(ctx, dEnv.DbData(), replayCommit, nerf, headRef) } func rebaseRefs(ctx context.Context, dbData env.DbData, replay ReplayCommitFn, nerf NeedsRebaseFn, refs ...ref.DoltRef) error { From 75c0b505e5d5a79cf5774cabb9ff6b3054b36479 Mon Sep 17 00:00:00 2001 From: Andy Arthur Date: Tue, 9 May 2023 15:40:07 -0700 Subject: [PATCH 04/82] go/{merge, sqle, prolly}: Added NOT NULL constraints to constraint violations table --- .../doltcore/doltdb/durable/artifact_index.go | 6 +- go/libraries/doltcore/doltdb/table.go | 2 +- .../doltcore/merge/merge_prolly_rows.go | 100 +++++++++++++++--- go/libraries/doltcore/merge/merge_rows.go | 8 +- .../doltcore/merge/schema_merge_test.go | 27 +++++ go/libraries/doltcore/merge/violations_fk.go | 1 + .../merge/violations_unique_prolly.go | 31 ++++++ .../dtables/constraint_violations_prolly.go | 17 +++ .../sqle/enginetest/dolt_queries_merge.go | 46 ++++++++ go/store/prolly/artifact_map.go | 17 +-- go/store/val/tuple_descriptor.go | 5 + 11 files changed, 
235 insertions(+), 25 deletions(-) diff --git a/go/libraries/doltcore/doltdb/durable/artifact_index.go b/go/libraries/doltcore/doltdb/durable/artifact_index.go index a078584c38..07da0dd342 100644 --- a/go/libraries/doltcore/doltdb/durable/artifact_index.go +++ b/go/libraries/doltcore/doltdb/durable/artifact_index.go @@ -137,7 +137,11 @@ func (i prollyArtifactIndex) ConflictCount(ctx context.Context) (uint64, error) } func (i prollyArtifactIndex) ConstraintViolationCount(ctx context.Context) (uint64, error) { - return i.index.CountOfTypes(ctx, prolly.ArtifactTypeForeignKeyViol, prolly.ArtifactTypeUniqueKeyViol, prolly.ArtifactTypeChkConsViol) + return i.index.CountOfTypes(ctx, + prolly.ArtifactTypeForeignKeyViol, + prolly.ArtifactTypeUniqueKeyViol, + prolly.ArtifactTypeChkConsViol, + prolly.ArtifactTypeNullViol) } func (i prollyArtifactIndex) ClearConflicts(ctx context.Context) (ArtifactIndex, error) { diff --git a/go/libraries/doltcore/doltdb/table.go b/go/libraries/doltcore/doltdb/table.go index cc889b13dc..95cb79fb20 100644 --- a/go/libraries/doltcore/doltdb/table.go +++ b/go/libraries/doltcore/doltdb/table.go @@ -355,7 +355,7 @@ func (t *Table) GetConstraintViolationsSchema(ctx context.Context) (schema.Schem } typeType, err := typeinfo.FromSqlType( - gmstypes.MustCreateEnumType([]string{"foreign key", "unique index", "check constraint"}, sql.Collation_Default)) + gmstypes.MustCreateEnumType([]string{"foreign key", "unique index", "check constraint", "not null"}, sql.Collation_Default)) if err != nil { return nil, err } diff --git a/go/libraries/doltcore/merge/merge_prolly_rows.go b/go/libraries/doltcore/merge/merge_prolly_rows.go index e462b186cf..9971a80750 100644 --- a/go/libraries/doltcore/merge/merge_prolly_rows.go +++ b/go/libraries/doltcore/merge/merge_prolly_rows.go @@ -156,12 +156,17 @@ func mergeProllyTableData(ctx *sql.Context, tm *TableMerger, finalSch schema.Sch return nil, nil, err } - // validator shares editor with conflict merge + // validator 
shares artifact editor with conflict merge uniq, err := newUniqValidator(ctx, finalSch, tm, valueMerger, ae) if err != nil { return nil, nil, err } + nullChk, err := newNullValidator(ctx, finalSch, tm, valueMerger, ae) + if err != nil { + return nil, nil, err + } + s := &MergeStats{ Operation: TableModified, } @@ -177,7 +182,16 @@ func mergeProllyTableData(ctx *sql.Context, tm *TableMerger, finalSch schema.Sch if err != nil { return nil, nil, err } - s.DataConflicts += cnt + s.ConstraintViolations += cnt + + cnt, err = nullChk.validateDiff(ctx, diff) + if err != nil { + return nil, nil, err + } + s.ConstraintViolations += cnt + if cnt > 0 { + continue + } switch diff.Op { case tree.DiffOpDivergentModifyConflict, tree.DiffOpDivergentDeleteConflict: @@ -379,7 +393,7 @@ func newUniqValidator(ctx context.Context, sch schema.Schema, tm *TableMerger, v return uv, nil } -func (uv uniqValidator) validateDiff(ctx context.Context, diff tree.ThreeWayDiff) (conflicts int, err error) { +func (uv uniqValidator) validateDiff(ctx context.Context, diff tree.ThreeWayDiff) (violations int, err error) { var value val.Tuple switch diff.Op { case tree.DiffOpRightAdd, tree.DiffOpRightModify: @@ -399,7 +413,7 @@ func (uv uniqValidator) validateDiff(ctx context.Context, diff tree.ThreeWayDiff for _, idx := range uv.indexes { err = idx.findCollisions(ctx, diff.Key, value, func(k, v val.Tuple) error { - conflicts++ + violations++ return uv.insertArtifact(ctx, k, v, idx.meta) }) if err != nil { @@ -491,6 +505,68 @@ func (idx uniqIndex) findCollisions(ctx context.Context, key, value val.Tuple, c return cb(key, value) } +// nullValidator enforces NOT NULL constraints on merge +type nullValidator struct { + // final is the merge result schema + final schema.Schema + // rightMap maps right-side value tuples to |final| + rightMap val.OrdinalMapping + // edits is the artifacts maps editor + edits *prolly.ArtifactsEditor + // theirRootish is the hash.Hash of the right-side + // rootish being merged 
+ theirRootish hash.Hash +} + +func newNullValidator(ctx context.Context, final schema.Schema, tm *TableMerger, vm *valueMerger, edits *prolly.ArtifactsEditor) (nullValidator, error) { + theirRootish, err := tm.rightSrc.HashOf() + if err != nil { + return nullValidator{}, err + } + return nullValidator{ + final: final, + rightMap: vm.rightMapping, + edits: edits, + theirRootish: theirRootish, + }, nil +} + +func (nv nullValidator) validateDiff(ctx context.Context, diff tree.ThreeWayDiff) (count int, err error) { + var violations []string + switch diff.Op { + case tree.DiffOpRightAdd, tree.DiffOpRightModify: + for to, from := range nv.rightMap { + col := nv.final.GetNonPKCols().GetByIndex(to) + if col.IsNullable() { + continue + } + if from < 0 { + // non-nullable column in |nv.final| does not exist + // on the right side of the merge, check if it will + // be populated with a default value + if col.Default == "" { + violations = append(violations, col.Name) + } + } else { + if diff.Right.FieldIsNull(from) { + violations = append(violations, col.Name) + } + } + } + } + if len(violations) > 0 { + var meta prolly.ConstraintViolationMeta + if meta, err = newNotNullViolationMeta(violations, diff.Right); err != nil { + return 0, err + } + err = nv.edits.ReplaceConstraintViolation(ctx, diff.Key, nv.theirRootish, prolly.ArtifactTypeNullViol, meta) + if err != nil { + return 0, err + } + } + return len(violations), nil +} + // conflictMerger processing primary key diffs // with conflict types into artifact table writes. 
type conflictMerger struct { @@ -596,12 +672,12 @@ func (m *primaryMerger) merge(ctx *sql.Context, diff tree.ThreeWayDiff, sourceSc return fmt.Errorf("cannot merge keyless tables with reordered columns") } } else { - tempTupleValue, err := remapTupleWithColumnDefaults(ctx, &diff.Right, sourceSch.GetValueDescriptor(), + tempTupleValue, err := remapTupleWithColumnDefaults(ctx, diff.Right, sourceSch.GetValueDescriptor(), m.valueMerger.rightMapping, m.tableMerger, m.finalSch, m.valueMerger.syncPool) if err != nil { return err } - newTupleValue = *tempTupleValue + newTupleValue = tempTupleValue } return m.mut.Put(ctx, diff.Key, newTupleValue) case tree.DiffOpRightDelete: @@ -732,7 +808,7 @@ func remapTuple(tuple val.Tuple, desc val.TupleDesc, mapping val.OrdinalMapping) // currently being merged and associated node store. |mergedSch| is the new schema of the table and is used to look up // column default values to apply to any existing rows when a new column is added as part of a merge. |pool| is used to // to allocate memory for the new tuple. A pointer to the new tuple data is returned, along with any error encountered. 
-func remapTupleWithColumnDefaults(ctx *sql.Context, tuple *val.Tuple, tupleDesc val.TupleDesc, mapping val.OrdinalMapping, tm *TableMerger, mergedSch schema.Schema, pool pool.BuffPool) (*val.Tuple, error) { +func remapTupleWithColumnDefaults(ctx *sql.Context, tuple val.Tuple, tupleDesc val.TupleDesc, mapping val.OrdinalMapping, tm *TableMerger, mergedSch schema.Schema, pool pool.BuffPool) (val.Tuple, error) { tb := val.NewTupleBuilder(mergedSch.GetValueDescriptor()) for to, from := range mapping { @@ -768,12 +844,10 @@ func remapTupleWithColumnDefaults(ctx *sql.Context, tuple *val.Tuple, tupleDesc } } } else { - tb.PutRaw(to, tupleDesc.GetField(from, *tuple)) + tb.PutRaw(to, tupleDesc.GetField(from, tuple)) } } - - newTuple := tb.Build(pool) - return &newTuple, nil + return tb.Build(pool), nil } func mergeTableArtifacts(ctx context.Context, tm *TableMerger, mergeTbl *doltdb.Table) (*doltdb.Table, error) { @@ -912,12 +986,12 @@ func migrateDataToMergedSchema(ctx *sql.Context, tm *TableMerger, vm *valueMerge return err } - newValueTuple, err := remapTupleWithColumnDefaults(ctx, &valueTuple, valueDescriptor, vm.leftMapping, tm, mergedSch, vm.syncPool) + newValueTuple, err := remapTupleWithColumnDefaults(ctx, valueTuple, valueDescriptor, vm.leftMapping, tm, mergedSch, vm.syncPool) if err != nil { return err } - err = mut.Put(ctx, keyTuple, *newValueTuple) + err = mut.Put(ctx, keyTuple, newValueTuple) if err != nil { return err } diff --git a/go/libraries/doltcore/merge/merge_rows.go b/go/libraries/doltcore/merge/merge_rows.go index cc2fac0174..271d514e66 100644 --- a/go/libraries/doltcore/merge/merge_rows.go +++ b/go/libraries/doltcore/merge/merge_rows.go @@ -318,10 +318,10 @@ func validateTupleFields(existingSch schema.Schema, targetSch schema.Schema) (bo return false, nil } - // If a not-null constraint was added, bail. 
- if existingVD.Types[existingIndex].Nullable && !targetVD.Types[targetIndex].Nullable { - return false, nil - } + //// If a not-null constraint was added, bail. + //if existingVD.Types[existingIndex].Nullable && !targetVD.Types[targetIndex].Nullable { + // return false, nil + //} // If the collation was changed, bail. // Different collations will affect the ordering of any secondary indexes using this column. diff --git a/go/libraries/doltcore/merge/schema_merge_test.go b/go/libraries/doltcore/merge/schema_merge_test.go index df50f4d452..f127dfffaa 100644 --- a/go/libraries/doltcore/merge/schema_merge_test.go +++ b/go/libraries/doltcore/merge/schema_merge_test.go @@ -66,6 +66,9 @@ func TestSchemaMerge(t *testing.T) { t.Run("column default tests", func(t *testing.T) { testSchemaMerge(t, columnDefaultTests) }) + t.Run("nullability tests", func(t *testing.T) { + testSchemaMerge(t, nullabilityTests) + }) t.Run("column type change tests", func(t *testing.T) { testSchemaMerge(t, typeChangeTests) }) @@ -286,6 +289,30 @@ var columnDefaultTests = []schemaMergeTest{ }, } +var nullabilityTests = []schemaMergeTest{ + { + name: "add not null column to empty table", + ancestor: tbl(sch("CREATE TABLE t (id int PRIMARY KEY) ")), + left: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL)")), + right: tbl(sch("CREATE TABLE t (id int PRIMARY KEY) ")), + merged: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL)")), + }, + { + name: "add not null constraint to existing column", + ancestor: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int) "), row(1, 1)), + left: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL)"), row(1, 1)), + right: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int) "), row(1, 1)), + merged: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL)"), row(1, 1)), + }, + { + name: "add not null column to non-empty table", + ancestor: tbl(sch("CREATE TABLE t (id int PRIMARY KEY) "), row(1)), + left: tbl(sch("CREATE TABLE t (id int 
PRIMARY KEY, a int NOT NULL DEFAULT '19')"), row(1, 1)), + right: tbl(sch("CREATE TABLE t (id int PRIMARY KEY) "), row(1), row(2)), + merged: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL DEFAULT '19')"), row(1, 1), row(2, 19)), + }, +} + var columnReorderingTests = []schemaMergeTest{} var typeChangeTests = []schemaMergeTest{ diff --git a/go/libraries/doltcore/merge/violations_fk.go b/go/libraries/doltcore/merge/violations_fk.go index 8cbbd4fc42..ecc3a5bfcc 100644 --- a/go/libraries/doltcore/merge/violations_fk.go +++ b/go/libraries/doltcore/merge/violations_fk.go @@ -57,6 +57,7 @@ const ( CvType_ForeignKey CvType = iota + 1 CvType_UniqueIndex CvType_CheckConstraint + CvType_NotNull ) type FKViolationReceiver interface { diff --git a/go/libraries/doltcore/merge/violations_unique_prolly.go b/go/libraries/doltcore/merge/violations_unique_prolly.go index aa2e03c11d..cb8b413160 100644 --- a/go/libraries/doltcore/merge/violations_unique_prolly.go +++ b/go/libraries/doltcore/merge/violations_unique_prolly.go @@ -16,6 +16,7 @@ package merge import ( "context" + "encoding/json" "fmt" "strings" @@ -149,3 +150,33 @@ func ordinalMappingFromIndex(def schema.Index) (m val.OrdinalMapping) { } return } + +type NullViolationMeta struct { + Columns []string `json:"Columns"` +} + +func newNotNullViolationMeta(violations []string, value val.Tuple) (prolly.ConstraintViolationMeta, error) { + info, err := json.Marshal(NullViolationMeta{Columns: violations}) + if err != nil { + return prolly.ConstraintViolationMeta{}, err + } + return prolly.ConstraintViolationMeta{ + VInfo: info, + Value: value, + }, nil +} + +func (m NullViolationMeta) Unmarshall(ctx *sql.Context) (val types.JSONDocument, err error) { + return types.JSONDocument{Val: m}, nil +} + +func (m NullViolationMeta) Compare(ctx *sql.Context, v types.JSONValue) (cmp int, err error) { + ours := types.JSONDocument{Val: m} + return ours.Compare(ctx, v) +} + +func (m NullViolationMeta) ToString(ctx *sql.Context) 
(string, error) { + return fmt.Sprintf("{Columns: [%s]}", strings.Join(m.Columns, ",")), nil +} + +var _ types.JSONValue = FkCVMeta{} diff --git a/go/libraries/doltcore/sqle/dtables/constraint_violations_prolly.go b/go/libraries/doltcore/sqle/dtables/constraint_violations_prolly.go index 3bbe6444a3..cbc593555a 100644 --- a/go/libraries/doltcore/sqle/dtables/constraint_violations_prolly.go +++ b/go/libraries/doltcore/sqle/dtables/constraint_violations_prolly.go @@ -117,6 +117,12 @@ func (cvt *prollyConstraintViolationsTable) PartitionRows(ctx *sql.Context, part return nil, err } kd, vd := sch.GetMapDescriptors() + + // value tuples encoded in ConstraintViolationMeta may + // violate the not null constraints assumed by fixed access + kd = kd.WithoutFixedAccess() + vd = vd.WithoutFixedAccess() + return prollyCVIter{ itr: itr, sch: sch, @@ -206,6 +212,13 @@ func (itr prollyCVIter) Next(ctx *sql.Context) (sql.Row, error) { return nil, err } r[o] = m + case prolly.ArtifactTypeNullViol: + var m merge.NullViolationMeta + err = json.Unmarshal(meta.VInfo, &m) + if err != nil { + return nil, err + } + r[o] = m default: panic("json not implemented for artifact type") } @@ -295,6 +308,8 @@ func mapCVType(artifactType prolly.ArtifactType) (outType uint64) { outType = uint64(merge.CvType_UniqueIndex) case prolly.ArtifactTypeChkConsViol: outType = uint64(merge.CvType_CheckConstraint) + case prolly.ArtifactTypeNullViol: + outType = uint64(merge.CvType_NotNull) default: panic("unhandled cv type") } @@ -309,6 +324,8 @@ func unmapCVType(in merge.CvType) (out prolly.ArtifactType) { out = prolly.ArtifactTypeUniqueKeyViol case merge.CvType_CheckConstraint: out = prolly.ArtifactTypeChkConsViol + case merge.CvType_NotNull: + out = prolly.ArtifactTypeNullViol default: panic("unhandled cv type") } diff --git a/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go b/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go index a52dcf8375..3251163479 100644 --- 
a/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go +++ b/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go @@ -1606,6 +1606,52 @@ var Dolt1MergeScripts = []queries.ScriptTest{ }, }, }, + { + Name: "try to merge a nullable field into a non-null column", + SetUpScript: []string{ + "SET dolt_force_transaction_commit = on;", + "create table test (pk int primary key, c0 int)", + "insert into test values (1,1),(3,3);", + "call dolt_commit('-Am', 'new table with NULL value');", + "call dolt_checkout('-b', 'other')", + "insert into test values (2,NULL);", + "call dolt_commit('-am', 'inserted null value')", + "call dolt_checkout('main');", + "alter table test modify c0 int not null;", + "insert into test values (4,4)", + "call dolt_commit('-am', 'modified column c0 to not null');", + }, + Assertions: []queries.ScriptTestAssertion{ + { + Query: "call dolt_merge('other')", + Expected: []sql.Row{{0, 1}}, + }, + { + Query: "select violation_type, pk, violation_info from dolt_constraint_violations_test", + Expected: []sql.Row{ + {uint16(4), 2, types.JSONDocument{Val: merge.NullViolationMeta{Columns: []string{"c0"}}}}, + }, + }, + }, + }, + { + Name: "dolt_revert() detects not null violation (issue #4527)", + SetUpScript: []string{ + "create table test2 (pk int primary key, c0 int)", + "insert into test2 values (1,1),(2,NULL),(3,3);", + "call dolt_commit('-Am', 'new table with NULL value');", + "delete from test2 where pk = 2;", + "call dolt_commit('-am', 'deleted row with NULL value');", + "alter table test2 modify c0 int not null", + "call dolt_commit('-am', 'modified column c0 to not null');", + }, + Assertions: []queries.ScriptTestAssertion{ + { + Query: "call dolt_revert('head~1');", + ExpectedErrStr: "revert currently does not handle constraint violations", + }, + }, + }, } var KeylessMergeCVsAndConflictsScripts = []queries.ScriptTest{ diff --git a/go/store/prolly/artifact_map.go b/go/store/prolly/artifact_map.go index 6f6b723ccd..b987d790bd 100644 --- 
a/go/store/prolly/artifact_map.go +++ b/go/store/prolly/artifact_map.go @@ -41,6 +41,11 @@ const ( ArtifactTypeUniqueKeyViol // ArtifactTypeChkConsViol is the type for check constraint violations. ArtifactTypeChkConsViol + // ArtifactTypeNullViol is the type for nullability violations. + ArtifactTypeNullViol +) + +const ( artifactMapPendingBufferSize = 650_000 ) @@ -191,11 +196,11 @@ func (m ArtifactMap) IterAll(ctx context.Context) (ArtifactIter, error) { } func (m ArtifactMap) IterAllCVs(ctx context.Context) (ArtifactIter, error) { - itr, err := m.iterAllOfTypes(ctx, ArtifactTypeForeignKeyViol, ArtifactTypeUniqueKeyViol, ArtifactTypeChkConsViol) - if err != nil { - return nil, err - } - return itr, nil + return m.iterAllOfTypes(ctx, + ArtifactTypeForeignKeyViol, + ArtifactTypeUniqueKeyViol, + ArtifactTypeChkConsViol, + ArtifactTypeNullViol) } // IterAllConflicts returns an iterator for the conflicts. @@ -495,7 +500,7 @@ var _ ArtifactIter = multiArtifactTypeItr{} // newMultiArtifactTypeItr creates an iter that iterates an artifact if its type exists in |types|. func newMultiArtifactTypeItr(itr ArtifactIter, types []ArtifactType) multiArtifactTypeItr { - members := make([]bool, 5) + members := make([]bool, 6) for _, t := range types { members[uint8(t)] = true } diff --git a/go/store/val/tuple_descriptor.go b/go/store/val/tuple_descriptor.go index b7a3b1fcef..7af7a38f18 100644 --- a/go/store/val/tuple_descriptor.go +++ b/go/store/val/tuple_descriptor.go @@ -176,6 +176,11 @@ func (td TupleDesc) GetFixedAccess() FixedAccess { return td.fast } +// WithoutFixedAccess returns a copy of |td| without fixed access metadata. +func (td TupleDesc) WithoutFixedAccess() TupleDesc { + return TupleDesc{Types: td.Types, cmp: td.cmp} +} + // GetBool reads a bool from the ith field of the Tuple. // If the ith field is NULL, |ok| is set to false. 
func (td TupleDesc) GetBool(i int, tup Tuple) (v bool, ok bool) { From 2f191f544e9e01434bce4f377693bf17c689f1dc Mon Sep 17 00:00:00 2001 From: Andy Arthur Date: Tue, 9 May 2023 16:36:14 -0700 Subject: [PATCH 05/82] go/doltcore/sqle: handle un-mergable not-null violations from the left side --- .../doltcore/merge/merge_prolly_rows.go | 34 ++++++++- go/libraries/doltcore/merge/merge_rows.go | 12 --- .../sqle/enginetest/dolt_engine_test.go | 75 +++++++++++++++++++ .../sqle/enginetest/dolt_queries_merge.go | 27 +++++-- 4 files changed, 127 insertions(+), 21 deletions(-) diff --git a/go/libraries/doltcore/merge/merge_prolly_rows.go b/go/libraries/doltcore/merge/merge_prolly_rows.go index 9971a80750..f2e520df32 100644 --- a/go/libraries/doltcore/merge/merge_prolly_rows.go +++ b/go/libraries/doltcore/merge/merge_prolly_rows.go @@ -156,7 +156,7 @@ func mergeProllyTableData(ctx *sql.Context, tm *TableMerger, finalSch schema.Sch return nil, nil, err } - // validator shares artifact editor with conflict merge + // validator shares an artifact editor with conflict merge uniq, err := newUniqValidator(ctx, finalSch, tm, valueMerger, ae) if err != nil { return nil, nil, err @@ -507,10 +507,11 @@ func (idx uniqIndex) findCollisions(ctx context.Context, key, value val.Tuple, c // nullValidator enforces NOT NULL constraints on merge type nullValidator struct { + table string // final is the merge result schema final schema.Schema - // rightMap maps right-side value tuples to |final| - rightMap val.OrdinalMapping + // leftMap and rightMap map value tuples to |final| + leftMap, rightMap val.OrdinalMapping // edits is the artifacts maps editor edits *prolly.ArtifactsEditor // theirRootish is the hash.Hash of the right-side @@ -524,7 +525,9 @@ func newNullValidator(ctx context.Context, final schema.Schema, tm *TableMerger, return nullValidator{}, err } return nullValidator{ + table: tm.name, final: final, + leftMap: vm.leftMapping, rightMap: vm.rightMapping, edits: edits, theirRootish: 
theirRootish, @@ -553,6 +556,31 @@ func (nv nullValidator) validateDiff(ctx context.Context, diff tree.ThreeWayDiff } } } + case tree.DiffOpLeftAdd, tree.DiffOpLeftModify: + for to, from := range nv.leftMap { + col := nv.final.GetNonPKCols().GetByIndex(to) + if col.IsNullable() { + continue + } + if from < 0 { + // non-nullable column in |nv.final| does not exist + // on the left side of the merge, check if it will + // be populated with a default value + if col.Default == "" { + // todo: we cannot record row-level conflicts originating from + // the left side of the merge, this should be a schema conflict + return 0, fmt.Errorf("table %s can't be automatically merged.\n"+ + "To merge this table, make the schema on the source and target branch equal.", nv.table) + } + } else { + if diff.Left.FieldIsNull(from) { + // todo: we cannot record row-level conflicts originating from + // the left side of the merge, this should be a schema conflict + return 0, fmt.Errorf("table %s can't be automatically merged.\n"+ + "To merge this table, make the schema on the source and target branch equal.", nv.table) + } + } + } } if len(violations) > 0 { var meta prolly.ConstraintViolationMeta diff --git a/go/libraries/doltcore/merge/merge_rows.go b/go/libraries/doltcore/merge/merge_rows.go index 271d514e66..0a6e03fd42 100644 --- a/go/libraries/doltcore/merge/merge_rows.go +++ b/go/libraries/doltcore/merge/merge_rows.go @@ -299,9 +299,7 @@ func (rm *RootMerger) maybeShortCircuit(ctx context.Context, tm *TableMerger, op } func validateTupleFields(existingSch schema.Schema, targetSch schema.Schema) (bool, error) { - existingVD := existingSch.GetValueDescriptor() targetVD := targetSch.GetValueDescriptor() - _, valMapping, err := schema.MapSchemaBasedOnTagAndName(existingSch, targetSch) if err != nil { return false, err @@ -313,16 +311,6 @@ func validateTupleFields(existingSch schema.Schema, targetSch schema.Schema) (bo continue } - // If the field types have changed between existing and 
target, bail. - if existingVD.Types[existingIndex].Enc != targetVD.Types[targetIndex].Enc { - return false, nil - } - - //// If a not-null constraint was added, bail. - //if existingVD.Types[existingIndex].Nullable && !targetVD.Types[targetIndex].Nullable { - // return false, nil - //} - // If the collation was changed, bail. // Different collations will affect the ordering of any secondary indexes using this column. existingStr, ok1 := existingSch.GetNonPKCols().GetByIndex(existingIndex).TypeInfo.ToSqlType().(sql.StringType) diff --git a/go/libraries/doltcore/sqle/enginetest/dolt_engine_test.go b/go/libraries/doltcore/sqle/enginetest/dolt_engine_test.go index 3543d6b906..b6973e1e11 100644 --- a/go/libraries/doltcore/sqle/enginetest/dolt_engine_test.go +++ b/go/libraries/doltcore/sqle/enginetest/dolt_engine_test.go @@ -158,6 +158,81 @@ func TestSingleScript(t *testing.T) { } } +// Convenience test for debugging a single query. Unskip and set to the desired query. +func TestSingleMergeScript(t *testing.T) { + var scripts = []MergeScriptTest{ + { + Name: "adding a not-null constraint and default value to a column", + AncSetUpScript: []string{ + "set dolt_force_transaction_commit = on;", + "create table t (pk int primary key, col1 int);", + "insert into t values (1, null), (2, null);", + }, + RightSetUpScript: []string{ + "update t set col1 = 9999 where col1 is null;", + "alter table t modify column col1 int not null default 9999;", + "insert into t values (3, 30), (4, 40);", + }, + LeftSetUpScript: []string{ + "insert into t values (5, null), (6, null);", + }, + Assertions: []queries.ScriptTestAssertion{ + { + Query: "call dolt_merge('right');", + }, + { + Query: "select pk, col1 from t;", + Expected: []sql.Row{ + {1, 9999}, + {2, 9999}, + {3, 30}, + {4, 40}, + }, + }, + }, + }, + { + Name: "adding a not-null constraint to one side", + AncSetUpScript: []string{ + "set dolt_force_transaction_commit = on;", + "create table t (pk int primary key, col1 int);", + "insert 
into t values (1, null), (2, null);", + }, + RightSetUpScript: []string{ + "update t set col1 = 0 where col1 is null;", + "alter table t modify col1 int not null;", + }, + LeftSetUpScript: []string{ + "insert into t values (3, null);", + }, + Assertions: []queries.ScriptTestAssertion{ + { + Query: "call dolt_merge('right');", + }, + { + Skip: true, + Query: "select pk, col1 from t;", + Expected: []sql.Row{ + {1, 0}, + {2, 0}, + }, + }, + { + Query: "select violation_type, pk from dolt_constraint_violations_t", + Expected: []sql.Row{ + {uint16(4), 3}, + }, + }, + }, + }, + } + + for _, test := range scripts { + enginetest.TestScript(t, newDoltHarness(t), convertMergeScriptTest(test, true)) + //enginetest.TestScript(t, harness, convertMergeScriptTest(test, false)) + } +} + func TestSingleQueryPrepared(t *testing.T) { t.Skip() diff --git a/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go b/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go index 3251163479..50e2e8e460 100644 --- a/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go +++ b/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go @@ -4657,6 +4657,7 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{ { Name: "adding a not-null constraint and default value to a column", AncSetUpScript: []string{ + "set dolt_force_transaction_commit = on;", "create table t (pk int primary key, col1 int);", "insert into t values (1, null), (2, null);", }, @@ -4670,8 +4671,8 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{ }, Assertions: []queries.ScriptTestAssertion{ { - Query: "call dolt_merge('right');", - ExpectedErrStr: fmt.Sprintf(errTmplNoAutomaticMerge, "t"), + Skip: true, + Query: "call dolt_merge('right');", }, { Skip: true, @@ -4681,8 +4682,6 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{ {2, 9999}, {3, 30}, {4, 40}, - {5, 9999}, - {6, 9999}, }, }, }, @@ -4690,6 +4689,7 @@ var ThreeWayMergeWithSchemaChangeTestScripts = 
[]MergeScriptTest{ { Name: "adding a not-null constraint to one side", AncSetUpScript: []string{ + "set dolt_force_transaction_commit = on;", "create table t (pk int primary key, col1 int);", "insert into t values (1, null), (2, null);", }, @@ -4702,8 +4702,23 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{ }, Assertions: []queries.ScriptTestAssertion{ { - Query: "call dolt_merge('right');", - ExpectedErrStr: fmt.Sprintf(errTmplNoAutomaticMerge, "t"), + Skip: true, + Query: "call dolt_merge('right');", + }, + { + Skip: true, + Query: "select pk, col1 from t;", + Expected: []sql.Row{ + {1, 0}, + {2, 0}, + }, + }, + { + Skip: true, + Query: "select violation_type, pk, violation_info from dolt_constraint_violations_t", + Expected: []sql.Row{ + {uint16(4), 3, types.JSONDocument{Val: merge.NullViolationMeta{Columns: []string{"col1"}}}}, + }, }, }, }, From e8a13711587071767c029ac931505574680da641 Mon Sep 17 00:00:00 2001 From: Andy Arthur Date: Tue, 9 May 2023 17:26:06 -0700 Subject: [PATCH 06/82] go/doltcore/merge: fix default value logic, skip non-symmetric tests --- .../doltcore/merge/merge_prolly_rows.go | 5 ++++- go/libraries/doltcore/merge/merge_rows.go | 16 +++++++++++----- .../doltcore/merge/schema_merge_test.go | 17 ++++++++++++----- .../sqle/enginetest/dolt_queries_merge.go | 5 ++--- 4 files changed, 29 insertions(+), 14 deletions(-) diff --git a/go/libraries/doltcore/merge/merge_prolly_rows.go b/go/libraries/doltcore/merge/merge_prolly_rows.go index f2e520df32..fb1f39a4ba 100644 --- a/go/libraries/doltcore/merge/merge_prolly_rows.go +++ b/go/libraries/doltcore/merge/merge_prolly_rows.go @@ -865,7 +865,10 @@ func remapTupleWithColumnDefaults(ctx *sql.Context, tuple val.Tuple, tupleDesc v if err != nil { return nil, err } - + value, _, err = col.TypeInfo.ToSqlType().Convert(value) + if err != nil { + return nil, err + } err = index.PutField(ctx, tm.ns, tb, to, value) if err != nil { return nil, err diff --git 
a/go/libraries/doltcore/merge/merge_rows.go b/go/libraries/doltcore/merge/merge_rows.go index 0a6e03fd42..c590704060 100644 --- a/go/libraries/doltcore/merge/merge_rows.go +++ b/go/libraries/doltcore/merge/merge_rows.go @@ -299,6 +299,7 @@ func (rm *RootMerger) maybeShortCircuit(ctx context.Context, tm *TableMerger, op } func validateTupleFields(existingSch schema.Schema, targetSch schema.Schema) (bool, error) { + existingVD := existingSch.GetValueDescriptor() targetVD := targetSch.GetValueDescriptor() _, valMapping, err := schema.MapSchemaBasedOnTagAndName(existingSch, targetSch) if err != nil { @@ -311,6 +312,11 @@ func validateTupleFields(existingSch schema.Schema, targetSch schema.Schema) (bo continue } + // If the field types have changed between existing and target, bail. + if existingVD.Types[existingIndex].Enc != targetVD.Types[targetIndex].Enc { + return false, nil + } + // If the collation was changed, bail. // Different collations will affect the ordering of any secondary indexes using this column. existingStr, ok1 := existingSch.GetNonPKCols().GetByIndex(existingIndex).TypeInfo.ToSqlType().(sql.StringType) @@ -326,18 +332,18 @@ func validateTupleFields(existingSch schema.Schema, targetSch schema.Schema) (bo return false, err } - for i, j := range valMapping { - if i == j { + for targetIndex, existingIndex := range valMapping { + if targetIndex == existingIndex { continue } + col := targetSch.GetNonPKCols().GetByIndex(targetIndex) // If we haven't bailed so far, then these fields were added at the end. - // If they are not-null bail. - if !targetVD.Types[i].Nullable { + // If one of these fields is NOT NULL, without a default value, then fail. 
+ if !col.IsNullable() && col.Default == "" { return false, nil } } - return true, nil } diff --git a/go/libraries/doltcore/merge/schema_merge_test.go b/go/libraries/doltcore/merge/schema_merge_test.go index f127dfffaa..d8084d8e5a 100644 --- a/go/libraries/doltcore/merge/schema_merge_test.go +++ b/go/libraries/doltcore/merge/schema_merge_test.go @@ -301,15 +301,15 @@ var nullabilityTests = []schemaMergeTest{ name: "add not null constraint to existing column", ancestor: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int) "), row(1, 1)), left: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL)"), row(1, 1)), - right: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int) "), row(1, 1)), - merged: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL)"), row(1, 1)), + right: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int) "), row(1, 1), row(2, 2)), + merged: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL)"), row(1, 1), row(2, 2)), }, { name: "add not null column to non-empty table", ancestor: tbl(sch("CREATE TABLE t (id int PRIMARY KEY) "), row(1)), - left: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL DEFAULT '19')"), row(1, 1)), + left: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL DEFAULT '19')"), row(1, 19)), right: tbl(sch("CREATE TABLE t (id int PRIMARY KEY) "), row(1), row(2)), - merged: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL DEFAULT '19')"), row(1, 1), row(2, 19)), + merged: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL DEFAULT '19')"), row(1, 19), row(2, 19)), }, } @@ -540,7 +540,14 @@ func testSchemaMergeHelper(t *testing.T, tests []schemaMergeTest, flipSides bool for name, addr := range exp { a, ok := act[name] assert.True(t, ok) - assert.Equal(t, addr, a) + if !assert.Equal(t, addr, a) { + expTbl, _, err := m.GetTable(ctx, name) + require.NoError(t, err) + t.Logf("expected rows: %s", expTbl.DebugString(ctx)) + actTbl, _, err := result.Root.GetTable(ctx, name) + 
require.NoError(t, err) + t.Logf("actual rows: %s", actTbl.DebugString(ctx)) + } } } }) diff --git a/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go b/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go index 50e2e8e460..c9a016d747 100644 --- a/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go +++ b/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go @@ -15,7 +15,6 @@ package enginetest import ( - "fmt" "strings" "github.com/dolthub/go-mysql-server/enginetest/queries" @@ -4630,6 +4629,7 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{ { Name: "adding a non-null column with a default value to one side", AncSetUpScript: []string{ + "set dolt_force_transaction_commit = on;", "create table t (pk int primary key, col1 int);", "insert into t values (1, 1);", }, @@ -4644,8 +4644,7 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{ }, Assertions: []queries.ScriptTestAssertion{ { - Query: "call dolt_merge('right');", - ExpectedErrStr: fmt.Sprintf(errTmplNoAutomaticMerge, "t"), + Query: "call dolt_merge('right');", }, { Skip: true, From 2bccd3241000274718a24d97f640830a61aae9cd Mon Sep 17 00:00:00 2001 From: Brian Hendriks Date: Wed, 10 May 2023 12:06:25 -0700 Subject: [PATCH 07/82] add replication metrics --- .../commands/sqlserver/metrics_listener.go | 109 +++++++++++++++++- go/cmd/dolt/commands/sqlserver/server.go | 2 +- 2 files changed, 109 insertions(+), 2 deletions(-) diff --git a/go/cmd/dolt/commands/sqlserver/metrics_listener.go b/go/cmd/dolt/commands/sqlserver/metrics_listener.go index a6f00cb52e..3ba1e3e6c6 100644 --- a/go/cmd/dolt/commands/sqlserver/metrics_listener.go +++ b/go/cmd/dolt/commands/sqlserver/metrics_listener.go @@ -16,6 +16,9 @@ package sqlserver import ( "fmt" + "github.com/dolthub/dolt/go/libraries/doltcore/sqle/cluster" + "github.com/dolthub/dolt/go/libraries/doltcore/sqle/clusterdb" + "sync" "time" "github.com/dolthub/dolt/go/libraries/utils/version" @@ -24,19 +27,35 @@ 
import ( "github.com/prometheus/client_golang/prometheus" ) +const ( + clusterUpdateInterval = time.Second * 5 +) + var _ server.ServerEventListener = (*metricsListener)(nil) type metricsListener struct { + labels prometheus.Labels + cntConnections prometheus.Counter cntDisconnects prometheus.Counter gaugeConcurrentConn prometheus.Gauge gaugeConcurrentQueries prometheus.Gauge histQueryDur prometheus.Histogram gaugeVersion prometheus.Gauge + + // replication metrics + dbToIsReplica map[string]prometheus.Gauge + dbToReplicationLag map[string]prometheus.Gauge + + // used in updating cluster metrics + clusterStatus clusterdb.ClusterStatusProvider + mu *sync.Mutex + done bool } -func newMetricsListener(labels prometheus.Labels, versionStr string) (*metricsListener, error) { +func newMetricsListener(labels prometheus.Labels, versionStr string, clusterStatus clusterdb.ClusterStatusProvider) (*metricsListener, error) { ml := &metricsListener{ + labels: labels, cntConnections: prometheus.NewCounter(prometheus.CounterOpts{ Name: "dss_connects", Help: "Count of server connects", @@ -68,6 +87,10 @@ func newMetricsListener(labels prometheus.Labels, versionStr string) (*metricsLi Help: "The version of dolt currently running on the machine", ConstLabels: labels, }), + dbToIsReplica: make(map[string]prometheus.Gauge), + dbToReplicationLag: make(map[string]prometheus.Gauge), + clusterStatus: clusterStatus, + mu: &sync.Mutex{}, } u32Version, err := version.Encode(versionStr) @@ -88,10 +111,78 @@ func newMetricsListener(labels prometheus.Labels, versionStr string) (*metricsLi prometheus.MustRegister(ml.gaugeConcurrentQueries) prometheus.MustRegister(ml.histQueryDur) + go func() { + for ml.updateReplMetrics(ml.clusterStatus.GetClusterStatus()) { + time.Sleep(clusterUpdateInterval) + } + }() + ml.gaugeVersion.Set(f64Version) return ml, nil } +func (ml *metricsListener) updateReplMetrics(perDbStatus []clusterdb.ReplicaStatus) bool { + ml.mu.Lock() + defer ml.mu.Unlock() + + if ml.done { 
+ return false + } + + dbNames := make(map[string]struct{}) + + for _, status := range perDbStatus { + dbName := status.Database + dbNames[dbName] = struct{}{} + + // if we haven't seen this db before, register the metrics + if _, ok := ml.dbToIsReplica[dbName]; !ok { + labels := prometheus.Labels{"database": dbName} + for k, v := range ml.labels { + labels[k] = v + } + + ml.dbToIsReplica[dbName] = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "dss_is_replica", + Help: "Whether this dolt sql server is a replica of the database", + ConstLabels: labels, + }) + ml.dbToReplicationLag[dbName] = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "dss_replication_lag", + Help: "The replication lag of this dolt sql server from the master", + ConstLabels: labels, + }) + + prometheus.MustRegister(ml.dbToIsReplica[dbName]) + prometheus.MustRegister(ml.dbToReplicationLag[dbName]) + } + + // update the metrics + isReplica := ml.dbToIsReplica[dbName] + replicationLag := ml.dbToReplicationLag[dbName] + + isReplica.Set(1.0) + if status.Role == string(cluster.RolePrimary) { + isReplica.Set(0.0) + } + + replicationLag.Set(float64(status.ReplicationLag.Milliseconds())) + } + + // deregister metrics for deleted databases + for dbName := range ml.dbToIsReplica { + if _, ok := dbNames[dbName]; !ok { + isReplica := ml.dbToIsReplica[dbName] + replicationLag := ml.dbToReplicationLag[dbName] + + prometheus.Unregister(isReplica) + prometheus.Unregister(replicationLag) + } + } + + return true +} + func (ml *metricsListener) ClientConnected() { ml.gaugeConcurrentConn.Add(1.0) ml.cntConnections.Add(1.0) @@ -118,4 +209,20 @@ func (ml *metricsListener) Close() { prometheus.Unregister(ml.gaugeConcurrentConn) prometheus.Unregister(ml.gaugeConcurrentQueries) prometheus.Unregister(ml.histQueryDur) + + ml.closeReplicationMetrics() } + +func (ml *metricsListener) closeReplicationMetrics() { + ml.mu.Lock() + defer ml.mu.Unlock() + for dbName := range ml.dbToIsReplica { + isReplica := 
ml.dbToIsReplica[dbName] + replicationLag := ml.dbToReplicationLag[dbName] + + prometheus.Unregister(isReplica) + prometheus.Unregister(replicationLag) + } + + ml.done = true +} \ No newline at end of file diff --git a/go/cmd/dolt/commands/sqlserver/server.go b/go/cmd/dolt/commands/sqlserver/server.go index 1fffec26b9..46bf71d141 100644 --- a/go/cmd/dolt/commands/sqlserver/server.go +++ b/go/cmd/dolt/commands/sqlserver/server.go @@ -178,7 +178,7 @@ func Serve( labels := serverConfig.MetricsLabels() var listener *metricsListener - listener, startError = newMetricsListener(labels, version) + listener, startError = newMetricsListener(labels, version, clusterController) if startError != nil { cli.Println(startError) return From e54fdb7025346ec86572d1b93a73424687ac5403 Mon Sep 17 00:00:00 2001 From: Brian Hendriks Date: Wed, 10 May 2023 12:08:53 -0700 Subject: [PATCH 08/82] refactor --- go/cmd/dolt/commands/sqlserver/metrics_listener.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/go/cmd/dolt/commands/sqlserver/metrics_listener.go b/go/cmd/dolt/commands/sqlserver/metrics_listener.go index 3ba1e3e6c6..b5fb432a86 100644 --- a/go/cmd/dolt/commands/sqlserver/metrics_listener.go +++ b/go/cmd/dolt/commands/sqlserver/metrics_listener.go @@ -112,7 +112,7 @@ func newMetricsListener(labels prometheus.Labels, versionStr string, clusterStat prometheus.MustRegister(ml.histQueryDur) go func() { - for ml.updateReplMetrics(ml.clusterStatus.GetClusterStatus()) { + for ml.updateReplMetrics() { time.Sleep(clusterUpdateInterval) } }() @@ -121,7 +121,7 @@ func newMetricsListener(labels prometheus.Labels, versionStr string, clusterStat return ml, nil } -func (ml *metricsListener) updateReplMetrics(perDbStatus []clusterdb.ReplicaStatus) bool { +func (ml *metricsListener) updateReplMetrics() bool { ml.mu.Lock() defer ml.mu.Unlock() @@ -129,6 +129,7 @@ func (ml *metricsListener) updateReplMetrics(perDbStatus []clusterdb.ReplicaStat return false } + perDbStatus := 
ml.clusterStatus.GetClusterStatus() dbNames := make(map[string]struct{}) for _, status := range perDbStatus { From 11d94c3bca8dd0e403eed94b66c034390e161b09 Mon Sep 17 00:00:00 2001 From: Nick Tobey Date: Wed, 10 May 2023 13:09:54 -0700 Subject: [PATCH 09/82] Migrate uses of `CWBHeadRef` and `CWBHeadSpec` in dprocedures and dfunctions. --- .../doltcore/sqle/dfunctions/dolt_merge_base.go | 9 +++++++-- .../doltcore/sqle/dprocedures/dolt_checkout.go | 7 ++++++- .../doltcore/sqle/dprocedures/dolt_cherry_pick.go | 6 +++++- .../doltcore/sqle/dprocedures/dolt_merge.go | 13 +++++++++++-- go/libraries/doltcore/sqle/dprocedures/dolt_pull.go | 6 +++++- .../doltcore/sqle/dprocedures/dolt_reset.go | 6 +++++- 6 files changed, 39 insertions(+), 8 deletions(-) diff --git a/go/libraries/doltcore/sqle/dfunctions/dolt_merge_base.go b/go/libraries/doltcore/sqle/dfunctions/dolt_merge_base.go index 38cac2809a..cdca5af2e4 100644 --- a/go/libraries/doltcore/sqle/dfunctions/dolt_merge_base.go +++ b/go/libraries/doltcore/sqle/dfunctions/dolt_merge_base.go @@ -94,11 +94,16 @@ func resolveRefSpecs(ctx *sql.Context, leftSpec, rightSpec string) (left, right return nil, nil, sql.ErrDatabaseNotFound.New(dbName) } - left, err = doltDB.Resolve(ctx, lcs, dbData.Rsr.CWBHeadRef()) + headRef, err := dbData.Rsr.CWBHeadRef() if err != nil { return nil, nil, err } - right, err = doltDB.Resolve(ctx, rcs, dbData.Rsr.CWBHeadRef()) + + left, err = doltDB.Resolve(ctx, lcs, headRef) + if err != nil { + return nil, nil, err + } + right, err = doltDB.Resolve(ctx, rcs, headRef) if err != nil { return nil, nil, err } diff --git a/go/libraries/doltcore/sqle/dprocedures/dolt_checkout.go b/go/libraries/doltcore/sqle/dprocedures/dolt_checkout.go index 21ef428198..1b2b8413ca 100644 --- a/go/libraries/doltcore/sqle/dprocedures/dolt_checkout.go +++ b/go/libraries/doltcore/sqle/dprocedures/dolt_checkout.go @@ -220,7 +220,12 @@ func checkoutRemoteBranch(ctx *sql.Context, dbName string, dbData env.DbData, br return 
errhand.BuildDError(fmt.Errorf("%w: '%s'", err, remoteRef.GetRemote()).Error()).Build() } - return env.SetRemoteUpstreamForRefSpec(dbData.Rsw, refSpec, remoteRef.GetRemote(), dbData.Rsr.CWBHeadRef()) + headRef, err := dbData.Rsr.CWBHeadRef() + if err != nil { + return err + } + + return env.SetRemoteUpstreamForRefSpec(dbData.Rsw, refSpec, remoteRef.GetRemote(), headRef) } else { return fmt.Errorf("'%s' matched multiple (%v) remote tracking branches", branchName, len(remoteRefs)) } diff --git a/go/libraries/doltcore/sqle/dprocedures/dolt_cherry_pick.go b/go/libraries/doltcore/sqle/dprocedures/dolt_cherry_pick.go index 132a9804ec..c73d138f4c 100644 --- a/go/libraries/doltcore/sqle/dprocedures/dolt_cherry_pick.go +++ b/go/libraries/doltcore/sqle/dprocedures/dolt_cherry_pick.go @@ -136,7 +136,11 @@ func cherryPick(ctx *sql.Context, dSess *dsess.DoltSession, roots doltdb.Roots, if err != nil { return nil, "", err } - cherryCommit, err := doltDB.Resolve(ctx, cherryCommitSpec, dbData.Rsr.CWBHeadRef()) + headRef, err := dbData.Rsr.CWBHeadRef() + if err != nil { + return nil, "", err + } + cherryCommit, err := doltDB.Resolve(ctx, cherryCommitSpec, headRef) if err != nil { return nil, "", err } diff --git a/go/libraries/doltcore/sqle/dprocedures/dolt_merge.go b/go/libraries/doltcore/sqle/dprocedures/dolt_merge.go index a098e288f4..a587de2328 100644 --- a/go/libraries/doltcore/sqle/dprocedures/dolt_merge.go +++ b/go/libraries/doltcore/sqle/dprocedures/dolt_merge.go @@ -120,7 +120,12 @@ func doDoltMerge(ctx *sql.Context, args []string) (int, int, error) { if !ok { return noConflictsOrViolations, threeWayMerge, fmt.Errorf("Could not load database %s", dbName) } - msg := fmt.Sprintf("Merge branch '%s' into %s", branchName, dbData.Rsr.CWBHeadRef().GetPath()) + + headRef, err := dbData.Rsr.CWBHeadRef() + if err != nil { + return noConflictsOrViolations, threeWayMerge, err + } + msg := fmt.Sprintf("Merge branch '%s' into %s", branchName, headRef.GetPath()) if userMsg, mOk := 
apr.GetValue(cli.MessageArg); mOk { msg = userMsg } @@ -266,7 +271,11 @@ func executeFFMerge(ctx *sql.Context, dbName string, squash bool, ws *doltdb.Wor // TODO: This is all incredibly suspect, needs to be replaced with library code that is functional instead of // altering global state if !squash { - err = dbData.Ddb.FastForward(ctx, dbData.Rsr.CWBHeadRef(), cm2) + headRef, err := dbData.Rsr.CWBHeadRef() + if err != nil { + return nil, err + } + err = dbData.Ddb.FastForward(ctx, headRef, cm2) if err != nil { return ws, err } diff --git a/go/libraries/doltcore/sqle/dprocedures/dolt_pull.go b/go/libraries/doltcore/sqle/dprocedures/dolt_pull.go index 3f600c3a02..98cf54fbdb 100644 --- a/go/libraries/doltcore/sqle/dprocedures/dolt_pull.go +++ b/go/libraries/doltcore/sqle/dprocedures/dolt_pull.go @@ -148,7 +148,11 @@ func doDoltPull(ctx *sql.Context, args []string) (int, int, error) { return noConflictsOrViolations, threeWayMerge, err } - msg := fmt.Sprintf("Merge branch '%s' of %s into %s", pullSpec.Branch.GetPath(), pullSpec.Remote.Url, dbData.Rsr.CWBHeadRef().GetPath()) + headRef, err := dbData.Rsr.CWBHeadRef() + if err != nil { + return noConflictsOrViolations, threeWayMerge, err + } + msg := fmt.Sprintf("Merge branch '%s' of %s into %s", pullSpec.Branch.GetPath(), pullSpec.Remote.Url, headRef.GetPath()) ws, conflicts, fastForward, err = performMerge(ctx, sess, roots, ws, dbName, mergeSpec, apr.Contains(cli.NoCommitFlag), msg) if err != nil && !errors.Is(doltdb.ErrUpToDate, err) { return conflicts, fastForward, err diff --git a/go/libraries/doltcore/sqle/dprocedures/dolt_reset.go b/go/libraries/doltcore/sqle/dprocedures/dolt_reset.go index b23ddeaab6..d6d4269d38 100644 --- a/go/libraries/doltcore/sqle/dprocedures/dolt_reset.go +++ b/go/libraries/doltcore/sqle/dprocedures/dolt_reset.go @@ -100,7 +100,11 @@ func doDoltReset(ctx *sql.Context, args []string) (int, error) { // TODO: this overrides the transaction setting, needs to happen at commit, not here if newHead 
!= nil { - if err := dbData.Ddb.SetHeadToCommit(ctx, dbData.Rsr.CWBHeadRef(), newHead); err != nil { + headRef, err := dbData.Rsr.CWBHeadRef() + if err != nil { + return 1, err + } + if err := dbData.Ddb.SetHeadToCommit(ctx, headRef, newHead); err != nil { return 1, err } } From 29ed451728653f522a1867ed20286609acd38700 Mon Sep 17 00:00:00 2001 From: Andy Arthur Date: Wed, 10 May 2023 13:53:00 -0700 Subject: [PATCH 10/82] go/store/prolly: rename 'their' fields to 'source' to clarify artifact map semantics --- .../dtables/constraint_violations_prolly.go | 4 +-- go/store/prolly/artifact_map.go | 30 +++++++++---------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/go/libraries/doltcore/sqle/dtables/constraint_violations_prolly.go b/go/libraries/doltcore/sqle/dtables/constraint_violations_prolly.go index cbc593555a..67cab70df2 100644 --- a/go/libraries/doltcore/sqle/dtables/constraint_violations_prolly.go +++ b/go/libraries/doltcore/sqle/dtables/constraint_violations_prolly.go @@ -161,7 +161,7 @@ func (itr prollyCVIter) Next(ctx *sql.Context) (sql.Row, error) { } r := make(sql.Row, itr.sch.GetAllCols().Size()+3) - r[0] = art.TheirRootIsh.String() + r[0] = art.SourceRootish.String() r[1] = mapCVType(art.ArtType) var meta prolly.ConstraintViolationMeta @@ -173,7 +173,7 @@ func (itr prollyCVIter) Next(ctx *sql.Context) (sql.Row, error) { o := 2 if !schema.IsKeyless(itr.sch) { for i := 0; i < itr.kd.Count(); i++ { - r[o+i], err = index.GetField(ctx, itr.kd, i, art.Key, itr.ns) + r[o+i], err = index.GetField(ctx, itr.kd, i, art.SourceKey, itr.ns) if err != nil { return nil, err } diff --git a/go/store/prolly/artifact_map.go b/go/store/prolly/artifact_map.go index b987d790bd..fb92d45a63 100644 --- a/go/store/prolly/artifact_map.go +++ b/go/store/prolly/artifact_map.go @@ -323,11 +323,11 @@ type ArtifactsEditor struct { pool pool.BuffPool } -func (wr *ArtifactsEditor) Add(ctx context.Context, srcKey val.Tuple, theirRootIsh hash.Hash, artType ArtifactType, 
meta []byte) error { +func (wr *ArtifactsEditor) Add(ctx context.Context, srcKey val.Tuple, srcRootish hash.Hash, artType ArtifactType, meta []byte) error { for i := 0; i < srcKey.Count(); i++ { wr.artKB.PutRaw(i, srcKey.GetField(i)) } - wr.artKB.PutCommitAddr(srcKey.Count(), theirRootIsh) + wr.artKB.PutCommitAddr(srcKey.Count(), srcRootish) wr.artKB.PutUint8(srcKey.Count()+1, uint8(artType)) key := wr.artKB.Build(wr.pool) @@ -342,7 +342,7 @@ func (wr *ArtifactsEditor) Add(ctx context.Context, srcKey val.Tuple, theirRootI // the given will be inserted. Returns true if a violation was replaced. If an // existing violation exists but has a different |meta.VInfo| value then // ErrMergeArtifactCollision is a returned. -func (wr *ArtifactsEditor) ReplaceConstraintViolation(ctx context.Context, srcKey val.Tuple, theirRootIsh hash.Hash, artType ArtifactType, meta ConstraintViolationMeta) error { +func (wr *ArtifactsEditor) ReplaceConstraintViolation(ctx context.Context, srcKey val.Tuple, srcRootish hash.Hash, artType ArtifactType, meta ConstraintViolationMeta) error { itr, err := wr.mut.IterRange(ctx, PrefixRange(srcKey, wr.srcKeyDesc)) if err != nil { return err @@ -360,7 +360,7 @@ func (wr *ArtifactsEditor) ReplaceConstraintViolation(ctx context.Context, srcKe var currMeta ConstraintViolationMeta for art, err = aItr.Next(ctx); err == nil; art, err = aItr.Next(ctx) { // prefix scanning sometimes returns keys not in the range - if bytes.Compare(art.Key, srcKey) != 0 { + if bytes.Compare(art.SourceKey, srcKey) != 0 { continue } if art.ArtType != artType { @@ -391,7 +391,7 @@ func (wr *ArtifactsEditor) ReplaceConstraintViolation(ctx context.Context, srcKe if err != nil { return err } - err = wr.Add(ctx, srcKey, theirRootIsh, artType, d) + err = wr.Add(ctx, srcKey, srcRootish, artType, d) if err != nil { return err } @@ -446,8 +446,8 @@ func (itr *ConflictArtifactIter) Next(ctx context.Context) (ConflictArtifact, er } return ConflictArtifact{ - Key: art.Key, - TheirRootIsh: 
art.TheirRootIsh, + Key: art.SourceKey, + TheirRootIsh: art.SourceRootish, Metadata: parsedMeta, }, nil } @@ -545,11 +545,11 @@ func (itr artifactIterImpl) Next(ctx context.Context) (Artifact, error) { metadata, _ := itr.artVD.GetJSON(0, v) return Artifact{ - ArtKey: artKey, - Key: srcKey, - TheirRootIsh: cmHash, - ArtType: ArtifactType(artType), - Metadata: metadata, + ArtKey: artKey, + SourceKey: srcKey, + SourceRootish: cmHash, + ArtType: ArtifactType(artType), + Metadata: metadata, }, nil } @@ -564,10 +564,10 @@ func (itr artifactIterImpl) getSrcKeyFromArtKey(k val.Tuple) val.Tuple { type Artifact struct { // ArtKey is the key of the artifact itself ArtKey val.Tuple - // Key is the key of the source row that the artifact references - Key val.Tuple + // SourceKey is the key of the source row that the artifact references + SourceKey val.Tuple // TheirRootIsh is the working set hash or commit hash of the right in the merge - TheirRootIsh hash.Hash + SourceRootish hash.Hash // ArtType is the type of the artifact ArtType ArtifactType // Metadata is the encoded json metadata From 3d9d806887cbd80bf66d13cccb383e0d094e9092 Mon Sep 17 00:00:00 2001 From: Brian Hendriks Date: Wed, 10 May 2023 14:07:48 -0700 Subject: [PATCH 11/82] pr feedback --- .../commands/sqlserver/metrics_listener.go | 94 ++++++++----------- 1 file changed, 40 insertions(+), 54 deletions(-) diff --git a/go/cmd/dolt/commands/sqlserver/metrics_listener.go b/go/cmd/dolt/commands/sqlserver/metrics_listener.go index b5fb432a86..2e88a67546 100644 --- a/go/cmd/dolt/commands/sqlserver/metrics_listener.go +++ b/go/cmd/dolt/commands/sqlserver/metrics_listener.go @@ -29,6 +29,10 @@ import ( const ( clusterUpdateInterval = time.Second * 5 + + dbLabel = "database" + roleLabel = "role" + remoteLabel = "remote" ) var _ server.ServerEventListener = (*metricsListener)(nil) @@ -44,13 +48,14 @@ type metricsListener struct { gaugeVersion prometheus.Gauge // replication metrics - dbToIsReplica map[string]prometheus.Gauge 
- dbToReplicationLag map[string]prometheus.Gauge + isReplicaGauges *prometheus.GaugeVec + replicationLagGauges *prometheus.GaugeVec // used in updating cluster metrics - clusterStatus clusterdb.ClusterStatusProvider - mu *sync.Mutex - done bool + clusterStatus clusterdb.ClusterStatusProvider + mu *sync.Mutex + done bool + clusterSeenDbs map[string]struct{} } func newMetricsListener(labels prometheus.Labels, versionStr string, clusterStatus clusterdb.ClusterStatusProvider) (*metricsListener, error) { @@ -87,10 +92,19 @@ func newMetricsListener(labels prometheus.Labels, versionStr string, clusterStat Help: "The version of dolt currently running on the machine", ConstLabels: labels, }), - dbToIsReplica: make(map[string]prometheus.Gauge), - dbToReplicationLag: make(map[string]prometheus.Gauge), - clusterStatus: clusterStatus, - mu: &sync.Mutex{}, + replicationLagGauges: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "dss_replication_lag", + Help: "The reported replication lag of this server when it is a primary to the given standby.", + ConstLabels: labels, + }, []string{dbLabel, remoteLabel}), + isReplicaGauges: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "dss_is_replica", + Help: "one if the server is currently in this role, zero otherwise", + ConstLabels: labels, + }, []string{dbLabel}), + clusterStatus: clusterStatus, + mu: &sync.Mutex{}, + clusterSeenDbs: make(map[string]struct{}), } u32Version, err := version.Encode(versionStr) @@ -130,56 +144,32 @@ func (ml *metricsListener) updateReplMetrics() bool { } perDbStatus := ml.clusterStatus.GetClusterStatus() - dbNames := make(map[string]struct{}) + if perDbStatus == nil { + return true + } + dbNames := make(map[string]struct{}) for _, status := range perDbStatus { dbName := status.Database dbNames[dbName] = struct{}{} - // if we haven't seen this db before, register the metrics - if _, ok := ml.dbToIsReplica[dbName]; !ok { - labels := prometheus.Labels{"database": dbName} - for k, v := range ml.labels 
{ - labels[k] = v - } - - ml.dbToIsReplica[dbName] = prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "dss_is_replica", - Help: "Whether this dolt sql server is a replica of the database", - ConstLabels: labels, - }) - ml.dbToReplicationLag[dbName] = prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "dss_replication_lag", - Help: "The replication lag of this dolt sql server from the master", - ConstLabels: labels, - }) - - prometheus.MustRegister(ml.dbToIsReplica[dbName]) - prometheus.MustRegister(ml.dbToReplicationLag[dbName]) - } - - // update the metrics - isReplica := ml.dbToIsReplica[dbName] - replicationLag := ml.dbToReplicationLag[dbName] - - isReplica.Set(1.0) if status.Role == string(cluster.RolePrimary) { - isReplica.Set(0.0) + ml.isReplicaGauges.WithLabelValues(status.Database).Set(0.0) + ml.replicationLagGauges.WithLabelValues(status.Database, status.Remote).Set(float64(status.ReplicationLag.Milliseconds())) + } else { + ml.isReplicaGauges.WithLabelValues(status.Database).Set(1.0) + ml.replicationLagGauges.WithLabelValues(status.Database, status.Remote).Set(-1.0) } - - replicationLag.Set(float64(status.ReplicationLag.Milliseconds())) } // deregister metrics for deleted databases - for dbName := range ml.dbToIsReplica { - if _, ok := dbNames[dbName]; !ok { - isReplica := ml.dbToIsReplica[dbName] - replicationLag := ml.dbToReplicationLag[dbName] - - prometheus.Unregister(isReplica) - prometheus.Unregister(replicationLag) + for db := range ml.clusterSeenDbs { + if _, ok := dbNames[db]; !ok { + ml.isReplicaGauges.DeletePartialMatch(prometheus.Labels{"database": db}) + ml.replicationLagGauges.DeletePartialMatch(prometheus.Labels{"database": db}) } } + ml.clusterSeenDbs = dbNames return true } @@ -217,13 +207,9 @@ func (ml *metricsListener) Close() { func (ml *metricsListener) closeReplicationMetrics() { ml.mu.Lock() defer ml.mu.Unlock() - for dbName := range ml.dbToIsReplica { - isReplica := ml.dbToIsReplica[dbName] - replicationLag := 
ml.dbToReplicationLag[dbName] - prometheus.Unregister(isReplica) - prometheus.Unregister(replicationLag) - } + prometheus.Unregister(ml.replicationLagGauges) + prometheus.Unregister(ml.isReplicaGauges) ml.done = true -} \ No newline at end of file +} From 6f624f8e5604b5d0eec89c7faf7d1b064307f7e4 Mon Sep 17 00:00:00 2001 From: bheni Date: Wed, 10 May 2023 21:16:49 +0000 Subject: [PATCH 12/82] [ga-format-pr] Run go/utils/repofmt/format_repo.sh and go/Godeps/update.sh --- go/cmd/dolt/commands/sqlserver/metrics_listener.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/go/cmd/dolt/commands/sqlserver/metrics_listener.go b/go/cmd/dolt/commands/sqlserver/metrics_listener.go index 2e88a67546..fd03aac159 100644 --- a/go/cmd/dolt/commands/sqlserver/metrics_listener.go +++ b/go/cmd/dolt/commands/sqlserver/metrics_listener.go @@ -16,15 +16,15 @@ package sqlserver import ( "fmt" - "github.com/dolthub/dolt/go/libraries/doltcore/sqle/cluster" - "github.com/dolthub/dolt/go/libraries/doltcore/sqle/clusterdb" "sync" "time" - "github.com/dolthub/dolt/go/libraries/utils/version" - "github.com/dolthub/go-mysql-server/server" "github.com/prometheus/client_golang/prometheus" + + "github.com/dolthub/dolt/go/libraries/doltcore/sqle/cluster" + "github.com/dolthub/dolt/go/libraries/doltcore/sqle/clusterdb" + "github.com/dolthub/dolt/go/libraries/utils/version" ) const ( From 663d2e318174fad65ad6a74689ec095007371e87 Mon Sep 17 00:00:00 2001 From: Aaron Son Date: Wed, 10 May 2023 14:25:41 -0700 Subject: [PATCH 13/82] sql-server: Expose the current server log level through the dolt_log_level system variable. 
--- go/cmd/dolt/commands/sqlserver/server.go | 21 +++++++++++++++++++ go/libraries/doltcore/sqle/dsess/variables.go | 1 + .../tests/sql-server-config.yaml | 20 ++++++++++++++++++ 3 files changed, 42 insertions(+) diff --git a/go/cmd/dolt/commands/sqlserver/server.go b/go/cmd/dolt/commands/sqlserver/server.go index 1fffec26b9..e33b2997e9 100644 --- a/go/cmd/dolt/commands/sqlserver/server.go +++ b/go/cmd/dolt/commands/sqlserver/server.go @@ -27,6 +27,7 @@ import ( "github.com/dolthub/go-mysql-server/server" "github.com/dolthub/go-mysql-server/sql" + "github.com/dolthub/go-mysql-server/sql/types" "github.com/dolthub/vitess/go/mysql" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/sirupsen/logrus" @@ -39,6 +40,7 @@ import ( "github.com/dolthub/dolt/go/libraries/doltcore/sqle" "github.com/dolthub/dolt/go/libraries/doltcore/sqle/binlogreplication" "github.com/dolthub/dolt/go/libraries/doltcore/sqle/cluster" + "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess" _ "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions" "github.com/dolthub/dolt/go/libraries/doltcore/sqlserver" ) @@ -87,6 +89,25 @@ func Serve( } logrus.SetFormatter(LogFormat{}) + sql.SystemVariables.AddSystemVariables([]sql.SystemVariable{ + { + Name: dsess.DoltLogLevel, + Scope: sql.SystemVariableScope_Global, + Dynamic: true, + SetVarHintApplies: false, + Type: types.NewSystemEnumType(dsess.DoltLogLevel, + logrus.PanicLevel.String(), + logrus.FatalLevel.String(), + logrus.ErrorLevel.String(), + logrus.WarnLevel.String(), + logrus.InfoLevel.String(), + logrus.DebugLevel.String(), + logrus.TraceLevel.String(), + ), + Default: logrus.GetLevel().String(), + }, + }) + var mrEnv *env.MultiRepoEnv var err error fs := dEnv.FS diff --git a/go/libraries/doltcore/sqle/dsess/variables.go b/go/libraries/doltcore/sqle/dsess/variables.go index 79457e2801..c979dded55 100644 --- a/go/libraries/doltcore/sqle/dsess/variables.go +++ b/go/libraries/doltcore/sqle/dsess/variables.go @@ -50,6 
+50,7 @@ const ( AwsCredsProfile = "aws_credentials_profile" AwsCredsRegion = "aws_credentials_region" ShowBranchDatabases = "dolt_show_branch_databases" + DoltLogLevel = "dolt_log_level" DoltClusterRoleVariable = "dolt_cluster_role" DoltClusterRoleEpochVariable = "dolt_cluster_role_epoch" diff --git a/integration-tests/go-sql-server-driver/tests/sql-server-config.yaml b/integration-tests/go-sql-server-driver/tests/sql-server-config.yaml index cbd7cc7290..27b30d490e 100644 --- a/integration-tests/go-sql-server-driver/tests/sql-server-config.yaml +++ b/integration-tests/go-sql-server-driver/tests/sql-server-config.yaml @@ -275,3 +275,23 @@ tests: result: columns: ["@@GLOBAL.max_connections"] rows: [["555"]] +- name: "@@global.dolt_log_level read behavior" + repos: + - name: repo1 + server: + args: ["-l", "trace"] + connections: + - on: repo1 + queries: + - query: "select @@GLOBAL.dolt_log_level" + result: + columns: ["@@GLOBAL.dolt_log_level"] + rows: [["trace"]] + restart_server: + args: ["-l", "debug"] + - on: repo1 + queries: + - query: "select @@GLOBAL.dolt_log_level" + result: + columns: ["@@GLOBAL.dolt_log_level"] + rows: [["debug"]] From 59f9099082c17984a09ac6d8f5a90f85979d2892 Mon Sep 17 00:00:00 2001 From: Nick Tobey Date: Wed, 10 May 2023 14:42:13 -0700 Subject: [PATCH 14/82] Remove cry for help from `dolt sql --help` --- go/cmd/dolt/commands/sql.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/go/cmd/dolt/commands/sql.go b/go/cmd/dolt/commands/sql.go index 335a81176f..40381d35a9 100644 --- a/go/cmd/dolt/commands/sql.go +++ b/go/cmd/dolt/commands/sql.go @@ -176,16 +176,16 @@ func (cmd SqlCmd) RequiresRepo() bool { func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv, cliCtx cli.CliContext) int { ap := cmd.ArgParser() help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, sqlDocs, ap)) - apr, verr := cmd.handleLegacyArguments(ap, commandStr, args) - 
if verr != nil { - if verr == argparser.ErrHelp { + apr, err := cmd.handleLegacyArguments(ap, commandStr, args) + if err != nil { + if err == argparser.ErrHelp { help() return 0 } - return HandleVErrAndExitCode(verr, usage) + return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage) } - err := validateSqlArgs(apr) + err = validateSqlArgs(apr) if err != nil { return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage) } @@ -286,7 +286,7 @@ func (cmd SqlCmd) Exec(ctx context.Context, commandStr string, args []string, dE } // handleLegacyArguments is a temporary function to parse args, and print a error and explanation when the old form is provided. -func (cmd SqlCmd) handleLegacyArguments(ap *argparser.ArgParser, commandStr string, args []string) (*argparser.ArgParseResults, errhand.VerboseError) { +func (cmd SqlCmd) handleLegacyArguments(ap *argparser.ArgParser, commandStr string, args []string) (*argparser.ArgParseResults, error) { apr, err := ap.Parse(args) @@ -312,12 +312,12 @@ func (cmd SqlCmd) handleLegacyArguments(ap *argparser.ArgParser, commandStr stri if newErr != nil { // Neither form of the arguments works. Print the usage and the error of the first parse. - return nil, errhand.VerboseErrorFromError(err) + return nil, err } // The legacy form worked, so print an error and exit. err = fmt.Errorf("SQL arguments have changed. Move --data-dir, --doltcfg-dir to before the sql sub command.") - return nil, errhand.VerboseErrorFromError(err) + return nil, err } return apr, nil From 2674c3cf2afe52329738cb848517470f3edfe933 Mon Sep 17 00:00:00 2001 From: Aaron Son Date: Wed, 10 May 2023 14:46:45 -0700 Subject: [PATCH 15/82] sql-server: Allow dolt_log_level system variable to receive writes and dynamically update the log level. 
--- go/cmd/dolt/commands/sqlserver/server.go | 7 +++++++ .../tests/sql-server-config.yaml | 19 +++++++++++++++---- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/go/cmd/dolt/commands/sqlserver/server.go b/go/cmd/dolt/commands/sqlserver/server.go index e33b2997e9..b7d5d681b7 100644 --- a/go/cmd/dolt/commands/sqlserver/server.go +++ b/go/cmd/dolt/commands/sqlserver/server.go @@ -105,6 +105,13 @@ func Serve( logrus.TraceLevel.String(), ), Default: logrus.GetLevel().String(), + NotifyChanged: func(scope sql.SystemVariableScope, v sql.SystemVarValue) { + if level, err := logrus.ParseLevel(v.Val.(string)); err == nil { + logrus.SetLevel(level) + } else { + logrus.Warnf("could not parse requested log level %s as a log level. dolt_log_level variable value and logging behavior will diverge.", v.Val.(string)) + } + }, }, }) diff --git a/integration-tests/go-sql-server-driver/tests/sql-server-config.yaml b/integration-tests/go-sql-server-driver/tests/sql-server-config.yaml index 27b30d490e..d3750e77ab 100644 --- a/integration-tests/go-sql-server-driver/tests/sql-server-config.yaml +++ b/integration-tests/go-sql-server-driver/tests/sql-server-config.yaml @@ -275,23 +275,34 @@ tests: result: columns: ["@@GLOBAL.max_connections"] rows: [["555"]] -- name: "@@global.dolt_log_level read behavior" +- name: "@@global.dolt_log_level behavior" repos: - name: repo1 server: - args: ["-l", "trace"] + args: ["-l", "warning"] + log_matches: + - "Starting query" connections: - on: repo1 queries: + - query: "select @@GLOBAL.dolt_log_level" + result: + columns: ["@@GLOBAL.dolt_log_level"] + rows: [["warning"]] + - exec: "set @@GLOBAL.dolt_log_level = 'trace'" + - query: "select 2+2 from dual" + result: + columns: ["2+2"] + rows: [["4"]] - query: "select @@GLOBAL.dolt_log_level" result: columns: ["@@GLOBAL.dolt_log_level"] rows: [["trace"]] restart_server: - args: ["-l", "debug"] + args: ["-l", "info"] - on: repo1 queries: - query: "select @@GLOBAL.dolt_log_level" result: 
columns: ["@@GLOBAL.dolt_log_level"] - rows: [["debug"]] + rows: [["info"]] From 2bd393bcf9169d247c85528f1f2db71f79b621c5 Mon Sep 17 00:00:00 2001 From: reltuk Date: Wed, 10 May 2023 21:56:25 +0000 Subject: [PATCH 16/82] [ga-format-pr] Run go/utils/repofmt/format_repo.sh and go/Godeps/update.sh --- go/cmd/dolt/commands/sqlserver/server.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/go/cmd/dolt/commands/sqlserver/server.go b/go/cmd/dolt/commands/sqlserver/server.go index b7d5d681b7..7ea8315040 100644 --- a/go/cmd/dolt/commands/sqlserver/server.go +++ b/go/cmd/dolt/commands/sqlserver/server.go @@ -40,8 +40,8 @@ import ( "github.com/dolthub/dolt/go/libraries/doltcore/sqle" "github.com/dolthub/dolt/go/libraries/doltcore/sqle/binlogreplication" "github.com/dolthub/dolt/go/libraries/doltcore/sqle/cluster" - "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess" _ "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions" + "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess" "github.com/dolthub/dolt/go/libraries/doltcore/sqlserver" ) @@ -90,12 +90,12 @@ func Serve( logrus.SetFormatter(LogFormat{}) sql.SystemVariables.AddSystemVariables([]sql.SystemVariable{ - { + { Name: dsess.DoltLogLevel, Scope: sql.SystemVariableScope_Global, Dynamic: true, SetVarHintApplies: false, - Type: types.NewSystemEnumType(dsess.DoltLogLevel, + Type: types.NewSystemEnumType(dsess.DoltLogLevel, logrus.PanicLevel.String(), logrus.FatalLevel.String(), logrus.ErrorLevel.String(), @@ -104,8 +104,8 @@ func Serve( logrus.DebugLevel.String(), logrus.TraceLevel.String(), ), - Default: logrus.GetLevel().String(), - NotifyChanged: func(scope sql.SystemVariableScope, v sql.SystemVarValue) { + Default: logrus.GetLevel().String(), + NotifyChanged: func(scope sql.SystemVariableScope, v sql.SystemVarValue) { if level, err := logrus.ParseLevel(v.Val.(string)); err == nil { logrus.SetLevel(level) } else { From dab974cd5bfd3be82be5e82326fe74bb9aec6843 Mon Sep 17 
00:00:00 2001 From: Nick Tobey Date: Wed, 10 May 2023 15:19:07 -0700 Subject: [PATCH 17/82] Generate per-table names for constraint checks. --- go/libraries/doltcore/sqle/tables.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/libraries/doltcore/sqle/tables.go b/go/libraries/doltcore/sqle/tables.go index 8e24beb2dd..5b71a957a8 100644 --- a/go/libraries/doltcore/sqle/tables.go +++ b/go/libraries/doltcore/sqle/tables.go @@ -2541,7 +2541,7 @@ func (t *AlterableDoltTable) generateCheckName(ctx *sql.Context, check *sql.Chec bb.Write([]byte(check.CheckExpression)) hash := hash.Of(bb.Bytes()) - hashedName := fmt.Sprintf("chk_%s", hash.String()[:8]) + hashedName := fmt.Sprintf("%s_chk_%s", t.tableName, hash.String()[:8]) name := hashedName var i int From 630a53b68ff88af1f9214bf4ef7ae28fb2753bf3 Mon Sep 17 00:00:00 2001 From: Nick Tobey Date: Wed, 10 May 2023 15:19:21 -0700 Subject: [PATCH 18/82] Add bats test for per-table constraint check names. --- integration-tests/bats/sql-create-tables.bats | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/integration-tests/bats/sql-create-tables.bats b/integration-tests/bats/sql-create-tables.bats index e86918c8ef..e607201c6f 100644 --- a/integration-tests/bats/sql-create-tables.bats +++ b/integration-tests/bats/sql-create-tables.bats @@ -776,3 +776,18 @@ SQL dolt sql -q "INSERT INTO budgets VALUES (UUID());" dolt sql -q "INSERT INTO budgets2 VALUES (UUID());" } + +@test "sql-create-tables: tables should not reuse constraint names" { + run dolt sql -r csv < 0) +); +CREATE TABLE t2 LIKE t1; +SELECT count(CONSTRAINT_NAME), count(distinct CONSTRAINT_NAME) FROM information_schema.table_constraints WHERE CONSTRAINT_TYPE="CHECK"; +SQL + + [ "$status" -eq 0 ] + [[ "$output" =~ "2,2" ]] || false + +} \ No newline at end of file From 05a5dcb5a4b705ca8c475df3d4295346140b49df Mon Sep 17 00:00:00 2001 From: Aaron Son Date: Wed, 10 May 2023 15:35:30 -0700 Subject: [PATCH 19/82] go: doltdb: hooksdatabase.go: 
Execute commit hooks in parallel so that synchronous commit hooks do not serialize on each other. --- go/libraries/doltcore/doltdb/hooksdatabase.go | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/go/libraries/doltcore/doltdb/hooksdatabase.go b/go/libraries/doltcore/doltdb/hooksdatabase.go index ed412682ea..54d128d043 100644 --- a/go/libraries/doltcore/doltdb/hooksdatabase.go +++ b/go/libraries/doltcore/doltdb/hooksdatabase.go @@ -21,6 +21,8 @@ import ( "github.com/dolthub/dolt/go/store/datas" "github.com/dolthub/dolt/go/store/hash" "github.com/dolthub/dolt/go/store/types" + + "sync" ) type hooksDatabase struct { @@ -58,14 +60,21 @@ func (db hooksDatabase) PostCommitHooks() []CommitHook { func (db hooksDatabase) ExecuteCommitHooks(ctx context.Context, ds datas.Dataset, onlyWS bool) { var err error + var wg sync.WaitGroup for _, hook := range db.postCommitHooks { if !onlyWS || hook.ExecuteForWorkingSets() { - err = hook.Execute(ctx, ds, db) - if err != nil { - hook.HandleError(ctx, err) - } + hook := hook + wg.Add(1) + go func() { + defer wg.Done() + err = hook.Execute(ctx, ds, db) + if err != nil { + hook.HandleError(ctx, err) + } + }() } } + wg.Wait() } func (db hooksDatabase) CommitWithWorkingSet( From ab6152c4f3c48ab3409fbb68a0df2b5200199471 Mon Sep 17 00:00:00 2001 From: Nick Tobey Date: Wed, 10 May 2023 15:49:48 -0700 Subject: [PATCH 20/82] Fix bats test that verifies the behavior of --help --- integration-tests/bats/no-repo.bats | 36 +++++++++++++++++++++++++---- 1 file changed, 31 insertions(+), 5 deletions(-) diff --git a/integration-tests/bats/no-repo.bats b/integration-tests/bats/no-repo.bats index 1cad4d31a6..6515d2cb0f 100755 --- a/integration-tests/bats/no-repo.bats +++ b/integration-tests/bats/no-repo.bats @@ -77,17 +77,18 @@ teardown() { @test "no-repo: check all commands for valid help text" { # pipe all commands to a file # cut -s suppresses the line if it doesn't contain the delim - dolt | cut -f 1 -d " - " -s | 
sed "s/ //g" > all.txt + dolt | awk -F ' - ' '/ - / {print $1}' > all_raw.txt + sed "s/ //g" all_raw.txt > all.txt # filter out commands without "-h" + # and filter out stash because of https://github.com/dolthub/dolt/issues/5920 cat all.txt \ - | sed "s/creds//g" \ | sed "s/version//g" \ - | sed "s/schema//g" \ - | sed "s/table//g" \ - | sed "s/conflicts//g" \ + | sed "s/stash//g" \ > commands.txt + touch subcommands.txt + cat commands.txt | while IFS= read -r cmd; do if [ -z "$cmd" ]; then @@ -96,9 +97,34 @@ teardown() { run dolt "$cmd" -h [ "$status" -eq 0 ] + + if [[ "$output" =~ "Valid commands for dolt $cmd are" ]]; then + echo "/ - / {print \"$cmd\", \$1}" + echo "$output" | awk -F ' - ' "/ - / {print \"$cmd\", \$1}" >> subcommands.txt + continue + fi + [[ "$output" =~ "NAME" ]] || false [[ "$output" =~ "DESCRIPTION" ]] || false done + + cat subcommands.txt | while IFS= read -r cmd; + do + if [ -z "$cmd" ]; then + continue + fi + + echo $cmd + + run dolt $cmd -h + echo $status + echo "$output" + [ "$status" -eq 0 ] + + [[ "$output" =~ "NAME" ]] || false + [[ "$output" =~ "DESCRIPTION" ]] || false + done + } @test "no-repo: testing dolt version output" { From 1186d6de807cbbb22eda6b8f46670a12a4ccd073 Mon Sep 17 00:00:00 2001 From: Andy Arthur Date: Wed, 10 May 2023 15:58:57 -0700 Subject: [PATCH 21/82] go/doltcore/merge: register not null constraint violations for left-side NULLs --- .../doltcore/merge/merge_prolly_rows.go | 105 ++++++++++++------ .../sqle/enginetest/dolt_engine_test.go | 62 +++-------- .../sqle/enginetest/dolt_queries_merge.go | 52 +++++++-- .../bats/merge-3way-schema-changes.bats | 20 +--- 4 files changed, 136 insertions(+), 103 deletions(-) diff --git a/go/libraries/doltcore/merge/merge_prolly_rows.go b/go/libraries/doltcore/merge/merge_prolly_rows.go index fb1f39a4ba..9e653afce5 100644 --- a/go/libraries/doltcore/merge/merge_prolly_rows.go +++ b/go/libraries/doltcore/merge/merge_prolly_rows.go @@ -133,17 +133,17 @@ func 
mergeProllyTableData(ctx *sql.Context, tm *TableMerger, finalSch schema.Sch if err != nil { return nil, nil, err } - leftRows := durable.ProllyMapFromIndex(lr) + leftEditor := durable.ProllyMapFromIndex(lr).Mutate() ai, err := mergeTbl.GetArtifacts(ctx) if err != nil { return nil, nil, err } - ae := durable.ProllyMapFromArtifactIndex(ai).Editor() + artEditor := durable.ProllyMapFromArtifactIndex(ai).Editor() keyless := schema.IsKeyless(tm.leftSch) - pri, err := newPrimaryMerger(leftRows, tm, valueMerger, finalSch) + pri, err := newPrimaryMerger(leftEditor, tm, valueMerger, finalSch) if err != nil { return nil, nil, err } @@ -151,18 +151,18 @@ func mergeProllyTableData(ctx *sql.Context, tm *TableMerger, finalSch schema.Sch if err != nil { return nil, nil, err } - conflicts, err := newConflictMerger(ctx, tm, ae) + conflicts, err := newConflictMerger(ctx, tm, artEditor) if err != nil { return nil, nil, err } // validator shares an artifact editor with conflict merge - uniq, err := newUniqValidator(ctx, finalSch, tm, valueMerger, ae) + uniq, err := newUniqValidator(ctx, finalSch, tm, valueMerger, artEditor) if err != nil { return nil, nil, err } - nullChk, err := newNullValidator(ctx, finalSch, tm, valueMerger, ae) + nullChk, err := newNullValidator(ctx, finalSch, tm, valueMerger, artEditor, leftEditor, sec.leftMut) if err != nil { return nil, nil, err } @@ -513,31 +513,51 @@ type nullValidator struct { // leftMap and rightMap map value tuples to |final| leftMap, rightMap val.OrdinalMapping // edits is the artifacts maps editor - edits *prolly.ArtifactsEditor - // theirRootish is the hash.Hash of the right-side - // rootish being merged + artEditor *prolly.ArtifactsEditor + // leftEdits if the left-side row editor + leftEditor *prolly.MutableMap + // secEditors are the secondary index editors + secEditors []MutableSecondaryIdx + // theirRootish is the hash.Hash of the right-side revision theirRootish hash.Hash + // ourRootish is the hash.Hash of the left-side revision 
+ ourRootish hash.Hash } -func newNullValidator(ctx context.Context, final schema.Schema, tm *TableMerger, vm *valueMerger, edits *prolly.ArtifactsEditor) (nullValidator, error) { +func newNullValidator( + ctx context.Context, + final schema.Schema, + tm *TableMerger, + vm *valueMerger, + artEditor *prolly.ArtifactsEditor, + leftEditor *prolly.MutableMap, + secEditors []MutableSecondaryIdx, +) (nullValidator, error) { theirRootish, err := tm.rightSrc.HashOf() if err != nil { return nullValidator{}, err } + ourRootish, err := tm.rightSrc.HashOf() + if err != nil { + return nullValidator{}, err + } return nullValidator{ table: tm.name, final: final, leftMap: vm.leftMapping, rightMap: vm.rightMapping, - edits: edits, + artEditor: artEditor, + leftEditor: leftEditor, + secEditors: secEditors, theirRootish: theirRootish, + ourRootish: ourRootish, }, nil } func (nv nullValidator) validateDiff(ctx context.Context, diff tree.ThreeWayDiff) (count int, err error) { - var violations []string switch diff.Op { case tree.DiffOpRightAdd, tree.DiffOpRightModify: + var violations []string for to, from := range nv.rightMap { col := nv.final.GetNonPKCols().GetByIndex(to) if col.IsNullable() { @@ -556,7 +576,22 @@ func (nv nullValidator) validateDiff(ctx context.Context, diff tree.ThreeWayDiff } } } + // for right-side NULL violations, we insert a constraint violation and + // set |count| > 0 to signal to the caller that |diff| should not be applied + if len(violations) > 0 { + var meta prolly.ConstraintViolationMeta + if meta, err = newNotNullViolationMeta(violations, diff.Right); err != nil { + return 0, err + } + err = nv.artEditor.ReplaceConstraintViolation(ctx, diff.Key, nv.theirRootish, prolly.ArtifactTypeNullViol, meta) + if err != nil { + return 0, err + } + } + count = len(violations) + case tree.DiffOpLeftAdd, tree.DiffOpLeftModify: + var violations []string for to, from := range nv.leftMap { col := nv.final.GetNonPKCols().GetByIndex(to) if col.IsNullable() { @@ -567,32 
+602,36 @@ func (nv nullValidator) validateDiff(ctx context.Context, diff tree.ThreeWayDiff // on the left side of the merge, check if it will // be populated with a default value if col.Default == "" { - // todo: we cannot record row-level conflicts originating from - // the left side of the merge, this should be a schema conflict - return 0, fmt.Errorf("table %s can't be automatically merged.\n"+ - "To merge this table, make the schema on the source and target branch equal.", nv.table) + violations = append(violations, col.Name) } } else { if diff.Left.FieldIsNull(from) { - // todo: we cannot record row-level conflicts originating from - // the left side of the merge, this should be a schema conflict - return 0, fmt.Errorf("table %s can't be automatically merged.\n"+ - "To merge this table, make the schema on the source and target branch equal.", nv.table) + violations = append(violations, col.Name) + } + } + } + // for left-side NULL violations, we insert a constraint violation and + // then must explicitly remove this row from all left-side indexes + if len(violations) > 0 { + var meta prolly.ConstraintViolationMeta + if meta, err = newNotNullViolationMeta(violations, diff.Left); err != nil { + return 0, err + } + err = nv.artEditor.ReplaceConstraintViolation(ctx, diff.Key, nv.ourRootish, prolly.ArtifactTypeNullViol, meta) + if err != nil { + return 0, err + } + if err = nv.leftEditor.Delete(ctx, diff.Key); err != nil { + return 0, err + } + for _, editor := range nv.secEditors { + if err = editor.DeleteEntry(ctx, diff.Key, diff.Left); err != nil { + return 0, err } } } } - if len(violations) > 0 { - var meta prolly.ConstraintViolationMeta - if meta, err = newNotNullViolationMeta(violations, diff.Right); err != nil { - return 0, err - } - err = nv.edits.ReplaceConstraintViolation(ctx, diff.Key, nv.theirRootish, prolly.ArtifactTypeNullViol, meta) - if err != nil { - return 0, err - } - } - return len(violations), nil + return } // conflictMerger processing 
primary key diffs @@ -674,9 +713,9 @@ type primaryMerger struct { finalSch schema.Schema } -func newPrimaryMerger(leftRows prolly.Map, tableMerger *TableMerger, valueMerger *valueMerger, finalSch schema.Schema) (*primaryMerger, error) { +func newPrimaryMerger(leftEditor *prolly.MutableMap, tableMerger *TableMerger, valueMerger *valueMerger, finalSch schema.Schema) (*primaryMerger, error) { return &primaryMerger{ - mut: leftRows.Mutate(), + mut: leftEditor, valueMerger: valueMerger, tableMerger: tableMerger, finalSch: finalSch, diff --git a/go/libraries/doltcore/sqle/enginetest/dolt_engine_test.go b/go/libraries/doltcore/sqle/enginetest/dolt_engine_test.go index b6973e1e11..f4a46f31f6 100644 --- a/go/libraries/doltcore/sqle/enginetest/dolt_engine_test.go +++ b/go/libraries/doltcore/sqle/enginetest/dolt_engine_test.go @@ -49,7 +49,7 @@ var skipPrepared bool // SkipPreparedsCount is used by the "ci-check-repo CI workflow // as a reminder to consider prepareds when adding a new // enginetest suite. -const SkipPreparedsCount = 85 +const SkipPreparedsCount = 86 const skipPreparedFlag = "DOLT_SKIP_PREPARED_ENGINETESTS" @@ -160,76 +160,42 @@ func TestSingleScript(t *testing.T) { // Convenience test for debugging a single query. Unskip and set to the desired query. 
func TestSingleMergeScript(t *testing.T) { + t.Skip() var scripts = []MergeScriptTest{ { - Name: "adding a not-null constraint and default value to a column", + Name: "adding a non-null column with a default value to one side", AncSetUpScript: []string{ "set dolt_force_transaction_commit = on;", "create table t (pk int primary key, col1 int);", - "insert into t values (1, null), (2, null);", + "insert into t values (1, 1);", }, RightSetUpScript: []string{ - "update t set col1 = 9999 where col1 is null;", - "alter table t modify column col1 int not null default 9999;", - "insert into t values (3, 30), (4, 40);", + "alter table t add column col2 int not null default 0", + "alter table t add column col3 int;", + "update t set col2 = 1 where pk = 1;", + "insert into t values (2, 2, 2, null);", }, LeftSetUpScript: []string{ - "insert into t values (5, null), (6, null);", + "insert into t values (3, 3);", }, Assertions: []queries.ScriptTestAssertion{ { Query: "call dolt_merge('right');", }, { - Query: "select pk, col1 from t;", - Expected: []sql.Row{ - {1, 9999}, - {2, 9999}, - {3, 30}, - {4, 40}, - }, - }, - }, - }, - { - Name: "adding a not-null constraint to one side", - AncSetUpScript: []string{ - "set dolt_force_transaction_commit = on;", - "create table t (pk int primary key, col1 int);", - "insert into t values (1, null), (2, null);", - }, - RightSetUpScript: []string{ - "update t set col1 = 0 where col1 is null;", - "alter table t modify col1 int not null;", - }, - LeftSetUpScript: []string{ - "insert into t values (3, null);", - }, - Assertions: []queries.ScriptTestAssertion{ - { - Query: "call dolt_merge('right');", + Query: "select * from t;", + Expected: []sql.Row{{1, 1, 1, nil}, {2, 2, 2, nil}, {3, 3, 0, nil}}, }, { - Skip: true, - Query: "select pk, col1 from t;", - Expected: []sql.Row{ - {1, 0}, - {2, 0}, - }, - }, - { - Query: "select violation_type, pk from dolt_constraint_violations_t", - Expected: []sql.Row{ - {uint16(4), 3}, - }, + Query: "select pk, 
violation_type from dolt_constraint_violations_t", + Expected: []sql.Row{}, }, }, }, } - for _, test := range scripts { + enginetest.TestScript(t, newDoltHarness(t), convertMergeScriptTest(test, false)) enginetest.TestScript(t, newDoltHarness(t), convertMergeScriptTest(test, true)) - //enginetest.TestScript(t, harness, convertMergeScriptTest(test, false)) } } diff --git a/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go b/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go index c9a016d747..4db1f94ac9 100644 --- a/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go +++ b/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go @@ -4633,6 +4633,35 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{ "create table t (pk int primary key, col1 int);", "insert into t values (1, 1);", }, + RightSetUpScript: []string{ + "alter table t add column col2 int not null default 0", + "alter table t add column col3 int;", + "insert into t values (2, 2, 2, null);", + }, + LeftSetUpScript: []string{ + "insert into t values (3, 3);", + }, + Assertions: []queries.ScriptTestAssertion{ + { + Query: "call dolt_merge('right');", + }, + { + Query: "select * from t;", + Expected: []sql.Row{{1, 1, 0, nil}, {2, 2, 2, nil}, {3, 3, 0, nil}}, + }, + { + Query: "select pk, violation_type from dolt_constraint_violations_t", + Expected: []sql.Row{}, + }, + }, + }, + { + Name: "adding a non-null column with a default value to one side (with update to existing row)", + AncSetUpScript: []string{ + "set dolt_force_transaction_commit = on;", + "create table t (pk int primary key, col1 int);", + "insert into t values (1, 1);", + }, RightSetUpScript: []string{ "alter table t add column col2 int not null default 0", "alter table t add column col3 int;", @@ -4648,9 +4677,13 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{ }, { Skip: true, - Query: "select * from t;", + Query: "select * from t;", // fails with row(1,1,0,NULL) Expected: 
[]sql.Row{{1, 1, 1, nil}, {2, 2, 2, nil}, {3, 3, 0, nil}}, }, + { + Query: "select pk, violation_type from dolt_constraint_violations_t", + Expected: []sql.Row{}, + }, }, }, { @@ -4670,8 +4703,8 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{ }, Assertions: []queries.ScriptTestAssertion{ { - Skip: true, - Query: "call dolt_merge('right');", + Query: "call dolt_merge('right');", + Expected: []sql.Row{{0, 0x1}}, }, { Skip: true, @@ -4683,6 +4716,13 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{ {4, 40}, }, }, + { + Query: "select pk, violation_type from dolt_constraint_violations_t", + Expected: []sql.Row{ + {5, uint16(4)}, + {6, uint16(4)}, + }, + }, }, }, { @@ -4701,11 +4741,10 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{ }, Assertions: []queries.ScriptTestAssertion{ { - Skip: true, - Query: "call dolt_merge('right');", + Query: "call dolt_merge('right');", + Expected: []sql.Row{{0, 0x1}}, }, { - Skip: true, Query: "select pk, col1 from t;", Expected: []sql.Row{ {1, 0}, @@ -4713,7 +4752,6 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{ }, }, { - Skip: true, Query: "select violation_type, pk, violation_info from dolt_constraint_violations_t", Expected: []sql.Row{ {uint16(4), 3, types.JSONDocument{Val: merge.NullViolationMeta{Columns: []string{"col1"}}}}, diff --git a/integration-tests/bats/merge-3way-schema-changes.bats b/integration-tests/bats/merge-3way-schema-changes.bats index 818bcbaeb5..41dd936176 100644 --- a/integration-tests/bats/merge-3way-schema-changes.bats +++ b/integration-tests/bats/merge-3way-schema-changes.bats @@ -11,7 +11,7 @@ teardown() { } -@test "merge-3way-schema-changes: blocked merge can be fixed by making the schema identical" { +@test "merge-3way-schema-changes: add a NOT NULL column with default value on a branch" { dolt sql -q "create table t (pk int primary key);" dolt commit -Am "ancestor" @@ -24,20 +24,10 @@ teardown() { dolt sql -q "insert into t 
values (2);" dolt commit -am "left" - run dolt merge right - [ $status -ne 0 ] - [[ $output =~ "table t can't be automatically merged." ]] - - run dolt diff main right --schema -r sql - [ $status -eq 0 ] - [[ $output =~ 'ALTER TABLE `t` ADD `col1` int NOT NULL DEFAULT 0;' ]] - - dolt sql -q 'ALTER TABLE `t` ADD `col1` int NOT NULL DEFAULT 0;' - dolt commit -am "fix merge" dolt merge right - run dolt sql -r csv -q "select * from t;" - [[ $output =~ "pk,col1" ]] - [[ $output =~ "1,0" ]] - [[ $output =~ "2,0" ]] + run dolt sql -q "select * from t" -r csv + log_status_eq 0 + [[ "$output" =~ "1,0" ]] || false + [[ "$output" =~ "2,0" ]] || false } \ No newline at end of file From 865c08531a3fd9ecd81e9e0515fbd68083a1095f Mon Sep 17 00:00:00 2001 From: Nick Tobey Date: Wed, 10 May 2023 16:10:51 -0700 Subject: [PATCH 22/82] Delay format check used in `dolt stash` until after args parsing, in case `--help` is being passed. --- go/cmd/dolt/commands/stashcmds/stash.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/go/cmd/dolt/commands/stashcmds/stash.go b/go/cmd/dolt/commands/stashcmds/stash.go index dae43c3066..fff56f3c25 100644 --- a/go/cmd/dolt/commands/stashcmds/stash.go +++ b/go/cmd/dolt/commands/stashcmds/stash.go @@ -91,13 +91,15 @@ func (cmd StashCmd) EventType() eventsapi.ClientEventType { // Exec executes the command func (cmd StashCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv, cliCtx cli.CliContext) int { + ap := cmd.ArgParser() + help, _ := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, stashDocs, ap)) + apr := cli.ParseArgsOrDie(ap, args, help) + if !dEnv.DoltDB.Format().UsesFlatbuffers() { cli.PrintErrln(ErrStashNotSupportedForOldFormat.Error()) return 1 } - ap := cmd.ArgParser() - help, _ := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, stashDocs, ap)) - apr := cli.ParseArgsOrDie(ap, args, help) + if dEnv.IsLocked() { return 
commands.HandleVErrAndExitCode(errhand.VerboseErrorFromError(env.ErrActiveServerLock.New(dEnv.LockFile())), help) } From abad9bc58590a4dde0e989d12bf60fb8d65eb4b4 Mon Sep 17 00:00:00 2001 From: Nick Tobey Date: Wed, 10 May 2023 16:12:48 -0700 Subject: [PATCH 23/82] Use bats tests to verify `dolt stash --help` --- integration-tests/bats/no-repo.bats | 2 -- 1 file changed, 2 deletions(-) diff --git a/integration-tests/bats/no-repo.bats b/integration-tests/bats/no-repo.bats index 6515d2cb0f..894145095b 100755 --- a/integration-tests/bats/no-repo.bats +++ b/integration-tests/bats/no-repo.bats @@ -81,10 +81,8 @@ teardown() { sed "s/ //g" all_raw.txt > all.txt # filter out commands without "-h" - # and filter out stash because of https://github.com/dolthub/dolt/issues/5920 cat all.txt \ | sed "s/version//g" \ - | sed "s/stash//g" \ > commands.txt touch subcommands.txt From d847360467441cd7ac4fcd031cc0ef4f8db88025 Mon Sep 17 00:00:00 2001 From: Andy Arthur Date: Wed, 10 May 2023 16:14:03 -0700 Subject: [PATCH 24/82] go/doltcore/merge: skip nullability tests on NBF __LD_1__ --- .../doltcore/merge/schema_merge_test.go | 36 +++++++++++-------- 1 file changed, 21 insertions(+), 15 deletions(-) diff --git a/go/libraries/doltcore/merge/schema_merge_test.go b/go/libraries/doltcore/merge/schema_merge_test.go index d8084d8e5a..cbe922e345 100644 --- a/go/libraries/doltcore/merge/schema_merge_test.go +++ b/go/libraries/doltcore/merge/schema_merge_test.go @@ -291,25 +291,31 @@ var columnDefaultTests = []schemaMergeTest{ var nullabilityTests = []schemaMergeTest{ { - name: "add not null column to empty table", - ancestor: tbl(sch("CREATE TABLE t (id int PRIMARY KEY) ")), - left: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL)")), - right: tbl(sch("CREATE TABLE t (id int PRIMARY KEY) ")), - merged: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL)")), + name: "add not null column to empty table", + ancestor: tbl(sch("CREATE TABLE t (id int PRIMARY KEY) ")), + 
left: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL)")), + right: tbl(sch("CREATE TABLE t (id int PRIMARY KEY) ")), + merged: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL)")), + skipOldFmt: true, + skipFlipOnOldFormat: true, }, { - name: "add not null constraint to existing column", - ancestor: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int) "), row(1, 1)), - left: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL)"), row(1, 1)), - right: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int) "), row(1, 1), row(2, 2)), - merged: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL)"), row(1, 1), row(2, 2)), + name: "add not null constraint to existing column", + ancestor: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int) "), row(1, 1)), + left: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL)"), row(1, 1)), + right: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int) "), row(1, 1), row(2, 2)), + merged: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL)"), row(1, 1), row(2, 2)), + skipOldFmt: true, + skipFlipOnOldFormat: true, }, { - name: "add not null column to non-empty table", - ancestor: tbl(sch("CREATE TABLE t (id int PRIMARY KEY) "), row(1)), - left: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL DEFAULT '19')"), row(1, 19)), - right: tbl(sch("CREATE TABLE t (id int PRIMARY KEY) "), row(1), row(2)), - merged: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL DEFAULT '19')"), row(1, 19), row(2, 19)), + name: "add not null column to non-empty table", + ancestor: tbl(sch("CREATE TABLE t (id int PRIMARY KEY) "), row(1)), + left: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL DEFAULT '19')"), row(1, 19)), + right: tbl(sch("CREATE TABLE t (id int PRIMARY KEY) "), row(1), row(2)), + merged: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int NOT NULL DEFAULT '19')"), row(1, 19), row(2, 19)), + skipOldFmt: true, + skipFlipOnOldFormat: true, }, } From 
68f6d036ba8e2d4f6f381a57c22098fa8c20481c Mon Sep 17 00:00:00 2001 From: Nick Tobey Date: Wed, 10 May 2023 16:15:20 -0700 Subject: [PATCH 25/82] Remove print debugging. --- integration-tests/bats/no-repo.bats | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/integration-tests/bats/no-repo.bats b/integration-tests/bats/no-repo.bats index 894145095b..1ef7df05c1 100755 --- a/integration-tests/bats/no-repo.bats +++ b/integration-tests/bats/no-repo.bats @@ -97,7 +97,6 @@ teardown() { [ "$status" -eq 0 ] if [[ "$output" =~ "Valid commands for dolt $cmd are" ]]; then - echo "/ - / {print \"$cmd\", \$1}" echo "$output" | awk -F ' - ' "/ - / {print \"$cmd\", \$1}" >> subcommands.txt continue fi @@ -112,11 +111,8 @@ teardown() { continue fi - echo $cmd - run dolt $cmd -h - echo $status - echo "$output" + [ "$status" -eq 0 ] [[ "$output" =~ "NAME" ]] || false From e708c177f84bb12ce8cfa516b1093a9847f76166 Mon Sep 17 00:00:00 2001 From: Nick Tobey Date: Wed, 10 May 2023 17:03:37 -0700 Subject: [PATCH 26/82] Migrate uses of `CWBHeadRef` and `CWBHeadSpec` in commands module. 
--- go/cmd/dolt/commands/cherry-pick.go | 6 +++++- go/cmd/dolt/commands/filter-branch.go | 7 ++++++- go/cmd/dolt/commands/log.go | 5 ++++- go/cmd/dolt/commands/ls.go | 7 ++++++- go/cmd/dolt/commands/merge.go | 14 ++++++++++++-- go/cmd/dolt/commands/merge_base.go | 7 ++++++- go/cmd/dolt/commands/pull.go | 7 ++++++- go/cmd/dolt/commands/reset.go | 6 +++++- go/cmd/dolt/commands/stashcmds/pop.go | 6 +++++- 9 files changed, 55 insertions(+), 10 deletions(-) diff --git a/go/cmd/dolt/commands/cherry-pick.go b/go/cmd/dolt/commands/cherry-pick.go index 82ba75dcd5..75ae000b7f 100644 --- a/go/cmd/dolt/commands/cherry-pick.go +++ b/go/cmd/dolt/commands/cherry-pick.go @@ -183,7 +183,11 @@ func getCherryPickedRootValue(ctx context.Context, dEnv *env.DoltEnv, workingRoo if err != nil { return nil, "", err } - cherryCm, err := dEnv.DoltDB.Resolve(ctx, cherrySpec, dEnv.RepoStateReader().CWBHeadRef()) + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return nil, "", err + } + cherryCm, err := dEnv.DoltDB.Resolve(ctx, cherrySpec, headRef) if err != nil { return nil, "", err } diff --git a/go/cmd/dolt/commands/filter-branch.go b/go/cmd/dolt/commands/filter-branch.go index 039c3b3f07..931918f617 100644 --- a/go/cmd/dolt/commands/filter-branch.go +++ b/go/cmd/dolt/commands/filter-branch.go @@ -186,7 +186,12 @@ func getNerf(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgParseResu return nil, err } - cm, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef()) + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return nil, err + } + + cm, err := dEnv.DoltDB.Resolve(ctx, cs, headRef) if err != nil { return nil, err } diff --git a/go/cmd/dolt/commands/log.go b/go/cmd/dolt/commands/log.go index d249469c54..e590e97699 100644 --- a/go/cmd/dolt/commands/log.go +++ b/go/cmd/dolt/commands/log.go @@ -249,7 +249,10 @@ func (opts *logOpts) parseRefsAndTable(ctx context.Context, apr *argparser.ArgPa opts.excludingCommitSpecs = 
append(opts.excludingCommitSpecs, notCs) } else { - argIsRef := actions.IsValidRef(ctx, arg, dEnv.DoltDB, dEnv.RepoStateReader()) + argIsRef, err := actions.IsValidRef(ctx, arg, dEnv.DoltDB, dEnv.RepoStateReader()) + if err != nil { + return nil + } // if argIsRef && !seenRefs[arg] { cs, err := getCommitSpec(arg) diff --git a/go/cmd/dolt/commands/ls.go b/go/cmd/dolt/commands/ls.go index 243a671aca..8ee63aaccb 100644 --- a/go/cmd/dolt/commands/ls.go +++ b/go/cmd/dolt/commands/ls.go @@ -121,7 +121,12 @@ func getRootForCommitSpecStr(ctx context.Context, csStr string, dEnv *env.DoltEn return "", nil, bdr.AddCause(err).Build() } - cm, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef()) + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return "", nil, errhand.VerboseErrorFromError(err) + } + + cm, err := dEnv.DoltDB.Resolve(ctx, cs, headRef) if err != nil { return "", nil, errhand.BuildDError(`Unable to resolve "%s"`, csStr).AddCause(err).Build() diff --git a/go/cmd/dolt/commands/merge.go b/go/cmd/dolt/commands/merge.go index 0b3731ca48..7a23d9fefa 100644 --- a/go/cmd/dolt/commands/merge.go +++ b/go/cmd/dolt/commands/merge.go @@ -159,7 +159,12 @@ func (cmd MergeCmd) Exec(ctx context.Context, commandStr string, args []string, return handleCommitErr(ctx, dEnv, err, usage) } - suggestedMsg := fmt.Sprintf("Merge branch '%s' into %s", commitSpecStr, dEnv.RepoStateReader().CWBHeadRef().GetPath()) + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return handleCommitErr(ctx, dEnv, err, usage) + } + + suggestedMsg := fmt.Sprintf("Merge branch '%s' into %s", commitSpecStr, headRef.GetPath()) msg := "" if m, ok := apr.GetValue(cli.MessageArg); ok { msg = m @@ -529,10 +534,15 @@ func executeNoFFMergeAndCommit(ctx context.Context, dEnv *env.DoltEnv, spec *mer Email: spec.Email, }) + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return tblToStats, err + } + wsHash, err := ws.HashOf() _, err 
= dEnv.DoltDB.CommitWithWorkingSet( ctx, - dEnv.RepoStateReader().CWBHeadRef(), + headRef, ws.Ref(), pendingCommit, ws.WithStagedRoot(pendingCommit.Roots.Staged).WithWorkingRoot(pendingCommit.Roots.Working).ClearMerge(), diff --git a/go/cmd/dolt/commands/merge_base.go b/go/cmd/dolt/commands/merge_base.go index d2612d61d2..c860844972 100644 --- a/go/cmd/dolt/commands/merge_base.go +++ b/go/cmd/dolt/commands/merge_base.go @@ -119,7 +119,12 @@ func ResolveCommitWithVErr(dEnv *env.DoltEnv, cSpecStr string) (*doltdb.Commit, return nil, errhand.BuildDError("'%s' is not a valid commit", cSpecStr).Build() } - cm, err := dEnv.DoltDB.Resolve(context.TODO(), cs, dEnv.RepoStateReader().CWBHeadRef()) + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return nil, errhand.VerboseErrorFromError(err) + } + + cm, err := dEnv.DoltDB.Resolve(context.TODO(), cs, headRef) if err != nil { if errors.Is(err, doltdb.ErrInvalidAncestorSpec) { return nil, errhand.BuildDError("'%s' could not resolve ancestor spec", cSpecStr).Build() diff --git a/go/cmd/dolt/commands/pull.go b/go/cmd/dolt/commands/pull.go index d4b4ff9089..0841b246a8 100644 --- a/go/cmd/dolt/commands/pull.go +++ b/go/cmd/dolt/commands/pull.go @@ -202,7 +202,12 @@ func pullHelper(ctx context.Context, dEnv *env.DoltEnv, pullSpec *env.PullSpec) return err } - suggestedMsg := fmt.Sprintf("Merge branch '%s' of %s into %s", pullSpec.Branch.GetPath(), pullSpec.Remote.Url, dEnv.RepoStateReader().CWBHeadRef().GetPath()) + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return err + } + + suggestedMsg := fmt.Sprintf("Merge branch '%s' of %s into %s", pullSpec.Branch.GetPath(), pullSpec.Remote.Url, headRef.GetPath()) tblStats, err := performMerge(ctx, dEnv, mergeSpec, suggestedMsg) printSuccessStats(tblStats) if err != nil { diff --git a/go/cmd/dolt/commands/reset.go b/go/cmd/dolt/commands/reset.go index 5194d2fa75..eae57e8acb 100644 --- a/go/cmd/dolt/commands/reset.go +++ 
b/go/cmd/dolt/commands/reset.go @@ -103,7 +103,11 @@ func (cmd ResetCmd) Exec(ctx context.Context, commandStr string, args []string, } else { if apr.NArg() == 1 { ref := apr.Arg(0) - if actions.IsValidRef(ctx, ref, dEnv.DoltDB, dEnv.RepoStateReader()) { + isValidRef, err := actions.IsValidRef(ctx, ref, dEnv.DoltDB, dEnv.RepoStateReader()) + if err != nil { + return handleErrAndExit(err) + } + if isValidRef { return handleResetSoftToRef(ctx, dEnv, ref, usage) } } diff --git a/go/cmd/dolt/commands/stashcmds/pop.go b/go/cmd/dolt/commands/stashcmds/pop.go index 6d273bf690..a75c5d6b76 100644 --- a/go/cmd/dolt/commands/stashcmds/pop.go +++ b/go/cmd/dolt/commands/stashcmds/pop.go @@ -133,7 +133,11 @@ func applyStashAtIdx(ctx context.Context, dEnv *env.DoltEnv, curWorkingRoot *dol if err != nil { return false, err } - parentCommit, err := dEnv.DoltDB.Resolve(ctx, headCommitSpec, dEnv.RepoStateReader().CWBHeadRef()) + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return false, err + } + parentCommit, err := dEnv.DoltDB.Resolve(ctx, headCommitSpec, headRef) if err != nil { return false, err } From c3660a1d0cc7db889a80e4234cf191c4557b5456 Mon Sep 17 00:00:00 2001 From: Maximilian Hoffman Date: Thu, 11 May 2023 10:18:01 -0700 Subject: [PATCH 27/82] bump gms (#5926) --- go/go.mod | 2 +- go/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go/go.mod b/go/go.mod index b660f7b638..5b3632a2c7 100644 --- a/go/go.mod +++ b/go/go.mod @@ -59,7 +59,7 @@ require ( github.com/cespare/xxhash v1.1.0 github.com/creasty/defaults v1.6.0 github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2 - github.com/dolthub/go-mysql-server v0.15.1-0.20230510045813-ba911392b553 + github.com/dolthub/go-mysql-server v0.15.1-0.20230511161328-b4ddc44585e9 github.com/dolthub/swiss v0.1.0 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/jmoiron/sqlx v1.3.4 diff --git a/go/go.sum b/go/go.sum index bf5542d7e7..2b2c2d8eb1 100644 --- a/go/go.sum +++ 
b/go/go.sum @@ -166,8 +166,8 @@ github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2 h1:u3PMzfF8RkKd3lB9pZ2bfn0qEG+1G github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2/go.mod h1:mIEZOHnFx4ZMQeawhw9rhsj+0zwQj7adVsnBX7t+eKY= github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U= github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0= -github.com/dolthub/go-mysql-server v0.15.1-0.20230510045813-ba911392b553 h1:dLgqnwh32cJPrjV3dja/hWluXwcrG1QrIKd29Vc5tfw= -github.com/dolthub/go-mysql-server v0.15.1-0.20230510045813-ba911392b553/go.mod h1:YO4FtMULZ/HuKxlvm7QfvTE8uBKEv+1LtK2A3FrrGe4= +github.com/dolthub/go-mysql-server v0.15.1-0.20230511161328-b4ddc44585e9 h1:+0W2FuuaoOtyFkw7vtklJxbibBxRv+tuWKaRo6OyMnU= +github.com/dolthub/go-mysql-server v0.15.1-0.20230511161328-b4ddc44585e9/go.mod h1:YO4FtMULZ/HuKxlvm7QfvTE8uBKEv+1LtK2A3FrrGe4= github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488 h1:0HHu0GWJH0N6a6keStrHhUAK5/o9LVfkh44pvsV4514= github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488/go.mod h1:ehexgi1mPxRTk0Mok/pADALuHbvATulTh6gzr7NzZto= github.com/dolthub/jsonpath v0.0.1 h1:Nd+T3U+XisK3kOuxtABS5IIbZqXVIlOR9VYquyjQ0u0= From e8dc92e4350582af6a4a96aa075017e77fd7281f Mon Sep 17 00:00:00 2001 From: Dustin Brown Date: Thu, 11 May 2023 10:19:39 -0700 Subject: [PATCH 28/82] [ga-bump-dep] Bump dependency in Dolt by max-hoffman (#5927) Co-authored-by: max-hoffman From 1c1db7a62eedebd98591fa0bfc3aced93280f4a8 Mon Sep 17 00:00:00 2001 From: max-hoffman Date: Thu, 11 May 2023 17:29:10 +0000 Subject: [PATCH 29/82] [ga-bump-release] Update Dolt version to 1.0.1 and release v1.0.1 --- go/cmd/dolt/dolt.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/cmd/dolt/dolt.go b/go/cmd/dolt/dolt.go index f585625d6d..2b97e77572 100644 --- a/go/cmd/dolt/dolt.go +++ b/go/cmd/dolt/dolt.go @@ -62,7 +62,7 @@ import ( ) const ( - Version = "1.0.0" + Version = "1.0.1" ) var dumpDocsCommand = &commands.DumpDocsCmd{} 
From 094919fecdbb7eaa85cc841a34af431e9fb102be Mon Sep 17 00:00:00 2001 From: Andy Arthur Date: Thu, 11 May 2023 11:43:43 -0700 Subject: [PATCH 30/82] go/doltcore/sqle: pr feedback --- .../doltcore/sqle/enginetest/dolt_queries_merge.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go b/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go index 4db1f94ac9..7d6b761fd4 100644 --- a/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go +++ b/go/libraries/doltcore/sqle/enginetest/dolt_queries_merge.go @@ -1625,6 +1625,10 @@ var Dolt1MergeScripts = []queries.ScriptTest{ Query: "call dolt_merge('other')", Expected: []sql.Row{{0, 1}}, }, + { + Query: "select * from dolt_constraint_violations", + Expected: []sql.Row{{"test", uint(1)}}, + }, { Query: "select violation_type, pk, violation_info from dolt_constraint_violations_test", Expected: []sql.Row{ @@ -4643,7 +4647,8 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{ }, Assertions: []queries.ScriptTestAssertion{ { - Query: "call dolt_merge('right');", + Query: "call dolt_merge('right');", + Expected: []sql.Row{{0, 0}}, }, { Query: "select * from t;", @@ -4673,7 +4678,9 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{ }, Assertions: []queries.ScriptTestAssertion{ { - Query: "call dolt_merge('right');", + SkipResultsCheck: true, + Query: "call dolt_merge('right');", + Expected: []sql.Row{{0, 0}}, // non-symmetric result }, { Skip: true, @@ -4707,7 +4714,6 @@ var ThreeWayMergeWithSchemaChangeTestScripts = []MergeScriptTest{ Expected: []sql.Row{{0, 0x1}}, }, { - Skip: true, Query: "select pk, col1 from t;", Expected: []sql.Row{ {1, 9999}, From 560eb45754fa1c1b62f6b13d043a56d2cbf12ea6 Mon Sep 17 00:00:00 2001 From: Nick Tobey Date: Thu, 11 May 2023 13:12:28 -0700 Subject: [PATCH 31/82] Clean up remaining unchecked uses of dbState.WorkingSet --- go/libraries/doltcore/sqle/database.go | 7 
+++++++ go/libraries/doltcore/sqle/dsess/session.go | 15 +++++++++++++-- .../doltcore/sqle/dtables/ignore_table.go | 9 ++++++--- go/libraries/doltcore/sqle/tables.go | 3 +++ go/libraries/doltcore/sqle/temp_table.go | 6 ++++++ 5 files changed, 35 insertions(+), 5 deletions(-) diff --git a/go/libraries/doltcore/sqle/database.go b/go/libraries/doltcore/sqle/database.go index 274d6cd186..7441235dbe 100644 --- a/go/libraries/doltcore/sqle/database.go +++ b/go/libraries/doltcore/sqle/database.go @@ -648,6 +648,10 @@ func (db Database) GetRoot(ctx *sql.Context) (*doltdb.RootValue, error) { return dbState.GetRoots().Working, nil } +// GetWorkingSet gets the current working set for the database. +// If there is no working set (most likely because the DB is in Detached Head mode, return an error. +// If a command needs to work while in Detached Head, that command should call sess.LookupDbState directly. +// TODO: Replace all uses of dbState.WorkingSet, including this, with a new interface. func (db Database) GetWorkingSet(ctx *sql.Context) (*doltdb.WorkingSet, error) { sess := dsess.DSessFromSess(ctx.Session) dbState, ok, err := sess.LookupDbState(ctx, db.Name()) @@ -657,6 +661,9 @@ func (db Database) GetWorkingSet(ctx *sql.Context) (*doltdb.WorkingSet, error) { if !ok { return nil, fmt.Errorf("no root value found in session") } + if dbState.WorkingSet == nil { + return nil, doltdb.ErrOperationNotSupportedInDetachedHead + } return dbState.WorkingSet, nil } diff --git a/go/libraries/doltcore/sqle/dsess/session.go b/go/libraries/doltcore/sqle/dsess/session.go index 3d9cde6570..aec019701b 100644 --- a/go/libraries/doltcore/sqle/dsess/session.go +++ b/go/libraries/doltcore/sqle/dsess/session.go @@ -573,7 +573,7 @@ func (d *DoltSession) NewPendingCommit(ctx *sql.Context, dbName string, roots do headHash, _ := headCommit.HashOf() if sessionState.WorkingSet == nil { - return nil, fmt.Errorf("Cannot commit while not attached to a branch. 
") + return nil, doltdb.ErrOperationNotSupportedInDetachedHead } var mergeParentCommits []*doltdb.Commit @@ -841,6 +841,10 @@ func (d *DoltSession) SetRoot(ctx *sql.Context, dbName string, newRoot *doltdb.R return err } + if sessionState.WorkingSet == nil { + return doltdb.ErrOperationNotSupportedInDetachedHead + } + if rootsEqual(sessionState.GetRoots().Working, newRoot) { return nil } @@ -864,6 +868,10 @@ func (d *DoltSession) SetRoots(ctx *sql.Context, dbName string, roots doltdb.Roo return err } + if sessionState.WorkingSet == nil { + return doltdb.ErrOperationNotSupportedInDetachedHead + } + workingSet := sessionState.WorkingSet.WithWorkingRoot(roots.Working).WithStagedRoot(roots.Staged) return d.SetWorkingSet(ctx, dbName, workingSet) } @@ -1039,6 +1047,9 @@ func (d *DoltSession) WorkingSet(ctx *sql.Context, dbName string) (*doltdb.Worki if err != nil { return nil, err } + if sessionState.WorkingSet == nil { + return nil, doltdb.ErrOperationNotSupportedInDetachedHead + } return sessionState.WorkingSet, nil } @@ -1266,7 +1277,7 @@ func (d *DoltSession) CWBHeadRef(ctx *sql.Context, dbName string) (ref.DoltRef, } if dbState.WorkingSet == nil { - return nil, nil + return nil, doltdb.ErrOperationNotSupportedInDetachedHead } return dbState.WorkingSet.Ref().ToHeadRef() diff --git a/go/libraries/doltcore/sqle/dtables/ignore_table.go b/go/libraries/doltcore/sqle/dtables/ignore_table.go index b0b3c71691..d9ff8009c2 100644 --- a/go/libraries/doltcore/sqle/dtables/ignore_table.go +++ b/go/libraries/doltcore/sqle/dtables/ignore_table.go @@ -117,13 +117,12 @@ var _ sql.RowDeleter = (*ignoreWriter)(nil) type ignoreWriter struct { it *IgnoreTable errDuringStatementBegin error - workingSet *doltdb.WorkingSet prevHash *hash.Hash tableWriter writer.TableWriter } func newIgnoreWriter(it *IgnoreTable) *ignoreWriter { - return &ignoreWriter{it, nil, nil, nil, nil} + return &ignoreWriter{it, nil, nil, nil} } // Insert inserts the row given, returning an error if it cannot. 
Insert will be called once for each row to process @@ -179,7 +178,6 @@ func (iw *ignoreWriter) StatementBegin(ctx *sql.Context) { iw.prevHash = &prevHash - iw.workingSet = dbState.WorkingSet found, err := roots.Working.HasTable(ctx, doltdb.IgnoreTableName) if err != nil { @@ -229,6 +227,11 @@ func (iw *ignoreWriter) StatementBegin(ctx *sql.Context) { return } + if dbState.WorkingSet == nil { + iw.errDuringStatementBegin = doltdb.ErrOperationNotSupportedInDetachedHead + return + } + // We use WriteSession.SetWorkingSet instead of DoltSession.SetRoot because we want to avoid modifying the root // until the end of the transaction, but we still want the WriteSession to be able to find the newly // created table. diff --git a/go/libraries/doltcore/sqle/tables.go b/go/libraries/doltcore/sqle/tables.go index 8e24beb2dd..5e507bf398 100644 --- a/go/libraries/doltcore/sqle/tables.go +++ b/go/libraries/doltcore/sqle/tables.go @@ -1258,6 +1258,9 @@ func (t *AlterableDoltTable) RewriteInserter( } ws := dbState.WorkingSet + if ws == nil { + return nil, doltdb.ErrOperationNotSupportedInDetachedHead + } head, err := sess.GetHeadCommit(ctx, t.db.Name()) if err != nil { diff --git a/go/libraries/doltcore/sqle/temp_table.go b/go/libraries/doltcore/sqle/temp_table.go index 7af1383840..2795e309e3 100644 --- a/go/libraries/doltcore/sqle/temp_table.go +++ b/go/libraries/doltcore/sqle/temp_table.go @@ -79,6 +79,9 @@ func NewTempTable( } ws := dbState.WorkingSet + if ws == nil { + return nil, doltdb.ErrOperationNotSupportedInDetachedHead + } sch, err := temporaryDoltSchema(ctx, pkSch, collation) if err != nil { @@ -153,6 +156,9 @@ func setTempTableRoot(t *TempTable) func(ctx *sql.Context, dbName string, newRoo } ws := dbState.WorkingSet + if ws == nil { + return doltdb.ErrOperationNotSupportedInDetachedHead + } newWs := ws.WithWorkingRoot(newRoot) ait, err := globalstate.NewAutoIncrementTracker(ctx, newWs) From c62eb688eefda6142e4c82f96fff090bcdab2179 Mon Sep 17 00:00:00 2001 From: Nick 
Tobey Date: Thu, 11 May 2023 13:57:53 -0700 Subject: [PATCH 32/82] ActiveBranchFunc should work in detached head state. --- go/libraries/doltcore/sqle/dfunctions/active_branch.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/go/libraries/doltcore/sqle/dfunctions/active_branch.go b/go/libraries/doltcore/sqle/dfunctions/active_branch.go index ddb987482d..755f4049ac 100644 --- a/go/libraries/doltcore/sqle/dfunctions/active_branch.go +++ b/go/libraries/doltcore/sqle/dfunctions/active_branch.go @@ -16,6 +16,7 @@ package dfunctions import ( "fmt" + "github.com/dolthub/dolt/go/libraries/doltcore/doltdb" "github.com/dolthub/go-mysql-server/sql" "github.com/dolthub/go-mysql-server/sql/types" @@ -46,6 +47,10 @@ func (ab *ActiveBranchFunc) Eval(ctx *sql.Context, row sql.Row) (interface{}, er } currentBranchRef, err := dSess.CWBHeadRef(ctx, dbName) + if err == doltdb.ErrOperationNotSupportedInDetachedHead { + // active_branch should return NULL if we're in detached head state + return nil, nil + } if err != nil { return nil, err } From e75f6408cbd8589ea4d118afe3c907e09fa124f3 Mon Sep 17 00:00:00 2001 From: Brian Hendriks Date: Thu, 11 May 2023 14:17:12 -0700 Subject: [PATCH 33/82] nil check --- go/cmd/dolt/commands/sqlserver/metrics_listener.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/go/cmd/dolt/commands/sqlserver/metrics_listener.go b/go/cmd/dolt/commands/sqlserver/metrics_listener.go index fd03aac159..dceff9a8c9 100644 --- a/go/cmd/dolt/commands/sqlserver/metrics_listener.go +++ b/go/cmd/dolt/commands/sqlserver/metrics_listener.go @@ -155,7 +155,12 @@ func (ml *metricsListener) updateReplMetrics() bool { if status.Role == string(cluster.RolePrimary) { ml.isReplicaGauges.WithLabelValues(status.Database).Set(0.0) - ml.replicationLagGauges.WithLabelValues(status.Database, status.Remote).Set(float64(status.ReplicationLag.Milliseconds())) + + if status.ReplicationLag == nil { + ml.replicationLagGauges.WithLabelValues(status.Database, 
status.Remote).Set(-1.0) + } else { + ml.replicationLagGauges.WithLabelValues(status.Database, status.Remote).Set(float64(status.ReplicationLag.Milliseconds())) + } } else { ml.isReplicaGauges.WithLabelValues(status.Database).Set(1.0) ml.replicationLagGauges.WithLabelValues(status.Database, status.Remote).Set(-1.0) From 4ce8f8c37e204573346b5675980059c945f28991 Mon Sep 17 00:00:00 2001 From: Andy Arthur Date: Thu, 11 May 2023 14:28:31 -0700 Subject: [PATCH 34/82] go/libraries/doltcore/merge: fix non-deterministic map iteration in schema merge column mapping --- go/libraries/doltcore/merge/merge_schema.go | 13 ++++++++----- .../doltcore/sqle/enginetest/dolt_engine_test.go | 14 +++++++++----- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/go/libraries/doltcore/merge/merge_schema.go b/go/libraries/doltcore/merge/merge_schema.go index 33ade444f2..f677154543 100644 --- a/go/libraries/doltcore/merge/merge_schema.go +++ b/go/libraries/doltcore/merge/merge_schema.go @@ -595,16 +595,19 @@ func mapColumns(ourCC, theirCC, ancCC *schema.ColCollection) (columnMappings, er }) // Handle any remaining columns on the "their" side - for _, theirCol := range theirTagsToCols { - ancCol, foundAncByTag := ancCC.GetByTag(theirCol.Tag) + _ = theirCC.Iter(func(tag uint64, theirCol schema.Column) (stop bool, err error) { + if _, ok := theirTagsToCols[tag]; !ok { + return // already added + } + + ancCol, foundAncByTag := ancCC.GetByTag(tag) if !foundAncByTag { // Ditto for finding the ancestor column ancCol, _ = ancCC.GetByNameCaseInsensitive(theirCol.Name) } - columnMappings = append(columnMappings, newColumnMapping(ancCol, schema.InvalidCol, theirCol)) - } - + return + }) return columnMappings, nil } diff --git a/go/libraries/doltcore/sqle/enginetest/dolt_engine_test.go b/go/libraries/doltcore/sqle/enginetest/dolt_engine_test.go index f4a46f31f6..845dbbc516 100644 --- a/go/libraries/doltcore/sqle/enginetest/dolt_engine_test.go +++ 
b/go/libraries/doltcore/sqle/enginetest/dolt_engine_test.go @@ -172,7 +172,6 @@ func TestSingleMergeScript(t *testing.T) { RightSetUpScript: []string{ "alter table t add column col2 int not null default 0", "alter table t add column col3 int;", - "update t set col2 = 1 where pk = 1;", "insert into t values (2, 2, 2, null);", }, LeftSetUpScript: []string{ @@ -180,11 +179,12 @@ func TestSingleMergeScript(t *testing.T) { }, Assertions: []queries.ScriptTestAssertion{ { - Query: "call dolt_merge('right');", + Query: "call dolt_merge('right');", + Expected: []sql.Row{{0, 0}}, }, { Query: "select * from t;", - Expected: []sql.Row{{1, 1, 1, nil}, {2, 2, 2, nil}, {3, 3, 0, nil}}, + Expected: []sql.Row{{1, 1, 0, nil}, {2, 2, 2, nil}, {3, 3, 0, nil}}, }, { Query: "select pk, violation_type from dolt_constraint_violations_t", @@ -194,8 +194,12 @@ func TestSingleMergeScript(t *testing.T) { }, } for _, test := range scripts { - enginetest.TestScript(t, newDoltHarness(t), convertMergeScriptTest(test, false)) - enginetest.TestScript(t, newDoltHarness(t), convertMergeScriptTest(test, true)) + t.Run("merge right into left", func(t *testing.T) { + enginetest.TestScript(t, newDoltHarness(t), convertMergeScriptTest(test, false)) + }) + t.Run("merge left into right", func(t *testing.T) { + enginetest.TestScript(t, newDoltHarness(t), convertMergeScriptTest(test, true)) + }) } } From 7acaeb2df1e061abb5a26f82a2e8e64bfe9dcbb1 Mon Sep 17 00:00:00 2001 From: fulghum Date: Thu, 11 May 2023 21:56:58 +0000 Subject: [PATCH 35/82] [ga-bump-dep] Bump dependency in Dolt by fulghum --- go/go.mod | 2 +- go/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go/go.mod b/go/go.mod index 5b3632a2c7..ada89d6a53 100644 --- a/go/go.mod +++ b/go/go.mod @@ -59,7 +59,7 @@ require ( github.com/cespare/xxhash v1.1.0 github.com/creasty/defaults v1.6.0 github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2 - github.com/dolthub/go-mysql-server v0.15.1-0.20230511161328-b4ddc44585e9 + 
github.com/dolthub/go-mysql-server v0.15.1-0.20230511215534-6dca53c0d236 github.com/dolthub/swiss v0.1.0 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/jmoiron/sqlx v1.3.4 diff --git a/go/go.sum b/go/go.sum index 2b2c2d8eb1..c8dbb1dbc7 100644 --- a/go/go.sum +++ b/go/go.sum @@ -166,8 +166,8 @@ github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2 h1:u3PMzfF8RkKd3lB9pZ2bfn0qEG+1G github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2/go.mod h1:mIEZOHnFx4ZMQeawhw9rhsj+0zwQj7adVsnBX7t+eKY= github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U= github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0= -github.com/dolthub/go-mysql-server v0.15.1-0.20230511161328-b4ddc44585e9 h1:+0W2FuuaoOtyFkw7vtklJxbibBxRv+tuWKaRo6OyMnU= -github.com/dolthub/go-mysql-server v0.15.1-0.20230511161328-b4ddc44585e9/go.mod h1:YO4FtMULZ/HuKxlvm7QfvTE8uBKEv+1LtK2A3FrrGe4= +github.com/dolthub/go-mysql-server v0.15.1-0.20230511215534-6dca53c0d236 h1:dcMIfGLWniby9dQq2RN+hWtrNKFQlCd+9uFBNXB4uiw= +github.com/dolthub/go-mysql-server v0.15.1-0.20230511215534-6dca53c0d236/go.mod h1:YO4FtMULZ/HuKxlvm7QfvTE8uBKEv+1LtK2A3FrrGe4= github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488 h1:0HHu0GWJH0N6a6keStrHhUAK5/o9LVfkh44pvsV4514= github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488/go.mod h1:ehexgi1mPxRTk0Mok/pADALuHbvATulTh6gzr7NzZto= github.com/dolthub/jsonpath v0.0.1 h1:Nd+T3U+XisK3kOuxtABS5IIbZqXVIlOR9VYquyjQ0u0= From a7ff41c5265d0f7483be1215d9e99bfc31731e07 Mon Sep 17 00:00:00 2001 From: Andy Arthur Date: Thu, 11 May 2023 15:24:27 -0700 Subject: [PATCH 36/82] go/store/val: improve doc comments, remove todos --- go/serial/schema.fbs | 4 +- go/store/types/edits/async_sorted_edits.go | 1 - go/store/val/codec.go | 9 ---- go/store/val/tuple.go | 60 ++++++++-------------- go/store/val/tuple_descriptor.go | 1 - go/store/val/tuple_test.go | 1 - 6 files changed, 22 insertions(+), 54 deletions(-) diff --git a/go/serial/schema.fbs 
b/go/serial/schema.fbs index 86bbde28a0..d2cfc6a8ef 100644 --- a/go/serial/schema.fbs +++ b/go/serial/schema.fbs @@ -42,9 +42,7 @@ table Column { // sql display order display_order:int16; - // todo(andy): ideally we'd resolve column identity - // without using tags, but the current implementation - // of schema.Schema is tightly coupled to tags. + // column tag tag: uint64; // storage encoding diff --git a/go/store/types/edits/async_sorted_edits.go b/go/store/types/edits/async_sorted_edits.go index 3d3b6b58ed..c7083281de 100644 --- a/go/store/types/edits/async_sorted_edits.go +++ b/go/store/types/edits/async_sorted_edits.go @@ -77,7 +77,6 @@ func (ase *AsyncSortedEdits) EditsAdded() int { func (ase *AsyncSortedEdits) AddEdit(k types.LesserValuable, v types.Valuable) { ase.editsAdded++ if ase.accumulating == nil { - // TODO: buffer pool ase.accumulating = make([]types.KVP, 0, ase.sliceSize) } diff --git a/go/store/val/codec.go b/go/store/val/codec.go index c52515a137..5e95bc6686 100644 --- a/go/store/val/codec.go +++ b/go/store/val/codec.go @@ -105,15 +105,6 @@ const ( DecimalEnc = Encoding(serial.EncodingDecimal) JSONEnc = Encoding(serial.EncodingJSON) GeometryEnc = Encoding(serial.EncodingGeometry) - - // TODO - // CharEnc - // BinaryEnc - // TextEnc - // BlobEnc - // EnumEnc - // SetEnc - // ExpressionEnc ) func sizeFromType(t Type) (ByteSize, bool) { diff --git a/go/store/val/tuple.go b/go/store/val/tuple.go index 62e9462de4..6382081bf8 100644 --- a/go/store/val/tuple.go +++ b/go/store/val/tuple.go @@ -27,50 +27,32 @@ const ( countSize ByteSize = 2 ) -// todo(andy): update comment -// Tuples are byte slices containing field values and a footer. Tuples only -// contain Values for non-NULL Fields. Value i contains the data for ith non- -// NULL Field. Values are packed contiguously from the front of the Tuple. The -// footer contains offsets, a member mask, and a field count. offsets enable -// random access to Values. 
The member mask enables NULL-compaction for Values. +// A Tuple is a vector of fields encoded as a byte slice. Key-Value Tuple pairs +// are used to store row data within clustered and secondary indexes in Dolt. // -// Tuples read and write Values as byte slices. (De)serialization is delegated -// to Tuple Descriptors, which know a Tuple's schema and associated encodings. -// When reading and writing Values, NULLs are encoded as nil byte slices. Note -// that these are not the same as zero-length byte slices. An empty string may -// be encoded as a zero-length byte slice and will be distinct from a NULL -// string both logically and semantically. +// The encoding format for Tuples starts with field values packed contiguously from +// the front of the Tuple, followed by field offsets, and finally a field count: // -// Tuple: -// +---------+---------+-----+---------+---------+-------------+-------------+ -// | Value 0 | Value 1 | ... | Value K | offsets | Member Mask | Field Count | -// +---------+---------+-----+---------+---------+-------------+-------------+ +// +---------+---------+-----+---------+----------+-----+----------+-------+ +// | Value 0 | Value 1 | ... | Value K | Offset 1 | ... | Offset K | Count | +// +---------+---------+-----+---------+----------+-----+----------+-------+ // -// offsets: -// The offset array contains a uint16 for each non-NULL field after field 0. -// Offset i encodes the distance to the ith Value from the front of the Tuple. -// The size of the offset array is 2*(K-1) bytes, where K is the number of -// Values in the Tuple. -// +----------+----------+-----+----------+ -// | Offset 1 | Offset 2 | ... | Offset K | -// +----------+----------+-----+----------+ +// Field offsets encode the byte-offset from the front of the Tuple to the beginning +// of the corresponding field in the Tuple. The offset for the first field is always +// zero and is therefore omitted. Offsets and the field count are little-endian +// encoded uint16 values. 
// -// Member Mask: -// The member mask is a bit-array encoding field membership in Tuples. Fields -// with non-NULL values are present, and encoded as 1, NULL fields are absent -// and encoded as 0. The size of the bit array is math.Ceil(N/8) bytes, where -// N is the number of Fields in the Tuple. -// +------------+-------------+-----+ -// | Bits 0 - 7 | Bits 8 - 15 | ... | -// +------------+-------------+-----+ +// Tuples read and write field values as byte slices. Interpreting these encoded +// values is left up to TupleDesc which knows about a Tuple's schema and associated +// field encodings. Zero-length fields are interpreted as NULL values, all non-NULL +// values must be encoded with non-zero length. For this reason, variable-length +// strings are encoded with a NUL terminator (see codec.go). // -// Field Count: -// The field fieldCount is a uint16 containing the number of fields in the -// Tuple, it is stored in 2 bytes. -// +----------------------+ -// | Field Count (uint16) | -// +----------------------+ - +// Accessing the ith field where i > count will return a NULL value. This allows us +// to implicitly add nullable columns to the end of a schema without needing to +// rewrite index storage. However, because Dolt storage is content-addressed, we +// must have a single canonical encoding for any given Tuple. For this reason, the +// NULL suffix of a Tuple is explicitly truncated and the field count reduced. 
type Tuple []byte var EmptyTuple = Tuple([]byte{0, 0}) diff --git a/go/store/val/tuple_descriptor.go b/go/store/val/tuple_descriptor.go index b7a3b1fcef..1f7feeaf82 100644 --- a/go/store/val/tuple_descriptor.go +++ b/go/store/val/tuple_descriptor.go @@ -518,7 +518,6 @@ func (td TupleDesc) FormatValue(i int, value []byte) string { return formatValue(td.Types[i].Enc, value) } func formatValue(enc Encoding, value []byte) string { - // todo(andy): complete cases switch enc { case Int8Enc: v := readInt8(value) diff --git a/go/store/val/tuple_test.go b/go/store/val/tuple_test.go index 9345df8df2..1d81f07ccf 100644 --- a/go/store/val/tuple_test.go +++ b/go/store/val/tuple_test.go @@ -96,7 +96,6 @@ func tuplePrefix(pool pool.BuffPool, tup Tuple, k int) Tuple { } func tupleSuffix(pool pool.BuffPool, tup Tuple, k int) Tuple { - // todo(andy) cnt := tup.Count() if k == 0 { return EmptyTuple From 2caab100147eeb0bfa920d96de3c3e197636ca2c Mon Sep 17 00:00:00 2001 From: Andy Arthur Date: Thu, 11 May 2023 15:34:57 -0700 Subject: [PATCH 37/82] go/serial: improve comment docs for flatbuffers defs --- go/serial/addressmap.fbs | 3 ++- go/serial/blob.fbs | 3 ++- go/serial/commitclosure.fbs | 3 ++- go/serial/mergeartifacts.fbs | 3 ++- go/serial/prolly.fbs | 5 ++--- go/serial/table.fbs | 2 ++ 6 files changed, 12 insertions(+), 7 deletions(-) diff --git a/go/serial/addressmap.fbs b/go/serial/addressmap.fbs index 42cc02e733..e94e038a73 100644 --- a/go/serial/addressmap.fbs +++ b/go/serial/addressmap.fbs @@ -26,7 +26,8 @@ table AddressMap { // - value addresses for AddressMap leaf nodes address_array:[ubyte] (required); - // array of uvarint encoded subtree counts + // array of varint encoded subtree counts + // see: go/store/prolly/message/varint.go subtree_counts:[ubyte]; // total count of prolly tree tree_count:uint64; diff --git a/go/serial/blob.fbs b/go/serial/blob.fbs index be72fa6c4c..d99edf682e 100644 --- a/go/serial/blob.fbs +++ b/go/serial/blob.fbs @@ -21,7 +21,8 @@ table Blob { 
// array of subtree addresses for internal tree nodes address_array:[ubyte]; - // array of uvarint encoded subtree sizes + // array of varint encoded subtree counts + // see: go/store/prolly/message/varint.go subtree_sizes:[ubyte]; tree_size:uint64; tree_level:uint8; diff --git a/go/serial/commitclosure.fbs b/go/serial/commitclosure.fbs index f4bba59a48..edbd827dc1 100644 --- a/go/serial/commitclosure.fbs +++ b/go/serial/commitclosure.fbs @@ -22,7 +22,8 @@ table CommitClosure { // array of subtree addresses for internal prolly tree nodes address_array:[ubyte]; - // array of uvarint encoded subtree counts + // array of varint encoded subtree counts + // see: go/store/prolly/message/varint.go subtree_counts:[ubyte]; // total count of prolly tree tree_count:uint64; diff --git a/go/serial/mergeartifacts.fbs b/go/serial/mergeartifacts.fbs index 0ebf6ce77c..ca1d1d44be 100644 --- a/go/serial/mergeartifacts.fbs +++ b/go/serial/mergeartifacts.fbs @@ -35,7 +35,8 @@ table MergeArtifacts { // array of subtree addresses for internal tree nodes address_array:[ubyte]; - // array of uvarint encoded subtree counts + // array of varint encoded subtree counts + // see: go/store/prolly/message/varint.go subtree_counts:[ubyte]; // total count of prolly tree tree_count:uint64; diff --git a/go/serial/prolly.fbs b/go/serial/prolly.fbs index 93777a7a21..8c7f1ea49d 100644 --- a/go/serial/prolly.fbs +++ b/go/serial/prolly.fbs @@ -39,14 +39,13 @@ table ProllyTreeNode { // (eg value tuples containing out-of-line BLOB addresses) value_address_offsets:[uint16]; - // array of chunk addresses // - subtree addresses for internal prolly tree nodes // - value addresses for AddressMap leaf nodes address_array:[ubyte]; - - // array of uvarint encoded subtree counts + // array of varint encoded subtree counts + // see: go/store/prolly/message/varint.go subtree_counts:[ubyte]; // total count of prolly tree tree_count:uint64; diff --git a/go/serial/table.fbs b/go/serial/table.fbs index 
aacd42a117..38a3ddec0d 100644 --- a/go/serial/table.fbs +++ b/go/serial/table.fbs @@ -34,12 +34,14 @@ table Table { conflicts:Conflicts; // address of a violations types.Map (for __DOLT_DEV__). + // todo: deprecate violations:[ubyte]; // address of artifacts artifacts:[ubyte]; } +// todo: deprecate table Conflicts { // address of a conflicts types.Map (for __DOLT_DEV__). data:[ubyte] (required); From dfdb4a4b2a920184a7f641f7dad3e12a275a3207 Mon Sep 17 00:00:00 2001 From: Aaron Son Date: Thu, 11 May 2023 15:46:55 -0700 Subject: [PATCH 38/82] go: sqle: cluster: Implement a first pass at dolt_cluster_ack_writes_timeout_secs. Setting this system variable to a non-zero value on a primary replica in a sql-server cluster will cause dolt to block a SQL client performing a commit until that client's commit is fully replicated to the replicas. If there is a timeout, currently a warning is logged in the logs. --- .../doltcore/sqle/cluster/commithook.go | 81 ++++++++++++- .../doltcore/sqle/cluster/controller.go | 19 ++++ .../doltcore/sqle/cluster/initdbhook.go | 4 + go/libraries/doltcore/sqle/dsess/variables.go | 5 +- .../doltcore/sqle/system_variables.go | 7 ++ .../tests/sql-server-cluster.yaml | 106 ++++++++++++++++++ 6 files changed, 219 insertions(+), 3 deletions(-) diff --git a/go/libraries/doltcore/sqle/cluster/commithook.go b/go/libraries/doltcore/sqle/cluster/commithook.go index 1b540f028d..f3b0bbb94a 100644 --- a/go/libraries/doltcore/sqle/cluster/commithook.go +++ b/go/libraries/doltcore/sqle/cluster/commithook.go @@ -53,6 +53,16 @@ type commithook struct { // commithooks are caught up with replicating to the standby. waitNotify func() + // This is a slice of notification channels maintained by the + // commithook. The semantics are: + // 1. All accesses to |successChs| must happen with |mu| held. + // 2. There maybe be |0| or more channels in the slice. + // 3. 
As a reader, if |successChs| is non-empty, you should just read a value, for example, |successChs[0]| and use it. All entries will be closed at the same time. If |successChs| is empty when you need a channel, you should add one to it. + // 4. If you read a channel out of |successChs|, that channel will be closed on the next successful replication attempt. It will not be closed before then. + successChs []chan struct{} + + execTimeout time.Duration + role Role // The standby replica to which the new root gets replicated. @@ -143,6 +153,12 @@ func (h *commithook) replicate(ctx context.Context) { if h.waitNotify != nil { h.waitNotify() } + if len(h.successChs) != 0 { + for _, ch := range h.successChs { + close(ch) + } + h.successChs = nil + } h.cond.Wait() lgr.Tracef("cluster/commithook: background thread: woken up.") } @@ -192,6 +208,13 @@ func (h *commithook) attemptReplicate(ctx context.Context) { } h.cancelReplicate = nil }() + successChs := h.successChs + h.successChs = nil + defer func() { + if len(successChs) != 0 { + h.successChs = append(h.successChs, successChs...) + } + }() h.mu.Unlock() if destDB == nil { @@ -242,6 +265,12 @@ func (h *commithook) attemptReplicate(ctx context.Context) { h.lastPushedHead = toPush h.lastSuccess = incomingTime h.nextPushAttempt = time.Time{} + if len(successChs) != 0 { + for _, ch := range successChs { + close(ch) + } + successChs = nil + } } else { h.currentError = new(string) *h.currentError = fmt.Sprintf("failed to commit chunks on destDB: %v", err) @@ -350,6 +379,47 @@ func (h *commithook) setWaitNotify(f func()) bool { return true } +type replicationResult int + +const replicationResultTimeout = 0 +const replicationResultContextCanceled = 1 +const replicationResultSuccess = 2 + +// Blocks the current goroutine until: +// 1. There is no replication necessary, i.e., isCaughtUp() == true. This returns replicationResultSuccess. +// 2. The replication of |nextHead|, or a later head, at the time this method was called succeeds. 
This returns replicationResultSuccess. +// 3. ctx.Done() closes. This returns replicationResultContextCanceled. +// 4. timeout passes. This returns replicationResultTimeout. +func (h *commithook) waitForReplicationSuccess(ctx context.Context, timeout time.Duration) replicationResult { + h.mu.Lock() + if h.isCaughtUp() { + h.mu.Unlock() + return replicationResultSuccess + } + if len(h.successChs) == 0 { + h.successChs = append(h.successChs, make(chan struct{})) + } + ch := h.successChs[0] + h.mu.Unlock() + select { + case <-ch: + return replicationResultSuccess + case <-ctx.Done(): + return replicationResultContextCanceled + case <-time.After(timeout): + return replicationResultTimeout + } +} + +// Set by the controller. If it is non-zero, the Execute() DatabaseHook +// callback will block the calling goroutine for that many seconds waiting for +// replication quiescence. +func (h *commithook) setExecTimeout(timeout time.Duration) { + h.mu.Lock() + h.execTimeout = timeout + h.mu.Unlock() +} + +var errDetectedBrokenConfigStr = "error: more than one server was configured as primary in the same epoch. this server has stopped accepting writes. 
choose a primary in the cluster and call dolt_assume_cluster_role() on servers in the cluster to start replication at a higher epoch" // Execute on this commithook updates the target root hash we're attempting to @@ -364,10 +434,10 @@ func (h *commithook) Execute(ctx context.Context, ds datas.Dataset, db datas.Dat return err } h.mu.Lock() - defer h.mu.Unlock() lgr = h.logger() if h.role != RolePrimary { lgr.Warnf("cluster/commithook received commit callback for a commit on %s, but we are not role primary; not replicating the commit, which is likely to be lost.", ds.ID()) + h.mu.Unlock() return nil } if root != h.nextHead { @@ -377,6 +447,15 @@ func (h *commithook) Execute(ctx context.Context, ds datas.Dataset, db datas.Dat h.nextPushAttempt = time.Time{} h.cond.Signal() } + execTimeout := h.execTimeout + h.mu.Unlock() + if execTimeout != time.Duration(0) { + res := h.waitForReplicationSuccess(ctx, execTimeout) + if res != replicationResultSuccess { + // TODO: Get this failure into the *sql.Context warnings. + lgr.Warnf("cluster/commithook failed to replicate write before the timeout. timeout: %d, wait result: %v", execTimeout, res) + } + } return nil } diff --git a/go/libraries/doltcore/sqle/cluster/controller.go b/go/libraries/doltcore/sqle/cluster/controller.go index 322af3ed63..e4facd8eab 100644 --- a/go/libraries/doltcore/sqle/cluster/controller.go +++ b/go/libraries/doltcore/sqle/cluster/controller.go @@ -82,6 +82,7 @@ type Controller struct { type sqlvars interface { AddSystemVariables(sysVars []sql.SystemVariable) + GetGlobal(name string) (sql.SystemVariable, interface{}, bool) } // We can manage certain aspects of the exposed databases on the server through @@ -175,6 +176,20 @@ func (c *Controller) ManageSystemVariables(variables sqlvars) { c.mu.Lock() defer c.mu.Unlock() c.systemVars = variables + + // We reset this system variable here to put our NotifyChanged on it. 
+ v, _, ok := variables.GetGlobal(dsess.DoltClusterAckWritesTimeoutSecs) + if !ok { + panic(fmt.Sprintf("internal error: did not find required global system variable %s", dsess.DoltClusterAckWritesTimeoutSecs)) + } + v.NotifyChanged = func(scope sql.SystemVariableScope, v sql.SystemVarValue) { + c.mu.Lock() + defer c.mu.Unlock() + for _, hook := range c.commithooks { + hook.setExecTimeout(time.Duration(v.Val.(int64)) * time.Second) + } + } + variables.AddSystemVariables([]sql.SystemVariable{v}) c.refreshSystemVars() } @@ -194,6 +209,10 @@ func (c *Controller) ApplyStandbyReplicationConfig(ctx context.Context, bt *sql. if err != nil { return err } + _, execTimeoutVal, _ := c.systemVars.GetGlobal(dsess.DoltClusterAckWritesTimeoutSecs) + for _, h := range hooks { + h.setExecTimeout(time.Duration(execTimeoutVal.(int64)) * time.Second) + } c.commithooks = append(c.commithooks, hooks...) } return nil diff --git a/go/libraries/doltcore/sqle/cluster/initdbhook.go b/go/libraries/doltcore/sqle/cluster/initdbhook.go index 7713c91540..588f35a235 100644 --- a/go/libraries/doltcore/sqle/cluster/initdbhook.go +++ b/go/libraries/doltcore/sqle/cluster/initdbhook.go @@ -17,6 +17,7 @@ package cluster import ( "context" "strings" + "time" "github.com/dolthub/go-mysql-server/sql" @@ -57,6 +58,8 @@ func NewInitDatabaseHook(controller *Controller, bt *sql.BackgroundThreads, orig }) } + _, execTimeoutVal, _ := controller.systemVars.GetGlobal(dsess.DoltClusterAckWritesTimeoutSecs) + role, _ := controller.roleAndEpoch() for i, r := range controller.cfg.StandbyRemotes() { ttfdir, err := denv.TempTableFilesDir() @@ -64,6 +67,7 @@ func NewInitDatabaseHook(controller *Controller, bt *sql.BackgroundThreads, orig return err } commitHook := newCommitHook(controller.lgr, r.Name(), name, role, remoteDBs[i], denv.DoltDB, ttfdir) + commitHook.setExecTimeout(time.Duration(execTimeoutVal.(int64)) * time.Second) denv.DoltDB.PrependCommitHook(ctx, commitHook) controller.registerCommitHook(commitHook) if 
err := commitHook.Run(bt); err != nil { diff --git a/go/libraries/doltcore/sqle/dsess/variables.go b/go/libraries/doltcore/sqle/dsess/variables.go index c979dded55..07b18431e7 100644 --- a/go/libraries/doltcore/sqle/dsess/variables.go +++ b/go/libraries/doltcore/sqle/dsess/variables.go @@ -52,8 +52,9 @@ const ( ShowBranchDatabases = "dolt_show_branch_databases" DoltLogLevel = "dolt_log_level" - DoltClusterRoleVariable = "dolt_cluster_role" - DoltClusterRoleEpochVariable = "dolt_cluster_role_epoch" + DoltClusterRoleVariable = "dolt_cluster_role" + DoltClusterRoleEpochVariable = "dolt_cluster_role_epoch" + DoltClusterAckWritesTimeoutSecs = "dolt_cluster_ack_writes_timeout_secs" ) const URLTemplateDatabasePlaceholder = "{database}" diff --git a/go/libraries/doltcore/sqle/system_variables.go b/go/libraries/doltcore/sqle/system_variables.go index 9875b7642b..a18ec75336 100644 --- a/go/libraries/doltcore/sqle/system_variables.go +++ b/go/libraries/doltcore/sqle/system_variables.go @@ -166,6 +166,13 @@ func AddDoltSystemVariables() { Type: types.NewSystemBoolType(dsess.ShowBranchDatabases), Default: int8(0), }, + { + Name: dsess.DoltClusterAckWritesTimeoutSecs, + Dynamic: true, + Scope: sql.SystemVariableScope_Persist, + Type: types.NewSystemIntType(dsess.DoltClusterAckWritesTimeoutSecs, 0, 60, false), + Default: int64(0), + }, }) } diff --git a/integration-tests/go-sql-server-driver/tests/sql-server-cluster.yaml b/integration-tests/go-sql-server-driver/tests/sql-server-cluster.yaml index 1bd5b6ec00..c8a75205d5 100644 --- a/integration-tests/go-sql-server-driver/tests/sql-server-cluster.yaml +++ b/integration-tests/go-sql-server-driver/tests/sql-server-cluster.yaml @@ -992,6 +992,112 @@ tests: result: columns: ["count(*)"] rows: [["5"]] +- name: dolt_cluster_ack_writes_timeout_secs behavior + multi_repos: + - name: server1 + with_files: + - name: server.yaml + contents: | + log_level: trace + listener: + host: 0.0.0.0 + port: 3309 + cluster: + standby_remotes: + - name: 
standby + remote_url_template: http://localhost:3852/{database} + bootstrap_role: primary + bootstrap_epoch: 1 + remotesapi: + port: 3851 + server: + args: ["--config", "server.yaml"] + port: 3309 + - name: server2 + with_files: + - name: server.yaml + contents: | + log_level: trace + listener: + host: 0.0.0.0 + port: 3310 + cluster: + standby_remotes: + - name: standby + remote_url_template: http://localhost:3851/{database} + bootstrap_role: standby + bootstrap_epoch: 1 + remotesapi: + port: 3852 + server: + args: ["--config", "server.yaml"] + port: 3310 + # This test writes new commits on the primary and quickly checks for them on + # the secondary. If dolt_cluster_ack_writes_timeout_secs is working as + # intended, the new writes will always be present. + connections: + - on: server1 + queries: + - exec: 'SET @@PERSIST.dolt_cluster_ack_writes_timeout_secs = 10' + - exec: 'CREATE DATABASE repo1' + - exec: 'USE repo1' + - exec: 'CREATE TABLE vals (i INT PRIMARY KEY)' + - on: server2 + queries: + - exec: 'USE repo1' + - query: "SHOW TABLES" + result: + columns: ["Tables_in_repo1"] + rows: [["vals"]] + - on: server1 + queries: + - exec: 'USE repo1' + - exec: 'INSERT INTO vals VALUES (0),(1),(2),(3),(4)' + - on: server2 + queries: + - exec: 'USE repo1' + - query: 'SELECT COUNT(*) FROM vals' + result: + columns: ["COUNT(*)"] + rows: [["5"]] + - on: server1 + queries: + - exec: 'USE repo1' + - exec: 'INSERT INTO vals VALUES (5),(6),(7),(8),(9)' + - on: server2 + queries: + - exec: 'USE repo1' + - query: 'SELECT COUNT(*) FROM vals' + result: + columns: ["COUNT(*)"] + rows: [["10"]] + # Restart both servers to test the behavior of the persisted variable. 
+ - on: server1 + restart_server: {} + - on: server2 + restart_server: {} + - on: server1 + queries: + - exec: 'USE repo1' + - exec: 'INSERT INTO vals VALUES (10),(11),(12),(13),(14)' + - on: server2 + queries: + - exec: 'USE repo1' + - query: 'SELECT COUNT(*) FROM vals' + result: + columns: ["COUNT(*)"] + rows: [["15"]] + - on: server1 + queries: + - exec: 'USE repo1' + - exec: 'INSERT INTO vals VALUES (15),(16),(17),(18),(19)' + - on: server2 + queries: + - exec: 'USE repo1' + - query: 'SELECT COUNT(*) FROM vals' + result: + columns: ["COUNT(*)"] + rows: [["20"]] - name: call dolt checkout multi_repos: - name: server1 From 08d7520073a2f4b78c24c8309e0d40fd97c7408a Mon Sep 17 00:00:00 2001 From: Andy Arthur Date: Thu, 11 May 2023 15:54:21 -0700 Subject: [PATCH 39/82] go/store/prolly: cleanup todos, remove error from cursor.isLeaf --- go/store/prolly/map_test.go | 1 - go/store/prolly/message/merge_artifacts.go | 1 - go/store/prolly/message/prolly_map.go | 1 - go/store/prolly/message/serialize.go | 1 - go/store/prolly/tree/content_address_test.go | 2 +- go/store/prolly/tree/node_cursor.go | 63 ++++---------------- go/store/prolly/tree/node_splitter.go | 1 - 7 files changed, 11 insertions(+), 59 deletions(-) diff --git a/go/store/prolly/map_test.go b/go/store/prolly/map_test.go index 3fe2fa6ac8..34858418f5 100644 --- a/go/store/prolly/map_test.go +++ b/go/store/prolly/map_test.go @@ -33,7 +33,6 @@ import ( "github.com/dolthub/dolt/go/store/val" ) -// todo(andy): randomize test seed var testRand = rand.New(rand.NewSource(1)) var sharedPool = pool.NewBuffPool() diff --git a/go/store/prolly/message/merge_artifacts.go b/go/store/prolly/message/merge_artifacts.go index 5db43efe0e..e18815e7df 100644 --- a/go/store/prolly/message/merge_artifacts.go +++ b/go/store/prolly/message/merge_artifacts.go @@ -224,7 +224,6 @@ func estimateMergeArtifactSize(keys, values [][]byte, subtrees []uint64, keyAddr panic(fmt.Sprintf("value vector exceeds Size limit ( %d > %d )", valSz, 
MaxVectorOffset)) } - // todo(andy): better estimates bufSz += keySz + valSz // tuples bufSz += refCntSz // subtree counts bufSz += len(keys)*2 + len(values)*2 // offStart diff --git a/go/store/prolly/message/prolly_map.go b/go/store/prolly/message/prolly_map.go index d631c71d5a..f5e914abe2 100644 --- a/go/store/prolly/message/prolly_map.go +++ b/go/store/prolly/message/prolly_map.go @@ -222,7 +222,6 @@ func estimateProllyMapSize(keys, values [][]byte, subtrees []uint64, valAddrsCnt panic(fmt.Sprintf("value vector exceeds Size limit ( %d > %d )", valSz, MaxVectorOffset)) } - // todo(andy): better estimates bufSz += keySz + valSz // tuples bufSz += subtreesSz // subtree counts bufSz += len(keys)*2 + len(values)*2 // offStart diff --git a/go/store/prolly/message/serialize.go b/go/store/prolly/message/serialize.go index 27903f7cb4..70b8fcaf36 100644 --- a/go/store/prolly/message/serialize.go +++ b/go/store/prolly/message/serialize.go @@ -100,7 +100,6 @@ func writeAddressOffsets(b *fb.Builder, items [][]byte, sumSz int, td val.TupleD } func writeCountArray(b *fb.Builder, counts []uint64) fb.UOffsetT { - // todo(andy): write without alloc buf := make([]byte, maxEncodedSize(len(counts))) return b.CreateByteVector(encodeVarints(counts, buf)) } diff --git a/go/store/prolly/tree/content_address_test.go b/go/store/prolly/tree/content_address_test.go index e378180b69..31b487f621 100644 --- a/go/store/prolly/tree/content_address_test.go +++ b/go/store/prolly/tree/content_address_test.go @@ -33,7 +33,7 @@ var goldenHash = hash.Hash{ 0xea, 0x7d, 0x47, 0x69, 0x6c, } -// todo(andy): need and analogous test in pkg prolly +// todo(andy): need an analogous test in pkg prolly func TestContentAddress(t *testing.T) { tups, _ := AscendingUintTuples(12345) m := makeTree(t, tups) diff --git a/go/store/prolly/tree/node_cursor.go b/go/store/prolly/tree/node_cursor.go index 10bf882e46..672212e6fd 100644 --- a/go/store/prolly/tree/node_cursor.go +++ b/go/store/prolly/tree/node_cursor.go @@ 
-45,12 +45,7 @@ type Ordering[K ~[]byte] interface { func newCursorAtStart(ctx context.Context, ns NodeStore, nd Node) (cur *Cursor, err error) { cur = &Cursor{nd: nd, nrw: ns} - var leaf bool - leaf, err = cur.isLeaf() - if err != nil { - return nil, err - } - for !leaf { + for !cur.isLeaf() { nd, err = fetchChild(ctx, ns, cur.currentRef()) if err != nil { return nil, err @@ -58,10 +53,6 @@ func newCursorAtStart(ctx context.Context, ns NodeStore, nd Node) (cur *Cursor, parent := cur cur = &Cursor{nd: nd, parent: parent, nrw: ns} - leaf, err = cur.isLeaf() - if err != nil { - return nil, err - } } return } @@ -70,12 +61,7 @@ func newCursorAtEnd(ctx context.Context, ns NodeStore, nd Node) (cur *Cursor, er cur = &Cursor{nd: nd, nrw: ns} cur.skipToNodeEnd() - var leaf bool - leaf, err = cur.isLeaf() - if err != nil { - return nil, err - } - for !leaf { + for !cur.isLeaf() { nd, err = fetchChild(ctx, ns, cur.currentRef()) if err != nil { return nil, err @@ -84,10 +70,6 @@ func newCursorAtEnd(ctx context.Context, ns NodeStore, nd Node) (cur *Cursor, er parent := cur cur = &Cursor{nd: nd, parent: parent, nrw: ns} cur.skipToNodeEnd() - leaf, err = cur.isLeaf() - if err != nil { - return nil, err - } } return } @@ -140,11 +122,7 @@ func newCursorAtOrdinal(ctx context.Context, ns NodeStore, nd Node, ord uint64) // GetOrdinalOfCursor returns the ordinal position of a Cursor. 
func getOrdinalOfCursor(curr *Cursor) (ord uint64, err error) { - leaf, err := curr.isLeaf() - if err != nil { - return 0, err - } - if !leaf { + if !curr.isLeaf() { return 0, fmt.Errorf("|cur| must be at a leaf") } @@ -186,13 +164,7 @@ func newCursorFromSearchFn(ctx context.Context, ns NodeStore, nd Node, search Se cur = &Cursor{nd: nd, nrw: ns} cur.idx = search(cur.nd) - var leaf bool - leaf, err = cur.isLeaf() - if err != nil { - return nil, err - } - for !leaf { - + for !cur.isLeaf() { // stay in bounds for internal nodes cur.keepInBounds() @@ -205,16 +177,12 @@ func newCursorFromSearchFn(ctx context.Context, ns NodeStore, nd Node, search Se cur = &Cursor{nd: nd, parent: parent, nrw: ns} cur.idx = search(cur.nd) - leaf, err = cur.isLeaf() - if err != nil { - return nil, err - } } - return } func newLeafCursorAtKey[K ~[]byte, O Ordering[K]](ctx context.Context, ns NodeStore, nd Node, key K, order O) (Cursor, error) { + var err error cur := Cursor{nd: nd, nrw: ns} for { // binary search |cur.nd| for |key| @@ -230,10 +198,7 @@ func newLeafCursorAtKey[K ~[]byte, O Ordering[K]](ctx context.Context, ns NodeSt } cur.idx = i - leaf, err := cur.isLeaf() - if err != nil { - return cur, err - } else if leaf { + if cur.isLeaf() { break // done } @@ -402,13 +367,10 @@ func (cur *Cursor) currentRef() hash.Hash { } func (cur *Cursor) currentSubtreeSize() (uint64, error) { - leaf, err := cur.isLeaf() - if err != nil { - return 0, err - } - if leaf { + if cur.isLeaf() { return 1, nil } + var err error cur.nd, err = cur.nd.loadSubtrees() if err != nil { return 0, err @@ -455,13 +417,8 @@ func (cur *Cursor) atNodeEnd() bool { return cur.idx == lastKeyIdx } -func (cur *Cursor) isLeaf() (bool, error) { - // todo(andy): cache Level - lvl, err := cur.level() - if err != nil { - return false, err - } - return lvl == 0, nil +func (cur *Cursor) isLeaf() bool { + return cur.nd.level == 0 } func (cur *Cursor) level() (uint64, error) { diff --git a/go/store/prolly/tree/node_splitter.go 
b/go/store/prolly/tree/node_splitter.go index 2154f30389..b21b70ad05 100644 --- a/go/store/prolly/tree/node_splitter.go +++ b/go/store/prolly/tree/node_splitter.go @@ -190,7 +190,6 @@ func newKeySplitter(level uint8) nodeSplitter { var _ splitterFactory = newKeySplitter func (ks *keySplitter) Append(key, value Item) error { - // todo(andy): account for key/value offsets, vtable, etc. thisSize := uint32(len(key) + len(value)) ks.size += thisSize From 6516432947c08dbca6c559c20d020246db8a54f6 Mon Sep 17 00:00:00 2001 From: Andy Arthur Date: Thu, 11 May 2023 15:57:19 -0700 Subject: [PATCH 40/82] go/store/prolly/tree: make cursor pkg private --- go/store/prolly/tree/chunker.go | 18 ++-- go/store/prolly/tree/diff.go | 26 ++--- go/store/prolly/tree/map.go | 36 +++---- go/store/prolly/tree/mutator.go | 2 +- go/store/prolly/tree/node_cursor.go | 124 +++++++++++------------ go/store/prolly/tree/node_cursor_test.go | 6 +- 6 files changed, 106 insertions(+), 106 deletions(-) diff --git a/go/store/prolly/tree/chunker.go b/go/store/prolly/tree/chunker.go index ecad36aae4..08bd0c5f4a 100644 --- a/go/store/prolly/tree/chunker.go +++ b/go/store/prolly/tree/chunker.go @@ -35,7 +35,7 @@ type Chunker interface { } type chunker[S message.Serializer] struct { - cur *Cursor + cur *cursor parent *chunker[S] level int done bool @@ -57,7 +57,7 @@ func newEmptyChunker[S message.Serializer](ctx context.Context, ns NodeStore, se return newChunker(ctx, nil, 0, ns, serializer) } -func newChunker[S message.Serializer](ctx context.Context, cur *Cursor, level int, ns NodeStore, serializer S) (*chunker[S], error) { +func newChunker[S message.Serializer](ctx context.Context, cur *cursor, level int, ns NodeStore, serializer S) (*chunker[S], error) { // |cur| will be nil if this is a new Node, implying this is a new tree, or the tree has grown in height relative // to its original chunked form. 
@@ -144,21 +144,21 @@ func (tc *chunker[S]) DeletePair(ctx context.Context, _, _ Item) error { return tc.skip(ctx) } -// advanceTo progresses the chunker until its tracking Cursor catches up with -// |next|, a Cursor indicating next key where an edit will be applied. +// advanceTo progresses the chunker until its tracking cursor catches up with +// |next|, a cursor indicating next key where an edit will be applied. // // The method proceeds from the deepest chunker recursively into its // linked list parents: // -// (1) If the current Cursor and all of its parents are aligned with |next|, +// (1) If the current cursor and all of its parents are aligned with |next|, // we are done. // -// (2) In lockstep, a) append to the chunker and b) increment the Cursor until +// (2) In lockstep, a) append to the chunker and b) increment the cursor until // we either meet condition (1) and return, or we synchronize and progress to // (3) or (4). Synchronizing means that the current tree being built has // reached a chunk boundary that aligns with a chunk boundary in the old tree // being mutated. Synchronization means chunks between this boundary and -// |next| at the current Cursor level will be unchanged and can be skipped. +// |next| at the current cursor level will be unchanged and can be skipped. // // (3) All parent cursors are (1) current or (2) synchronized, or there are no // parents, and we are done. @@ -168,7 +168,7 @@ func (tc *chunker[S]) DeletePair(ctx context.Context, _, _ Item) error { // anticipation of impending edits that may edit the current chunk. Note that // processPrefix is only necessary for the "fast forward" case where we // synchronized the tree level before reaching |next|. 
-func (tc *chunker[S]) advanceTo(ctx context.Context, next *Cursor) error { +func (tc *chunker[S]) advanceTo(ctx context.Context, next *cursor) error { cmp := tc.cur.compare(next) if cmp == 0 { // step (1) return nil @@ -347,7 +347,7 @@ func (tc *chunker[S]) handleChunkBoundary(ctx context.Context) error { func (tc *chunker[S]) createParentChunker(ctx context.Context) (err error) { assertTrue(tc.parent == nil, "chunker parent must be nil") - var parent *Cursor + var parent *cursor if tc.cur != nil && tc.cur.parent != nil { // todo(andy): does this comment make sense? cloning a pointer? // Clone the parent cursor because otherwise calling cur.forward() will affect our parent - and vice versa - diff --git a/go/store/prolly/tree/diff.go b/go/store/prolly/tree/diff.go index fbc7c4bce1..62e3f00d54 100644 --- a/go/store/prolly/tree/diff.go +++ b/go/store/prolly/tree/diff.go @@ -37,8 +37,8 @@ type Diff struct { type DiffFn func(context.Context, Diff) error type Differ[K ~[]byte, O Ordering[K]] struct { - from, to *Cursor - fromStop, toStop *Cursor + from, to *cursor + fromStop, toStop *cursor order O } @@ -48,7 +48,7 @@ func DifferFromRoots[K ~[]byte, O Ordering[K]]( from, to Node, order O, ) (Differ[K, O], error) { - var fc, tc *Cursor + var fc, tc *cursor var err error if !from.empty() { @@ -57,7 +57,7 @@ func DifferFromRoots[K ~[]byte, O Ordering[K]]( return Differ[K, O]{}, err } } else { - fc = &Cursor{} + fc = &cursor{} } if !to.empty() { @@ -66,7 +66,7 @@ func DifferFromRoots[K ~[]byte, O Ordering[K]]( return Differ[K, O]{}, err } } else { - tc = &Cursor{} + tc = &cursor{} } fs, err := newCursorPastEnd(ctx, fromNs, from) @@ -156,7 +156,7 @@ func (td Differ[K, O]) Next(ctx context.Context) (diff Diff, err error) { return Diff{}, io.EOF } -func sendRemoved(ctx context.Context, from *Cursor) (diff Diff, err error) { +func sendRemoved(ctx context.Context, from *cursor) (diff Diff, err error) { diff = Diff{ Type: RemovedDiff, Key: from.CurrentKey(), @@ -169,7 +169,7 @@ 
func sendRemoved(ctx context.Context, from *Cursor) (diff Diff, err error) { return } -func sendAdded(ctx context.Context, to *Cursor) (diff Diff, err error) { +func sendAdded(ctx context.Context, to *cursor) (diff Diff, err error) { diff = Diff{ Type: AddedDiff, Key: to.CurrentKey(), @@ -182,7 +182,7 @@ func sendAdded(ctx context.Context, to *Cursor) (diff Diff, err error) { return } -func sendModified(ctx context.Context, from, to *Cursor) (diff Diff, err error) { +func sendModified(ctx context.Context, from, to *cursor) (diff Diff, err error) { diff = Diff{ Type: ModifiedDiff, Key: from.CurrentKey(), @@ -199,7 +199,7 @@ func sendModified(ctx context.Context, from, to *Cursor) (diff Diff, err error) return } -func skipCommon(ctx context.Context, from, to *Cursor) (err error) { +func skipCommon(ctx context.Context, from, to *cursor) (err error) { // track when |from.parent| and |to.parent| change // to avoid unnecessary comparisons. parentsAreNew := true @@ -238,7 +238,7 @@ func skipCommon(ctx context.Context, from, to *Cursor) (err error) { return err } -func skipCommonParents(ctx context.Context, from, to *Cursor) (err error) { +func skipCommonParents(ctx context.Context, from, to *cursor) (err error) { err = skipCommon(ctx, from.parent, to.parent) if err != nil { return err @@ -266,18 +266,18 @@ func skipCommonParents(ctx context.Context, from, to *Cursor) (err error) { } // todo(andy): assumes equal byte representations -func equalItems(from, to *Cursor) bool { +func equalItems(from, to *cursor) bool { return bytes.Equal(from.CurrentKey(), to.CurrentKey()) && bytes.Equal(from.currentValue(), to.currentValue()) } -func equalParents(from, to *Cursor) (eq bool) { +func equalParents(from, to *cursor) (eq bool) { if from.parent != nil && to.parent != nil { eq = equalItems(from.parent, to.parent) } return } -func equalcursorValues(from, to *Cursor) bool { +func equalcursorValues(from, to *cursor) bool { return bytes.Equal(from.currentValue(), to.currentValue()) } 
diff --git a/go/store/prolly/tree/map.go b/go/store/prolly/tree/map.go index 8e3e320e36..844aeeae14 100644 --- a/go/store/prolly/tree/map.go +++ b/go/store/prolly/tree/map.go @@ -66,7 +66,7 @@ func DiffKeyRangeOrderedTrees[K, V ~[]byte, O Ordering[K]]( start, stop K, cb DiffFn, ) error { - var fromStart, fromStop, toStart, toStop *Cursor + var fromStart, fromStop, toStart, toStop *cursor var err error if len(start) == 0 { @@ -80,12 +80,12 @@ func DiffKeyRangeOrderedTrees[K, V ~[]byte, O Ordering[K]]( return err } } else { - fromStart, err = NewCursorAtKey(ctx, from.NodeStore, from.Root, start, from.Order) + fromStart, err = newCursorAtKey(ctx, from.NodeStore, from.Root, start, from.Order) if err != nil { return err } - toStart, err = NewCursorAtKey(ctx, to.NodeStore, to.Root, start, to.Order) + toStart, err = newCursorAtKey(ctx, to.NodeStore, to.Root, start, to.Order) if err != nil { return err } @@ -102,12 +102,12 @@ func DiffKeyRangeOrderedTrees[K, V ~[]byte, O Ordering[K]]( return err } } else { - fromStop, err = NewCursorAtKey(ctx, from.NodeStore, from.Root, stop, from.Order) + fromStop, err = newCursorAtKey(ctx, from.NodeStore, from.Root, stop, from.Order) if err != nil { return err } - toStop, err = NewCursorAtKey(ctx, to.NodeStore, to.Root, stop, to.Order) + toStop, err = newCursorAtKey(ctx, to.NodeStore, to.Root, stop, to.Order) if err != nil { return err } @@ -299,7 +299,7 @@ func (t StaticMap[K, V, O]) IterAll(ctx context.Context) (*OrderedTreeIter[K, V] return nil, err } - stop := func(curr *Cursor) bool { + stop := func(curr *cursor) bool { return curr.compare(s) >= 0 } @@ -326,7 +326,7 @@ func (t StaticMap[K, V, O]) IterAllReverse(ctx context.Context) (*OrderedTreeIte return nil, err } - stop := func(curr *Cursor) bool { + stop := func(curr *cursor) bool { return curr.compare(beginning) <= 0 } @@ -364,7 +364,7 @@ func (t StaticMap[K, V, O]) IterOrdinalRange(ctx context.Context, start, stop ui return nil, err } - stopF := func(curr *Cursor) bool { + 
stopF := func(curr *cursor) bool { return curr.compare(hi) >= 0 } @@ -412,7 +412,7 @@ func (t StaticMap[K, V, O]) IterKeyRange(ctx context.Context, start, stop K) (*O return nil, err } - stopF := func(curr *Cursor) bool { + stopF := func(curr *cursor) bool { return curr.compare(hi) >= 0 } @@ -445,14 +445,14 @@ func (t StaticMap[K, V, O]) GetKeyRangeCardinality(ctx context.Context, start, s return endOrd - startOrd, nil } -func (t StaticMap[K, V, O]) getKeyRangeCursors(ctx context.Context, startInclusive, stopExclusive K) (lo, hi *Cursor, err error) { +func (t StaticMap[K, V, O]) getKeyRangeCursors(ctx context.Context, startInclusive, stopExclusive K) (lo, hi *cursor, err error) { if len(startInclusive) == 0 { lo, err = newCursorAtStart(ctx, t.NodeStore, t.Root) if err != nil { return nil, nil, err } } else { - lo, err = NewCursorAtKey(ctx, t.NodeStore, t.Root, startInclusive, t.Order) + lo, err = newCursorAtKey(ctx, t.NodeStore, t.Root, startInclusive, t.Order) if err != nil { return nil, nil, err } @@ -464,7 +464,7 @@ func (t StaticMap[K, V, O]) getKeyRangeCursors(ctx context.Context, startInclusi return nil, nil, err } } else { - hi, err = NewCursorAtKey(ctx, t.NodeStore, t.Root, stopExclusive, t.Order) + hi, err = newCursorAtKey(ctx, t.NodeStore, t.Root, stopExclusive, t.Order) if err != nil { return nil, nil, err } @@ -474,7 +474,7 @@ func (t StaticMap[K, V, O]) getKeyRangeCursors(ctx context.Context, startInclusi // GetOrdinalForKey returns the smallest ordinal position at which the key >= |query|. 
func (t StaticMap[K, V, O]) GetOrdinalForKey(ctx context.Context, query K) (uint64, error) { - cur, err := NewCursorAtKey(ctx, t.NodeStore, t.Root, query, t.Order) + cur, err := newCursorAtKey(ctx, t.NodeStore, t.Root, query, t.Order) if err != nil { return 0, err } @@ -483,12 +483,12 @@ func (t StaticMap[K, V, O]) GetOrdinalForKey(ctx context.Context, query K) (uint type OrderedTreeIter[K, V ~[]byte] struct { // current tuple location - curr *Cursor + curr *cursor // the function called to moved |curr| forward in the direction of iteration. step func(context.Context) error - // should return |true| if the passed in Cursor is past the iteration's stopping point. - stop func(*Cursor) bool + // should return |true| if the passed in cursor is past the iteration's stopping point. + stop func(*cursor) bool } func ReverseOrderedTreeIterFromCursors[K, V ~[]byte]( @@ -509,7 +509,7 @@ func ReverseOrderedTreeIterFromCursors[K, V ~[]byte]( return nil, err } - stopFn := func(curr *Cursor) bool { + stopFn := func(curr *cursor) bool { return curr.compare(start) < 0 } @@ -534,7 +534,7 @@ func OrderedTreeIterFromCursors[K, V ~[]byte]( return nil, err } - stopFn := func(curr *Cursor) bool { + stopFn := func(curr *cursor) bool { return curr.compare(stop) >= 0 } diff --git a/go/store/prolly/tree/mutator.go b/go/store/prolly/tree/mutator.go index 2c8bce2338..812a8c7c54 100644 --- a/go/store/prolly/tree/mutator.go +++ b/go/store/prolly/tree/mutator.go @@ -69,7 +69,7 @@ func ApplyMutations[K ~[]byte, O Ordering[K], S message.Serializer]( return root, nil // no mutations } - cur, err := NewCursorAtKey(ctx, ns, root, K(newKey), order) + cur, err := newCursorAtKey(ctx, ns, root, K(newKey), order) if err != nil { return Node{}, err } diff --git a/go/store/prolly/tree/node_cursor.go b/go/store/prolly/tree/node_cursor.go index 672212e6fd..aa6e2d9d5f 100644 --- a/go/store/prolly/tree/node_cursor.go +++ b/go/store/prolly/tree/node_cursor.go @@ -29,11 +29,11 @@ import ( 
"github.com/dolthub/dolt/go/store/hash" ) -// Cursor explores a tree of Nodes. -type Cursor struct { +// cursor explores a tree of Nodes. +type cursor struct { nd Node idx int - parent *Cursor + parent *cursor nrw NodeStore } @@ -43,8 +43,8 @@ type Ordering[K ~[]byte] interface { Compare(left, right K) int } -func newCursorAtStart(ctx context.Context, ns NodeStore, nd Node) (cur *Cursor, err error) { - cur = &Cursor{nd: nd, nrw: ns} +func newCursorAtStart(ctx context.Context, ns NodeStore, nd Node) (cur *cursor, err error) { + cur = &cursor{nd: nd, nrw: ns} for !cur.isLeaf() { nd, err = fetchChild(ctx, ns, cur.currentRef()) if err != nil { @@ -52,13 +52,13 @@ func newCursorAtStart(ctx context.Context, ns NodeStore, nd Node) (cur *Cursor, } parent := cur - cur = &Cursor{nd: nd, parent: parent, nrw: ns} + cur = &cursor{nd: nd, parent: parent, nrw: ns} } return } -func newCursorAtEnd(ctx context.Context, ns NodeStore, nd Node) (cur *Cursor, err error) { - cur = &Cursor{nd: nd, nrw: ns} +func newCursorAtEnd(ctx context.Context, ns NodeStore, nd Node) (cur *cursor, err error) { + cur = &cursor{nd: nd, nrw: ns} cur.skipToNodeEnd() for !cur.isLeaf() { @@ -68,13 +68,13 @@ func newCursorAtEnd(ctx context.Context, ns NodeStore, nd Node) (cur *Cursor, er } parent := cur - cur = &Cursor{nd: nd, parent: parent, nrw: ns} + cur = &cursor{nd: nd, parent: parent, nrw: ns} cur.skipToNodeEnd() } return } -func newCursorPastEnd(ctx context.Context, ns NodeStore, nd Node) (cur *Cursor, err error) { +func newCursorPastEnd(ctx context.Context, ns NodeStore, nd Node) (cur *cursor, err error) { cur, err = newCursorAtEnd(ctx, ns, nd) if err != nil { return nil, err @@ -92,7 +92,7 @@ func newCursorPastEnd(ctx context.Context, ns NodeStore, nd Node) (cur *Cursor, return } -func newCursorAtOrdinal(ctx context.Context, ns NodeStore, nd Node, ord uint64) (cur *Cursor, err error) { +func newCursorAtOrdinal(ctx context.Context, ns NodeStore, nd Node, ord uint64) (cur *cursor, err error) { cnt, err 
:= nd.TreeCount() if err != nil { return nil, err @@ -120,8 +120,8 @@ func newCursorAtOrdinal(ctx context.Context, ns NodeStore, nd Node, ord uint64) }) } -// GetOrdinalOfCursor returns the ordinal position of a Cursor. -func getOrdinalOfCursor(curr *Cursor) (ord uint64, err error) { +// GetOrdinalOfCursor returns the ordinal position of a cursor. +func getOrdinalOfCursor(curr *cursor) (ord uint64, err error) { if !curr.isLeaf() { return 0, fmt.Errorf("|cur| must be at a leaf") } @@ -156,12 +156,12 @@ func getOrdinalOfCursor(curr *Cursor) (ord uint64, err error) { return ord, nil } -func NewCursorAtKey[K ~[]byte, O Ordering[K]](ctx context.Context, ns NodeStore, nd Node, key K, order O) (cur *Cursor, err error) { +func newCursorAtKey[K ~[]byte, O Ordering[K]](ctx context.Context, ns NodeStore, nd Node, key K, order O) (cur *cursor, err error) { return newCursorFromSearchFn(ctx, ns, nd, searchForKey(key, order)) } -func newCursorFromSearchFn(ctx context.Context, ns NodeStore, nd Node, search SearchFn) (cur *Cursor, err error) { - cur = &Cursor{nd: nd, nrw: ns} +func newCursorFromSearchFn(ctx context.Context, ns NodeStore, nd Node, search SearchFn) (cur *cursor, err error) { + cur = &cursor{nd: nd, nrw: ns} cur.idx = search(cur.nd) for !cur.isLeaf() { @@ -174,16 +174,16 @@ func newCursorFromSearchFn(ctx context.Context, ns NodeStore, nd Node, search Se } parent := cur - cur = &Cursor{nd: nd, parent: parent, nrw: ns} + cur = &cursor{nd: nd, parent: parent, nrw: ns} cur.idx = search(cur.nd) } return } -func newLeafCursorAtKey[K ~[]byte, O Ordering[K]](ctx context.Context, ns NodeStore, nd Node, key K, order O) (Cursor, error) { +func newLeafCursorAtKey[K ~[]byte, O Ordering[K]](ctx context.Context, ns NodeStore, nd Node, key K, order O) (cursor, error) { var err error - cur := Cursor{nd: nd, nrw: ns} + cur := cursor{nd: nd, nrw: ns} for { // binary search |cur.nd| for |key| i, j := 0, cur.nd.Count() @@ -310,17 +310,17 @@ func recursiveFetchLeafNodeSpan(ctx 
context.Context, ns NodeStore, nodes []Node, return recursiveFetchLeafNodeSpan(ctx, ns, children, start, stop) } -func currentCursorItems(cur *Cursor) (key, value Item) { +func currentCursorItems(cur *cursor) (key, value Item) { key = cur.nd.keys.GetItem(cur.idx, cur.nd.msg) value = cur.nd.values.GetItem(cur.idx, cur.nd.msg) return } -// Seek updates the Cursor's node to one whose range spans the key's value, or the last +// Seek updates the cursor's node to one whose range spans the key's value, or the last // node if the key is greater than all existing keys. -// If a node does not contain the key, we recurse upwards to the parent Cursor. If the +// If a node does not contain the key, we recurse upwards to the parent cursor. If the // node contains a key, we recurse downwards into child nodes. -func Seek[K ~[]byte, O Ordering[K]](ctx context.Context, cur *Cursor, key K, order O) (err error) { +func Seek[K ~[]byte, O Ordering[K]](ctx context.Context, cur *cursor, key K, order O) (err error) { inBounds := true if cur.parent != nil { inBounds = inBounds && order.Compare(key, K(cur.firstKey())) >= 0 @@ -347,26 +347,26 @@ func Seek[K ~[]byte, O Ordering[K]](ctx context.Context, cur *Cursor, key K, ord return } -func (cur *Cursor) Valid() bool { +func (cur *cursor) Valid() bool { return cur.nd.count != 0 && cur.nd.bytes() != nil && cur.idx >= 0 && cur.idx < int(cur.nd.count) } -func (cur *Cursor) CurrentKey() Item { +func (cur *cursor) CurrentKey() Item { return cur.nd.GetKey(cur.idx) } -func (cur *Cursor) currentValue() Item { +func (cur *cursor) currentValue() Item { return cur.nd.GetValue(cur.idx) } -func (cur *Cursor) currentRef() hash.Hash { +func (cur *cursor) currentRef() hash.Hash { return cur.nd.getAddress(cur.idx) } -func (cur *Cursor) currentSubtreeSize() (uint64, error) { +func (cur *cursor) currentSubtreeSize() (uint64, error) { if cur.isLeaf() { return 1, nil } @@ -378,25 +378,25 @@ func (cur *Cursor) currentSubtreeSize() (uint64, error) { return 
cur.nd.getSubtreeCount(cur.idx) } -func (cur *Cursor) firstKey() Item { +func (cur *cursor) firstKey() Item { return cur.nd.GetKey(0) } -func (cur *Cursor) lastKey() Item { +func (cur *cursor) lastKey() Item { lastKeyIdx := int(cur.nd.count) - 1 return cur.nd.GetKey(lastKeyIdx) } -func (cur *Cursor) skipToNodeStart() { +func (cur *cursor) skipToNodeStart() { cur.idx = 0 } -func (cur *Cursor) skipToNodeEnd() { +func (cur *cursor) skipToNodeEnd() { lastKeyIdx := int(cur.nd.count) - 1 cur.idx = lastKeyIdx } -func (cur *Cursor) keepInBounds() { +func (cur *cursor) keepInBounds() { if cur.idx < 0 { cur.skipToNodeStart() } @@ -406,59 +406,59 @@ func (cur *Cursor) keepInBounds() { } } -func (cur *Cursor) atNodeStart() bool { +func (cur *cursor) atNodeStart() bool { return cur.idx == 0 } -// atNodeEnd returns true if the Cursor's current |idx| +// atNodeEnd returns true if the cursor's current |idx| // points to the last node item -func (cur *Cursor) atNodeEnd() bool { +func (cur *cursor) atNodeEnd() bool { lastKeyIdx := int(cur.nd.count) - 1 return cur.idx == lastKeyIdx } -func (cur *Cursor) isLeaf() bool { +func (cur *cursor) isLeaf() bool { return cur.nd.level == 0 } -func (cur *Cursor) level() (uint64, error) { +func (cur *cursor) level() (uint64, error) { return uint64(cur.nd.level), nil } -// invalidateAtEnd sets the Cursor's index to the node count. -func (cur *Cursor) invalidateAtEnd() { +// invalidateAtEnd sets the cursor's index to the node count. +func (cur *cursor) invalidateAtEnd() { cur.idx = int(cur.nd.count) } -// invalidateAtStart sets the Cursor's index to -1. -func (cur *Cursor) invalidateAtStart() { +// invalidateAtStart sets the cursor's index to -1. +func (cur *cursor) invalidateAtStart() { cur.idx = -1 } // hasNext returns true if we do not need to recursively -// check the parent to know that the current Cursor +// check the parent to know that the current cursor // has more keys. hasNext can be false even if parent // cursors are not exhausted. 
-func (cur *Cursor) hasNext() bool { +func (cur *cursor) hasNext() bool { return cur.idx < int(cur.nd.count)-1 } // hasPrev returns true if the current node has preceding // keys. hasPrev can be false even in a parent node has // preceding keys. -func (cur *Cursor) hasPrev() bool { +func (cur *cursor) hasPrev() bool { return cur.idx > 0 } -// outOfBounds returns true if the current Cursor and +// outOfBounds returns true if the current cursor and // all parents are exhausted. -func (cur *Cursor) outOfBounds() bool { +func (cur *cursor) outOfBounds() bool { return cur.idx < 0 || cur.idx >= int(cur.nd.count) } // advance either increments the current key index by one, // or has reached the end of the current node and skips to the next -// child of the parent Cursor, recursively if necessary, returning +// child of the parent cursor, recursively if necessary, returning // either an error or nil. // // More specifically, one of three things happens: @@ -466,14 +466,14 @@ func (cur *Cursor) outOfBounds() bool { // 1) The current chunk still has keys, iterate to // the next |idx|; // -// 2) We've exhausted the current Cursor, but there is at least -// one |parent| Cursor with more keys. We find that |parent| recursively, +// 2) We've exhausted the current cursor, but there is at least +// one |parent| cursor with more keys. We find that |parent| recursively, // perform step (1), and then have every child initialize itself // using the new |parent|. // -// 3) We've exhausted the current Cursor and every |parent|. Jump +// 3) We've exhausted the current cursor and every |parent|. Jump // to an end state (idx = node.count). -func (cur *Cursor) advance(ctx context.Context) error { +func (cur *cursor) advance(ctx context.Context) error { if cur.hasNext() { cur.idx++ return nil @@ -508,7 +508,7 @@ func (cur *Cursor) advance(ctx context.Context) error { // retreat decrements to the previous key, if necessary by // recursively decrementing parent nodes. 
-func (cur *Cursor) retreat(ctx context.Context) error { +func (cur *cursor) retreat(ctx context.Context) error { if cur.hasPrev() { cur.idx-- return nil @@ -541,9 +541,9 @@ func (cur *Cursor) retreat(ctx context.Context) error { return nil } -// fetchNode loads the Node that the Cursor index points to. -// It's called whenever the Cursor advances/retreats to a different chunk. -func (cur *Cursor) fetchNode(ctx context.Context) (err error) { +// fetchNode loads the Node that the cursor index points to. +// It's called whenever the cursor advances/retreats to a different chunk. +func (cur *cursor) fetchNode(ctx context.Context) (err error) { assertTrue(cur.parent != nil, "cannot fetch node for cursor with nil parent") cur.nd, err = fetchChild(ctx, cur.nrw, cur.parent.currentRef()) cur.idx = -1 // caller must set @@ -551,7 +551,7 @@ func (cur *Cursor) fetchNode(ctx context.Context) (err error) { } // Compare returns the highest relative index difference -// between two Cursor trees. A parent has a higher precedence +// between two cursor trees. A parent has a higher precedence // than its child. 
// // Ex: @@ -565,12 +565,12 @@ func (cur *Cursor) fetchNode(ctx context.Context) (err error) { // other: L3 -> 4, L2 -> 3, L1 -> 5, L0 -> 4 // // res => +1 (from level 2) -func (cur *Cursor) compare(other *Cursor) int { +func (cur *cursor) compare(other *cursor) int { return compareCursors(cur, other) } -func (cur *Cursor) clone() *Cursor { - cln := Cursor{ +func (cur *cursor) clone() *cursor { + cln := cursor{ nd: cur.nd, idx: cur.idx, nrw: cur.nrw, @@ -583,7 +583,7 @@ func (cur *Cursor) clone() *Cursor { return &cln } -func (cur *Cursor) copy(other *Cursor) { +func (cur *cursor) copy(other *cursor) { cur.nd = other.nd cur.idx = other.idx cur.nrw = other.nrw @@ -596,7 +596,7 @@ func (cur *Cursor) copy(other *Cursor) { } } -func compareCursors(left, right *Cursor) (diff int) { +func compareCursors(left, right *cursor) (diff int) { diff = 0 for { d := left.idx - right.idx diff --git a/go/store/prolly/tree/node_cursor_test.go b/go/store/prolly/tree/node_cursor_test.go index d40abdd20c..1d46b5cfa6 100644 --- a/go/store/prolly/tree/node_cursor_test.go +++ b/go/store/prolly/tree/node_cursor_test.go @@ -78,7 +78,7 @@ func testNewCursorAtItem(t *testing.T, count int) { ctx := context.Background() for i := range items { key, value := items[i][0], items[i][1] - cur, err := NewCursorAtKey(ctx, ns, root, val.Tuple(key), keyDesc) + cur, err := newCursorAtKey(ctx, ns, root, val.Tuple(key), keyDesc) require.NoError(t, err) assert.Equal(t, key, cur.CurrentKey()) assert.Equal(t, value, cur.currentValue()) @@ -104,7 +104,7 @@ func testGetOrdinalOfCursor(t *testing.T, count int) { assert.NoError(t, err) for i := 0; i < len(tuples); i++ { - curr, err := NewCursorAtKey(ctx, ns, nd, tuples[i][0], desc) + curr, err := newCursorAtKey(ctx, ns, nd, tuples[i][0], desc) require.NoError(t, err) ord, err := getOrdinalOfCursor(curr) @@ -117,7 +117,7 @@ func testGetOrdinalOfCursor(t *testing.T, count int) { b.PutUint32(0, uint32(len(tuples))) aboveItem := b.Build(sharedPool) - curr, err := 
NewCursorAtKey(ctx, ns, nd, aboveItem, desc) + curr, err := newCursorAtKey(ctx, ns, nd, aboveItem, desc) require.NoError(t, err) ord, err := getOrdinalOfCursor(curr) From 406cc9da6b04ff6ac257307c804ad6f69e40738c Mon Sep 17 00:00:00 2001 From: Nick Tobey Date: Thu, 11 May 2023 16:28:46 -0700 Subject: [PATCH 41/82] Restore original behavior of `IsValidRef`. I'm not proud of it, it could use a rewrite, but it's outside the scope of this PR. --- go/libraries/doltcore/env/actions/reset.go | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/go/libraries/doltcore/env/actions/reset.go b/go/libraries/doltcore/env/actions/reset.go index 527c02130a..9a57a5e151 100644 --- a/go/libraries/doltcore/env/actions/reset.go +++ b/go/libraries/doltcore/env/actions/reset.go @@ -275,18 +275,26 @@ func resetStaged(ctx context.Context, roots doltdb.Roots, tbls []string) (doltdb // IsValidRef validates whether the input parameter is a valid cString // TODO: this doesn't belong in this package func IsValidRef(ctx context.Context, cSpecStr string, ddb *doltdb.DoltDB, rsr env.RepoStateReader) (bool, error) { + // The error return value is only for propagating unhandled errors from rsr.CWBHeadRef() + // All other errors merely indicate an invalid ref spec. + // TODO: It's much better to enumerate the expected errors, to make sure we don't suppress any unexpected ones. cs, err := doltdb.NewCommitSpec(cSpecStr) if err != nil { - return false, err + return false, nil } headRef, err := rsr.CWBHeadRef() - if err != nil { + if err == doltdb.ErrOperationNotSupportedInDetachedHead { + // This is safe because ddb.Resolve checks if headRef is nil, but only when the value is actually needed. + // Basically, this guarantees that resolving "HEAD" or similar will return an error but other resolves will work. 
+ headRef = nil + } else if err != nil { return false, err } + _, err = ddb.Resolve(ctx, cs, headRef) if err != nil { - return false, err + return false, nil } return true, nil From 2953275da9d26e5c4bdbe76f775a0d5e67034e84 Mon Sep 17 00:00:00 2001 From: Nick Tobey Date: Thu, 11 May 2023 16:47:48 -0700 Subject: [PATCH 42/82] Update tests. --- go/libraries/doltcore/sqle/enginetest/dolt_queries_diff.go | 2 +- integration-tests/bats/sql-check-constraints.bats | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/go/libraries/doltcore/sqle/enginetest/dolt_queries_diff.go b/go/libraries/doltcore/sqle/enginetest/dolt_queries_diff.go index bf5ec972e0..186a84c956 100644 --- a/go/libraries/doltcore/sqle/enginetest/dolt_queries_diff.go +++ b/go/libraries/doltcore/sqle/enginetest/dolt_queries_diff.go @@ -3393,7 +3393,7 @@ var PatchTableFunctionScriptTests = []queries.ScriptTest{ Assertions: []queries.ScriptTestAssertion{ { Query: "SELECT statement_order, table_name, diff_type, statement FROM dolt_patch('HEAD', 'WORKING')", - Expected: []sql.Row{{1, "foo", "schema", "CREATE TABLE `foo` (\n `pk` int NOT NULL,\n `c1` int,\n PRIMARY KEY (`pk`),\n CONSTRAINT `chk_eq3jn5ra` CHECK ((c1 > 3))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;"}}, + Expected: []sql.Row{{1, "foo", "schema", "CREATE TABLE `foo` (\n `pk` int NOT NULL,\n `c1` int,\n PRIMARY KEY (`pk`),\n CONSTRAINT `foo_chk_eq3jn5ra` CHECK ((c1 > 3))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;"}}, }, }, }, diff --git a/integration-tests/bats/sql-check-constraints.bats b/integration-tests/bats/sql-check-constraints.bats index 425b7e9f35..e9758887ed 100644 --- a/integration-tests/bats/sql-check-constraints.bats +++ b/integration-tests/bats/sql-check-constraints.bats @@ -81,7 +81,7 @@ SQL # check information_schema.CHECK_CONSTRAINTS table run dolt sql -q "select constraint_catalog, constraint_name, check_clause from information_schema.CHECK_CONSTRAINTS;" -r csv - [[ 
"$output" =~ "def,chk_eq3jn5ra,(c1 > 3)" ]] || false + [[ "$output" =~ "def,foo_chk_eq3jn5ra,(c1 > 3)" ]] || false } @test "sql-check-constraints: check constraints survive renaming a column" { From 3a47e11a0c4aed2c8714a256eb9c04e0ca71bb78 Mon Sep 17 00:00:00 2001 From: Nick Tobey Date: Thu, 11 May 2023 16:49:21 -0700 Subject: [PATCH 43/82] Revert accidentally committed changes to go.mod --- go/go.mod | 4 ---- 1 file changed, 4 deletions(-) diff --git a/go/go.mod b/go/go.mod index 5cb9db3a78..b660f7b638 100644 --- a/go/go.mod +++ b/go/go.mod @@ -148,8 +148,4 @@ require ( replace github.com/dolthub/dolt/go/gen/proto/dolt/services/eventsapi => ./gen/proto/dolt/services/eventsapi -replace github.com/dolthub/vitess => ../../vitess - -replace github.com/dolthub/go-mysql-server => ../../go-mysql-server - go 1.19 From 365ce8d8530e9e53e0e899f1e289bf99b6abc1a3 Mon Sep 17 00:00:00 2001 From: Brian Hendriks Date: Thu, 11 May 2023 16:49:30 -0700 Subject: [PATCH 44/82] fix --- go/cmd/dolt/commands/sqlserver/metrics_listener.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/go/cmd/dolt/commands/sqlserver/metrics_listener.go b/go/cmd/dolt/commands/sqlserver/metrics_listener.go index dceff9a8c9..81b59e3689 100644 --- a/go/cmd/dolt/commands/sqlserver/metrics_listener.go +++ b/go/cmd/dolt/commands/sqlserver/metrics_listener.go @@ -124,6 +124,8 @@ func newMetricsListener(labels prometheus.Labels, versionStr string, clusterStat prometheus.MustRegister(ml.gaugeConcurrentConn) prometheus.MustRegister(ml.gaugeConcurrentQueries) prometheus.MustRegister(ml.histQueryDur) + prometheus.MustRegister(ml.replicationLagGauges) + prometheus.MustRegister(ml.isReplicaGauges) go func() { for ml.updateReplMetrics() { From 25cdf945c797c7770daf77c80b0e40960f86a289 Mon Sep 17 00:00:00 2001 From: Nick Tobey Date: Thu, 11 May 2023 16:58:01 -0700 Subject: [PATCH 45/82] Update multienv command to account for new `CWBHeadRef` signature. 
--- go/libraries/doltcore/dtestutils/testcommands/multienv.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/go/libraries/doltcore/dtestutils/testcommands/multienv.go b/go/libraries/doltcore/dtestutils/testcommands/multienv.go index 202c249e96..b5bd36e155 100644 --- a/go/libraries/doltcore/dtestutils/testcommands/multienv.go +++ b/go/libraries/doltcore/dtestutils/testcommands/multienv.go @@ -260,9 +260,14 @@ func (mr *MultiRepoTestSetup) CommitWithWorkingSet(dbName string) *doltdb.Commit panic("pending commit error: " + err.Error()) } + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + panic("couldn't get working set: " + err.Error()) + } + commit, err := dEnv.DoltDB.CommitWithWorkingSet( ctx, - dEnv.RepoStateReader().CWBHeadRef(), + headRef, ws.Ref(), pendingCommit, ws.WithStagedRoot(pendingCommit.Roots.Staged).WithWorkingRoot(pendingCommit.Roots.Working).ClearMerge(), From 9c60ac67748dfd3da450b160db2a20d1093bfb37 Mon Sep 17 00:00:00 2001 From: nicktobey Date: Fri, 12 May 2023 00:17:02 +0000 Subject: [PATCH 46/82] [ga-format-pr] Run go/utils/repofmt/format_repo.sh and go/Godeps/update.sh --- go/libraries/doltcore/env/actions/checkout.go | 1 + go/libraries/doltcore/sqle/dfunctions/active_branch.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/go/libraries/doltcore/env/actions/checkout.go b/go/libraries/doltcore/env/actions/checkout.go index abb41d4020..cb8b47a010 100644 --- a/go/libraries/doltcore/env/actions/checkout.go +++ b/go/libraries/doltcore/env/actions/checkout.go @@ -17,6 +17,7 @@ package actions import ( "context" "errors" + "github.com/dolthub/dolt/go/libraries/doltcore/doltdb" "github.com/dolthub/dolt/go/libraries/doltcore/env" "github.com/dolthub/dolt/go/libraries/doltcore/ref" diff --git a/go/libraries/doltcore/sqle/dfunctions/active_branch.go b/go/libraries/doltcore/sqle/dfunctions/active_branch.go index 755f4049ac..771c9bd84c 100644 --- 
a/go/libraries/doltcore/sqle/dfunctions/active_branch.go +++ b/go/libraries/doltcore/sqle/dfunctions/active_branch.go @@ -16,11 +16,11 @@ package dfunctions import ( "fmt" - "github.com/dolthub/dolt/go/libraries/doltcore/doltdb" "github.com/dolthub/go-mysql-server/sql" "github.com/dolthub/go-mysql-server/sql/types" + "github.com/dolthub/dolt/go/libraries/doltcore/doltdb" "github.com/dolthub/dolt/go/libraries/doltcore/ref" "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess" ) From 725b148a78f899673da23efddc23a9dea75ac3be Mon Sep 17 00:00:00 2001 From: Nick Tobey Date: Fri, 12 May 2023 12:18:19 -0700 Subject: [PATCH 47/82] Update `drop-create.bats` to use new generated constraint name. --- integration-tests/bats/drop-create.bats | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-tests/bats/drop-create.bats b/integration-tests/bats/drop-create.bats index f8cc09657e..3923465cca 100755 --- a/integration-tests/bats/drop-create.bats +++ b/integration-tests/bats/drop-create.bats @@ -332,7 +332,7 @@ SQL [[ "$output" =~ "+ \`b\` tinyint NOT NULL," ]] || false [[ "$output" =~ "+ \`c\` varchar(10)," ]] || false [[ "$output" =~ "+ PRIMARY KEY (\`a\`)," ]] || false - [[ "$output" =~ "+ CONSTRAINT \`chk_vk8cbuqc\` CHECK ((\`b\` > 0))" ]] || false + [[ "$output" =~ "+ CONSTRAINT \`test_chk_vk8cbuqc\` CHECK ((\`b\` > 0))" ]] || false } @test "drop-create: default changes" { From af966fa9fdf798be17a456e526cf0b8393e99211 Mon Sep 17 00:00:00 2001 From: Aaron Son Date: Fri, 12 May 2023 16:13:15 -0700 Subject: [PATCH 48/82] Add ReplicationStatusController out parameter to some DoltDB write methods. 
--- go/cmd/dolt/commands/commit.go | 1 + go/cmd/dolt/commands/merge.go | 1 + go/libraries/doltcore/doltdb/commit_hooks.go | 16 +++---- .../doltcore/doltdb/commit_hooks_test.go | 4 +- go/libraries/doltcore/doltdb/doltdb.go | 17 ++++++-- go/libraries/doltcore/doltdb/hooksdatabase.go | 43 +++++++++++++++++-- .../dtestutils/testcommands/multienv.go | 1 + go/libraries/doltcore/env/actions/checkout.go | 1 + go/libraries/doltcore/env/actions/reset.go | 2 +- go/libraries/doltcore/env/environment.go | 8 ++-- go/libraries/doltcore/env/memory.go | 4 +- go/libraries/doltcore/migrate/progress.go | 2 +- go/libraries/doltcore/migrate/transform.go | 2 +- .../binlog_replica_applier.go | 2 +- .../doltcore/sqle/cluster/commithook.go | 8 ++-- .../sqle/dprocedures/dolt_checkout.go | 2 +- .../doltcore/sqle/dsess/transactions.go | 4 +- .../doltcore/sqle/read_replica_database.go | 2 +- 18 files changed, 85 insertions(+), 35 deletions(-) diff --git a/go/cmd/dolt/commands/commit.go b/go/cmd/dolt/commands/commit.go index b0b7a53818..40c197c120 100644 --- a/go/cmd/dolt/commands/commit.go +++ b/go/cmd/dolt/commands/commit.go @@ -229,6 +229,7 @@ func performCommit(ctx context.Context, commandStr string, args []string, dEnv * ws.WithStagedRoot(pendingCommit.Roots.Staged).WithWorkingRoot(pendingCommit.Roots.Working).ClearMerge(), prevHash, dEnv.NewWorkingSetMeta(fmt.Sprintf("Updated by %s %s", commandStr, strings.Join(args, " "))), + nil, ) if err != nil { if apr.Contains(cli.AmendFlag) { diff --git a/go/cmd/dolt/commands/merge.go b/go/cmd/dolt/commands/merge.go index 0b3731ca48..b9751f2fd5 100644 --- a/go/cmd/dolt/commands/merge.go +++ b/go/cmd/dolt/commands/merge.go @@ -538,6 +538,7 @@ func executeNoFFMergeAndCommit(ctx context.Context, dEnv *env.DoltEnv, spec *mer ws.WithStagedRoot(pendingCommit.Roots.Staged).WithWorkingRoot(pendingCommit.Roots.Working).ClearMerge(), wsHash, dEnv.NewWorkingSetMeta(msg), + nil, ) if err != nil { diff --git a/go/libraries/doltcore/doltdb/commit_hooks.go 
b/go/libraries/doltcore/doltdb/commit_hooks.go index faf8cac34c..9fc349f78e 100644 --- a/go/libraries/doltcore/doltdb/commit_hooks.go +++ b/go/libraries/doltcore/doltdb/commit_hooks.go @@ -49,8 +49,8 @@ func NewPushOnWriteHook(destDB *DoltDB, tmpDir string) *PushOnWriteHook { } // Execute implements CommitHook, replicates head updates to the destDb field -func (ph *PushOnWriteHook) Execute(ctx context.Context, ds datas.Dataset, db datas.Database) error { - return pushDataset(ctx, ph.destDB, db, ds, ph.tmpDir) +func (ph *PushOnWriteHook) Execute(ctx context.Context, ds datas.Dataset, db datas.Database) (func(context.Context) error, error) { + return nil, pushDataset(ctx, ph.destDB, db, ds, ph.tmpDir) } func pushDataset(ctx context.Context, destDB, srcDB datas.Database, ds datas.Dataset, tmpDir string) error { @@ -135,16 +135,16 @@ func (*AsyncPushOnWriteHook) ExecuteForWorkingSets() bool { } // Execute implements CommitHook, replicates head updates to the destDb field -func (ah *AsyncPushOnWriteHook) Execute(ctx context.Context, ds datas.Dataset, db datas.Database) error { +func (ah *AsyncPushOnWriteHook) Execute(ctx context.Context, ds datas.Dataset, db datas.Database) (func(context.Context) error, error) { addr, _ := ds.MaybeHeadAddr() select { case ah.ch <- PushArg{ds: ds, db: db, hash: addr}: case <-ctx.Done(): ah.ch <- PushArg{ds: ds, db: db, hash: addr} - return ctx.Err() + return nil, ctx.Err() } - return nil + return nil, nil } // HandleError implements CommitHook @@ -174,12 +174,12 @@ func NewLogHook(msg []byte) *LogHook { } // Execute implements CommitHook, writes message to log channel -func (lh *LogHook) Execute(ctx context.Context, ds datas.Dataset, db datas.Database) error { +func (lh *LogHook) Execute(ctx context.Context, ds datas.Dataset, db datas.Database) (func(context.Context) error, error) { if lh.out != nil { _, err := lh.out.Write(lh.msg) - return err + return nil, err } - return nil + return nil, nil } // HandleError implements CommitHook diff 
--git a/go/libraries/doltcore/doltdb/commit_hooks_test.go b/go/libraries/doltcore/doltdb/commit_hooks_test.go index 02c5806673..80e10facff 100644 --- a/go/libraries/doltcore/doltdb/commit_hooks_test.go +++ b/go/libraries/doltcore/doltdb/commit_hooks_test.go @@ -136,7 +136,7 @@ func TestPushOnWriteHook(t *testing.T) { ds, err := ddb.db.GetDataset(ctx, "refs/heads/main") require.NoError(t, err) - err = hook.Execute(ctx, ds, ddb.db) + _, err = hook.Execute(ctx, ds, ddb.db) require.NoError(t, err) cs, _ = NewCommitSpec(defaultBranch) @@ -269,7 +269,7 @@ func TestAsyncPushOnWrite(t *testing.T) { require.NoError(t, err) ds, err := ddb.db.GetDataset(ctx, "refs/heads/main") require.NoError(t, err) - err = hook.Execute(ctx, ds, ddb.db) + _, err = hook.Execute(ctx, ds, ddb.db) require.NoError(t, err) } }) diff --git a/go/libraries/doltcore/doltdb/doltdb.go b/go/libraries/doltcore/doltdb/doltdb.go index 07654e85bb..56a2fc1e54 100644 --- a/go/libraries/doltcore/doltdb/doltdb.go +++ b/go/libraries/doltcore/doltdb/doltdb.go @@ -1124,7 +1124,7 @@ func (ddb *DoltDB) NewBranchAtCommit(ctx context.Context, branchRef ref.DoltRef, } ws = ws.WithWorkingRoot(commitRoot).WithStagedRoot(commitRoot) - return ddb.UpdateWorkingSet(ctx, wsRef, ws, currWsHash, TodoWorkingSetMeta()) + return ddb.UpdateWorkingSet(ctx, wsRef, ws, currWsHash, TodoWorkingSetMeta(), nil) } // CopyWorkingSet copies a WorkingSetRef from one ref to another. If `force` is @@ -1155,7 +1155,7 @@ func (ddb *DoltDB) CopyWorkingSet(ctx context.Context, fromWSRef ref.WorkingSetR } } - return ddb.UpdateWorkingSet(ctx, toWSRef, ws, currWsHash, TodoWorkingSetMeta()) + return ddb.UpdateWorkingSet(ctx, toWSRef, ws, currWsHash, TodoWorkingSetMeta(), nil) } // DeleteBranch deletes the branch given, returning an error if it doesn't exist. 
@@ -1216,6 +1216,13 @@ func (ddb *DoltDB) NewTagAtCommit(ctx context.Context, tagRef ref.DoltRef, c *Co return err } +type ReplicationStatusController struct { + // A slice of funcs which can be called to wait for the replication + // associated with a commithook to complete. Must return if the + // associated Context is canceled. + Wait []func(ctx context.Context) error +} + // UpdateWorkingSet updates the working set with the ref given to the root value given // |prevHash| is the hash of the expected WorkingSet struct stored in the ref, not the hash of the RootValue there. func (ddb *DoltDB) UpdateWorkingSet( @@ -1224,6 +1231,7 @@ func (ddb *DoltDB) UpdateWorkingSet( workingSet *WorkingSet, prevHash hash.Hash, meta *datas.WorkingSetMeta, + replicationStatus *ReplicationStatusController, ) error { ds, err := ddb.db.GetDataset(ctx, workingSetRef.String()) if err != nil { @@ -1235,6 +1243,7 @@ func (ddb *DoltDB) UpdateWorkingSet( return err } + ctx = withReplicaState(ctx, replicationStatus) _, err = ddb.db.UpdateWorkingSet(ctx, ds, datas.WorkingSetSpec{ Meta: meta, WorkingRoot: workingRootRef, @@ -1255,6 +1264,7 @@ func (ddb *DoltDB) CommitWithWorkingSet( commit *PendingCommit, workingSet *WorkingSet, prevHash hash.Hash, meta *datas.WorkingSetMeta, + replicationStatus *ReplicationStatusController, ) (*Commit, error) { wsDs, err := ddb.db.GetDataset(ctx, workingSetRef.String()) if err != nil { @@ -1271,7 +1281,8 @@ func (ddb *DoltDB) CommitWithWorkingSet( return nil, err } - commitDataset, _, err := ddb.db.CommitWithWorkingSet(ctx, headDs, wsDs, commit.Roots.Staged.nomsValue(), datas.WorkingSetSpec{ + rsCtx := withReplicaState(ctx, replicationStatus) + commitDataset, _, err := ddb.db.CommitWithWorkingSet(rsCtx, headDs, wsDs, commit.Roots.Staged.nomsValue(), datas.WorkingSetSpec{ Meta: meta, WorkingRoot: workingRootRef, StagedRoot: stagedRef, diff --git a/go/libraries/doltcore/doltdb/hooksdatabase.go b/go/libraries/doltcore/doltdb/hooksdatabase.go index 
54d128d043..656d666251 100644 --- a/go/libraries/doltcore/doltdb/hooksdatabase.go +++ b/go/libraries/doltcore/doltdb/hooksdatabase.go @@ -25,6 +25,24 @@ import ( "sync" ) +type replicaStateContextKey struct{ +} + +func withReplicaState(ctx context.Context, c *ReplicationStatusController) context.Context { + if c != nil { + return context.WithValue(ctx, replicaStateContextKey{}, c) + } + return ctx +} + +func getReplicaState(ctx context.Context) *ReplicationStatusController { + v := ctx.Value(replicaStateContextKey{}) + if v == nil { + return nil + } + return v.(*ReplicationStatusController) +} + type hooksDatabase struct { datas.Database postCommitHooks []CommitHook @@ -33,7 +51,7 @@ type hooksDatabase struct { // CommitHook is an abstraction for executing arbitrary commands after atomic database commits type CommitHook interface { // Execute is arbitrary read-only function whose arguments are new Dataset commit into a specific Database - Execute(ctx context.Context, ds datas.Dataset, db datas.Database) error + Execute(ctx context.Context, ds datas.Dataset, db datas.Database) (func(context.Context) error, error) // HandleError is an bridge function to handle Execute errors HandleError(ctx context.Context, err error) error // SetLogger lets clients specify an output stream for HandleError @@ -59,22 +77,39 @@ func (db hooksDatabase) PostCommitHooks() []CommitHook { } func (db hooksDatabase) ExecuteCommitHooks(ctx context.Context, ds datas.Dataset, onlyWS bool) { - var err error var wg sync.WaitGroup - for _, hook := range db.postCommitHooks { + rsc := getReplicaState(ctx) + if rsc != nil { + rsc.Wait = make([]func(context.Context) error, len(db.postCommitHooks)) + } + for il, hook := range db.postCommitHooks { if !onlyWS || hook.ExecuteForWorkingSets() { + i := il hook := hook wg.Add(1) go func() { defer wg.Done() - err = hook.Execute(ctx, ds, db) + f, err := hook.Execute(ctx, ds, db) if err != nil { hook.HandleError(ctx, err) } + if rsc != nil { + rsc.Wait[i] = f + 
} }() } } wg.Wait() + if rsc != nil { + j := 0 + for i := range rsc.Wait { + if rsc.Wait[i] != nil { + rsc.Wait[j] = rsc.Wait[i] + j++ + } + } + rsc.Wait = rsc.Wait[:j] + } } func (db hooksDatabase) CommitWithWorkingSet( diff --git a/go/libraries/doltcore/dtestutils/testcommands/multienv.go b/go/libraries/doltcore/dtestutils/testcommands/multienv.go index 202c249e96..1a02907169 100644 --- a/go/libraries/doltcore/dtestutils/testcommands/multienv.go +++ b/go/libraries/doltcore/dtestutils/testcommands/multienv.go @@ -268,6 +268,7 @@ func (mr *MultiRepoTestSetup) CommitWithWorkingSet(dbName string) *doltdb.Commit ws.WithStagedRoot(pendingCommit.Roots.Staged).WithWorkingRoot(pendingCommit.Roots.Working).ClearMerge(), prevHash, doltdb.TodoWorkingSetMeta(), + nil, ) if err != nil { panic("couldn't commit: " + err.Error()) diff --git a/go/libraries/doltcore/env/actions/checkout.go b/go/libraries/doltcore/env/actions/checkout.go index 54b9e07980..9f8a8bc26e 100644 --- a/go/libraries/doltcore/env/actions/checkout.go +++ b/go/libraries/doltcore/env/actions/checkout.go @@ -331,6 +331,7 @@ func cleanOldWorkingSet( initialWs.WithWorkingRoot(newRoots.Working).WithStagedRoot(newRoots.Staged).ClearMerge(), h, dEnv.NewWorkingSetMeta("reset hard"), + nil, ) if err != nil { return err diff --git a/go/libraries/doltcore/env/actions/reset.go b/go/libraries/doltcore/env/actions/reset.go index 0a601ff365..62b8903b0e 100644 --- a/go/libraries/doltcore/env/actions/reset.go +++ b/go/libraries/doltcore/env/actions/reset.go @@ -164,7 +164,7 @@ func ResetHard( return err } - err = dEnv.DoltDB.UpdateWorkingSet(ctx, ws.Ref(), ws.WithWorkingRoot(roots.Working).WithStagedRoot(roots.Staged).ClearMerge(), h, dEnv.NewWorkingSetMeta("reset hard")) + err = dEnv.DoltDB.UpdateWorkingSet(ctx, ws.Ref(), ws.WithWorkingRoot(roots.Working).WithStagedRoot(roots.Staged).ClearMerge(), h, dEnv.NewWorkingSetMeta("reset hard"), nil) if err != nil { return err } diff --git a/go/libraries/doltcore/env/environment.go 
b/go/libraries/doltcore/env/environment.go index d7cb31a44b..4eca2b6a9a 100644 --- a/go/libraries/doltcore/env/environment.go +++ b/go/libraries/doltcore/env/environment.go @@ -629,7 +629,7 @@ func (dEnv *DoltEnv) UpdateWorkingRoot(ctx context.Context, newRoot *doltdb.Root wsRef = ws.Ref() } - return dEnv.DoltDB.UpdateWorkingSet(ctx, wsRef, ws.WithWorkingRoot(newRoot), h, dEnv.workingSetMeta()) + return dEnv.DoltDB.UpdateWorkingSet(ctx, wsRef, ws.WithWorkingRoot(newRoot), h, dEnv.workingSetMeta(), nil) } // UpdateWorkingSet updates the working set for the current working branch to the value given. @@ -648,7 +648,7 @@ func (dEnv *DoltEnv) UpdateWorkingSet(ctx context.Context, ws *doltdb.WorkingSet } } - return dEnv.DoltDB.UpdateWorkingSet(ctx, ws.Ref(), ws, h, dEnv.workingSetMeta()) + return dEnv.DoltDB.UpdateWorkingSet(ctx, ws.Ref(), ws, h, dEnv.workingSetMeta(), nil) } type repoStateReader struct { @@ -758,7 +758,7 @@ func (dEnv *DoltEnv) UpdateStagedRoot(ctx context.Context, newRoot *doltdb.RootV wsRef = ws.Ref() } - return dEnv.DoltDB.UpdateWorkingSet(ctx, wsRef, ws.WithStagedRoot(newRoot), h, dEnv.workingSetMeta()) + return dEnv.DoltDB.UpdateWorkingSet(ctx, wsRef, ws.WithStagedRoot(newRoot), h, dEnv.workingSetMeta(), nil) } func (dEnv *DoltEnv) AbortMerge(ctx context.Context) error { @@ -772,7 +772,7 @@ func (dEnv *DoltEnv) AbortMerge(ctx context.Context) error { return err } - return dEnv.DoltDB.UpdateWorkingSet(ctx, ws.Ref(), ws.AbortMerge(), h, dEnv.workingSetMeta()) + return dEnv.DoltDB.UpdateWorkingSet(ctx, ws.Ref(), ws.AbortMerge(), h, dEnv.workingSetMeta(), nil) } func (dEnv *DoltEnv) workingSetMeta() *datas.WorkingSetMeta { diff --git a/go/libraries/doltcore/env/memory.go b/go/libraries/doltcore/env/memory.go index 7be9bdfadf..c973fc428f 100644 --- a/go/libraries/doltcore/env/memory.go +++ b/go/libraries/doltcore/env/memory.go @@ -136,7 +136,7 @@ func (m MemoryRepoState) UpdateStagedRoot(ctx context.Context, newRoot *doltdb.R wsRef = ws.Ref() } - return 
m.DoltDB.UpdateWorkingSet(ctx, wsRef, ws.WithStagedRoot(newRoot), h, m.workingSetMeta()) + return m.DoltDB.UpdateWorkingSet(ctx, wsRef, ws.WithStagedRoot(newRoot), h, m.workingSetMeta(), nil) } func (m MemoryRepoState) UpdateWorkingRoot(ctx context.Context, newRoot *doltdb.RootValue) error { @@ -162,7 +162,7 @@ func (m MemoryRepoState) UpdateWorkingRoot(ctx context.Context, newRoot *doltdb. wsRef = ws.Ref() } - return m.DoltDB.UpdateWorkingSet(ctx, wsRef, ws.WithWorkingRoot(newRoot), h, m.workingSetMeta()) + return m.DoltDB.UpdateWorkingSet(ctx, wsRef, ws.WithWorkingRoot(newRoot), h, m.workingSetMeta(), nil) } func (m MemoryRepoState) WorkingSet(ctx context.Context) (*doltdb.WorkingSet, error) { diff --git a/go/libraries/doltcore/migrate/progress.go b/go/libraries/doltcore/migrate/progress.go index 12139e6b54..26592dee8c 100644 --- a/go/libraries/doltcore/migrate/progress.go +++ b/go/libraries/doltcore/migrate/progress.go @@ -298,6 +298,6 @@ func commitRoot( Name: meta.Name, Email: meta.Email, Timestamp: uint64(time.Now().Unix()), - }) + }, nil) return err } diff --git a/go/libraries/doltcore/migrate/transform.go b/go/libraries/doltcore/migrate/transform.go index 5c17cbc605..5101475c31 100644 --- a/go/libraries/doltcore/migrate/transform.go +++ b/go/libraries/doltcore/migrate/transform.go @@ -93,7 +93,7 @@ func migrateWorkingSet(ctx context.Context, menv Environment, brRef ref.BranchRe newWs := doltdb.EmptyWorkingSet(wsRef).WithWorkingRoot(wr).WithStagedRoot(sr) - return new.UpdateWorkingSet(ctx, wsRef, newWs, hash.Hash{}, oldWs.Meta()) + return new.UpdateWorkingSet(ctx, wsRef, newWs, hash.Hash{}, oldWs.Meta(), nil) } func migrateCommit(ctx context.Context, menv Environment, oldCm *doltdb.Commit, new *doltdb.DoltDB, prog *progress) error { diff --git a/go/libraries/doltcore/sqle/binlogreplication/binlog_replica_applier.go b/go/libraries/doltcore/sqle/binlogreplication/binlog_replica_applier.go index f286fcbb95..c83c9e25d0 100644 --- 
a/go/libraries/doltcore/sqle/binlogreplication/binlog_replica_applier.go +++ b/go/libraries/doltcore/sqle/binlogreplication/binlog_replica_applier.go @@ -706,7 +706,7 @@ func closeWriteSession(ctx *sql.Context, engine *gms.Engine, databaseName string return err } - return sqlDatabase.DbData().Ddb.UpdateWorkingSet(ctx, newWorkingSet.Ref(), newWorkingSet, hash, newWorkingSet.Meta()) + return sqlDatabase.DbData().Ddb.UpdateWorkingSet(ctx, newWorkingSet.Ref(), newWorkingSet, hash, newWorkingSet.Meta(), nil) } // getTableSchema returns a sql.Schema for the specified table in the specified database. diff --git a/go/libraries/doltcore/sqle/cluster/commithook.go b/go/libraries/doltcore/sqle/cluster/commithook.go index f3b0bbb94a..0ed653d5dc 100644 --- a/go/libraries/doltcore/sqle/cluster/commithook.go +++ b/go/libraries/doltcore/sqle/cluster/commithook.go @@ -424,21 +424,21 @@ var errDetectedBrokenConfigStr = "error: more than one server was configured as // Execute on this commithook updates the target root hash we're attempting to // replicate and wakes the replication thread. 
-func (h *commithook) Execute(ctx context.Context, ds datas.Dataset, db datas.Database) error { +func (h *commithook) Execute(ctx context.Context, ds datas.Dataset, db datas.Database) (func(context.Context) error, error) { lgr := h.logger() lgr.Tracef("cluster/commithook: Execute called post commit") cs := datas.ChunkStoreFromDatabase(db) root, err := cs.Root(ctx) if err != nil { lgr.Errorf("cluster/commithook: Execute: error retrieving local database root: %v", err) - return err + return nil, err } h.mu.Lock() lgr = h.logger() if h.role != RolePrimary { lgr.Warnf("cluster/commithook received commit callback for a commit on %s, but we are not role primary; not replicating the commit, which is likely to be lost.", ds.ID()) h.mu.Unlock() - return nil + return nil, nil } if root != h.nextHead { lgr.Tracef("signaling replication thread to push new head: %v", root.String()) @@ -456,7 +456,7 @@ func (h *commithook) Execute(ctx context.Context, ds datas.Dataset, db datas.Dat lgr.Warnf("cluster/commithook failed to replicate write before the timeout. timeout: %d, wait result: %v", execTimeout, res) } } - return nil + return nil, nil } func (h *commithook) HandleError(ctx context.Context, err error) error { diff --git a/go/libraries/doltcore/sqle/dprocedures/dolt_checkout.go b/go/libraries/doltcore/sqle/dprocedures/dolt_checkout.go index 21ef428198..5ab0417340 100644 --- a/go/libraries/doltcore/sqle/dprocedures/dolt_checkout.go +++ b/go/libraries/doltcore/sqle/dprocedures/dolt_checkout.go @@ -168,7 +168,7 @@ func createWorkingSetForLocalBranch(ctx *sql.Context, ddb *doltdb.DoltDB, branch } ws := doltdb.EmptyWorkingSet(wsRef).WithWorkingRoot(commitRoot).WithStagedRoot(commitRoot) - return ddb.UpdateWorkingSet(ctx, wsRef, ws, hash.Hash{} /* current hash... */, doltdb.TodoWorkingSetMeta()) + return ddb.UpdateWorkingSet(ctx, wsRef, ws, hash.Hash{} /* current hash... 
*/, doltdb.TodoWorkingSetMeta(), nil) } // getRevisionForRevisionDatabase returns the root database name and revision for a database, or just the root database name if the specified db name is not a revision database. diff --git a/go/libraries/doltcore/sqle/dsess/transactions.go b/go/libraries/doltcore/sqle/dsess/transactions.go index f3786e2379..0c1a66201b 100644 --- a/go/libraries/doltcore/sqle/dsess/transactions.go +++ b/go/libraries/doltcore/sqle/dsess/transactions.go @@ -216,7 +216,7 @@ func doltCommit(ctx *sql.Context, workingSet = workingSet.ClearMerge() - newCommit, err := tx.dbData.Ddb.CommitWithWorkingSet(ctx, headRef, tx.workingSetRef, &pending, workingSet, currHash, tx.getWorkingSetMeta(ctx)) + newCommit, err := tx.dbData.Ddb.CommitWithWorkingSet(ctx, headRef, tx.workingSetRef, &pending, workingSet, currHash, tx.getWorkingSetMeta(ctx), nil) return workingSet, newCommit, err } @@ -227,7 +227,7 @@ func txCommit(ctx *sql.Context, workingSet *doltdb.WorkingSet, hash hash.Hash, ) (*doltdb.WorkingSet, *doltdb.Commit, error) { - return workingSet, nil, tx.dbData.Ddb.UpdateWorkingSet(ctx, tx.workingSetRef, workingSet, hash, tx.getWorkingSetMeta(ctx)) + return workingSet, nil, tx.dbData.Ddb.UpdateWorkingSet(ctx, tx.workingSetRef, workingSet, hash, tx.getWorkingSetMeta(ctx), nil) } // DoltCommit commits the working set and creates a new DoltCommit as specified, in one atomic write diff --git a/go/libraries/doltcore/sqle/read_replica_database.go b/go/libraries/doltcore/sqle/read_replica_database.go index 511878bc95..beb4ae50fd 100644 --- a/go/libraries/doltcore/sqle/read_replica_database.go +++ b/go/libraries/doltcore/sqle/read_replica_database.go @@ -334,7 +334,7 @@ func pullBranchesAndUpdateWorkingSet( if commitRootHash != wsWorkingRootHash || commitRootHash != wsStagedRootHash { ws = ws.WithWorkingRoot(commitRoot).WithStagedRoot(commitRoot) - err = rrd.ddb.UpdateWorkingSet(ctx, ws.Ref(), ws, prevHash, doltdb.TodoWorkingSetMeta()) + err = 
rrd.ddb.UpdateWorkingSet(ctx, ws.Ref(), ws, prevHash, doltdb.TodoWorkingSetMeta(), nil) if err == nil { return nil } From 261b10c92bbd7262f4c8daf38d995d4dbb7bfde6 Mon Sep 17 00:00:00 2001 From: Aaron Son Date: Fri, 12 May 2023 16:40:42 -0700 Subject: [PATCH 49/82] go: sqle: transactions,cluster: Move block-for-cluster-replication behavior to Dolt transaction logic and out of the commit hook. --- .../doltcore/sqle/cluster/commithook.go | 67 +++++-------------- .../doltcore/sqle/cluster/controller.go | 18 ----- .../doltcore/sqle/cluster/initdbhook.go | 4 -- .../doltcore/sqle/dsess/transactions.go | 48 ++++++++++++- 4 files changed, 61 insertions(+), 76 deletions(-) diff --git a/go/libraries/doltcore/sqle/cluster/commithook.go b/go/libraries/doltcore/sqle/cluster/commithook.go index 0ed653d5dc..f49ce7123d 100644 --- a/go/libraries/doltcore/sqle/cluster/commithook.go +++ b/go/libraries/doltcore/sqle/cluster/commithook.go @@ -61,8 +61,6 @@ type commithook struct { // 4. If you read a channel out of |successChs|, that channel will be closed on the next successful replication attempt. It will not be closed before then. successChs []chan struct{} - execTimeout time.Duration - role Role // The standby replica to which the new root gets replicated. @@ -379,47 +377,6 @@ func (h *commithook) setWaitNotify(f func()) bool { return true } -type replicationResult int - -const replicationResultTimeout = 0 -const replicationResultContextCanceled = 1 -const replicationResultSuccess = 2 - -// Blocks the current goroutine until: -// 1. There is no replication necessary, i.e., isCaughtUp() == true. This returns replicationResultSuccess. -// 2. The replication of |nextHead|, or a later head, at the time this method was called succeeds. This returns replicationResultSuccess. -// 3. ctx.Done() closes. This returns replicationResultContextCanceled. -// 4. timeout passes. This returns replicationResultSuccess. 
-func (h *commithook) waitForReplicationSuccess(ctx context.Context, timeout time.Duration) replicationResult { - h.mu.Lock() - if h.isCaughtUp() { - h.mu.Unlock() - return replicationResultSuccess - } - if len(h.successChs) == 0 { - h.successChs = append(h.successChs, make(chan struct{})) - } - ch := h.successChs[0] - h.mu.Unlock() - select { - case <-ch: - return replicationResultSuccess - case <-ctx.Done(): - return replicationResultContextCanceled - case <-time.After(timeout): - return replicationResultTimeout - } -} - -// Set by the controller. If it is non-zero, the Execute() DatabaseHook -// callback will block the calling goroutine for that many seconds waiting for -// replication quiescence. -func (h *commithook) setExecTimeout(timeout time.Duration) { - h.mu.Lock() - h.execTimeout = timeout - h.mu.Unlock() -} - var errDetectedBrokenConfigStr = "error: more than one server was configured as primary in the same epoch. this server has stopped accepting writes. choose a primary in the cluster and call dolt_assume_cluster_role() on servers in the cluster to start replication at a higher epoch" // Execute on this commithook updates the target root hash we're attempting to @@ -434,10 +391,10 @@ func (h *commithook) Execute(ctx context.Context, ds datas.Dataset, db datas.Dat return nil, err } h.mu.Lock() + defer h.mu.Unlock() lgr = h.logger() if h.role != RolePrimary { lgr.Warnf("cluster/commithook received commit callback for a commit on %s, but we are not role primary; not replicating the commit, which is likely to be lost.", ds.ID()) - h.mu.Unlock() return nil, nil } if root != h.nextHead { @@ -447,16 +404,22 @@ func (h *commithook) Execute(ctx context.Context, ds datas.Dataset, db datas.Dat h.nextPushAttempt = time.Time{} h.cond.Signal() } - execTimeout := h.execTimeout - h.mu.Unlock() - if execTimeout != time.Duration(0) { - res := h.waitForReplicationSuccess(ctx, execTimeout) - if res != replicationResultSuccess { - // TODO: Get this failure into the 
*sql.Context warnings. - lgr.Warnf("cluster/commithook failed to replicate write before the timeout. timeout: %d, wait result: %v", execTimeout, res) + var waitF func(context.Context) error + if !h.isCaughtUp() { + if len(h.successChs) == 0 { + h.successChs = append(h.successChs, make(chan struct{})) + } + successCh := h.successChs[0] + waitF = func(ctx context.Context) error { + select { + case <-successCh: + return nil + case <-ctx.Done(): + return ctx.Err() + } } } - return nil, nil + return waitF, nil } func (h *commithook) HandleError(ctx context.Context, err error) error { diff --git a/go/libraries/doltcore/sqle/cluster/controller.go b/go/libraries/doltcore/sqle/cluster/controller.go index e4facd8eab..7c821aa6e0 100644 --- a/go/libraries/doltcore/sqle/cluster/controller.go +++ b/go/libraries/doltcore/sqle/cluster/controller.go @@ -176,20 +176,6 @@ func (c *Controller) ManageSystemVariables(variables sqlvars) { c.mu.Lock() defer c.mu.Unlock() c.systemVars = variables - - // We reset this system variable here to put our NotifyChanged on it. - v, _, ok := variables.GetGlobal(dsess.DoltClusterAckWritesTimeoutSecs) - if !ok { - panic(fmt.Sprintf("internal error: did not find required global system variable %s", dsess.DoltClusterAckWritesTimeoutSecs)) - } - v.NotifyChanged = func(scope sql.SystemVariableScope, v sql.SystemVarValue) { - c.mu.Lock() - defer c.mu.Unlock() - for _, hook := range c.commithooks { - hook.setExecTimeout(time.Duration(v.Val.(int64)) * time.Second) - } - } - variables.AddSystemVariables([]sql.SystemVariable{v}) c.refreshSystemVars() } @@ -209,10 +195,6 @@ func (c *Controller) ApplyStandbyReplicationConfig(ctx context.Context, bt *sql. if err != nil { return err } - _, execTimeoutVal, _ := c.systemVars.GetGlobal(dsess.DoltClusterAckWritesTimeoutSecs) - for _, h := range hooks { - h.setExecTimeout(time.Duration(execTimeoutVal.(int64)) * time.Second) - } c.commithooks = append(c.commithooks, hooks...) 
} return nil diff --git a/go/libraries/doltcore/sqle/cluster/initdbhook.go b/go/libraries/doltcore/sqle/cluster/initdbhook.go index 588f35a235..7713c91540 100644 --- a/go/libraries/doltcore/sqle/cluster/initdbhook.go +++ b/go/libraries/doltcore/sqle/cluster/initdbhook.go @@ -17,7 +17,6 @@ package cluster import ( "context" "strings" - "time" "github.com/dolthub/go-mysql-server/sql" @@ -58,8 +57,6 @@ func NewInitDatabaseHook(controller *Controller, bt *sql.BackgroundThreads, orig }) } - _, execTimeoutVal, _ := controller.systemVars.GetGlobal(dsess.DoltClusterAckWritesTimeoutSecs) - role, _ := controller.roleAndEpoch() for i, r := range controller.cfg.StandbyRemotes() { ttfdir, err := denv.TempTableFilesDir() @@ -67,7 +64,6 @@ func NewInitDatabaseHook(controller *Controller, bt *sql.BackgroundThreads, orig return err } commitHook := newCommitHook(controller.lgr, r.Name(), name, role, remoteDBs[i], denv.DoltDB, ttfdir) - commitHook.setExecTimeout(time.Duration(execTimeoutVal.(int64)) * time.Second) denv.DoltDB.PrependCommitHook(ctx, commitHook) controller.registerCommitHook(commitHook) if err := commitHook.Run(bt); err != nil { diff --git a/go/libraries/doltcore/sqle/dsess/transactions.go b/go/libraries/doltcore/sqle/dsess/transactions.go index 0c1a66201b..3234ec9e5a 100644 --- a/go/libraries/doltcore/sqle/dsess/transactions.go +++ b/go/libraries/doltcore/sqle/dsess/transactions.go @@ -15,6 +15,7 @@ package dsess import ( + "context" "errors" "fmt" "strings" @@ -216,7 +217,9 @@ func doltCommit(ctx *sql.Context, workingSet = workingSet.ClearMerge() - newCommit, err := tx.dbData.Ddb.CommitWithWorkingSet(ctx, headRef, tx.workingSetRef, &pending, workingSet, currHash, tx.getWorkingSetMeta(ctx), nil) + var rsc doltdb.ReplicationStatusController + newCommit, err := tx.dbData.Ddb.CommitWithWorkingSet(ctx, headRef, tx.workingSetRef, &pending, workingSet, currHash, tx.getWorkingSetMeta(ctx), &rsc) + waitForReplicationController(ctx, rsc) return workingSet, newCommit, err } @@ 
-227,7 +230,10 @@ func txCommit(ctx *sql.Context, workingSet *doltdb.WorkingSet, hash hash.Hash, ) (*doltdb.WorkingSet, *doltdb.Commit, error) { - return workingSet, nil, tx.dbData.Ddb.UpdateWorkingSet(ctx, tx.workingSetRef, workingSet, hash, tx.getWorkingSetMeta(ctx), nil) + var rsc doltdb.ReplicationStatusController + err := tx.dbData.Ddb.UpdateWorkingSet(ctx, tx.workingSetRef, workingSet, hash, tx.getWorkingSetMeta(ctx), &rsc) + waitForReplicationController(ctx, rsc) + return workingSet, nil, err } // DoltCommit commits the working set and creates a new DoltCommit as specified, in one atomic write @@ -235,6 +241,44 @@ func (tx *DoltTransaction) DoltCommit(ctx *sql.Context, workingSet *doltdb.Worki return tx.doCommit(ctx, workingSet, commit, doltCommit) } +func waitForReplicationController(ctx *sql.Context, rsc doltdb.ReplicationStatusController) { + if len(rsc.Wait) == 0 { + return + } + _, timeout, ok := sql.SystemVariables.GetGlobal(DoltClusterAckWritesTimeoutSecs) + if !ok { + return + } + timeoutI := timeout.(int64) + if timeoutI == 0 { + return + } + + cCtx, cancel := context.WithCancel(ctx) + defer cancel() + var wg sync.WaitGroup + wg.Add(len(rsc.Wait)) + for _, f := range rsc.Wait { + f := f + go func() error { + defer wg.Done() + return f(cCtx) + }() + } + + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + + select { + case <-time.After(time.Duration(timeoutI) * time.Second): + // TODO: Error, warning, something... + case <-done: + } +} + // doCommit commits this transaction with the write function provided. 
It takes the same params as DoltCommit func (tx *DoltTransaction) doCommit( ctx *sql.Context, From 2e4107fe22948a383ec68872b0281dc32458dd84 Mon Sep 17 00:00:00 2001 From: reltuk Date: Fri, 12 May 2023 23:50:15 +0000 Subject: [PATCH 50/82] [ga-format-pr] Run go/utils/repofmt/format_repo.sh and go/Godeps/update.sh --- go/libraries/doltcore/doltdb/hooksdatabase.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/libraries/doltcore/doltdb/hooksdatabase.go b/go/libraries/doltcore/doltdb/hooksdatabase.go index 656d666251..96bf911e3e 100644 --- a/go/libraries/doltcore/doltdb/hooksdatabase.go +++ b/go/libraries/doltcore/doltdb/hooksdatabase.go @@ -25,7 +25,7 @@ import ( "sync" ) -type replicaStateContextKey struct{ +type replicaStateContextKey struct { } func withReplicaState(ctx context.Context, c *ReplicationStatusController) context.Context { From 40acb28e45dbbe7439d9a65eb1e845bb420642c4 Mon Sep 17 00:00:00 2001 From: Hydrocharged Date: Mon, 15 May 2023 16:07:06 +0000 Subject: [PATCH 51/82] [ga-bump-dep] Bump dependency in Dolt by Hydrocharged --- go/go.mod | 2 +- go/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go/go.mod b/go/go.mod index ada89d6a53..1a32d8b4d4 100644 --- a/go/go.mod +++ b/go/go.mod @@ -59,7 +59,7 @@ require ( github.com/cespare/xxhash v1.1.0 github.com/creasty/defaults v1.6.0 github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2 - github.com/dolthub/go-mysql-server v0.15.1-0.20230511215534-6dca53c0d236 + github.com/dolthub/go-mysql-server v0.15.1-0.20230515160548-5bd8954a0f02 github.com/dolthub/swiss v0.1.0 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/jmoiron/sqlx v1.3.4 diff --git a/go/go.sum b/go/go.sum index c8dbb1dbc7..4e0aa642b6 100644 --- a/go/go.sum +++ b/go/go.sum @@ -166,8 +166,8 @@ github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2 h1:u3PMzfF8RkKd3lB9pZ2bfn0qEG+1G github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2/go.mod h1:mIEZOHnFx4ZMQeawhw9rhsj+0zwQj7adVsnBX7t+eKY= 
github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U= github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0= -github.com/dolthub/go-mysql-server v0.15.1-0.20230511215534-6dca53c0d236 h1:dcMIfGLWniby9dQq2RN+hWtrNKFQlCd+9uFBNXB4uiw= -github.com/dolthub/go-mysql-server v0.15.1-0.20230511215534-6dca53c0d236/go.mod h1:YO4FtMULZ/HuKxlvm7QfvTE8uBKEv+1LtK2A3FrrGe4= +github.com/dolthub/go-mysql-server v0.15.1-0.20230515160548-5bd8954a0f02 h1:rLb85kS5mwt4HnHN+hXJIyge9rlqA35Kgsw4i+bTuvc= +github.com/dolthub/go-mysql-server v0.15.1-0.20230515160548-5bd8954a0f02/go.mod h1:YO4FtMULZ/HuKxlvm7QfvTE8uBKEv+1LtK2A3FrrGe4= github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488 h1:0HHu0GWJH0N6a6keStrHhUAK5/o9LVfkh44pvsV4514= github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488/go.mod h1:ehexgi1mPxRTk0Mok/pADALuHbvATulTh6gzr7NzZto= github.com/dolthub/jsonpath v0.0.1 h1:Nd+T3U+XisK3kOuxtABS5IIbZqXVIlOR9VYquyjQ0u0= From a55806750c25366f9d5a39f22e496b78aa8ec962 Mon Sep 17 00:00:00 2001 From: Aaron Son Date: Mon, 15 May 2023 11:21:04 -0700 Subject: [PATCH 52/82] go: sqle: cluster: commithook: Periodically heartbeat to a standby when we are primary. This allows replication_lag on the standby to more accurately reflect the possible drift locally. --- .../doltcore/sqle/cluster/commithook.go | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/go/libraries/doltcore/sqle/cluster/commithook.go b/go/libraries/doltcore/sqle/cluster/commithook.go index 1b540f028d..c29bc10ddd 100644 --- a/go/libraries/doltcore/sqle/cluster/commithook.go +++ b/go/libraries/doltcore/sqle/cluster/commithook.go @@ -108,6 +108,7 @@ func (h *commithook) replicate(ctx context.Context) { defer h.logger().Tracef("cluster/commithook: background thread: replicate: shutdown.") h.mu.Lock() defer h.mu.Unlock() + shouldHeartbeat := false for { lgr := h.logger() // Shutdown for context canceled. 
@@ -138,11 +139,17 @@ func (h *commithook) replicate(ctx context.Context) { h.nextHeadIncomingTime = time.Now() } else if h.shouldReplicate() { h.attemptReplicate(ctx) + shouldHeartbeat = false } else { lgr.Tracef("cluster/commithook: background thread: waiting for signal.") if h.waitNotify != nil { h.waitNotify() } + if shouldHeartbeat { + h.attemptHeartbeat(ctx) + } else { + shouldHeartbeat = true + } h.cond.Wait() lgr.Tracef("cluster/commithook: background thread: woken up.") } @@ -175,6 +182,37 @@ func (h *commithook) primaryNeedsInit() bool { return h.role == RolePrimary && h.nextHead == (hash.Hash{}) } +// Called by the replicate thread to periodically heartbeat liveness to a +// standby if we are a primary. These heartbeats are best effort and currently +// do not affect the data plane much. +// +// preconditions: h.mu is locked and shouldReplicate() returned false. +func (h *commithook) attemptHeartbeat(ctx context.Context) { + if h.role != RolePrimary { + return + } + head := h.lastPushedHead + if head.IsEmpty() { + return + } + destDB := h.destDB + if destDB == nil { + return + } + ctx, h.cancelReplicate = context.WithTimeout(ctx, 5 * time.Second) + defer func() { + if h.cancelReplicate != nil { + h.cancelReplicate() + } + h.cancelReplicate = nil + }() + h.mu.Unlock() + datasDB := doltdb.HackDatasDatabaseFromDoltDB(destDB) + cs := datas.ChunkStoreFromDatabase(datasDB) + cs.Commit(ctx, head, head) + h.mu.Lock() +} + // Called by the replicate thread to push the nextHead to the destDB and set // its root to the new value. 
// From 5ce8cc03d90d94ee3ff5a42938d6e28df6115bbd Mon Sep 17 00:00:00 2001 From: reltuk Date: Mon, 15 May 2023 18:31:45 +0000 Subject: [PATCH 53/82] [ga-format-pr] Run go/utils/repofmt/format_repo.sh and go/Godeps/update.sh --- go/libraries/doltcore/sqle/cluster/commithook.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/libraries/doltcore/sqle/cluster/commithook.go b/go/libraries/doltcore/sqle/cluster/commithook.go index c29bc10ddd..37ae58896e 100644 --- a/go/libraries/doltcore/sqle/cluster/commithook.go +++ b/go/libraries/doltcore/sqle/cluster/commithook.go @@ -199,7 +199,7 @@ func (h *commithook) attemptHeartbeat(ctx context.Context) { if destDB == nil { return } - ctx, h.cancelReplicate = context.WithTimeout(ctx, 5 * time.Second) + ctx, h.cancelReplicate = context.WithTimeout(ctx, 5*time.Second) defer func() { if h.cancelReplicate != nil { h.cancelReplicate() From 0a632eb8cea8afc28977fac20e5c0d4968ce812b Mon Sep 17 00:00:00 2001 From: Nick Tobey Date: Mon, 15 May 2023 13:15:03 -0700 Subject: [PATCH 54/82] Incorporate feedback for https://github.com/dolthub/dolt/pull/5943. 
--- go/cmd/dolt/commands/branch.go | 6 +----- go/cmd/dolt/commands/checkout.go | 13 +++++-------- go/cmd/dolt/commands/push.go | 2 +- 3 files changed, 7 insertions(+), 14 deletions(-) diff --git a/go/cmd/dolt/commands/branch.go b/go/cmd/dolt/commands/branch.go index 10d8ea5cee..d78ca00bcf 100644 --- a/go/cmd/dolt/commands/branch.go +++ b/go/cmd/dolt/commands/branch.go @@ -175,11 +175,7 @@ func printBranches(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar } if verbose { - headRef, err := dEnv.RepoStateReader().CWBHeadRef() - if err != nil { - return HandleVErrAndExitCode(errhand.BuildDError(err.Error()).Build(), nil) - } - cm, err := dEnv.DoltDB.Resolve(ctx, cs, headRef) + cm, err := dEnv.DoltDB.Resolve(ctx, cs, currentBranch) if err == nil { h, err := cm.HashOf() diff --git a/go/cmd/dolt/commands/checkout.go b/go/cmd/dolt/commands/checkout.go index d4e6990ee6..cc4641a0fd 100644 --- a/go/cmd/dolt/commands/checkout.go +++ b/go/cmd/dolt/commands/checkout.go @@ -180,12 +180,13 @@ func checkoutNewBranch(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.Ar return verr } + headRef, err := dEnv.RepoStateReader().CWBHeadRef() + if err != nil { + return errhand.BuildDError(err.Error()).Build() + } + // the new branch is checked out at this point if setTrackUpstream { - headRef, err := dEnv.RepoStateReader().CWBHeadRef() - if err != nil { - return errhand.BuildDError(err.Error()).Build() - } verr = SetRemoteUpstreamForBranchRef(dEnv, remoteName, remoteBranchName, headRef) if verr != nil { return verr @@ -202,10 +203,6 @@ func checkoutNewBranch(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.Ar if !remoteOk { return nil } - headRef, err := dEnv.RepoStateReader().CWBHeadRef() - if err != nil { - return errhand.BuildDError(err.Error()).Build() - } verr = SetRemoteUpstreamForBranchRef(dEnv, remoteName, remoteBranchName, headRef) if verr != nil { return verr diff --git a/go/cmd/dolt/commands/push.go b/go/cmd/dolt/commands/push.go index 
8f425a7c02..7fa06517fb 100644 --- a/go/cmd/dolt/commands/push.go +++ b/go/cmd/dolt/commands/push.go @@ -98,7 +98,7 @@ func (cmd PushCmd) Exec(ctx context.Context, commandStr string, args []string, d case env.ErrNoUpstreamForBranch: currentBranch, err := dEnv.RepoStateReader().CWBHeadRef() if err != nil { - verr = errhand.BuildDError("fatal: The current branch could not be identified").Build() + verr = errhand.BuildDError("fatal: The current branch could not be identified").AddCause(err).Build() } else { remoteName := "" if defRemote, verr := env.GetDefaultRemote(dEnv.RepoStateReader()); verr == nil { From 535e59141d9a75af63e4ef8097c88957d68df548 Mon Sep 17 00:00:00 2001 From: Aaron Son Date: Sat, 13 May 2023 07:59:39 -0700 Subject: [PATCH 55/82] integration-tests/go-sql-server-driver: Cluster, dolt_cluster_ack_writes_timeout_secs, add a failing test to assert that dolt_branch modifications are waited on. --- .../tests/sql-server-cluster.yaml | 45 +++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/integration-tests/go-sql-server-driver/tests/sql-server-cluster.yaml b/integration-tests/go-sql-server-driver/tests/sql-server-cluster.yaml index c8a75205d5..48d9892d97 100644 --- a/integration-tests/go-sql-server-driver/tests/sql-server-cluster.yaml +++ b/integration-tests/go-sql-server-driver/tests/sql-server-cluster.yaml @@ -1098,6 +1098,51 @@ tests: result: columns: ["COUNT(*)"] rows: [["20"]] + # Assert that branch creation and deletion also blocks on replication. 
+ - on: server1 + queries: + - exec: 'USE repo1' + - exec: 'CALL DOLT_BRANCH("new_branch")' + - on: server2 + queries: + - exec: 'USE repo1' + - query: 'SELECT COUNT(*) FROM dolt_branches' + result: + columns: ["COUNT(*)"] + rows: [["2"]] + - on: server1 + queries: + - exec: 'USE repo1' + - exec: 'CALL DOLT_BRANCH("-d", "new_branch")' + - on: server2 + queries: + - exec: 'USE repo1' + - query: 'SELECT COUNT(*) FROM dolt_branches' + result: + columns: ["COUNT(*)"] + rows: [["1"]] + - on: server1 + queries: + - exec: 'USE repo1' + - exec: 'CALL DOLT_BRANCH("new_branch")' + - on: server2 + queries: + - exec: 'USE repo1' + - query: 'SELECT COUNT(*) FROM dolt_branches' + result: + columns: ["COUNT(*)"] + rows: [["2"]] + - on: server1 + queries: + - exec: 'USE repo1' + - exec: 'CALL DOLT_BRANCH("-d", "new_branch")' + - on: server2 + queries: + - exec: 'USE repo1' + - query: 'SELECT COUNT(*) FROM dolt_branches' + result: + columns: ["COUNT(*)"] + rows: [["1"]] - name: call dolt checkout multi_repos: - name: server1 From c1fedfc4577c078bb0586e9f22e0a79d7ea79116 Mon Sep 17 00:00:00 2001 From: Aaron Son Date: Mon, 15 May 2023 10:20:12 -0700 Subject: [PATCH 56/82] go/libraries/doltcore/sqle: dprocedures: dolt_branch: Make dolt branch operations (and some related operations, like dolt checkout) able to block on cluster replication. 
--- go/cmd/dolt/commands/branch.go | 6 +-- go/cmd/dolt/commands/checkout.go | 2 +- go/libraries/doltcore/doltdb/doltdb.go | 9 +++-- go/libraries/doltcore/env/actions/branch.go | 34 +++++++++-------- go/libraries/doltcore/env/actions/clone.go | 2 +- go/libraries/doltcore/env/actions/remotes.go | 4 +- go/libraries/doltcore/env/environment.go | 2 +- go/libraries/doltcore/migrate/progress.go | 2 +- go/libraries/doltcore/rebase/rebase.go | 2 +- .../doltcore/sqle/dprocedures/dolt_branch.go | 38 +++++++++++-------- .../sqle/dprocedures/dolt_checkout.go | 16 +++++--- .../doltcore/sqle/dprocedures/dolt_remote.go | 12 ++++-- .../doltcore/sqle/dsess/transactions.go | 6 +-- .../doltcore/sqle/read_replica_database.go | 6 +-- 14 files changed, 80 insertions(+), 61 deletions(-) diff --git a/go/cmd/dolt/commands/branch.go b/go/cmd/dolt/commands/branch.go index d1ba4b1856..a63f6d99a6 100644 --- a/go/cmd/dolt/commands/branch.go +++ b/go/cmd/dolt/commands/branch.go @@ -245,7 +245,7 @@ func moveBranch(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgParseR force := apr.Contains(cli.ForceFlag) src := apr.Arg(0) dest := apr.Arg(1) - err := actions.RenameBranch(ctx, dEnv.DbData(), src, apr.Arg(1), dEnv, force) + err := actions.RenameBranch(ctx, dEnv.DbData(), src, apr.Arg(1), dEnv, force, nil) var verr errhand.VerboseError if err != nil { @@ -306,7 +306,7 @@ func deleteBranches(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPa err := actions.DeleteBranch(ctx, dEnv.DbData(), brName, actions.DeleteOptions{ Force: force, Remote: apr.Contains(cli.RemoteParam), - }, dEnv) + }, dEnv, nil) if err != nil { var verr errhand.VerboseError @@ -379,7 +379,7 @@ func createBranch(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPars } } - err := actions.CreateBranchWithStartPt(ctx, dEnv.DbData(), newBranch, startPt, apr.Contains(cli.ForceFlag)) + err := actions.CreateBranchWithStartPt(ctx, dEnv.DbData(), newBranch, startPt, apr.Contains(cli.ForceFlag), nil) if err != nil { 
return HandleVErrAndExitCode(errhand.BuildDError(err.Error()).Build(), usage) } diff --git a/go/cmd/dolt/commands/checkout.go b/go/cmd/dolt/commands/checkout.go index 59a2d3f206..5badb3d5e7 100644 --- a/go/cmd/dolt/commands/checkout.go +++ b/go/cmd/dolt/commands/checkout.go @@ -238,7 +238,7 @@ func checkoutRemoteBranchOrSuggestNew(ctx context.Context, dEnv *env.DoltEnv, na } func checkoutNewBranchFromStartPt(ctx context.Context, dEnv *env.DoltEnv, newBranch, startPt string) errhand.VerboseError { - err := actions.CreateBranchWithStartPt(ctx, dEnv.DbData(), newBranch, startPt, false) + err := actions.CreateBranchWithStartPt(ctx, dEnv.DbData(), newBranch, startPt, false, nil) if err != nil { return errhand.BuildDError(err.Error()).Build() } diff --git a/go/libraries/doltcore/doltdb/doltdb.go b/go/libraries/doltcore/doltdb/doltdb.go index 56a2fc1e54..a7d81b508d 100644 --- a/go/libraries/doltcore/doltdb/doltdb.go +++ b/go/libraries/doltcore/doltdb/doltdb.go @@ -1079,7 +1079,7 @@ func (ddb *DoltDB) GetRefsOfTypeByNomsRoot(ctx context.Context, refTypeFilter ma // NewBranchAtCommit creates a new branch with HEAD at the commit given. Branch names must pass IsValidUserBranchName. // Silently overwrites any existing branch with the same name given, if one exists. 
-func (ddb *DoltDB) NewBranchAtCommit(ctx context.Context, branchRef ref.DoltRef, commit *Commit) error { +func (ddb *DoltDB) NewBranchAtCommit(ctx context.Context, branchRef ref.DoltRef, commit *Commit, replicationStatus *ReplicationStatusController) error { if !IsValidBranchRef(branchRef) { panic(fmt.Sprintf("invalid branch name %s, use IsValidUserBranchName check", branchRef.String())) } @@ -1124,7 +1124,7 @@ func (ddb *DoltDB) NewBranchAtCommit(ctx context.Context, branchRef ref.DoltRef, } ws = ws.WithWorkingRoot(commitRoot).WithStagedRoot(commitRoot) - return ddb.UpdateWorkingSet(ctx, wsRef, ws, currWsHash, TodoWorkingSetMeta(), nil) + return ddb.UpdateWorkingSet(ctx, wsRef, ws, currWsHash, TodoWorkingSetMeta(), replicationStatus) } // CopyWorkingSet copies a WorkingSetRef from one ref to another. If `force` is @@ -1159,8 +1159,9 @@ func (ddb *DoltDB) CopyWorkingSet(ctx context.Context, fromWSRef ref.WorkingSetR } // DeleteBranch deletes the branch given, returning an error if it doesn't exist. 
-func (ddb *DoltDB) DeleteBranch(ctx context.Context, branch ref.DoltRef) error { - return ddb.deleteRef(ctx, branch) +func (ddb *DoltDB) DeleteBranch(ctx context.Context, branch ref.DoltRef, replicationStatus *ReplicationStatusController) error { + rsCtx := withReplicaState(ctx, replicationStatus) + return ddb.deleteRef(rsCtx, branch) } func (ddb *DoltDB) deleteRef(ctx context.Context, dref ref.DoltRef) error { diff --git a/go/libraries/doltcore/env/actions/branch.go b/go/libraries/doltcore/env/actions/branch.go index 313bdf7004..0c182cce86 100644 --- a/go/libraries/doltcore/env/actions/branch.go +++ b/go/libraries/doltcore/env/actions/branch.go @@ -31,11 +31,13 @@ var ErrCOBranchDelete = errors.New("attempted to delete checked out branch") var ErrUnmergedBranch = errors.New("branch is not fully merged") var ErrWorkingSetsOnBothBranches = errors.New("checkout would overwrite uncommitted changes on target branch") -func RenameBranch(ctx context.Context, dbData env.DbData, oldBranch, newBranch string, remoteDbPro env.RemoteDbProvider, force bool) error { +func RenameBranch(ctx context.Context, dbData env.DbData, oldBranch, newBranch string, remoteDbPro env.RemoteDbProvider, force bool, rsc *doltdb.ReplicationStatusController) error { oldRef := ref.NewBranchRef(oldBranch) newRef := ref.NewBranchRef(newBranch) - err := CopyBranchOnDB(ctx, dbData.Ddb, oldBranch, newBranch, force) + // TODO: This function smears the branch updates across multiple commits of the datas.Database. 
+ + err := CopyBranchOnDB(ctx, dbData.Ddb, oldBranch, newBranch, force, rsc) if err != nil { return err } @@ -66,14 +68,14 @@ func RenameBranch(ctx context.Context, dbData env.DbData, oldBranch, newBranch s } } - return DeleteBranch(ctx, dbData, oldBranch, DeleteOptions{Force: true}, remoteDbPro) + return DeleteBranch(ctx, dbData, oldBranch, DeleteOptions{Force: true}, remoteDbPro, rsc) } func CopyBranch(ctx context.Context, dEnv *env.DoltEnv, oldBranch, newBranch string, force bool) error { - return CopyBranchOnDB(ctx, dEnv.DoltDB, oldBranch, newBranch, force) + return CopyBranchOnDB(ctx, dEnv.DoltDB, oldBranch, newBranch, force, nil) } -func CopyBranchOnDB(ctx context.Context, ddb *doltdb.DoltDB, oldBranch, newBranch string, force bool) error { +func CopyBranchOnDB(ctx context.Context, ddb *doltdb.DoltDB, oldBranch, newBranch string, force bool, rsc *doltdb.ReplicationStatusController) error { oldRef := ref.NewBranchRef(oldBranch) newRef := ref.NewBranchRef(newBranch) @@ -104,7 +106,7 @@ func CopyBranchOnDB(ctx context.Context, ddb *doltdb.DoltDB, oldBranch, newBranc return err } - return ddb.NewBranchAtCommit(ctx, newRef, cm) + return ddb.NewBranchAtCommit(ctx, newRef, cm, rsc) } type DeleteOptions struct { @@ -112,7 +114,7 @@ type DeleteOptions struct { Remote bool } -func DeleteBranch(ctx context.Context, dbData env.DbData, brName string, opts DeleteOptions, remoteDbPro env.RemoteDbProvider) error { +func DeleteBranch(ctx context.Context, dbData env.DbData, brName string, opts DeleteOptions, remoteDbPro env.RemoteDbProvider, rsc *doltdb.ReplicationStatusController) error { var branchRef ref.DoltRef if opts.Remote { var err error @@ -127,10 +129,10 @@ func DeleteBranch(ctx context.Context, dbData env.DbData, brName string, opts De } } - return DeleteBranchOnDB(ctx, dbData, branchRef, opts, remoteDbPro) + return DeleteBranchOnDB(ctx, dbData, branchRef, opts, remoteDbPro, rsc) } -func DeleteBranchOnDB(ctx context.Context, dbdata env.DbData, branchRef ref.DoltRef, 
opts DeleteOptions, pro env.RemoteDbProvider) error { +func DeleteBranchOnDB(ctx context.Context, dbdata env.DbData, branchRef ref.DoltRef, opts DeleteOptions, pro env.RemoteDbProvider, rsc *doltdb.ReplicationStatusController) error { ddb := dbdata.Ddb hasRef, err := ddb.HasRef(ctx, branchRef) @@ -173,7 +175,7 @@ func DeleteBranchOnDB(ctx context.Context, dbdata env.DbData, branchRef ref.Dolt } } - return ddb.DeleteBranch(ctx, branchRef) + return ddb.DeleteBranch(ctx, branchRef, rsc) } // validateBranchMergedIntoCurrentWorkingBranch returns an error if the given branch is not fully merged into the HEAD of the current branch. @@ -267,8 +269,8 @@ func validateBranchMergedIntoUpstream(ctx context.Context, dbdata env.DbData, br return nil } -func CreateBranchWithStartPt(ctx context.Context, dbData env.DbData, newBranch, startPt string, force bool) error { - err := createBranch(ctx, dbData, newBranch, startPt, force) +func CreateBranchWithStartPt(ctx context.Context, dbData env.DbData, newBranch, startPt string, force bool, rsc *doltdb.ReplicationStatusController) error { + err := createBranch(ctx, dbData, newBranch, startPt, force, rsc) if err != nil { if err == ErrAlreadyExists { @@ -289,7 +291,7 @@ func CreateBranchWithStartPt(ctx context.Context, dbData env.DbData, newBranch, return nil } -func CreateBranchOnDB(ctx context.Context, ddb *doltdb.DoltDB, newBranch, startingPoint string, force bool, headRef ref.DoltRef) error { +func CreateBranchOnDB(ctx context.Context, ddb *doltdb.DoltDB, newBranch, startingPoint string, force bool, headRef ref.DoltRef, rsc *doltdb.ReplicationStatusController) error { branchRef := ref.NewBranchRef(newBranch) hasRef, err := ddb.HasRef(ctx, branchRef) if err != nil { @@ -314,7 +316,7 @@ func CreateBranchOnDB(ctx context.Context, ddb *doltdb.DoltDB, newBranch, starti return err } - err = ddb.NewBranchAtCommit(ctx, branchRef, cm) + err = ddb.NewBranchAtCommit(ctx, branchRef, cm, rsc) if err != nil { return err } @@ -322,8 +324,8 @@ func 
CreateBranchOnDB(ctx context.Context, ddb *doltdb.DoltDB, newBranch, starti return nil } -func createBranch(ctx context.Context, dbData env.DbData, newBranch, startingPoint string, force bool) error { - return CreateBranchOnDB(ctx, dbData.Ddb, newBranch, startingPoint, force, dbData.Rsr.CWBHeadRef()) +func createBranch(ctx context.Context, dbData env.DbData, newBranch, startingPoint string, force bool, rsc *doltdb.ReplicationStatusController) error { + return CreateBranchOnDB(ctx, dbData.Ddb, newBranch, startingPoint, force, dbData.Rsr.CWBHeadRef(), rsc) } var emptyHash = hash.Hash{} diff --git a/go/libraries/doltcore/env/actions/clone.go b/go/libraries/doltcore/env/actions/clone.go index a6907d1c60..779a4027b5 100644 --- a/go/libraries/doltcore/env/actions/clone.go +++ b/go/libraries/doltcore/env/actions/clone.go @@ -230,7 +230,7 @@ func CloneRemote(ctx context.Context, srcDB *doltdb.DoltDB, remoteName, branch s } if brnch.GetPath() != branch { - err := dEnv.DoltDB.DeleteBranch(ctx, brnch) + err := dEnv.DoltDB.DeleteBranch(ctx, brnch, nil) if err != nil { return fmt.Errorf("%w: %s; %s", ErrFailedToDeleteBranch, brnch.String(), err.Error()) } diff --git a/go/libraries/doltcore/env/actions/remotes.go b/go/libraries/doltcore/env/actions/remotes.go index 21dab4f914..179e8efeef 100644 --- a/go/libraries/doltcore/env/actions/remotes.go +++ b/go/libraries/doltcore/env/actions/remotes.go @@ -220,14 +220,14 @@ func DeleteRemoteBranch(ctx context.Context, targetRef ref.BranchRef, remoteRef } if hasRef { - err = remoteDB.DeleteBranch(ctx, targetRef) + err = remoteDB.DeleteBranch(ctx, targetRef, nil) } if err != nil { return err } - err = localDB.DeleteBranch(ctx, remoteRef) + err = localDB.DeleteBranch(ctx, remoteRef, nil) if err != nil { return err diff --git a/go/libraries/doltcore/env/environment.go b/go/libraries/doltcore/env/environment.go index 4eca2b6a9a..3fb3a4e562 100644 --- a/go/libraries/doltcore/env/environment.go +++ b/go/libraries/doltcore/env/environment.go @@ 
-911,7 +911,7 @@ func (dEnv *DoltEnv) RemoveRemote(ctx context.Context, name string) error { rr := r.(ref.RemoteRef) if rr.GetRemote() == remote.Name { - err = ddb.DeleteBranch(ctx, rr) + err = ddb.DeleteBranch(ctx, rr, nil) if err != nil { return fmt.Errorf("%w; failed to delete remote tracking ref '%s'; %s", ErrFailedToDeleteRemote, rr.String(), err.Error()) diff --git a/go/libraries/doltcore/migrate/progress.go b/go/libraries/doltcore/migrate/progress.go index 26592dee8c..7d9656bc57 100644 --- a/go/libraries/doltcore/migrate/progress.go +++ b/go/libraries/doltcore/migrate/progress.go @@ -182,7 +182,7 @@ func persistMigratedCommitMapping(ctx context.Context, ddb *doltdb.DoltDB, mappi } br := ref.NewBranchRef(MigratedCommitsBranch) - err = ddb.NewBranchAtCommit(ctx, br, init) + err = ddb.NewBranchAtCommit(ctx, br, init, nil) if err != nil { return err } diff --git a/go/libraries/doltcore/rebase/rebase.go b/go/libraries/doltcore/rebase/rebase.go index 5ade41afba..3cad66c404 100644 --- a/go/libraries/doltcore/rebase/rebase.go +++ b/go/libraries/doltcore/rebase/rebase.go @@ -152,7 +152,7 @@ func rebaseRefs(ctx context.Context, dbData env.DbData, replay ReplayCommitFn, n for i, r := range refs { switch dRef := r.(type) { case ref.BranchRef: - err = ddb.NewBranchAtCommit(ctx, dRef, newHeads[i]) + err = ddb.NewBranchAtCommit(ctx, dRef, newHeads[i], nil) case ref.TagRef: // rewrite tag with new commit diff --git a/go/libraries/doltcore/sqle/dprocedures/dolt_branch.go b/go/libraries/doltcore/sqle/dprocedures/dolt_branch.go index 3d0c7f2776..5002f8d872 100644 --- a/go/libraries/doltcore/sqle/dprocedures/dolt_branch.go +++ b/go/libraries/doltcore/sqle/dprocedures/dolt_branch.go @@ -64,30 +64,36 @@ func doDoltBranch(ctx *sql.Context, args []string) (int, error) { return 1, fmt.Errorf("Could not load database %s", dbName) } + var rsc doltdb.ReplicationStatusController + switch { case apr.Contains(cli.CopyFlag): - err = copyBranch(ctx, dbData, apr) + err = copyBranch(ctx, 
dbData, apr, &rsc) case apr.Contains(cli.MoveFlag): - err = renameBranch(ctx, dbData, apr, dSess, dbName) + err = renameBranch(ctx, dbData, apr, dSess, dbName, &rsc) case apr.Contains(cli.DeleteFlag), apr.Contains(cli.DeleteForceFlag): - err = deleteBranches(ctx, dbData, apr, dSess, dbName) + err = deleteBranches(ctx, dbData, apr, dSess, dbName, &rsc) default: - err = createNewBranch(ctx, dbData, apr) + err = createNewBranch(ctx, dbData, apr, &rsc) } if err != nil { return 1, err } else { - return 0, commitTransaction(ctx, dSess) + return 0, commitTransaction(ctx, dSess, &rsc) } } -func commitTransaction(ctx *sql.Context, dSess *dsess.DoltSession) error { +func commitTransaction(ctx *sql.Context, dSess *dsess.DoltSession, rsc *doltdb.ReplicationStatusController) error { err := dSess.CommitTransaction(ctx, ctx.GetTransaction()) if err != nil { return err } + if rsc != nil { + dsess.WaitForReplicationController(ctx, *rsc) + } + // Because this transaction manipulation is happening outside the engine's awareness, we need to set it to nil here // to get a fresh transaction started on the next statement. // TODO: put this under engine control @@ -97,7 +103,7 @@ func commitTransaction(ctx *sql.Context, dSess *dsess.DoltSession) error { // renameBranch takes DoltSession and database name to try accessing file system for dolt database. // If the oldBranch being renamed is the current branch on CLI, then RepoState head will be updated with the newBranch ref. 
-func renameBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseResults, sess *dsess.DoltSession, dbName string) error { +func renameBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseResults, sess *dsess.DoltSession, dbName string, rsc *doltdb.ReplicationStatusController) error { if apr.NArg() != 2 { return InvalidArgErr } @@ -124,7 +130,7 @@ func renameBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseRe return err } - err := actions.RenameBranch(ctx, dbData, oldBranchName, newBranchName, sess.Provider(), force) + err := actions.RenameBranch(ctx, dbData, oldBranchName, newBranchName, sess.Provider(), force, rsc) if err != nil { return err } @@ -150,7 +156,7 @@ func renameBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseRe // deleteBranches takes DoltSession and database name to try accessing file system for dolt database. // If the database is not session state db and the branch being deleted is the current branch on CLI, it will update // the RepoState to set head as empty branchRef. 
-func deleteBranches(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseResults, sess *dsess.DoltSession, dbName string) error { +func deleteBranches(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseResults, sess *dsess.DoltSession, dbName string, rsc *doltdb.ReplicationStatusController) error { if apr.NArg() == 0 { return InvalidArgErr } @@ -194,7 +200,7 @@ func deleteBranches(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParse err = actions.DeleteBranch(ctx, dbData, branchName, actions.DeleteOptions{ Force: force, - }, dSess.Provider()) + }, dSess.Provider(), rsc) if err != nil { return err } @@ -274,7 +280,7 @@ func loadConfig(ctx *sql.Context) *env.DoltCliConfig { return dEnv.Config } -func createNewBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseResults) error { +func createNewBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseResults, rsc *doltdb.ReplicationStatusController) error { if apr.NArg() == 0 || apr.NArg() > 2 { return InvalidArgErr } @@ -332,7 +338,7 @@ func createNewBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgPars return err } - err = actions.CreateBranchWithStartPt(ctx, dbData, branchName, startPt, apr.Contains(cli.ForceFlag)) + err = actions.CreateBranchWithStartPt(ctx, dbData, branchName, startPt, apr.Contains(cli.ForceFlag), rsc) if err != nil { return err } @@ -348,7 +354,7 @@ func createNewBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgPars return nil } -func copyBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseResults) error { +func copyBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseResults, rsc *doltdb.ReplicationStatusController) error { if apr.NArg() != 2 { return InvalidArgErr } @@ -364,10 +370,10 @@ func copyBranch(ctx *sql.Context, dbData env.DbData, apr *argparser.ArgParseResu } force := apr.Contains(cli.ForceFlag) - return copyABranch(ctx, dbData, srcBr, destBr, force) + return copyABranch(ctx, 
dbData, srcBr, destBr, force, rsc) } -func copyABranch(ctx *sql.Context, dbData env.DbData, srcBr string, destBr string, force bool) error { +func copyABranch(ctx *sql.Context, dbData env.DbData, srcBr string, destBr string, force bool, rsc *doltdb.ReplicationStatusController) error { if err := branch_control.CanCreateBranch(ctx, destBr); err != nil { return err } @@ -378,7 +384,7 @@ func copyABranch(ctx *sql.Context, dbData env.DbData, srcBr string, destBr strin return err } } - err := actions.CopyBranchOnDB(ctx, dbData.Ddb, srcBr, destBr, force) + err := actions.CopyBranchOnDB(ctx, dbData.Ddb, srcBr, destBr, force, rsc) if err != nil { if err == doltdb.ErrBranchNotFound { return errors.New(fmt.Sprintf("fatal: A branch named '%s' not found", srcBr)) diff --git a/go/libraries/doltcore/sqle/dprocedures/dolt_checkout.go b/go/libraries/doltcore/sqle/dprocedures/dolt_checkout.go index 5ab0417340..4e851e18fc 100644 --- a/go/libraries/doltcore/sqle/dprocedures/dolt_checkout.go +++ b/go/libraries/doltcore/sqle/dprocedures/dolt_checkout.go @@ -71,9 +71,11 @@ func doDoltCheckout(ctx *sql.Context, args []string) (int, error) { return 1, fmt.Errorf("Could not load database %s", currentDbName) } + var rsc doltdb.ReplicationStatusController + // Checking out new branch. 
if branchOrTrack { - err = checkoutNewBranch(ctx, dbName, dbData, apr) + err = checkoutNewBranch(ctx, dbName, dbData, apr, &rsc) if err != nil { return 1, err } else { @@ -121,13 +123,15 @@ func doDoltCheckout(ctx *sql.Context, args []string) (int, error) { err = checkoutTables(ctx, roots, dbName, args) if err != nil && apr.NArg() == 1 { - err = checkoutRemoteBranch(ctx, dbName, dbData, branchName, apr) + err = checkoutRemoteBranch(ctx, dbName, dbData, branchName, apr, &rsc) } if err != nil { return 1, err } + dsess.WaitForReplicationController(ctx, rsc) + return 0, nil } @@ -196,7 +200,7 @@ func getRevisionForRevisionDatabase(ctx *sql.Context, dbName string) (string, st // checkoutRemoteBranch checks out a remote branch creating a new local branch with the same name as the remote branch // and set its upstream. The upstream persists out of sql session. -func checkoutRemoteBranch(ctx *sql.Context, dbName string, dbData env.DbData, branchName string, apr *argparser.ArgParseResults) error { +func checkoutRemoteBranch(ctx *sql.Context, dbName string, dbData env.DbData, branchName string, apr *argparser.ArgParseResults, rsc *doltdb.ReplicationStatusController) error { remoteRefs, err := actions.GetRemoteBranchRef(ctx, dbData.Ddb, branchName) if err != nil { return errors.New("fatal: unable to read from data repository") @@ -206,7 +210,7 @@ func checkoutRemoteBranch(ctx *sql.Context, dbName string, dbData env.DbData, br return fmt.Errorf("error: could not find %s", branchName) } else if len(remoteRefs) == 1 { remoteRef := remoteRefs[0] - err = actions.CreateBranchWithStartPt(ctx, dbData, branchName, remoteRef.String(), false) + err = actions.CreateBranchWithStartPt(ctx, dbData, branchName, remoteRef.String(), false, rsc) if err != nil { return err } @@ -226,7 +230,7 @@ func checkoutRemoteBranch(ctx *sql.Context, dbName string, dbData env.DbData, br } } -func checkoutNewBranch(ctx *sql.Context, dbName string, dbData env.DbData, apr *argparser.ArgParseResults) error { 
+func checkoutNewBranch(ctx *sql.Context, dbName string, dbData env.DbData, apr *argparser.ArgParseResults, rsc *doltdb.ReplicationStatusController) error { var newBranchName string var remoteName, remoteBranchName string var startPt = "head" @@ -259,7 +263,7 @@ func checkoutNewBranch(ctx *sql.Context, dbName string, dbData env.DbData, apr * newBranchName = newBranch } - err = actions.CreateBranchWithStartPt(ctx, dbData, newBranchName, startPt, false) + err = actions.CreateBranchWithStartPt(ctx, dbData, newBranchName, startPt, false, rsc) if err != nil { return err } diff --git a/go/libraries/doltcore/sqle/dprocedures/dolt_remote.go b/go/libraries/doltcore/sqle/dprocedures/dolt_remote.go index aa1b59840d..2e6d32451e 100644 --- a/go/libraries/doltcore/sqle/dprocedures/dolt_remote.go +++ b/go/libraries/doltcore/sqle/dprocedures/dolt_remote.go @@ -24,6 +24,7 @@ import ( "github.com/dolthub/dolt/go/cmd/dolt/errhand" "github.com/dolthub/dolt/go/libraries/doltcore/branch_control" "github.com/dolthub/dolt/go/libraries/doltcore/dbfactory" + "github.com/dolthub/dolt/go/libraries/doltcore/doltdb" "github.com/dolthub/dolt/go/libraries/doltcore/env" "github.com/dolthub/dolt/go/libraries/doltcore/ref" "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess" @@ -65,11 +66,13 @@ func doDoltRemote(ctx *sql.Context, args []string) (int, error) { return 1, fmt.Errorf("error: invalid argument, use 'dolt_remotes' system table to list remotes") } + var rsc doltdb.ReplicationStatusController + switch apr.Arg(0) { case "add": err = addRemote(ctx, dbName, dbData, apr, dSess) case "remove", "rm": - err = removeRemote(ctx, dbData, apr) + err = removeRemote(ctx, dbData, apr, &rsc) default: err = fmt.Errorf("error: invalid argument") } @@ -77,6 +80,9 @@ func doDoltRemote(ctx *sql.Context, args []string) (int, error) { if err != nil { return 1, err } + + dsess.WaitForReplicationController(ctx, rsc) + return 0, nil } @@ -106,7 +112,7 @@ func addRemote(_ *sql.Context, dbName string, dbd 
env.DbData, apr *argparser.Arg return dbd.Rsw.AddRemote(r) } -func removeRemote(ctx *sql.Context, dbd env.DbData, apr *argparser.ArgParseResults) error { +func removeRemote(ctx *sql.Context, dbd env.DbData, apr *argparser.ArgParseResults, rsc *doltdb.ReplicationStatusController) error { if apr.NArg() != 2 { return fmt.Errorf("error: invalid argument") } @@ -133,7 +139,7 @@ func removeRemote(ctx *sql.Context, dbd env.DbData, apr *argparser.ArgParseResul rr := r.(ref.RemoteRef) if rr.GetRemote() == remote.Name { - err = ddb.DeleteBranch(ctx, rr) + err = ddb.DeleteBranch(ctx, rr, rsc) if err != nil { return fmt.Errorf("%w; failed to delete remote tracking ref '%s'; %s", env.ErrFailedToDeleteRemote, rr.String(), err.Error()) diff --git a/go/libraries/doltcore/sqle/dsess/transactions.go b/go/libraries/doltcore/sqle/dsess/transactions.go index 3234ec9e5a..eb9159db85 100644 --- a/go/libraries/doltcore/sqle/dsess/transactions.go +++ b/go/libraries/doltcore/sqle/dsess/transactions.go @@ -219,7 +219,7 @@ func doltCommit(ctx *sql.Context, var rsc doltdb.ReplicationStatusController newCommit, err := tx.dbData.Ddb.CommitWithWorkingSet(ctx, headRef, tx.workingSetRef, &pending, workingSet, currHash, tx.getWorkingSetMeta(ctx), &rsc) - waitForReplicationController(ctx, rsc) + WaitForReplicationController(ctx, rsc) return workingSet, newCommit, err } @@ -232,7 +232,7 @@ func txCommit(ctx *sql.Context, ) (*doltdb.WorkingSet, *doltdb.Commit, error) { var rsc doltdb.ReplicationStatusController err := tx.dbData.Ddb.UpdateWorkingSet(ctx, tx.workingSetRef, workingSet, hash, tx.getWorkingSetMeta(ctx), &rsc) - waitForReplicationController(ctx, rsc) + WaitForReplicationController(ctx, rsc) return workingSet, nil, err } @@ -241,7 +241,7 @@ func (tx *DoltTransaction) DoltCommit(ctx *sql.Context, workingSet *doltdb.Worki return tx.doCommit(ctx, workingSet, commit, doltCommit) } -func waitForReplicationController(ctx *sql.Context, rsc doltdb.ReplicationStatusController) { +func 
WaitForReplicationController(ctx *sql.Context, rsc doltdb.ReplicationStatusController) { if len(rsc.Wait) == 0 { return } diff --git a/go/libraries/doltcore/sqle/read_replica_database.go b/go/libraries/doltcore/sqle/read_replica_database.go index beb4ae50fd..1cb591e3ef 100644 --- a/go/libraries/doltcore/sqle/read_replica_database.go +++ b/go/libraries/doltcore/sqle/read_replica_database.go @@ -243,7 +243,7 @@ func (rrd ReadReplicaDatabase) CreateLocalBranchFromRemote(ctx *sql.Context, bra } // create refs/heads/branch dataset - err = rrd.ddb.NewBranchAtCommit(ctx, branchRef, cm) + err = rrd.ddb.NewBranchAtCommit(ctx, branchRef, cm, nil) if err != nil { return nil, err } @@ -458,7 +458,7 @@ func (rrd ReadReplicaDatabase) createNewBranchFromRemote(ctx *sql.Context, remot return err } - err = rrd.ddb.NewBranchAtCommit(ctx, remoteRef.Ref, cm) + err = rrd.ddb.NewBranchAtCommit(ctx, remoteRef.Ref, cm, nil) err = rrd.ddb.SetHead(ctx, trackingRef, remoteRef.Hash) if err != nil { return err @@ -534,7 +534,7 @@ func refsToDelete(remRefs, localRefs []doltdb.RefWithHash) []doltdb.RefWithHash func deleteBranches(ctx *sql.Context, rrd ReadReplicaDatabase, branches []doltdb.RefWithHash) error { for _, b := range branches { - err := rrd.ddb.DeleteBranch(ctx, b.Ref) + err := rrd.ddb.DeleteBranch(ctx, b.Ref, nil) if errors.Is(err, doltdb.ErrBranchNotFound) { continue } else if err != nil { From db9dce8e5043fd6aad5eccc4f61bac3b4e293b53 Mon Sep 17 00:00:00 2001 From: Aaron Son Date: Mon, 15 May 2023 10:51:11 -0700 Subject: [PATCH 57/82] go: sqle: dsess/transactions: Turn replication failures into session warnings if dolt_cluster_ack_writes_timeout_secs is on. 
--- .../doltcore/sqle/dsess/transactions.go | 33 ++++++++++++++++--- 1 file changed, 28 insertions(+), 5 deletions(-) diff --git a/go/libraries/doltcore/sqle/dsess/transactions.go b/go/libraries/doltcore/sqle/dsess/transactions.go index eb9159db85..077c6c0fe2 100644 --- a/go/libraries/doltcore/sqle/dsess/transactions.go +++ b/go/libraries/doltcore/sqle/dsess/transactions.go @@ -24,6 +24,7 @@ import ( "github.com/dolthub/go-mysql-server/sql" "github.com/sirupsen/logrus" + "github.com/dolthub/vitess/go/mysql" "github.com/dolthub/dolt/go/libraries/doltcore/doltdb" "github.com/dolthub/dolt/go/libraries/doltcore/env" @@ -255,14 +256,17 @@ func WaitForReplicationController(ctx *sql.Context, rsc doltdb.ReplicationStatus } cCtx, cancel := context.WithCancel(ctx) - defer cancel() var wg sync.WaitGroup wg.Add(len(rsc.Wait)) - for _, f := range rsc.Wait { + for i, f := range rsc.Wait { f := f - go func() error { + i := i + go func() { defer wg.Done() - return f(cCtx) + err := f(cCtx) + if err == nil { + rsc.Wait[i] = nil + } }() } @@ -274,9 +278,28 @@ func WaitForReplicationController(ctx *sql.Context, rsc doltdb.ReplicationStatus select { case <-time.After(time.Duration(timeoutI) * time.Second): - // TODO: Error, warning, something... + // We timed out before all the waiters were done. + // First we make certain to finalize everything. + cancel() + <-done case <-done: + cancel() } + + // Just because our waiters all completed does not mean they all + // returned nil errors. Any non-nil entries in rsc.Wait returned an + // error. We turn those into warnings here. + numFailed := 0 + for _, f := range rsc.Wait { + if f != nil { + numFailed += 1 + } + } + ctx.Session.Warn(&sql.Warning{ + Level: "Warning", + Code: mysql.ERQueryTimeout, + Message: fmt.Sprintf("Timed out replication of commit to %d out of %d replicas.", numFailed, len(rsc.Wait)), + }) } // doCommit commits this transaction with the write function provided. 
It takes the same params as DoltCommit From a886ab276a828cab62124d519fa9696339a77048 Mon Sep 17 00:00:00 2001 From: Aaron Son Date: Mon, 15 May 2023 13:56:50 -0700 Subject: [PATCH 58/82] go: sqle: cluster: Add a circuit breaker for dolt_cluster_ack_writes_timeout_secs. Once a replica fails the timeout, do not block on it going forward. --- go/libraries/doltcore/doltdb/doltdb.go | 7 ++++ go/libraries/doltcore/doltdb/hooksdatabase.go | 10 +++++ .../doltcore/sqle/cluster/commithook.go | 39 ++++++++++++++----- .../doltcore/sqle/dsess/transactions.go | 7 +++- 4 files changed, 52 insertions(+), 11 deletions(-) diff --git a/go/libraries/doltcore/doltdb/doltdb.go b/go/libraries/doltcore/doltdb/doltdb.go index a7d81b508d..a95be4e82d 100644 --- a/go/libraries/doltcore/doltdb/doltdb.go +++ b/go/libraries/doltcore/doltdb/doltdb.go @@ -1222,6 +1222,13 @@ type ReplicationStatusController struct { // associated with a commithook to complete. Must return if the // associated Context is canceled. Wait []func(ctx context.Context) error + + // There is an entry here for each function in Wait. If a Wait fails, + // you can notify the corresponding function in this slice. This might + // control resiliency behaviors like adaptive retry and timeouts, + // circuit breakers, etc. and might feed into exposed replication + // metrics. 
+ NotifyWaitFailed []func() } // UpdateWorkingSet updates the working set with the ref given to the root value given diff --git a/go/libraries/doltcore/doltdb/hooksdatabase.go b/go/libraries/doltcore/doltdb/hooksdatabase.go index 96bf911e3e..464bf48942 100644 --- a/go/libraries/doltcore/doltdb/hooksdatabase.go +++ b/go/libraries/doltcore/doltdb/hooksdatabase.go @@ -81,6 +81,7 @@ func (db hooksDatabase) ExecuteCommitHooks(ctx context.Context, ds datas.Dataset rsc := getReplicaState(ctx) if rsc != nil { rsc.Wait = make([]func(context.Context) error, len(db.postCommitHooks)) + rsc.NotifyWaitFailed = make([]func(), len(db.postCommitHooks)) } for il, hook := range db.postCommitHooks { if !onlyWS || hook.ExecuteForWorkingSets() { @@ -95,6 +96,13 @@ func (db hooksDatabase) ExecuteCommitHooks(ctx context.Context, ds datas.Dataset } if rsc != nil { rsc.Wait[i] = f + if nf, ok := hook.(interface{ + NotifyWaitFailed() + }); ok { + rsc.NotifyWaitFailed[i] = nf.NotifyWaitFailed + } else { + rsc.NotifyWaitFailed[i] = func() {} + } } }() } @@ -105,10 +113,12 @@ func (db hooksDatabase) ExecuteCommitHooks(ctx context.Context, ds datas.Dataset for i := range rsc.Wait { if rsc.Wait[i] != nil { rsc.Wait[j] = rsc.Wait[i] + rsc.NotifyWaitFailed[j] = rsc.NotifyWaitFailed[i] j++ } } rsc.Wait = rsc.Wait[:j] + rsc.NotifyWaitFailed = rsc.NotifyWaitFailed[:j] } } diff --git a/go/libraries/doltcore/sqle/cluster/commithook.go b/go/libraries/doltcore/sqle/cluster/commithook.go index f49ce7123d..475a9fc180 100644 --- a/go/libraries/doltcore/sqle/cluster/commithook.go +++ b/go/libraries/doltcore/sqle/cluster/commithook.go @@ -61,6 +61,12 @@ type commithook struct { // 4. If you read a channel out of |successChs|, that channel will be closed on the next successful replication attempt. It will not be closed before then. 
successChs []chan struct{} + // If this is true, the waitF returned by Execute() will fast fail if + // we are not already caught up, instead of blocking on a successCh + // actually indicated we are caught up. This is set to by a call to + // NotifyWaitFailed(), an optional interface on CommitHook. + circuitBreakerOpen bool + role Role // The standby replica to which the new root gets replicated. @@ -157,6 +163,7 @@ func (h *commithook) replicate(ctx context.Context) { } h.successChs = nil } + h.circuitBreakerOpen = false h.cond.Wait() lgr.Tracef("cluster/commithook: background thread: woken up.") } @@ -406,22 +413,34 @@ func (h *commithook) Execute(ctx context.Context, ds datas.Dataset, db datas.Dat } var waitF func(context.Context) error if !h.isCaughtUp() { - if len(h.successChs) == 0 { - h.successChs = append(h.successChs, make(chan struct{})) - } - successCh := h.successChs[0] - waitF = func(ctx context.Context) error { - select { - case <-successCh: - return nil - case <-ctx.Done(): - return ctx.Err() + if h.circuitBreakerOpen { + waitF = func(ctx context.Context) error { + return fmt.Errorf("circuit breaker for replication to %s/%s is open. 
this commit did not necessarily replicate successfully.", h.remotename, h.dbname) + } + } else { + if len(h.successChs) == 0 { + h.successChs = append(h.successChs, make(chan struct{})) + } + successCh := h.successChs[0] + waitF = func(ctx context.Context) error { + select { + case <-successCh: + return nil + case <-ctx.Done(): + return ctx.Err() + } } } } return waitF, nil } +func (h *commithook) NotifyWaitFailed() { + h.mu.Lock() + defer h.mu.Unlock() + h.circuitBreakerOpen = true +} + func (h *commithook) HandleError(ctx context.Context, err error) error { return nil } diff --git a/go/libraries/doltcore/sqle/dsess/transactions.go b/go/libraries/doltcore/sqle/dsess/transactions.go index 077c6c0fe2..e259ed29eb 100644 --- a/go/libraries/doltcore/sqle/dsess/transactions.go +++ b/go/libraries/doltcore/sqle/dsess/transactions.go @@ -276,12 +276,14 @@ func WaitForReplicationController(ctx *sql.Context, rsc doltdb.ReplicationStatus close(done) }() + waitFailed := false select { case <-time.After(time.Duration(timeoutI) * time.Second): // We timed out before all the waiters were done. // First we make certain to finalize everything. cancel() <-done + waitFailed = true case <-done: cancel() } @@ -290,9 +292,12 @@ func WaitForReplicationController(ctx *sql.Context, rsc doltdb.ReplicationStatus // returned nil errors. Any non-nil entries in rsc.Wait returned an // error. We turn those into warnings here. numFailed := 0 - for _, f := range rsc.Wait { + for i, f := range rsc.Wait { if f != nil { numFailed += 1 + if waitFailed { + rsc.NotifyWaitFailed[i]() + } } } ctx.Session.Warn(&sql.Warning{ From 09eb9aaed57739e07cffbf9bc30c3ecb8b15f649 Mon Sep 17 00:00:00 2001 From: Aaron Son Date: Mon, 15 May 2023 13:58:14 -0700 Subject: [PATCH 59/82] go: sqle: cluster: Only close successChs when we are actually caught up. 
--- go/libraries/doltcore/sqle/cluster/commithook.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/go/libraries/doltcore/sqle/cluster/commithook.go b/go/libraries/doltcore/sqle/cluster/commithook.go index 475a9fc180..1889257f4e 100644 --- a/go/libraries/doltcore/sqle/cluster/commithook.go +++ b/go/libraries/doltcore/sqle/cluster/commithook.go @@ -157,13 +157,13 @@ func (h *commithook) replicate(ctx context.Context) { if h.waitNotify != nil { h.waitNotify() } - if len(h.successChs) != 0 { + if len(h.successChs) != 0 && h.isCaughtUp() { for _, ch := range h.successChs { close(ch) } h.successChs = nil + h.circuitBreakerOpen = false } - h.circuitBreakerOpen = false h.cond.Wait() lgr.Tracef("cluster/commithook: background thread: woken up.") } From a5358ff02dfb4c26fe6c220068c560c78ebde252 Mon Sep 17 00:00:00 2001 From: Aaron Son Date: Mon, 15 May 2023 14:17:21 -0700 Subject: [PATCH 60/82] go: Catch some stragglers in _test files for an interface change. --- go/libraries/doltcore/dtestutils/testcommands/multienv.go | 2 +- go/libraries/doltcore/env/actions/commitwalk/commitwalk_test.go | 2 +- go/libraries/doltcore/merge/merge_test.go | 2 +- go/libraries/doltcore/sqle/sqlselect_test.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go/libraries/doltcore/dtestutils/testcommands/multienv.go b/go/libraries/doltcore/dtestutils/testcommands/multienv.go index 1a02907169..f669255be3 100644 --- a/go/libraries/doltcore/dtestutils/testcommands/multienv.go +++ b/go/libraries/doltcore/dtestutils/testcommands/multienv.go @@ -156,7 +156,7 @@ func (mr *MultiRepoTestSetup) NewRemote(remoteName string) { func (mr *MultiRepoTestSetup) NewBranch(dbName, branchName string) { dEnv := mr.envs[dbName] - err := actions.CreateBranchWithStartPt(context.Background(), dEnv.DbData(), branchName, "head", false) + err := actions.CreateBranchWithStartPt(context.Background(), dEnv.DbData(), branchName, "head", false, nil) if err != nil { mr.Errhand(err) } diff 
--git a/go/libraries/doltcore/env/actions/commitwalk/commitwalk_test.go b/go/libraries/doltcore/env/actions/commitwalk/commitwalk_test.go index 8f79fefc3c..6188662261 100644 --- a/go/libraries/doltcore/env/actions/commitwalk/commitwalk_test.go +++ b/go/libraries/doltcore/env/actions/commitwalk/commitwalk_test.go @@ -86,7 +86,7 @@ func TestGetDotDotRevisions(t *testing.T) { // Create a feature branch. bref := ref.NewBranchRef("feature") - err = dEnv.DoltDB.NewBranchAtCommit(context.Background(), bref, mainCommits[5]) + err = dEnv.DoltDB.NewBranchAtCommit(context.Background(), bref, mainCommits[5], nil) require.NoError(t, err) // Create 3 commits on feature branch. diff --git a/go/libraries/doltcore/merge/merge_test.go b/go/libraries/doltcore/merge/merge_test.go index 2b5fa3acdf..c1e54c6749 100644 --- a/go/libraries/doltcore/merge/merge_test.go +++ b/go/libraries/doltcore/merge/merge_test.go @@ -743,7 +743,7 @@ func buildLeftRightAncCommitsAndBranches(t *testing.T, ddb *doltdb.DoltDB, rootT commit, err := ddb.Commit(context.Background(), hash, ref.NewBranchRef(env.DefaultInitBranch), meta) require.NoError(t, err) - err = ddb.NewBranchAtCommit(context.Background(), ref.NewBranchRef("to-merge"), initialCommit) + err = ddb.NewBranchAtCommit(context.Background(), ref.NewBranchRef("to-merge"), initialCommit, nil) require.NoError(t, err) mergeCommit, err := ddb.Commit(context.Background(), mergeHash, ref.NewBranchRef("to-merge"), meta) require.NoError(t, err) diff --git a/go/libraries/doltcore/sqle/sqlselect_test.go b/go/libraries/doltcore/sqle/sqlselect_test.go index ff3caee88a..88caf6d0b6 100644 --- a/go/libraries/doltcore/sqle/sqlselect_test.go +++ b/go/libraries/doltcore/sqle/sqlselect_test.go @@ -1675,7 +1675,7 @@ func processNode(t *testing.T, ctx context.Context, dEnv *env.DoltEnv, node Hist require.NoError(t, err) if !ok { - err = dEnv.DoltDB.NewBranchAtCommit(ctx, branchRef, parent) + err = dEnv.DoltDB.NewBranchAtCommit(ctx, branchRef, parent, nil) 
require.NoError(t, err) } From 070b8d3c6adc65092afe806d1816b1ecbab07460 Mon Sep 17 00:00:00 2001 From: bheni Date: Mon, 15 May 2023 21:25:05 +0000 Subject: [PATCH 61/82] [ga-bump-release] Update Dolt version to 1.0.2 and release v1.0.2 --- go/cmd/dolt/dolt.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/cmd/dolt/dolt.go b/go/cmd/dolt/dolt.go index 2b97e77572..6941c77684 100644 --- a/go/cmd/dolt/dolt.go +++ b/go/cmd/dolt/dolt.go @@ -62,7 +62,7 @@ import ( ) const ( - Version = "1.0.1" + Version = "1.0.2" ) var dumpDocsCommand = &commands.DumpDocsCmd{} From 05125426dc48da639d26ddc4f1dd6d80bfda7a4f Mon Sep 17 00:00:00 2001 From: reltuk Date: Mon, 15 May 2023 21:26:49 +0000 Subject: [PATCH 62/82] [ga-format-pr] Run go/utils/repofmt/format_repo.sh and go/Godeps/update.sh --- go/libraries/doltcore/doltdb/hooksdatabase.go | 2 +- go/libraries/doltcore/sqle/dsess/transactions.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/go/libraries/doltcore/doltdb/hooksdatabase.go b/go/libraries/doltcore/doltdb/hooksdatabase.go index 464bf48942..2b60c5dea5 100644 --- a/go/libraries/doltcore/doltdb/hooksdatabase.go +++ b/go/libraries/doltcore/doltdb/hooksdatabase.go @@ -96,7 +96,7 @@ func (db hooksDatabase) ExecuteCommitHooks(ctx context.Context, ds datas.Dataset } if rsc != nil { rsc.Wait[i] = f - if nf, ok := hook.(interface{ + if nf, ok := hook.(interface { NotifyWaitFailed() }); ok { rsc.NotifyWaitFailed[i] = nf.NotifyWaitFailed diff --git a/go/libraries/doltcore/sqle/dsess/transactions.go b/go/libraries/doltcore/sqle/dsess/transactions.go index e259ed29eb..9be838ed59 100644 --- a/go/libraries/doltcore/sqle/dsess/transactions.go +++ b/go/libraries/doltcore/sqle/dsess/transactions.go @@ -23,8 +23,8 @@ import ( "time" "github.com/dolthub/go-mysql-server/sql" - "github.com/sirupsen/logrus" "github.com/dolthub/vitess/go/mysql" + "github.com/sirupsen/logrus" "github.com/dolthub/dolt/go/libraries/doltcore/doltdb" 
"github.com/dolthub/dolt/go/libraries/doltcore/env" From 3899ddf05a8254a7d31dd1c55bd5ff0a0b4b577b Mon Sep 17 00:00:00 2001 From: Jason Fulghum Date: Mon, 15 May 2023 15:26:21 -0700 Subject: [PATCH 63/82] First pass at FK equality checking to handle resolved/unresolved FKs. --- go/libraries/doltcore/diff/table_deltas.go | 44 +++++----- .../doltcore/doltdb/foreign_key_coll.go | 81 ++++++++++++++++++- integration-tests/bats/diff.bats | 36 ++++++--- 3 files changed, 127 insertions(+), 34 deletions(-) diff --git a/go/libraries/doltcore/diff/table_deltas.go b/go/libraries/doltcore/diff/table_deltas.go index ae24acfe1a..0bc6337e2f 100644 --- a/go/libraries/doltcore/diff/table_deltas.go +++ b/go/libraries/doltcore/diff/table_deltas.go @@ -398,7 +398,29 @@ func (td TableDelta) CurName() string { } func (td TableDelta) HasFKChanges() bool { - return !fkSlicesAreEqual(td.FromFks, td.ToFks) + if len(td.FromFks) != len(td.ToFks) { + return true + } + + sort.Slice(td.FromFks, func(i, j int) bool { + return td.FromFks[i].Name < td.FromFks[j].Name + }) + sort.Slice(td.ToFks, func(i, j int) bool { + return td.ToFks[i].Name < td.ToFks[j].Name + }) + + fromSchemaMap := td.FromFksParentSch + fromSchemaMap[td.FromName] = td.FromSch + toSchemaMap := td.ToFksParentSch + toSchemaMap[td.ToName] = td.ToSch + + for i := range td.FromFks { + if !td.FromFks[i].Equals(td.ToFks[i], fromSchemaMap, toSchemaMap) { + return true + } + } + + return false } // GetSchemas returns the table's schema at the fromRoot and toRoot, or schema.Empty if the table did not exist. 
@@ -538,26 +560,6 @@ func (td TableDelta) GetRowData(ctx context.Context) (from, to durable.Index, er return from, to, nil } -func fkSlicesAreEqual(from, to []doltdb.ForeignKey) bool { - if len(from) != len(to) { - return false - } - - sort.Slice(from, func(i, j int) bool { - return from[i].Name < from[j].Name - }) - sort.Slice(to, func(i, j int) bool { - return to[i].Name < to[j].Name - }) - - for i := range from { - if !from[i].DeepEquals(to[i]) { - return false - } - } - return true -} - // SqlSchemaDiff returns a slice of DDL statements that will transform the schema in the from delta to the schema in // the to delta. func SqlSchemaDiff(ctx context.Context, td TableDelta, toSchemas map[string]schema.Schema) ([]string, error) { diff --git a/go/libraries/doltcore/doltdb/foreign_key_coll.go b/go/libraries/doltcore/doltdb/foreign_key_coll.go index 5a21204822..b42700b3e8 100644 --- a/go/libraries/doltcore/doltdb/foreign_key_coll.go +++ b/go/libraries/doltcore/doltdb/foreign_key_coll.go @@ -124,7 +124,86 @@ func (fk ForeignKey) EqualDefs(other ForeignKey) bool { fk.OnDelete == other.OnDelete } -// DeepEquals compares all attributes of a foreign key to another, including name and table names. +// Equals compares this ForeignKey to |other| and returns true if they are equal. Foreign keys can either be in +// a "resolved" state, where the referenced columns in the parent and child tables are identified by column tags, +// or in an "unresolved" state where the reference columns in the parent and child are still identified by strings. +// If one foreign key is resolved and one is unresolved, the logic for comparing them requires resolving the string +// column names to column tags, which is why |fkSchemasByName| and |otherSchemasByName| are passed in. Each of these +// is a map of table schemas for |fk| and |other|, where the child table and every parent table referenced in the +// foreign key is present in the map. 
+func (fk ForeignKey) Equals(other ForeignKey, fkSchemasByName, otherSchemasByName map[string]schema.Schema) bool { + // If both FKs are resolved or unresolved, we can just deeply compare them + if fk.IsResolved() == other.IsResolved() { + return fk.DeepEquals(other) + } + + // Otherwise, one FK is resolved and one is not, so we need to work a little harder + // to calculate equality since their referenced columns are represented differently. + // First check the attributes that don't change when an FK is resolved or unresolved. + if fk.Name != other.Name && + fk.TableName != other.TableName && + fk.ReferencedTableName != other.ReferencedTableName && + fk.TableIndex != other.TableIndex && + fk.ReferencedTableIndex != other.ReferencedTableIndex && + fk.OnUpdate == other.OnUpdate && + fk.OnDelete == other.OnDelete { + return false + } + + // Sort out which FK is resolved and which is not + var resolvedFK, unresolvedFK ForeignKey + var resolvedSchemasByName map[string]schema.Schema + if fk.IsResolved() { + resolvedFK, unresolvedFK, resolvedSchemasByName = fk, other, fkSchemasByName + } else { + resolvedFK, unresolvedFK, resolvedSchemasByName = other, fk, otherSchemasByName + } + + // Check the columns on the child table + if len(resolvedFK.TableColumns) != len(unresolvedFK.UnresolvedFKDetails.TableColumns) { + return false + } + for i, tag := range resolvedFK.TableColumns { + unresolvedColName := unresolvedFK.UnresolvedFKDetails.TableColumns[i] + resolvedSch, ok := resolvedSchemasByName[resolvedFK.TableName] + if !ok { + return false + } + resolvedCol, ok := resolvedSch.GetAllCols().GetByTag(tag) + if !ok { + return false + } + if resolvedCol.Name != unresolvedColName { + return false + } + } + + // Check the columns on the parent table + if len(resolvedFK.ReferencedTableColumns) != len(unresolvedFK.UnresolvedFKDetails.ReferencedTableColumns) { + return false + } + for i, tag := range resolvedFK.ReferencedTableColumns { + unresolvedColName := 
unresolvedFK.UnresolvedFKDetails.ReferencedTableColumns[i] + resolvedSch, ok := resolvedSchemasByName[unresolvedFK.ReferencedTableName] + if !ok { + return false + } + resolvedCol, ok := resolvedSch.GetAllCols().GetByTag(tag) + if !ok { + return false + } + if resolvedCol.Name != unresolvedColName { + return false + } + } + + return true +} + +// DeepEquals compares all attributes of a foreign key to another, including name and +// table names. Note that if one foreign key is resolved and the other is NOT resolved, +// then this function will not calculate equality correctly. When comparing a resolved +// FK with an unresolved FK, the ForeignKey.Equals() function should be used instead. func (fk ForeignKey) DeepEquals(other ForeignKey) bool { if !fk.EqualDefs(other) { return false diff --git a/integration-tests/bats/diff.bats b/integration-tests/bats/diff.bats index 6678590fef..fef8f0c770 100644 --- a/integration-tests/bats/diff.bats +++ b/integration-tests/bats/diff.bats @@ -771,23 +771,35 @@ SQL [[ "$output" =~ 'resolved foreign key' ]] || false } -@test "diff: existing foreign key is resolved" { +@test "diff: resolved FKs don't show up in diff results" { dolt sql < Date: Mon, 15 May 2023 16:36:50 -0700 Subject: [PATCH 64/82] Update ci-check-repo.yaml --- .github/workflows/ci-check-repo.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/ci-check-repo.yaml b/.github/workflows/ci-check-repo.yaml index 17d9fafec7..370c7a435b 100644 --- a/.github/workflows/ci-check-repo.yaml +++ b/.github/workflows/ci-check-repo.yaml @@ -3,8 +3,6 @@ name: Check Formatting, Committers and Generated Code on: pull_request: branches: [ main ] -# pull_request_target: -# types: [opened, edited, reopened] concurrency: group: ci-check-repo-${{ github.event.pull_request.number || github.ref }} @@ -107,7 +105,7 @@ jobs: ref: ${{ github.event.pull_request.head.ref || github.ref }} repository: ${{ github.event.pull_request.head.repo.full_name || 
github.repository }} submodules: true - token: ${{ secrets.GITHUB_TOKEN }} + token: ${{ secrets.REPO_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} - name: Run go mod tidy run: go mod tidy working-directory: ./go From 6ab9794b04a6fff6282f5cd0218a0a6def51be95 Mon Sep 17 00:00:00 2001 From: Neil Macneale IV Date: Mon, 15 May 2023 16:52:04 -0700 Subject: [PATCH 65/82] Print global arguments for 'dolt --help' but not 'dolt' --- go/cmd/dolt/dolt.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/go/cmd/dolt/dolt.go b/go/cmd/dolt/dolt.go index 2b97e77572..9b6675c20b 100644 --- a/go/cmd/dolt/dolt.go +++ b/go/cmd/dolt/dolt.go @@ -427,6 +427,12 @@ func runMain() int { globalArgs, args, initCliContext, printUsage, err := splitArgsOnSubCommand(args) if printUsage { doltCommand.PrintUsage("dolt") + _, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString("dolt", doc, globalArgParser)) + + cli.Println("\n\nDolt subcommands are in transition to using the flags listed below as global flags. ") + cli.Println("The sql subcommand is currently the only command that uses these flags. All other commands will ignore them.\n") + usage() + return 0 } if err != nil { From 9944a32b733b6ef046d0360ba109ad409c6f67ae Mon Sep 17 00:00:00 2001 From: Neil Macneale IV Date: Mon, 15 May 2023 17:26:17 -0700 Subject: [PATCH 66/82] Now with no \n chars --- go/cmd/dolt/dolt.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/go/cmd/dolt/dolt.go b/go/cmd/dolt/dolt.go index 9b6675c20b..ca83ffd20a 100644 --- a/go/cmd/dolt/dolt.go +++ b/go/cmd/dolt/dolt.go @@ -429,8 +429,11 @@ func runMain() int { doltCommand.PrintUsage("dolt") _, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString("dolt", doc, globalArgParser)) - cli.Println("\n\nDolt subcommands are in transition to using the flags listed below as global flags. ") - cli.Println("The sql subcommand is currently the only command that uses these flags. 
All other commands will ignore them.\n") + specialMsg := ` +Dolt subcommands are in transition to using the flags listed below as global flags. +The sql subcommand is currently the only command that uses these flags. All other commands will ignore them. +` + cli.Println(specialMsg) usage() return 0 From 639c8cf18123cac1eeb45bbb17cfc516f04a8300 Mon Sep 17 00:00:00 2001 From: Hydrocharged Date: Tue, 16 May 2023 12:59:00 +0000 Subject: [PATCH 67/82] [ga-bump-dep] Bump dependency in Dolt by Hydrocharged --- go/go.mod | 4 +++- go/go.sum | 8 ++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/go/go.mod b/go/go.mod index 1a32d8b4d4..7ce69f2a5d 100644 --- a/go/go.mod +++ b/go/go.mod @@ -59,7 +59,7 @@ require ( github.com/cespare/xxhash v1.1.0 github.com/creasty/defaults v1.6.0 github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2 - github.com/dolthub/go-mysql-server v0.15.1-0.20230515160548-5bd8954a0f02 + github.com/dolthub/go-mysql-server v0.15.1-0.20230516125723-110a1193be66 github.com/dolthub/swiss v0.1.0 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/jmoiron/sqlx v1.3.4 @@ -95,6 +95,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dolthub/go-icu-regex v0.0.0-20230516121657-5424676dd4ac // indirect github.com/dolthub/jsonpath v0.0.1 // indirect github.com/dolthub/maphash v0.0.0-20221220182448-74e1e1ea1577 // indirect github.com/go-fonts/liberation v0.2.0 // indirect @@ -125,6 +126,7 @@ require ( github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect github.com/rs/xid v1.4.0 // indirect + github.com/tetratelabs/wazero v1.0.3 // indirect github.com/tidwall/gjson v1.14.4 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect diff --git a/go/go.sum b/go/go.sum index 4e0aa642b6..d7d1e2f203 100644 --- a/go/go.sum +++ b/go/go.sum @@ -166,8 
+166,10 @@ github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2 h1:u3PMzfF8RkKd3lB9pZ2bfn0qEG+1G github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2/go.mod h1:mIEZOHnFx4ZMQeawhw9rhsj+0zwQj7adVsnBX7t+eKY= github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U= github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0= -github.com/dolthub/go-mysql-server v0.15.1-0.20230515160548-5bd8954a0f02 h1:rLb85kS5mwt4HnHN+hXJIyge9rlqA35Kgsw4i+bTuvc= -github.com/dolthub/go-mysql-server v0.15.1-0.20230515160548-5bd8954a0f02/go.mod h1:YO4FtMULZ/HuKxlvm7QfvTE8uBKEv+1LtK2A3FrrGe4= +github.com/dolthub/go-icu-regex v0.0.0-20230516121657-5424676dd4ac h1:/bsG4AyV5MesUPw7LSkxHKMsP9f+LSLrsMbBxLP6+Mk= +github.com/dolthub/go-icu-regex v0.0.0-20230516121657-5424676dd4ac/go.mod h1:xLKpPutKiF9FxxcLG3gf/JA95YZQNAqBegkDRe1AZF4= +github.com/dolthub/go-mysql-server v0.15.1-0.20230516125723-110a1193be66 h1:GNS0Ym5152HqqWNaRR40h58EhKYmQOEIlRuO4PKn1rQ= +github.com/dolthub/go-mysql-server v0.15.1-0.20230516125723-110a1193be66/go.mod h1:J+NMF5CgU6b3tBI4G2QVb1AUSf+YoPDDCsZkcM7rY1I= github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488 h1:0HHu0GWJH0N6a6keStrHhUAK5/o9LVfkh44pvsV4514= github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488/go.mod h1:ehexgi1mPxRTk0Mok/pADALuHbvATulTh6gzr7NzZto= github.com/dolthub/jsonpath v0.0.1 h1:Nd+T3U+XisK3kOuxtABS5IIbZqXVIlOR9VYquyjQ0u0= @@ -623,6 +625,8 @@ github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKs github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/tealeg/xlsx v1.0.5 h1:+f8oFmvY8Gw1iUXzPk+kz+4GpbDZPK1FhPiQRd+ypgE= github.com/tealeg/xlsx v1.0.5/go.mod h1:btRS8dz54TDnvKNosuAqxrM1QgN1udgk9O34bDCnORM= +github.com/tetratelabs/wazero v1.0.3 h1:IWmaxc/5vKg71DE+c0SLjjLFAA3u3tD/Zegpgif2Wpo= +github.com/tetratelabs/wazero v1.0.3/go.mod h1:wYx2gNRg8/WihJfSDxA1TIL8H+GkfLYm+bIfbblu9VQ= github.com/thepudds/swisstable 
v0.0.0-20221011152303-9c77dc657777 h1:5u+6YWU2faS+Sr/x8j9yalMpSDUkatNOZWXV3wMUCGQ= github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM= From ef6f8e4a065c59737af72cfb1283f85f9e859f5a Mon Sep 17 00:00:00 2001 From: Daylon Wilkins Date: Tue, 16 May 2023 06:06:44 -0700 Subject: [PATCH 68/82] Updated deps --- go/Godeps/LICENSES | 625 ++++++++++++++++++++++++++++++--------------- 1 file changed, 417 insertions(+), 208 deletions(-) diff --git a/go/Godeps/LICENSES b/go/Godeps/LICENSES index 8902e8dd40..a4c5eecf34 100644 --- a/go/Godeps/LICENSES +++ b/go/Godeps/LICENSES @@ -2072,6 +2072,215 @@ Library. = LICENSE bcd6f24ec7cb31e4eac53a4e067489b1ddd360b968ceb45faf5645ec = ================================================================================ +================================================================================ += github.com/dolthub/go-icu-regex licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += LICENSE 75cd5500580317e758b5e984e017524dc961140e4889f7d427f85e41 = +================================================================================ + ================================================================================ = github.com/dolthub/go-mysql-server licensed under: = @@ -6474,214 +6683,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. = LICENSE 41cbff0d41b7d20dd9d70de1e0380fdca6ec1f42d2533c75c5c1bec3 = ================================================================================ -================================================================================ -= github.com/prometheus/procfs licensed under: = - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -= LICENSE 41cbff0d41b7d20dd9d70de1e0380fdca6ec1f42d2533c75c5c1bec3 = -================================================================================ - ================================================================================ = github.com/rivo/uniseg licensed under: = @@ -6892,6 +6893,214 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. = LICENSE 8324b31a3793e08aae6a3c5bad20c4f41d089fd801d4d24c21aa6ea2 = ================================================================================ +================================================================================ += github.com/tetratelabs/wazero licensed under: = + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2020-2021 wazero authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += LICENSE 06e378fe3dca2626ad24b9814356eb2d02a762c13234b0d13cf821e0 = +================================================================================ + ================================================================================ = github.com/tidwall/gjson licensed under: = From 09648ef769e58bf653ed77543266e931ac07a082 Mon Sep 17 00:00:00 2001 From: Daylon Wilkins Date: Tue, 16 May 2023 06:18:50 -0700 Subject: [PATCH 69/82] Second attempt at updating deps --- go/Godeps/LICENSES | 208 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 208 insertions(+) diff --git a/go/Godeps/LICENSES b/go/Godeps/LICENSES index a4c5eecf34..0080866598 100644 --- a/go/Godeps/LICENSES +++ b/go/Godeps/LICENSES @@ -6683,6 +6683,214 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. = LICENSE 41cbff0d41b7d20dd9d70de1e0380fdca6ec1f42d2533c75c5c1bec3 = ================================================================================ +================================================================================ += github.com/prometheus/procfs licensed under: = + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += LICENSE 41cbff0d41b7d20dd9d70de1e0380fdca6ec1f42d2533c75c5c1bec3 = +================================================================================ + ================================================================================ = github.com/rivo/uniseg licensed under: = From ad455cf1edc4bac32d92ba37b7603d884464442d Mon Sep 17 00:00:00 2001 From: timsehn Date: Tue, 16 May 2023 16:28:46 +0000 Subject: [PATCH 70/82] [ga-bump-release] Update Dolt version to 1.1.0 and release v1.1.0 --- go/cmd/dolt/dolt.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/cmd/dolt/dolt.go b/go/cmd/dolt/dolt.go index a3d0e0bb2c..45f5807fa7 100644 --- a/go/cmd/dolt/dolt.go +++ b/go/cmd/dolt/dolt.go @@ -62,7 +62,7 @@ import ( ) const ( - Version = "1.0.2" + Version = "1.1.0" ) var dumpDocsCommand = &commands.DumpDocsCmd{} From 2b159425f59a3b05e16d7ed0da5dec81364d452d Mon Sep 17 00:00:00 2001 From: Aaron Son Date: Tue, 16 May 2023 10:12:36 -0700 Subject: [PATCH 71/82] go: doltdb: hooksdatabase: Pass replication status controller as a field on the pass-by-value struct, instead of through the context. --- go/libraries/doltcore/doltdb/doltdb.go | 30 +++++------ go/libraries/doltcore/doltdb/hooksdatabase.go | 52 ++++++++----------- 2 files changed, 36 insertions(+), 46 deletions(-) diff --git a/go/libraries/doltcore/doltdb/doltdb.go b/go/libraries/doltcore/doltdb/doltdb.go index a95be4e82d..4722582ab7 100644 --- a/go/libraries/doltcore/doltdb/doltdb.go +++ b/go/libraries/doltcore/doltdb/doltdb.go @@ -1160,11 +1160,10 @@ func (ddb *DoltDB) CopyWorkingSet(ctx context.Context, fromWSRef ref.WorkingSetR // DeleteBranch deletes the branch given, returning an error if it doesn't exist. 
func (ddb *DoltDB) DeleteBranch(ctx context.Context, branch ref.DoltRef, replicationStatus *ReplicationStatusController) error { - rsCtx := withReplicaState(ctx, replicationStatus) - return ddb.deleteRef(rsCtx, branch) + return ddb.deleteRef(ctx, branch, replicationStatus) } -func (ddb *DoltDB) deleteRef(ctx context.Context, dref ref.DoltRef) error { +func (ddb *DoltDB) deleteRef(ctx context.Context, dref ref.DoltRef, replicationStatus *ReplicationStatusController) error { ds, err := ddb.db.GetDataset(ctx, dref.String()) if err != nil { @@ -1185,7 +1184,7 @@ func (ddb *DoltDB) deleteRef(ctx context.Context, dref ref.DoltRef) error { } } - _, err = ddb.db.Delete(ctx, ds) + _, err = ddb.db.withReplicationStatusController(replicationStatus).Delete(ctx, ds) return err } @@ -1251,8 +1250,7 @@ func (ddb *DoltDB) UpdateWorkingSet( return err } - ctx = withReplicaState(ctx, replicationStatus) - _, err = ddb.db.UpdateWorkingSet(ctx, ds, datas.WorkingSetSpec{ + _, err = ddb.db.withReplicationStatusController(replicationStatus).UpdateWorkingSet(ctx, ds, datas.WorkingSetSpec{ Meta: meta, WorkingRoot: workingRootRef, StagedRoot: stagedRef, @@ -1289,13 +1287,13 @@ func (ddb *DoltDB) CommitWithWorkingSet( return nil, err } - rsCtx := withReplicaState(ctx, replicationStatus) - commitDataset, _, err := ddb.db.CommitWithWorkingSet(rsCtx, headDs, wsDs, commit.Roots.Staged.nomsValue(), datas.WorkingSetSpec{ - Meta: meta, - WorkingRoot: workingRootRef, - StagedRoot: stagedRef, - MergeState: mergeState, - }, prevHash, commit.CommitOptions) + commitDataset, _, err := ddb.db.withReplicationStatusController(replicationStatus). 
+ CommitWithWorkingSet(ctx, headDs, wsDs, commit.Roots.Staged.nomsValue(), datas.WorkingSetSpec{ + Meta: meta, + WorkingRoot: workingRootRef, + StagedRoot: stagedRef, + MergeState: mergeState, + }, prevHash, commit.CommitOptions) if err != nil { return nil, err @@ -1329,7 +1327,7 @@ func (ddb *DoltDB) DeleteWorkingSet(ctx context.Context, workingSetRef ref.Worki } func (ddb *DoltDB) DeleteTag(ctx context.Context, tag ref.DoltRef) error { - err := ddb.deleteRef(ctx, tag) + err := ddb.deleteRef(ctx, tag, nil) if err == ErrBranchNotFound { return ErrTagNotFound @@ -1356,7 +1354,7 @@ func (ddb *DoltDB) NewWorkspaceAtCommit(ctx context.Context, workRef ref.DoltRef } func (ddb *DoltDB) DeleteWorkspace(ctx context.Context, workRef ref.DoltRef) error { - err := ddb.deleteRef(ctx, workRef) + err := ddb.deleteRef(ctx, workRef, nil) if err == ErrBranchNotFound { return ErrWorkspaceNotFound @@ -1671,7 +1669,7 @@ func (ddb *DoltDB) RemoveStashAtIdx(ctx context.Context, idx int) error { // RemoveAllStashes removes the stash list Dataset from the database, // which equivalent to removing Stash entries from the stash list. 
func (ddb *DoltDB) RemoveAllStashes(ctx context.Context) error { - err := ddb.deleteRef(ctx, ref.NewStashRef()) + err := ddb.deleteRef(ctx, ref.NewStashRef(), nil) if err == ErrBranchNotFound { return nil } diff --git a/go/libraries/doltcore/doltdb/hooksdatabase.go b/go/libraries/doltcore/doltdb/hooksdatabase.go index 2b60c5dea5..9565485ac9 100644 --- a/go/libraries/doltcore/doltdb/hooksdatabase.go +++ b/go/libraries/doltcore/doltdb/hooksdatabase.go @@ -17,35 +17,17 @@ package doltdb import ( "context" "io" + "sync" "github.com/dolthub/dolt/go/store/datas" "github.com/dolthub/dolt/go/store/hash" "github.com/dolthub/dolt/go/store/types" - - "sync" ) -type replicaStateContextKey struct { -} - -func withReplicaState(ctx context.Context, c *ReplicationStatusController) context.Context { - if c != nil { - return context.WithValue(ctx, replicaStateContextKey{}, c) - } - return ctx -} - -func getReplicaState(ctx context.Context) *ReplicationStatusController { - v := ctx.Value(replicaStateContextKey{}) - if v == nil { - return nil - } - return v.(*ReplicationStatusController) -} - type hooksDatabase struct { datas.Database postCommitHooks []CommitHook + rsc *ReplicationStatusController } // CommitHook is an abstraction for executing arbitrary commands after atomic database commits @@ -61,7 +43,8 @@ type CommitHook interface { } func (db hooksDatabase) SetCommitHooks(ctx context.Context, postHooks []CommitHook) hooksDatabase { - db.postCommitHooks = postHooks + db.postCommitHooks = make([]CommitHook, len(postHooks)) + copy(db.postCommitHooks, postHooks) return db } @@ -72,16 +55,25 @@ func (db hooksDatabase) SetCommitHookLogger(ctx context.Context, wr io.Writer) h return db } +func (db hooksDatabase) withReplicationStatusController(rsc *ReplicationStatusController) hooksDatabase { + db.rsc = rsc + return db +} + func (db hooksDatabase) PostCommitHooks() []CommitHook { - return db.postCommitHooks + toret := make([]CommitHook, len(db.postCommitHooks)) + copy(toret, 
db.postCommitHooks) + return toret } func (db hooksDatabase) ExecuteCommitHooks(ctx context.Context, ds datas.Dataset, onlyWS bool) { var wg sync.WaitGroup - rsc := getReplicaState(ctx) + rsc := db.rsc + var ioff int if rsc != nil { - rsc.Wait = make([]func(context.Context) error, len(db.postCommitHooks)) - rsc.NotifyWaitFailed = make([]func(), len(db.postCommitHooks)) + ioff = len(rsc.Wait) + rsc.Wait = append(rsc.Wait, make([]func(context.Context) error, len(db.postCommitHooks))...) + rsc.NotifyWaitFailed = append(rsc.NotifyWaitFailed, make([]func(), len(db.postCommitHooks))...) } for il, hook := range db.postCommitHooks { if !onlyWS || hook.ExecuteForWorkingSets() { @@ -95,13 +87,13 @@ func (db hooksDatabase) ExecuteCommitHooks(ctx context.Context, ds datas.Dataset hook.HandleError(ctx, err) } if rsc != nil { - rsc.Wait[i] = f + rsc.Wait[i+ioff] = f if nf, ok := hook.(interface { NotifyWaitFailed() }); ok { - rsc.NotifyWaitFailed[i] = nf.NotifyWaitFailed + rsc.NotifyWaitFailed[i+ioff] = nf.NotifyWaitFailed } else { - rsc.NotifyWaitFailed[i] = func() {} + rsc.NotifyWaitFailed[i+ioff] = func() {} } } }() @@ -109,8 +101,8 @@ func (db hooksDatabase) ExecuteCommitHooks(ctx context.Context, ds datas.Dataset } wg.Wait() if rsc != nil { - j := 0 - for i := range rsc.Wait { + j := ioff + for i := ioff; i < len(rsc.Wait); i++ { if rsc.Wait[i] != nil { rsc.Wait[j] = rsc.Wait[i] rsc.NotifyWaitFailed[j] = rsc.NotifyWaitFailed[i] From a19dadd32b8bd580c815620368a0d0a7cff21dd5 Mon Sep 17 00:00:00 2001 From: Neil Macneale IV Date: Tue, 16 May 2023 13:21:34 -0700 Subject: [PATCH 72/82] Use UTC timestamp in date comparison test. Allows tests to run on laptops which aren't in UTC. 
--- integration-tests/bats/sql.bats | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-tests/bats/sql.bats b/integration-tests/bats/sql.bats index 5d78e56725..2591057a94 100755 --- a/integration-tests/bats/sql.bats +++ b/integration-tests/bats/sql.bats @@ -2544,7 +2544,7 @@ SQL [ "$status" -eq 0 ] [[ "$output" =~ "3" ]] || false - run dolt sql -q "SELECT COUNT(*) from dolt_diff_t where to_commit_date < now()" + run dolt sql -q "SELECT COUNT(*) from dolt_diff_t where to_commit_date < UTC_TIMESTAMP()" [ "$status" -eq 0 ] [[ "$output" =~ "3" ]] || false } From 6ccae75a8d1d684a7b4cb06317ffe9e3332e3356 Mon Sep 17 00:00:00 2001 From: Aaron Son Date: Tue, 16 May 2023 14:21:41 -0700 Subject: [PATCH 73/82] integration-tests/go-sql-server-driver: Add TestCluster/last_updated_heartbeats test which asserts that last_updated does not grow while there are no writes to replicate. --- .../tests/sql-server-cluster.yaml | 68 +++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/integration-tests/go-sql-server-driver/tests/sql-server-cluster.yaml b/integration-tests/go-sql-server-driver/tests/sql-server-cluster.yaml index 1bd5b6ec00..04c1959a4d 100644 --- a/integration-tests/go-sql-server-driver/tests/sql-server-cluster.yaml +++ b/integration-tests/go-sql-server-driver/tests/sql-server-cluster.yaml @@ -925,6 +925,74 @@ tests: result: columns: ["count(*)"] rows: [["15"]] +- name: last_updated heartbeats + multi_repos: + - name: server1 + with_files: + - name: server.yaml + contents: | + log_level: trace + listener: + host: 0.0.0.0 + port: 3309 + cluster: + standby_remotes: + - name: standby + remote_url_template: http://localhost:3852/{database} + bootstrap_role: primary + bootstrap_epoch: 1 + remotesapi: + port: 3851 + server: + args: ["--config", "server.yaml"] + port: 3309 + - name: server2 + with_files: + - name: server.yaml + contents: | + log_level: trace + listener: + host: 0.0.0.0 + port: 3310 + cluster: + standby_remotes: + - name: 
standby + remote_url_template: http://localhost:3851/{database} + bootstrap_role: standby + bootstrap_epoch: 1 + remotesapi: + port: 3852 + server: + args: ["--config", "server.yaml"] + port: 3310 + connections: + - on: server1 + queries: + - exec: 'create database repo1' + - query: "call dolt_assume_cluster_role('standby', 2)" + result: + columns: ["status"] + rows: [["0"]] + - on: server1 + queries: + - query: "call dolt_assume_cluster_role('primary', 3)" + result: + columns: ["status"] + rows: [["0"]] + - on: server2 + queries: + - query: "SELECT TIMESTAMPDIFF(SECOND, CONVERT_TZ(last_update, 'GMT', @@GLOBAL.time_zone), NOW()) < 5 AS within_threshold FROM dolt_cluster.dolt_cluster_status;" + result: + columns: ["within_threshold"] + rows: [["1"]] + - query: "SELECT SLEEP(5)" + result: + columns: ["SLEEP(5)"] + rows: [["0"]] + - query: "SELECT TIMESTAMPDIFF(SECOND, CONVERT_TZ(last_update, 'GMT', @@GLOBAL.time_zone), NOW()) < 5 AS within_threshold FROM dolt_cluster.dolt_cluster_status;" + result: + columns: ["within_threshold"] + rows: [["1"]] - name: create new database, clone a database, primary replicates to standby, standby has both databases multi_repos: - name: server1 From 8bbb49b79efdb159618a7656ae794deae4b1f466 Mon Sep 17 00:00:00 2001 From: Aaron Son Date: Tue, 16 May 2023 15:42:14 -0700 Subject: [PATCH 74/82] go: doltdb,sqle/cluster: Some PR feedback. Small cleanups. 
--- go/libraries/doltcore/doltdb/hooksdatabase.go | 11 ++++++++--- go/libraries/doltcore/sqle/cluster/commithook.go | 11 ++++++----- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/go/libraries/doltcore/doltdb/hooksdatabase.go b/go/libraries/doltcore/doltdb/hooksdatabase.go index 9565485ac9..8b14765ca1 100644 --- a/go/libraries/doltcore/doltdb/hooksdatabase.go +++ b/go/libraries/doltcore/doltdb/hooksdatabase.go @@ -42,6 +42,13 @@ type CommitHook interface { ExecuteForWorkingSets() bool } +// If a commit hook supports this interface, it can be notified if waiting for +// replication in the callback returned by |Execute| failed to complete in time +// or returned an error. +type NotifyWaitFailedCommitHook interface { + NotifyWaitFailed() +} + func (db hooksDatabase) SetCommitHooks(ctx context.Context, postHooks []CommitHook) hooksDatabase { db.postCommitHooks = make([]CommitHook, len(postHooks)) copy(db.postCommitHooks, postHooks) @@ -88,9 +95,7 @@ func (db hooksDatabase) ExecuteCommitHooks(ctx context.Context, ds datas.Dataset } if rsc != nil { rsc.Wait[i+ioff] = f - if nf, ok := hook.(interface { - NotifyWaitFailed() - }); ok { + if nf, ok := hook.(NotifyWaitFailedCommitHook); ok { rsc.NotifyWaitFailed[i+ioff] = nf.NotifyWaitFailed } else { rsc.NotifyWaitFailed[i+ioff] = func() {} diff --git a/go/libraries/doltcore/sqle/cluster/commithook.go b/go/libraries/doltcore/sqle/cluster/commithook.go index 1889257f4e..2e3d822ed2 100644 --- a/go/libraries/doltcore/sqle/cluster/commithook.go +++ b/go/libraries/doltcore/sqle/cluster/commithook.go @@ -32,6 +32,7 @@ import ( ) var _ doltdb.CommitHook = (*commithook)(nil) +var _ doltdb.NotifyWaitFailedCommitHook = (*commithook)(nil) type commithook struct { rootLgr *logrus.Entry @@ -56,7 +57,7 @@ type commithook struct { // This is a slice of notification channels maintained by the // commithook. The semantics are: // 1. All accesses to |successChs| must happen with |mu| held. - // 2. 
There maybe be |0| or more channels in the slice. + // 2. There may be |0| or more channels in the slice. // 3. As a reader, if |successChs| is non-empty, you should just read a value, for example, |successChs[0]| and use it. All entries will be closed at the same time. If |successChs| is empty when you need a channel, you should add one to it. // 4. If you read a channel out of |successChs|, that channel will be closed on the next successful replication attempt. It will not be closed before then. successChs []chan struct{} @@ -65,7 +66,7 @@ type commithook struct { // we are not already caught up, instead of blocking on a successCh // actually indicated we are caught up. This is set to by a call to // NotifyWaitFailed(), an optional interface on CommitHook. - circuitBreakerOpen bool + fastFailReplicationWait bool role Role @@ -162,7 +163,7 @@ func (h *commithook) replicate(ctx context.Context) { close(ch) } h.successChs = nil - h.circuitBreakerOpen = false + h.fastFailReplicationWait = false } h.cond.Wait() lgr.Tracef("cluster/commithook: background thread: woken up.") @@ -413,7 +414,7 @@ func (h *commithook) Execute(ctx context.Context, ds datas.Dataset, db datas.Dat } var waitF func(context.Context) error if !h.isCaughtUp() { - if h.circuitBreakerOpen { + if h.fastFailReplicationWait { waitF = func(ctx context.Context) error { return fmt.Errorf("circuit breaker for replication to %s/%s is open. 
this commit did not necessarily replicate successfully.", h.remotename, h.dbname) } @@ -438,7 +439,7 @@ func (h *commithook) Execute(ctx context.Context, ds datas.Dataset, db datas.Dat func (h *commithook) NotifyWaitFailed() { h.mu.Lock() defer h.mu.Unlock() - h.circuitBreakerOpen = true + h.fastFailReplicationWait = true } func (h *commithook) HandleError(ctx context.Context, err error) error { From 621104344fb434f2d234e3091c65c4b636e7615f Mon Sep 17 00:00:00 2001 From: JCOR11599 Date: Wed, 17 May 2023 04:30:35 +0000 Subject: [PATCH 75/82] [ga-bump-dep] Bump dependency in Dolt by JCOR11599 --- go/go.mod | 2 +- go/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go/go.mod b/go/go.mod index 7ce69f2a5d..6d3576c84b 100644 --- a/go/go.mod +++ b/go/go.mod @@ -59,7 +59,7 @@ require ( github.com/cespare/xxhash v1.1.0 github.com/creasty/defaults v1.6.0 github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2 - github.com/dolthub/go-mysql-server v0.15.1-0.20230516125723-110a1193be66 + github.com/dolthub/go-mysql-server v0.15.1-0.20230517042856-2405a8d89854 github.com/dolthub/swiss v0.1.0 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/jmoiron/sqlx v1.3.4 diff --git a/go/go.sum b/go/go.sum index d7d1e2f203..39df4ccd7d 100644 --- a/go/go.sum +++ b/go/go.sum @@ -168,8 +168,8 @@ github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U= github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0= github.com/dolthub/go-icu-regex v0.0.0-20230516121657-5424676dd4ac h1:/bsG4AyV5MesUPw7LSkxHKMsP9f+LSLrsMbBxLP6+Mk= github.com/dolthub/go-icu-regex v0.0.0-20230516121657-5424676dd4ac/go.mod h1:xLKpPutKiF9FxxcLG3gf/JA95YZQNAqBegkDRe1AZF4= -github.com/dolthub/go-mysql-server v0.15.1-0.20230516125723-110a1193be66 h1:GNS0Ym5152HqqWNaRR40h58EhKYmQOEIlRuO4PKn1rQ= -github.com/dolthub/go-mysql-server v0.15.1-0.20230516125723-110a1193be66/go.mod h1:J+NMF5CgU6b3tBI4G2QVb1AUSf+YoPDDCsZkcM7rY1I= 
+github.com/dolthub/go-mysql-server v0.15.1-0.20230517042856-2405a8d89854 h1:2AMV4KSxCp6sHA3eWlKX/93HEVNacfqFJhSQw39VRh4= +github.com/dolthub/go-mysql-server v0.15.1-0.20230517042856-2405a8d89854/go.mod h1:J+NMF5CgU6b3tBI4G2QVb1AUSf+YoPDDCsZkcM7rY1I= github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488 h1:0HHu0GWJH0N6a6keStrHhUAK5/o9LVfkh44pvsV4514= github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488/go.mod h1:ehexgi1mPxRTk0Mok/pADALuHbvATulTh6gzr7NzZto= github.com/dolthub/jsonpath v0.0.1 h1:Nd+T3U+XisK3kOuxtABS5IIbZqXVIlOR9VYquyjQ0u0= From 9226024961db37de9a81028d73e7fa39d92f643c Mon Sep 17 00:00:00 2001 From: Nick Tobey Date: Wed, 17 May 2023 11:14:37 -0700 Subject: [PATCH 76/82] Add clarifying TODO to GetWorkingSet. --- go/libraries/doltcore/sqle/database.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/go/libraries/doltcore/sqle/database.go b/go/libraries/doltcore/sqle/database.go index 7441235dbe..53cfccfc68 100644 --- a/go/libraries/doltcore/sqle/database.go +++ b/go/libraries/doltcore/sqle/database.go @@ -651,7 +651,9 @@ func (db Database) GetRoot(ctx *sql.Context) (*doltdb.RootValue, error) { // GetWorkingSet gets the current working set for the database. // If there is no working set (most likely because the DB is in Detached Head mode, return an error. // If a command needs to work while in Detached Head, that command should call sess.LookupDbState directly. -// TODO: Replace all uses of dbState.WorkingSet, including this, with a new interface. +// TODO: This is a temporary measure to make sure that new commands that call GetWorkingSet don't unexpectedly receive +// a null pointer. In the future, we should replace all uses of dbState.WorkingSet, including this, with a new interface +// where users avoid handling the WorkingSet directly. 
func (db Database) GetWorkingSet(ctx *sql.Context) (*doltdb.WorkingSet, error) { sess := dsess.DSessFromSess(ctx.Session) dbState, ok, err := sess.LookupDbState(ctx, db.Name()) From f4f4c200be3d67f824fad7acc203e294e76fef40 Mon Sep 17 00:00:00 2001 From: Maximilian Hoffman Date: Thu, 18 May 2023 09:02:05 -0700 Subject: [PATCH 77/82] [no-release-notes] fix GMS bump (#5979) --- go/go.mod | 2 +- go/go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go/go.mod b/go/go.mod index 6d3576c84b..34ac1dad17 100644 --- a/go/go.mod +++ b/go/go.mod @@ -59,7 +59,7 @@ require ( github.com/cespare/xxhash v1.1.0 github.com/creasty/defaults v1.6.0 github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2 - github.com/dolthub/go-mysql-server v0.15.1-0.20230517042856-2405a8d89854 + github.com/dolthub/go-mysql-server v0.15.1-0.20230517201855-8477b3b02370 github.com/dolthub/swiss v0.1.0 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/jmoiron/sqlx v1.3.4 diff --git a/go/go.sum b/go/go.sum index 39df4ccd7d..2f7b1f7e2c 100644 --- a/go/go.sum +++ b/go/go.sum @@ -170,6 +170,8 @@ github.com/dolthub/go-icu-regex v0.0.0-20230516121657-5424676dd4ac h1:/bsG4AyV5M github.com/dolthub/go-icu-regex v0.0.0-20230516121657-5424676dd4ac/go.mod h1:xLKpPutKiF9FxxcLG3gf/JA95YZQNAqBegkDRe1AZF4= github.com/dolthub/go-mysql-server v0.15.1-0.20230517042856-2405a8d89854 h1:2AMV4KSxCp6sHA3eWlKX/93HEVNacfqFJhSQw39VRh4= github.com/dolthub/go-mysql-server v0.15.1-0.20230517042856-2405a8d89854/go.mod h1:J+NMF5CgU6b3tBI4G2QVb1AUSf+YoPDDCsZkcM7rY1I= +github.com/dolthub/go-mysql-server v0.15.1-0.20230517201855-8477b3b02370 h1:YGNpsPKq7u5cAow/5Sjb3ncu5Qh6SwCp5jBBMhtCnRs= +github.com/dolthub/go-mysql-server v0.15.1-0.20230517201855-8477b3b02370/go.mod h1:J+NMF5CgU6b3tBI4G2QVb1AUSf+YoPDDCsZkcM7rY1I= github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488 h1:0HHu0GWJH0N6a6keStrHhUAK5/o9LVfkh44pvsV4514= github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488/go.mod 
h1:ehexgi1mPxRTk0Mok/pADALuHbvATulTh6gzr7NzZto= github.com/dolthub/jsonpath v0.0.1 h1:Nd+T3U+XisK3kOuxtABS5IIbZqXVIlOR9VYquyjQ0u0= From fcb5455fd0cd2aa0b5802d5fa2363bcdae40ace7 Mon Sep 17 00:00:00 2001 From: Tim Sehn Date: Thu, 18 May 2023 11:11:31 -0700 Subject: [PATCH 78/82] Stubs and a couple bats tests for load data bats tests improvements --- integration-tests/bats/sql-load-data.bats | 196 ++++++++++++++++++---- 1 file changed, 165 insertions(+), 31 deletions(-) diff --git a/integration-tests/bats/sql-load-data.bats b/integration-tests/bats/sql-load-data.bats index b513fa01f2..e089666bf5 100644 --- a/integration-tests/bats/sql-load-data.bats +++ b/integration-tests/bats/sql-load-data.bats @@ -1,11 +1,13 @@ #!/usr/bin/env bats load $BATS_TEST_DIRNAME/helper/common.bash +load $BATS_TEST_DIRNAME/helper/query-server-common.bash setup() { setup_common } teardown() { + stop_sql_server assert_feature_version teardown_common } @@ -17,13 +19,11 @@ pk||c1||c2||c3||c4||c5 1||1||2||3||4||5 DELIM - run dolt sql << SQL + dolt sql << SQL CREATE TABLE test(pk int primary key, c1 int, c2 int, c3 int, c4 int, c5 int); LOAD DATA INFILE '1pk5col-ints.csv' INTO TABLE test CHARACTER SET UTF8MB4 FIELDS TERMINATED BY '||' ESCAPED BY '' LINES TERMINATED BY '\n' IGNORE 1 LINES; SQL - [ "$status" -eq 0 ] - run dolt sql -r csv -q "select * from test" [ "$status" -eq 0 ] @@ -56,13 +56,11 @@ pk||c1||c2||c3||c4||c5 "1"||"1"||"2"||"3"||"4"||"5" DELIM - run dolt sql << SQL + dolt sql << SQL CREATE TABLE test(pk int primary key, c1 int, c2 int, c3 int, c4 int, c5 int); LOAD DATA INFILE '1pk5col-ints.csv' INTO TABLE test CHARACTER SET UTF8MB4 FIELDS TERMINATED BY '||' ENCLOSED BY '"' ESCAPED BY '' LINES TERMINATED BY '\n' IGNORE 1 LINES; SQL - [ "$status" -eq 0 ] - run dolt sql -r csv -q "select * from test" [ "$status" -eq 0 ] @@ -80,13 +78,11 @@ ignore me sssYo DELIM - run dolt sql << SQL + dolt sql << SQL CREATE TABLE test(pk longtext); LOAD DATA INFILE 'prefixed.txt' INTO TABLE test 
CHARACTER SET UTF8MB4 LINES STARTING BY 'sss' IGNORE 1 LINES; SQL - [ "$status" -eq 0 ] - run dolt sql -r csv -q "select * from test ORDER BY pk" [ "$status" -eq 0 ] @@ -103,13 +99,11 @@ pk,c1 1,1 DELIM - run dolt sql << SQL + dolt sql << SQL CREATE TABLE test(pk int primary key, c1 int, c2 int); LOAD DATA INFILE '1pk2col-ints.csv' INTO TABLE test FIELDS TERMINATED BY ',' IGNORE 1 LINES; SQL - [ "$status" -eq 0 ] - run dolt sql -r csv -q "select * from test" [ "$status" -eq 0 ] @@ -126,13 +120,11 @@ pk c1 1 1 DELIM - run dolt sql << SQL + dolt sql << SQL CREATE TABLE test(pk int primary key, c1 int); LOAD DATA INFILE '1pk2col-ints.csv' INTO TABLE test FIELDS TERMINATED BY '\t' IGNORE 1 LINES; SQL - [ "$status" -eq 0 ] - run dolt sql -r csv -q "select * from test" [ "$status" -eq 0 ] @@ -148,13 +140,11 @@ pk NULL DELIM - run dolt sql << SQL + dolt sql << SQL CREATE TABLE test(pk longtext); LOAD DATA INFILE '1pk2col-ints.csv' INTO TABLE test FIELDS IGNORE 1 LINES; SQL - [ "$status" -eq 0 ] - run dolt sql -q "select COUNT(*) from test WHERE pk IS NULL" [ "$status" -eq 0 ] [[ "$output" =~ "2" ]] || false @@ -169,13 +159,11 @@ pk,c1 "hello","2" DELIM - run dolt sql << SQL + dolt sql << SQL CREATE TABLE test(pk int, c1 longtext); LOAD DATA INFILE '1pk2col-ints.csv' INTO TABLE test FIELDS ENCLOSED BY '"' TERMINATED BY ',' IGNORE 1 LINES (c1,pk); SQL - [ "$status" -eq 0 ] - run dolt sql -r csv -q "select * from test" [ "$status" -eq 0 ] @@ -193,13 +181,11 @@ SQL 4,"a string containing a \", quote and comma",102.20 DELIM - run dolt sql << SQL + dolt sql << SQL CREATE TABLE test(pk int, c1 longtext, c2 float); LOAD DATA INFILE 'complex.csv' INTO TABLE test FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"'; SQL - [ "$status" -eq 0 ] - run dolt sql -r csv -q "select * from test" [ "$status" -eq 0 ] @@ -219,13 +205,11 @@ SQL "new\ns" DELIM - run dolt sql << SQL + dolt sql << SQL CREATE TABLE loadtable(pk longtext); LOAD DATA INFILE './testdata/test5.txt' INTO TABLE loadtable 
FIELDS ENCLOSED BY '\"'; SQL - [ "$status" -eq 0 ] - run dolt sql -r csv -q "select * from test" [ "$status" -eq 0 ] @@ -244,13 +228,16 @@ pk||c1||c2||c3||c4||c5 1||1||2||3||4||5||6 DELIM - run dolt sql << SQL + dolt sql << SQL CREATE TABLE test(pk int primary key, c1 int, c2 int, c3 int, c4 int, c5 int); -LOAD DATA INFILE '1pk5col-ints.csv' INTO TABLE test CHARACTER SET UTF8MB4 FIELDS TERMINATED BY '||' ESCAPED BY '' LINES TERMINATED BY '\n' IGNORE 1 LINES; +LOAD DATA INFILE '1pk5col-ints.csv' INTO TABLE test +CHARACTER SET UTF8MB4 +FIELDS TERMINATED BY '||' +ESCAPED BY '' +LINES TERMINATED BY '\n' +IGNORE 1 LINES; SQL - [ "$status" -eq 0 ] - run dolt sql -r csv -q "select * from test" [ "$status" -eq 0 ] @@ -258,3 +245,150 @@ SQL [ "${lines[1]}" = "0,1,2,3,4,5" ] [ "${lines[2]}" = "1,1,2,3,4,5" ] } + +@test "sql-load-data: load data ignore" { + cat < in.csv +0,0,0 +1,1,1 +CSV + + dolt sql -q "create table t (pk int primary key, c1 int, c2 int)" + dolt sql -q "insert into t values (0,0,0)" + skip "load data ignore not supported" + run dolt sql < in.csv +0,0,1 +1,1,1 +CSV + + dolt sql -q "create table t (pk int primary key, c1 int, c2 int)" + dolt sql -q "insert into t values (0,0,0)" + skip "load data replace not supported" + run dolt sql < in.csv +0,0,0 +1,1,1 +CSV + + dolt sql -q "create table t (pk int primary key, c1 int, c2 int)" + + run dolt sql < in.csv +0,0,0 +1,1,1 +CSV + dolt sql -q "create table t (pk int primary key, c1 int, c2 int)" + + start_sql_server + + # File not found errors + run dolt sql-client -P $PORT -u dolt -q "load data infile 'foo.csv' into table t" + [ $status -ne 0 ] + [[ $output =~ "no such file or directory" ]] || false + + dolt sql-client -P $PORT -u dolt -q " +load data infile 'in.csv' into table t +fields terminated by ',' +lines terminated by '\n' +" + + stop_sql_server + + run dolt sql -r csv -q "select * from t" + [ $status -eq 0 ] + [[ $output =~ "0,0,0" ]] || false + [[ $output =~ "1,1,1" ]] || false +} From 
ce0d917c642517bf95ae23ee4a36209d7b48b9fa Mon Sep 17 00:00:00 2001 From: Tim Sehn Date: Thu, 18 May 2023 12:08:23 -0700 Subject: [PATCH 79/82] Load data testing progress --- integration-tests/bats/sql-load-data.bats | 154 ++++++++++++++++++++-- 1 file changed, 146 insertions(+), 8 deletions(-) diff --git a/integration-tests/bats/sql-load-data.bats b/integration-tests/bats/sql-load-data.bats index e089666bf5..a67dca575c 100644 --- a/integration-tests/bats/sql-load-data.bats +++ b/integration-tests/bats/sql-load-data.bats @@ -246,6 +246,46 @@ SQL [ "${lines[2]}" = "1,1,2,3,4,5" ] } +@test "sql-load-data: run twice it appends" { + cat < in.csv +0,0,0 +CSV + + dolt sql -q "create table t (pk int primary key, c1 int, c2 int)" + dolt sql < in.csv +1,1,1 +CSV + + dolt sql < in.csv 0,0,0 @@ -264,8 +304,8 @@ SQL run dolt sql -r csv -q "select * from t" [ $status -eq 0 ] - [[ $output =~ 0,0,0 ]] || false - [[ $output =~ 1,1,1 ]] || false + [[ $output =~ "0,0,0" ]] || false + [[ $output =~ "1,1,1" ]] || false } @@ -278,27 +318,125 @@ CSV dolt sql -q "create table t (pk int primary key, c1 int, c2 int)" dolt sql -q "insert into t values (0,0,0)" skip "load data replace not supported" - run dolt sql < in.csv +this,is,keyless +and,uses,strings +CSV + dolt sql -q "create table t (c1 varchar(10), c2 varchar(20), c3 varchar(30))" + + dolt sql < in.csv +0,a,a,this is text,0,0,0.01,a +CSV + dolt sql -q "create table t ( +pk int primary key, +c1 char(1), +c2 varchar(1), +c3 text, +c4 int, +c5 tinyint, +c6 double, +c7 enum('a','b') +)" + dolt sql < in.csv +1,a,a,this is text,0,5555555,0.01,a +CSV + run dolt sql < in.csv +0,2022-10-10 00:00:00,2022-10-10,00:00:00 +CSV + + dolt sql -q "create table t ( +pk int primary key, +c1 datetime, +c2 date, +c3 time)" + + dolt sql < in.csv +1,2022-10-10 00:00:00:00,2022-10-10,00:00:00 +CSV + + run dolt sql < Date: Thu, 18 May 2023 12:42:43 -0700 Subject: [PATCH 80/82] More load data tests --- integration-tests/bats/sql-load-data.bats | 84 
+++++++++++++++++++++++ 1 file changed, 84 insertions(+) diff --git a/integration-tests/bats/sql-load-data.bats b/integration-tests/bats/sql-load-data.bats index a67dca575c..bf6da95e84 100644 --- a/integration-tests/bats/sql-load-data.bats +++ b/integration-tests/bats/sql-load-data.bats @@ -437,15 +437,99 @@ SQL @test "sql-load-data: schema with not null constraints" { + cat < in.csv +0,0,0 +CSV + dolt sql -q "create table t (pk int primary key, c1 int not null, c2 int)" + dolt sql < in.csv +1,NULL,1 +CSV + + run dolt sql < in.csv +0,0,0 +CSV + dolt sql -q "create table t (pk int primary key, c1 int default 1, c2 int)" + dolt sql < in.csv +1,NULL,1 +CSV + + dolt sql < in.csv +0,0,0 +CSV + dolt sql -q "create table t (pk int primary key, c1 int, c2 int, check(c1 > 0))" + run dolt sql < in.csv +0,1,0 +CSV + + run dolt sql < Date: Thu, 18 May 2023 13:15:22 -0700 Subject: [PATCH 81/82] More thorough load data bats tests --- integration-tests/bats/sql-load-data.bats | 31 ++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/integration-tests/bats/sql-load-data.bats b/integration-tests/bats/sql-load-data.bats index bf6da95e84..c968b93fff 100644 --- a/integration-tests/bats/sql-load-data.bats +++ b/integration-tests/bats/sql-load-data.bats @@ -113,7 +113,7 @@ SQL } @test "sql-load-data: works with fields separated by tabs" { - skip "This needs to be fixed." + skip "This is a test problem with CI preserving tabs in this file. Tabs work locally." 
cat < 1pk2col-ints.csv pk c1 0 1 @@ -533,11 +533,36 @@ SQL } @test "sql-load-data: test schema with foreign keys" { + dolt sql -q "create table t1 (pk int primary key, c1 int, c2 int)" + dolt sql -q "create table t2 (pk int primary key, foreign key (pk) references t1(pk))" -} + dolt sql -q "insert into t1 values (0,0,0),(2,2,2)" -@test "sql-load-data: test load data defaults" { + cat < in.csv +0 +2 +CSV + dolt sql -q "load data infile 'in.csv' into table t2" + run dolt sql -r csv -q "select * from t2" + [ $status -eq 0 ] + [[ $output =~ "0" ]] || false + [[ $output =~ "2" ]] || false + + cat < in.csv +1 +CSV + + run dolt sql -q "load data infile 'in.csv' into table t2" + [ $status -ne 0 ] + [[ $output =~ "Foreign key violation" ]] || false + + dolt sql -q "set foreign_key_checks=0; load data infile 'in.csv' into table t2" + run dolt sql -r csv -q "select * from t2" + [ $status -eq 0 ] + [[ $output =~ "0" ]] || false + [[ $output =~ "1" ]] || false + [[ $output =~ "2" ]] || false } @test "sql-load-data: load data local" { From 3b43b06ceffb8c0a6143f201b438ded16c23f82b Mon Sep 17 00:00:00 2001 From: Tim Sehn Date: Thu, 18 May 2023 13:24:42 -0700 Subject: [PATCH 82/82] Remove extraneous spaces --- integration-tests/bats/sql-load-data.bats | 2 -- 1 file changed, 2 deletions(-) diff --git a/integration-tests/bats/sql-load-data.bats b/integration-tests/bats/sql-load-data.bats index c968b93fff..a0e95d43f7 100644 --- a/integration-tests/bats/sql-load-data.bats +++ b/integration-tests/bats/sql-load-data.bats @@ -433,8 +433,6 @@ SQL [ $status -ne 0 ] [[ $output =~ "Incorrect datetime value" ]] || false } - - @test "sql-load-data: schema with not null constraints" { cat < in.csv