mirror of
https://github.com/dolthub/dolt.git
synced 2026-03-18 09:40:59 -05:00
Merge branch 'main' into james/test
This commit is contained in:
@@ -1,8 +0,0 @@
|
||||
{
|
||||
"Template": {
|
||||
"TemplateName": "ImportBenchmarkingReleaseTemplate",
|
||||
"SubjectPart": "Import Benchmarks for {{format}} {{version}}",
|
||||
"HtmlPart": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n <title>Dolt {{format}} {{version}} Import Results</title>\n <style>\n table {\n border: 1px solid black;\n letter-spacing: 1px;\n font-family: sans-serif;\n font-size: .8rem;\n padding: 5px;\n margin: 5px;\n }\n th {\n border: 1px solid rgb(190, 190, 190);\n padding: 10px;\n }\n td {\n padding: 5px;\n }\n tr:nth-child(even) {background-color: #f2f2f2;}\n </style>\n</head><body>{{results}}</body></html>",
|
||||
"TextPart": "Dolt {{format}} {{version}} Import Results,\r\n{{results}}"
|
||||
}
|
||||
}
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"Template": {
|
||||
"TemplateName": "PerformanceBenchmarkingReleaseTemplate",
|
||||
"SubjectPart": "Performance Benchmarks for {{format}} {{version}}",
|
||||
"SubjectPart": "Read/Write Benchmarks for {{format}} {{version}}",
|
||||
"HtmlPart": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n <title>Dolt {{format}} {{version}} Performance Results</title>\n <style>\n table {\n border: 1px solid black;\n letter-spacing: 1px;\n font-family: sans-serif;\n font-size: .8rem;\n padding: 5px;\n margin: 5px;\n }\n th {\n border: 1px solid rgb(190, 190, 190);\n padding: 10px;\n }\n td {\n padding: 5px;\n }\n tr:nth-child(even) {background-color: #f2f2f2;}\n </style>\n</head><body>{{results}}</body></html>",
|
||||
"TextPart": "Dolt {{format}} {{version}} Performance Results,\r\n{{results}}"
|
||||
}
|
||||
|
||||
8
.github/actions/ses-email-action/template.json
vendored
Normal file
8
.github/actions/ses-email-action/template.json
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"Template": {
|
||||
"TemplateName": "SysbenchTemplate",
|
||||
"SubjectPart": "{{ category }} Benchmarks for {{format}} {{version}}",
|
||||
"HtmlPart": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n <title>Dolt {{format}} {{version}} Performance Results</title>\n <style>\n table {\n border: 1px solid black;\n letter-spacing: 1px;\n font-family: sans-serif;\n font-size: .8rem;\n padding: 5px;\n margin: 5px;\n }\n th {\n border: 1px solid rgb(190, 190, 190);\n padding: 10px;\n }\n td {\n padding: 5px;\n }\n tr:nth-child(even) {background-color: #f2f2f2;}\n </style>\n</head><body>{{results}}</body></html>",
|
||||
"TextPart": "Dolt {{format}} {{version}} Performance Results,\r\n{{results}}"
|
||||
}
|
||||
}
|
||||
2
.github/workflows/import-perf.yaml
vendored
2
.github/workflows/import-perf.yaml
vendored
@@ -142,6 +142,8 @@ jobs:
|
||||
version: ${{ steps.version.outputs.ref }}
|
||||
format: '__DOLT__'
|
||||
dataFile: ${{ steps.html.outputs.html }}
|
||||
template: 'SysbenchTemplate'
|
||||
category: 'Import'
|
||||
|
||||
- name: Read CSV
|
||||
if: ${{ github.event.client_payload.issue_id }} != ""
|
||||
|
||||
4
.github/workflows/sysbench-perf.yaml
vendored
4
.github/workflows/sysbench-perf.yaml
vendored
@@ -140,9 +140,11 @@ jobs:
|
||||
with:
|
||||
region: us-west-2
|
||||
toAddresses: '["${{ github.event.client_payload.email_recipient }}"]'
|
||||
version: ${{ steps.version.outputs.ref }}
|
||||
version: ${{ github.event.client_payload.version }}
|
||||
format: '__DOLT__'
|
||||
dataFile: ${{ steps.html.outputs.html }}
|
||||
template: 'SysbenchTemplate'
|
||||
category: 'Sysbench'
|
||||
|
||||
- name: Read CSV
|
||||
if: ${{ github.event.client_payload.issue_id }} != ""
|
||||
|
||||
@@ -120,7 +120,12 @@ func (cmd CheckoutCmd) Exec(ctx context.Context, commandStr string, args []strin
|
||||
if err != nil {
|
||||
return HandleVErrAndExitCode(errhand.BuildDError(err.Error()).Build(), usagePrt)
|
||||
}
|
||||
verr := actions.ResetHard(ctx, dEnv, "HEAD", roots)
|
||||
headRef := dEnv.RepoStateReader().CWBHeadRef()
|
||||
ws, err := dEnv.WorkingSet(ctx)
|
||||
if err != nil {
|
||||
HandleVErrAndExitCode(errhand.BuildDError(err.Error()).Build(), usagePrt)
|
||||
}
|
||||
verr := actions.ResetHard(ctx, dEnv, "HEAD", roots, headRef, ws)
|
||||
return handleResetError(verr, usagePrt)
|
||||
}
|
||||
|
||||
@@ -292,6 +297,15 @@ func checkoutBranch(ctx context.Context, dEnv *env.DoltEnv, name string, force b
|
||||
// Being on the same branch shouldn't be an error
|
||||
cli.Printf("Already on branch '%s'\n", name)
|
||||
return nil
|
||||
} else if err == actions.ErrWorkingSetsOnBothBranches {
|
||||
str := fmt.Sprintf("error: There are uncommitted changes already on branch '%s'.", name) +
|
||||
"This can happen when someone modifies that branch in a SQL session." +
|
||||
fmt.Sprintf("You have uncommitted changes on this branch, and they would overwrite the uncommitted changes on branch %s on checkout.", name) +
|
||||
"To solve this problem, you can " +
|
||||
"1) commit or reset your changes on this branch, using `dolt commit` or `dolt reset`, before checking out the other branch, " +
|
||||
"2) use the `-f` flag with `dolt checkout` to force an overwrite, or " +
|
||||
"3) connect to branch '%s' with the SQL server and revert or commit changes there before proceeding."
|
||||
return errhand.BuildDError(str).AddCause(err).Build()
|
||||
} else {
|
||||
bdr := errhand.BuildDError("fatal: Unexpected error checking out branch '%s'", name)
|
||||
bdr.AddCause(err)
|
||||
|
||||
@@ -43,7 +43,8 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
dbName = "filterDB"
|
||||
dbName = "filterDB"
|
||||
branchesFlag = "branches"
|
||||
)
|
||||
|
||||
var filterBranchDocs = cli.CommandDocumentationContent{
|
||||
@@ -52,7 +53,9 @@ var filterBranchDocs = cli.CommandDocumentationContent{
|
||||
|
||||
If a {{.LessThan}}commit-spec{{.GreaterThan}} is provided, the traversal will stop when the commit is reached and rewriting will begin at that commit, or will error if the commit is not found.
|
||||
|
||||
If the {{.EmphasisLeft}}--all{{.EmphasisRight}} flag is supplied, the traversal starts with the HEAD commits of all branches.
|
||||
If the {{.EmphasisLeft}}--branches{{.EmphasisRight}} flag is supplied, filter-branch traverses and rewrites commits for all branches.
|
||||
|
||||
If the {{.EmphasisLeft}}--all{{.EmphasisRight}} flag is supplied, filter-branch traverses and rewrites commits for all branches and tags.
|
||||
`,
|
||||
|
||||
Synopsis: []string{
|
||||
@@ -81,8 +84,9 @@ func (cmd FilterBranchCmd) Docs() *cli.CommandDocumentation {
|
||||
|
||||
func (cmd FilterBranchCmd) ArgParser() *argparser.ArgParser {
|
||||
ap := argparser.NewArgParser()
|
||||
ap.SupportsFlag(allFlag, "a", "filter all branches")
|
||||
ap.SupportsFlag(verboseFlag, "v", "logs more information")
|
||||
ap.SupportsFlag(branchesFlag, "b", "filter all branches")
|
||||
ap.SupportsFlag(allFlag, "a", "filter all branches and tags")
|
||||
return ap
|
||||
}
|
||||
|
||||
@@ -153,9 +157,12 @@ func (cmd FilterBranchCmd) Exec(ctx context.Context, commandStr string, args []s
|
||||
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
|
||||
}
|
||||
|
||||
if apr.Contains(allFlag) {
|
||||
switch {
|
||||
case apr.Contains(branchesFlag):
|
||||
err = rebase.AllBranches(ctx, dEnv, replay, nerf)
|
||||
} else {
|
||||
case apr.Contains(allFlag):
|
||||
err = rebase.AllBranchesAndTags(ctx, dEnv, replay, nerf)
|
||||
default:
|
||||
err = rebase.CurrentBranch(ctx, dEnv, replay, nerf)
|
||||
}
|
||||
if err != nil {
|
||||
|
||||
@@ -106,7 +106,13 @@ func (cmd ResetCmd) Exec(ctx context.Context, commandStr string, args []string,
|
||||
arg = apr.Arg(0)
|
||||
}
|
||||
|
||||
err = actions.ResetHard(ctx, dEnv, arg, roots)
|
||||
headRef := dEnv.RepoStateReader().CWBHeadRef()
|
||||
ws, err := dEnv.WorkingSet(ctx)
|
||||
if err != nil {
|
||||
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
|
||||
}
|
||||
|
||||
err = actions.ResetHard(ctx, dEnv, arg, roots, headRef, ws)
|
||||
} else {
|
||||
// Check whether the input argument is a ref.
|
||||
if apr.NArg() == 1 {
|
||||
|
||||
@@ -56,7 +56,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
Version = "0.51.11"
|
||||
Version = "0.51.12"
|
||||
)
|
||||
|
||||
var dumpDocsCommand = &commands.DumpDocsCmd{}
|
||||
|
||||
84
go/libraries/doltcore/env/actions/branch.go
vendored
84
go/libraries/doltcore/env/actions/branch.go
vendored
@@ -20,7 +20,6 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/branch_control"
|
||||
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/env"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
|
||||
@@ -31,6 +30,7 @@ import (
|
||||
var ErrAlreadyExists = errors.New("already exists")
|
||||
var ErrCOBranchDelete = errors.New("attempted to delete checked out branch")
|
||||
var ErrUnmergedBranchDelete = errors.New("attempted to delete a branch that is not fully merged into its parent; use `-f` to force")
|
||||
var ErrWorkingSetsOnBothBranches = errors.New("checkout would overwrite uncommitted changes on target branch")
|
||||
|
||||
func RenameBranch(ctx context.Context, dbData env.DbData, config *env.DoltCliConfig, oldBranch, newBranch string, force bool) error {
|
||||
oldRef := ref.NewBranchRef(oldBranch)
|
||||
@@ -306,6 +306,7 @@ func checkoutBranchNoDocs(ctx context.Context, roots doltdb.Roots, branchRoot *d
|
||||
|
||||
func CheckoutBranch(ctx context.Context, dEnv *env.DoltEnv, brName string, force bool) error {
|
||||
branchRef := ref.NewBranchRef(brName)
|
||||
branchHeadRef := dEnv.RepoStateReader().CWBHeadRef()
|
||||
|
||||
db := dEnv.DoltDB
|
||||
hasRef, err := db.HasRef(ctx, branchRef)
|
||||
@@ -325,15 +326,43 @@ func CheckoutBranch(ctx context.Context, dEnv *env.DoltEnv, brName string, force
|
||||
return err
|
||||
}
|
||||
|
||||
currentWs, err := dEnv.WorkingSet(ctx)
|
||||
if err != nil {
|
||||
// working set does not exist, skip check
|
||||
return nil
|
||||
}
|
||||
|
||||
if !force {
|
||||
err = checkWorkingSetCompatibility(ctx, dEnv, branchRef, currentWs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
shouldResetWorkingSet := true
|
||||
roots, err := dEnv.Roots(ctx)
|
||||
// roots will be empty/nil if the working set is not set (working set is not set if the current branch was deleted)
|
||||
if errors.Is(err, doltdb.ErrBranchNotFound) || errors.Is(err, doltdb.ErrWorkingSetNotFound) {
|
||||
roots, err = dEnv.RecoveryRoots(ctx)
|
||||
roots, _ = dEnv.RecoveryRoots(ctx)
|
||||
shouldResetWorkingSet = false
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return checkoutBranchNoDocs(ctx, roots, branchRoot, dEnv.RepoStateWriter(), branchRef, force)
|
||||
err = checkoutBranchNoDocs(ctx, roots, branchRoot, dEnv.RepoStateWriter(), branchRef, force)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if shouldResetWorkingSet {
|
||||
// reset the source branch's working set to the branch head, leaving the source branch unchanged
|
||||
err = ResetHard(ctx, dEnv, "", roots, branchHeadRef, currentWs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// BranchRoot returns the root value at the branch with the name given
|
||||
@@ -462,6 +491,55 @@ func overwriteRoot(ctx context.Context, head *doltdb.RootValue, tblHashes map[st
|
||||
return head, nil
|
||||
}
|
||||
|
||||
// checkWorkingSetCompatibility checks that the current working set is "compatible" with the dest working set.
|
||||
// This means that if both working sets are present (ie there are changes on both source and dest branches),
|
||||
// we check if the changes are identical before allowing a clobbering checkout.
|
||||
// Working set errors are ignored by this function, because they are properly handled elsewhere.
|
||||
func checkWorkingSetCompatibility(ctx context.Context, dEnv *env.DoltEnv, branchRef ref.BranchRef, currentWs *doltdb.WorkingSet) error {
|
||||
db := dEnv.DoltDB
|
||||
destWsRef, err := ref.WorkingSetRefForHead(branchRef)
|
||||
if err != nil {
|
||||
// dest working set does not exist, skip check
|
||||
return nil
|
||||
}
|
||||
destWs, err := db.ResolveWorkingSet(ctx, destWsRef)
|
||||
if err != nil {
|
||||
// dest working set does not resolve, skip check
|
||||
return nil
|
||||
}
|
||||
|
||||
sourceHasChanges, sourceHash, err := detectWorkingSetChanges(currentWs)
|
||||
if err != nil {
|
||||
// error detecting source changes, skip check
|
||||
return nil
|
||||
}
|
||||
destHasChanges, destHash, err := detectWorkingSetChanges(destWs)
|
||||
if err != nil {
|
||||
// error detecting dest changes, skip check
|
||||
return nil
|
||||
}
|
||||
areHashesEqual := sourceHash.Equal(destHash)
|
||||
|
||||
if sourceHasChanges && destHasChanges && !areHashesEqual {
|
||||
return ErrWorkingSetsOnBothBranches
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// detectWorkingSetChanges returns a boolean indicating whether the working set has changes, and a hash of the changes
|
||||
func detectWorkingSetChanges(ws *doltdb.WorkingSet) (hasChanges bool, wrHash hash.Hash, err error) {
|
||||
wrHash, err = ws.WorkingRoot().HashOf()
|
||||
if err != nil {
|
||||
return false, hash.Hash{}, err
|
||||
}
|
||||
srHash, err := ws.StagedRoot().HashOf()
|
||||
if err != nil {
|
||||
return false, hash.Hash{}, err
|
||||
}
|
||||
hasChanges = !wrHash.Equal(srHash)
|
||||
return hasChanges, wrHash, nil
|
||||
}
|
||||
|
||||
func IsBranch(ctx context.Context, ddb *doltdb.DoltDB, str string) (bool, error) {
|
||||
return IsBranchOnDB(ctx, ddb, str)
|
||||
}
|
||||
|
||||
23
go/libraries/doltcore/env/actions/reset.go
vendored
23
go/libraries/doltcore/env/actions/reset.go
vendored
@@ -18,10 +18,10 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
|
||||
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/env"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
|
||||
"github.com/dolthub/dolt/go/libraries/utils/argparser"
|
||||
)
|
||||
|
||||
@@ -136,7 +136,17 @@ func ResetHardTables(ctx context.Context, dbData env.DbData, cSpecStr string, ro
|
||||
return resetHardTables(ctx, dbData, cSpecStr, roots)
|
||||
}
|
||||
|
||||
func ResetHard(ctx context.Context, dEnv *env.DoltEnv, cSpecStr string, roots doltdb.Roots) error {
|
||||
// ResetHard resets the working, staged, and head to the ones in the provided roots and head ref.
|
||||
// The reset can be performed on a non-current branch and working set.
|
||||
// Returns an error if the reset fails.
|
||||
func ResetHard(
|
||||
ctx context.Context,
|
||||
dEnv *env.DoltEnv,
|
||||
cSpecStr string,
|
||||
roots doltdb.Roots,
|
||||
headRef ref.DoltRef,
|
||||
ws *doltdb.WorkingSet,
|
||||
) error {
|
||||
dbData := dEnv.DbData()
|
||||
|
||||
newHead, roots, err := resetHardTables(ctx, dbData, cSpecStr, roots)
|
||||
@@ -144,18 +154,13 @@ func ResetHard(ctx context.Context, dEnv *env.DoltEnv, cSpecStr string, roots do
|
||||
return err
|
||||
}
|
||||
|
||||
ws, err := dEnv.WorkingSet(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dEnv.UpdateWorkingSet(ctx, ws.WithWorkingRoot(roots.Working).WithStagedRoot(roots.Staged).ClearMerge())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if newHead != nil {
|
||||
err = dEnv.DoltDB.SetHeadToCommit(ctx, dEnv.RepoStateReader().CWBHeadRef(), newHead)
|
||||
err = dEnv.DoltDB.SetHeadToCommit(ctx, headRef, newHead)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -42,11 +42,6 @@ var (
|
||||
)
|
||||
|
||||
func migrateWorkingSet(ctx context.Context, menv Environment, brRef ref.BranchRef, wsRef ref.WorkingSetRef, old, new *doltdb.DoltDB) error {
|
||||
oldWs, err := old.ResolveWorkingSet(ctx, wsRef)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
oldHead, err := old.ResolveCommitRef(ctx, brRef)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -56,6 +51,16 @@ func migrateWorkingSet(ctx context.Context, menv Environment, brRef ref.BranchRe
|
||||
return err
|
||||
}
|
||||
|
||||
oldWs, err := old.ResolveWorkingSet(ctx, wsRef)
|
||||
if err == doltdb.ErrWorkingSetNotFound {
|
||||
// If a branch was created prior to dolt version 0.26.10, no working set will exist for it.
|
||||
// In this case, we will pretend it exists with the same root as the head commit.
|
||||
oldWs = doltdb.EmptyWorkingSet(wsRef)
|
||||
oldWs = oldWs.WithWorkingRoot(oldHeadRoot).WithStagedRoot(oldHeadRoot)
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
newHead, err := new.ResolveCommitRef(ctx, brRef)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -87,6 +87,20 @@ func wrapReplayRootFn(fn ReplayRootFn) ReplayCommitFn {
|
||||
}
|
||||
}
|
||||
|
||||
// AllBranchesAndTags rewrites the history of all branches and tags in the repo using the |replay| function.
|
||||
func AllBranchesAndTags(ctx context.Context, dEnv *env.DoltEnv, replay ReplayCommitFn, nerf NeedsRebaseFn) error {
|
||||
branches, err := dEnv.DoltDB.GetBranches(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tags, err := dEnv.DoltDB.GetTags(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return rebaseRefs(ctx, dEnv.DbData(), replay, nerf, append(branches, tags...)...)
|
||||
}
|
||||
|
||||
// AllBranches rewrites the history of all branches in the repo using the |replay| function.
|
||||
func AllBranches(ctx context.Context, dEnv *env.DoltEnv, replay ReplayCommitFn, nerf NeedsRebaseFn) error {
|
||||
branches, err := dEnv.DoltDB.GetBranches(ctx)
|
||||
@@ -121,11 +135,6 @@ func CurrentBranchByRoot(ctx context.Context, dEnv *env.DoltEnv, replay ReplayRo
|
||||
|
||||
func rebaseRefs(ctx context.Context, dbData env.DbData, replay ReplayCommitFn, nerf NeedsRebaseFn, refs ...ref.DoltRef) error {
|
||||
ddb := dbData.Ddb
|
||||
rsr := dbData.Rsr
|
||||
rsw := dbData.Rsw
|
||||
|
||||
cwbRef := rsr.CWBHeadRef()
|
||||
|
||||
heads := make([]*doltdb.Commit, len(refs))
|
||||
for i, dRef := range refs {
|
||||
var err error
|
||||
@@ -140,41 +149,30 @@ func rebaseRefs(ctx context.Context, dbData env.DbData, replay ReplayCommitFn, n
|
||||
return err
|
||||
}
|
||||
|
||||
for i, dRef := range refs {
|
||||
|
||||
switch dRef.(type) {
|
||||
for i, r := range refs {
|
||||
switch dRef := r.(type) {
|
||||
case ref.BranchRef:
|
||||
err = ddb.NewBranchAtCommit(ctx, dRef, newHeads[i])
|
||||
if err != nil {
|
||||
|
||||
case ref.TagRef:
|
||||
// rewrite tag with new commit
|
||||
var tag *doltdb.Tag
|
||||
if tag, err = ddb.ResolveTag(ctx, dRef); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = ddb.DeleteTag(ctx, dRef); err != nil {
|
||||
return err
|
||||
}
|
||||
err = ddb.NewTagAtCommit(ctx, dRef, newHeads[i], tag.Meta)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("cannot rebase ref: %s", ref.String(dRef))
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
cm, err := ddb.ResolveCommitRef(ctx, cwbRef)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r, err := cm.GetRootValue(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO: this should be a single update to repo state, not two
|
||||
err = rsw.UpdateStagedRoot(ctx, r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return rsw.UpdateWorkingRoot(ctx, r)
|
||||
return nil
|
||||
}
|
||||
|
||||
func rebase(ctx context.Context, ddb *doltdb.DoltDB, replay ReplayCommitFn, nerf NeedsRebaseFn, origins ...*doltdb.Commit) ([]*doltdb.Commit, error) {
|
||||
|
||||
@@ -42,11 +42,10 @@ func main() {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
if _, err = os.Stat(*scriptDir); err != nil {
|
||||
log.Fatalf("-scriptDir not found: '%s'\n", *scriptDir)
|
||||
}
|
||||
|
||||
conf = conf.WithScriptDir(*scriptDir)
|
||||
if err := os.Chdir(*scriptDir); err != nil {
|
||||
log.Fatalf("failed to 'cd %s'", *scriptDir)
|
||||
}
|
||||
|
||||
tmpdir, err := os.MkdirTemp("", "repo-store-")
|
||||
if err != nil {
|
||||
@@ -63,7 +62,7 @@ func main() {
|
||||
var err error
|
||||
switch {
|
||||
case r.ExternalServer != nil:
|
||||
panic("unsupported")
|
||||
err = test.RunExternalServerTests(r.Name, r.ExternalServer, conf)
|
||||
case r.Server != nil:
|
||||
err = test.RunSqlServerTests(r, u, conf)
|
||||
default:
|
||||
|
||||
47
go/performance/sysbench/testdata/read-write.yaml
vendored
Normal file
47
go/performance/sysbench/testdata/read-write.yaml
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
tests:
|
||||
- name: "read"
|
||||
repos:
|
||||
- name: dolt
|
||||
server:
|
||||
port: 3309
|
||||
args: [ "--port", "3309", "--password", "password"]
|
||||
- name: mysql
|
||||
external-server:
|
||||
name: test
|
||||
host: 127.0.0.1
|
||||
user: root
|
||||
password:
|
||||
port: 3309
|
||||
scripts:
|
||||
- covering_index_scan.lua
|
||||
- groupby_scan.lua
|
||||
- index_join.lua
|
||||
- index_join_scan.lua
|
||||
- index_scan.lua
|
||||
- oltp_point_select
|
||||
- oltp_read_only
|
||||
- select_random_points
|
||||
- select_random_ranges
|
||||
- table_scan.lua
|
||||
- types_table_scan.lua
|
||||
- name: "write"
|
||||
repos:
|
||||
- name: dolt
|
||||
server:
|
||||
port: 3308
|
||||
args: [ "--port", "3308" ]
|
||||
- name: mysql
|
||||
external-server:
|
||||
name: test
|
||||
host: 127.0.0.1
|
||||
user: root
|
||||
password:
|
||||
port: 3309
|
||||
scripts:
|
||||
- oltp_delete_insert.lua
|
||||
- oltp_insert
|
||||
- oltp_read_write
|
||||
- oltp_update_index
|
||||
- oltp_update_non_index
|
||||
- oltp_write_only
|
||||
- types_delete_insert.lua
|
||||
@@ -306,6 +306,7 @@ func (r *Result) populateAvg(buf []byte) error {
|
||||
}
|
||||
|
||||
type Result struct {
|
||||
server string
|
||||
detail string
|
||||
test string
|
||||
|
||||
@@ -318,8 +319,9 @@ type Result struct {
|
||||
stddev float64
|
||||
}
|
||||
|
||||
func newResult(test, detail string) *Result {
|
||||
func newResult(server, test, detail string) *Result {
|
||||
return &Result{
|
||||
server: server,
|
||||
detail: detail,
|
||||
test: test,
|
||||
}
|
||||
@@ -335,6 +337,9 @@ func (r *Result) String() string {
|
||||
if r.detail != "" {
|
||||
fmt.Fprintf(b, "- detail: '%s'\n", r.detail)
|
||||
}
|
||||
if r.server != "" {
|
||||
fmt.Fprintf(b, "- server: '%s'\n", r.server)
|
||||
}
|
||||
fmt.Fprintf(b, "- time: %.3f\n", r.time)
|
||||
fmt.Fprintf(b, "- iters: %d\n", r.iters)
|
||||
fmt.Fprintf(b, "- mean: %.3f\n", r.hist.mean())
|
||||
@@ -365,12 +370,13 @@ func (r *Results) SqlDump() string {
|
||||
b.WriteString(`CREATE TABLE IF NOT EXISTS sysbench_results (
|
||||
test_name varchar(64),
|
||||
detail varchar(64),
|
||||
servervarchar(64),
|
||||
time double,
|
||||
iters int,
|
||||
avg double,
|
||||
median double,
|
||||
stdd double,
|
||||
primary key (test_name, detail)
|
||||
primary key (test_name, detail, server)
|
||||
);
|
||||
`)
|
||||
|
||||
@@ -380,8 +386,8 @@ func (r *Results) SqlDump() string {
|
||||
b.WriteString(",\n ")
|
||||
}
|
||||
b.WriteString(fmt.Sprintf(
|
||||
"('%s', '%s',%.3f, %d, %.3f, %.3f, %.3f)",
|
||||
r.test, r.detail, r.time, r.iters, r.avg, r.median, r.stddev))
|
||||
"('%s', '%s', '%s', %.3f, %d, %.3f, %.3f, %.3f)",
|
||||
r.test, r.detail, r.server, r.time, r.iters, r.avg, r.median, r.stddev))
|
||||
}
|
||||
b.WriteString(";\n")
|
||||
|
||||
@@ -448,6 +454,39 @@ func modifyServerForImport(db *sql.DB) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// RunExternalServerTests connects to a single externally provided server to run every test
|
||||
func (test *Script) RunExternalServerTests(repoName string, s *driver.ExternalServer, conf Config) error {
|
||||
return test.IterSysbenchScripts(conf, test.Scripts, func(script string, prep, run, clean *exec.Cmd) error {
|
||||
db, err := driver.ConnectDB(s.User, s.Password, s.Name, s.Host, s.Port, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
if err := prep.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
run.Stdout = buf
|
||||
err = run.Run()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO scrape histogram data
|
||||
r := newResult(repoName, script, test.Name)
|
||||
if conf.Histogram {
|
||||
r.populateHistogram(buf.Bytes())
|
||||
} else {
|
||||
r.populateAvg(buf.Bytes())
|
||||
}
|
||||
test.Results.Append(r)
|
||||
|
||||
return clean.Run()
|
||||
})
|
||||
}
|
||||
|
||||
// RunSqlServerTests creates a new repo and server for every import test.
|
||||
func (test *Script) RunSqlServerTests(repo driver.TestRepo, user driver.DoltUser, conf Config) error {
|
||||
return test.IterSysbenchScripts(conf, test.Scripts, func(script string, prep, run, clean *exec.Cmd) error {
|
||||
@@ -462,7 +501,11 @@ func (test *Script) RunSqlServerTests(repo driver.TestRepo, user driver.DoltUser
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = modifyServerForImport(db)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := prep.Run(); err != nil {
|
||||
return err
|
||||
@@ -476,7 +519,7 @@ func (test *Script) RunSqlServerTests(repo driver.TestRepo, user driver.DoltUser
|
||||
}
|
||||
|
||||
// TODO scrape histogram data
|
||||
r := newResult(script, test.Name)
|
||||
r := newResult(repo.Name, script, test.Name)
|
||||
if conf.Histogram {
|
||||
r.populateHistogram(buf.Bytes())
|
||||
} else {
|
||||
@@ -509,6 +552,8 @@ func newServer(u driver.DoltUser, r driver.TestRepo) (*driver.SqlServer, error)
|
||||
return server, nil
|
||||
}
|
||||
|
||||
const luaExt = ".lua"
|
||||
|
||||
// IterSysbenchScripts returns 3 executable commands for the given script path: prepare, run, cleanup
|
||||
func (test *Script) IterSysbenchScripts(conf Config, scripts []string, cb func(name string, prep, run, clean *exec.Cmd) error) error {
|
||||
newCmd := func(command, script string) *exec.Cmd {
|
||||
@@ -521,9 +566,12 @@ func (test *Script) IterSysbenchScripts(conf Config, scripts []string, cb func(n
|
||||
}
|
||||
|
||||
for _, script := range scripts {
|
||||
p := path.Join(conf.ScriptDir, script)
|
||||
if _, err := os.Stat(p); err != nil {
|
||||
return fmt.Errorf("failed to run script: '%s'", err)
|
||||
p := script
|
||||
if strings.HasSuffix(script, luaExt) {
|
||||
p = path.Join(conf.ScriptDir, script)
|
||||
if _, err := os.Stat(p); err != nil {
|
||||
return fmt.Errorf("failed to run script: '%s'", err)
|
||||
}
|
||||
}
|
||||
prep := newCmd("prepare", p)
|
||||
run := newCmd("run", p)
|
||||
|
||||
@@ -187,13 +187,13 @@ func conjoin(ctx context.Context, s conjoinStrategy, upstream manifestContents,
|
||||
}
|
||||
|
||||
func conjoinTables(ctx context.Context, conjoinees []tableSpec, p tablePersister, stats *Stats) (conjoined tableSpec, err error) {
|
||||
eg, ctx := errgroup.WithContext(ctx)
|
||||
eg, ectx := errgroup.WithContext(ctx)
|
||||
toConjoin := make(chunkSources, len(conjoinees))
|
||||
|
||||
for idx := range conjoinees {
|
||||
i, spec := idx, conjoinees[idx]
|
||||
eg.Go(func() (err error) {
|
||||
toConjoin[i], err = p.Open(ctx, spec.name, spec.chunkCount, stats)
|
||||
toConjoin[i], err = p.Open(ectx, spec.name, spec.chunkCount, stats)
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
@@ -285,3 +285,299 @@ SQL
|
||||
[[ "$commitmeta" =~ "$shaparent1" ]] || false
|
||||
[[ "$commitmeta" =~ "$shaparent2" ]] || false
|
||||
}
|
||||
|
||||
|
||||
@test "checkout: dolt_checkout brings in changes from main to feature branch that has no working set" {
|
||||
# original setup
|
||||
dolt sql -q "create table users (id int primary key, name varchar(32));"
|
||||
dolt add .
|
||||
dolt commit -m "original users table"
|
||||
|
||||
# create feature branch
|
||||
dolt branch -c main feature
|
||||
|
||||
# make changes on main and verify
|
||||
dolt sql -q 'insert into users (id, name) values (1, "main-change");'
|
||||
run dolt sql -q "select name from users"
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "main-change" ]] || false
|
||||
|
||||
# checkout feature branch and bring over main changes
|
||||
dolt checkout feature
|
||||
|
||||
# verify working set changes are brought in from main
|
||||
run dolt sql << SQL
|
||||
call dolt_checkout('feature');
|
||||
select name from users
|
||||
SQL
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "main-change" ]] || false
|
||||
|
||||
# verify working set changes are not on main
|
||||
run dolt sql << SQL
|
||||
call dolt_checkout('main');
|
||||
select count(*) from users
|
||||
SQL
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "0" ]] || false
|
||||
|
||||
# revert working set changes on feature branch
|
||||
dolt reset --hard HEAD
|
||||
run dolt sql -q "select count(*) from users"
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "0" ]] || false
|
||||
|
||||
# switch to main and verify working set changes are not present
|
||||
dolt checkout main
|
||||
run dolt sql -q "select count(*) from users"
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "0" ]] || false
|
||||
}
|
||||
|
||||
@test "checkout: dolt_checkout switches from clean main to feature branch that has changes" {
|
||||
# original setup
|
||||
dolt sql -q "create table users (id int primary key, name varchar(32));"
|
||||
dolt add .
|
||||
dolt commit -m "original users table"
|
||||
|
||||
# create feature branch
|
||||
dolt branch -c main feature
|
||||
|
||||
# make changes on feature (through SQL)
|
||||
dolt sql << SQL
|
||||
call dolt_checkout('feature');
|
||||
insert into users (id, name) values (1, "feature-change");
|
||||
SQL
|
||||
|
||||
# verify feature branch changes are present
|
||||
run dolt sql << SQL
|
||||
call dolt_checkout('feature');
|
||||
select name from users;
|
||||
SQL
|
||||
echo "output = $output"
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "feature-change" ]] || false
|
||||
|
||||
# checkout feature branch
|
||||
dolt checkout feature
|
||||
|
||||
# verify feature's working set changes are gone
|
||||
run dolt sql << SQL
|
||||
call dolt_checkout('feature');
|
||||
select count(*) from users
|
||||
SQL
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "0" ]] || false
|
||||
|
||||
# verify working set changes are not on main
|
||||
run dolt sql << SQL
|
||||
call dolt_checkout('main');
|
||||
select count(*) from users
|
||||
SQL
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" =~ "0" ]] || false
|
||||
}
|
||||
|
||||
@test "checkout: dolt_checkout brings in changes from main to feature branch that has identical changes" {
    # Scenario: main and feature both carry the SAME uncommitted insert.
    # Checking out feature must succeed (no conflict, the working sets are
    # identical) and carry the change onto feature while leaving main clean.

    # original setup: commit an empty users table on main
    dolt sql -q "create table users (id int primary key, name varchar(32));"
    dolt add .
    dolt commit -m "original users table"

    # create feature branch from main
    dolt branch -c main feature

    # make changes on main and verify
    dolt sql -q 'insert into users (id, name) values (1, "main-change");'
    run dolt sql -q "select name from users"
    [ "$status" -eq 0 ]
    [[ "$output" =~ "main-change" ]] || false

    # make identical changes on feature (through SQL)
    dolt sql << SQL
call dolt_checkout('feature');
insert into users (id, name) values (1, "main-change");
SQL

    # verify feature branch changes are present
    run dolt sql << SQL
call dolt_checkout('feature');
select name from users;
SQL
    echo "output = $output"
    [ "$status" -eq 0 ]
    [[ "$output" =~ "main-change" ]] || false

    # checkout feature branch (should not conflict: both working sets match)
    dolt checkout feature

    # verify working set changes are still the same on feature branch
    run dolt sql << SQL
call dolt_checkout('feature');
select name from users
SQL
    [ "$status" -eq 0 ]
    [[ "$output" =~ "main-change" ]] || false

    # verify working set changes are not on main (they moved with checkout)
    run dolt sql << SQL
call dolt_checkout('main');
select count(*) from users
SQL
    [ "$status" -eq 0 ]
    # NOTE(review): the regex "0" would also match counts like "10"; this
    # relies on the table staying small.
    [[ "$output" =~ "0" ]] || false

    # revert working set changes on feature branch
    dolt reset --hard HEAD

    # verify working set changes are not on feature branch
    run dolt sql << SQL
call dolt_checkout('feature');
select count(*) from users
SQL
    [ "$status" -eq 0 ]
    [[ "$output" =~ "0" ]] || false

    # switch to main and verify working set changes are not present
    dolt checkout main
    run dolt sql << SQL
call dolt_checkout('main');
select count(*) from users
SQL
    [ "$status" -eq 0 ]
    [[ "$output" =~ "0" ]] || false
}
|
||||
|
||||
@test "checkout: dolt_checkout needs -f to bring in changes from main to feature branch that has different changes" {
    # Scenario: main and feature carry CONFLICTING uncommitted changes.
    # A plain checkout must be refused (it would clobber feature's working
    # set); `checkout -f` must overwrite feature's working set with main's.

    # original setup: commit an empty users table on main
    dolt sql -q "create table users (id int primary key, name varchar(32));"
    dolt add .
    dolt commit -m "original users table"

    # create feature branch from main
    dolt branch feature

    # make changes on main and verify
    dolt sql -q 'insert into users (id, name) values (1, "main-change");'
    run dolt sql -q "select name from users"
    [ "$status" -eq 0 ]
    [[ "$output" =~ "main-change" ]] || false

    # make different changes on feature (through SQL)
    dolt sql << SQL
call dolt_checkout('feature');
insert into users (id, name) values (2, "feature-change");
SQL

    # verify feature branch changes are present
    run dolt sql << SQL
call dolt_checkout('feature');
select name from users;
SQL
    echo "output = $output"
    [ "$status" -eq 0 ]
    [[ "$output" =~ "feature-change" ]] || false

    # checkout feature branch: should fail due to working set changes
    run dolt checkout feature
    echo "output = $output"
    [ "$status" -eq 1 ]
    [[ "$output" =~ "checkout would overwrite uncommitted changes on target branch" ]] || false

    # force checkout feature branch (discards feature's working set)
    dolt checkout -f feature

    # verify working set changes on feature are from main
    run dolt sql << SQL
call dolt_checkout('feature');
select name from users
SQL
    [ "$status" -eq 0 ]
    [[ "$output" =~ "main-change" ]] || false

    # verify working set changes are not on main
    run dolt sql << SQL
call dolt_checkout('main');
select count(*) from users
SQL
    [ "$status" -eq 0 ]
    # NOTE(review): the regex "0" would also match counts like "10"; this
    # relies on the table staying small.
    [[ "$output" =~ "0" ]] || false
}
|
||||
|
||||
@test "checkout: dolt_checkout brings changes from main to multiple feature branches and back to main" {
    # Scenario: uncommitted working-set changes should follow the CLI
    # checkout across a chain of branches (main -> feature1 -> feature2)
    # and accumulate, then ride back to main intact.

    # original setup: commit an empty users table on main
    dolt sql -q "create table users (id int primary key, name varchar(32));"
    dolt add .
    dolt commit -m "original users table"

    # make changes on main and verify
    dolt sql -q 'insert into users (id, name) values (0, "main-change");'
    run dolt sql << SQL
call dolt_checkout('main');
select name from users
SQL
    [ "$status" -eq 0 ]
    [[ "$output" =~ "main-change" ]] || false

    # create feature1 branch and bring changes to the new feature branch
    dolt checkout -b feature1

    # verify the changes are brought to feature1
    run dolt sql << SQL
call dolt_checkout('feature1');
select name from users
SQL
    [ "$status" -eq 0 ]
    [[ "$output" =~ "main-change" ]] || false

    # make changes on feature1 and verify
    dolt sql -q 'insert into users (id, name) values (1, "feature1-change");'
    run dolt sql << SQL
call dolt_checkout('feature1');
select name from users
SQL
    [ "$status" -eq 0 ]
    [[ "$output" =~ "main-change" ]] || false
    [[ "$output" =~ "feature1-change" ]] || false

    # create feature2 branch and bring changes to next feature branch
    dolt checkout -b feature2

    # verify the changes are brought to feature2
    run dolt sql << SQL
call dolt_checkout('feature2');
select name from users
SQL
    [ "$status" -eq 0 ]
    [[ "$output" =~ "main-change" ]] || false
    [[ "$output" =~ "feature1-change" ]] || false

    # make changes on feature2 and verify
    dolt sql -q 'insert into users (id, name) values (2, "feature2-change");'
    run dolt sql << SQL
call dolt_checkout('feature2');
select name from users
SQL
    [ "$status" -eq 0 ]
    [[ "$output" =~ "main-change" ]] || false
    [[ "$output" =~ "feature1-change" ]] || false
    [[ "$output" =~ "feature2-change" ]] || false

    # bring changes back to main
    dolt checkout main

    # verify the changes are brought to main
    run dolt sql << SQL
call dolt_checkout('main');
select name from users
SQL
    [ "$status" -eq 0 ]
    [[ "$output" =~ "main-change" ]] || false
    [[ "$output" =~ "feature1-change" ]] || false
    [[ "$output" =~ "feature2-change" ]] || false

}
|
||||
|
||||
@@ -83,6 +83,72 @@ teardown() {
|
||||
[[ "$output" =~ "4,4" ]] || false
|
||||
}
|
||||
|
||||
@test "filter-branch: filter tags" {
    # `filter-branch --all` must rewrite tagged commits too: after deleting
    # pk >= 3 across all refs, the tag's view of t should lose row 3.
    # NOTE(review): dcommit/dtag are presumably aliases for dolt_commit /
    # dolt_tag defined in the shared setup — confirm in the helper file.
    dolt sql <<SQL
create table t (pk int primary key);
insert into t values (1),(2);
call dcommit('-Am', 'msg');
insert into t values (3);
call dcommit('-Am', 'three');
call dtag('myTag');
insert into t values (4);
call dcommit('-Am', 'four');
SQL
    # the tag points at the commit containing rows 1,2,3 (but not 4)
    run dolt sql -q "select * from t as of 'myTag'" -r csv
    [ "$status" -eq 0 ]
    [[ "$output" =~ "1" ]] || false
    [[ "$output" =~ "2" ]] || false
    [[ "$output" =~ "3" ]] || false

    dolt filter-branch --all "delete from t where pk >= 3"

    # after the rewrite, row 3 is gone from the tagged commit as well
    run dolt sql -q "select * from t as of 'myTag'" -r csv
    [ "$status" -eq 0 ]
    [[ "$output" =~ "1" ]] || false
    [[ "$output" =~ "2" ]] || false
    [[ ! "$output" =~ "3" ]] || false
}
|
||||
|
||||
@test "filter-branch: filter branches only" {
    # `filter-branch --branches` must rewrite branch heads but leave tags
    # untouched: the branch loses rows 3 and 4, the tag still shows row 3.
    # NOTE(review): dcommit/dtag are presumably aliases for dolt_commit /
    # dolt_tag defined in the shared setup — confirm in the helper file.
    dolt sql <<SQL
create table t (pk int primary key);
insert into t values (1),(2);
call dcommit('-Am', 'msg');
insert into t values (3);
call dcommit('-Am', 'three');
call dtag('myTag');
insert into t values (4);
call dcommit('-Am', 'four');
SQL
    # branch head currently has all four rows
    run dolt sql -q "select * from t" -r csv
    [ "$status" -eq 0 ]
    [[ "$output" =~ "1" ]] || false
    [[ "$output" =~ "2" ]] || false
    [[ "$output" =~ "3" ]] || false
    [[ "$output" =~ "4" ]] || false

    # the tag points at the commit containing rows 1,2,3 (but not 4)
    run dolt sql -q "select * from t as of 'myTag'" -r csv
    [ "$status" -eq 0 ]
    [[ "$output" =~ "1" ]] || false
    [[ "$output" =~ "2" ]] || false
    [[ "$output" =~ "3" ]] || false

    dolt filter-branch --branches "delete from t where pk >= 3"

    # branch head was rewritten: rows 3 and 4 are gone
    run dolt sql -q "select * from t" -r csv
    [ "$status" -eq 0 ]
    [[ "$output" =~ "1" ]] || false
    [[ "$output" =~ "2" ]] || false
    [[ ! "$output" =~ "3" ]] || false
    [[ ! "$output" =~ "4" ]] || false

    # the tag was NOT rewritten: it still sees row 3
    run dolt sql -q "select * from t as of 'myTag'" -r csv
    [ "$status" -eq 0 ]
    [[ "$output" =~ "1" ]] || false
    [[ "$output" =~ "2" ]] || false
    [[ "$output" =~ "3" ]] || false
}
|
||||
|
||||
@test "filter-branch: with missing table" {
|
||||
dolt sql -q "DROP TABLE test;"
|
||||
dolt add -A && dolt commit -m "dropped test"
|
||||
|
||||
Reference in New Issue
Block a user