Merge remote-tracking branch 'origin/main' into andy/migration-6

Andy Arthur
2022-08-02 12:56:20 -07:00
52 changed files with 580 additions and 519 deletions
@@ -33,7 +33,7 @@ if [ -n "$withTpcc" ]; then
withTpcc="\"--withTpcc=$withTpcc\","
fi
readTests="('oltp_read_only', 'oltp_point_select', 'select_random_points', 'select_random_ranges', 'covering_index_scan', 'index_scan', 'table_scan', 'groupby_scan', 'index_join_scan')"
readTests="('oltp_read_only', 'oltp_point_select', 'select_random_points', 'select_random_ranges', 'covering_index_scan', 'index_scan', 'table_scan', 'groupby_scan', 'index_join_scan', 'types_table_scan')"
medianLatencyChangeReadsQuery="select f.test_name as read_tests, case when avg(f.latency_percentile) < 0.001 then 0.001 else avg(f.latency_percentile) end as from_latency_median, case when avg(t.latency_percentile) < 0.001 then 0.001 else avg(t.latency_percentile) end as to_latency_median, case when ((avg(t.latency_percentile) - avg(f.latency_percentile)) / (avg(f.latency_percentile) + .0000001)) < -0.1 then 1 when ((avg(t.latency_percentile) - avg(f.latency_percentile)) / (avg(f.latency_percentile) + .0000001)) > 0.1 then -1 else 0 end as is_faster from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name in $readTests group by f.test_name;"
writeTests="('oltp_read_write', 'oltp_update_index', 'oltp_update_non_index', 'oltp_insert', 'bulk_insert', 'oltp_write_only', 'oltp_delete_insert')"
@@ -34,7 +34,7 @@ if [ -n "$withTpcc" ]; then
withTpcc="\"--withTpcc=$withTpcc\","
fi
readTests="('oltp_read_only', 'oltp_point_select', 'select_random_points', 'select_random_ranges', 'covering_index_scan', 'index_scan', 'table_scan', 'groupby_scan', 'index_join_scan')"
readTests="('oltp_read_only', 'oltp_point_select', 'select_random_points', 'select_random_ranges', 'covering_index_scan', 'index_scan', 'table_scan', 'groupby_scan', 'index_join_scan', 'types_table_scan')"
medianLatencyMultiplierReadsQuery="select f.test_name as read_tests, f.server_name, f.server_version, case when avg(f.latency_percentile) < 0.001 then 0.001 else avg(f.latency_percentile) end as from_latency_median, t.server_name, t.server_version, case when avg(t.latency_percentile) < 0.001 then 0.001 else avg(t.latency_percentile) end as to_latency_median, case when ROUND(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision) < 1.0 then 1.0 else ROUND(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision) end as multiplier from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name in $readTests group by f.test_name;"
meanMultiplierReadsQuery="select round(avg(multipliers), $precision) as reads_mean_multiplier from (select case when (round(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision)) < 1.0 then 1.0 else (round(avg(t.latency_percentile) / (avg(f.latency_percentile) + .000001), $precision)) end as multipliers from from_results as f join to_results as t on f.test_name = t.test_name where f.test_name in $readTests group by f.test_name)"
+4 -8
@@ -202,15 +202,11 @@ func (cmd MergeCmd) Exec(ctx context.Context, commandStr string, args []string,
}
if mergeErr != nil {
var verr errhand.VerboseError
switch mergeErr {
case doltdb.ErrIsAhead:
verr = nil
default:
verr = errhand.VerboseErrorFromError(mergeErr)
cli.Println("Unable to stage changes: add and commit to finish merge")
if mergeErr == doltdb.ErrIsAhead {
return 0
}
return handleCommitErr(ctx, dEnv, verr, usage)
verr := errhand.BuildDError("Merge aborted due to error").AddCause(mergeErr).Build()
return HandleVErrAndExitCode(verr, usage)
}
}
}
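The interleaved lines above are hard to follow without their +/- markers; given the hunk's +4 -8 counts, the new error handling in MergeCmd.Exec appears to reduce to the sketch below (a reconstruction from the added lines, not the verbatim file):

    if mergeErr != nil {
        // being ahead of the merge target is not an error; exit cleanly
        if mergeErr == doltdb.ErrIsAhead {
            return 0
        }
        // every other merge error aborts with a verbose error and a non-zero exit code
        verr := errhand.BuildDError("Merge aborted due to error").AddCause(mergeErr).Build()
        return HandleVErrAndExitCode(verr, usage)
    }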
+17 -5
@@ -36,7 +36,7 @@ var pullDocs = cli.CommandDocumentationContent{
More precisely, dolt pull runs {{.EmphasisLeft}}dolt fetch{{.EmphasisRight}} with the given parameters and calls {{.EmphasisLeft}}dolt merge{{.EmphasisRight}} to merge the retrieved branch {{.EmphasisLeft}}HEAD{{.EmphasisRight}} into the current branch.
`,
Synopsis: []string{
"{{.LessThan}}remote{{.GreaterThan}}",
`[{{.LessThan}}remote{{.GreaterThan}}, [{{.LessThan}}remoteBranch{{.GreaterThan}}]]`,
},
}
@@ -59,6 +59,8 @@ func (cmd PullCmd) Docs() *cli.CommandDocumentation {
func (cmd PullCmd) ArgParser() *argparser.ArgParser {
ap := argparser.NewArgParser()
ap.ArgListHelp = append(ap.ArgListHelp, [2]string{"remote", "The name of the remote to pull from."})
ap.ArgListHelp = append(ap.ArgListHelp, [2]string{"remoteBranch", "The name of a branch on the specified remote to be merged into the current working set."})
ap.SupportsFlag(cli.SquashParam, "", "Merges changes to the working set without updating the commit history")
return ap
}
@@ -75,22 +77,24 @@ func (cmd PullCmd) Exec(ctx context.Context, commandStr string, args []string, d
apr := cli.ParseArgsOrDie(ap, args, help)
if apr.NArg() > 1 {
if apr.NArg() > 2 {
verr := errhand.VerboseErrorFromError(actions.ErrInvalidPullArgs)
return HandleVErrAndExitCode(verr, usage)
}
var remoteName string
var remoteName, remoteRefName string
if apr.NArg() == 1 {
remoteName = apr.Arg(0)
} else if apr.NArg() == 2 {
remoteName = apr.Arg(0)
remoteRefName = apr.Arg(1)
}
if dEnv.IsLocked() {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(env.ErrActiveServerLock.New(dEnv.LockFile())), help)
}
pullSpec, err := env.NewPullSpec(ctx, dEnv.RepoStateReader(), remoteName, apr.Contains(cli.SquashParam), apr.Contains(cli.NoFFParam), apr.Contains(cli.ForceFlag), apr.NArg() == 1)
pullSpec, err := env.NewPullSpec(ctx, dEnv.RepoStateReader(), remoteName, remoteRefName, apr.Contains(cli.SquashParam), apr.Contains(cli.NoFFParam), apr.Contains(cli.ForceFlag), apr.NArg() == 1)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
@@ -115,6 +119,14 @@ func pullHelper(ctx context.Context, dEnv *env.DoltEnv, pullSpec *env.PullSpec)
return env.ErrFailedToReadDb
}
hasBranch, err := srcDB.HasBranch(ctx, pullSpec.Branch.GetPath())
if err != nil {
return err
}
if !hasBranch {
return fmt.Errorf("branch %q not found on remote", pullSpec.Branch.GetPath())
}
// Go through every reference and every branch in each reference
for _, rs := range pullSpec.RefSpecs {
rsSeen := false // track invalid refSpecs
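With the optional remoteBranch argument added above, the pull CLI surface now covers both forms (an illustrative shell session; the remote and branch names are placeholders):

    # merge the configured upstream branch of the current branch
    dolt pull origin

    # merge a specific branch from the named remote
    dolt pull origin feature-branch

    # a branch that does not exist on the remote now fails fast with:
    #   branch "feature-branch" not found on remote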
+1 -1
@@ -56,7 +56,7 @@ import (
)
const (
Version = "0.40.20"
Version = "0.40.21"
)
var dumpDocsCommand = &commands.DumpDocsCmd{}
+1 -1
@@ -58,7 +58,7 @@ require (
)
require (
github.com/dolthub/go-mysql-server v0.12.1-0.20220728202257-cd27ab81b097
github.com/dolthub/go-mysql-server v0.12.1-0.20220801213125-6a5fb1229fc0
github.com/google/flatbuffers v2.0.6+incompatible
github.com/gosuri/uilive v0.0.4
github.com/kch42/buzhash v0.0.0-20160816060738-9bdec3dec7c6
+2 -2
@@ -173,8 +173,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
github.com/dolthub/go-mysql-server v0.12.1-0.20220728202257-cd27ab81b097 h1:pywBH+t2pdsVB27E67N2rAAdhv/QtGl5VK9G2aqKLrg=
github.com/dolthub/go-mysql-server v0.12.1-0.20220728202257-cd27ab81b097/go.mod h1:JgB3WpY0RMgyAda3YG5VHVncH2B8i1N9Mx9LOp41lIs=
github.com/dolthub/go-mysql-server v0.12.1-0.20220801213125-6a5fb1229fc0 h1:0HkZlUpIytFYZt3QLccf/tJLFAC2UQiX8qQOi+8aYrE=
github.com/dolthub/go-mysql-server v0.12.1-0.20220801213125-6a5fb1229fc0/go.mod h1:JgB3WpY0RMgyAda3YG5VHVncH2B8i1N9Mx9LOp41lIs=
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371 h1:oyPHJlzumKta1vnOQqUnfdz+pk3EmnHS3Nd0cCT0I2g=
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371/go.mod h1:dhGBqcCEfK5kuFmeO5+WOx3hqc1k3M29c1oS/R7N4ms=
github.com/dolthub/jsonpath v0.0.0-20210609232853-d49537a30474 h1:xTrR+l5l+1Lfq0NvhiEsctylXinUMFhhsqaEcl414p8=
@@ -16,11 +16,10 @@ package durable
import (
"context"
"fmt"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/prolly"
"github.com/dolthub/dolt/go/store/prolly/shim"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -38,8 +37,7 @@ func RefFromConflictIndex(ctx context.Context, vrw types.ValueReadWriter, idx Co
return refFromNomsValue(ctx, vrw, idx.(nomsConflictIndex).index)
case types.Format_DOLT_1:
b := shim.ValueFromConflictMap(idx.(prollyConflictIndex).index)
return refFromNomsValue(ctx, vrw, b)
return types.Ref{}, fmt.Errorf("__DOLT_1__ conflicts should be stored in ArtifactIndex")
default:
return types.Ref{}, errNbfUnkown
@@ -57,13 +55,7 @@ func NewEmptyConflictIndex(ctx context.Context, vrw types.ValueReadWriter, ns tr
return ConflictIndexFromNomsMap(m, vrw), nil
case types.Format_DOLT_1:
kd, oursVD := shim.MapDescriptorsFromSchema(oursSch)
theirsVD := shim.ValueDescriptorFromSchema(theirsSch)
baseVD := shim.ValueDescriptorFromSchema(baseSch)
m := prolly.NewEmptyConflictMap(ns, kd, oursVD, theirsVD, baseVD)
return ConflictIndexFromProllyMap(m), nil
return nil, fmt.Errorf("__DOLT_1__ conflicts should be stored in ArtifactIndex")
default:
return nil, errNbfUnkown
@@ -81,16 +73,6 @@ func NomsMapFromConflictIndex(i ConflictIndex) types.Map {
return i.(nomsConflictIndex).index
}
func ConflictIndexFromProllyMap(m prolly.ConflictMap) ConflictIndex {
return prollyConflictIndex{
index: m,
}
}
func ProllyMapFromConflictIndex(i ConflictIndex) prolly.ConflictMap {
return i.(prollyConflictIndex).index
}
func conflictIndexFromRef(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, ourSch, theirSch, baseSch schema.Schema, r types.Ref) (ConflictIndex, error) {
return conflictIndexFromAddr(ctx, vrw, ns, ourSch, theirSch, baseSch, r.TargetHash())
}
@@ -106,8 +88,7 @@ func conflictIndexFromAddr(ctx context.Context, vrw types.ValueReadWriter, ns tr
return ConflictIndexFromNomsMap(v.(types.Map), vrw), nil
case types.Format_DOLT_1:
m := shim.ConflictMapFromValue(v, ourSch, theirSch, baseSch, ns)
return ConflictIndexFromProllyMap(m), nil
return nil, fmt.Errorf("__DOLT_1__ conflicts should be stored in ArtifactIndex")
default:
return nil, errNbfUnkown
@@ -130,19 +111,3 @@ func (i nomsConflictIndex) Count() uint64 {
func (i nomsConflictIndex) Format() *types.NomsBinFormat {
return i.vrw.Format()
}
type prollyConflictIndex struct {
index prolly.ConflictMap
}
func (i prollyConflictIndex) HashOf() (hash.Hash, error) {
return i.index.HashOf(), nil
}
func (i prollyConflictIndex) Count() uint64 {
return uint64(i.index.Count())
}
func (i prollyConflictIndex) Format() *types.NomsBinFormat {
return i.index.Format()
}
+50 -38
@@ -420,12 +420,60 @@ func (root *RootValue) GenerateTagsForNewColumns(
return nil, fmt.Errorf("error generating tags, newColNames and newColKinds must be of equal length")
}
var existingCols []schema.Column
newTags := make([]uint64, len(newColNames))
// Get existing columns from the current root, or the head root if the table doesn't exist in the current root. The
// latter case is to support reusing table tags in the case of drop / create in the same session, which is common
// during import.
existingCols, err := getExistingColumns(ctx, root, headRoot, tableName, newColNames, newColKinds)
if err != nil {
return nil, err
}
// If we found any existing columns set them in the newTags list.
for _, col := range existingCols {
for i := range newColNames {
// Only re-use tags if the noms kind didn't change
// TODO: revisit this when new storage format is further along
if strings.ToLower(newColNames[i]) == strings.ToLower(col.Name) &&
newColKinds[i] == col.TypeInfo.NomsKind() {
newTags[i] = col.Tag
break
}
}
}
var existingColKinds []types.NomsKind
for _, col := range existingCols {
existingColKinds = append(existingColKinds, col.Kind)
}
existingTags, err := GetAllTagsForRoot(ctx, root)
if err != nil {
return nil, err
}
for i := range newTags {
if newTags[i] > 0 {
continue
}
newTags[i] = schema.AutoGenerateTag(existingTags, tableName, existingColKinds, newColNames[i], newColKinds[i])
existingColKinds = append(existingColKinds, newColKinds[i])
existingTags.Add(newTags[i], tableName)
}
return newTags, nil
}
func getExistingColumns(
ctx context.Context,
root, headRoot *RootValue,
tableName string,
newColNames []string,
newColKinds []types.NomsKind) ([]schema.Column, error) {
var existingCols []schema.Column
tbl, found, err := root.GetTable(ctx, tableName)
if err != nil {
return nil, err
@@ -456,43 +504,7 @@ func (root *RootValue) GenerateTagsForNewColumns(
}
}
// If we found any existing columns set them in the newTags list.
// We only do this if we want to reuse columns from a previous existing table with the same name
if headRoot != nil {
for _, col := range existingCols {
for i := range newColNames {
// Only re-use tags if the noms kind didn't change
// TODO: revisit this when new storage format is further along
if strings.ToLower(newColNames[i]) == strings.ToLower(col.Name) &&
newColKinds[i] == col.TypeInfo.NomsKind() {
newTags[i] = col.Tag
break
}
}
}
}
var existingColKinds []types.NomsKind
for _, col := range existingCols {
existingColKinds = append(existingColKinds, col.Kind)
}
existingTags, err := GetAllTagsForRoot(ctx, root)
if err != nil {
return nil, err
}
for i := range newTags {
if newTags[i] > 0 {
continue
}
newTags[i] = schema.AutoGenerateTag(existingTags, tableName, existingColKinds, newColNames[i], newColKinds[i])
existingColKinds = append(existingColKinds, newColKinds[i])
existingTags.Add(newTags[i], tableName)
}
return newTags, nil
return existingCols, nil
}
func (root *RootValue) GetAllSchemas(ctx context.Context) (map[string]schema.Schema, error) {
+1 -1
@@ -32,7 +32,7 @@ import (
)
var ErrCantFF = errors.New("can't fast forward merge")
var ErrInvalidPullArgs = errors.New("dolt pull takes at most one arg")
var ErrInvalidPullArgs = errors.New("dolt pull takes at most two args")
var ErrCannotPushRef = errors.New("cannot push ref")
var ErrFailedToSaveRepoState = errors.New("failed to save repo state")
var ErrFailedToDeleteRemote = errors.New("failed to delete remote")
+25 -20
@@ -45,7 +45,7 @@ var ErrCannotPushRef = errors.New("cannot push ref")
var ErrNoRefSpecForRemote = errors.New("no refspec for remote")
var ErrInvalidSetUpstreamArgs = errors.New("invalid set-upstream arguments")
var ErrInvalidFetchSpec = errors.New("invalid fetch spec")
var ErrPullWithRemoteNoUpstream = errors.New("You asked to pull from the remote '%s', but did not specify a branch. Because this is not the default configured remote for your current branch, you must specify a branch on the command line.")
var ErrPullWithRemoteNoUpstream = errors.New("You asked to pull from the remote '%s', but did not specify a branch. Because this is not the default configured remote for your current branch, you must specify a branch.")
var ErrPullWithNoRemoteAndNoUpstream = errors.New("There is no tracking information for the current branch.\nPlease specify which branch you want to merge with.\n\n\tdolt pull <remote> <branch>\n\nIf you wish to set tracking information for this branch you can do so with:\n\n\t dolt push --set-upstream <remote> <branch>\n")
func IsEmptyRemote(r Remote) bool {
@@ -360,45 +360,50 @@ type PullSpec struct {
Branch ref.DoltRef
}
func NewPullSpec(ctx context.Context, rsr RepoStateReader, remoteName string, squash, noff, force, remoteOnly bool) (*PullSpec, error) {
branch := rsr.CWBHeadRef()
func NewPullSpec(_ context.Context, rsr RepoStateReader, remoteName, remoteRefName string, squash, noff, force, remoteOnly bool) (*PullSpec, error) {
refSpecs, err := GetRefSpecs(rsr, remoteName)
if err != nil {
return nil, err
}
if len(refSpecs) == 0 {
return nil, ErrNoRefSpecForRemote
}
trackedBranches, err := rsr.GetBranches()
if err != nil {
return nil, err
}
trackedBranch, hasUpstream := trackedBranches[branch.GetPath()]
if !hasUpstream {
if remoteOnly {
return nil, fmt.Errorf(ErrPullWithRemoteNoUpstream.Error(), remoteName)
} else {
return nil, ErrPullWithNoRemoteAndNoUpstream
}
}
remotes, err := rsr.GetRemotes()
if err != nil {
return nil, err
}
remote := remotes[refSpecs[0].GetRemote()]
var remoteRef ref.DoltRef
if remoteRefName == "" {
branch := rsr.CWBHeadRef()
trackedBranches, err := rsr.GetBranches()
if err != nil {
return nil, err
}
trackedBranch, hasUpstream := trackedBranches[branch.GetPath()]
if !hasUpstream {
if remoteOnly {
return nil, fmt.Errorf(ErrPullWithRemoteNoUpstream.Error(), remoteName)
} else {
return nil, ErrPullWithNoRemoteAndNoUpstream
}
}
remoteRef = trackedBranch.Merge.Ref
} else {
remoteRef = ref.NewBranchRef(remoteRefName)
}
return &PullSpec{
Squash: squash,
Noff: noff,
RemoteName: remoteName,
Remote: remote,
RefSpecs: refSpecs,
Branch: trackedBranch.Merge.Ref,
Branch: remoteRef,
Force: force,
}, nil
}
+1
@@ -28,6 +28,7 @@ import (
var ErrFastForward = errors.New("fast forward")
var ErrSameTblAddedTwice = errors.New("table with same name added in 2 commits can't be merged")
var ErrTableDeletedAndModified = errors.New("conflict: table with same name deleted and modified ")
var ErrSchemaConflict = errors.New("schema conflict found, merge aborted. Please alter schema to prevent schema conflicts before merging")
// ErrCantOverwriteConflicts is returned when there are unresolved conflicts
// and the merge produces new conflicts. Because we currently don't have a model
+1 -1
@@ -126,7 +126,7 @@ func (rm *RootMerger) MergeTable(ctx context.Context, tblName string, opts edito
}
if schConflicts.Count() != 0 {
// error on schema conflicts for now
return nil, nil, schConflicts.AsError()
return nil, nil, fmt.Errorf("%w.\n%s", ErrSchemaConflict, schConflicts.AsError().Error())
}
mergeTbl, err := tm.leftTbl.UpdateSchema(ctx, mergeSch)
+1 -1
@@ -74,7 +74,7 @@ type ColConflict struct {
func (c ColConflict) String() string {
switch c.Kind {
case NameCollision:
return fmt.Sprintf("two columns with the name '%s'", c.Ours.Name)
return fmt.Sprintf("two columns with the same name '%s' have different tags. See https://github.com/dolthub/dolt/issues/3963", c.Ours.Name)
case TagCollision:
return fmt.Sprintf("different column definitions for our column %s and their column %s", c.Ours.Name, c.Theirs.Name)
}
-30
@@ -821,42 +821,12 @@ func MustDebugFormatProlly(t *testing.T, m prolly.Map) string {
return s
}
func MustDebugFormatConflictMap(t *testing.T, m prolly.ConflictMap) string {
s, err := prolly.ConflictDebugFormat(context.Background(), m)
require.NoError(t, err)
return s
}
func MustEqualProlly(t *testing.T, expected prolly.Map, actual prolly.Map) {
require.Equal(t, expected.HashOf(), actual.HashOf(),
"hashes differed. expected: %s\nactual: %s", MustDebugFormatProlly(t, expected), MustDebugFormatProlly(t, actual))
}
func MustEqualConflictMap(t *testing.T, expected prolly.ConflictMap, actual prolly.ConflictMap) {
require.Equal(t, expected.HashOf(), actual.HashOf(),
"conflict map hashes differed. expected: %s\nactual: %s", MustDebugFormatConflictMap(t, expected), MustDebugFormatConflictMap(t, actual))
}
func MustEqualArtifactMap(t *testing.T, expected prolly.ArtifactMap, actual prolly.ArtifactMap) {
require.Equal(t, expected.HashOf(), actual.HashOf(),
"artifact map hashes differed.")
}
//func diffStr(t tree.Diff, kD val.TupleDesc) string {
// var str string
// switch t.Type {
// case tree.AddedDiff:
// str = "added"
// case tree.ModifiedDiff:
// str = "modified"
// case tree.RemovedDiff:
// str = "removed"
// default:
// panic("unknown type")
// }
//
// key := kD.Format(val.Tuple(t.Key))
// str += " key " + key
//
// return str
//}
+4
@@ -50,6 +50,10 @@ func (tm TagMapping) Add(tag uint64, table string) {
tm[tag] = table
}
func (tm TagMapping) Remove(tag uint64) {
delete(tm, tag)
}
func (tm TagMapping) Size() int {
return len(tm)
}
@@ -92,16 +92,19 @@ func DoDoltPull(ctx *sql.Context, args []string) (int, int, error) {
return noConflictsOrViolations, threeWayMerge, err
}
if apr.NArg() > 1 {
if apr.NArg() > 2 {
return noConflictsOrViolations, threeWayMerge, actions.ErrInvalidPullArgs
}
var remoteName string
var remoteName, remoteRefName string
if apr.NArg() == 1 {
remoteName = apr.Arg(0)
} else if apr.NArg() == 2 {
remoteName = apr.Arg(0)
remoteRefName = apr.Arg(1)
}
pullSpec, err := env.NewPullSpec(ctx, dbData.Rsr, remoteName, apr.Contains(cli.SquashParam), apr.Contains(cli.NoFFParam), apr.Contains(cli.ForceFlag), apr.NArg() == 1)
pullSpec, err := env.NewPullSpec(ctx, dbData.Rsr, remoteName, remoteRefName, apr.Contains(cli.SquashParam), apr.Contains(cli.NoFFParam), apr.Contains(cli.ForceFlag), apr.NArg() == 1)
if err != nil {
return noConflictsOrViolations, threeWayMerge, err
}
@@ -122,6 +125,15 @@ func DoDoltPull(ctx *sql.Context, args []string) (int, int, error) {
return noConflictsOrViolations, threeWayMerge, env.ErrFailedToReadDb
}
hasBranch, err := srcDB.HasBranch(ctx, pullSpec.Branch.GetPath())
if err != nil {
return noConflictsOrViolations, threeWayMerge, err
}
if !hasBranch {
return noConflictsOrViolations, threeWayMerge,
fmt.Errorf("branch %q not found on remote", pullSpec.Branch.GetPath())
}
var conflicts int
var fastForward int
for _, refSpec := range pullSpec.RefSpecs {
@@ -40,9 +40,6 @@ type prollyIndexIter struct {
pkMap val.OrdinalMapping
pkBld *val.TupleBuilder
eg *errgroup.Group
rowChan chan sql.Row
// keyMap and valMap transform tuples from
// primary row storage into sql.Row's
keyMap, valMap val.OrdinalMapping
@@ -75,78 +72,48 @@ func newProllyIndexIter(
pkMap := ordinalMappingFromIndex(idx)
keyProj, valProj, ordProj := projectionMappings(idx.Schema(), projections)
eg, c := errgroup.WithContext(ctx)
iter := prollyIndexIter{
idx: idx,
indexIter: indexIter,
primary: primary,
pkBld: pkBld,
pkMap: pkMap,
eg: eg,
rowChan: make(chan sql.Row, indexLookupBufSize),
keyMap: keyProj,
valMap: valProj,
ordMap: ordProj,
sqlSch: pkSch.Schema,
}
eg.Go(func() error {
return iter.queueRows(c)
})
return iter, nil
}
// Next returns the next row from the iterator.
func (p prollyIndexIter) Next(ctx *sql.Context) (sql.Row, error) {
r, ok := <-p.rowChan
if ok {
return r, nil
}
if err := p.eg.Wait(); err != nil {
idxKey, _, err := p.indexIter.Next(ctx)
if err != nil {
return nil, err
}
return nil, io.EOF
for to := range p.pkMap {
from := p.pkMap.MapOrdinal(to)
p.pkBld.PutRaw(to, idxKey.GetField(from))
}
pk := p.pkBld.Build(sharePool)
r := make(sql.Row, len(p.keyMap)+len(p.valMap))
err = p.primary.Get(ctx, pk, func(key, value val.Tuple) error {
return p.rowFromTuples(ctx, key, value, r)
})
if err != nil {
return nil, err
}
return r, nil
}
func (p prollyIndexIter) Next2(ctx *sql.Context, frame *sql.RowFrame) error {
panic("unimplemented")
}
func (p prollyIndexIter) queueRows(ctx context.Context) error {
defer close(p.rowChan)
for {
idxKey, _, err := p.indexIter.Next(ctx)
if err != nil {
return err
}
for to := range p.pkMap {
from := p.pkMap.MapOrdinal(to)
p.pkBld.PutRaw(to, idxKey.GetField(from))
}
pk := p.pkBld.Build(sharePool)
r := make(sql.Row, len(p.keyMap)+len(p.valMap))
err = p.primary.Get(ctx, pk, func(key, value val.Tuple) error {
return p.rowFromTuples(ctx, key, value, r)
})
if err != nil {
return err
}
select {
case <-ctx.Done():
return ctx.Err()
case p.rowChan <- r:
}
}
}
func (p prollyIndexIter) rowFromTuples(ctx context.Context, key, value val.Tuple, r sql.Row) (err error) {
keyDesc, valDesc := p.primary.Descriptors()
+1 -1
@@ -87,7 +87,7 @@ func LoadedLocalLocation() *time.Location {
func BasicSelectTests() []SelectTest {
headCommitHash := "73hc2robs4v0kt9taoe3m5hd49dmrgun"
if types.Format_Default == types.Format_DOLT_DEV {
headCommitHash = "8rpomvicsi7h22diru7r8mmsk757hg8n"
headCommitHash = "4ej7hfduufg4o2837g3gc4p5uolrlmv9"
}
return []SelectTest{
{
+89 -11
@@ -1296,11 +1296,10 @@ func (t *AlterableDoltTable) RewriteInserter(
return nil, err
}
newSch, err := sqlutil.ToDoltSchema(ctx, ws.WorkingRoot(), t.Name(), newSchema, headRoot)
newSch, err := t.getNewSch(ctx, oldColumn, newColumn, oldSch, newSchema, ws.WorkingRoot(), headRoot)
if err != nil {
return nil, err
}
newSch = schema.CopyChecksConstraints(oldSch, newSch)
if isColumnDrop(oldSchema, newSchema) {
@@ -1385,6 +1384,92 @@ func (t *AlterableDoltTable) RewriteInserter(
return ed, nil
}
func (t *AlterableDoltTable) getNewSch(ctx context.Context, oldColumn, newColumn *sql.Column, oldSch schema.Schema, newSchema sql.PrimaryKeySchema, root, headRoot *doltdb.RootValue) (schema.Schema, error) {
if oldColumn == nil || newColumn == nil {
// Adding or dropping a column
newSch, err := sqlutil.ToDoltSchema(ctx, root, t.Name(), newSchema, headRoot)
if err != nil {
return nil, err
}
return newSch, err
}
oldTi, err := typeinfo.FromSqlType(oldColumn.Type)
if err != nil {
return nil, err
}
newTi, err := typeinfo.FromSqlType(newColumn.Type)
if err != nil {
return nil, err
}
if oldTi.NomsKind() != newTi.NomsKind() {
oldCol, ok := oldSch.GetAllCols().GetByName(oldColumn.Name)
if !ok {
return nil, fmt.Errorf("expected column %s to exist in the old schema but did not find it", oldColumn.Name)
}
// Remove the old column from |root| so that its kind will not seed the
// new tag.
root, err = filterColumnFromRoot(ctx, root, oldCol.Tag)
if err != nil {
return nil, err
}
}
newSch, err := sqlutil.ToDoltSchema(ctx, root, t.Name(), newSchema, headRoot)
if err != nil {
return nil, err
}
return newSch, nil
}
// filterColumnFromRoot removes any columns matching |colTag| from a |root|. Returns the updated root.
func filterColumnFromRoot(ctx context.Context, root *doltdb.RootValue, colTag uint64) (*doltdb.RootValue, error) {
newRoot := root
err := root.IterTables(ctx, func(name string, table *doltdb.Table, sch schema.Schema) (stop bool, err error) {
_, ok := sch.GetAllCols().GetByTag(colTag)
if !ok {
return false, nil
}
newSch, err := filterColumnFromSch(sch, colTag)
if err != nil {
return true, err
}
t, err := table.UpdateSchema(ctx, newSch)
if err != nil {
return true, err
}
newRoot, err = newRoot.PutTable(ctx, name, t)
if err != nil {
return true, err
}
return false, nil
})
if err != nil {
return nil, err
}
return newRoot, nil
}
func filterColumnFromSch(sch schema.Schema, colTag uint64) (schema.Schema, error) {
var cols []schema.Column
_ = sch.GetAllCols().Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
if tag == colTag {
return false, nil
}
cols = append(cols, col)
return false, nil
})
colCol := schema.NewColCollection(cols...)
newSch, err := schema.SchemaFromCols(colCol)
if err != nil {
return nil, err
}
return newSch, nil
}
// validateSchemaChange returns an error if the schema change given is not legal
func validateSchemaChange(
tableName string,
@@ -1580,15 +1665,8 @@ func (t *AlterableDoltTable) ModifyColumn(ctx *sql.Context, columnName string, c
// TODO: move this logic into ShouldRewrite
if !existingCol.TypeInfo.Equals(col.TypeInfo) {
if existingCol.Kind != col.Kind { // We only change the tag when the underlying Noms kind changes
tags, err := root.GenerateTagsForNewColumns(ctx, t.tableName, []string{col.Name}, []types.NomsKind{col.Kind}, nil)
if err != nil {
return err
}
if len(tags) != 1 {
return fmt.Errorf("expected a generated tag length of 1")
}
col.Tag = tags[0]
if existingCol.Kind != col.Kind {
panic("table cannot be modified in place")
}
}
+3
@@ -32,6 +32,9 @@ do
--single) export GOMAXPROCS=1
;;
--row2) export ENABLE_ROW_ITER_2=true
;;
# specify sysbench benchmark
*) SYSBENCH_TEST="$1"
;;
+4 -2
@@ -30,7 +30,9 @@ type AddressMap struct {
}
func NewEmptyAddressMap(ns tree.NodeStore) AddressMap {
return NewAddressMap(newEmptyMapNode(ns.Pool()), ns)
serializer := message.NewAddressMapSerializer(ns.Pool())
msg := serializer.Serialize(nil, nil, nil, 0)
return NewAddressMap(tree.NodeFromBytes(msg), ns)
}
func NewAddressMap(node tree.Node, ns tree.NodeStore) AddressMap {
@@ -145,7 +147,7 @@ func (wr AddressMapEditor) Delete(ctx context.Context, name string) error {
func (wr AddressMapEditor) Flush(ctx context.Context) (AddressMap, error) {
tr := wr.addresses.tree
serializer := message.AddressMapSerializer{Pool: tr.ns.Pool()}
serializer := message.NewAddressMapSerializer(tr.ns.Pool())
root, err := tree.ApplyMutations(ctx, tr.ns, tr.root, serializer, wr.addresses.mutations(), tr.compareItems)
if err != nil {
+3 -6
@@ -72,7 +72,7 @@ func NewArtifactMap(node tree.Node, ns tree.NodeStore, srcKeyDesc val.TupleDesc)
// the corresponding row map and inserts the given |tups|. |tups| must be a key followed by a value.
func NewArtifactMapFromTuples(ctx context.Context, ns tree.NodeStore, srcKeyDesc val.TupleDesc, tups ...val.Tuple) (ArtifactMap, error) {
kd, vd := mergeArtifactsDescriptorsFromSource(srcKeyDesc)
serializer := message.MergeArtifactSerializer{KeyDesc: kd, Pool: ns.Pool()}
serializer := message.NewMergeArtifactSerializer(kd, ns.Pool())
ch, err := tree.NewEmptyChunker(ctx, ns, serializer)
if err != nil {
@@ -296,7 +296,7 @@ func (m ArtifactMap) iterAllOfTypes(ctx context.Context, artTypes ...ArtifactTyp
}
func MergeArtifactMaps(ctx context.Context, left, right, base ArtifactMap, cb tree.CollisionFn) (ArtifactMap, error) {
serializer := message.MergeArtifactSerializer{KeyDesc: base.keyDesc, Pool: left.tuples.ns.Pool()}
serializer := message.NewMergeArtifactSerializer(base.keyDesc, left.tuples.ns.Pool())
tuples, err := mergeOrderedTrees(ctx, left.tuples, right.tuples, base.tuples, cb, serializer, base.valDesc)
if err != nil {
return ArtifactMap{}, err
@@ -411,10 +411,7 @@ func (wr ArtifactsEditor) Delete(ctx context.Context, key val.Tuple) error {
}
func (wr ArtifactsEditor) Flush(ctx context.Context) (ArtifactMap, error) {
s := message.MergeArtifactSerializer{
KeyDesc: wr.artKB.Desc,
Pool: wr.NodeStore().Pool(),
}
s := message.NewMergeArtifactSerializer(wr.artKB.Desc, wr.NodeStore().Pool())
m, err := wr.mut.flushWithSerializer(ctx, s)
if err != nil {
@@ -18,6 +18,7 @@ import (
"context"
"fmt"
"math/rand"
"sync"
"testing"
"github.com/dolthub/dolt/go/store/val"
@@ -153,3 +154,110 @@ func benchmarkTypesMapGetParallel(b *testing.B, size uint64) {
b.ReportAllocs()
})
}
const mapScale = 4096
func BenchmarkGoMapGet(b *testing.B) {
b.Skip()
kv1 := makeGoMap(mapScale)
kv2 := makeSyncMap(mapScale)
b.ResetTimer()
b.Run("test golang map", func(b *testing.B) {
for j := 0; j < b.N; j++ {
_, ok := kv1[uint64(j%mapScale)]
if !ok {
b.Fail()
}
}
b.ReportAllocs()
})
b.Run("test golang sync map", func(b *testing.B) {
for j := 0; j < b.N; j++ {
_, ok := kv2.Load(uint64(j % mapScale))
if !ok {
b.Fail()
}
}
b.ReportAllocs()
})
}
func BenchmarkParallelGoMapGet(b *testing.B) {
b.Skip()
kv1 := makeGoMap(mapScale)
kv2 := makeSyncMap(mapScale)
b.ResetTimer()
b.Run("test golang map", func(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
j := 0
for pb.Next() {
_, _ = kv1[uint64(j%mapScale)]
j++
}
})
b.ReportAllocs()
})
b.Run("test golang sync map", func(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
v, _ := kv2.Load(uint64(1234))
tup := v.(val.Tuple)
j := 0
for pb.Next() {
k := uint64(j % mapScale)
if j%10 == 0 {
kv2.Store(k, tup)
} else {
_, _ = kv2.Load(k)
}
j++
}
})
b.ReportAllocs()
})
}
func makeGoMap(scale uint64) map[uint64]val.Tuple {
src := rand.NewSource(0)
vb := val.NewTupleBuilder(val.NewTupleDescriptor(
val.Type{Enc: val.Int64Enc, Nullable: true},
val.Type{Enc: val.Int64Enc, Nullable: true},
val.Type{Enc: val.Int64Enc, Nullable: true},
val.Type{Enc: val.Int64Enc, Nullable: true},
val.Type{Enc: val.Int64Enc, Nullable: true},
))
kv := make(map[uint64]val.Tuple, scale)
for i := uint64(0); i < scale; i++ {
vb.PutInt64(0, src.Int63())
vb.PutInt64(1, src.Int63())
vb.PutInt64(2, src.Int63())
vb.PutInt64(3, src.Int63())
vb.PutInt64(4, src.Int63())
kv[i] = vb.Build(shared)
}
return kv
}
func makeSyncMap(scale uint64) *sync.Map {
src := rand.NewSource(0)
vb := val.NewTupleBuilder(val.NewTupleDescriptor(
val.Type{Enc: val.Int64Enc, Nullable: true},
val.Type{Enc: val.Int64Enc, Nullable: true},
val.Type{Enc: val.Int64Enc, Nullable: true},
val.Type{Enc: val.Int64Enc, Nullable: true},
val.Type{Enc: val.Int64Enc, Nullable: true},
))
kv := &sync.Map{}
for i := uint64(0); i < scale; i++ {
vb.PutInt64(0, src.Int63())
vb.PutInt64(1, src.Int63())
vb.PutInt64(2, src.Int63())
vb.PutInt64(3, src.Int63())
vb.PutInt64(4, src.Int63())
kv.Store(i, vb.Build(shared))
}
return kv
}
+2 -2
View File
@@ -47,7 +47,7 @@ func (o commitClosureKeyOrdering) Compare(left, right CommitClosureKey) int {
}
func NewEmptyCommitClosure(ns tree.NodeStore) CommitClosure {
serializer := message.CommitClosureSerializer{Pool: ns.Pool()}
serializer := message.NewCommitClosureSerializer(ns.Pool())
msg := serializer.Serialize(nil, nil, nil, 0)
node := tree.NodeFromBytes(msg)
return NewCommitClosure(node, ns)
@@ -138,7 +138,7 @@ func (wr CommitClosureEditor) Delete(ctx context.Context, key CommitClosureKey)
func (wr CommitClosureEditor) Flush(ctx context.Context) (CommitClosure, error) {
tr := wr.closure.tree
serializer := message.CommitClosureSerializer{Pool: tr.ns.Pool()}
serializer := message.NewCommitClosureSerializer(tr.ns.Pool())
root, err := tree.ApplyMutations(ctx, tr.ns, tr.root, serializer, wr.closure.mutations(), tr.compareItems)
if err != nil {
-222
@@ -1,222 +0,0 @@
// Copyright 2021 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prolly
import (
"context"
"fmt"
"io"
"strings"
"github.com/dolthub/dolt/go/store/pool"
"github.com/dolthub/dolt/go/store/prolly/message"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/store/val"
)
type Conflict val.Triple[val.Tuple]
func (c Conflict) OurValue() val.Tuple {
return val.Triple[val.Tuple](c).First()
}
func (c Conflict) TheirValue() val.Tuple {
return val.Triple[val.Tuple](c).Second()
}
func (c Conflict) BaseValue() val.Tuple {
return val.Triple[val.Tuple](c).Third()
}
type ConflictIter kvIter[val.Tuple, Conflict]
type ConflictMap struct {
conflicts orderedTree[val.Tuple, Conflict, val.TupleDesc]
keyDesc val.TupleDesc
ourDesc val.TupleDesc
theirDesc val.TupleDesc
baseDesc val.TupleDesc
}
func NewConflictMap(root tree.Node, ns tree.NodeStore, key, ours, theirs, base val.TupleDesc) ConflictMap {
conflicts := orderedTree[val.Tuple, Conflict, val.TupleDesc]{
root: root,
ns: ns,
order: key,
}
return ConflictMap{
conflicts: conflicts,
keyDesc: key,
ourDesc: ours,
theirDesc: theirs,
baseDesc: base,
}
}
func NewEmptyConflictMap(ns tree.NodeStore, key, ours, theirs, base val.TupleDesc) ConflictMap {
return NewConflictMap(newEmptyMapNode(ns.Pool()), ns, key, ours, theirs, base)
}
func (c ConflictMap) Count() int {
return c.conflicts.count()
}
func (c ConflictMap) Height() int {
return c.conflicts.height()
}
func (c ConflictMap) HashOf() hash.Hash {
return c.conflicts.hashOf()
}
func (c ConflictMap) Node() tree.Node {
return c.conflicts.root
}
func (c ConflictMap) Format() *types.NomsBinFormat {
return c.conflicts.ns.Format()
}
func (c ConflictMap) Descriptors() (key, ours, theirs, base val.TupleDesc) {
return c.keyDesc, c.ourDesc, c.theirDesc, c.baseDesc
}
func (c ConflictMap) WalkAddresses(ctx context.Context, cb tree.AddressCb) error {
return c.conflicts.walkAddresses(ctx, cb)
}
func (c ConflictMap) WalkNodes(ctx context.Context, cb tree.NodeCb) error {
return c.conflicts.walkNodes(ctx, cb)
}
func (c ConflictMap) Get(ctx context.Context, key val.Tuple, cb KeyValueFn[val.Tuple, Conflict]) (err error) {
return c.conflicts.get(ctx, key, cb)
}
func (c ConflictMap) Has(ctx context.Context, key val.Tuple) (ok bool, err error) {
return c.conflicts.has(ctx, key)
}
func (c ConflictMap) IterAll(ctx context.Context) (ConflictIter, error) {
return c.conflicts.iterAll(ctx)
}
func (c ConflictMap) IterOrdinalRange(ctx context.Context, start, stop uint64) (ConflictIter, error) {
return c.conflicts.iterOrdinalRange(ctx, start, stop)
}
// Pool returns the pool.BuffPool of the underlying conflicts' tree.NodeStore
func (c ConflictMap) Pool() pool.BuffPool {
return c.conflicts.ns.Pool()
}
func (c ConflictMap) Editor() ConflictEditor {
return ConflictEditor{
conflicts: c.conflicts.mutate(),
keyDesc: c.keyDesc,
ourDesc: c.ourDesc,
theirDesc: c.theirDesc,
baseDesc: c.baseDesc,
}
}
type ConflictEditor struct {
conflicts orderedMap[val.Tuple, Conflict, val.TupleDesc]
keyDesc val.TupleDesc
ourDesc val.TupleDesc
theirDesc val.TupleDesc
baseDesc val.TupleDesc
}
func (wr ConflictEditor) Add(ctx context.Context, key, ourVal, theirVal, baseVal val.Tuple) error {
p := wr.conflicts.tree.ns.Pool()
c := val.NewTriple(p, ourVal, theirVal, baseVal)
return wr.conflicts.put(ctx, key, Conflict(c))
}
func (wr ConflictEditor) Delete(ctx context.Context, key val.Tuple) error {
return wr.conflicts.delete(ctx, key)
}
func (wr ConflictEditor) Flush(ctx context.Context) (ConflictMap, error) {
tr := wr.conflicts.tree
serializer := message.ProllyMapSerializer{Pool: tr.ns.Pool()}
root, err := tree.ApplyMutations(ctx, tr.ns, tr.root, serializer, wr.conflicts.mutations(), tr.compareItems)
if err != nil {
return ConflictMap{}, err
}
return ConflictMap{
conflicts: orderedTree[val.Tuple, Conflict, val.TupleDesc]{
root: root,
ns: tr.ns,
order: tr.order,
},
keyDesc: wr.keyDesc,
ourDesc: wr.ourDesc,
theirDesc: wr.theirDesc,
baseDesc: wr.baseDesc,
}, nil
}
// ConflictDebugFormat formats a ConflictMap.
func ConflictDebugFormat(ctx context.Context, m ConflictMap) (string, error) {
kd, ourVD, theirVD, baseVD := m.Descriptors()
iter, err := m.IterAll(ctx)
if err != nil {
return "", err
}
c := m.Count()
var sb strings.Builder
sb.WriteString(fmt.Sprintf("Prolly Map (count: %d) {\n", c))
for {
k, v, err := iter.Next(ctx)
if err == io.EOF {
break
}
if err != nil {
return "", err
}
sb.WriteString("\t")
sb.WriteString(kd.Format(k))
sb.WriteString(": ")
if len(v.OurValue()) == 0 {
sb.WriteString("NULL")
} else {
sb.WriteString(ourVD.Format(v.OurValue()))
}
sb.WriteString(", ")
if len(v.TheirValue()) == 0 {
sb.WriteString("NULL")
} else {
sb.WriteString(theirVD.Format(v.TheirValue()))
}
sb.WriteString(", ")
if len(v.BaseValue()) == 0 {
sb.WriteString("NULL")
} else {
sb.WriteString(baseVD.Format(v.BaseValue()))
}
sb.WriteString(",\n")
}
sb.WriteString("}")
return sb.String(), nil
}
+3 -12
@@ -71,7 +71,7 @@ type TupleIter interface {
}
func NewMapFromTupleIter(ctx context.Context, ns tree.NodeStore, keyDesc, valDesc val.TupleDesc, iter TupleIter) (Map, error) {
serializer := message.ProllyMapSerializer{Pool: ns.Pool()}
serializer := message.NewProllyMapSerializer(valDesc, ns.Pool())
ch, err := tree.NewEmptyChunker(ctx, ns, serializer)
if err != nil {
return Map{}, err
@@ -102,10 +102,7 @@ func NewMapFromTupleIter(ctx context.Context, ns tree.NodeStore, keyDesc, valDes
func MutateMapWithTupleIter(ctx context.Context, m Map, iter TupleIter) (Map, error) {
t := m.tuples
i := mutationIter{iter: iter}
s := message.ProllyMapSerializer{
Pool: t.ns.Pool(),
ValDesc: m.valDesc,
}
s := message.NewProllyMapSerializer(m.valDesc, t.ns.Pool())
root, err := tree.ApplyMutations(ctx, t.ns, t.root, s, i, t.compareItems)
if err != nil {
@@ -128,7 +125,7 @@ func DiffMaps(ctx context.Context, from, to Map, cb DiffFn) error {
}
func MergeMaps(ctx context.Context, left, right, base Map, cb tree.CollisionFn) (Map, error) {
serializer := message.ProllyMapSerializer{Pool: left.tuples.ns.Pool()}
serializer := message.NewProllyMapSerializer(left.valDesc, base.NodeStore().Pool())
tuples, err := mergeOrderedTrees(ctx, left.tuples, right.tuples, base.tuples, cb, serializer, base.valDesc)
if err != nil {
return Map{}, err
@@ -304,12 +301,6 @@ func (p *pointLookup) Next(context.Context) (key, value val.Tuple, err error) {
return
}
func newEmptyMapNode(pool pool.BuffPool) tree.Node {
serializer := message.ProllyMapSerializer{Pool: pool}
msg := serializer.Serialize(nil, nil, nil, 0)
return tree.NodeFromBytes(msg)
}
// DebugFormat formats a Map.
func DebugFormat(ctx context.Context, m Map) (string, error) {
kd, vd := m.Descriptors()
+4 -2
@@ -236,7 +236,9 @@ func TestMutateMapWithTupleIter(t *testing.T) {
}
func TestNewEmptyNode(t *testing.T) {
empty := newEmptyMapNode(sharedPool)
s := message.NewProllyMapSerializer(val.TupleDesc{}, sharedPool)
msg := s.Serialize(nil, nil, nil, 0)
empty := tree.NodeFromBytes(msg)
assert.Equal(t, 0, empty.Level())
assert.Equal(t, 0, empty.Count())
assert.Equal(t, 0, empty.TreeCount())
@@ -293,7 +295,7 @@ func prollyMapFromTuples(t *testing.T, kd, vd val.TupleDesc, tuples [][2]val.Tup
ctx := context.Background()
ns := tree.NewTestNodeStore()
serializer := message.ProllyMapSerializer{Pool: ns.Pool()}
serializer := message.NewProllyMapSerializer(vd, ns.Pool())
chunker, err := tree.NewEmptyChunker(ctx, ns, serializer)
require.NoError(t, err)
+6 -2
@@ -33,8 +33,12 @@ const (
var addressMapFileID = []byte(serial.AddressMapFileID)
func NewAddressMapSerializer(pool pool.BuffPool) AddressMapSerializer {
return AddressMapSerializer{pool: pool}
}
type AddressMapSerializer struct {
Pool pool.BuffPool
pool pool.BuffPool
}
var _ Serializer = AddressMapSerializer{}
@@ -46,7 +50,7 @@ func (s AddressMapSerializer) Serialize(keys, addrs [][]byte, subtrees []uint64,
)
keySz, addrSz, totalSz := estimateAddressMapSize(keys, addrs, subtrees)
b := getFlatbufferBuilder(s.Pool, totalSz)
b := getFlatbufferBuilder(s.pool, totalSz)
// keys
keyArr = writeItemBytes(b, keys, keySz)
+6 -2
@@ -25,15 +25,19 @@ import (
var blobFileID = []byte(serial.BlobFileID)
func NewBlobSerializer(pool pool.BuffPool) BlobSerializer {
return BlobSerializer{pool: pool}
}
type BlobSerializer struct {
Pool pool.BuffPool
pool pool.BuffPool
}
var _ Serializer = BlobSerializer{}
func (s BlobSerializer) Serialize(keys, values [][]byte, subtrees []uint64, level int) serial.Message {
bufSz := estimateBlobSize(values, subtrees)
b := getFlatbufferBuilder(s.Pool, bufSz)
b := getFlatbufferBuilder(s.pool, bufSz)
if level == 0 {
assertTrue(len(values) == 1)
+6 -2
@@ -119,8 +119,12 @@ func walkCommitClosureAddresses(ctx context.Context, msg serial.Message, cb func
var commitClosureFileID = []byte(serial.CommitClosureFileID)
func NewCommitClosureSerializer(pool pool.BuffPool) CommitClosureSerializer {
return CommitClosureSerializer{pool: pool}
}
type CommitClosureSerializer struct {
Pool pool.BuffPool
pool pool.BuffPool
}
var _ Serializer = CommitClosureSerializer{}
@@ -129,7 +133,7 @@ func (s CommitClosureSerializer) Serialize(keys, addrs [][]byte, subtrees []uint
var keyArr, addrArr, cardArr fb.UOffsetT
keySz, addrSz, totalSz := estimateCommitClosureSize(keys, addrs, subtrees)
b := getFlatbufferBuilder(s.Pool, totalSz)
b := getFlatbufferBuilder(s.pool, totalSz)
// keys
keyArr = writeItemBytes(b, keys, keySz)
+14 -7
@@ -37,9 +37,16 @@ const (
var mergeArtifactFileID = []byte(serial.MergeArtifactsFileID)
func NewMergeArtifactSerializer(keyDesc val.TupleDesc, pool pool.BuffPool) MergeArtifactSerializer {
return MergeArtifactSerializer{
keyDesc: keyDesc,
pool: pool,
}
}
type MergeArtifactSerializer struct {
KeyDesc val.TupleDesc
Pool pool.BuffPool
keyDesc val.TupleDesc
pool pool.BuffPool
}
var _ Serializer = MergeArtifactSerializer{}
@@ -52,8 +59,8 @@ func (s MergeArtifactSerializer) Serialize(keys, values [][]byte, subtrees []uin
refArr, cardArr fb.UOffsetT
)
keySz, valSz, bufSz := estimateMergeArtifactSize(keys, values, subtrees, s.KeyDesc.AddressFieldCount())
b := getFlatbufferBuilder(s.Pool, bufSz)
keySz, valSz, bufSz := estimateMergeArtifactSize(keys, values, subtrees, s.keyDesc.AddressFieldCount())
b := getFlatbufferBuilder(s.pool, bufSz)
// serialize keys and offsets
keyTups = writeItemBytes(b, keys, keySz)
@@ -66,9 +73,9 @@ func (s MergeArtifactSerializer) Serialize(keys, values [][]byte, subtrees []uin
serial.MergeArtifactsStartValueOffsetsVector(b, len(values)+1)
valOffs = writeItemOffsets(b, values, valSz)
// serialize offsets of chunk addresses within |keyTups|
if s.KeyDesc.AddressFieldCount() > 0 {
serial.MergeArtifactsStartKeyAddressOffsetsVector(b, countAddresses(keys, s.KeyDesc))
keyAddrOffs = writeAddressOffsets(b, keys, keySz, s.KeyDesc)
if s.keyDesc.AddressFieldCount() > 0 {
serial.MergeArtifactsStartKeyAddressOffsetsVector(b, countAddresses(keys, s.keyDesc))
keyAddrOffs = writeAddressOffsets(b, keys, keySz, s.keyDesc)
}
} else {
// serialize child refs and subtree counts for internal nodes
+11 -7
@@ -37,9 +37,13 @@ const (
var prollyMapFileID = []byte(serial.ProllyTreeNodeFileID)
func NewProllyMapSerializer(valueDesc val.TupleDesc, pool pool.BuffPool) ProllyMapSerializer {
return ProllyMapSerializer{valDesc: valueDesc, pool: pool}
}
type ProllyMapSerializer struct {
Pool pool.BuffPool
ValDesc val.TupleDesc
valDesc val.TupleDesc
pool pool.BuffPool
}
var _ Serializer = ProllyMapSerializer{}
@@ -52,8 +56,8 @@ func (s ProllyMapSerializer) Serialize(keys, values [][]byte, subtrees []uint64,
refArr, cardArr fb.UOffsetT
)
keySz, valSz, bufSz := estimateProllyMapSize(keys, values, subtrees, s.ValDesc.AddressFieldCount())
b := getFlatbufferBuilder(s.Pool, bufSz)
keySz, valSz, bufSz := estimateProllyMapSize(keys, values, subtrees, s.valDesc.AddressFieldCount())
b := getFlatbufferBuilder(s.pool, bufSz)
// serialize keys and offsets
keyTups = writeItemBytes(b, keys, keySz)
@@ -66,9 +70,9 @@ func (s ProllyMapSerializer) Serialize(keys, values [][]byte, subtrees []uint64,
serial.ProllyTreeNodeStartValueOffsetsVector(b, len(values)+1)
valOffs = writeItemOffsets(b, values, valSz)
// serialize offsets of chunk addresses within |valTups|
if s.ValDesc.AddressFieldCount() > 0 {
serial.ProllyTreeNodeStartValueAddressOffsetsVector(b, countAddresses(values, s.ValDesc))
valAddrOffs = writeAddressOffsets(b, values, valSz, s.ValDesc)
if s.valDesc.AddressFieldCount() > 0 {
serial.ProllyTreeNodeStartValueAddressOffsetsVector(b, countAddresses(values, s.valDesc))
valAddrOffs = writeAddressOffsets(b, values, valSz, s.valDesc)
}
} else {
// serialize child refs and subtree counts for internal nodes
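The constructor-over-struct-literal change above repeats for every serializer touched by this commit; call sites migrate roughly as follows (a sketch based on the surrounding hunks, not an exhaustive list):

    // before: exported fields, and the value descriptor was easy to omit
    //   s := message.ProllyMapSerializer{Pool: ns.Pool(), ValDesc: vd}
    // after: fields are unexported and the descriptor is required up front
    s := message.NewProllyMapSerializer(vd, ns.Pool())

    // analogous constructors introduced here:
    //   message.NewAddressMapSerializer(pool)
    //   message.NewBlobSerializer(pool)
    //   message.NewCommitClosureSerializer(pool)
    //   message.NewMergeArtifactSerializer(keyDesc, pool)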
+2 -1
@@ -21,6 +21,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/store/pool"
"github.com/dolthub/dolt/go/store/val"
)
var sharedPool = pool.NewBuffPool()
@@ -29,7 +30,7 @@ func TestGetKeyValueOffsetsVectors(t *testing.T) {
for trial := 0; trial < 100; trial++ {
keys, values := randomByteSlices(t, (testRand.Int()%101)+50)
require.True(t, sumSize(keys)+sumSize(values) < MaxVectorOffset)
s := ProllyMapSerializer{Pool: sharedPool}
s := ProllyMapSerializer{valDesc: val.TupleDesc{}, pool: sharedPool}
msg := s.Serialize(keys, values, nil, 0)
// uses hard-coded vtable slot
+2 -5
@@ -50,11 +50,8 @@ func newMutableMap(m Map) MutableMap {
// Map materializes all pending and applied mutations in the MutableMap.
func (mut MutableMap) Map(ctx context.Context) (Map, error) {
serializer := message.ProllyMapSerializer{
Pool: mut.NodeStore().Pool(),
ValDesc: mut.valDesc,
}
return mut.flushWithSerializer(ctx, serializer)
s := message.NewProllyMapSerializer(mut.valDesc, mut.NodeStore().Pool())
return mut.flushWithSerializer(ctx, s)
}
func (mut MutableMap) flushWithSerializer(ctx context.Context, s message.Serializer) (Map, error) {
+1 -1
@@ -137,7 +137,7 @@ func makeMutableMap(t *testing.T, count int) (testMap, [][2]val.Tuple) {
tree.SortTuplePairs(mapTuples, kd)
tree.SortTuplePairs(memTuples, kd)
serializer := message.ProllyMapSerializer{Pool: ns.Pool()}
serializer := message.NewProllyMapSerializer(vd, ns.Pool())
chunker, err := tree.NewEmptyChunker(ctx, ns, serializer)
require.NoError(t, err)
for _, pair := range mapTuples {
-12
@@ -32,10 +32,6 @@ func ValueFromMap(m prolly.Map) types.Value {
return tree.ValueFromNode(m.Node())
}
func ValueFromConflictMap(m prolly.ConflictMap) types.Value {
return tree.ValueFromNode(m.Node())
}
func ValueFromArtifactMap(m prolly.ArtifactMap) types.Value {
return tree.ValueFromNode(m.Node())
}
@@ -47,14 +43,6 @@ func MapFromValue(v types.Value, sch schema.Schema, ns tree.NodeStore) prolly.Ma
return prolly.NewMap(root, ns, kd, vd)
}
func ConflictMapFromValue(v types.Value, ourSchema, theirSchema, baseSchema schema.Schema, ns tree.NodeStore) prolly.ConflictMap {
root := NodeFromValue(v)
kd, ourVD := MapDescriptorsFromSchema(ourSchema)
theirVD := ValueDescriptorFromSchema(theirSchema)
baseVD := ValueDescriptorFromSchema(baseSchema)
return prolly.NewConflictMap(root, ns, kd, ourVD, theirVD, baseVD)
}
func MapDescriptorsFromSchema(sch schema.Schema) (kd, vd val.TupleDesc) {
kd = KeyDescriptorFromSchema(sch)
vd = ValueDescriptorFromSchema(sch)
+2 -2
@@ -47,8 +47,8 @@ func makeTree(t *testing.T, tuples [][2]val.Tuple) Node {
ns := NewTestNodeStore()
// todo(andy): move this test
serializer := message.ProllyMapSerializer{Pool: ns.Pool()}
chunker, err := newEmptyChunker(ctx, ns, serializer)
s := message.NewProllyMapSerializer(val.TupleDesc{}, ns.Pool())
chunker, err := newEmptyChunker(ctx, ns, s)
require.NoError(t, err)
for _, pair := range tuples {
err := chunker.AddPair(ctx, Item(pair[0]), Item(pair[1]))
+1 -1
@@ -242,7 +242,7 @@ type ImmutableTree struct {
}
func NewImmutableTreeFromReader(ctx context.Context, r io.Reader, ns NodeStore, chunkSize int) (*ImmutableTree, error) {
s := message.BlobSerializer{Pool: ns.Pool()}
s := message.NewBlobSerializer(ns.Pool())
root, err := buildImmutableTree(ctx, r, ns, s, chunkSize)
if errors.Is(err, io.EOF) {
return &ImmutableTree{Addr: hash.Hash{}}, nil
+3 -3
@@ -138,7 +138,7 @@ func TestWriteImmutableTree(t *testing.T) {
ctx := context.Background()
r := bytes.NewReader(buf)
ns := NewTestNodeStore()
serializer := message.BlobSerializer{Pool: ns.Pool()}
serializer := message.NewBlobSerializer(ns.Pool())
root, err := buildImmutableTree(ctx, r, ns, serializer, tt.chunkSize)
if tt.err != nil {
require.True(t, errors.Is(err, tt.err))
@@ -361,8 +361,8 @@ func newTree(t *testing.T, ns NodeStore, keyCnt, blobLen, chunkSize int) Node {
tuples[i][1] = valBld.Build(sharedPool)
}
serializer := message.ProllyMapSerializer{Pool: ns.Pool(), ValDesc: valDesc}
chunker, err := newEmptyChunker(ctx, ns, serializer)
s := message.NewProllyMapSerializer(valDesc, ns.Pool())
chunker, err := newEmptyChunker(ctx, ns, s)
require.NoError(t, err)
for _, pair := range tuples {
err := chunker.AddPair(ctx, Item(pair[0]), Item(pair[1]))
+1 -1
@@ -81,7 +81,7 @@ func testNewCursorAtItem(t *testing.T, count int) {
func randomTree(t *testing.T, count int) (Node, [][2]Item, NodeStore) {
ctx := context.Background()
ns := NewTestNodeStore()
serializer := message.ProllyMapSerializer{Pool: ns.Pool()}
serializer := message.NewProllyMapSerializer(valDesc, ns.Pool())
chkr, err := newEmptyChunker(ctx, ns, serializer)
require.NoError(t, err)
+2 -1
@@ -26,6 +26,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/store/prolly/message"
"github.com/dolthub/dolt/go/store/val"
)
func init() {
@@ -101,7 +102,7 @@ func makeProllyTreeWithSizes(t *testing.T, fact splitterFactory, scale, keySz, v
ctx := context.Background()
ns = NewTestNodeStore()
serializer := message.ProllyMapSerializer{Pool: ns.Pool()}
serializer := message.NewProllyMapSerializer(val.TupleDesc{}, ns.Pool())
chunker, err := newEmptyChunker(ctx, ns, serializer)
require.NoError(t, err)
+1 -1
@@ -80,7 +80,7 @@ func TestNodeHashValueCompatibility(t *testing.T) {
h1 := hash.Parse("kvup5vdur99ush7c18g0kjc6rhdkfdgo")
h2 := hash.Parse("7e54ill10nji9oao1ja88buh9itaj7k9")
msg := message.AddressMapSerializer{Pool: sharedPool}.Serialize(
msg := message.NewAddressMapSerializer(sharedPool).Serialize(
[][]byte{[]byte("chopin"), []byte("listz")},
[][]byte{h1[:], h2[:]},
[]uint64{},
+1 -1
@@ -163,7 +163,7 @@ func newLeafNode(keys, values []Item) Node {
vv[i] = values[i]
}
s := message.ProllyMapSerializer{Pool: sharedPool}
s := message.NewProllyMapSerializer(val.TupleDesc{}, sharedPool)
msg := s.Serialize(kk, vv, nil, 0)
return NodeFromBytes(msg)
}
+1 -1
@@ -198,7 +198,7 @@ func prollyMapFromKeysAndValues(t *testing.T, kd, vd val.TupleDesc, keys, values
ns := tree.NewTestNodeStore()
require.Equal(t, len(keys), len(values))
serializer := message.ProllyMapSerializer{Pool: ns.Pool()}
serializer := message.NewProllyMapSerializer(vd, ns.Pool())
chunker, err := tree.NewEmptyChunker(ctx, ns, serializer)
require.NoError(t, err)
+2
@@ -372,6 +372,8 @@ func (sm SerialMessage) walkRefs(nbf *NomsBinFormat, cb RefCallback) error {
fallthrough
case serial.MergeArtifactsFileID:
fallthrough
case serial.BlobFileID:
fallthrough
case serial.CommitClosureFileID:
return message.WalkAddresses(context.TODO(), serial.Message(sm), func(ctx context.Context, addr hash.Hash) error {
r, err := constructRef(nbf, addr, PrimitiveTypeMap[ValueKind], SerialMessageRefHeight)
+53
@@ -236,3 +236,56 @@ DELIM
[[ "$output" =~ "ints_table,c4,5634" ]] || false
[[ "$output" =~ "ints_table,c5,12796" ]] || false
}
@test "column_tags: Round-tripping a column type through different NomsKinds restores original tag" {
dolt sql -q "CREATE TABLE t (pk INT PRIMARY KEY, col1 int);"
run dolt schema tags
[ $status -eq 0 ]
[[ $output =~ "col1 | 10878" ]] || false
dolt sql -q "ALTER TABLE t MODIFY COLUMN col1 VARCHAR(100);"
run dolt schema tags
[ $status -eq 0 ]
[[ $output =~ "col1 | 16050" ]] || false
dolt sql -q "ALTER TABLE t MODIFY COLUMN col1 int;"
run dolt schema tags
[ $status -eq 0 ]
[[ $output =~ "col1 | 10878" ]] || false
}
@test "column_tags: Round-tripping a column type through same NomsKinds keeps original tag" {
dolt sql -q "CREATE TABLE t (pk INT PRIMARY KEY, col1 VARCHAR(100));"
run dolt schema tags
[ $status -eq 0 ]
[[ $output =~ "col1 | 16050" ]] || false
dolt sql -q "ALTER TABLE t MODIFY COLUMN col1 VARCHAR(101);"
run dolt schema tags
[ $status -eq 0 ]
[[ $output =~ "col1 | 16050" ]] || false
dolt sql -q "ALTER TABLE t MODIFY COLUMN col1 VARCHAR(100);"
run dolt schema tags
[ $status -eq 0 ]
[[ $output =~ "col1 | 16050" ]] || false
}
@test "column_tags: Round-tripping a column type after some other column has been altered" {
dolt sql -q "CREATE TABLE t (pk INT PRIMARY KEY, col1 int);"
run dolt schema tags
[ $status -eq 0 ]
[[ $output =~ "col1 | 10878" ]] || false
dolt sql -q "ALTER TABLE t ADD COLUMN col2 int;"
dolt sql -q "ALTER TABLE t MODIFY COLUMN col1 VARCHAR(100);"
run dolt schema tags
[ $status -eq 0 ]
[[ $output =~ "col1 | 11127" ]] || false
dolt sql -q "ALTER TABLE t MODIFY COLUMN col1 int;"
run dolt schema tags
[ $status -eq 0 ]
[[ $output =~ "col1 | 10186" ]] || false
}
+6 -6
@@ -470,18 +470,18 @@ SQL
dolt sql -q "insert into test values (1, 1, 1, 1, 1, 1)"
dolt add test
dolt commit -m "table created"
dolt sql -q "insert into test values (2, 22, 0, 0, 0, 0)"
dolt sql -q "insert into test values (3, 33, 0, 0, 0, 0)"
dolt sql -q "insert into test values (2, 222, 0, 0, 0, 0)"
dolt sql -q "insert into test values (3, 333, 0, 0, 0, 0)"
run dolt diff --where "to_pk=2"
[ "$status" -eq 0 ]
[[ "$output" =~ "22" ]] || false
! [[ "$output" =~ "33" ]] || false
[[ "$output" =~ "222" ]] || false
[[ ! "$output" =~ "333" ]] || false
run dolt diff --where "to_pk < 3"
[ "$status" -eq 0 ]
[[ "$output" =~ "22" ]] || false
! [[ "$output" =~ "33" ]] || false
[[ "$output" =~ "222" ]] || false
! [[ "$output" =~ "333" ]] || false
dolt add test
dolt commit -m "added two rows"
@@ -68,6 +68,19 @@ SQL
[ "$status" -eq "0" ]
}
@test "garbage_collection: blob types work after GC" {
dolt sql -q "create table t(pk int primary key, val text)"
dolt sql -q "insert into t values (1, 'one'), (2, 'two');"
dolt add -A && dolt commit -am "added a table with blob encoding"
dolt gc
run dolt sql -q "select * from t"
[ $status -eq 0 ]
[[ $output =~ "one" ]] || false
[[ $output =~ "two" ]] || false
}
@test "garbage_collection: clone a remote" {
dolt sql <<SQL
CREATE TABLE test (pk int PRIMARY KEY);
@@ -17,17 +17,19 @@ teardown() {
dolt clone dolthub/first-hour-db
cd first-hour-db
dolt tag -v
run dolt tag -v
[ "$status" -eq 0 ]
[[ "$output" =~ "r9jv07tf9un3fm1fg72v7ad9er89oeo7" ]] || false
[[ ! "$output" =~ "ovpnp265d9cubjeo9qf0ts10piq7c70d" ]] || false
[[ ! "$output" =~ "popqo96mjvhsaumd3rbba9m56f1oij7h" ]] || false
dolt migrate
[[ $(cat ./.dolt/noms/manifest | cut -f 2 -d :) = "$TARGET_NBF" ]] || false
dolt tag -v
run dolt tag -v
[ "$status" -eq 0 ]
[[ "$output" =~ "ovpnp265d9cubjeo9qf0ts10piq7c70d" ]] || false
[[ "$output" =~ "popqo96mjvhsaumd3rbba9m56f1oij7h" ]] || false
[[ ! "$output" =~ "r9jv07tf9un3fm1fg72v7ad9er89oeo7" ]] || false
}
@@ -35,16 +37,18 @@ teardown() {
dolt clone dolthub/us-jails
cd us-jails
dolt tag -v
run dolt tag -v
[ "$status" -eq 0 ]
[[ "$output" =~ "u8s83gapv7ghnbmrtpm8q5es0dbl7lpd" ]] || false
[[ ! "$output" =~ "t25l8d0u3tp1tul8o9ttf8k3t5a24n4q" ]] || false
[[ ! "$output" =~ "k0hgumfrd2i891h1nh172cfutih5n6ea" ]] || false
dolt migrate
[[ $(cat ./.dolt/noms/manifest | cut -f 2 -d :) = "$TARGET_NBF" ]] || false
dolt tag -v
run dolt tag -v
[ "$status" -eq 0 ]
[[ "$output" =~ "t25l8d0u3tp1tul8o9ttf8k3t5a24n4q" ]] || false
[[ "$output" =~ "k0hgumfrd2i891h1nh172cfutih5n6ea" ]] || false
[[ ! "$output" =~ "u8s83gapv7ghnbmrtpm8q5es0dbl7lpd" ]] || false
}
+38
@@ -352,6 +352,44 @@ SQL
[[ "$output" =~ "Everything up-to-date" ]] || false
}
@test "remotes: pull with explicit remote and branch" {
dolt remote add test-remote http://localhost:50051/test-org/test-repo
dolt checkout -b test-branch
dolt sql -q "create table t1(c0 varchar(100));"
dolt commit -am "adding table t1"
run dolt push test-remote test-branch
[ "$status" -eq 0 ]
dolt checkout main
run dolt sql -q "show tables"
[ "$status" -eq 0 ]
[[ ! "$output" =~ "t1" ]] || false
# Specifying a non-existent remote branch returns an error
run dolt pull test-remote doesnotexist
[ "$status" -eq 1 ]
[[ "$output" =~ 'branch "doesnotexist" not found on remote' ]] || false
# Explicitly specifying the remote and branch will merge in that branch
run dolt pull test-remote test-branch
[ "$status" -eq 0 ]
run dolt sql -q "show tables"
[ "$status" -eq 0 ]
[[ "$output" =~ "t1" ]] || false
# Make a conflicting working set change and test that pull complains
dolt reset --hard HEAD^1
dolt sql -q "create table t1 (pk int primary key);"
run dolt pull test-remote test-branch
[ "$status" -eq 1 ]
[[ "$output" =~ 'local changes to the following tables would be overwritten by merge' ]] || false
# Commit changes and test that a merge conflict fails the pull
dolt commit -am "adding new t1 table"
run dolt pull test-remote test-branch
[ "$status" -eq 1 ]
[[ "$output" =~ "table with same name added in 2 commits can't be merged" ]] || false
}
@test "remotes: push and pull from non-main branch and use --set-upstream" {
dolt remote add test-remote http://localhost:50051/test-org/test-repo
dolt checkout -b test-branch
+38
@@ -459,6 +459,44 @@ teardown() {
[[ ! "$output" =~ "v3" ]] || false
}
@test "sql-pull: dolt_pull with remote and remote ref" {
cd repo1
dolt checkout feature
dolt checkout -b newbranch
run dolt sql -q "show tables"
[ "$status" -eq 0 ]
[[ ! "$output" =~ "t1" ]] || false
# Specifying a non-existent remote branch returns an error
run dolt sql -q "call dolt_pull('origin', 'doesnotexist');"
[ "$status" -eq 1 ]
[[ "$output" =~ 'branch "doesnotexist" not found on remote' ]] || false
# Explicitly specifying the remote and branch will merge in that branch
run dolt sql -q "call dolt_pull('origin', 'main');"
[ "$status" -eq 0 ]
run dolt sql -q "show tables"
[ "$status" -eq 0 ]
[[ "$output" =~ "t1" ]] || false
run dolt status
[ "$status" -eq 0 ]
[[ "$output" =~ "working tree clean" ]] || false
# Make a conflicting working set change and test that pull complains
dolt reset --hard HEAD^1
dolt sql -q "insert into t1 values (0, 100);"
run dolt sql -q "call dolt_pull('origin', 'main');"
[ "$status" -eq 1 ]
[[ "$output" =~ 'cannot merge with uncommitted changes' ]] || false
# Commit changes and test that a merge conflict fails the pull
dolt commit -am "adding new t1 table"
run dolt sql -q "call dolt_pull('origin', 'main');"
[ "$status" -eq 1 ]
[[ "$output" =~ "| fast_forward | conflicts |" ]] || false
[[ "$output" =~ "| 0 | 1 |" ]] || false
}
@test "sql-pull: dolt_pull also fetches, but does not merge other branches" {
cd repo1
dolt checkout -b other