add keyless support for new-format merge and other merge-related tables

This commit is contained in:
Dhruv Sringari
2022-06-30 15:43:27 -07:00
parent a0d3084c22
commit 00681ab1b2
16 changed files with 2562 additions and 1894 deletions
@@ -16,6 +16,7 @@ package merge_test
import (
"context"
"io"
"testing"
"github.com/stretchr/testify/assert"
@@ -29,25 +30,17 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/pool"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/store/val"
)
const tblName = "noKey"
var sch = dtu.MustSchema(
schema.NewColumn("c1", 1, types.IntKind, false),
schema.NewColumn("c2", 2, types.IntKind, false),
)
var c1Tag = types.Uint(1)
var c2Tag = types.Uint(2)
var cardTag = types.Uint(schema.KeylessRowCardinalityTag)
func TestKeylessMerge(t *testing.T) {
tests := []struct {
name string
setup []testCommand
expected tupleSet
expected keylessEntries
}{
{
name: "fast-forward merge",
@@ -60,10 +53,10 @@ func TestKeylessMerge(t *testing.T) {
{cmd.CheckoutCmd{}, []string{env.DefaultInitBranch}},
{cmd.MergeCmd{}, []string{"other"}},
},
expected: mustTupleSet(
dtu.MustTuple(cardTag, types.Uint(2), c1Tag, types.Int(1), c2Tag, types.Int(2)),
dtu.MustTuple(cardTag, types.Uint(1), c1Tag, types.Int(3), c2Tag, types.Int(4)),
),
expected: []keylessEntry{
{2, 1, 2},
{1, 3, 4},
},
},
{
name: "3-way merge",
@@ -78,11 +71,11 @@ func TestKeylessMerge(t *testing.T) {
{cmd.CommitCmd{}, []string{"-am", "added rows on main"}},
{cmd.MergeCmd{}, []string{"other"}},
},
expected: mustTupleSet(
dtu.MustTuple(cardTag, types.Uint(2), c1Tag, types.Int(1), c2Tag, types.Int(2)),
dtu.MustTuple(cardTag, types.Uint(1), c1Tag, types.Int(3), c2Tag, types.Int(4)),
dtu.MustTuple(cardTag, types.Uint(1), c1Tag, types.Int(5), c2Tag, types.Int(6)),
),
expected: []keylessEntry{
{2, 1, 2},
{1, 3, 4},
{1, 5, 6},
},
},
{
name: "3-way merge with duplicates",
@@ -97,11 +90,11 @@ func TestKeylessMerge(t *testing.T) {
{cmd.CommitCmd{}, []string{"-am", "added rows on main"}},
{cmd.MergeCmd{}, []string{"other"}},
},
expected: mustTupleSet(
dtu.MustTuple(cardTag, types.Uint(2), c1Tag, types.Int(1), c2Tag, types.Int(2)),
dtu.MustTuple(cardTag, types.Uint(2), c1Tag, types.Int(3), c2Tag, types.Int(4)),
dtu.MustTuple(cardTag, types.Uint(2), c1Tag, types.Int(5), c2Tag, types.Int(6)),
),
expected: []keylessEntry{
{2, 1, 2},
{2, 3, 4},
{2, 5, 6},
},
},
}
@@ -142,10 +135,10 @@ func TestKeylessMergeConflicts(t *testing.T) {
// Tuple(val)
// Tuple(mergeVal)
// )
conflicts tupleSet
conflicts conflictEntries
oursExpected tupleSet
theirsExpected tupleSet
oursExpected keylessEntries
theirsExpected keylessEntries
}{
{
name: "identical parallel changes",
@@ -160,21 +153,21 @@ func TestKeylessMergeConflicts(t *testing.T) {
{cmd.CommitCmd{}, []string{"-am", "added rows on main"}},
{cmd.MergeCmd{}, []string{"other"}},
},
conflicts: mustTupleSet(
dtu.MustTuple(
types.NullValue,
dtu.MustTuple(cardTag, types.Uint(1), c1Tag, types.Int(3), c2Tag, types.Int(4)),
dtu.MustTuple(cardTag, types.Uint(1), c1Tag, types.Int(3), c2Tag, types.Int(4)),
),
),
oursExpected: mustTupleSet(
dtu.MustTuple(cardTag, types.Uint(2), c1Tag, types.Int(1), c2Tag, types.Int(2)),
dtu.MustTuple(cardTag, types.Uint(1), c1Tag, types.Int(3), c2Tag, types.Int(4)),
),
theirsExpected: mustTupleSet(
dtu.MustTuple(cardTag, types.Uint(2), c1Tag, types.Int(1), c2Tag, types.Int(2)),
dtu.MustTuple(cardTag, types.Uint(1), c1Tag, types.Int(3), c2Tag, types.Int(4)),
),
conflicts: []conflictEntry{
{
base: nil,
ours: &keylessEntry{1, 3, 4},
theirs: &keylessEntry{1, 3, 4},
},
},
oursExpected: []keylessEntry{
{2, 1, 2},
{1, 3, 4},
},
theirsExpected: []keylessEntry{
{2, 1, 2},
{1, 3, 4},
},
},
{
name: "asymmetric parallel deletes",
@@ -189,19 +182,19 @@ func TestKeylessMergeConflicts(t *testing.T) {
{cmd.CommitCmd{}, []string{"-am", "deleted 2 rows on main"}},
{cmd.MergeCmd{}, []string{"other"}},
},
conflicts: mustTupleSet(
dtu.MustTuple(
dtu.MustTuple(cardTag, types.Uint(4), c1Tag, types.Int(1), c2Tag, types.Int(2)),
dtu.MustTuple(cardTag, types.Uint(2), c1Tag, types.Int(1), c2Tag, types.Int(2)),
dtu.MustTuple(cardTag, types.Uint(3), c1Tag, types.Int(1), c2Tag, types.Int(2)),
),
),
oursExpected: mustTupleSet(
dtu.MustTuple(cardTag, types.Uint(2), c1Tag, types.Int(1), c2Tag, types.Int(2)),
),
theirsExpected: mustTupleSet(
dtu.MustTuple(cardTag, types.Uint(3), c1Tag, types.Int(1), c2Tag, types.Int(2)),
),
conflicts: []conflictEntry{
{
base: &keylessEntry{4, 1, 2},
ours: &keylessEntry{2, 1, 2},
theirs: &keylessEntry{3, 1, 2},
},
},
oursExpected: []keylessEntry{
{2, 1, 2},
},
theirsExpected: []keylessEntry{
{3, 1, 2},
},
},
{
name: "asymmetric parallel updates",
@@ -216,26 +209,26 @@ func TestKeylessMergeConflicts(t *testing.T) {
{cmd.CommitCmd{}, []string{"-am", "deleted 2 rows on main"}},
{cmd.MergeCmd{}, []string{"other"}},
},
conflicts: mustTupleSet(
dtu.MustTuple(
dtu.MustTuple(cardTag, types.Uint(4), c1Tag, types.Int(1), c2Tag, types.Int(2)),
dtu.MustTuple(cardTag, types.Uint(2), c1Tag, types.Int(1), c2Tag, types.Int(2)),
dtu.MustTuple(cardTag, types.Uint(3), c1Tag, types.Int(1), c2Tag, types.Int(2)),
),
dtu.MustTuple(
types.NullValue,
dtu.MustTuple(cardTag, types.Uint(2), c1Tag, types.Int(1), c2Tag, types.Int(9)),
dtu.MustTuple(cardTag, types.Uint(1), c1Tag, types.Int(1), c2Tag, types.Int(9)),
),
),
oursExpected: mustTupleSet(
dtu.MustTuple(cardTag, types.Uint(2), c1Tag, types.Int(1), c2Tag, types.Int(2)),
dtu.MustTuple(cardTag, types.Uint(2), c1Tag, types.Int(1), c2Tag, types.Int(9)),
),
theirsExpected: mustTupleSet(
dtu.MustTuple(cardTag, types.Uint(3), c1Tag, types.Int(1), c2Tag, types.Int(2)),
dtu.MustTuple(cardTag, types.Uint(1), c1Tag, types.Int(1), c2Tag, types.Int(9)),
),
conflicts: []conflictEntry{
{
base: &keylessEntry{4, 1, 2},
ours: &keylessEntry{2, 1, 2},
theirs: &keylessEntry{3, 1, 2},
},
{
base: nil,
ours: &keylessEntry{2, 1, 9},
theirs: &keylessEntry{1, 1, 9},
},
},
oursExpected: []keylessEntry{
{2, 1, 2},
{2, 1, 9},
},
theirsExpected: []keylessEntry{
{3, 1, 2},
{1, 1, 9},
},
},
}
@@ -263,32 +256,16 @@ func TestKeylessMergeConflicts(t *testing.T) {
require.NoError(t, err)
tbl, _, err := root.GetTable(ctx, tblName)
require.NoError(t, err)
_, confIdx, err := tbl.GetConflicts(ctx)
require.NoError(t, err)
conflicts := durable.NomsMapFromConflictIndex(confIdx)
assert.True(t, conflicts.Len() > 0)
assert.Equal(t, int(conflicts.Len()), len(test.conflicts))
actual, err := conflicts.Iterator(ctx)
require.NoError(t, err)
for {
_, act, err := actual.Next(ctx)
if act == nil {
return
}
assert.NoError(t, err)
h, err := act.Hash(types.Format_Default)
assert.NoError(t, err)
exp, ok := test.conflicts[h]
assert.True(t, ok)
assert.True(t, exp.Equals(act))
}
assertConflicts(t, ctx, tbl, test.conflicts)
})
// conflict resolution
t.Run(test.name+"_resolved_ours", func(t *testing.T) {
if types.IsFormat_DOLT_1(types.Format_Default) {
// TODO (dhruv): unskip when resolve command is implemented
t.Skip()
}
dEnv := dtu.CreateTestEnv()
setupTest(t, ctx, dEnv, test.setup)
@@ -306,6 +283,10 @@ func TestKeylessMergeConflicts(t *testing.T) {
assertKeylessRows(t, ctx, tbl, test.oursExpected)
})
t.Run(test.name+"_resolved_theirs", func(t *testing.T) {
if types.IsFormat_DOLT_1(types.Format_Default) {
// TODO (dhruv): unskip when resolve command is implemented
t.Skip()
}
dEnv := dtu.CreateTestEnv()
setupTest(t, ctx, dEnv, test.setup)
@@ -325,14 +306,71 @@ func TestKeylessMergeConflicts(t *testing.T) {
}
}
// |expected| is a tupleSet to compensate for random storage order
func assertKeylessRows(t *testing.T, ctx context.Context, tbl *doltdb.Table, expected tupleSet) {
rowData, err := tbl.GetNomsRowData(ctx)
// assertConflicts verifies that the conflicts recorded on |tbl| match
// |expected|, dispatching on the table's storage format.
func assertConflicts(t *testing.T, ctx context.Context, tbl *doltdb.Table, expected conflictEntries) {
	if types.IsFormat_DOLT_1(tbl.Format()) {
		assertProllyConflicts(t, ctx, tbl, expected)
	} else {
		assertNomsConflicts(t, ctx, tbl, expected)
	}
}
func assertProllyConflicts(t *testing.T, ctx context.Context, tbl *doltdb.Table, expected conflictEntries) {
artIdx, err := tbl.GetArtifacts(ctx)
require.NoError(t, err)
artM := durable.ProllyMapFromArtifactIndex(artIdx)
itr, err := artM.IterAllConflicts(ctx)
require.NoError(t, err)
assert.Equal(t, int(rowData.Len()), len(expected))
expectedSet := expected.toConflictSet()
actual, err := rowData.Iterator(ctx)
var c int
var h [16]byte
for {
conf, err := itr.Next(ctx)
if err == io.EOF {
break
}
require.NoError(t, err)
c++
ours := mustGetRowValueFromTable(t, ctx, tbl, conf.Key)
theirs := mustGetRowValueFromRootIsh(t, ctx, tbl.ValueReadWriter(), conf.TheirRootIsh, tblName, conf.Key)
base := mustGetRowValueFromRootIsh(t, ctx, tbl.ValueReadWriter(), conf.Metadata.BaseRootIsh, tblName, conf.Key)
copy(h[:], conf.Key.GetField(0))
expectedConf, ok := expectedSet[h]
require.True(t, ok)
if expectedConf.base != nil {
_, value := expectedConf.base.HashAndValue()
require.Equal(t, valDesc.Format(value), valDesc.Format(base))
}
if expectedConf.ours != nil {
_, value := expectedConf.ours.HashAndValue()
require.Equal(t, valDesc.Format(value), valDesc.Format(ours))
}
if expectedConf.theirs != nil {
_, value := expectedConf.theirs.HashAndValue()
require.Equal(t, valDesc.Format(value), valDesc.Format(theirs))
}
}
require.Equal(t, len(expected), c)
}
func assertNomsConflicts(t *testing.T, ctx context.Context, tbl *doltdb.Table, expected conflictEntries) {
_, confIdx, err := tbl.GetConflicts(ctx)
require.NoError(t, err)
conflicts := durable.NomsMapFromConflictIndex(confIdx)
assert.True(t, conflicts.Len() > 0)
assert.Equal(t, int(conflicts.Len()), len(expected))
expectedSet := expected.toTupleSet()
actual, err := conflicts.Iterator(ctx)
require.NoError(t, err)
for {
_, act, err := actual.Next(ctx)
@@ -342,12 +380,198 @@ func assertKeylessRows(t *testing.T, ctx context.Context, tbl *doltdb.Table, exp
assert.NoError(t, err)
h, err := act.Hash(types.Format_Default)
assert.NoError(t, err)
exp, ok := expected[h]
exp, ok := expectedSet[h]
assert.True(t, ok)
assert.True(t, exp.Equals(act))
}
}
// mustGetRowValueFromTable returns the value tuple stored under |key| in the
// primary row data of |tbl|, failing the test on any error. If |key| is
// absent the returned tuple is the zero value.
func mustGetRowValueFromTable(t *testing.T, ctx context.Context, tbl *doltdb.Table, key val.Tuple) val.Tuple {
	idx, err := tbl.GetRowData(ctx)
	require.NoError(t, err)

	rows := durable.ProllyMapFromIndex(idx)

	var found val.Tuple
	err = rows.Get(ctx, key, func(_, v val.Tuple) error {
		found = v
		return nil
	})
	require.NoError(t, err)

	return found
}
// mustGetRowValueFromRootIsh resolves the root value addressed by |rootIsh|,
// looks up table |tblName| within it, and returns the row value stored under
// |key|, failing the test on any error or if the table is missing.
func mustGetRowValueFromRootIsh(t *testing.T, ctx context.Context, vrw types.ValueReadWriter, rootIsh hash.Hash, tblName string, key val.Tuple) val.Tuple {
	root, err := doltdb.LoadRootValueFromRootIshAddr(ctx, vrw, rootIsh)
	require.NoError(t, err)

	table, found, err := root.GetTable(ctx, tblName)
	require.NoError(t, err)
	require.True(t, found)

	return mustGetRowValueFromTable(t, ctx, table, key)
}
// assertKeylessRows asserts that the row data of |tbl| matches |expected|.
// |expected| is an unordered set of entries (not a tupleSet, despite the old
// comment) because keyless rows have no primary key and their storage order
// is nondeterministic.
func assertKeylessRows(t *testing.T, ctx context.Context, tbl *doltdb.Table, expected keylessEntries) {
	// New (DOLT_1) format tables store rows in prolly maps; older tables
	// store them in noms maps.
	if types.IsFormat_DOLT_1(tbl.Format()) {
		assertKeylessProllyRows(t, ctx, tbl, expected)
		return
	}

	assertKeylessNomsRows(t, ctx, tbl, expected)
}
// assertKeylessProllyRows checks new-format (prolly) row storage against
// |expected|: every stored row must appear in the expected set (matched by
// its 16-byte hash key) and the total row count must equal len(expected).
func assertKeylessProllyRows(t *testing.T, ctx context.Context, tbl *doltdb.Table, expected []keylessEntry) {
	idx, err := tbl.GetRowData(ctx)
	require.NoError(t, err)
	m := durable.ProllyMapFromIndex(idx)

	// Index expected rows by hash so lookup is order-independent.
	expectedSet := mustHash128Set(expected...)

	itr, err := m.IterAll(ctx)
	require.NoError(t, err)

	var c int      // count of rows actually seen
	var h [16]byte // reused buffer for the row's hash key
	for {
		hashId, value, err := itr.Next(ctx)
		if err == io.EOF {
			break
		}
		c++
		require.NoError(t, err)
		// Field 0 of the key tuple holds the row's hash bytes.
		copy(h[:], hashId.GetField(0))
		expectedVal, ok := expectedSet[h]
		assert.True(t, ok)
		// Compare formatted tuples for a readable failure message.
		assert.Equal(t, valDesc.Format(expectedVal), valDesc.Format(value))
	}
	require.Equal(t, len(expected), c)
}
// assertKeylessNomsRows checks old-format (noms) row storage against
// |expected|, matching rows by their noms hash to compensate for the
// nondeterministic storage order of keyless tables.
func assertKeylessNomsRows(t *testing.T, ctx context.Context, tbl *doltdb.Table, expected keylessEntries) {
	rowData, err := tbl.GetNomsRowData(ctx)
	require.NoError(t, err)
	assert.Equal(t, int(rowData.Len()), len(expected))

	expectedSet := expected.toTupleSet()

	iter, err := rowData.Iterator(ctx)
	require.NoError(t, err)
	for {
		_, row, err := iter.Next(ctx)
		if row == nil {
			break
		}
		assert.NoError(t, err)

		rowHash, err := row.Hash(types.Format_Default)
		assert.NoError(t, err)

		want, ok := expectedSet[rowHash]
		assert.True(t, ok)
		assert.True(t, want.Equals(row))
	}
}
// tblName is the keyless test table used throughout these merge tests.
const tblName = "noKey"

// sch is the schema of the keyless test table: two non-key int columns.
var sch = dtu.MustSchema(
	schema.NewColumn("c1", 1, types.IntKind, false),
	schema.NewColumn("c2", 2, types.IntKind, false),
)

// Noms tuple tags for the two columns and for the keyless-row cardinality field.
var c1Tag = types.Uint(1)
var c2Tag = types.Uint(2)
var cardTag = types.Uint(schema.KeylessRowCardinalityTag)

// valDesc describes the new-format value-tuple layout used by these tests:
// (cardinality uint64, c1 nullable int64, c2 nullable int64).
var valDesc = val.NewTupleDescriptor(val.Type{Enc: val.Uint64Enc}, val.Type{Enc: val.Int64Enc, Nullable: true}, val.Type{Enc: val.Int64Enc, Nullable: true})

// valBld and sharePool are shared by the helpers below when building expected
// tuples. NOTE(review): assumed not safe for concurrent use — confirm before
// parallelizing these tests.
var valBld = val.NewTupleBuilder(valDesc)
var sharePool = pool.NewBuffPool()
// keylessEntries is a list of expected keyless rows.
type keylessEntries []keylessEntry

// keylessEntry is a compact description of one keyless row:
// its cardinality (number of duplicates) and the two column values.
type keylessEntry struct {
	card int // row cardinality
	c1   int // value of column c1
	c2   int // value of column c2
}
// toTupleSet converts each entry to its noms tuple form and collects the
// results into a tupleSet keyed by tuple hash.
func (e keylessEntries) toTupleSet() tupleSet {
	converted := make([]types.Tuple, 0, len(e))
	for _, entry := range e {
		converted = append(converted, entry.ToNomsTuple())
	}
	return mustTupleSet(converted...)
}
// ToNomsTuple encodes the entry in the noms keyless row layout:
// (cardinality tag, cardinality, c1 tag, c1, c2 tag, c2).
func (e keylessEntry) ToNomsTuple() types.Tuple {
	fields := []types.Value{
		cardTag, types.Uint(e.card),
		c1Tag, types.Int(e.c1),
		c2Tag, types.Int(e.c2),
	}
	return dtu.MustTuple(fields...)
}
// HashAndValue builds the entry's new-format value tuple and returns its hash
// bytes together with the tuple itself. It reuses the shared package-level
// builder valBld, so it is NOT safe for concurrent use.
func (e keylessEntry) HashAndValue() ([]byte, val.Tuple) {
	// Value layout matches valDesc: (cardinality, c1, c2).
	valBld.PutUint64(0, uint64(e.card))
	valBld.PutInt64(1, int64(e.c1))
	valBld.PutInt64(2, int64(e.c2))
	value := valBld.Build(sharePool)
	// The hash tuple's field 0 carries the raw hash bytes (callers copy
	// them into a [16]byte key).
	hashTup := val.HashTupleFromValue(sharePool, value)
	return hashTup.GetField(0), value
}
// conflictSet indexes expected conflicts by a 16-byte row hash, compensating
// for nondeterministic conflict storage order.
type conflictSet map[[16]byte]conflictEntry

// conflictEntries is a list of expected three-way conflicts.
type conflictEntries []conflictEntry

// conflictEntry describes one expected conflict. A nil side means the row was
// absent in that version (e.g. base == nil for parallel inserts).
type conflictEntry struct {
	base, ours, theirs *keylessEntry
}
// toConflictSet indexes the entries by their row-hash key for
// order-independent lookup.
func (e conflictEntries) toConflictSet() conflictSet {
	set := make(conflictSet, len(e))
	for _, entry := range e {
		set[entry.Key()] = entry
	}
	return set
}
// toTupleSet converts each conflict to its noms (base, ours, theirs) tuple
// form and collects the results into a tupleSet keyed by tuple hash.
func (e conflictEntries) toTupleSet() tupleSet {
	converted := make([]types.Tuple, 0, len(e))
	for _, entry := range e {
		converted = append(converted, entry.ToNomsTuple())
	}
	return mustTupleSet(converted...)
}
// Key returns the 16-byte hash identifying this conflict's row. The hash is
// taken from the first non-nil side (base, then ours, then theirs); if all
// three sides are nil the zero key is returned.
func (e conflictEntry) Key() (h [16]byte) {
	for _, side := range []*keylessEntry{e.base, e.ours, e.theirs} {
		if side == nil {
			continue
		}
		hashBytes, _ := side.HashAndValue()
		copy(h[:], hashBytes)
		return
	}
	return
}
// ToNomsTuple encodes the conflict as a (base, ours, theirs) noms tuple,
// substituting NullValue for any absent side.
func (e conflictEntry) ToNomsTuple() types.Tuple {
	toValue := func(k *keylessEntry) types.Value {
		if k == nil {
			return types.NullValue
		}
		return k.ToNomsTuple()
	}
	return dtu.MustTuple(toValue(e.base), toValue(e.ours), toValue(e.theirs))
}
type tupleSet map[hash.Hash]types.Tuple
func mustTupleSet(tt ...types.Tuple) (s tupleSet) {
@@ -361,3 +585,18 @@ func mustTupleSet(tt ...types.Tuple) (s tupleSet) {
}
return
}
// hash128Set maps a row's 16-byte hash to its expected value tuple.
type hash128Set map[[16]byte]val.Tuple

// mustHash128Set builds a hash128Set from |entries|, keying each expected
// value tuple by its 16-byte hash.
func mustHash128Set(entries ...keylessEntry) (s hash128Set) {
	s = make(hash128Set, len(entries))
	for _, entry := range entries {
		hashBytes, value := entry.HashAndValue()

		var key [16]byte
		copy(key[:], hashBytes)
		s[key] = value
	}
	return s
}
+31 -21
View File
@@ -242,35 +242,21 @@ func mergeProllyRowData(ctx context.Context, postMergeSchema, rootSch, mergeSch,
m := durable.ProllyMapFromIndex(rootR)
vMerger := newValueMerger(postMergeSchema, rootSch, mergeSch, ancSch, m.Pool())
keyless := schema.IsKeyless(postMergeSchema)
mergedRP, err := prolly.MergeMaps(ctx, rootRP, mergeRP, ancRP, func(left, right tree.Diff) (tree.Diff, bool) {
if left.Type == right.Type && bytes.Equal(left.To, right.To) {
// convergent edit
if keyless {
// convergent edits are conflicts for keyless tables
_ = sendConflict(ctx, conflicts, indexEdits, left, right)
return tree.Diff{}, false
}
return left, true
}
merged, isConflict := vMerger.tryMerge(val.Tuple(left.To), val.Tuple(right.To), val.Tuple(left.From))
if isConflict {
c := confVals{
key: val.Tuple(left.Key),
ourVal: val.Tuple(left.To),
theirVal: val.Tuple(right.To),
baseVal: val.Tuple(left.From),
}
select {
case conflicts <- c:
case <-ctx.Done():
return tree.Diff{}, false
}
// Reset the change on the right
e := conflictEdit{
right: right,
}
select {
case indexEdits <- e:
case <-ctx.Done():
return tree.Diff{}, false
}
_ = sendConflict(ctx, conflicts, indexEdits, left, right)
return tree.Diff{}, false
}
@@ -302,6 +288,30 @@ func mergeProllyRowData(ctx context.Context, postMergeSchema, rootSch, mergeSch,
return updatedTbl, durable.IndexFromProllyMap(mergedRP), nil
}
// sendConflict records a merge conflict for the row described by |left| and
// |right|: it first sends the conflict values on |confs|, then sends an index
// edit on |edits| that resets the right-hand change. Each send respects
// context cancellation; on cancellation ctx.Err() is returned and any
// remaining send is skipped.
func sendConflict(ctx context.Context, confs chan confVals, edits chan indexEdit, left, right tree.Diff) error {
	c := confVals{
		key:      val.Tuple(left.Key),
		ourVal:   val.Tuple(left.To),
		theirVal: val.Tuple(right.To),
		baseVal:  val.Tuple(left.From),
	}
	select {
	case confs <- c:
	case <-ctx.Done():
		return ctx.Err()
	}
	// Reset the change on the right
	e := conflictEdit{
		right: right,
	}
	select {
	case edits <- e:
	case <-ctx.Done():
		return ctx.Err()
	}
	return nil
}
type valueMerger struct {
numCols int
vD val.TupleDesc
@@ -163,7 +163,11 @@ func mergeProllyIndexSets(
if err != nil {
return prolly.Map{}, false, err
}
return durable.ProllyMapFromIndex(idx), true, nil
m := durable.ProllyMapFromIndex(idx)
if schema.IsKeyless(sch) {
m = prolly.ConvertToSecondaryKeylessIndex(m)
}
return m, true, nil
}
return prolly.Map{}, false, nil
}
@@ -353,21 +357,24 @@ OUTER:
// getMutableSecondaryIdxs returns a MutableSecondaryIdx for each secondary index
// defined in |schema| and |tbl|.
func getMutableSecondaryIdxs(ctx context.Context, schema schema.Schema, tbl *doltdb.Table) ([]MutableSecondaryIdx, error) {
func getMutableSecondaryIdxs(ctx context.Context, sch schema.Schema, tbl *doltdb.Table) ([]MutableSecondaryIdx, error) {
indexSet, err := tbl.GetIndexSet(ctx)
if err != nil {
return nil, err
}
mods := make([]MutableSecondaryIdx, schema.Indexes().Count())
for i, index := range schema.Indexes().AllIndexes() {
idx, err := indexSet.GetIndex(ctx, schema, index.Name())
mods := make([]MutableSecondaryIdx, sch.Indexes().Count())
for i, index := range sch.Indexes().AllIndexes() {
idx, err := indexSet.GetIndex(ctx, sch, index.Name())
if err != nil {
return nil, err
}
m := durable.ProllyMapFromIndex(idx)
if schema.IsKeyless(sch) {
m = prolly.ConvertToSecondaryKeylessIndex(m)
}
mods[i] = NewMutableSecondaryIdx(m, schema, index, m.Pool())
mods[i] = NewMutableSecondaryIdx(m, sch, index, m.Pool())
}
return mods, nil
@@ -39,13 +39,14 @@ type MutableSecondaryIdx struct {
// NewMutableSecondaryIdx returns a MutableSecondaryIdx. |m| is the secondary idx data.
func NewMutableSecondaryIdx(m prolly.Map, sch schema.Schema, index schema.Index, syncPool pool.BuffPool) MutableSecondaryIdx {
kD, _ := m.Descriptors()
pkLen, keyMap := creation.GetIndexKeyMapping(sch, index)
return MutableSecondaryIdx{
index.Name(),
m.Mutate(),
creation.GetIndexKeyMapping(sch, index),
sch.GetPKCols().Size(),
val.NewTupleBuilder(kD),
syncPool,
Name: index.Name(),
mut: m.Mutate(),
keyMap: keyMap,
pkLen: pkLen,
keyBld: val.NewTupleBuilder(kD),
syncPool: syncPool,
}
}
@@ -308,7 +308,11 @@ func makePartialKey(kb *val.TupleBuilder, idxSch schema.Index, tblSch schema.Sch
if v.FieldIsNull(j) {
return nil, true
}
kb.PutRaw(i, v.GetField(j))
if schema.IsKeyless(tblSch) {
kb.PutRaw(i, v.GetField(j+1))
} else {
kb.PutRaw(i, v.GetField(j))
}
}
return kb.Build(pool), false
@@ -107,6 +107,7 @@ type prollyConflictRowIter struct {
tblName string
vrw types.ValueReadWriter
ourRows prolly.Map
keyless bool
kd val.TupleDesc
baseVD, oursVD, theirsVD val.TupleDesc
@@ -133,21 +134,31 @@ func newProllyConflictRowIter(ctx *sql.Context, ct ProllyConflictsTable) (*proll
return nil, err
}
keyless := schema.IsKeyless(ct.ourSch)
kd := shim.KeyDescriptorFromSchema(ct.baseSch)
baseVD := shim.ValueDescriptorFromSchema(ct.baseSch)
oursVD := shim.ValueDescriptorFromSchema(ct.ourSch)
theirsVD := shim.ValueDescriptorFromSchema(ct.theirSch)
b := 1
o := b + kd.Count() + baseVD.Count()
t := o + kd.Count() + oursVD.Count()
n := t + kd.Count() + theirsVD.Count()
var o, t, n int
if !keyless {
o = b + kd.Count() + baseVD.Count()
t = o + kd.Count() + oursVD.Count()
n = t + kd.Count() + theirsVD.Count()
} else {
o = b + baseVD.Count() - 1
t = o + oursVD.Count() - 1
n = t + theirsVD.Count() - 1
}
return &prollyConflictRowIter{
itr: itr,
tblName: ct.tblName,
vrw: ct.tbl.ValueReadWriter(),
ourRows: ourRows,
keyless: keyless,
kd: kd,
baseVD: baseVD,
oursVD: oursVD,
@@ -168,27 +179,43 @@ func (itr *prollyConflictRowIter) Next(ctx *sql.Context) (sql.Row, error) {
r := make(sql.Row, itr.n)
r[0] = c.h.String()
for i := 0; i < itr.kd.Count(); i++ {
f, err := index.GetField(ctx, itr.kd, i, c.k, itr.baseRows.NodeStore())
if !itr.keyless {
for i := 0; i < itr.kd.Count(); i++ {
f, err := index.GetField(ctx, itr.kd, i, c.k, itr.baseRows.NodeStore())
if err != nil {
return nil, err
}
if c.bV != nil {
r[itr.b+i] = f
}
if c.oV != nil {
r[itr.o+i] = f
}
if c.tV != nil {
r[itr.t+i] = f
}
}
err = itr.putConflictRowVals(ctx, c, r)
if err != nil {
return nil, err
}
if c.bV != nil {
r[itr.b+i] = f
}
if c.oV != nil {
r[itr.o+i] = f
}
if c.tV != nil {
r[itr.t+i] = f
} else {
err = itr.putKeylessConflictRowVals(ctx, c, r)
if err != nil {
return nil, err
}
}
return r, nil
}
func (itr *prollyConflictRowIter) putConflictRowVals(ctx *sql.Context, c conf, r sql.Row) error {
if c.bV != nil {
for i := 0; i < itr.baseVD.Count(); i++ {
f, err := index.GetField(ctx, itr.baseVD, i, c.bV, itr.baseRows.NodeStore())
if err != nil {
return nil, err
return err
}
r[itr.b+itr.kd.Count()+i] = f
}
@@ -198,7 +225,7 @@ func (itr *prollyConflictRowIter) Next(ctx *sql.Context) (sql.Row, error) {
for i := 0; i < itr.oursVD.Count(); i++ {
f, err := index.GetField(ctx, itr.oursVD, i, c.oV, itr.baseRows.NodeStore())
if err != nil {
return nil, err
return err
}
r[itr.o+itr.kd.Count()+i] = f
}
@@ -208,13 +235,47 @@ func (itr *prollyConflictRowIter) Next(ctx *sql.Context) (sql.Row, error) {
for i := 0; i < itr.theirsVD.Count(); i++ {
f, err := index.GetField(ctx, itr.theirsVD, i, c.tV, itr.baseRows.NodeStore())
if err != nil {
return nil, err
return err
}
r[itr.t+itr.kd.Count()+i] = f
}
}
return r, nil
return nil
}
// putKeylessConflictRowVals fills |r| with the base/ours/theirs column values
// of a keyless conflict |c|. Unlike the keyed variant there is no key section:
// all columns live in the value tuple, and iteration starts at field 1
// (i+1) — field 0 is presumably the keyless cardinality column, which is
// excluded from the SQL row (TODO confirm). Nil sides are left as SQL NULLs.
func (itr *prollyConflictRowIter) putKeylessConflictRowVals(ctx *sql.Context, c conf, r sql.Row) error {
	if c.bV != nil {
		for i := 0; i < itr.baseVD.Count()-1; i++ {
			f, err := index.GetField(ctx, itr.baseVD, i+1, c.bV, itr.baseRows.NodeStore())
			if err != nil {
				return err
			}
			r[itr.b+i] = f
		}
	}

	if c.oV != nil {
		for i := 0; i < itr.oursVD.Count()-1; i++ {
			f, err := index.GetField(ctx, itr.oursVD, i+1, c.oV, itr.baseRows.NodeStore())
			if err != nil {
				return err
			}
			r[itr.o+i] = f
		}
	}

	if c.tV != nil {
		for i := 0; i < itr.theirsVD.Count()-1; i++ {
			f, err := index.GetField(ctx, itr.theirsVD, i+1, c.tV, itr.baseRows.NodeStore())
			if err != nil {
				return err
			}
			r[itr.t+i] = f
		}
	}

	return nil
}
type conf struct {
@@ -337,6 +398,9 @@ func newProllyConflictDeleter(ct ProllyConflictsTable) *prollyConflictDeleter {
}
func (cd *prollyConflictDeleter) Delete(ctx *sql.Context, r sql.Row) error {
if schema.IsKeyless(cd.ct.ourSch) {
panic("conflict deleter for keyless tables not implemented")
}
// get keys from either base, ours, or theirs
o := func() int {
@@ -161,21 +161,31 @@ func (itr prollyCVIter) Next(ctx *sql.Context) (sql.Row, error) {
}
o := 2
for i := 0; i < itr.kd.Count(); i++ {
r[o+i], err = index.GetField(ctx, itr.kd, i, art.Key, itr.ns)
if err != nil {
return nil, err
if !schema.IsKeyless(itr.sch) {
for i := 0; i < itr.kd.Count(); i++ {
r[o+i], err = index.GetField(ctx, itr.kd, i, art.Key, itr.ns)
if err != nil {
return nil, err
}
}
}
o += itr.kd.Count()
o += itr.kd.Count()
for i := 0; i < itr.vd.Count(); i++ {
r[o+i], err = index.GetField(ctx, itr.vd, i, meta.Value, itr.ns)
if err != nil {
return nil, err
for i := 0; i < itr.vd.Count(); i++ {
r[o+i], err = index.GetField(ctx, itr.vd, i, meta.Value, itr.ns)
if err != nil {
return nil, err
}
}
o += itr.vd.Count()
} else {
for i := 0; i < itr.vd.Count()-1; i++ {
r[o+i], err = index.GetField(ctx, itr.vd, i+1, meta.Value, itr.ns)
if err != nil {
return nil, err
}
}
o += itr.vd.Count() - 1
}
o += itr.vd.Count()
switch art.ArtType {
case prolly.ArtifactTypeForeignKeyViol:
@@ -692,6 +692,16 @@ func TestDoltMerge(t *testing.T) {
}
}
// TestKeylessDoltMergeCVsAndConflicts tests new-format behavior for keyless
// merges that create constraint violations (CVs) and conflicts. It is skipped
// entirely on the old storage format.
func TestKeylessDoltMergeCVsAndConflicts(t *testing.T) {
	if !types.IsFormat_DOLT_1(types.Format_Default) {
		t.Skip()
	}
	for _, script := range KeylessMergeCVsAndConflictsScripts {
		enginetest.TestScript(t, newDoltHarness(t), script)
	}
}
// eventually this will be part of TestDoltMerge
func TestDoltMergeArtifacts(t *testing.T) {
if !types.IsFormat_DOLT_1(types.Format_Default) {
@@ -1488,6 +1488,101 @@ var MergeScripts = []queries.ScriptTest{
},
}
// KeylessMergeCVsAndConflictsScripts exercises new-format merge behavior on
// keyless tables that produces constraint violations (unique index and
// foreign key) and row conflicts. Each script sets up divergent branches and
// asserts the artifacts documented after DOLT_MERGE.
var KeylessMergeCVsAndConflictsScripts = []queries.ScriptTest{
	{
		// Divergent inserts sharing a unique-index value on both branches.
		Name: "Keyless merge with unique indexes documents violations",
		SetUpScript: []string{
			"SET dolt_force_transaction_commit = on;",
			"CREATE table t (col1 int, col2 int UNIQUE);",
			"CALL DOLT_COMMIT('-am', 'setup');",
			"CALL DOLT_CHECKOUT('-b', 'right');",
			"INSERT INTO t VALUES (2, 1);",
			"CALL DOLT_COMMIT('-am', 'right');",
			"CALL DOLT_CHECKOUT('main');",
			"INSERT INTO t values (1, 1);",
			"CALL DOLT_COMMIT('-am', 'left');",
		},
		Assertions: []queries.ScriptTestAssertion{
			{
				Query:    "CALL DOLT_MERGE('right');",
				Expected: []sql.Row{{0, 1}},
			},
			{
				// Both offending rows are recorded as unique-index violations.
				Query:    "SELECT violation_type, col1, col2 from dolt_constraint_violations_t ORDER BY col1 ASC;",
				Expected: []sql.Row{{uint64(merge.CvType_UniqueIndex), 1, 1}, {uint64(merge.CvType_UniqueIndex), 2, 1}},
			},
			{
				Query:    "SELECT * from t ORDER BY col1 ASC;",
				Expected: []sql.Row{{1, 1}, {2, 1}},
			},
		},
	},
	{
		// Right branch inserts a child row whose parent is deleted on main.
		Name: "Keyless merge with foreign keys documents violations",
		SetUpScript: []string{
			"SET dolt_force_transaction_commit = on;",
			"CREATE table parent (pk int PRIMARY KEY);",
			"CREATE table child (parent_fk int, FOREIGN KEY (parent_fk) REFERENCES parent (pk));",
			"INSERT INTO parent VALUES (1);",
			"CALL DOLT_COMMIT('-am', 'setup');",
			"CALL DOLT_CHECKOUT('-b', 'right');",
			"INSERT INTO child VALUES (1);",
			"CALL DOLT_COMMIT('-am', 'right');",
			"CALL DOLT_CHECKOUT('main');",
			"DELETE from parent where pk = 1;",
			"CALL DOLT_COMMIT('-am', 'left');",
		},
		Assertions: []queries.ScriptTestAssertion{
			{
				Query:    "CALL DOLT_MERGE('right');",
				Expected: []sql.Row{{0, 1}},
			},
			{
				Query:    "SELECT violation_type, parent_fk from dolt_constraint_violations_child;",
				Expected: []sql.Row{{uint64(merge.CvType_ForeignKey), 1}},
			},
			{
				Query:    "SELECT * from parent;",
				Expected: []sql.Row{},
			},
			{
				Query:    "SELECT * from child;",
				Expected: []sql.Row{{1}},
			},
		},
	},
	{
		// Identical parallel inserts: for keyless tables convergent edits
		// are conflicts, so the merge documents one with a NULL base.
		Name: "Keyless merge documents conflicts",
		SetUpScript: []string{
			"SET dolt_allow_commit_conflicts = on;",
			"CREATE table t (col1 int, col2 int);",
			"CALL DOLT_COMMIT('-am', 'setup');",
			"CALL DOLT_CHECKOUT('-b', 'right');",
			"INSERT INTO t VALUES (1, 1);",
			"CALL DOLT_COMMIT('-am', 'right');",
			"CALL DOLT_CHECKOUT('main');",
			"INSERT INTO t VALUES (1, 1);",
			"CALL DOLT_COMMIT('-am', 'left');",
		},
		Assertions: []queries.ScriptTestAssertion{
			{
				Query:    "CALL DOLT_MERGE('right');",
				Expected: []sql.Row{{0, 1}},
			},
			{
				Query:    "SELECT base_col1, base_col2, our_col1, our_col2, their_col1, their_col2 from dolt_conflicts_t;",
				Expected: []sql.Row{{nil, nil, 1, 1, 1, 1}},
			},
		},
	},
}
// MergeArtifactsScripts tests new format merge behavior where
// existing violations and conflicts are merged together.
var MergeArtifactsScripts = []queries.ScriptTest{
@@ -381,9 +381,6 @@ type prollyKeylessSecondaryWriter struct {
keyBld *val.TupleBuilder
keyMap val.OrdinalMapping
valBld *val.TupleBuilder
valMap val.OrdinalMapping
}
var _ indexWriter = prollyKeylessSecondaryWriter{}
@@ -106,8 +106,8 @@ func getSecondaryKeylessProllyWriters(ctx context.Context, t *doltdb.Table, sqlS
m := durable.ProllyMapFromIndex(idxRows)
m = prolly.ConvertToSecondaryKeylessIndex(m)
keyMap, valMap := ordinalMappingsFromSchema(sqlSch, def.Schema())
keyDesc, valDesc := m.Descriptors()
keyMap, _ := ordinalMappingsFromSchema(sqlSch, def.Schema())
keyDesc, _ := m.Descriptors()
writers[defName] = prollyKeylessSecondaryWriter{
name: defName,
@@ -116,8 +116,6 @@ func getSecondaryKeylessProllyWriters(ctx context.Context, t *doltdb.Table, sqlS
unique: def.IsUnique(),
keyBld: val.NewTupleBuilder(keyDesc),
keyMap: keyMap,
valBld: val.NewTupleBuilder(valDesc),
valMap: valMap,
}
}
@@ -175,19 +175,21 @@ func BuildSecondaryProllyIndex(ctx context.Context, vrw types.ValueReadWriter, s
return nil, err
}
secondary := durable.ProllyMapFromIndex(empty)
iter, err := primary.IterAll(ctx)
if err != nil {
return nil, err
if schema.IsKeyless(sch) {
secondary = prolly.ConvertToSecondaryKeylessIndex(secondary)
}
pkLen := sch.GetPKCols().Size()
// create a key builder for index key tuples
kd, _ := secondary.Descriptors()
keyBld := val.NewTupleBuilder(kd)
keyMap := GetIndexKeyMapping(sch, idx)
pkLen, keyMap := GetIndexKeyMapping(sch, idx)
mut := secondary.Mutate()
iter, err := primary.IterAll(ctx)
if err != nil {
return nil, err
}
for {
k, v, err := iter.Next(ctx)
if err == io.EOF {
@@ -237,17 +239,19 @@ func BuildUniqueProllyIndex(ctx context.Context, vrw types.ValueReadWriter, sch
return nil, err
}
secondary := durable.ProllyMapFromIndex(empty)
if schema.IsKeyless(sch) {
secondary = prolly.ConvertToSecondaryKeylessIndex(secondary)
}
iter, err := primary.IterAll(ctx)
if err != nil {
return nil, err
}
pkLen := sch.GetPKCols().Size()
// create a key builder for index key tuples
kd, _ := secondary.Descriptors()
keyBld := val.NewTupleBuilder(kd)
keyMap := GetIndexKeyMapping(sch, idx)
pkLen, keyMap := GetIndexKeyMapping(sch, idx)
// key builder for the indexed columns only which is a prefix of the index key
prefixKD := kd.PrefixDesc(idx.Count())
@@ -366,19 +370,33 @@ type rangeIterator interface {
IterRange(ctx context.Context, rng prolly.Range) (prolly.MapIter, error)
}
func GetIndexKeyMapping(sch schema.Schema, idx schema.Index) (m val.OrdinalMapping) {
// GetIndexKeyMapping returns a mapping from primary row data to index data. It can handle keyless schema.
func GetIndexKeyMapping(sch schema.Schema, idx schema.Index) (keyLen int, m val.OrdinalMapping) {
m = make(val.OrdinalMapping, len(idx.AllTags()))
if schema.IsKeyless(sch) {
// the only key is the hash of the values
keyLen = 1
} else {
keyLen = sch.GetPKCols().Size()
}
for i, tag := range idx.AllTags() {
j, ok := sch.GetPKCols().TagToIdx[tag]
if !ok {
j = sch.GetNonPKCols().TagToIdx[tag]
j += sch.GetPKCols().Size()
j += keyLen
}
m[i] = j
}
return
if schema.IsKeyless(sch) {
// last key in index is hash which is the only column in the key
m = append(m, 0)
return keyLen, m
}
return keyLen, m
}
var _ error = (*prollyUniqueKeyErr)(nil)
@@ -0,0 +1,159 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package creation
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/store/val"
)
// TestGetIndexKeyMapping checks that GetIndexKeyMapping returns the expected
// primary-key length and ordinal mapping for keyed and keyless schemas,
// including compound and reversed secondary indexes.
func TestGetIndexKeyMapping(t *testing.T) {
	type testCase struct {
		Name    string
		AllCols []schema.Column
		IdxCols []string
		KeyLen  int
		Mapping val.OrdinalMapping
	}

	cases := []testCase{
		{
			Name: "basic",
			AllCols: []schema.Column{
				schema.NewColumn("col1", 0, types.IntKind, true),
				schema.NewColumn("col2", 1, types.IntKind, false),
			},
			IdxCols: []string{"col2"},
			KeyLen:  1,
			Mapping: []int{1, 0},
		},
		{
			Name: "basic, pk not first",
			AllCols: []schema.Column{
				schema.NewColumn("col1", 0, types.IntKind, false),
				schema.NewColumn("col2", 1, types.IntKind, true),
			},
			IdxCols: []string{"col1"},
			KeyLen:  1,
			Mapping: []int{1, 0},
		},
		{
			Name: "compound index",
			AllCols: []schema.Column{
				schema.NewColumn("col1", 0, types.IntKind, true),
				schema.NewColumn("col2", 1, types.IntKind, false),
				schema.NewColumn("col3", 2, types.IntKind, false),
			},
			IdxCols: []string{"col2", "col3"},
			KeyLen:  1,
			Mapping: []int{1, 2, 0},
		},
		{
			Name: "compound index reverse",
			AllCols: []schema.Column{
				schema.NewColumn("col1", 0, types.IntKind, true),
				schema.NewColumn("col2", 1, types.IntKind, false),
				schema.NewColumn("col3", 2, types.IntKind, false),
			},
			IdxCols: []string{"col3", "col2"},
			KeyLen:  1,
			Mapping: []int{2, 1, 0},
		},
		{
			Name: "compound index, pk not first",
			AllCols: []schema.Column{
				schema.NewColumn("col1", 0, types.IntKind, false),
				schema.NewColumn("col2", 1, types.IntKind, true),
				schema.NewColumn("col3", 2, types.IntKind, false),
			},
			IdxCols: []string{"col1", "col3"},
			KeyLen:  1,
			Mapping: []int{1, 2, 0},
		},
		{
			Name: "compound index, pk not first, reverse",
			AllCols: []schema.Column{
				schema.NewColumn("col1", 0, types.IntKind, false),
				schema.NewColumn("col2", 1, types.IntKind, true),
				schema.NewColumn("col3", 2, types.IntKind, false),
			},
			IdxCols: []string{"col3", "col1"},
			KeyLen:  1,
			Mapping: []int{2, 1, 0},
		},
		{
			Name: "keyless",
			AllCols: []schema.Column{
				schema.NewColumn("col1", 0, types.IntKind, false),
				schema.NewColumn("col2", 1, types.IntKind, false),
			},
			IdxCols: []string{"col1"},
			KeyLen:  1,
			Mapping: []int{1, 0},
		},
		{
			Name: "keyless other",
			AllCols: []schema.Column{
				schema.NewColumn("col1", 0, types.IntKind, false),
				schema.NewColumn("col2", 1, types.IntKind, false),
			},
			IdxCols: []string{"col2"},
			KeyLen:  1,
			Mapping: []int{2, 0},
		},
		{
			Name: "compound keyless",
			AllCols: []schema.Column{
				schema.NewColumn("col1", 0, types.IntKind, false),
				schema.NewColumn("col2", 1, types.IntKind, false),
				schema.NewColumn("col3", 2, types.IntKind, false),
			},
			IdxCols: []string{"col2", "col3"},
			KeyLen:  1,
			Mapping: []int{2, 3, 0},
		},
		{
			Name: "compound keyless reverse",
			AllCols: []schema.Column{
				schema.NewColumn("col1", 0, types.IntKind, false),
				schema.NewColumn("col2", 1, types.IntKind, false),
				schema.NewColumn("col3", 2, types.IntKind, false),
			},
			IdxCols: []string{"col3", "col2"},
			KeyLen:  1,
			Mapping: []int{3, 2, 0},
		},
	}

	for _, tc := range cases {
		tc := tc
		t.Run(tc.Name, func(t *testing.T) {
			// Build a schema from the full column set; pk-ness comes from
			// the partOfPK flag on each column.
			sch := schema.MustSchemaFromCols(schema.NewColCollection(tc.AllCols...))

			// Resolve indexed column names to their tags, in index order.
			idxTags := make([]uint64, 0, len(tc.IdxCols))
			for _, name := range tc.IdxCols {
				idxTags = append(idxTags, sch.GetAllCols().NameToCol[name].Tag)
			}

			// An index's full tag list is the indexed tags followed by the
			// primary-key tags (empty for keyless schemas).
			pkTags := sch.GetPKCols().Tags
			allTags := make([]uint64, 0, len(idxTags)+len(pkTags))
			allTags = append(allTags, idxTags...)
			allTags = append(allTags, pkTags...)

			idx := schema.NewIndex("test_idx", idxTags, allTags, nil, schema.IndexProperties{})

			keyLen, mapping := GetIndexKeyMapping(sch, idx)
			require.Equal(t, tc.KeyLen, keyLen)
			require.Equal(t, tc.Mapping, mapping)
		})
	}
}
+1 -1
View File
@@ -81,5 +81,5 @@ func (k keylessCompare) Compare(left, right Tuple, _ TupleDesc) int {
// CompareValues implements TupleComparator
func (k keylessCompare) CompareValues(left, right []byte, typ Type) int {
panic("unimplemented")
return compare(typ, left, right)
}
File diff suppressed because it is too large Load Diff
+67 -10
View File
@@ -3,7 +3,6 @@ load $BATS_TEST_DIRNAME/helper/common.bash
setup() {
setup_common
skip_nbf_dolt_1
dolt sql <<SQL
CREATE TABLE keyless (
@@ -161,10 +160,12 @@ CSV
run dolt table export keyless
[ $status -eq 0 ]
[[ "${lines[0]}" = "c0,c1" ]] || false
[[ "${lines[1]}" = "1,1" ]] || false
[[ "${lines[2]}" = "1,1" ]] || false
[[ "${lines[3]}" = "0,0" ]] || false
[[ "${lines[4]}" = "2,2" ]] || false
[[ "$output" =~ "1,1" ]] || false
[[ "$output" =~ "1,1" ]] || false
[[ "$output" =~ "0,0" ]] || false
[[ "$output" =~ "2,2" ]] || false
[[ "$output" =~ "Successfully exported data." ]] || false
[[ "${#lines[@]}" = "6" ]] || false
}
@test "keyless: table export SQL" {
@@ -176,14 +177,16 @@ CSV
[[ "${lines[2]}" = " \`c0\` int," ]] || false
[[ "${lines[3]}" = " \`c1\` int" ]] || false
[[ "${lines[4]}" = ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;" ]] || false
[[ "${lines[5]}" = "INSERT INTO \`keyless\` (\`c0\`,\`c1\`) VALUES (1,1);" ]] || false
[[ "${lines[6]}" = "INSERT INTO \`keyless\` (\`c0\`,\`c1\`) VALUES (1,1);" ]] || false
[[ "${lines[7]}" = "INSERT INTO \`keyless\` (\`c0\`,\`c1\`) VALUES (0,0);" ]] || false
[[ "${lines[8]}" = "INSERT INTO \`keyless\` (\`c0\`,\`c1\`) VALUES (2,2);" ]] || false
[[ "$output" =~ "INSERT INTO \`keyless\` (\`c0\`,\`c1\`) VALUES (1,1);" ]] || false
[[ "$output" =~ "INSERT INTO \`keyless\` (\`c0\`,\`c1\`) VALUES (1,1);" ]] || false
[[ "$output" =~ "INSERT INTO \`keyless\` (\`c0\`,\`c1\`) VALUES (0,0);" ]] || false
[[ "$output" =~ "INSERT INTO \`keyless\` (\`c0\`,\`c1\`) VALUES (2,2);" ]] || false
[[ "${#lines[@]}" = "9" ]] || false
}
@test "keyless: diff against working set" {
skip_nbf_dolt_1 "keyless diff not implemented"
dolt sql <<SQL
DELETE FROM keyless WHERE c0 = 0;
INSERT INTO keyless VALUES (8,8);
@@ -201,6 +204,8 @@ SQL
}
@test "keyless: diff --summary" {
skip_nbf_dolt_1 "keyless diff not implemented"
dolt sql <<SQL
DELETE FROM keyless WHERE c0 = 0;
INSERT INTO keyless VALUES (8,8);
@@ -213,6 +218,8 @@ SQL
}
@test "keyless: dolt_diff_ table" {
skip_nbf_dolt_1 "keyless diff not implemented"
dolt sql <<SQL
DELETE FROM keyless WHERE c0 = 0;
INSERT INTO keyless VALUES (8,8);
@@ -272,6 +279,8 @@ SQL
}
@test "keyless: diff branches with identical mutation history" {
skip_nbf_dolt_1 "keyless diff not implemented"
dolt branch other
dolt sql -q "INSERT INTO keyless VALUES (7,7),(8,8),(9,9);"
@@ -307,6 +316,8 @@ SQL
}
@test "keyless: diff deletes from two branches" {
skip_nbf_dolt_1 "keyless diff not implemented"
dolt branch left
dolt checkout -b right
@@ -327,6 +338,8 @@ SQL
}
@test "keyless: merge deletes from two branches" {
skip_nbf_dolt_1 "keyless diff not implemented"
dolt branch left
dolt checkout -b right
@@ -359,6 +372,8 @@ SQL
}
@test "keyless: diff duplicate deletes" {
skip_nbf_dolt_1 "keyless diff not implemented"
make_dupe_table
dolt branch left
@@ -389,6 +404,8 @@ SQL
}
@test "keyless: merge duplicate deletes" {
skip_nbf_dolt_1 "conflicts resolve not implemented"
make_dupe_table
dolt branch left
@@ -414,6 +431,8 @@ SQL
}
@test "keyless: diff duplicate updates" {
skip_nbf_dolt_1 "keyless diff not implemented"
make_dupe_table
dolt branch left
@@ -436,6 +455,8 @@ SQL
}
@test "keyless: merge duplicate updates" {
skip_nbf_dolt_1 "conflicts resolve not implemented"
make_dupe_table
dolt branch left
@@ -501,6 +522,8 @@ SQL
}
@test "keyless: table replace" {
skip_nbf_dolt_1 "keyless diff not implemented"
cat <<CSV > data.csv
c0,c1
0,0
@@ -532,6 +555,8 @@ CSV
# in-place updates create become drop/add
@test "keyless: diff with in-place updates (working set)" {
skip_nbf_dolt_1 "keyless diff not implemented"
dolt sql -q "UPDATE keyless SET c1 = 9 where c0 = 2;"
run dolt diff
[ $status -eq 0 ]
@@ -574,6 +599,8 @@ CSV
# in-place updates diff as drop/add
@test "keyless: diff with in-place updates (branches)" {
skip_nbf_dolt_1 "keyless diff not implemented"
dolt sql -q "INSERT INTO keyless VALUES (7,7),(8,8),(9,9);"
dolt commit -am "added rows"
dolt branch other
@@ -597,6 +624,8 @@ CSV
}
@test "keyless: merge with in-place updates (branches)" {
skip_nbf_dolt_1 "conflicts resolve not implemented"
dolt sql -q "INSERT INTO keyless VALUES (7,7),(8,8),(9,9);"
dolt commit -am "added rows"
dolt branch other
@@ -631,6 +660,8 @@ CSV
}
@test "keyless: diff branches with reordered mutation history" {
skip_nbf_dolt_1 "keyless diff not implemented"
dolt branch other
dolt sql -q "INSERT INTO keyless VALUES (7,7),(8,8),(9,9);"
@@ -668,6 +699,8 @@ CSV
}
@test "keyless: diff branches with convergent mutation history" {
skip_nbf_dolt_1 "keyless diff not implemented"
dolt branch other
dolt sql -q "INSERT INTO keyless VALUES (7,7),(8,8),(9,9);"
@@ -688,6 +721,7 @@ SQL
}
@test "keyless: merge branches with convergent mutation history" {
skip_nbf_dolt_1 "conflicts resolve not implemented"
dolt branch other
dolt sql -q "INSERT INTO keyless VALUES (7,7),(8,8),(9,9);"
@@ -716,6 +750,7 @@ SQL
}
@test "keyless: diff branches with offset mutation history" {
skip_nbf_dolt_1 "keyless diff not implemented"
dolt branch other
dolt sql -q "INSERT INTO keyless VALUES (7,7),(8,8),(9,9);"
@@ -732,6 +767,7 @@ SQL
}
@test "keyless: merge branches with offset mutation history" {
skip_nbf_dolt_1 "conflicts resolve not implemented"
dolt branch other
dolt sql -q "INSERT INTO keyless VALUES (7,7),(8,8),(9,9);"
@@ -757,6 +793,8 @@ SQL
}
@test "keyless: diff delete+add against working" {
skip_nbf_dolt_1 "keyless diff not implemented"
dolt sql <<SQL
DELETE FROM keyless WHERE c0 = 2;
INSERT INTO keyless VALUES (2,2)
@@ -767,6 +805,8 @@ SQL
}
@test "keyless: diff delete+add on two branches" {
skip_nbf_dolt_1 "keyless diff not implemented"
dolt branch left
dolt checkout -b right
@@ -787,6 +827,7 @@ SQL
}
@test "keyless: merge delete+add on two branches" {
skip_nbf_dolt_1 "conflicts resolve not implemented"
dolt branch left
dolt checkout -b right
@@ -977,6 +1018,8 @@ SQL
}
@test "keyless: insert into keyless table with unique index" {
skip_nbf_dolt_1 "keyless table with unique index does not throw errors"
dolt sql -q "CREATE TABLE mytable (pk int UNIQUE)";
run dolt sql -q "INSERT INTO mytable values (1),(2),(3),(4)"
@@ -1017,6 +1060,8 @@ SQL
}
@test "keyless: insert into keyless table with unique index and auto increment" {
skip_nbf_dolt_1 "keyless table with unique index does not throw errors"
dolt sql -q "CREATE TABLE gis (pk INT UNIQUE NOT NULL AUTO_INCREMENT, shape GEOMETRY NOT NULL)"
dolt sql -q "INSERT INTO gis VALUES (1, POINT(1,1))"
@@ -1037,6 +1082,8 @@ SQL
}
@test "keyless: string type unique key index" {
skip_nbf_dolt_1 "keyless table with unique index does not throw errors"
dolt sql -q "CREATE TABLE mytable (pk int, val varchar(6) UNIQUE)"
dolt sql -q "INSERT INTO mytable VALUES (1, 'nekter')"
@@ -1055,6 +1102,8 @@ SQL
}
@test "keyless: compound unique key index" {
skip_nbf_dolt_1 "keyless table with unique index does not throw errors"
dolt sql -q "CREATE TABLE mytable (pk int, v1 int, v2 int)"
dolt sql -q "ALTER TABLE mytable ADD CONSTRAINT ux UNIQUE (v1, v2)"
dolt sql -q "INSERT INTO mytable values (1, 2, 2)"
@@ -1075,6 +1124,8 @@ SQL
}
@test "keyless: replace into and unique key index" {
skip_nbf_dolt_1 "keyless table with unique index does not throw errors"
skip "Keyless tables with unique indexes do not properly support replace into semantics"
dolt sql -q "CREATE TABLE mytable (pk int, v1 int, v2 int)"
dolt sql -q "ALTER TABLE mytable ADD CONSTRAINT ux UNIQUE (v1, v2)"
@@ -1085,6 +1136,8 @@ SQL
}
@test "keyless: batch import with keyless unique index" {
skip_nbf_dolt_1 "keyless table with unique index does not throw errors"
dolt sql -q "CREATE TABLE mytable (pk int, v1 int, v2 int)"
dolt sql -q "ALTER TABLE mytable ADD CONSTRAINT ux UNIQUE (v1)"
@@ -1114,6 +1167,8 @@ SQL
}
@test "keyless: batch import with keyless unique index and secondary index" {
skip_nbf_dolt_1 "keyless table with unique index does not throw errors"
dolt sql -q "CREATE TABLE mytable (pk int, v1 int, v2 int)"
dolt sql -q "ALTER TABLE mytable ADD CONSTRAINT ux UNIQUE (v1)"
dolt sql -q "ALTER TABLE mytable ADD INDEX myidx (v2)"
@@ -1152,6 +1207,8 @@ SQL
}
@test "keyless: batch import into the unique key correctly works" {
skip_nbf_dolt_1 "keyless table with unique index does not throw errors"
skip "index error handling does not work with bulk import"
dolt sql -q "CREATE TABLE mytable (pk int, v1 int, v2 int)"
dolt sql -q "ALTER TABLE mytable ADD CONSTRAINT ux UNIQUE (v1, v2)"