Merge pull request #7230 from dolthub/nicktobey/json

Allow for three way merges to automatically resolve concurrent changes to a JSON object when the changes affect different keys.
This commit is contained in:
Nick Tobey
2024-01-08 09:33:51 -08:00
committed by GitHub
44 changed files with 1295 additions and 135 deletions
+1
View File
@@ -48,6 +48,7 @@ const (
NoFFParam = "no-ff"
NoPrettyFlag = "no-pretty"
NoTLSFlag = "no-tls"
NoJsonMergeFlag = "dont-merge-json"
NotFlag = "not"
NumberFlag = "number"
OneLineFlag = "oneline"
+9
View File
@@ -82,6 +82,7 @@ func (cmd CherryPickCmd) EventType() eventsapi.ClientEventType {
// Exec executes the command.
func (cmd CherryPickCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv, cliCtx cli.CliContext) int {
ap := cli.CreateCherryPickArgParser()
ap.SupportsFlag(cli.NoJsonMergeFlag, "", "Do not attempt to automatically resolve multiple changes to the same JSON value, report a conflict instead.")
help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, cherryPickDocs, ap))
apr := cli.ParseArgsOrDie(ap, args, help)
@@ -105,6 +106,14 @@ func (cmd CherryPickCmd) Exec(ctx context.Context, commandStr string, args []str
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
if apr.Contains(cli.NoJsonMergeFlag) {
_, _, err = queryist.Query(sqlCtx, "set @@session.dolt_dont_merge_json = 1")
if err != nil {
cli.Println(err.Error())
return 1
}
}
// TODO : support single commit cherry-pick only for now
if apr.NArg() == 0 {
usage()
+9
View File
@@ -91,6 +91,7 @@ func (cmd MergeCmd) RequiresRepo() bool {
// Exec executes the command
func (cmd MergeCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv, cliCtx cli.CliContext) int {
ap := cli.CreateMergeArgParser()
ap.SupportsFlag(cli.NoJsonMergeFlag, "", "Do not attempt to automatically resolve multiple changes to the same JSON value, report a conflict instead.")
help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, mergeDocs, ap))
apr := cli.ParseArgsOrDie(ap, args, help)
@@ -121,6 +122,14 @@ func (cmd MergeCmd) Exec(ctx context.Context, commandStr string, args []string,
return 1
}
if apr.Contains(cli.NoJsonMergeFlag) {
_, _, err = queryist.Query(sqlCtx, "set @@dolt_dont_merge_json = 1")
if err != nil {
cli.Println(err.Error())
return 1
}
}
query, err := constructInterpolatedDoltMergeQuery(apr, cliCtx)
if err != nil {
cli.Println(err.Error())
@@ -52,7 +52,7 @@ type Index interface {
// Non-public. Used for flatbuffers Table persistence.
bytes() ([]byte, error)
DebugString(ctx context.Context) string
DebugString(ctx context.Context, ns tree.NodeStore, schema schema.Schema) string
}
// IndexSet stores a collection of secondary Indexes.
@@ -221,7 +221,7 @@ func (i nomsIndex) AddColumnToRows(ctx context.Context, newCol string, newSchema
return i, nil
}
func (i nomsIndex) DebugString(ctx context.Context) string {
func (i nomsIndex) DebugString(ctx context.Context, ns tree.NodeStore, schema schema.Schema) string {
panic("Not implemented")
}
@@ -336,10 +336,10 @@ func (i prollyIndex) AddColumnToRows(ctx context.Context, newCol string, newSche
return IndexFromProllyMap(newMap), nil
}
func (i prollyIndex) DebugString(ctx context.Context) string {
func (i prollyIndex) DebugString(ctx context.Context, ns tree.NodeStore, schema schema.Schema) string {
var b bytes.Buffer
i.index.WalkNodes(ctx, func(ctx context.Context, nd tree.Node) error {
return tree.OutputProllyNode(&b, nd)
return tree.OutputProllyNode(ctx, &b, nd, ns, schema)
})
return b.String()
}
@@ -106,7 +106,7 @@ type Table interface {
SetAutoIncrement(ctx context.Context, val uint64) (Table, error)
// DebugString returns the table contents for debugging purposes
DebugString(ctx context.Context) string
DebugString(ctx context.Context, ns tree.NodeStore) string
}
type nomsTable struct {
@@ -615,7 +615,7 @@ func (t nomsTable) SetAutoIncrement(ctx context.Context, val uint64) (Table, err
return nomsTable{t.vrw, t.ns, st}, nil
}
func (t nomsTable) DebugString(ctx context.Context) string {
func (t nomsTable) DebugString(ctx context.Context, ns tree.NodeStore) string {
var buf bytes.Buffer
err := types.WriteEncodedValue(ctx, &buf, t.tableStruct)
if err != nil {
@@ -685,13 +685,18 @@ type doltDevTable struct {
msg *serial.Table
}
func (t doltDevTable) DebugString(ctx context.Context) string {
func (t doltDevTable) DebugString(ctx context.Context, ns tree.NodeStore) string {
rows, err := t.GetTableRows(ctx)
if err != nil {
panic(err)
}
return rows.DebugString(ctx)
schema, err := t.GetSchema(ctx)
if err != nil {
panic(err)
}
return rows.DebugString(ctx, ns, schema)
}
var _ Table = doltDevTable{}
+1 -1
View File
@@ -1147,7 +1147,7 @@ func (root *RootValue) DebugString(ctx context.Context, transitive bool) string
buf.WriteString(name)
buf.WriteString(":\n")
buf.WriteString(table.DebugString(ctx))
buf.WriteString(table.DebugString(ctx, root.ns))
return false, nil
})
+2 -2
View File
@@ -668,6 +668,6 @@ func (t *Table) AddColumnToRows(ctx context.Context, newCol string, newSchema sc
return &Table{table: newTable}, nil
}
func (t *Table) DebugString(ctx context.Context) string {
return t.table.DebugString(ctx)
func (t *Table) DebugString(ctx context.Context, ns tree.NodeStore) string {
return t.table.DebugString(ctx, ns)
}
@@ -25,9 +25,9 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
"github.com/dolthub/dolt/go/store/pool"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/val"
)
@@ -154,7 +154,7 @@ func (table *fulltextTable) ApplyToTable(ctx *sql.Context) (*doltdb.Table, error
for ; err == nil; sqlRow, err = rowIter.Next(ctx) {
for to := range keyMap {
from := keyMap.MapOrdinal(to)
if err = index.PutField(ctx, mut.NodeStore(), keyBld, to, sqlRow[from]); err != nil {
if err = tree.PutField(ctx, mut.NodeStore(), keyBld, to, sqlRow[from]); err != nil {
return nil, err
}
}
@@ -162,7 +162,7 @@ func (table *fulltextTable) ApplyToTable(ctx *sql.Context) (*doltdb.Table, error
for to := range valMap {
from := valMap.MapOrdinal(to)
if err = index.PutField(ctx, mut.NodeStore(), valBld, to, sqlRow[from]); err != nil {
if err = tree.PutField(ctx, mut.NodeStore(), valBld, to, sqlRow[from]); err != nil {
return nil, err
}
}
@@ -25,6 +25,8 @@ import (
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/go-mysql-server/sql/expression"
"github.com/dolthub/go-mysql-server/sql/transform"
"github.com/dolthub/go-mysql-server/sql/types"
"golang.org/x/exp/maps"
errorkinds "gopkg.in/src-d/go-errors.v1"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
@@ -1464,7 +1466,7 @@ func remapTupleWithColumnDefaults(
secondPass = append(secondPass, to)
}
value, err = index.GetField(ctx, valDesc, from, valueTuple, tm.ns)
value, err = tree.GetField(ctx, valDesc, from, valueTuple, tm.ns)
if err != nil {
return nil, err
}
@@ -1475,7 +1477,7 @@ func remapTupleWithColumnDefaults(
return nil, err
}
err = index.PutField(ctx, tm.ns, tb, to, value)
err = tree.PutField(ctx, tm.ns, tb, to, value)
if err != nil {
return nil, err
}
@@ -1525,7 +1527,7 @@ func writeTupleExpression(
return err
}
return index.PutField(ctx, tm.ns, tb, colIdx, value)
return tree.PutField(ctx, tm.ns, tb, colIdx, value)
}
// convertValueToNewType handles converting a value from a previous type into a new type. |value| is the value from
@@ -1691,7 +1693,7 @@ func findNonPKColumnMappingByTagOrName(sch schema.Schema, col schema.Column) int
// tuples. It returns the merged cell value tuple and a bool indicating if a
// conflict occurred. tryMerge should only be called if left and right produce
// non-identical diffs against base.
func (m *valueMerger) tryMerge(ctx context.Context, left, right, base val.Tuple) (val.Tuple, bool, error) {
func (m *valueMerger) tryMerge(ctx *sql.Context, left, right, base val.Tuple) (val.Tuple, bool, error) {
// If we're merging a keyless table and the keys match, but the values are different,
// that means that the row data is the same, but the cardinality has changed, and if the
// cardinality has changed in different ways on each merge side, we can't auto resolve.
@@ -1825,7 +1827,7 @@ func (m *valueMerger) processBaseColumn(ctx context.Context, i int, left, right,
// processColumn returns the merged value of column |i| of the merged schema,
// based on the |left|, |right|, and |base| schema.
func (m *valueMerger) processColumn(ctx context.Context, i int, left, right, base val.Tuple) (result []byte, conflict bool, err error) {
func (m *valueMerger) processColumn(ctx *sql.Context, i int, left, right, base val.Tuple) (result []byte, conflict bool, err error) {
// missing columns are coerced into NULL column values
var baseCol []byte
@@ -1840,6 +1842,8 @@ func (m *valueMerger) processColumn(ctx context.Context, i int, left, right, bas
resultColumn := m.resultSchema.GetNonPKCols().GetByIndex(i)
generatedColumn := resultColumn.Generated != ""
sqlType := m.resultSchema.GetNonPKCols().GetByIndex(i).TypeInfo.ToSqlType()
// We previously asserted that left and right are not nil.
// But base can be nil in the event of convergent inserts.
if base == nil || !baseColExists {
@@ -1947,6 +1951,19 @@ func (m *valueMerger) processColumn(ctx context.Context, i int, left, right, bas
return leftCol, false, nil
}
// concurrent modification
// if the result type is JSON, we can attempt to merge the JSON changes.
dontMergeJsonVar, err := ctx.Session.GetSessionVariable(ctx, "dolt_dont_merge_json")
if err != nil {
return nil, true, err
}
disallowJsonMerge, err := sql.ConvertToBool(ctx, dontMergeJsonVar)
if err != nil {
return nil, true, err
}
if _, ok := sqlType.(types.JsonType); ok && !disallowJsonMerge {
return m.mergeJSONAddr(ctx, baseCol, leftCol, rightCol)
}
// otherwise, this is a conflict.
return nil, true, nil
case leftModified:
return leftCol, false, nil
@@ -1955,6 +1972,97 @@ func (m *valueMerger) processColumn(ctx context.Context, i int, left, right, bas
}
}
// mergeJSONAddr attempts a three-way merge of the JSON documents referenced by
// the |baseAddr|, |leftAddr|, and |rightAddr| content hashes. On success it
// serializes the merged document back into the node store and returns the new
// address. |conflict| is true when the documents cannot be auto-merged; any
// error is also reported with conflict set so the caller treats it as
// unresolved.
func (m *valueMerger) mergeJSONAddr(ctx context.Context, baseAddr []byte, leftAddr []byte, rightAddr []byte) (resultAddr []byte, conflict bool, err error) {
	// Resolve a stored address into an in-memory JSON document.
	load := func(addr []byte) (types.JSONDocument, error) {
		return tree.NewJSONDoc(hash.New(addr), m.ns).ToJSONDocument(ctx)
	}

	baseDoc, err := load(baseAddr)
	if err != nil {
		return nil, true, err
	}
	leftDoc, err := load(leftAddr)
	if err != nil {
		return nil, true, err
	}
	rightDoc, err := load(rightAddr)
	if err != nil {
		return nil, true, err
	}

	mergedDoc, conflict, err := mergeJSON(baseDoc, leftDoc, rightDoc)
	if err != nil || conflict {
		// Either the merge itself failed or the changes are unresolvable.
		return nil, true, err
	}

	// Re-encode the merged document and persist it to obtain its address.
	encoded, err := json.Marshal(mergedDoc.ToInterface())
	if err != nil {
		return nil, true, err
	}
	newAddr, err := tree.SerializeBytesToAddr(ctx, m.ns, bytes.NewReader(encoded), len(encoded))
	if err != nil {
		return nil, true, err
	}
	return newAddr[:], false, nil
}
// mergeJSON performs a three-way merge of the JSON documents |left| and
// |right| against their common ancestor |base|. It can automatically resolve
// concurrent changes that touch different keys of a JSON object; any other
// concurrent change is reported as a conflict. Returns the merged document,
// a bool indicating whether an unresolvable conflict was found, and any error
// encountered while diffing.
func mergeJSON(base types.JSONDocument, left types.JSONDocument, right types.JSONDocument) (resultDoc types.JSONDocument, conflict bool, err error) {
	// First, deserialize each value into JSON.
	// We can only merge if the value at all three commits is a JSON object.
	baseObject, baseIsObject := base.Val.(types.JsonObject)
	leftObject, leftIsObject := left.Val.(types.JsonObject)
	rightObject, rightIsObject := right.Val.(types.JsonObject)

	if !baseIsObject || !leftIsObject || !rightIsObject {
		// At least one of the commits does not have a JSON object.
		// If both left and right have the same value, use that value.
		// But if they differ, this is an unresolvable merge conflict.
		cmp, err := left.Compare(right)
		if err != nil {
			return types.JSONDocument{}, true, err
		}
		if cmp == 0 {
			// convergent operation: both sides made the same change.
			return left, false, nil
		}
		return types.JSONDocument{}, true, nil
	}

	// Start from a shallow copy of the left object and apply the right-side
	// (and convergent) changes onto it.
	mergedObject := maps.Clone(leftObject)
	merged := types.JSONDocument{Val: mergedObject}
	threeWayDiffer := NewThreeWayJsonDiffer(baseObject, leftObject, rightObject)

	// Compute the merged object by applying diffs to the left object as needed.
	for {
		threeWayDiff, err := threeWayDiffer.Next()
		if err == io.EOF {
			// All diffs consumed; the merge is complete.
			return merged, false, nil
		}
		if err != nil {
			// BUGFIX: non-EOF errors were previously ignored, which processed a
			// zero-valued diff and could reach the panic below. Surface them.
			return types.JSONDocument{}, true, err
		}
		switch threeWayDiff.Op {
		case tree.DiffOpRightAdd, tree.DiffOpConvergentAdd, tree.DiffOpRightModify, tree.DiffOpConvergentModify:
			_, _, err := merged.Set(threeWayDiff.Key, threeWayDiff.Right)
			if err != nil {
				return types.JSONDocument{}, true, err
			}
		case tree.DiffOpRightDelete, tree.DiffOpConvergentDelete:
			_, _, err := merged.Remove(threeWayDiff.Key)
			if err != nil {
				return types.JSONDocument{}, true, err
			}
		case tree.DiffOpLeftAdd, tree.DiffOpLeftModify, tree.DiffOpLeftDelete:
			// these changes already exist on the left, so do nothing.
		case tree.DiffOpDivergentModifyConflict, tree.DiffOpDivergentDeleteConflict:
			return types.JSONDocument{}, true, nil
		default:
			panic("unreachable")
		}
	}
}
func isEqual(i int, left []byte, right []byte, resultType val.Type) bool {
// We use a default comparator instead of the comparator in the schema.
// This is necessary to force a binary collation for string comparisons.
@@ -1976,7 +2084,7 @@ func convert(ctx context.Context, fromDesc, toDesc val.TupleDesc, toSchema schem
// No conversion is necessary here.
return originalValue, nil
}
parsedCell, err := index.GetField(ctx, fromDesc, fromIndex, tuple, ns)
parsedCell, err := tree.GetField(ctx, fromDesc, fromIndex, tuple, ns)
if err != nil {
return nil, err
}
@@ -1989,5 +2097,5 @@ func convert(ctx context.Context, fromDesc, toDesc val.TupleDesc, toSchema schem
// If a merge results in assigning NULL to a non-null column, don't panic.
// Instead we validate the merged tuple before merging it into the table.
typ.Nullable = true
return index.Serialize(ctx, ns, typ, convertedCell)
return tree.Serialize(ctx, ns, typ, convertedCell)
}
@@ -19,6 +19,7 @@ import (
"strconv"
"testing"
"github.com/dolthub/go-mysql-server/sql"
"github.com/stretchr/testify/assert"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
@@ -201,7 +202,7 @@ func TestRowMerge(t *testing.T) {
t.Skip()
}
ctx := context.Background()
ctx := sql.NewEmptyContext()
tests := make([]rowMergeTest, len(testCases))
for i, t := range testCases {
@@ -102,6 +102,9 @@ func TestSchemaMerge(t *testing.T) {
t.Run("simple conflict tests", func(t *testing.T) {
testSchemaMerge(t, simpleConflictTests)
})
t.Run("json merge tests", func(t *testing.T) {
testSchemaMerge(t, jsonMergeTests)
})
}
var columnAddDropTests = []schemaMergeTest{
@@ -1142,6 +1145,200 @@ var simpleConflictTests = []schemaMergeTest{
},
}
var jsonMergeTests = []schemaMergeTest{
{
name: "json merge",
ancestor: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int, b int, j json)")),
left: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int, b int, j json)")),
right: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int, b int, j json)")),
merged: tbl(sch("CREATE TABLE t (id int PRIMARY KEY, a int, b int, j json)")),
dataTests: []dataTest{
{
name: "convergent insertion",
ancestor: singleRow(1, 1, 1, `{}`),
left: singleRow(1, 2, 1, `{ "key1": "value1" }`),
right: singleRow(1, 1, 2, `{ "key1": "value1" }`),
merged: singleRow(1, 2, 2, `{ "key1": "value1" }`),
},
{
name: "convergent insertion with escaped quotes in keys",
ancestor: singleRow(1, 1, 1, `{}`),
left: singleRow(1, 2, 1, `{ "\"key1\"": "\"value1\"" }`),
right: singleRow(1, 1, 2, `{ "\"key1\"": "\"value1\"" }`),
merged: singleRow(1, 2, 2, `{ "\"key1\"": "\"value1\"" }`),
},
{
name: `parallel insertion`,
ancestor: singleRow(1, 1, 1, `{}`),
left: singleRow(1, 2, 1, `{ "key1": "value1" }`),
right: singleRow(1, 1, 2, `{ "key2": "value2" }`),
merged: singleRow(1, 2, 2, `{ "key1": "value1", "key2": "value2" }`),
},
{
name: `convergent modification`,
ancestor: singleRow(1, 1, 1, `{ "key1": "value1" }`),
left: singleRow(1, 2, 1, `{ "key1": "value2" }`),
right: singleRow(1, 1, 2, `{ "key1": "value2" }`),
merged: singleRow(1, 2, 2, `{ "key1": "value2" }`),
},
{
name: `parallel modification`,
ancestor: singleRow(1, 1, 1, `{ "key1": "value1", "key2": "value2" }`),
left: singleRow(1, 2, 1, `{ "key1": "value3", "key2": "value2" }`),
right: singleRow(1, 1, 2, `{ "key1": "value1", "key2": "value4" }`),
merged: singleRow(1, 2, 2, `{ "key1": "value3", "key2": "value4" }`),
},
{
name: `parallel deletion`,
ancestor: singleRow(1, 1, 1, `{ "key1": "value1" }`),
left: singleRow(1, 2, 1, `{}`),
right: singleRow(1, 1, 2, `{}`),
merged: singleRow(1, 2, 2, `{}`),
},
{
name: `convergent deletion`,
ancestor: singleRow(1, 1, 1, `{ "key1": "value1", "key2": "value2" }`),
left: singleRow(1, 2, 1, `{ "key2": "value2" }`),
right: singleRow(1, 1, 2, `{ "key1": "value1" }`),
merged: singleRow(1, 2, 2, `{}`),
},
{
name: `divergent insertion`,
ancestor: singleRow(1, 1, 1, `{}`),
left: singleRow(1, 2, 1, `{ "key1": "value1" }`),
right: singleRow(1, 1, 2, `{ "key1": "value2" }`),
dataConflict: true,
},
{
name: `divergent modification`,
ancestor: singleRow(1, 1, 1, `{ "key1": "value1"}`),
left: singleRow(1, 2, 1, `{ "key1": "value2" }`),
right: singleRow(1, 1, 2, `{ "key1": "value3" }`),
dataConflict: true,
},
{
name: `divergent modification and deletion`,
ancestor: singleRow(1, 1, 1, `{ "key1": "value1"}`),
left: singleRow(1, 2, 1, `{ "key1": "value2" }`),
right: singleRow(1, 1, 2, `{}`),
dataConflict: true,
},
{
name: `nested insertion`,
ancestor: singleRow(1, 1, 1, `{ "key1": {} }`),
left: singleRow(1, 2, 1, `{ "key1": { "key1a": "value1a" } }`),
right: singleRow(1, 1, 2, `{ "key1": { "key1b": "value1b" } }`),
merged: singleRow(1, 2, 2, `{ "key1": { "key1a": "value1a", "key1b": "value1b" } }`),
},
{
name: `nested insertion with escaped quotes in keys`,
ancestor: singleRow(1, 1, 1, `{ "\"key1\"": {} }`),
left: singleRow(1, 2, 1, `{ "\"key1\"": { "\"key1a\"": "value1a" } }`),
right: singleRow(1, 1, 2, `{ "\"key1\"": { "\"key1b\"": "value1b" } }`),
merged: singleRow(1, 2, 2, `{ "\"key1\"": { "\"key1a\"": "value1a", "\"key1b\"": "value1b" } }`),
},
{
name: `nested modification`,
ancestor: singleRow(1, 1, 1, `{ "key1": { "key1a": "value1a", "key1b": "value1b" } }`),
left: singleRow(1, 2, 1, `{ "key1": { "key1a": "value2a", "key1b": "value1b" } }`),
right: singleRow(1, 1, 2, `{ "key1": { "key1a": "value1a", "key1b": "value2b" } }`),
merged: singleRow(1, 2, 2, `{ "key1": { "key1a": "value2a", "key1b": "value2b" } }`),
},
{
name: `nested modification with escaped quotes in keys`,
ancestor: singleRow(1, 1, 1, `{ "\"key1\"": { "\"key1a\"": "value1a", "\"key1b\"": "value1b" } }`),
left: singleRow(1, 2, 1, `{ "\"key1\"": { "\"key1a\"": "value2a", "\"key1b\"": "value1b" } }`),
right: singleRow(1, 1, 2, `{ "\"key1\"": { "\"key1a\"": "value1a", "\"key1b\"": "value2b" } }`),
merged: singleRow(1, 2, 2, `{ "\"key1\"": { "\"key1a\"": "value2a", "\"key1b\"": "value2b" } }`),
},
{
name: `nested deletion`,
ancestor: singleRow(1, 1, 1, `{ "key1": { "key1a": "value1a", "key1b": "value1b" } }`),
left: singleRow(1, 2, 1, `{ "key1": { "key1a": "value1a" } }`),
right: singleRow(1, 1, 2, `{ "key1": { "key1b": "value1b" } }`),
merged: singleRow(1, 2, 2, `{ "key1": { } }`),
},
{
name: `nested deletion with escaped quotes in keys`,
ancestor: singleRow(1, 1, 1, `{ "\"key1\"": { "\"key1a\"": "value1a", "\"key1b\"": "value1b" } }`),
left: singleRow(1, 2, 1, `{ "\"key1\"": { "\"key1a\"": "value1a" } }`),
right: singleRow(1, 1, 2, `{ "\"key1\"": { "\"key1b\"": "value1b" } }`),
merged: singleRow(1, 2, 2, `{ "\"key1\"": { } }`),
},
{
name: "complicated nested merge",
ancestor: singleRow(1, 1, 1, `{ "removed": 1, "modified": 2, "nested": { "removed": 3, "modified": 4 } }`),
left: singleRow(1, 2, 1, `{ "added": 7, "modified": 2, "nested": { "removed": 3, "modified": 5 } }`),
right: singleRow(1, 1, 2, `{ "removed": 1, "modified": 6, "nested": { "added": 8, "modified": 4 } }`),
merged: singleRow(1, 2, 2, `{ "added": 7, "modified": 6, "nested": { "added": 8, "modified": 5 } }`),
},
{
name: "object with double quotes in keys",
ancestor: singleRow(1, 1, 1, `{ "\"removed\"": 1, "\"modified\"": 2, "\"nested\"": { "\"removed\"": 3, "\"modified\"": 4 } }`),
left: singleRow(1, 2, 1, `{ "\"added\"": 7, "\"modified\"": 2, "\"nested\"": { "\"removed\"": 3, "\"modified\"": 5 } }`),
right: singleRow(1, 1, 2, `{ "\"removed\"": 1, "\"modified\"": 6, "\"nested\"": { "\"added\"": 8, "\"modified\"": 4 } }`),
merged: singleRow(1, 2, 2, `{ "\"added\"": 7, "\"modified\"": 6, "\"nested\"": { "\"added\"": 8, "\"modified\"": 5 } }`),
},
{
name: "changing types",
ancestor: singleRow(1, 1, 1, `{ "key1": {}, "key2": 2 }`),
left: singleRow(1, 2, 1, `{ "key1": [], "key2": 2 }`),
right: singleRow(1, 1, 2, `{ "key1": {}, "key2": true }`),
merged: singleRow(1, 2, 2, `{ "key1": [], "key2": true }`),
},
{
name: "changing types conflict",
ancestor: singleRow(1, 1, 1, `{ "key1": {} }`),
left: singleRow(1, 2, 1, `{ "key1": [] }`),
right: singleRow(1, 1, 2, `{ "key1": 2 }`),
dataConflict: true,
},
{
name: "object insert and modify conflict",
ancestor: singleRow(1, 1, 1, `{ "key1": {} }`),
left: singleRow(1, 2, 1, `{ "key1": { "key2": 2 } }`),
right: singleRow(1, 1, 2, `{ "key1": 2 }`),
dataConflict: true,
},
{
name: "object insert and delete conflict",
ancestor: singleRow(1, 1, 1, `{ "key1": {} }`),
left: singleRow(1, 2, 1, `{ "key1": { "key2": 2 } }`),
right: singleRow(1, 1, 2, `{ }`),
dataConflict: true,
},
{
name: "changing arrays conflict",
ancestor: singleRow(1, 1, 1, `{ "key1": [1] }`),
left: singleRow(1, 2, 1, `{ "key1": [1, 1] }`),
right: singleRow(1, 1, 2, `{ "key1": [] }`),
dataConflict: true,
},
{
// Which array element should go first?
// We avoid making assumptions and flag this as a conflict.
name: "object inside array conflict",
ancestor: singleRow(1, 1, 1, `{ "key1": [ { } ] }`),
left: singleRow(1, 2, 1, `{ "key1": [ { "key2": "value2" } ] }`),
right: singleRow(1, 1, 2, `{ "key1": [ { "key3": "value3" } ] }`),
dataConflict: true,
},
{
// Did the left branch overwrite the first value in the array?
// Or did it remove the last value and insert at the beginning?
// Did the right branch overwrite the second value in the array?
// Or did it remove the first value and insert at the end?
// Diffs on arrays are ambiguous. We avoid making assumptions and flag this as a conflict.
name: "parallel array modification",
ancestor: singleRow(1, 1, 1, `{ "key1": [ 1, 1 ] }`),
left: singleRow(1, 2, 1, `{ "key1": [ 2, 1 ] }`),
right: singleRow(1, 1, 2, `{ "key1": [ 1, 2 ] }`),
dataConflict: true,
},
},
},
}
func testSchemaMerge(t *testing.T, tests []schemaMergeTest) {
t.Run("merge left to right", func(t *testing.T) {
testSchemaMergeHelper(t, tests, false)
@@ -1199,7 +1396,15 @@ func testSchemaMergeHelper(t *testing.T, tests []schemaMergeTest, flipSides bool
require.NoError(t, err)
foundDataConflict = foundDataConflict || hasConflict
}
require.True(t, foundDataConflict, "Expected data conflict, but didn't find one.")
if !assert.True(t, foundDataConflict, "Expected data conflict, but didn't find one.") {
for name, _ := range exp {
table, _, err := result.Root.GetTable(ctx, name)
require.NoError(t, err)
t.Logf("table %s:", name)
t.Log(table.DebugString(ctx, m.NodeStore()))
}
}
} else {
for name, addr := range exp {
a, ok := act[name]
@@ -1248,8 +1453,8 @@ func testSchemaMergeHelper(t *testing.T, tests []schemaMergeTest, flipSides bool
require.NoError(t, err)
if expRowDataHash != actRowDataHash {
t.Error("Rows unequal")
t.Logf("expected rows: %s", expTbl.DebugString(ctx))
t.Logf("actual rows: %s", actTbl.DebugString(ctx))
t.Logf("expected rows: %s", expTbl.DebugString(ctx, m.NodeStore()))
t.Logf("actual rows: %s", actTbl.DebugString(ctx, m.NodeStore()))
}
expIndexSet, err := expTbl.GetIndexSet(ctx)
require.NoError(t, err)
@@ -1268,8 +1473,8 @@ func testSchemaMergeHelper(t *testing.T, tests []schemaMergeTest, flipSides bool
require.NoError(t, err)
if expIndexHash != actIndexHash {
t.Errorf("Index %s unequal", index.Name())
t.Logf("expected rows: %s", expIndex.DebugString(ctx))
t.Logf("actual rows: %s", actIndex.DebugString(ctx))
t.Logf("expected rows: %s", expIndex.DebugString(ctx, m.NodeStore(), expSchema))
t.Logf("actual rows: %s", actIndex.DebugString(ctx, m.NodeStore(), expSchema))
}
return false, nil
})
@@ -0,0 +1,207 @@
// Copyright 2023 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package merge
import (
"bytes"
"io"
"strings"
"github.com/dolthub/go-mysql-server/sql/types"
"github.com/dolthub/dolt/go/store/prolly/tree"
)
// ThreeWayJsonDiffer computes a three-way diff of JSON objects by running two
// pairwise differs (base vs. left and base vs. right) in lockstep and
// reconciling their output streams.
type ThreeWayJsonDiffer struct {
	leftDiffer, rightDiffer tree.JsonDiffer
	// The most recently loaded, not-yet-consumed diff from each side; nil when
	// the next diff still needs to be read from the corresponding differ.
	leftCurrentDiff, rightCurrentDiff *tree.JsonDiff
	// Set once the corresponding pairwise differ has returned io.EOF.
	leftIsDone, rightIsDone bool
}
// NewThreeWayJsonDiffer returns a ThreeWayJsonDiffer that diffs |left| and
// |right| against their common ancestor |base|, starting at the document
// root ("$").
func NewThreeWayJsonDiffer(base, left, right types.JsonObject) ThreeWayJsonDiffer {
	return ThreeWayJsonDiffer{
		leftDiffer:  tree.NewJsonDiffer("$", base, left),
		rightDiffer: tree.NewJsonDiffer("$", base, right),
	}
}
// ThreeWayJsonDiff describes a single reconciled change produced by
// ThreeWayJsonDiffer.Next.
type ThreeWayJsonDiff struct {
	// Op indicates the type of diff
	Op tree.DiffOp
	// Key is the JSON path of the changed value.
	// a partial set of document values are set
	// depending on the diffOp
	Key                       string
	Base, Left, Right, Merged *types.JSONDocument
}
// Next returns the next reconciled three-way diff, advancing one or both of
// the underlying pairwise differs. It returns io.EOF once no further diffs
// can affect the merge result. Left-only changes are skipped (they are
// already present in the merge base copy); right-only changes are emitted as
// right-side ops; changes to the same key are compared and either merged,
// emitted as convergent, or flagged as divergent conflicts.
func (differ *ThreeWayJsonDiffer) Next() (ThreeWayJsonDiff, error) {
	for {
		// Make sure both cursors are populated (or marked done).
		err := differ.loadNextDiff()
		if err != nil {
			return ThreeWayJsonDiff{}, err
		}
		if differ.rightIsDone {
			// We don't care if there are remaining left diffs because they don't affect the merge.
			return ThreeWayJsonDiff{}, io.EOF
		}
		if differ.leftIsDone {
			return differ.processRightSideOnlyDiff(), nil
		}
		// !differ.rightIsDone && !differ.leftIsDone
		leftDiff := differ.leftCurrentDiff
		rightDiff := differ.rightCurrentDiff
		// Keys are compared bytewise to establish a stable ordering between
		// the two diff streams.
		cmp := bytes.Compare([]byte(leftDiff.Key), []byte(rightDiff.Key))
		if cmp > 0 {
			if strings.HasPrefix(leftDiff.Key, rightDiff.Key) {
				// The left diff must be replacing or deleting an object,
				// and the right diff makes changes to that object.
				// Note the fact that all keys in these paths are quoted means we don't have to worry about
				// one key being a prefix of the other and triggering a false positive here.
				result := ThreeWayJsonDiff{
					Op: tree.DiffOpDivergentModifyConflict,
				}
				differ.leftCurrentDiff = nil
				return result, nil
			}
			// key only changed on right
			return differ.processRightSideOnlyDiff(), nil
		} else if cmp < 0 {
			if strings.HasPrefix(rightDiff.Key, leftDiff.Key) {
				// The right diff must be replacing or deleting an object,
				// and the left diff makes changes to that object.
				// Note the fact that all keys in these paths are quoted means we don't have to worry about
				// one key being a prefix of the other and triggering a false positive here.
				result := ThreeWayJsonDiff{
					Op: tree.DiffOpDivergentModifyConflict,
				}
				differ.rightCurrentDiff = nil
				return result, nil
			}
			// left side was modified. We don't need to do anything with this diff.
			differ.leftCurrentDiff = nil
			continue
		} else {
			// cmp == 0; Both diffs are on the same key
			if differ.leftCurrentDiff.From == nil {
				// Key did not exist at base, so both sides are inserts.
				// Check that they're inserting the same value.
				valueCmp, err := differ.leftCurrentDiff.To.Compare(differ.rightCurrentDiff.To)
				if err != nil {
					return ThreeWayJsonDiff{}, err
				}
				if valueCmp == 0 {
					return differ.processMergedDiff(tree.DiffOpConvergentModify, differ.leftCurrentDiff.To), nil
				} else {
					return differ.processMergedDiff(tree.DiffOpDivergentModifyConflict, nil), nil
				}
			}
			if differ.leftCurrentDiff.To == nil && differ.rightCurrentDiff.To == nil {
				// Both sides deleted the key: a convergent delete.
				return differ.processMergedDiff(tree.DiffOpConvergentDelete, nil), nil
			}
			if differ.leftCurrentDiff.To == nil || differ.rightCurrentDiff.To == nil {
				// One side deleted while the other modified: unresolvable.
				return differ.processMergedDiff(tree.DiffOpDivergentDeleteConflict, nil), nil
			}
			// If the key existed at base, we can do a recursive three-way merge to resolve
			// changes to the values.
			// This shouldn't be necessary: if it's an object on all three branches, the original diff is recursive.
			mergedValue, conflict, err := mergeJSON(*differ.leftCurrentDiff.From,
				*differ.leftCurrentDiff.To,
				*differ.rightCurrentDiff.To)
			if err != nil {
				return ThreeWayJsonDiff{}, err
			}
			if conflict {
				return differ.processMergedDiff(tree.DiffOpDivergentModifyConflict, nil), nil
			} else {
				return differ.processMergedDiff(tree.DiffOpDivergentModifyResolved, &mergedValue), nil
			}
		}
	}
}
// loadNextDiff refills whichever of the two diff cursors has been consumed,
// marking a side as done when its differ is exhausted. It is a no-op for a
// side whose current diff is still pending or which has already finished.
func (differ *ThreeWayJsonDiffer) loadNextDiff() error {
	if !differ.leftIsDone && differ.leftCurrentDiff == nil {
		d, err := differ.leftDiffer.Next()
		switch {
		case err == io.EOF:
			differ.leftIsDone = true
		case err != nil:
			return err
		default:
			differ.leftCurrentDiff = &d
		}
	}
	if !differ.rightIsDone && differ.rightCurrentDiff == nil {
		d, err := differ.rightDiffer.Next()
		switch {
		case err == io.EOF:
			differ.rightIsDone = true
		case err != nil:
			return err
		default:
			differ.rightCurrentDiff = &d
		}
	}
	return nil
}
// processRightSideOnlyDiff consumes the current right-side diff and converts
// it into the corresponding right-only three-way diff op. It must only be
// called when a right-side diff is loaded.
func (differ *ThreeWayJsonDiffer) processRightSideOnlyDiff() ThreeWayJsonDiff {
	d := differ.rightCurrentDiff
	differ.rightCurrentDiff = nil
	switch d.Type {
	case tree.AddedDiff:
		return ThreeWayJsonDiff{
			Op:    tree.DiffOpRightAdd,
			Key:   d.Key,
			Right: d.To,
		}
	case tree.RemovedDiff:
		return ThreeWayJsonDiff{
			Op:   tree.DiffOpRightDelete,
			Key:  d.Key,
			Base: d.From,
		}
	case tree.ModifiedDiff:
		return ThreeWayJsonDiff{
			Op:    tree.DiffOpRightModify,
			Key:   d.Key,
			Base:  d.From,
			Right: d.To,
		}
	default:
		panic("unreachable")
	}
}
// processMergedDiff consumes both current diffs (which refer to the same key)
// and produces a combined three-way diff with the supplied op and optional
// merged value.
func (differ *ThreeWayJsonDiffer) processMergedDiff(op tree.DiffOp, merged *types.JSONDocument) ThreeWayJsonDiff {
	left, right := differ.leftCurrentDiff, differ.rightCurrentDiff
	differ.leftCurrentDiff = nil
	differ.rightCurrentDiff = nil
	return ThreeWayJsonDiff{
		Op:     op,
		Key:    left.Key,
		Base:   left.From,
		Left:   left.To,
		Right:  right.To,
		Merged: merged,
	}
}
@@ -20,7 +20,6 @@ import (
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
"github.com/dolthub/dolt/go/store/prolly"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/val"
@@ -34,7 +33,7 @@ func NextConstraintViolation(ctx context.Context, itr prolly.ArtifactIter, kd, v
key = make(sql.Row, kd.Count())
for i := 0; i < kd.Count(); i++ {
key[i], err = index.GetField(ctx, kd, i, art.SourceKey, ns)
key[i], err = tree.GetField(ctx, kd, i, art.SourceKey, ns)
if err != nil {
return
}
@@ -48,7 +47,7 @@ func NextConstraintViolation(ctx context.Context, itr prolly.ArtifactIter, kd, v
value = make(sql.Row, vd.Count())
for i := 0; i < vd.Count(); i++ {
value[i], err = index.GetField(ctx, vd, i, meta.Value, ns)
value[i], err = tree.GetField(ctx, vd, i, meta.Value, ns)
if err != nil {
return
}
+2 -2
View File
@@ -29,7 +29,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/globalstate"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -332,7 +332,7 @@ func getMaxIndexValue(ctx context.Context, indexData durable.Index) (uint64, err
}
// TODO: is the auto-inc column always the first column in the index?
field, err := index.GetField(ctx, kd, 0, k, idx.NodeStore())
field, err := tree.GetField(ctx, kd, 0, k, idx.NodeStore())
if err != nil {
return 0, err
}
@@ -205,7 +205,7 @@ func (itr *prollyConflictRowIter) Next(ctx *sql.Context) (sql.Row, error) {
if !itr.keyless {
for i := 0; i < itr.kd.Count(); i++ {
f, err := index.GetField(ctx, itr.kd, i, c.k, itr.baseRows.NodeStore())
f, err := tree.GetField(ctx, itr.kd, i, c.k, itr.baseRows.NodeStore())
if err != nil {
return nil, err
}
@@ -238,7 +238,7 @@ func (itr *prollyConflictRowIter) Next(ctx *sql.Context) (sql.Row, error) {
func (itr *prollyConflictRowIter) putConflictRowVals(ctx *sql.Context, c conf, r sql.Row) error {
if c.bV != nil {
for i := 0; i < itr.baseVD.Count(); i++ {
f, err := index.GetField(ctx, itr.baseVD, i, c.bV, itr.baseRows.NodeStore())
f, err := tree.GetField(ctx, itr.baseVD, i, c.bV, itr.baseRows.NodeStore())
if err != nil {
return err
}
@@ -248,7 +248,7 @@ func (itr *prollyConflictRowIter) putConflictRowVals(ctx *sql.Context, c conf, r
if c.oV != nil {
for i := 0; i < itr.oursVD.Count(); i++ {
f, err := index.GetField(ctx, itr.oursVD, i, c.oV, itr.baseRows.NodeStore())
f, err := tree.GetField(ctx, itr.oursVD, i, c.oV, itr.baseRows.NodeStore())
if err != nil {
return err
}
@@ -259,7 +259,7 @@ func (itr *prollyConflictRowIter) putConflictRowVals(ctx *sql.Context, c conf, r
if c.tV != nil {
for i := 0; i < itr.theirsVD.Count(); i++ {
f, err := index.GetField(ctx, itr.theirsVD, i, c.tV, itr.baseRows.NodeStore())
f, err := tree.GetField(ctx, itr.theirsVD, i, c.tV, itr.baseRows.NodeStore())
if err != nil {
return err
}
@@ -288,13 +288,13 @@ func (itr *prollyConflictRowIter) putKeylessConflictRowVals(ctx *sql.Context, c
if c.bV != nil {
// Cardinality
r[itr.n-3], err = index.GetField(ctx, itr.baseVD, 0, c.bV, ns)
r[itr.n-3], err = tree.GetField(ctx, itr.baseVD, 0, c.bV, ns)
if err != nil {
return err
}
for i := 0; i < itr.baseVD.Count()-1; i++ {
f, err := index.GetField(ctx, itr.baseVD, i+1, c.bV, ns)
f, err := tree.GetField(ctx, itr.baseVD, i+1, c.bV, ns)
if err != nil {
return err
}
@@ -305,13 +305,13 @@ func (itr *prollyConflictRowIter) putKeylessConflictRowVals(ctx *sql.Context, c
}
if c.oV != nil {
r[itr.n-2], err = index.GetField(ctx, itr.oursVD, 0, c.oV, ns)
r[itr.n-2], err = tree.GetField(ctx, itr.oursVD, 0, c.oV, ns)
if err != nil {
return err
}
for i := 0; i < itr.oursVD.Count()-1; i++ {
f, err := index.GetField(ctx, itr.oursVD, i+1, c.oV, ns)
f, err := tree.GetField(ctx, itr.oursVD, i+1, c.oV, ns)
if err != nil {
return err
}
@@ -324,13 +324,13 @@ func (itr *prollyConflictRowIter) putKeylessConflictRowVals(ctx *sql.Context, c
r[itr.o+itr.oursVD.Count()-1] = getDiffType(c.bV, c.oV)
if c.tV != nil {
r[itr.n-1], err = index.GetField(ctx, itr.theirsVD, 0, c.tV, ns)
r[itr.n-1], err = tree.GetField(ctx, itr.theirsVD, 0, c.tV, ns)
if err != nil {
return err
}
for i := 0; i < itr.theirsVD.Count()-1; i++ {
f, err := index.GetField(ctx, itr.theirsVD, i+1, c.tV, ns)
f, err := tree.GetField(ctx, itr.theirsVD, i+1, c.tV, ns)
if err != nil {
return err
}
@@ -617,7 +617,7 @@ func (cd *prollyConflictDeleter) putPrimaryKeys(ctx *sql.Context, r sql.Row) err
}()
for i := 0; i < cd.kd.Count()-2; i++ {
err := index.PutField(ctx, cd.ed.NodeStore(), cd.kB, i, r[o+i])
err := tree.PutField(ctx, cd.ed.NodeStore(), cd.kB, i, r[o+i])
if err != nil {
return err
@@ -640,7 +640,7 @@ func (cd *prollyConflictDeleter) putKeylessHash(ctx *sql.Context, r sql.Row) err
// init cardinality to 0
cd.vB.PutUint64(0, 0)
for i, v := range rowVals {
err := index.PutField(ctx, cd.ed.NodeStore(), cd.vB, i+1, v)
err := tree.PutField(ctx, cd.ed.NodeStore(), cd.vB, i+1, v)
if err != nil {
return err
}
@@ -173,7 +173,7 @@ func (itr prollyCVIter) Next(ctx *sql.Context) (sql.Row, error) {
o := 2
if !schema.IsKeyless(itr.sch) {
for i := 0; i < itr.kd.Count(); i++ {
r[o+i], err = index.GetField(ctx, itr.kd, i, art.SourceKey, itr.ns)
r[o+i], err = tree.GetField(ctx, itr.kd, i, art.SourceKey, itr.ns)
if err != nil {
return nil, err
}
@@ -181,7 +181,7 @@ func (itr prollyCVIter) Next(ctx *sql.Context) (sql.Row, error) {
o += itr.kd.Count()
for i := 0; i < itr.vd.Count(); i++ {
r[o+i], err = index.GetField(ctx, itr.vd, i, meta.Value, itr.ns)
r[o+i], err = tree.GetField(ctx, itr.vd, i, meta.Value, itr.ns)
if err != nil {
return nil, err
}
@@ -189,7 +189,7 @@ func (itr prollyCVIter) Next(ctx *sql.Context) (sql.Row, error) {
o += itr.vd.Count()
} else {
for i := 0; i < itr.vd.Count()-1; i++ {
r[o+i], err = index.GetField(ctx, itr.vd, i+1, meta.Value, itr.ns)
r[o+i], err = tree.GetField(ctx, itr.vd, i+1, meta.Value, itr.ns)
if err != nil {
return nil, err
}
@@ -247,7 +247,7 @@ var _ sql.RowDeleter = (*prollyCVDeleter)(nil)
func (d *prollyCVDeleter) Delete(ctx *sql.Context, r sql.Row) error {
// first part of the artifact key is the keys of the source table
for i := 0; i < d.kd.Count()-2; i++ {
err := index.PutField(ctx, d.cvt.artM.NodeStore(), d.kb, i, r[i+2])
err := tree.PutField(ctx, d.cvt.artM.NodeStore(), d.kb, i, r[i+2])
if err != nil {
return err
}
@@ -21,7 +21,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/rowconv"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/val"
)
@@ -117,7 +116,7 @@ func (c ProllyRowConverter) putFields(ctx context.Context, tup val.Tuple, proj v
if j == -1 {
continue
}
f, err := index.GetField(ctx, desc, i, tup, c.ns)
f, err := tree.GetField(ctx, desc, i, tup, c.ns)
if err != nil {
return err
}
@@ -2964,6 +2964,7 @@ func TestThreeWayMergeWithSchemaChangeScripts(t *testing.T) {
runMergeScriptTestsInBothDirections(t, SchemaChangeTestsConstraints, "constraint changes", false)
runMergeScriptTestsInBothDirections(t, SchemaChangeTestsSchemaConflicts, "schema conflicts", false)
runMergeScriptTestsInBothDirections(t, SchemaChangeTestsGeneratedColumns, "generated columns", false)
runMergeScriptTestsInBothDirections(t, SchemaChangeTestsForJsonConflicts, "json merge", false)
// Run non-symmetric schema merge tests in just one direction
t.Run("type changes", func(t *testing.T) {
@@ -2986,6 +2987,7 @@ func TestThreeWayMergeWithSchemaChangeScriptsPrepared(t *testing.T) {
runMergeScriptTestsInBothDirections(t, SchemaChangeTestsConstraints, "constraint changes", true)
runMergeScriptTestsInBothDirections(t, SchemaChangeTestsSchemaConflicts, "schema conflicts", true)
runMergeScriptTestsInBothDirections(t, SchemaChangeTestsGeneratedColumns, "generated columns", true)
runMergeScriptTestsInBothDirections(t, SchemaChangeTestsForJsonConflicts, "json merge", true)
// Run non-symmetric schema merge tests in just one direction
t.Run("type changes", func(t *testing.T) {
@@ -2405,3 +2405,67 @@ var SchemaChangeTestsGeneratedColumns = []MergeScriptTest{
},
},
}
// SchemaChangeTestsForJsonConflicts exercises three-way merges in which both
// branches modify the same JSON column value, with and without the
// @@dolt_dont_merge_json session variable controlling automatic resolution.
var SchemaChangeTestsForJsonConflicts = []MergeScriptTest{
	{
		// By default, concurrent edits that touch different keys of the same
		// JSON object are auto-merged into a single combined document.
		Name: "json merge succeeds without @@dolt_dont_merge_json",
		AncSetUpScript: []string{
			"set autocommit = 0;",
			"CREATE table t (pk int primary key, j json);",
			"INSERT into t values (1, '{}');",
		},
		RightSetUpScript: []string{
			`update t set j = '{"a": 1}';`,
		},
		LeftSetUpScript: []string{
			`update t set j = '{"b": 2}';`,
		},
		Assertions: []queries.ScriptTestAssertion{
			{
				// Merge succeeds: a commit is created with no conflicts.
				Query:    "call dolt_merge('right');",
				Expected: []sql.Row{{doltCommit, 0, 0}},
			},
			{
				// Keys from both branches appear in the merged document.
				Query: "select * from t;",
				Expected: []sql.Row{
					{
						1, `{"a": 1, "b": 2}`,
					},
				},
			},
		},
	},
	{
		// With @@dolt_dont_merge_json enabled, the identical scenario reports
		// a conflict instead of merging the documents.
		Name: "json merge fails with @@dolt_dont_merge_json",
		AncSetUpScript: []string{
			"set autocommit = 0;",
			"set @@dolt_dont_merge_json = 1;",
			"CREATE table t (pk int primary key, j json);",
			"INSERT into t values (1, '{}');",
		},
		RightSetUpScript: []string{
			`update t set j = '{"a": 1}';`,
		},
		LeftSetUpScript: []string{
			`update t set j = '{"b": 2}';`,
		},
		Assertions: []queries.ScriptTestAssertion{
			{
				// Merge does not commit; one table has conflicts.
				Query:    "call dolt_merge('right');",
				Expected: []sql.Row{{"", 0, 1}},
			},
			{
				Query:    "select * from dolt_conflicts;",
				Expected: []sql.Row{{"t", uint(1)}},
			},
			{
				// The conflict exposes base, ours, and theirs versions of the
				// JSON value for manual resolution.
				Query: "select base_j, our_j, their_j from dolt_conflicts_t;",
				Expected: []sql.Row{
					{
						`{}`, `{"b": 2}`, `{"a": 1}`,
					},
				},
			},
		},
	},
}
@@ -28,7 +28,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
"github.com/dolthub/dolt/go/store/prolly"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
@@ -215,7 +214,7 @@ func validateKeylessIndex(ctx context.Context, sch schema.Schema, def schema.Ind
if err != nil {
return err
}
cell := index.ZCell(geom.(sqltypes.GeometryValue))
cell := tree.ZCell(geom.(sqltypes.GeometryValue))
field = cell[:]
}
@@ -310,7 +309,7 @@ func validatePkIndex(ctx context.Context, sch schema.Schema, def schema.Index, p
if err != nil {
return err
}
cell := index.ZCell(geom.(sqltypes.GeometryValue))
cell := tree.ZCell(geom.(sqltypes.GeometryValue))
field = cell[:]
}
@@ -369,7 +368,7 @@ func shouldDereferenceContent(tablePos int, tableValueDescriptor val.TupleDesc,
// table, |tablePos| is the field index into the value tuple, and |tuple| is the value tuple from the
// main table.
func dereferenceContent(ctx context.Context, tableValueDescriptor val.TupleDesc, tablePos int, tuple val.Tuple, ns tree.NodeStore) ([]byte, error) {
v, err := index.GetField(ctx, tableValueDescriptor, tablePos, tuple, ns)
v, err := tree.GetField(ctx, tableValueDescriptor, tablePos, tuple, ns)
if err != nil {
return nil, err
}
@@ -392,7 +391,7 @@ func dereferenceContent(ctx context.Context, tableValueDescriptor val.TupleDesc,
// table, |tablePos| is the field index into the value tuple, and |tuple| is the value tuple from the
// main table.
func dereferenceGeometry(ctx context.Context, tableValueDescriptor val.TupleDesc, tablePos int, tuple val.Tuple, ns tree.NodeStore) (interface{}, error) {
v, err := index.GetField(ctx, tableValueDescriptor, tablePos, tuple, ns)
v, err := tree.GetField(ctx, tableValueDescriptor, tablePos, tuple, ns)
if err != nil {
return nil, err
}
@@ -1108,15 +1108,15 @@ func (di *doltIndex) prollySpatialRanges(ranges []sql.Range) ([]prolly.Range, er
}
var pRanges []prolly.Range
zMin := ZValue(minPoint)
zMax := ZValue(maxPoint)
zRanges := SplitZRanges(ZRange{zMin, zMax})
zMin := tree.ZValue(minPoint)
zMax := tree.ZValue(maxPoint)
zRanges := tree.SplitZRanges(tree.ZRange{zMin, zMax})
for level := byte(0); level < 65; level++ {
// For example, at highest level, we'll just look at origin point multiple times
var prevMinCell, prevMaxCell val.Cell
for i, zRange := range zRanges {
minCell := ZMask(level, zRange[0])
maxCell := ZMask(level, zRange[1])
minCell := tree.ZMask(level, zRange[0])
maxCell := tree.ZMask(level, zRange[1])
if i != 0 && minCell == prevMinCell && maxCell == prevMaxCell {
continue
}
@@ -1170,7 +1170,7 @@ func (di *doltIndex) prollyRangesFromSqlRanges(ctx context.Context, ns tree.Node
return nil, err
}
nv := di.trimRangeCutValue(j, v)
if err = PutField(ctx, ns, tb, j, nv); err != nil {
if err = tree.PutField(ctx, ns, tb, j, nv); err != nil {
return nil, err
}
bound := expr.LowerBound.TypeAsLowerBound()
@@ -1197,7 +1197,7 @@ func (di *doltIndex) prollyRangesFromSqlRanges(ctx context.Context, ns tree.Node
return nil, err
}
nv := di.trimRangeCutValue(i, v)
if err = PutField(ctx, ns, tb, i, nv); err != nil {
if err = tree.PutField(ctx, ns, tb, i, nv); err != nil {
return nil, err
}
fields[i].Hi = prolly.Bound{
@@ -209,7 +209,7 @@ func (b SecondaryKeyBuilder) SecondaryKeyFromRow(ctx context.Context, k, v val.T
}
// TODO: type conversion
err = PutField(ctx, b.nodeStore, b.builder, to, value)
err = tree.PutField(ctx, b.nodeStore, b.builder, to, value)
if err != nil {
return nil, err
}
@@ -229,7 +229,7 @@ func (b SecondaryKeyBuilder) SecondaryKeyFromRow(ctx context.Context, k, v val.T
if b.canCopyRawBytes(to) {
b.builder.PutRaw(to, v.GetField(from))
} else {
value, err := GetField(ctx, b.sch.GetValueDescriptor(), from, v, b.nodeStore)
value, err := tree.GetField(ctx, b.sch.GetValueDescriptor(), from, v, b.nodeStore)
if err != nil {
return nil, err
}
@@ -238,7 +238,7 @@ func (b SecondaryKeyBuilder) SecondaryKeyFromRow(ctx context.Context, k, v val.T
value = val.TrimValueToPrefixLength(value, b.indexDef.PrefixLengths()[to])
}
err = PutField(ctx, b.nodeStore, b.builder, to, value)
err = tree.PutField(ctx, b.nodeStore, b.builder, to, value)
if err != nil {
return nil, err
}
@@ -114,7 +114,7 @@ func (p prollyIndexIter) rowFromTuples(ctx context.Context, key, value val.Tuple
for i, idx := range p.keyMap {
outputIdx := p.ordMap[i]
r[outputIdx], err = GetField(ctx, keyDesc, idx, key, p.primary.NodeStore())
r[outputIdx], err = tree.GetField(ctx, keyDesc, idx, key, p.primary.NodeStore())
if err != nil {
return err
}
@@ -122,7 +122,7 @@ func (p prollyIndexIter) rowFromTuples(ctx context.Context, key, value val.Tuple
for i, idx := range p.valMap {
outputIdx := p.ordMap[len(p.keyMap)+i]
r[outputIdx], err = GetField(ctx, valDesc, idx, value, p.primary.NodeStore())
r[outputIdx], err = tree.GetField(ctx, valDesc, idx, value, p.primary.NodeStore())
if err != nil {
return err
}
@@ -229,7 +229,7 @@ func (p prollyCoveringIndexIter) Next(ctx *sql.Context) (sql.Row, error) {
func (p prollyCoveringIndexIter) writeRowFromTuples(ctx context.Context, key, value val.Tuple, r sql.Row) (err error) {
for i, idx := range p.keyMap {
outputIdx := p.ordMap[i]
r[outputIdx], err = GetField(ctx, p.keyDesc, idx, key, p.ns)
r[outputIdx], err = tree.GetField(ctx, p.keyDesc, idx, key, p.ns)
if err != nil {
return err
}
@@ -237,7 +237,7 @@ func (p prollyCoveringIndexIter) writeRowFromTuples(ctx context.Context, key, va
for i, idx := range p.valMap {
outputIdx := p.ordMap[len(p.keyMap)+i]
r[outputIdx], err = GetField(ctx, p.valDesc, idx, value, p.ns)
r[outputIdx], err = tree.GetField(ctx, p.valDesc, idx, value, p.ns)
if err != nil {
return err
}
@@ -396,7 +396,7 @@ func (p prollyKeylessIndexIter) keylessRowsFromValueTuple(ctx context.Context, n
for i, idx := range p.valueMap {
outputIdx := p.ordMap[i]
rows[0][outputIdx], err = GetField(ctx, p.valueDesc, idx, value, ns)
rows[0][outputIdx], err = tree.GetField(ctx, p.valueDesc, idx, value, ns)
if err != nil {
return nil, err
}
@@ -176,14 +176,14 @@ func (it prollyRowIter) Next(ctx *sql.Context) (sql.Row, error) {
row := make(sql.Row, it.rowLen)
for i, idx := range it.keyProj {
outputIdx := it.ordProj[i]
row[outputIdx], err = GetField(ctx, it.keyDesc, idx, key, it.ns)
row[outputIdx], err = tree.GetField(ctx, it.keyDesc, idx, key, it.ns)
if err != nil {
return nil, err
}
}
for i, idx := range it.valProj {
outputIdx := it.ordProj[len(it.keyProj)+i]
row[outputIdx], err = GetField(ctx, it.valDesc, idx, value, it.ns)
row[outputIdx], err = tree.GetField(ctx, it.valDesc, idx, value, it.ns)
if err != nil {
return nil, err
}
@@ -235,7 +235,7 @@ func (it *prollyKeylessIter) nextTuple(ctx *sql.Context) error {
for i, idx := range it.valProj {
outputIdx := it.ordProj[i]
it.curr[outputIdx], err = GetField(ctx, it.valDesc, idx, value, it.ns)
it.curr[outputIdx], err = tree.GetField(ctx, it.valDesc, idx, value, it.ns)
if err != nil {
return err
}
+2 -3
View File
@@ -28,7 +28,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/val"
@@ -220,7 +219,7 @@ func (u *bucketBuilder) finalize(ctx context.Context, ns tree.NodeStore) (DoltBu
upperBound := make(sql.Row, u.prefixLen)
if u.currentKey != nil {
for i := 0; i < u.prefixLen; i++ {
upperBound[i], err = index.GetField(ctx, u.tupleDesc, i, u.currentKey, ns)
upperBound[i], err = tree.GetField(ctx, u.tupleDesc, i, u.currentKey, ns)
if err != nil {
return DoltBucket{}, err
}
@@ -313,7 +312,7 @@ func (m mcvHeap) Values(ctx context.Context, keyDesc val.TupleDesc, ns tree.Node
row := make(sql.Row, prefixLen)
var err error
for i := 0; i < prefixLen; i++ {
row[i], err = index.GetField(ctx, keyDesc, i, mcv.val, ns)
row[i], err = tree.GetField(ctx, keyDesc, i, mcv.val, ns)
if err != nil {
return nil, err
}
@@ -24,8 +24,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
"github.com/dolthub/dolt/go/store/pool"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/val"
)
@@ -181,7 +181,7 @@ func TestBucketBuilder(t *testing.T) {
for _, k := range tt.keys {
for i, v := range k {
// |ns| only needed for out of band tuples
err := index.PutField(ctx, nil, kb, i, v)
err := tree.PutField(ctx, nil, kb, i, v)
assert.NoError(t, err)
}
b.add(kb.Build(pool))
@@ -180,6 +180,13 @@ func AddDoltSystemVariables() {
Type: types.NewSystemBoolType(dsess.ShowSystemTables),
Default: int8(0),
},
{
Name: "dolt_dont_merge_json",
Dynamic: true,
Scope: sql.SystemVariableScope_Both,
Type: types.NewSystemBoolType("dolt_dont_merge_json"),
Default: int8(0),
},
})
}
+3 -3
View File
@@ -32,11 +32,11 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
config2 "github.com/dolthub/dolt/go/libraries/utils/config"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/store/val"
)
@@ -550,7 +550,7 @@ func sqlRowFromTuples(sch schema.Schema, kd, vd val.TupleDesc, k, v val.Tuple) (
for i, col := range sch.GetAllCols().GetColumns() {
pos, ok := sch.GetPKCols().TagToIdx[col.Tag]
if ok {
r[i], err = index.GetField(ctx, kd, pos, k, nil)
r[i], err = tree.GetField(ctx, kd, pos, k, nil)
if err != nil {
return nil, err
}
@@ -561,7 +561,7 @@ func sqlRowFromTuples(sch schema.Schema, kd, vd val.TupleDesc, k, v val.Tuple) (
pos += 1 // compensate for cardinality field
}
if ok {
r[i], err = index.GetField(ctx, vd, pos, v, nil)
r[i], err = tree.GetField(ctx, vd, pos, v, nil)
if err != nil {
return nil, err
}
@@ -22,6 +22,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
"github.com/dolthub/dolt/go/store/prolly"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/val"
)
@@ -161,13 +162,13 @@ func (iter prollyFkPkRowIter) Next(ctx *sql.Context) (sql.Row, error) {
nextRow := make(sql.Row, len(iter.primary.keyMap)+len(iter.primary.valMap))
for from := range iter.primary.keyMap {
to := iter.primary.keyMap.MapOrdinal(from)
if nextRow[to], err = index.GetField(ctx, iter.primary.keyBld.Desc, from, tblKey, iter.primary.mut.NodeStore()); err != nil {
if nextRow[to], err = tree.GetField(ctx, iter.primary.keyBld.Desc, from, tblKey, iter.primary.mut.NodeStore()); err != nil {
return nil, err
}
}
for from := range iter.primary.valMap {
to := iter.primary.valMap.MapOrdinal(from)
if nextRow[to], err = index.GetField(ctx, iter.primary.valBld.Desc, from, tblVal, iter.primary.mut.NodeStore()); err != nil {
if nextRow[to], err = tree.GetField(ctx, iter.primary.valBld.Desc, from, tblVal, iter.primary.mut.NodeStore()); err != nil {
return nil, err
}
}
@@ -206,7 +207,7 @@ func (iter prollyFkKeylessRowIter) Next(ctx *sql.Context) (sql.Row, error) {
err = iter.primary.mut.Get(ctx, primaryKey, func(tblKey, tblVal val.Tuple) error {
for from := range iter.primary.valMap {
to := iter.primary.valMap.MapOrdinal(from)
if nextRow[to], err = index.GetField(ctx, iter.primary.valBld.Desc, from+1, tblVal, iter.primary.mut.NodeStore()); err != nil {
if nextRow[to], err = tree.GetField(ctx, iter.primary.valBld.Desc, from+1, tblVal, iter.primary.mut.NodeStore()); err != nil {
return err
}
}
@@ -24,8 +24,8 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
"github.com/dolthub/dolt/go/store/prolly"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/val"
)
@@ -110,7 +110,7 @@ func (m prollyIndexWriter) Map(ctx context.Context) (prolly.Map, error) {
func (m prollyIndexWriter) keyFromRow(ctx context.Context, sqlRow sql.Row) (val.Tuple, error) {
for to := range m.keyMap {
from := m.keyMap.MapOrdinal(to)
if err := index.PutField(ctx, m.mut.NodeStore(), m.keyBld, to, sqlRow[from]); err != nil {
if err := tree.PutField(ctx, m.mut.NodeStore(), m.keyBld, to, sqlRow[from]); err != nil {
return nil, err
}
}
@@ -141,7 +141,7 @@ func (m prollyIndexWriter) Insert(ctx context.Context, sqlRow sql.Row) error {
for to := range m.valMap {
from := m.valMap.MapOrdinal(to)
if err := index.PutField(ctx, m.mut.NodeStore(), m.valBld, to, sqlRow[from]); err != nil {
if err := tree.PutField(ctx, m.mut.NodeStore(), m.valBld, to, sqlRow[from]); err != nil {
return err
}
}
@@ -186,7 +186,7 @@ func (m prollyIndexWriter) Update(ctx context.Context, oldRow sql.Row, newRow sq
for to := range m.valMap {
from := m.valMap.MapOrdinal(to)
if err = index.PutField(ctx, m.mut.NodeStore(), m.valBld, to, newRow[from]); err != nil {
if err = tree.PutField(ctx, m.mut.NodeStore(), m.valBld, to, newRow[from]); err != nil {
return err
}
}
@@ -225,7 +225,7 @@ func (m prollyIndexWriter) uniqueKeyError(ctx context.Context, keyStr string, ke
kd := m.keyBld.Desc
for from := range m.keyMap {
to := m.keyMap.MapOrdinal(from)
if existing[to], err = index.GetField(ctx, kd, from, key, m.mut.NodeStore()); err != nil {
if existing[to], err = tree.GetField(ctx, kd, from, key, m.mut.NodeStore()); err != nil {
return err
}
}
@@ -233,7 +233,7 @@ func (m prollyIndexWriter) uniqueKeyError(ctx context.Context, keyStr string, ke
vd := m.valBld.Desc
for from := range m.valMap {
to := m.valMap.MapOrdinal(from)
if existing[to], err = index.GetField(ctx, vd, from, value, m.mut.NodeStore()); err != nil {
if existing[to], err = tree.GetField(ctx, vd, from, value, m.mut.NodeStore()); err != nil {
return err
}
}
@@ -297,7 +297,7 @@ func (m prollySecondaryIndexWriter) keyFromRow(ctx context.Context, sqlRow sql.R
for to := range m.keyMap {
from := m.keyMap.MapOrdinal(to)
keyPart := m.trimKeyPart(to, sqlRow[from])
if err := index.PutField(ctx, m.mut.NodeStore(), m.keyBld, to, keyPart); err != nil {
if err := tree.PutField(ctx, m.mut.NodeStore(), m.keyBld, to, keyPart); err != nil {
return nil, err
}
}
@@ -323,7 +323,7 @@ func (m prollySecondaryIndexWriter) checkForUniqueKeyErr(ctx context.Context, sq
return nil
}
keyPart := m.trimKeyPart(to, sqlRow[from])
if err := index.PutField(ctx, ns, m.keyBld, to, keyPart); err != nil {
if err := tree.PutField(ctx, ns, m.keyBld, to, keyPart); err != nil {
return err
}
}
@@ -20,8 +20,8 @@ import (
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
"github.com/dolthub/dolt/go/store/prolly"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/val"
)
@@ -131,13 +131,13 @@ func (k prollyKeylessWriter) IterRange(ctx context.Context, rng prolly.Range) (p
func (k prollyKeylessWriter) tuplesFromRow(ctx context.Context, sqlRow sql.Row) (hashId, value val.Tuple, err error) {
// initialize cardinality to 0
if err = index.PutField(ctx, k.mut.NodeStore(), k.valBld, 0, uint64(0)); err != nil {
if err = tree.PutField(ctx, k.mut.NodeStore(), k.valBld, 0, uint64(0)); err != nil {
return nil, nil, err
}
for to := range k.valMap {
from := k.valMap.MapOrdinal(to)
if err = index.PutField(ctx, k.mut.NodeStore(), k.valBld, to+1, sqlRow[from]); err != nil {
if err = tree.PutField(ctx, k.mut.NodeStore(), k.valBld, to+1, sqlRow[from]); err != nil {
return nil, nil, err
}
}
@@ -161,7 +161,7 @@ func (k prollyKeylessWriter) uniqueKeyError(ctx context.Context, keyStr string,
for from := range k.valMap {
to := k.valMap.MapOrdinal(from)
// offset from index for keyless rows, as first field is the count
if existing[to], err = index.GetField(ctx, vd, from+1, value, k.mut.NodeStore()); err != nil {
if existing[to], err = tree.GetField(ctx, vd, from+1, value, k.mut.NodeStore()); err != nil {
return err
}
}
@@ -239,11 +239,11 @@ func (writer prollyKeylessSecondaryWriter) Insert(ctx context.Context, sqlRow sq
for to := range writer.keyMap {
from := writer.keyMap.MapOrdinal(to)
keyPart := writer.trimKeyPart(to, sqlRow[from])
if err := index.PutField(ctx, writer.mut.NodeStore(), writer.keyBld, to, keyPart); err != nil {
if err := tree.PutField(ctx, writer.mut.NodeStore(), writer.keyBld, to, keyPart); err != nil {
return err
}
if to < writer.prefixBld.Desc.Count() {
if err := index.PutField(ctx, writer.mut.NodeStore(), writer.prefixBld, to, keyPart); err != nil {
if err := tree.PutField(ctx, writer.mut.NodeStore(), writer.prefixBld, to, keyPart); err != nil {
return err
}
}
@@ -313,7 +313,7 @@ func (writer prollyKeylessSecondaryWriter) Delete(ctx context.Context, sqlRow sq
for to := range writer.keyMap {
from := writer.keyMap.MapOrdinal(to)
keyPart := writer.trimKeyPart(to, sqlRow[from])
if err := index.PutField(ctx, writer.mut.NodeStore(), writer.keyBld, to, keyPart); err != nil {
if err := tree.PutField(ctx, writer.mut.NodeStore(), writer.keyBld, to, keyPart); err != nil {
return err
}
}
@@ -26,7 +26,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
"github.com/dolthub/dolt/go/store/prolly"
"github.com/dolthub/dolt/go/store/prolly/message"
"github.com/dolthub/dolt/go/store/prolly/tree"
@@ -103,9 +102,9 @@ func tuplesToRows(t *testing.T, kvs [][2]val.Tuple) (rows []sql.Row) {
rows = make([]sql.Row, len(kvs))
for i, kv := range kvs {
v1, err := index.GetField(context.Background(), kd, 0, kv[0], nil)
v1, err := tree.GetField(context.Background(), kd, 0, kv[0], nil)
require.NoError(t, err)
v2, err := index.GetField(context.Background(), kd, 0, kv[1], nil)
v2, err := tree.GetField(context.Background(), kd, 0, kv[1], nil)
require.NoError(t, err)
rows[i] = sql.Row{v1, v2}
}
+2 -2
View File
@@ -200,7 +200,7 @@ func outputEncodedValue(ctx context.Context, w io.Writer, value types.Value) err
}
fmt.Fprintf(w, "\tPrimary Index (rows %d, depth %d) #%s {",
c, node.Level()+1, node.HashOf().String())
tree.OutputProllyNode(w, node)
tree.OutputProllyNodeBytes(w, node)
fmt.Fprintf(w, "\t}\n")
// secondary indexes
@@ -255,7 +255,7 @@ func outputEncodedValue(ctx context.Context, w io.Writer, value types.Value) err
if err != nil {
return err
}
return tree.OutputProllyNode(w, node)
return tree.OutputProllyNodeBytes(w, node)
default:
return types.WriteEncodedValue(ctx, w, value)
}
+180
View File
@@ -0,0 +1,180 @@
// Copyright 2023 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tree
import (
"bytes"
"fmt"
"io"
"reflect"
"strings"
"github.com/dolthub/go-mysql-server/sql/types"
)
// JsonDiff describes a single change between two JSON objects.
type JsonDiff struct {
	// Key is the JSON path of the changed value, e.g. `$."a"."b"`.
	Key string
	// From and To hold the value before and after the change. A nil From
	// indicates the key was added; a nil To indicates it was removed.
	From, To *types.JSONDocument
	// Type classifies the change as an add, remove, or modification.
	Type DiffType
}
// jsonKeyPair buffers one key-value entry read from a JSON iterator, letting
// the differ hold the next unconsumed entry from each side for comparison.
type jsonKeyPair struct {
	key   string
	value interface{}
}
// JsonDiffer computes the diff between two JSON objects.
type JsonDiffer struct {
	// root is the JSON path prefix for keys emitted at this nesting level.
	root string
	// currentFromPair and currentToPair buffer the next unconsumed key-value
	// pair from each iterator; nil means the next pair hasn't been read yet.
	currentFromPair, currentToPair *jsonKeyPair
	from, to                       types.JSONIter
	// subDiffer, when non-nil, is recursively diffing a nested object that
	// exists under the same key on both sides.
	subDiffer *JsonDiffer
}
// NewJsonDiffer returns a JsonDiffer that reports the differences between
// |from| and |to|, using |root| as the path prefix for emitted keys.
func NewJsonDiffer(root string, from, to types.JsonObject) JsonDiffer {
	return JsonDiffer{
		root: root,
		from: types.NewJSONIter(from),
		to:   types.NewJSONIter(to),
	}
}
// appendKey returns the JSON path for |key| beneath the differ's current
// root, quoting the key and escaping any embedded double quotes so the
// resulting path remains unambiguous.
func (differ *JsonDiffer) appendKey(key string) string {
	// strings.ReplaceAll with raw string literals is the idiomatic form of
	// strings.Replace(key, "\"", "\\\"", -1) and avoids the escape soup.
	escapedKey := strings.ReplaceAll(key, `"`, `\"`)
	return fmt.Sprintf(`%s."%s"`, differ.root, escapedKey)
}
// Next returns the next difference between the two JSON objects, visiting
// keys in iterator order. Objects nested under the same key on both sides
// are compared recursively through a sub-differ; values that compare equal
// produce no diff. Next returns io.EOF once both objects are exhausted.
func (differ *JsonDiffer) Next() (diff JsonDiff, err error) {
	for {
		// Drain any in-progress recursive diff of a nested object first.
		if differ.subDiffer != nil {
			diff, err := differ.subDiffer.Next()
			if err == io.EOF {
				// The nested objects are fully compared; consume the parent
				// pair and resume scanning at this level.
				differ.subDiffer = nil
				differ.currentFromPair = nil
				differ.currentToPair = nil
				continue
			} else if err != nil {
				return JsonDiff{}, err
			}
			return diff, nil
		}
		// Refill the one-pair lookahead buffer for each side as needed.
		if differ.currentFromPair == nil && differ.from.HasNext() {
			key, value, err := differ.from.Next()
			if err != nil {
				return JsonDiff{}, err
			}
			differ.currentFromPair = &jsonKeyPair{key, value}
		}
		if differ.currentToPair == nil && differ.to.HasNext() {
			key, value, err := differ.to.Next()
			if err != nil {
				return JsonDiff{}, err
			}
			differ.currentToPair = &jsonKeyPair{key, value}
		}
		// Both sides exhausted: iteration is complete.
		if differ.currentFromPair == nil && differ.currentToPair == nil {
			return JsonDiff{}, io.EOF
		}
		var diffType DiffType
		if differ.currentFromPair == nil && differ.currentToPair != nil {
			diffType = AddedDiff
		} else if differ.currentFromPair != nil && differ.currentToPair == nil {
			diffType = RemovedDiff
		} else { // differ.currentFromPair != nil && differ.currentToPair != nil
			// Both sides have a pending key; compare them bytewise to decide
			// which side's key sorts first.
			keyCmp := bytes.Compare([]byte(differ.currentFromPair.key), []byte(differ.currentToPair.key))
			if keyCmp > 0 {
				// `to` key comes before `from` key. Right key must have been inserted.
				diffType = AddedDiff
			} else if keyCmp < 0 {
				// `to` key comes after `from` key. Right key must have been deleted.
				diffType = RemovedDiff
			} else {
				// Same key on both sides: compare the values.
				key := differ.currentFromPair.key
				fromValue := differ.currentFromPair.value
				toValue := differ.currentToPair.value
				if reflect.TypeOf(fromValue) != reflect.TypeOf(toValue) {
					// Different underlying types is always a modification.
					diffType = ModifiedDiff
				} else {
					switch from := fromValue.(type) {
					case types.JsonObject:
						// Recursively compare the objects to generate diffs.
						subDiffer := NewJsonDiffer(differ.appendKey(key), from, toValue.(types.JsonObject))
						differ.subDiffer = &subDiffer
						continue
					case types.JsonArray:
						// Arrays are compared wholesale: equal or modified,
						// never diffed element by element.
						if reflect.DeepEqual(fromValue, toValue) {
							differ.currentFromPair = nil
							differ.currentToPair = nil
							continue
						} else {
							diffType = ModifiedDiff
						}
					default:
						// Scalar values: equal scalars produce no diff.
						if fromValue == toValue {
							differ.currentFromPair = nil
							differ.currentToPair = nil
							continue
						}
						diffType = ModifiedDiff
					}
				}
			}
		}
		// Emit the computed diff, consuming only the pair(s) it used so the
		// other side's pending key is revisited on the next call.
		switch diffType {
		case AddedDiff:
			key, value := differ.currentToPair.key, differ.currentToPair.value
			result := JsonDiff{
				Key:  differ.appendKey(key),
				From: nil,
				To:   &types.JSONDocument{Val: value},
				Type: AddedDiff,
			}
			differ.currentToPair = nil
			return result, nil
		case RemovedDiff:
			key, value := differ.currentFromPair.key, differ.currentFromPair.value
			result := JsonDiff{
				Key:  differ.appendKey(key),
				From: &types.JSONDocument{Val: value},
				To:   nil,
				Type: RemovedDiff,
			}
			differ.currentFromPair = nil
			return result, nil
		case ModifiedDiff:
			key := differ.currentFromPair.key
			from := differ.currentFromPair.value
			to := differ.currentToPair.value
			result := JsonDiff{
				Key:  differ.appendKey(key),
				From: &types.JSONDocument{Val: from},
				To:   &types.JSONDocument{Val: to},
				Type: ModifiedDiff,
			}
			differ.currentFromPair = nil
			differ.currentToPair = nil
			return result, nil
		default:
			panic("unreachable")
		}
	}
}
+246
View File
@@ -0,0 +1,246 @@
// Copyright 2023 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tree
import (
"io"
"testing"
"github.com/dolthub/go-mysql-server/sql/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// jsonDiffTest describes one JSON diff scenario: the differ is run from
// |from| to |to| and the sequence it emits is compared to |expectedDiffs|.
type jsonDiffTest struct {
	name          string           // subtest name
	from, to      types.JsonObject // the two documents being diffed
	expectedDiffs []JsonDiff       // expected diffs in emission order; nil means no diffs expected
}
// simpleJsonDiffTests covers the basic object-level diff scenarios:
// insert/delete/modify at the top level and nested one level deep,
// transitions between scalar and object values, keys that require quote
// escaping, and the ordering of the emitted diffs.
var simpleJsonDiffTests = []jsonDiffTest{
	{
		name:          "empty object, no modifications",
		from:          types.JsonObject{},
		to:            types.JsonObject{},
		expectedDiffs: nil,
	},
	{
		name: "insert into empty object",
		from: types.JsonObject{},
		to:   types.JsonObject{"a": 1},
		expectedDiffs: []JsonDiff{
			{
				Key:  "$.\"a\"",
				From: nil,
				To:   &types.JSONDocument{Val: 1},
				Type: AddedDiff,
			},
		},
	},
	{
		name: "delete from object",
		from: types.JsonObject{"a": 1},
		to:   types.JsonObject{},
		expectedDiffs: []JsonDiff{
			{
				Key:  "$.\"a\"",
				From: &types.JSONDocument{Val: 1},
				To:   nil,
				Type: RemovedDiff,
			},
		},
	},
	{
		name: "modify object",
		from: types.JsonObject{"a": 1},
		to:   types.JsonObject{"a": 2},
		expectedDiffs: []JsonDiff{
			{
				Key:  "$.\"a\"",
				From: &types.JSONDocument{Val: 1},
				To:   &types.JSONDocument{Val: 2},
				Type: ModifiedDiff,
			},
		},
	},
	{
		name: "nested insert",
		from: types.JsonObject{"a": types.JsonObject{}},
		to:   types.JsonObject{"a": types.JsonObject{"b": 1}},
		expectedDiffs: []JsonDiff{
			{
				Key:  "$.\"a\".\"b\"",
				To:   &types.JSONDocument{Val: 1},
				Type: AddedDiff,
			},
		},
	},
	{
		name: "nested delete",
		from: types.JsonObject{"a": types.JsonObject{"b": 1}},
		to:   types.JsonObject{"a": types.JsonObject{}},
		expectedDiffs: []JsonDiff{
			{
				Key:  "$.\"a\".\"b\"",
				From: &types.JSONDocument{Val: 1},
				Type: RemovedDiff,
			},
		},
	},
	{
		name: "nested modify",
		from: types.JsonObject{"a": types.JsonObject{"b": 1}},
		to:   types.JsonObject{"a": types.JsonObject{"b": 2}},
		expectedDiffs: []JsonDiff{
			{
				Key:  "$.\"a\".\"b\"",
				From: &types.JSONDocument{Val: 1},
				To:   &types.JSONDocument{Val: 2},
				Type: ModifiedDiff,
			},
		},
	},
	{
		// An entire object inserted under an existing key is reported as a
		// single AddedDiff, not one diff per nested key.
		name: "insert object",
		from: types.JsonObject{"a": types.JsonObject{}},
		to:   types.JsonObject{"a": types.JsonObject{"b": types.JsonObject{"c": 3}}},
		expectedDiffs: []JsonDiff{
			{
				Key:  "$.\"a\".\"b\"",
				To:   &types.JSONDocument{Val: types.JsonObject{"c": 3}},
				Type: AddedDiff,
			},
		},
	},
	{
		// Scalar -> object replacement is a single ModifiedDiff.
		name: "modify to object",
		from: types.JsonObject{"a": types.JsonObject{"b": 2}},
		to:   types.JsonObject{"a": types.JsonObject{"b": types.JsonObject{"c": 3}}},
		expectedDiffs: []JsonDiff{
			{
				Key:  "$.\"a\".\"b\"",
				From: &types.JSONDocument{Val: 2},
				To:   &types.JSONDocument{Val: types.JsonObject{"c": 3}},
				Type: ModifiedDiff,
			},
		},
	},
	{
		// Object -> scalar replacement is likewise a single ModifiedDiff.
		name: "modify from object",
		from: types.JsonObject{"a": types.JsonObject{"b": 2}},
		to:   types.JsonObject{"a": 1},
		expectedDiffs: []JsonDiff{
			{
				Key:  "$.\"a\"",
				From: &types.JSONDocument{Val: types.JsonObject{"b": 2}},
				To:   &types.JSONDocument{Val: 1},
				Type: ModifiedDiff,
			},
		},
	},
	{
		name: "remove object",
		from: types.JsonObject{"a": types.JsonObject{"b": types.JsonObject{"c": 3}}},
		to:   types.JsonObject{"a": types.JsonObject{}},
		expectedDiffs: []JsonDiff{
			{
				Key:  "$.\"a\".\"b\"",
				From: &types.JSONDocument{Val: types.JsonObject{"c": 3}},
				Type: RemovedDiff,
			},
		},
	},
	{
		// Keys containing double quotes must appear escaped inside the
		// quoted path segment.
		name: "insert escaped double quotes",
		from: types.JsonObject{"\"a\"": "1"},
		to:   types.JsonObject{"b": "\"2\""},
		expectedDiffs: []JsonDiff{
			{
				Key:  "$.\"\\\"a\\\"\"",
				From: &types.JSONDocument{Val: "1"},
				To:   nil,
				Type: RemovedDiff,
			},
			{
				Key:  "$.\"b\"",
				From: nil,
				To:   &types.JSONDocument{Val: "\"2\""},
				Type: AddedDiff,
			},
		},
	},
	{
		// Diffs across multiple keys (including nested ones) come out in
		// lexicographic key order.
		name: "modifications returned in lexographic order",
		from: types.JsonObject{"a": types.JsonObject{"1": "i"}, "aa": 2, "b": 6},
		to:   types.JsonObject{"": 1, "a": types.JsonObject{}, "aa": 3, "bb": 5},
		expectedDiffs: []JsonDiff{
			{
				Key:  "$.\"\"",
				To:   &types.JSONDocument{Val: 1},
				Type: AddedDiff,
			},
			{
				Key:  "$.\"a\".\"1\"",
				From: &types.JSONDocument{Val: "i"},
				Type: RemovedDiff,
			},
			{
				Key:  "$.\"aa\"",
				From: &types.JSONDocument{Val: 2},
				To:   &types.JSONDocument{Val: 3},
				Type: ModifiedDiff,
			},
			{
				Key:  "$.\"b\"",
				From: &types.JSONDocument{Val: 6},
				Type: RemovedDiff,
			},
			{
				Key:  "$.\"bb\"",
				To:   &types.JSONDocument{Val: 5},
				Type: AddedDiff,
			},
		},
	},
}
func TestJsonDiff(t *testing.T) {
t.Run("simple tests", func(t *testing.T) {
runTestBatch(t, simpleJsonDiffTests)
})
}
// runTestBatch runs each test case in the batch as a named subtest.
func runTestBatch(t *testing.T, tests []jsonDiffTest) {
	for i := range tests {
		tc := tests[i]
		t.Run(tc.name, func(t *testing.T) {
			runTest(t, tc)
		})
	}
}
// runTest drives a JsonDiffer from test.from to test.to, collects every diff
// it emits until io.EOF, and compares the collected sequence against
// test.expectedDiffs.
func runTest(t *testing.T, test jsonDiffTest) {
	differ := NewJsonDiffer("$", test.from, test.to)
	var actualDiffs []JsonDiff
	for {
		diff, err := differ.Next()
		if err == io.EOF {
			break
		}
		// Fail fast on any non-EOF error: assert.NoError would only record
		// the failure and keep looping, which can spin forever if Next
		// keeps returning the same error.
		require.NoError(t, err)
		actualDiffs = append(actualDiffs, diff)
	}
	require.Equal(t, test.expectedDiffs, actualDiffs)
}
+81 -5
View File
@@ -17,8 +17,11 @@ package tree
import (
"context"
"encoding/hex"
"fmt"
"io"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/gen/fb/serial"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/prolly/message"
@@ -204,11 +207,12 @@ func getLastKey(nd Node) Item {
return nd.GetKey(int(nd.count) - 1)
}
// OutputProllyNode writes the node given to the writer given in a semi-human-readable format, where values are still
// displayed in hex-encoded byte strings, but are delineated into their fields. All nodes have keys displayed in this
// manner. Interior nodes have their child hash references spelled out, leaf nodes have value tuples delineated like
// the keys
func OutputProllyNode(w io.Writer, node Node) error {
// OutputProllyNode writes the node given to the writer given in a human-readable format, with values converted
// to the type specified by the provided schema. All nodes have keys displayed in this manner. Interior nodes have
// their child hash references spelled out, leaf nodes have value tuples delineated like the keys
func OutputProllyNode(ctx context.Context, w io.Writer, node Node, ns NodeStore, schema schema.Schema) error {
kd := schema.GetKeyDescriptor()
vd := schema.GetValueDescriptor()
for i := 0; i < int(node.count); i++ {
k := node.GetKey(i)
kt := val.Tuple(k)
@@ -218,6 +222,78 @@ func OutputProllyNode(w io.Writer, node Node) error {
if j > 0 {
w.Write([]byte(", "))
}
isAddr := val.IsAddrEncoding(kd.Types[i].Enc)
if isAddr {
w.Write([]byte("#"))
}
w.Write([]byte(hex.EncodeToString(kd.GetField(j, kt))))
if isAddr {
w.Write([]byte(" ("))
key, err := GetField(ctx, kd, i, kt, ns)
if err != nil {
return err
}
w.Write([]byte(fmt.Sprint(key)))
w.Write([]byte(")"))
}
}
if node.IsLeaf() {
v := node.GetValue(i)
vt := val.Tuple(v)
w.Write([]byte(" value: "))
for j := 0; j < vt.Count(); j++ {
if j > 0 {
w.Write([]byte(", "))
}
isAddr := val.IsAddrEncoding(vd.Types[j].Enc)
if isAddr {
w.Write([]byte("#"))
}
w.Write([]byte(hex.EncodeToString(vd.GetField(j, vt))))
if isAddr {
w.Write([]byte(" ("))
value, err := GetField(ctx, vd, j, vt, ns)
if err != nil {
return err
}
w.Write([]byte(fmt.Sprint(value)))
w.Write([]byte(")"))
}
}
w.Write([]byte(" }"))
} else {
ref := node.getAddress(i)
w.Write([]byte(" ref: #"))
w.Write([]byte(ref.String()))
w.Write([]byte(" }"))
}
}
w.Write([]byte("\n"))
return nil
}
// OutputProllyNodeBytes writes the node given to the writer given in a semi-human-readable format, where values are still
// displayed in hex-encoded byte strings, but are delineated into their fields. All nodes have keys displayed in this
// manner. Interior nodes have their child hash references spelled out, leaf nodes have value tuples delineated like
// the keys
func OutputProllyNodeBytes(w io.Writer, node Node) error {
for i := 0; i < int(node.count); i++ {
k := node.GetKey(i)
kt := val.Tuple(k)
w.Write([]byte("\n { key: "))
for j := 0; j < kt.Count(); j++ {
if j > 0 {
w.Write([]byte(", "))
}
w.Write([]byte(hex.EncodeToString(kt.GetField(j))))
}
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package index
package tree
import (
"bytes"
@@ -29,14 +29,13 @@ import (
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/pool"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/val"
)
var ErrValueExceededMaxFieldSize = errors.New("value exceeded max field size of 65kb")
// GetField reads the value from the ith field of the Tuple as an interface{}.
func GetField(ctx context.Context, td val.TupleDesc, i int, tup val.Tuple, ns tree.NodeStore) (v interface{}, err error) {
func GetField(ctx context.Context, td val.TupleDesc, i int, tup val.Tuple, ns NodeStore) (v interface{}, err error) {
var ok bool
switch td.Types[i].Enc {
case val.Int8Enc:
@@ -109,7 +108,7 @@ func GetField(ctx context.Context, td val.TupleDesc, i int, tup val.Tuple, ns tr
var h hash.Hash
h, ok = td.GetGeometryAddr(i, tup)
if ok {
buf, err = tree.NewByteArray(h, ns).ToBytes(ctx)
buf, err = NewByteArray(h, ns).ToBytes(ctx)
if err != nil {
return nil, err
}
@@ -122,19 +121,19 @@ func GetField(ctx context.Context, td val.TupleDesc, i int, tup val.Tuple, ns tr
var h hash.Hash
h, ok = td.GetBytesAddr(i, tup)
if ok {
v, err = tree.NewByteArray(h, ns).ToBytes(ctx)
v, err = NewByteArray(h, ns).ToBytes(ctx)
}
case val.JSONAddrEnc:
var h hash.Hash
h, ok = td.GetJSONAddr(i, tup)
if ok {
v, err = tree.NewJSONDoc(h, ns).ToJSONDocument(ctx)
v, err = NewJSONDoc(h, ns).ToJSONDocument(ctx)
}
case val.StringAddrEnc:
var h hash.Hash
h, ok = td.GetStringAddr(i, tup)
if ok {
v, err = tree.NewTextStorage(h, ns).ToString(ctx)
v, err = NewTextStorage(h, ns).ToString(ctx)
}
case val.CommitAddrEnc:
v, ok = td.GetCommitAddr(i, tup)
@@ -151,7 +150,7 @@ func GetField(ctx context.Context, td val.TupleDesc, i int, tup val.Tuple, ns tr
// Serialize writes an interface{} into the byte string representation used in val.Tuple, and returns the byte string,
// and a boolean indicating success.
func Serialize(ctx context.Context, ns tree.NodeStore, t val.Type, v interface{}) (result []byte, err error) {
func Serialize(ctx context.Context, ns NodeStore, t val.Type, v interface{}) (result []byte, err error) {
newTupleDesc := val.NewTupleDescriptor(t)
tb := val.NewTupleBuilder(newTupleDesc)
err = PutField(ctx, ns, tb, 0, v)
@@ -162,7 +161,7 @@ func Serialize(ctx context.Context, ns tree.NodeStore, t val.Type, v interface{}
}
// PutField writes an interface{} to the ith field of the Tuple being built.
func PutField(ctx context.Context, ns tree.NodeStore, tb *val.TupleBuilder, i int, v interface{}) error {
func PutField(ctx context.Context, ns NodeStore, tb *val.TupleBuilder, i int, v interface{}) error {
if v == nil {
return nil // NULL
}
@@ -220,14 +219,14 @@ func PutField(ctx context.Context, ns tree.NodeStore, tb *val.TupleBuilder, i in
// TODO: eventually remove GeometryEnc, but in the meantime write them as GeomAddrEnc
case val.GeometryEnc:
geo := serializeGeometry(v)
h, err := serializeBytesToAddr(ctx, ns, bytes.NewReader(geo), len(geo))
h, err := SerializeBytesToAddr(ctx, ns, bytes.NewReader(geo), len(geo))
if err != nil {
return err
}
tb.PutGeometryAddr(i, h)
case val.GeomAddrEnc:
geo := serializeGeometry(v)
h, err := serializeBytesToAddr(ctx, ns, bytes.NewReader(geo), len(geo))
h, err := SerializeBytesToAddr(ctx, ns, bytes.NewReader(geo), len(geo))
if err != nil {
return err
}
@@ -237,20 +236,20 @@ func PutField(ctx context.Context, ns tree.NodeStore, tb *val.TupleBuilder, i in
if err != nil {
return err
}
h, err := serializeBytesToAddr(ctx, ns, bytes.NewReader(buf), len(buf))
h, err := SerializeBytesToAddr(ctx, ns, bytes.NewReader(buf), len(buf))
if err != nil {
return err
}
tb.PutJSONAddr(i, h)
case val.BytesAddrEnc:
h, err := serializeBytesToAddr(ctx, ns, bytes.NewReader(v.([]byte)), len(v.([]byte)))
h, err := SerializeBytesToAddr(ctx, ns, bytes.NewReader(v.([]byte)), len(v.([]byte)))
if err != nil {
return err
}
tb.PutBytesAddr(i, h)
case val.StringAddrEnc:
//todo: v will be []byte after daylon's changes
h, err := serializeBytesToAddr(ctx, ns, bytes.NewReader([]byte(v.(string))), len(v.(string)))
h, err := SerializeBytesToAddr(ctx, ns, bytes.NewReader([]byte(v.(string))), len(v.(string)))
if err != nil {
return err
}
@@ -360,7 +359,7 @@ func serializeGeometry(v interface{}) []byte {
}
}
func serializeBytesToAddr(ctx context.Context, ns tree.NodeStore, r io.Reader, dataSize int) (hash.Hash, error) {
func SerializeBytesToAddr(ctx context.Context, ns NodeStore, r io.Reader, dataSize int) (hash.Hash, error) {
bb := ns.BlobBuilder()
bb.Init(dataSize)
_, addr, err := bb.Chunk(ctx, r)
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package index
package tree
import (
"context"
@@ -28,7 +28,6 @@ import (
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/store/pool"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/val"
)
@@ -187,7 +186,7 @@ var testPool = pool.NewBuffPool()
func testRoundTripProllyFields(t *testing.T, test prollyFieldTest) {
desc := val.NewTupleDescriptor(test.typ)
builder := val.NewTupleBuilder(desc)
ns := tree.NewTestNodeStore()
ns := NewTestNodeStore()
err := PutField(context.Background(), ns, builder, 0, test.value)
assert.NoError(t, err)
@@ -269,7 +268,7 @@ func TestGeometryEncoding(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ns := tree.NewTestNodeStore()
ns := NewTestNodeStore()
oldDesc := val.NewTupleDescriptor(val.Type{Enc: val.GeometryEnc})
builder := val.NewTupleBuilder(oldDesc)
b := serializeGeometry(test.value)
+4 -2
View File
@@ -21,6 +21,8 @@ import (
"fmt"
"io"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/store/val"
)
@@ -39,7 +41,7 @@ type ThreeWayDiffer[K ~[]byte, O Ordering[K]] struct {
//var _ DiffIter = (*threeWayDiffer[Item, val.TupleDesc])(nil)
type resolveCb func(context.Context, val.Tuple, val.Tuple, val.Tuple) (val.Tuple, bool, error)
type resolveCb func(*sql.Context, val.Tuple, val.Tuple, val.Tuple) (val.Tuple, bool, error)
// ThreeWayDiffInfo stores contextual data that can influence the diff.
// If |LeftSchemaChange| is true, then the left side's bytes have a different interpretation from the base,
@@ -100,7 +102,7 @@ const (
dsMatchFinalize
)
func (d *ThreeWayDiffer[K, O]) Next(ctx context.Context) (ThreeWayDiff, error) {
func (d *ThreeWayDiffer[K, O]) Next(ctx *sql.Context) (ThreeWayDiff, error) {
var err error
var res ThreeWayDiff
nextState := dsInit
@@ -21,6 +21,7 @@ import (
"io"
"testing"
"github.com/dolthub/go-mysql-server/sql"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/store/prolly/message"
@@ -178,7 +179,7 @@ func TestThreeWayDiffer(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
ctx := sql.NewEmptyContext()
ns := NewTestNodeStore()
var valTypes []val.Type
@@ -216,8 +217,8 @@ func TestThreeWayDiffer(t *testing.T) {
}
}
func testResolver(t *testing.T, ns NodeStore, valDesc val.TupleDesc, valBuilder *val.TupleBuilder) func(context.Context, val.Tuple, val.Tuple, val.Tuple) (val.Tuple, bool, error) {
return func(_ context.Context, l, r, b val.Tuple) (val.Tuple, bool, error) {
func testResolver(t *testing.T, ns NodeStore, valDesc val.TupleDesc, valBuilder *val.TupleBuilder) func(*sql.Context, val.Tuple, val.Tuple, val.Tuple) (val.Tuple, bool, error) {
return func(_ *sql.Context, l, r, b val.Tuple) (val.Tuple, bool, error) {
for i := range valDesc.Types {
var base, left, right int64
var ok bool
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package index
package tree
import (
"encoding/binary"
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package index
package tree
import (
"bytes"
+13
View File
@@ -100,6 +100,19 @@ const (
sentinel Encoding = 127
)
// IsAddrEncoding reports whether |enc| is an out-of-band encoding, i.e. the
// tuple field stores a content address rather than the value itself.
func IsAddrEncoding(enc Encoding) bool {
	switch enc {
	case BytesAddrEnc, CommitAddrEnc, StringAddrEnc, JSONAddrEnc, GeomAddrEnc:
		return true
	}
	return false
}
// Variable Width Encodings
const (
StringEnc = Encoding(serial.EncodingString)
+30
View File
@@ -1160,3 +1160,33 @@ SQL
[ "$head1" == "$head2" ]
}
@test "merge: three-way merge with mergible json fails when --dont-merge-json is set" {
    # Left and right branches each add a different key to the same JSON
    # document. By default the merge resolves these concurrent edits
    # automatically; with --dont-merge-json it must report a conflict instead.
    dolt sql <<SQL
create table t (pk int primary key, j json);
insert into t values (1, '{}');
call dolt_commit('-Am', 'new table');
call dolt_branch('b1');
call dolt_branch('b2');
call dolt_checkout('b1');
update t set j = '{"a": 1}';
call dolt_commit('-Am', 'added key "a"');
call dolt_checkout('b2');
update t set j = '{"b": 1}';
call dolt_commit('-Am', 'added key "b"');
SQL
    dolt checkout b2

    # The flag is registered as "dont-merge-json" (hyphens, cli.NoJsonMergeFlag);
    # the underscore spelling would exit 1 as an unknown flag and make this
    # test pass for the wrong reason.
    run dolt merge --dont-merge-json b1
    log_status_eq 1

    run dolt merge b1
    log_status_eq 0
}