mirror of
https://github.com/dolthub/dolt.git
synced 2026-01-06 00:39:40 -06:00
Merge pull request #6974 from dolthub/zachmu/virtual-merge
Support merging schemas with virtual / generated columns
This commit is contained in:
@@ -17,6 +17,8 @@ package indexcmds
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/dolthub/go-mysql-server/sql"
|
||||
|
||||
"github.com/dolthub/dolt/go/cmd/dolt/cli"
|
||||
"github.com/dolthub/dolt/go/cmd/dolt/commands"
|
||||
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
|
||||
@@ -102,7 +104,7 @@ func (cmd RebuildCmd) Exec(ctx context.Context, commandStr string, args []string
|
||||
if idxSch == nil {
|
||||
return HandleErr(errhand.BuildDError("the index `%s` does not exist on table `%s`", indexName, tableName).Build(), nil)
|
||||
}
|
||||
indexRowData, err := creation.BuildSecondaryIndex(ctx, table, idxSch, opts)
|
||||
indexRowData, err := creation.BuildSecondaryIndex(sql.NewContext(ctx), table, idxSch, tableName, opts)
|
||||
if err != nil {
|
||||
return HandleErr(errhand.BuildDError("Unable to rebuild index `%s` on table `%s`.", indexName, tableName).AddCause(err).Build(), nil)
|
||||
}
|
||||
|
||||
@@ -59,7 +59,7 @@ require (
|
||||
github.com/cespare/xxhash v1.1.0
|
||||
github.com/creasty/defaults v1.6.0
|
||||
github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2
|
||||
github.com/dolthub/go-mysql-server v0.17.1-0.20231109211027-734826ff8972
|
||||
github.com/dolthub/go-mysql-server v0.17.1-0.20231110001639-33b593341822
|
||||
github.com/dolthub/swiss v0.1.0
|
||||
github.com/goccy/go-json v0.10.2
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
|
||||
|
||||
@@ -181,8 +181,8 @@ github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
|
||||
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
|
||||
github.com/dolthub/go-icu-regex v0.0.0-20230524105445-af7e7991c97e h1:kPsT4a47cw1+y/N5SSCkma7FhAPw7KeGmD6c9PBZW9Y=
|
||||
github.com/dolthub/go-icu-regex v0.0.0-20230524105445-af7e7991c97e/go.mod h1:KPUcpx070QOfJK1gNe0zx4pA5sicIK1GMikIGLKC168=
|
||||
github.com/dolthub/go-mysql-server v0.17.1-0.20231109211027-734826ff8972 h1:+cJcknWqzo1nZ9ZqkKxubiKIK/D+7c6ztOz0RC6dQ2I=
|
||||
github.com/dolthub/go-mysql-server v0.17.1-0.20231109211027-734826ff8972/go.mod h1:Z3EbOzC1yoK9MoYBxl6LDksV8GRRyjjHDZTu2lWpT/E=
|
||||
github.com/dolthub/go-mysql-server v0.17.1-0.20231110001639-33b593341822 h1:5A5opfcrehlHmaPPBAm6drw4rP3niBoVOci+7YjEg3s=
|
||||
github.com/dolthub/go-mysql-server v0.17.1-0.20231110001639-33b593341822/go.mod h1:Z3EbOzC1yoK9MoYBxl6LDksV8GRRyjjHDZTu2lWpT/E=
|
||||
github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488 h1:0HHu0GWJH0N6a6keStrHhUAK5/o9LVfkh44pvsV4514=
|
||||
github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488/go.mod h1:ehexgi1mPxRTk0Mok/pADALuHbvATulTh6gzr7NzZto=
|
||||
github.com/dolthub/jsonpath v0.0.2-0.20230525180605-8dc13778fd72 h1:NfWmngMi1CYUWU4Ix8wM+USEhjc+mhPlT9JUR/anvbQ=
|
||||
|
||||
@@ -612,70 +612,6 @@ func GetDataDiffStatement(tableName string, sch schema.Schema, row sql.Row, rowD
|
||||
}
|
||||
}
|
||||
|
||||
// GenerateCreateTableStatement returns CREATE TABLE statement for given table. This function was made to share the same
|
||||
// 'create table' statement logic as GMS. We initially were running `SHOW CREATE TABLE` query to get the statement;
|
||||
// however, it cannot be done for cases that need this statement in sql shell mode. Dolt uses its own Schema and
|
||||
// Column and other object types which are not directly compatible with GMS, so we try to use as much shared logic
|
||||
// as possible with GMS to get 'create table' statement in Dolt.
|
||||
func GenerateCreateTableStatement(tblName string, sch schema.Schema, pkSchema sql.PrimaryKeySchema, fks []doltdb.ForeignKey, fksParentSch map[string]schema.Schema) (string, error) {
|
||||
sqlSch := pkSchema.Schema
|
||||
colStmts := make([]string, len(sqlSch))
|
||||
|
||||
// Statement creation parts for each column
|
||||
for i, col := range sch.GetAllCols().GetColumns() {
|
||||
colStmts[i] = sqlfmt.GenerateCreateTableIndentedColumnDefinition(col, sql.CollationID(sch.GetCollation()))
|
||||
}
|
||||
|
||||
primaryKeyCols := sch.GetPKCols().GetColumnNames()
|
||||
if len(primaryKeyCols) > 0 {
|
||||
primaryKey := sql.GenerateCreateTablePrimaryKeyDefinition(primaryKeyCols)
|
||||
colStmts = append(colStmts, primaryKey)
|
||||
}
|
||||
|
||||
indexes := sch.Indexes().AllIndexes()
|
||||
for _, index := range indexes {
|
||||
// The primary key may or may not be declared as an index by the table. Don't print it twice if it's here.
|
||||
if isPrimaryKeyIndex(index, sch) {
|
||||
continue
|
||||
}
|
||||
colStmts = append(colStmts, sqlfmt.GenerateCreateTableIndexDefinition(index))
|
||||
}
|
||||
|
||||
for _, fk := range fks {
|
||||
colStmts = append(colStmts, sqlfmt.GenerateCreateTableForeignKeyDefinition(fk, sch, fksParentSch[fk.ReferencedTableName]))
|
||||
}
|
||||
|
||||
for _, check := range sch.Checks().AllChecks() {
|
||||
colStmts = append(colStmts, sqlfmt.GenerateCreateTableCheckConstraintClause(check))
|
||||
}
|
||||
|
||||
coll := sql.CollationID(sch.GetCollation())
|
||||
createTableStmt := sql.GenerateCreateTableStatement(tblName, colStmts, coll.CharacterSet().Name(), coll.Name())
|
||||
return fmt.Sprintf("%s;", createTableStmt), nil
|
||||
}
|
||||
|
||||
// isPrimaryKeyIndex returns whether the index given matches the table's primary key columns. Order is not considered.
|
||||
func isPrimaryKeyIndex(index schema.Index, sch schema.Schema) bool {
|
||||
var pks = sch.GetPKCols().GetColumns()
|
||||
var pkMap = make(map[string]struct{})
|
||||
for _, c := range pks {
|
||||
pkMap[c.Name] = struct{}{}
|
||||
}
|
||||
|
||||
indexCols := index.ColumnNames()
|
||||
if len(indexCols) != len(pks) {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, c := range index.ColumnNames() {
|
||||
if _, ok := pkMap[c]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// WorkingSetContainsOnlyIgnoredTables returns true if all changes in working set are ignored tables.
|
||||
// Otherwise, if there are any non-ignored changes, returns false.
|
||||
// Note that only unstaged tables are subject to dolt_ignore (this is consistent with what git does.)
|
||||
|
||||
@@ -224,5 +224,5 @@ func createRowIterForTable(ctx *sql.Context, t *doltdb.Table, sch schema.Schema)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return index.NewProllyRowIter(sch, rows, iter, nil)
|
||||
return index.NewProllyRowIterForMap(sch, rows, iter, nil), nil
|
||||
}
|
||||
|
||||
@@ -18,6 +18,8 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/dolthub/go-mysql-server/sql"
|
||||
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
|
||||
@@ -35,7 +37,7 @@ import (
|
||||
// changes from the other side of the merge to have been merged in before this
|
||||
// function was called. This is safer, but less efficient.
|
||||
func mergeProllySecondaryIndexes(
|
||||
ctx context.Context,
|
||||
ctx *sql.Context,
|
||||
tm *TableMerger,
|
||||
leftSet, rightSet durable.IndexSet,
|
||||
finalSch schema.Schema,
|
||||
@@ -103,7 +105,17 @@ func mergeProllySecondaryIndexes(
|
||||
return mergedIndexSet, nil
|
||||
}
|
||||
|
||||
func buildIndex(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, postMergeSchema schema.Schema, index schema.Index, m prolly.Map, artEditor *prolly.ArtifactsEditor, theirRootIsh doltdb.Rootish, tblName string) (durable.Index, error) {
|
||||
func buildIndex(
|
||||
ctx *sql.Context,
|
||||
vrw types.ValueReadWriter,
|
||||
ns tree.NodeStore,
|
||||
postMergeSchema schema.Schema,
|
||||
index schema.Index,
|
||||
m prolly.Map,
|
||||
artEditor *prolly.ArtifactsEditor,
|
||||
theirRootIsh doltdb.Rootish,
|
||||
tblName string,
|
||||
) (durable.Index, error) {
|
||||
if index.IsUnique() {
|
||||
meta, err := makeUniqViolMeta(postMergeSchema, index)
|
||||
if err != nil {
|
||||
@@ -119,33 +131,26 @@ func buildIndex(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStor
|
||||
|
||||
pkMapping := ordinalMappingFromIndex(index)
|
||||
|
||||
mergedMap, err := creation.BuildUniqueProllyIndex(
|
||||
ctx,
|
||||
vrw,
|
||||
ns,
|
||||
postMergeSchema,
|
||||
index,
|
||||
m,
|
||||
func(ctx context.Context, existingKey, newKey val.Tuple) (err error) {
|
||||
eK := getPKFromSecondaryKey(kb, p, pkMapping, existingKey)
|
||||
nK := getPKFromSecondaryKey(kb, p, pkMapping, newKey)
|
||||
err = replaceUniqueKeyViolation(ctx, artEditor, m, eK, kd, theirRootIsh, vInfo, tblName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = replaceUniqueKeyViolation(ctx, artEditor, m, nK, kd, theirRootIsh, vInfo, tblName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
mergedMap, err := creation.BuildUniqueProllyIndex(ctx, vrw, ns, postMergeSchema, tblName, index, m, func(ctx context.Context, existingKey, newKey val.Tuple) (err error) {
|
||||
eK := getPKFromSecondaryKey(kb, p, pkMapping, existingKey)
|
||||
nK := getPKFromSecondaryKey(kb, p, pkMapping, newKey)
|
||||
err = replaceUniqueKeyViolation(ctx, artEditor, m, eK, kd, theirRootIsh, vInfo, tblName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = replaceUniqueKeyViolation(ctx, artEditor, m, nK, kd, theirRootIsh, vInfo, tblName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return mergedMap, nil
|
||||
}
|
||||
|
||||
mergedIndex, err := creation.BuildSecondaryProllyIndex(ctx, vrw, ns, postMergeSchema, index, m)
|
||||
mergedIndex, err := creation.BuildSecondaryProllyIndex(ctx, vrw, ns, postMergeSchema, tblName, index, m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -22,10 +22,8 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/dolthub/go-mysql-server/memory"
|
||||
"github.com/dolthub/go-mysql-server/sql"
|
||||
"github.com/dolthub/go-mysql-server/sql/analyzer"
|
||||
"github.com/dolthub/go-mysql-server/sql/planbuilder"
|
||||
"github.com/dolthub/go-mysql-server/sql/expression"
|
||||
"github.com/dolthub/go-mysql-server/sql/transform"
|
||||
"github.com/dolthub/go-mysql-server/sql/types"
|
||||
errorkinds "gopkg.in/src-d/go-errors.v1"
|
||||
@@ -35,7 +33,6 @@ import (
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
|
||||
"github.com/dolthub/dolt/go/store/hash"
|
||||
"github.com/dolthub/dolt/go/store/pool"
|
||||
"github.com/dolthub/dolt/go/store/prolly"
|
||||
@@ -177,7 +174,7 @@ func mergeProllyTableData(ctx *sql.Context, tm *TableMerger, finalSch schema.Sch
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
nullChk, err := newNullValidator(ctx, finalSch, tm, valueMerger, artEditor, leftEditor, sec.leftMut)
|
||||
nullChk, err := newNullValidator(ctx, finalSch, tm, valueMerger, artEditor, leftEditor, sec.leftIdxes)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -334,7 +331,16 @@ func threeWayDiffer(ctx context.Context, tm *TableMerger, valueMerger *valueMerg
|
||||
}
|
||||
ancRows := durable.ProllyMapFromIndex(ar)
|
||||
|
||||
return tree.NewThreeWayDiffer(ctx, leftRows.NodeStore(), leftRows.Tuples(), rightRows.Tuples(), ancRows.Tuples(), valueMerger.tryMerge, valueMerger.keyless, leftRows.Tuples().Order)
|
||||
return tree.NewThreeWayDiffer(
|
||||
ctx,
|
||||
leftRows.NodeStore(),
|
||||
leftRows.Tuples(),
|
||||
rightRows.Tuples(),
|
||||
ancRows.Tuples(),
|
||||
valueMerger.tryMerge,
|
||||
valueMerger.keyless,
|
||||
leftRows.Tuples().Order,
|
||||
)
|
||||
}
|
||||
|
||||
// checkValidator is responsible for inspecting three-way diff events, running any check constraint expressions
|
||||
@@ -361,7 +367,7 @@ func newCheckValidator(ctx *sql.Context, tm *TableMerger, vm *valueMerger, sch s
|
||||
continue
|
||||
}
|
||||
|
||||
expr, err := resolveExpression(ctx, check.Expression(), sch, tm.name)
|
||||
expr, err := index.ResolveCheckExpression(ctx, tm.name, sch, check.Expression())
|
||||
if err != nil {
|
||||
return checkValidator{}, err
|
||||
}
|
||||
@@ -424,7 +430,7 @@ func (cv checkValidator) validateDiff(ctx *sql.Context, diff tree.ThreeWayDiff)
|
||||
newTuple = val.NewTuple(cv.valueMerger.syncPool, newTupleBytes...)
|
||||
}
|
||||
|
||||
row, err := buildRow(ctx, diff.Key, newTuple, cv.sch, cv.tableMerger)
|
||||
row, err := index.BuildRow(ctx, diff.Key, newTuple, cv.sch, cv.valueMerger.ns)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@@ -478,54 +484,6 @@ func (cv checkValidator) insertArtifact(ctx context.Context, key, value val.Tupl
|
||||
return cv.edits.ReplaceConstraintViolation(ctx, key, cv.srcHash, prolly.ArtifactTypeChkConsViol, cvm)
|
||||
}
|
||||
|
||||
// buildRow takes the |key| and |value| tuple and returns a new sql.Row, along with any errors encountered.
|
||||
func buildRow(ctx *sql.Context, key, value val.Tuple, sch schema.Schema, tableMerger *TableMerger) (sql.Row, error) {
|
||||
pkCols := sch.GetPKCols()
|
||||
valueCols := sch.GetNonPKCols()
|
||||
allCols := sch.GetAllCols()
|
||||
|
||||
// When we parse and resolve the check constraint expression with planbuilder, it leaves row position 0
|
||||
// for the expression itself, so we add an empty spot in index 0 of our row to account for that to make sure
|
||||
// the GetField expressions' indexes match up to the right columns.
|
||||
row := make(sql.Row, allCols.Size()+1)
|
||||
|
||||
// Skip adding the key tuple if we're working with a keyless table, since the table row data is
|
||||
// always all contained in the value tuple for keyless tables.
|
||||
if !schema.IsKeyless(sch) {
|
||||
keyDesc := sch.GetKeyDescriptor()
|
||||
for i := range keyDesc.Types {
|
||||
value, err := index.GetField(ctx, keyDesc, i, key, tableMerger.ns)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pkCol := pkCols.GetColumns()[i]
|
||||
row[allCols.TagToIdx[pkCol.Tag]+1] = value
|
||||
}
|
||||
}
|
||||
|
||||
valueColIndex := 0
|
||||
valueDescriptor := sch.GetValueDescriptor()
|
||||
for valueTupleIndex := range valueDescriptor.Types {
|
||||
// Skip processing the first value in the value tuple for keyless tables, since that field
|
||||
// always holds the cardinality of the row and shouldn't be passed in to an expression.
|
||||
if schema.IsKeyless(sch) && valueTupleIndex == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
value, err := index.GetField(ctx, valueDescriptor, valueTupleIndex, value, tableMerger.ns)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
col := valueCols.GetColumns()[valueColIndex]
|
||||
row[allCols.TagToIdx[col.Tag]+1] = value
|
||||
valueColIndex += 1
|
||||
}
|
||||
|
||||
return row, nil
|
||||
}
|
||||
|
||||
// uniqValidator checks whether new additions from the merge-right
|
||||
// duplicate secondary index entries.
|
||||
type uniqValidator struct {
|
||||
@@ -537,7 +495,7 @@ type uniqValidator struct {
|
||||
tm *TableMerger
|
||||
}
|
||||
|
||||
func newUniqValidator(ctx context.Context, sch schema.Schema, tm *TableMerger, vm *valueMerger, edits *prolly.ArtifactsEditor) (uniqValidator, error) {
|
||||
func newUniqValidator(ctx *sql.Context, sch schema.Schema, tm *TableMerger, vm *valueMerger, edits *prolly.ArtifactsEditor) (uniqValidator, error) {
|
||||
srcHash, err := tm.rightSrc.HashOf()
|
||||
if err != nil {
|
||||
return uniqValidator{}, err
|
||||
@@ -575,7 +533,7 @@ func newUniqValidator(ctx context.Context, sch schema.Schema, tm *TableMerger, v
|
||||
}
|
||||
secondary := durable.ProllyMapFromIndex(idx)
|
||||
|
||||
u, err := newUniqIndex(sch, def, clustered, secondary)
|
||||
u, err := newUniqIndex(ctx, sch, tm.name, def, clustered, secondary)
|
||||
if err != nil {
|
||||
return uniqValidator{}, err
|
||||
}
|
||||
@@ -724,7 +682,7 @@ type uniqIndex struct {
|
||||
clusteredKeyDesc val.TupleDesc
|
||||
}
|
||||
|
||||
func newUniqIndex(sch schema.Schema, def schema.Index, clustered, secondary prolly.Map) (uniqIndex, error) {
|
||||
func newUniqIndex(ctx *sql.Context, sch schema.Schema, tableName string, def schema.Index, clustered, secondary prolly.Map) (uniqIndex, error) {
|
||||
meta, err := makeUniqViolMeta(sch, def)
|
||||
if err != nil {
|
||||
return uniqIndex{}, err
|
||||
@@ -736,7 +694,11 @@ func newUniqIndex(sch schema.Schema, def schema.Index, clustered, secondary prol
|
||||
p := clustered.Pool()
|
||||
|
||||
prefixDesc := secondary.KeyDesc().PrefixDesc(def.Count())
|
||||
secondaryBld := index.NewSecondaryKeyBuilder(sch, def, secondary.KeyDesc(), p, secondary.NodeStore())
|
||||
secondaryBld, err := index.NewSecondaryKeyBuilder(ctx, tableName, sch, def, secondary.KeyDesc(), p, secondary.NodeStore())
|
||||
if err != nil {
|
||||
return uniqIndex{}, err
|
||||
}
|
||||
|
||||
clusteredBld := index.NewClusteredKeyBuilder(def, sch, clustered.KeyDesc(), p)
|
||||
|
||||
return uniqIndex{
|
||||
@@ -1067,8 +1029,24 @@ func (m *primaryMerger) merge(ctx *sql.Context, diff tree.ThreeWayDiff, sourceSc
|
||||
return fmt.Errorf("cannot merge keyless tables with reordered columns")
|
||||
}
|
||||
} else {
|
||||
tempTupleValue, err := remapTupleWithColumnDefaults(ctx, diff.Key, diff.Right, sourceSch.GetValueDescriptor(),
|
||||
m.valueMerger.rightMapping, m.tableMerger, m.finalSch, m.valueMerger.syncPool, true)
|
||||
defaults, err := resolveDefaults(ctx, m.tableMerger.name, m.finalSch, m.tableMerger.rightSch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tempTupleValue, err := remapTupleWithColumnDefaults(
|
||||
ctx,
|
||||
diff.Key,
|
||||
diff.Right,
|
||||
sourceSch.GetValueDescriptor(),
|
||||
m.valueMerger.rightMapping,
|
||||
m.tableMerger,
|
||||
m.tableMerger.rightSch,
|
||||
m.finalSch,
|
||||
defaults,
|
||||
m.valueMerger.syncPool,
|
||||
true,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1078,12 +1056,111 @@ func (m *primaryMerger) merge(ctx *sql.Context, diff tree.ThreeWayDiff, sourceSc
|
||||
case tree.DiffOpRightDelete:
|
||||
return m.mut.Put(ctx, diff.Key, diff.Right)
|
||||
case tree.DiffOpDivergentModifyResolved:
|
||||
return m.mut.Put(ctx, diff.Key, diff.Merged)
|
||||
// any generated columns need to be re-resolved because their computed values may have changed as a result of
|
||||
// the merge
|
||||
merged := diff.Merged
|
||||
if hasStoredGeneratedColumns(m.finalSch) {
|
||||
defaults, err := resolveDefaults(ctx, m.tableMerger.name, m.finalSch, m.tableMerger.rightSch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tempTupleValue, err := remapTupleWithColumnDefaults(
|
||||
ctx,
|
||||
diff.Key,
|
||||
merged,
|
||||
m.finalSch.GetValueDescriptor(),
|
||||
m.valueMerger.rightMapping,
|
||||
m.tableMerger,
|
||||
m.tableMerger.rightSch,
|
||||
m.finalSch,
|
||||
defaults,
|
||||
m.valueMerger.syncPool,
|
||||
true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
merged = tempTupleValue
|
||||
}
|
||||
|
||||
return m.mut.Put(ctx, diff.Key, merged)
|
||||
default:
|
||||
return fmt.Errorf("unexpected diffOp for editing primary index: %s", diff.Op)
|
||||
}
|
||||
}
|
||||
|
||||
func resolveDefaults(ctx *sql.Context, tableName string, mergedSchema schema.Schema, sourceSchema schema.Schema) ([]sql.Expression, error) {
|
||||
var exprs []sql.Expression
|
||||
i := 0
|
||||
|
||||
// We want a slice of expressions in the order of the merged schema, but with column indexes from the source schema,
|
||||
// against which they will be evaluated
|
||||
err := mergedSchema.GetNonPKCols().Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
|
||||
if col.Virtual {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if col.Default != "" || col.Generated != "" {
|
||||
expr, err := index.ResolveDefaultExpression(ctx, tableName, mergedSchema, col)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
if len(exprs) == 0 {
|
||||
exprs = make([]sql.Expression, mergedSchema.GetNonPKCols().StoredSize())
|
||||
}
|
||||
exprs[i] = expr
|
||||
}
|
||||
|
||||
i++
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// The default expresions always come in the order of the merged schema, but the fields we need to apply them to
|
||||
// might have different column indexes in the case of a schema change
|
||||
if len(exprs) > 0 {
|
||||
for i := range exprs {
|
||||
if exprs[i] == nil {
|
||||
continue
|
||||
}
|
||||
exprs[i], _, _ = transform.Expr(exprs[i], func(e sql.Expression) (sql.Expression, transform.TreeIdentity, error) {
|
||||
if gf, ok := e.(*expression.GetField); ok {
|
||||
newIdx := indexOf(gf.Name(), sourceSchema.GetAllCols().GetColumnNames())
|
||||
if newIdx >= 0 {
|
||||
return gf.WithIndex(newIdx), transform.NewTree, nil
|
||||
}
|
||||
}
|
||||
return e, transform.SameTree, nil
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return exprs, nil
|
||||
}
|
||||
|
||||
func indexOf(col string, cols []string) int {
|
||||
for i, column := range cols {
|
||||
if column == col {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func hasStoredGeneratedColumns(sch schema.Schema) bool {
|
||||
hasGenerated := false
|
||||
sch.GetAllCols().Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
|
||||
if col.Generated != "" && !col.Virtual {
|
||||
hasGenerated = true
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
return hasGenerated
|
||||
}
|
||||
|
||||
func (m *primaryMerger) finalize(ctx context.Context) (durable.Index, error) {
|
||||
mergedMap, err := m.mut.Map(ctx)
|
||||
if err != nil {
|
||||
@@ -1097,7 +1174,7 @@ func (m *primaryMerger) finalize(ctx context.Context) (durable.Index, error) {
|
||||
type secondaryMerger struct {
|
||||
leftSet durable.IndexSet
|
||||
rightSet durable.IndexSet
|
||||
leftMut []MutableSecondaryIdx
|
||||
leftIdxes []MutableSecondaryIdx
|
||||
valueMerger *valueMerger
|
||||
mergedSchema schema.Schema
|
||||
tableMerger *TableMerger
|
||||
@@ -1105,14 +1182,14 @@ type secondaryMerger struct {
|
||||
|
||||
const secondaryMergerPendingSize = 650_000
|
||||
|
||||
func newSecondaryMerger(ctx context.Context, tm *TableMerger, valueMerger *valueMerger, mergedSchema schema.Schema) (*secondaryMerger, error) {
|
||||
func newSecondaryMerger(ctx *sql.Context, tm *TableMerger, valueMerger *valueMerger, mergedSchema schema.Schema) (*secondaryMerger, error) {
|
||||
ls, err := tm.leftTbl.GetIndexSet(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Use the mergedSchema to work with the secondary indexes, to pull out row data using the right
|
||||
// pri_index -> sec_index mapping.
|
||||
lm, err := GetMutableSecondaryIdxsWithPending(ctx, mergedSchema, ls, secondaryMergerPendingSize)
|
||||
lm, err := GetMutableSecondaryIdxsWithPending(ctx, mergedSchema, tm.name, ls, secondaryMergerPendingSize)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1125,7 +1202,7 @@ func newSecondaryMerger(ctx context.Context, tm *TableMerger, valueMerger *value
|
||||
return &secondaryMerger{
|
||||
leftSet: ls,
|
||||
rightSet: rs,
|
||||
leftMut: lm,
|
||||
leftIdxes: lm,
|
||||
valueMerger: valueMerger,
|
||||
mergedSchema: mergedSchema,
|
||||
tableMerger: tm,
|
||||
@@ -1134,9 +1211,10 @@ func newSecondaryMerger(ctx context.Context, tm *TableMerger, valueMerger *value
|
||||
|
||||
func (m *secondaryMerger) merge(ctx *sql.Context, diff tree.ThreeWayDiff, sourceSch schema.Schema) error {
|
||||
var err error
|
||||
for _, idx := range m.leftMut {
|
||||
for _, idx := range m.leftIdxes {
|
||||
switch diff.Op {
|
||||
case tree.DiffOpDivergentModifyResolved:
|
||||
// TODO: we need to re-resolve values from generated columns here as well
|
||||
err = applyEdit(ctx, idx, diff.Key, diff.Left, diff.Merged)
|
||||
case tree.DiffOpRightAdd, tree.DiffOpRightModify:
|
||||
// Just as with the primary index, we need to map right-side changes to the final, merged schema.
|
||||
@@ -1150,8 +1228,24 @@ func (m *secondaryMerger) merge(ctx *sql.Context, diff tree.ThreeWayDiff, source
|
||||
return fmt.Errorf("cannot merge keyless tables with reordered columns")
|
||||
}
|
||||
} else {
|
||||
tempTupleValue, err := remapTupleWithColumnDefaults(ctx, diff.Key, diff.Right, sourceSch.GetValueDescriptor(),
|
||||
m.valueMerger.rightMapping, m.tableMerger, m.mergedSchema, m.valueMerger.syncPool, true)
|
||||
defaults, err := resolveDefaults(ctx, m.tableMerger.name, m.mergedSchema, m.tableMerger.rightSch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tempTupleValue, err := remapTupleWithColumnDefaults(
|
||||
ctx,
|
||||
diff.Key,
|
||||
diff.Right,
|
||||
sourceSch.GetValueDescriptor(),
|
||||
m.valueMerger.rightMapping,
|
||||
m.tableMerger,
|
||||
m.tableMerger.rightSch,
|
||||
m.mergedSchema,
|
||||
defaults,
|
||||
m.valueMerger.syncPool,
|
||||
true,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1177,7 +1271,7 @@ func (m *secondaryMerger) merge(ctx *sql.Context, diff tree.ThreeWayDiff, source
|
||||
|
||||
// finalize reifies edits into output index sets
|
||||
func (m *secondaryMerger) finalize(ctx context.Context) (durable.IndexSet, durable.IndexSet, error) {
|
||||
for _, idx := range m.leftMut {
|
||||
for _, idx := range m.leftIdxes {
|
||||
idxMap, err := idx.Map(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
@@ -1190,41 +1284,6 @@ func (m *secondaryMerger) finalize(ctx context.Context) (durable.IndexSet, durab
|
||||
return m.leftSet, m.rightSet, nil
|
||||
}
|
||||
|
||||
// resolveExpression takes in a string |expression| and does basic resolution on it (e.g. column names and function
|
||||
// names) so that the returned sql.Expression can be evaluated. The schema of the table is specified in |sch| and the
|
||||
// name of the table in |tableName|.
|
||||
func resolveExpression(ctx *sql.Context, expression string, sch schema.Schema, tableName string) (sql.Expression, error) {
|
||||
query := fmt.Sprintf("SELECT %s from %s.%s", expression, "mydb", tableName)
|
||||
sqlSch, err := sqlutil.FromDoltSchema("", tableName, sch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mockDatabase := memory.NewDatabase("mydb")
|
||||
mockTable := memory.NewLocalTable(mockDatabase.BaseDatabase, tableName, sqlSch, nil)
|
||||
mockDatabase.AddTable(tableName, mockTable)
|
||||
mockProvider := memory.NewDBProvider(mockDatabase)
|
||||
catalog := analyzer.NewCatalog(mockProvider)
|
||||
|
||||
pseudoAnalyzedQuery, err := planbuilder.Parse(ctx, catalog, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var expr sql.Expression
|
||||
transform.Inspect(pseudoAnalyzedQuery, func(n sql.Node) bool {
|
||||
if projector, ok := n.(sql.Projector); ok {
|
||||
expr = projector.ProjectedExprs()[0]
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
if expr == nil {
|
||||
return nil, fmt.Errorf("unable to find expression in analyzed query")
|
||||
}
|
||||
|
||||
return expr, nil
|
||||
}
|
||||
|
||||
// remapTuple takes the given |tuple| and the |desc| that describes its data, and uses |mapping| to map the tuple's
|
||||
// data into a new [][]byte, as indicated by the specified ordinal mapping.
|
||||
func remapTuple(tuple val.Tuple, desc val.TupleDesc, mapping val.OrdinalMapping) [][]byte {
|
||||
@@ -1240,51 +1299,48 @@ func remapTuple(tuple val.Tuple, desc val.TupleDesc, mapping val.OrdinalMapping)
|
||||
}
|
||||
|
||||
// remapTupleWithColumnDefaults takes the given |tuple| (and the |tupleDesc| that describes how to access its fields)
|
||||
// and uses |mapping| to map the tuple's data and return a new tuple. |tm| provides high access to the name of the table
|
||||
// currently being merged and associated node store. |mergedSch| is the new schema of the table and is used to look up
|
||||
// column default values to apply to any existing rows when a new column is added as part of a merge. |pool| is used
|
||||
// to allocate memory for the new tuple. A pointer to the new tuple data is returned, along with any error encountered.
|
||||
// The |rightSide| parameter indicates if the tuple came from the right side of the merge; this is needed to determine
|
||||
// if the tuple data needs to be converted from the old schema type to a changed schema type.
|
||||
func remapTupleWithColumnDefaults(ctx *sql.Context, keyTuple, valueTuple val.Tuple, tupleDesc val.TupleDesc, mapping val.OrdinalMapping, tm *TableMerger, mergedSch schema.Schema, pool pool.BuffPool, rightSide bool) (val.Tuple, error) {
|
||||
// and uses |mapping| to map the tuple's data and return a new tuple.
|
||||
// |tm| provides high access to the name of the table currently being merged and associated node store.
|
||||
// |mergedSch| is the new schema of the table and is used to look up column default values to apply to any existing
|
||||
// rows when a new column is added as part of a merge.
|
||||
// |pool| is used to allocate memory for the new tuple.
|
||||
// |defaultExprs| is a slice of expressions that represent the default or generated values for all columns, with
|
||||
// indexes in the same order as the tuple provided.
|
||||
// |rightSide| indicates if the tuple came from the right side of the merge; this is needed to determine if the tuple
|
||||
// data needs to be converted from the old schema type to a changed schema type.
|
||||
func remapTupleWithColumnDefaults(
|
||||
ctx *sql.Context,
|
||||
keyTuple, valueTuple val.Tuple,
|
||||
valDesc val.TupleDesc,
|
||||
mapping val.OrdinalMapping,
|
||||
tm *TableMerger,
|
||||
rowSch schema.Schema,
|
||||
mergedSch schema.Schema,
|
||||
defaultExprs []sql.Expression,
|
||||
pool pool.BuffPool,
|
||||
rightSide bool,
|
||||
) (val.Tuple, error) {
|
||||
tb := val.NewTupleBuilder(mergedSch.GetValueDescriptor())
|
||||
|
||||
var secondPass []int
|
||||
for to, from := range mapping {
|
||||
var value interface{}
|
||||
col := mergedSch.GetNonPKCols().GetByIndex(to)
|
||||
col := mergedSch.GetNonPKCols().GetByStoredIndex(to)
|
||||
if from == -1 {
|
||||
// If the column is a new column, then look up any default value
|
||||
if col.Default != "" {
|
||||
// TODO: Not great to reparse the expression for every single row... need to cache this
|
||||
expression, err := resolveExpression(ctx, col.Default, mergedSch, tm.name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !expression.Resolved() {
|
||||
return nil, ErrUnableToMergeColumnDefaultValue.New(col.Default, tm.name)
|
||||
}
|
||||
|
||||
row, err := buildRow(ctx, keyTuple, valueTuple, mergedSch, tm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
value, err = expression.Eval(ctx, row)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
value, _, err = col.TypeInfo.ToSqlType().Convert(value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = index.PutField(ctx, tm.ns, tb, to, value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// If the column is a new column, then look up any default or generated value in a second pass, after the
|
||||
// non-default and non-generated fields have been established. Virtual columns have been excluded, so any
|
||||
// generated column is stored.
|
||||
if col.Default != "" || col.Generated != "" {
|
||||
secondPass = append(secondPass, to)
|
||||
}
|
||||
} else {
|
||||
value, err := index.GetField(ctx, tupleDesc, from, valueTuple, tm.ns)
|
||||
var value any
|
||||
var err error
|
||||
// Generated column values need to be regenerated after the merge
|
||||
if col.Generated != "" {
|
||||
secondPass = append(secondPass, to)
|
||||
}
|
||||
|
||||
value, err = index.GetField(ctx, valDesc, from, valueTuple, tm.ns)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1301,9 +1357,53 @@ func remapTupleWithColumnDefaults(ctx *sql.Context, keyTuple, valueTuple val.Tup
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, to := range secondPass {
|
||||
col := mergedSch.GetNonPKCols().GetByStoredIndex(to)
|
||||
err := writeTupleExpression(ctx, keyTuple, valueTuple, defaultExprs[to], col, rowSch, tm, tb, to)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return tb.Build(pool), nil
|
||||
}
|
||||
|
||||
// writeTupleExpression attempts to evaluate the expression string |exprString| against the row provided and write it
|
||||
// to the provided index in the tuple builder. This is necessary for column default values and generated columns.
|
||||
func writeTupleExpression(
|
||||
ctx *sql.Context,
|
||||
keyTuple val.Tuple,
|
||||
valueTuple val.Tuple,
|
||||
expr sql.Expression,
|
||||
col schema.Column,
|
||||
sch schema.Schema,
|
||||
tm *TableMerger,
|
||||
tb *val.TupleBuilder,
|
||||
colIdx int,
|
||||
) error {
|
||||
if !expr.Resolved() {
|
||||
return ErrUnableToMergeColumnDefaultValue.New(expr.String(), tm.name)
|
||||
}
|
||||
|
||||
row, err := index.BuildRow(ctx, keyTuple, valueTuple, sch, tm.ns)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
value, err := expr.Eval(ctx, row)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
value, _, err = col.TypeInfo.ToSqlType().Convert(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return index.PutField(ctx, tm.ns, tb, colIdx, value)
|
||||
}
|
||||
|
||||
// convertValueToNewType handles converting a value from a previous type into a new type. |value| is the value from
|
||||
// the previous schema, |newTypeInfo| is the type info for the value in the new schema, |tm| is the TableMerger
|
||||
// instance that describes how the table is being merged, |from| is the field position in the value tuple from the
|
||||
@@ -1411,15 +1511,20 @@ func newValueMerger(merged, leftSch, rightSch, baseSch schema.Schema, syncPool p
|
||||
// mapped from the source schema to destination schema by finding an identical tag, or if no
|
||||
// identical tag is found, then falling back to a match on column name and type.
|
||||
func generateSchemaMappings(mergedSch, leftSch, rightSch, baseSch schema.Schema) (leftMapping, rightMapping, baseMapping val.OrdinalMapping) {
|
||||
n := mergedSch.GetNonPKCols().Size()
|
||||
n := mergedSch.GetNonPKCols().StoredSize()
|
||||
leftMapping = make(val.OrdinalMapping, n)
|
||||
rightMapping = make(val.OrdinalMapping, n)
|
||||
baseMapping = make(val.OrdinalMapping, n)
|
||||
|
||||
for i, col := range mergedSch.GetNonPKCols().GetColumns() {
|
||||
i := 0
|
||||
for _, col := range mergedSch.GetNonPKCols().GetColumns() {
|
||||
if col.Virtual {
|
||||
continue
|
||||
}
|
||||
leftMapping[i] = findNonPKColumnMappingByTagOrName(leftSch, col)
|
||||
rightMapping[i] = findNonPKColumnMappingByTagOrName(rightSch, col)
|
||||
baseMapping[i] = findNonPKColumnMappingByTagOrName(baseSch, col)
|
||||
i++
|
||||
}
|
||||
|
||||
return leftMapping, rightMapping, baseMapping
|
||||
@@ -1440,7 +1545,7 @@ func findNonPKColumnMappingByName(sch schema.Schema, name string) int {
|
||||
// matching tag is not found, then this function falls back to looking for a matching column by name. If no
|
||||
// matching column is found, then this function returns -1.
|
||||
func findNonPKColumnMappingByTagOrName(sch schema.Schema, col schema.Column) int {
|
||||
if idx, ok := sch.GetNonPKCols().TagToIdx[col.Tag]; ok {
|
||||
if idx, ok := sch.GetNonPKCols().StoredIndexByTag(col.Tag); ok {
|
||||
return idx
|
||||
} else {
|
||||
return findNonPKColumnMappingByName(sch, col.Name)
|
||||
@@ -1468,6 +1573,11 @@ func migrateDataToMergedSchema(ctx *sql.Context, tm *TableMerger, vm *valueMerge
|
||||
}
|
||||
valueDescriptor := leftSch.GetValueDescriptor()
|
||||
|
||||
defaults, err := resolveDefaults(ctx, tm.name, mergedSch, tm.leftSch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
keyTuple, valueTuple, err := mapIter.Next(ctx)
|
||||
if err == io.EOF {
|
||||
@@ -1476,7 +1586,19 @@ func migrateDataToMergedSchema(ctx *sql.Context, tm *TableMerger, vm *valueMerge
|
||||
return err
|
||||
}
|
||||
|
||||
newValueTuple, err := remapTupleWithColumnDefaults(ctx, keyTuple, valueTuple, valueDescriptor, vm.leftMapping, tm, mergedSch, vm.syncPool, false)
|
||||
newValueTuple, err := remapTupleWithColumnDefaults(
|
||||
ctx,
|
||||
keyTuple,
|
||||
valueTuple,
|
||||
valueDescriptor,
|
||||
vm.leftMapping,
|
||||
tm,
|
||||
tm.leftSch,
|
||||
mergedSch,
|
||||
defaults,
|
||||
vm.syncPool,
|
||||
false,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1565,9 +1687,11 @@ func (m *valueMerger) processColumn(ctx context.Context, i int, left, right, bas
|
||||
leftCol, leftColIdx, leftColExists := getColumn(&left, &m.leftMapping, i)
|
||||
rightCol, rightColIdx, rightColExists := getColumn(&right, &m.rightMapping, i)
|
||||
resultType := m.resultVD.Types[i]
|
||||
resultColumn := m.resultSchema.GetNonPKCols().GetByIndex(i)
|
||||
generatedColumn := resultColumn.Generated != ""
|
||||
|
||||
// We previously asserted that left and right are not nil.
|
||||
//But base can be nil in the event of convergent inserts.
|
||||
// But base can be nil in the event of convergent inserts.
|
||||
if base == nil || !baseColExists {
|
||||
// There are two possible cases:
|
||||
// - The base row doesn't exist, or
|
||||
@@ -1591,6 +1715,12 @@ func (m *valueMerger) processColumn(ctx context.Context, i int, left, right, bas
|
||||
// columns are equal, return either.
|
||||
return leftCol, false, nil
|
||||
}
|
||||
|
||||
// generated columns will be updated as part of the merge later on, so choose either value for now
|
||||
if generatedColumn {
|
||||
return leftCol, false, nil
|
||||
}
|
||||
|
||||
// conflicting inserts
|
||||
return nil, true, nil
|
||||
}
|
||||
@@ -1642,6 +1772,10 @@ func (m *valueMerger) processColumn(ctx context.Context, i int, left, right, bas
|
||||
|
||||
switch {
|
||||
case leftModified && rightModified:
|
||||
// generated columns will be updated as part of the merge later on, so choose either value for now
|
||||
if generatedColumn {
|
||||
return leftCol, false, nil
|
||||
}
|
||||
// concurrent modification
|
||||
return nil, true, nil
|
||||
case leftModified:
|
||||
|
||||
@@ -311,21 +311,23 @@ func TestMergeCommits(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tbl, _, err := root.GetTable(context.Background(), tableName)
|
||||
ctx := sql.NewEmptyContext()
|
||||
|
||||
tbl, _, err := root.GetTable(ctx, tableName)
|
||||
assert.NoError(t, err)
|
||||
sch, err := tbl.GetSchema(context.Background())
|
||||
sch, err := tbl.GetSchema(ctx)
|
||||
assert.NoError(t, err)
|
||||
expected, err := doltdb.NewTable(context.Background(), vrw, ns, sch, expectedRows, nil, nil)
|
||||
expected, err := doltdb.NewTable(ctx, vrw, ns, sch, expectedRows, nil, nil)
|
||||
assert.NoError(t, err)
|
||||
expected, err = rebuildAllProllyIndexes(context.Background(), expected)
|
||||
expected, err = rebuildAllProllyIndexes(ctx, expected)
|
||||
assert.NoError(t, err)
|
||||
expected, err = expected.SetArtifacts(context.Background(), durable.ArtifactIndexFromProllyMap(expectedArtifacts))
|
||||
expected, err = expected.SetArtifacts(ctx, durable.ArtifactIndexFromProllyMap(expectedArtifacts))
|
||||
require.NoError(t, err)
|
||||
|
||||
mergedRows, err := merged.table.GetRowData(context.Background())
|
||||
mergedRows, err := merged.table.GetRowData(ctx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
artIdx, err := merged.table.GetArtifacts(context.Background())
|
||||
artIdx, err := merged.table.GetArtifacts(ctx)
|
||||
require.NoError(t, err)
|
||||
artifacts := durable.ProllyMapFromArtifactIndex(artIdx)
|
||||
MustEqualArtifactMap(t, expectedArtifacts, artifacts)
|
||||
@@ -333,9 +335,9 @@ func TestMergeCommits(t *testing.T) {
|
||||
MustEqualProlly(t, tableName, durable.ProllyMapFromIndex(expectedRows), durable.ProllyMapFromIndex(mergedRows))
|
||||
|
||||
for _, index := range sch.Indexes().AllIndexes() {
|
||||
mergedIndexRows, err := merged.table.GetIndexRowData(context.Background(), index.Name())
|
||||
mergedIndexRows, err := merged.table.GetIndexRowData(ctx, index.Name())
|
||||
require.NoError(t, err)
|
||||
expectedIndexRows, err := expected.GetIndexRowData(context.Background(), index.Name())
|
||||
expectedIndexRows, err := expected.GetIndexRowData(ctx, index.Name())
|
||||
require.NoError(t, err)
|
||||
MustEqualProlly(t, index.Name(), durable.ProllyMapFromIndex(expectedIndexRows), durable.ProllyMapFromIndex(mergedIndexRows))
|
||||
}
|
||||
@@ -466,29 +468,31 @@ func setupMergeTest(t *testing.T) (*doltdb.DoltDB, types.ValueReadWriter, tree.N
|
||||
}
|
||||
}
|
||||
|
||||
updatedRows, err := leftMut.Map(context.Background())
|
||||
ctx := sql.NewEmptyContext()
|
||||
|
||||
updatedRows, err := leftMut.Map(ctx)
|
||||
require.NoError(t, err)
|
||||
mergeRows, err := rightMut.Map(context.Background())
|
||||
mergeRows, err := rightMut.Map(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
rootTbl, err := doltdb.NewTable(context.Background(), vrw, ns, sch, durable.IndexFromProllyMap(updatedRows), nil, nil)
|
||||
rootTbl, err := doltdb.NewTable(ctx, vrw, ns, sch, durable.IndexFromProllyMap(updatedRows), nil, nil)
|
||||
require.NoError(t, err)
|
||||
rootTbl, err = rebuildAllProllyIndexes(context.Background(), rootTbl)
|
||||
rootTbl, err = rebuildAllProllyIndexes(ctx, rootTbl)
|
||||
require.NoError(t, err)
|
||||
|
||||
mergeTbl, err := doltdb.NewTable(context.Background(), vrw, ns, sch, durable.IndexFromProllyMap(mergeRows), nil, nil)
|
||||
mergeTbl, err := doltdb.NewTable(ctx, vrw, ns, sch, durable.IndexFromProllyMap(mergeRows), nil, nil)
|
||||
require.NoError(t, err)
|
||||
mergeTbl, err = rebuildAllProllyIndexes(context.Background(), mergeTbl)
|
||||
mergeTbl, err = rebuildAllProllyIndexes(ctx, mergeTbl)
|
||||
require.NoError(t, err)
|
||||
|
||||
ancTbl, err := doltdb.NewTable(context.Background(), vrw, ns, sch, durable.IndexFromProllyMap(initialRows), nil, nil)
|
||||
ancTbl, err := doltdb.NewTable(ctx, vrw, ns, sch, durable.IndexFromProllyMap(initialRows), nil, nil)
|
||||
require.NoError(t, err)
|
||||
ancTbl, err = rebuildAllProllyIndexes(context.Background(), ancTbl)
|
||||
ancTbl, err = rebuildAllProllyIndexes(ctx, ancTbl)
|
||||
require.NoError(t, err)
|
||||
|
||||
rightCm, baseCm, root, mergeRoot, ancRoot := buildLeftRightAncCommitsAndBranches(t, ddb, rootTbl, mergeTbl, ancTbl)
|
||||
|
||||
artifactMap, err := prolly.NewArtifactMapFromTuples(context.Background(), ns, kD)
|
||||
artifactMap, err := prolly.NewArtifactMapFromTuples(ctx, ns, kD)
|
||||
require.NoError(t, err)
|
||||
artEditor := artifactMap.Editor()
|
||||
|
||||
@@ -505,12 +509,12 @@ func setupMergeTest(t *testing.T) (*doltdb.DoltDB, types.ValueReadWriter, tree.N
|
||||
|
||||
for _, testCase := range testRows {
|
||||
if testCase.conflict {
|
||||
err = artEditor.Add(context.Background(), key(testCase.key), rightCmHash, prolly.ArtifactTypeConflict, d)
|
||||
err = artEditor.Add(ctx, key(testCase.key), rightCmHash, prolly.ArtifactTypeConflict, d)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
expectedArtifacts, err := artEditor.Flush(context.Background())
|
||||
expectedArtifacts, err := artEditor.Flush(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
return ddb, vrw, ns, rightCm, baseCm, root, mergeRoot, ancRoot, durable.IndexFromProllyMap(expectedRows), expectedArtifacts
|
||||
@@ -605,7 +609,7 @@ func setupNomsMergeTest(t *testing.T) (types.ValueReadWriter, tree.NodeStore, do
|
||||
|
||||
// rebuildAllProllyIndexes builds the data for the secondary indexes in |tbl|'s
|
||||
// schema.
|
||||
func rebuildAllProllyIndexes(ctx context.Context, tbl *doltdb.Table) (*doltdb.Table, error) {
|
||||
func rebuildAllProllyIndexes(ctx *sql.Context, tbl *doltdb.Table) (*doltdb.Table, error) {
|
||||
sch, err := tbl.GetSchema(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -627,7 +631,7 @@ func rebuildAllProllyIndexes(ctx context.Context, tbl *doltdb.Table) (*doltdb.Ta
|
||||
primary := durable.ProllyMapFromIndex(tableRowData)
|
||||
|
||||
for _, index := range sch.Indexes().AllIndexes() {
|
||||
rebuiltIndexRowData, err := creation.BuildSecondaryProllyIndex(ctx, tbl.ValueReadWriter(), tbl.NodeStore(), sch, index, primary)
|
||||
rebuiltIndexRowData, err := creation.BuildSecondaryProllyIndex(ctx, tbl.ValueReadWriter(), tbl.NodeStore(), sch, tableName, index, primary)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -17,6 +17,8 @@ package merge
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/dolthub/go-mysql-server/sql"
|
||||
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
|
||||
@@ -25,7 +27,7 @@ import (
|
||||
)
|
||||
|
||||
// GetMutableSecondaryIdxs returns a MutableSecondaryIdx for each secondary index in |indexes|.
|
||||
func GetMutableSecondaryIdxs(ctx context.Context, sch schema.Schema, indexes durable.IndexSet) ([]MutableSecondaryIdx, error) {
|
||||
func GetMutableSecondaryIdxs(ctx *sql.Context, sch schema.Schema, tableName string, indexes durable.IndexSet) ([]MutableSecondaryIdx, error) {
|
||||
mods := make([]MutableSecondaryIdx, sch.Indexes().Count())
|
||||
for i, index := range sch.Indexes().AllIndexes() {
|
||||
idx, err := indexes.GetIndex(ctx, sch, index.Name())
|
||||
@@ -36,7 +38,10 @@ func GetMutableSecondaryIdxs(ctx context.Context, sch schema.Schema, indexes dur
|
||||
if schema.IsKeyless(sch) {
|
||||
m = prolly.ConvertToSecondaryKeylessIndex(m)
|
||||
}
|
||||
mods[i] = NewMutableSecondaryIdx(m, sch, index)
|
||||
mods[i], err = NewMutableSecondaryIdx(ctx, m, sch, tableName, index)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return mods, nil
|
||||
}
|
||||
@@ -44,7 +49,7 @@ func GetMutableSecondaryIdxs(ctx context.Context, sch schema.Schema, indexes dur
|
||||
// GetMutableSecondaryIdxsWithPending returns a MutableSecondaryIdx for each secondary index in |indexes|. If an index
|
||||
// is listed in the given |sch|, but does not exist in the given |indexes|, then it is skipped. This is useful when
|
||||
// merging a schema that has a new index, but the index does not exist on the index set being modified.
|
||||
func GetMutableSecondaryIdxsWithPending(ctx context.Context, sch schema.Schema, indexes durable.IndexSet, pendingSize int) ([]MutableSecondaryIdx, error) {
|
||||
func GetMutableSecondaryIdxsWithPending(ctx *sql.Context, sch schema.Schema, tableName string, indexes durable.IndexSet, pendingSize int) ([]MutableSecondaryIdx, error) {
|
||||
mods := make([]MutableSecondaryIdx, 0, sch.Indexes().Count())
|
||||
for _, index := range sch.Indexes().AllIndexes() {
|
||||
|
||||
@@ -69,7 +74,11 @@ func GetMutableSecondaryIdxsWithPending(ctx context.Context, sch schema.Schema,
|
||||
if schema.IsKeyless(sch) {
|
||||
m = prolly.ConvertToSecondaryKeylessIndex(m)
|
||||
}
|
||||
newMutableSecondaryIdx := NewMutableSecondaryIdx(m, sch, index)
|
||||
newMutableSecondaryIdx, err := NewMutableSecondaryIdx(ctx, m, sch, tableName, index)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newMutableSecondaryIdx.mut = newMutableSecondaryIdx.mut.WithMaxPending(pendingSize)
|
||||
mods = append(mods, newMutableSecondaryIdx)
|
||||
}
|
||||
@@ -86,13 +95,17 @@ type MutableSecondaryIdx struct {
|
||||
}
|
||||
|
||||
// NewMutableSecondaryIdx returns a MutableSecondaryIdx. |m| is the secondary idx data.
|
||||
func NewMutableSecondaryIdx(idx prolly.Map, sch schema.Schema, def schema.Index) MutableSecondaryIdx {
|
||||
b := index.NewSecondaryKeyBuilder(sch, def, idx.KeyDesc(), idx.Pool(), idx.NodeStore())
|
||||
func NewMutableSecondaryIdx(ctx *sql.Context, idx prolly.Map, sch schema.Schema, tableName string, def schema.Index) (MutableSecondaryIdx, error) {
|
||||
b, err := index.NewSecondaryKeyBuilder(ctx, tableName, sch, def, idx.KeyDesc(), idx.Pool(), idx.NodeStore())
|
||||
if err != nil {
|
||||
return MutableSecondaryIdx{}, err
|
||||
}
|
||||
|
||||
return MutableSecondaryIdx{
|
||||
Name: def.Name(),
|
||||
mut: idx.Mutate(),
|
||||
builder: b,
|
||||
}
|
||||
}, nil
|
||||
}
|
||||
|
||||
// InsertEntry inserts a secondary index entry given the key and new value
|
||||
|
||||
@@ -532,11 +532,7 @@ func getSchemaSqlPatch(ctx *sql.Context, toRoot *doltdb.RootValue, td diff.Table
|
||||
if td.IsDrop() {
|
||||
ddlStatements = append(ddlStatements, sqlfmt.DropTableStmt(td.FromName))
|
||||
} else if td.IsAdd() {
|
||||
toPkSch, err := sqlutil.FromDoltSchema("", td.ToName, td.ToSch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stmt, err := diff.GenerateCreateTableStatement(td.ToName, td.ToSch, toPkSch, td.ToFks, td.ToFksParentSch)
|
||||
stmt, err := sqlfmt.GenerateCreateTableStatement(td.ToName, td.ToSch, td.ToFks, td.ToFksParentSch)
|
||||
if err != nil {
|
||||
return nil, errhand.VerboseErrorFromError(err)
|
||||
}
|
||||
|
||||
@@ -101,7 +101,7 @@ func resolveProllyConflicts(ctx *sql.Context, tbl *doltdb.Table, tblName string,
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mutIdxs, err := merge.GetMutableSecondaryIdxs(ctx, sch, idxSet)
|
||||
mutIdxs, err := merge.GetMutableSecondaryIdxs(ctx, sch, tblName, idxSet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -22,12 +22,11 @@ import (
|
||||
"github.com/dolthub/go-mysql-server/sql"
|
||||
"github.com/dolthub/go-mysql-server/sql/types"
|
||||
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/diff"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/merge"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlfmt"
|
||||
noms "github.com/dolthub/dolt/go/store/types"
|
||||
)
|
||||
|
||||
@@ -193,11 +192,7 @@ func newSchemaConflict(ctx context.Context, table string, baseRoot *doltdb.RootV
|
||||
}
|
||||
|
||||
func getCreateTableStatement(table string, sch schema.Schema, fks []doltdb.ForeignKey, parents map[string]schema.Schema) (string, error) {
|
||||
pkSch, err := sqlutil.FromDoltSchema("", table, sch)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return diff.GenerateCreateTableStatement(table, sch, pkSch, fks, parents)
|
||||
return sqlfmt.GenerateCreateTableStatement(table, sch, fks, parents)
|
||||
}
|
||||
|
||||
func getSchemaConflictDescription(ctx context.Context, table string, base, ours, theirs schema.Schema) (string, error) {
|
||||
|
||||
@@ -118,7 +118,28 @@ func TestSingleQuery(t *testing.T) {
|
||||
func TestSingleScript(t *testing.T) {
|
||||
t.Skip()
|
||||
|
||||
var scripts = []queries.ScriptTest{}
|
||||
var scripts = []queries.ScriptTest{
|
||||
{
|
||||
Name: "physical columns added after virtual one",
|
||||
SetUpScript: []string{
|
||||
"create table t (pk int primary key, col1 int as (pk + 1));",
|
||||
"insert into t (pk) values (1), (3)",
|
||||
"alter table t add index idx1 (col1, pk);",
|
||||
"alter table t add index idx2 (col1);",
|
||||
"alter table t add column col2 int;",
|
||||
"alter table t add column col3 int;",
|
||||
"insert into t (pk, col2, col3) values (2, 4, 5);",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "select * from t where col1 = 2",
|
||||
Expected: []sql.Row{
|
||||
{1, 2, nil, nil},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
tcc := &testCommitClock{}
|
||||
cleanup := installTestCommitClock(tcc)
|
||||
@@ -133,8 +154,8 @@ func TestSingleScript(t *testing.T) {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
engine.EngineAnalyzer().Debug = true
|
||||
engine.EngineAnalyzer().Verbose = true
|
||||
// engine.EngineAnalyzer().Debug = true
|
||||
// engine.EngineAnalyzer().Verbose = true
|
||||
|
||||
enginetest.TestScriptWithEngine(t, engine, harness, script)
|
||||
return nil
|
||||
@@ -154,40 +175,134 @@ func TestSingleMergeScript(t *testing.T) {
|
||||
t.Skip()
|
||||
var scripts = []MergeScriptTest{
|
||||
{
|
||||
Name: "adding a non-null column with a default value to one side",
|
||||
Name: "adding generated column to one side, non-generated column to other side",
|
||||
AncSetUpScript: []string{
|
||||
"set dolt_force_transaction_commit = on;",
|
||||
"create table t (pk int primary key, col1 int);",
|
||||
"insert into t values (1, 1);",
|
||||
"create table t (pk int primary key);",
|
||||
"insert into t values (1), (2);",
|
||||
},
|
||||
RightSetUpScript: []string{
|
||||
"alter table t add column col2 int not null default 0",
|
||||
"alter table t add column col3 int;",
|
||||
"insert into t values (2, 2, 2, null);",
|
||||
"alter table t add column col2 varchar(100);",
|
||||
"insert into t (pk, col2) values (3, '3hello'), (4, '4hello');",
|
||||
"alter table t add index (col2);",
|
||||
},
|
||||
LeftSetUpScript: []string{
|
||||
"insert into t values (3, 3);",
|
||||
"alter table t add column col1 int default (pk + 100);",
|
||||
"insert into t (pk) values (5), (6);",
|
||||
"alter table t add index (col1);",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "call dolt_merge('right');",
|
||||
Expected: []sql.Row{{0, 0}},
|
||||
Expected: []sql.Row{{doltCommit, 0, 0}},
|
||||
},
|
||||
{
|
||||
Query: "select * from t;",
|
||||
Expected: []sql.Row{{1, 1, 0, nil}, {2, 2, 2, nil}, {3, 3, 0, nil}},
|
||||
},
|
||||
{
|
||||
Query: "select pk, violation_type from dolt_constraint_violations_t",
|
||||
Expected: []sql.Row{},
|
||||
Query: "select pk, col1, col2 from t;",
|
||||
Expected: []sql.Row{
|
||||
{1, 101, nil},
|
||||
{2, 102, nil},
|
||||
{3, 103, "3hello"},
|
||||
{4, 104, "4hello"},
|
||||
{5, 105, nil},
|
||||
{6, 106, nil},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
// {
|
||||
// Name: "adding generated columns to both sides",
|
||||
// AncSetUpScript: []string{
|
||||
// "create table t (pk int primary key);",
|
||||
// "insert into t values (1), (2);",
|
||||
// },
|
||||
// RightSetUpScript: []string{
|
||||
// "alter table t add column col2 varchar(100) as (concat(pk, 'hello'));",
|
||||
// "insert into t (pk) values (3), (4);",
|
||||
// "alter table t add index (col2);",
|
||||
// },
|
||||
// LeftSetUpScript: []string{
|
||||
// "alter table t add column col1 int as (pk + 100) stored;",
|
||||
// "insert into t (pk) values (5), (6);",
|
||||
// "alter table t add index (col1);",
|
||||
// },
|
||||
// Assertions: []queries.ScriptTestAssertion{
|
||||
// {
|
||||
// Query: "call dolt_merge('right');",
|
||||
// Expected: []sql.Row{{doltCommit, 0, 0}},
|
||||
// },
|
||||
// {
|
||||
// Query: "select pk, col1, col2 from t;",
|
||||
// Expected: []sql.Row{
|
||||
// {1, 101, "1hello"},
|
||||
// {2, 102, "2hello"},
|
||||
// {3, 103, "3hello"},
|
||||
// {4, 104, "4hello"},
|
||||
// {5, 105, "5hello"},
|
||||
// {6, 106, "6hello"},
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// {
|
||||
// Name: "adding a column with a literal default value",
|
||||
// AncSetUpScript: []string{
|
||||
// "CREATE table t (pk int primary key);",
|
||||
// "INSERT into t values (1);",
|
||||
// },
|
||||
// RightSetUpScript: []string{
|
||||
// "alter table t add column c1 varchar(100) default ('hello');",
|
||||
// "insert into t values (2, 'hi');",
|
||||
// "alter table t add index idx1 (c1, pk);",
|
||||
// },
|
||||
// LeftSetUpScript: []string{
|
||||
// "insert into t values (3);",
|
||||
// },
|
||||
// Assertions: []queries.ScriptTestAssertion{
|
||||
// {
|
||||
// Query: "call dolt_merge('right');",
|
||||
// Expected: []sql.Row{{doltCommit, 0, 0}},
|
||||
// },
|
||||
// {
|
||||
// Query: "select * from t;",
|
||||
// Expected: []sql.Row{{1, "hello"}, {2, "hi"}, {3, "hello"}},
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// {
|
||||
// Name: "check constraint violation - right side violates new check constraint",
|
||||
// AncSetUpScript: []string{
|
||||
// "set autocommit = 0;",
|
||||
// "CREATE table t (pk int primary key, col00 int, col01 int, col1 varchar(100) default ('hello'));",
|
||||
// "INSERT into t values (1, 0, 0, 'hi');",
|
||||
// "alter table t add index idx1 (col1);",
|
||||
// },
|
||||
// RightSetUpScript: []string{
|
||||
// "insert into t values (2, 0, 0, DEFAULT);",
|
||||
// },
|
||||
// LeftSetUpScript: []string{
|
||||
// "alter table t drop column col00;",
|
||||
// "alter table t drop column col01;",
|
||||
// "alter table t add constraint CHECK (col1 != concat('he', 'llo'))",
|
||||
// },
|
||||
// Assertions: []queries.ScriptTestAssertion{
|
||||
// {
|
||||
// Query: "call dolt_merge('right');",
|
||||
// Expected: []sql.Row{{"", 0, 1}},
|
||||
// },
|
||||
// {
|
||||
// Query: "select * from dolt_constraint_violations;",
|
||||
// Expected: []sql.Row{{"t", uint64(1)}},
|
||||
// },
|
||||
// {
|
||||
// Query: `select violation_type, pk, col1, violation_info like "\%NOT((col1 = concat('he','llo')))\%" from dolt_constraint_violations_t;`,
|
||||
// Expected: []sql.Row{{uint64(3), 2, "hello", true}},
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
}
|
||||
for _, test := range scripts {
|
||||
t.Run("merge right into left", func(t *testing.T) {
|
||||
enginetest.TestScript(t, newDoltHarness(t), convertMergeScriptTest(test, false))
|
||||
})
|
||||
// t.Run("merge right into left", func(t *testing.T) {
|
||||
// enginetest.TestScript(t, newDoltHarness(t), convertMergeScriptTest(test, false))
|
||||
// })
|
||||
t.Run("merge left into right", func(t *testing.T) {
|
||||
enginetest.TestScript(t, newDoltHarness(t), convertMergeScriptTest(test, true))
|
||||
})
|
||||
@@ -480,7 +595,9 @@ func TestInsertIntoErrors(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGeneratedColumns(t *testing.T) {
|
||||
enginetest.TestGeneratedColumns(t, newDoltHarness(t))
|
||||
enginetest.TestGeneratedColumns(t,
|
||||
// virtual indexes are failing for certain lookups on this test
|
||||
newDoltHarness(t).WithSkippedQueries([]string{"create table t (pk int primary key, col1 int as (pk + 1));"}))
|
||||
|
||||
for _, script := range GeneratedColumnMergeTestScripts {
|
||||
func() {
|
||||
@@ -2810,6 +2927,7 @@ func TestThreeWayMergeWithSchemaChangeScripts(t *testing.T) {
|
||||
runMergeScriptTestsInBothDirections(t, SchemaChangeTestsCollations, "collation changes", false)
|
||||
runMergeScriptTestsInBothDirections(t, SchemaChangeTestsConstraints, "constraint changes", false)
|
||||
runMergeScriptTestsInBothDirections(t, SchemaChangeTestsSchemaConflicts, "schema conflicts", false)
|
||||
runMergeScriptTestsInBothDirections(t, SchemaChangeTestsGeneratedColumns, "generated columns", false)
|
||||
|
||||
// Run non-symmetric schema merge tests in just one direction
|
||||
t.Run("type changes", func(t *testing.T) {
|
||||
@@ -2831,6 +2949,7 @@ func TestThreeWayMergeWithSchemaChangeScriptsPrepared(t *testing.T) {
|
||||
runMergeScriptTestsInBothDirections(t, SchemaChangeTestsCollations, "collation changes", true)
|
||||
runMergeScriptTestsInBothDirections(t, SchemaChangeTestsConstraints, "constraint changes", true)
|
||||
runMergeScriptTestsInBothDirections(t, SchemaChangeTestsSchemaConflicts, "schema conflicts", true)
|
||||
runMergeScriptTestsInBothDirections(t, SchemaChangeTestsGeneratedColumns, "generated columns", true)
|
||||
|
||||
// Run non-symmetric schema merge tests in just one direction
|
||||
t.Run("type changes", func(t *testing.T) {
|
||||
|
||||
@@ -4291,8 +4291,8 @@ var GeneratedColumnMergeTestScripts = []queries.ScriptTest{
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "call dolt_merge('branch1')",
|
||||
SkipResultsCheck: true,
|
||||
Query: "call dolt_merge('branch1')",
|
||||
Expected: []sql.Row{{doltCommit, 1, 0}},
|
||||
},
|
||||
{
|
||||
Query: "select * from t1 order by id",
|
||||
@@ -4307,8 +4307,8 @@ var GeneratedColumnMergeTestScripts = []queries.ScriptTest{
|
||||
Expected: []sql.Row{{3}},
|
||||
},
|
||||
{
|
||||
Query: "call dolt_merge('branch2')",
|
||||
SkipResultsCheck: true,
|
||||
Query: "call dolt_merge('branch2')",
|
||||
Expected: []sql.Row{{doltCommit, 0, 0}},
|
||||
},
|
||||
{
|
||||
Query: "select * from t1 order by id",
|
||||
@@ -4325,6 +4325,55 @@ var GeneratedColumnMergeTestScripts = []queries.ScriptTest{
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "merge a generated column with non-conflicting changes on both sides",
|
||||
SetUpScript: []string{
|
||||
"create table t1 (id bigint primary key, v1 bigint, v2 bigint, v3 bigint as (v1 + v2) stored)",
|
||||
"insert into t1 (id, v1, v2) values (1, 1, 1), (2, 2, 2)",
|
||||
"call dolt_commit('-Am', 'first commit')",
|
||||
"call dolt_branch('branch1')",
|
||||
"call dolt_branch('branch2')",
|
||||
"call dolt_checkout('branch1')",
|
||||
"update t1 set v1 = 4 where id = 1",
|
||||
"call dolt_commit('-Am', 'branch1 commit')",
|
||||
"call dolt_checkout('branch2')",
|
||||
"update t1 set v2 = 5 where id = 1",
|
||||
"call dolt_commit('-Am', 'branch2 commit')",
|
||||
"call dolt_checkout('main')",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "call dolt_merge('branch1')",
|
||||
Expected: []sql.Row{{doltCommit, 1, 0}},
|
||||
},
|
||||
{
|
||||
Query: "select * from t1 order by id",
|
||||
Expected: []sql.Row{
|
||||
{1, 4, 1, 5},
|
||||
{2, 2, 2, 4},
|
||||
},
|
||||
},
|
||||
{
|
||||
Query: "select id from t1 where v3 = 5",
|
||||
Expected: []sql.Row{{1}},
|
||||
},
|
||||
{
|
||||
Query: "call dolt_merge('branch2')",
|
||||
Expected: []sql.Row{{doltCommit, 0, 0}},
|
||||
},
|
||||
{
|
||||
Query: "select * from t1 order by id",
|
||||
Expected: []sql.Row{
|
||||
{1, 4, 5, 9},
|
||||
{2, 2, 2, 4},
|
||||
},
|
||||
},
|
||||
{
|
||||
Query: "select id from t1 where v3 = 9",
|
||||
Expected: []sql.Row{{1}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "merge a generated column created on another branch",
|
||||
SetUpScript: []string{
|
||||
@@ -4343,8 +4392,8 @@ var GeneratedColumnMergeTestScripts = []queries.ScriptTest{
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "call dolt_merge('branch1')",
|
||||
SkipResultsCheck: true,
|
||||
Query: "call dolt_merge('branch1')",
|
||||
Expected: []sql.Row{{doltCommit, 0, 0}},
|
||||
},
|
||||
{
|
||||
Query: "select * from t1 order by id",
|
||||
@@ -4354,12 +4403,10 @@ var GeneratedColumnMergeTestScripts = []queries.ScriptTest{
|
||||
{3, 3, 3, 6},
|
||||
{4, 4, 4, 8},
|
||||
},
|
||||
Skip: true,
|
||||
},
|
||||
{
|
||||
Query: "select id from t1 where v3 = 6",
|
||||
Expected: []sql.Row{{3}},
|
||||
Skip: true,
|
||||
},
|
||||
{
|
||||
Query: "select id from t1 where v3 = 8",
|
||||
@@ -4371,56 +4418,50 @@ var GeneratedColumnMergeTestScripts = []queries.ScriptTest{
|
||||
Name: "merge a virtual column",
|
||||
SetUpScript: []string{
|
||||
"create table t1 (id bigint primary key, v1 bigint, v2 bigint, v3 bigint as (v1 + v2), index (v3))",
|
||||
"insert into t1 (id, v1, v2) values (1, 1, 1), (2, 2, 2)",
|
||||
"insert into t1 (id, v1, v2) values (1, 2, 3), (4, 5, 6)",
|
||||
"call dolt_commit('-Am', 'first commit')",
|
||||
"call dolt_checkout('-b', 'branch1')",
|
||||
"insert into t1 (id, v1, v2) values (3, 3, 3)",
|
||||
"insert into t1 (id, v1, v2) values (7, 8, 9)",
|
||||
"call dolt_commit('-Am', 'branch1 commit')",
|
||||
"call dolt_checkout('main')",
|
||||
"call dolt_checkout('-b', 'branch2')",
|
||||
"insert into t1 (id, v1, v2) values (4, 4, 4)",
|
||||
"insert into t1 (id, v1, v2) values (10, 11, 12)",
|
||||
"call dolt_commit('-Am', 'branch2 commit')",
|
||||
"call dolt_checkout('main')",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "call dolt_merge('branch1')",
|
||||
SkipResultsCheck: true,
|
||||
Skip: true,
|
||||
Query: "call dolt_merge('branch1')",
|
||||
Expected: []sql.Row{{doltCommit, 1, 0}},
|
||||
},
|
||||
{
|
||||
Query: "select * from t1 order by id",
|
||||
Expected: []sql.Row{
|
||||
{1, 1, 1, 2},
|
||||
{2, 2, 2, 4},
|
||||
{3, 3, 3, 6},
|
||||
{1, 2, 3, 5},
|
||||
{4, 5, 6, 11},
|
||||
{7, 8, 9, 17},
|
||||
},
|
||||
Skip: true,
|
||||
},
|
||||
{
|
||||
Query: "select id from t1 where v3 = 6",
|
||||
Expected: []sql.Row{{3}},
|
||||
Skip: true,
|
||||
Query: "select id from t1 where v3 = 17",
|
||||
Expected: []sql.Row{{7}},
|
||||
},
|
||||
{
|
||||
Query: "call dolt_merge('branch2')",
|
||||
SkipResultsCheck: true,
|
||||
Skip: true,
|
||||
Query: "call dolt_merge('branch2')",
|
||||
Expected: []sql.Row{{doltCommit, 0, 0}},
|
||||
},
|
||||
{
|
||||
Query: "select * from t1 order by id",
|
||||
Expected: []sql.Row{
|
||||
{1, 1, 1, 2},
|
||||
{2, 2, 2, 4},
|
||||
{3, 3, 3, 6},
|
||||
{4, 4, 4, 8},
|
||||
{1, 2, 3, 5},
|
||||
{4, 5, 6, 11},
|
||||
{7, 8, 9, 17},
|
||||
{10, 11, 12, 23},
|
||||
},
|
||||
Skip: true,
|
||||
},
|
||||
{
|
||||
Query: "select id from t1 where v3 = 8",
|
||||
Expected: []sql.Row{{4}},
|
||||
Skip: true,
|
||||
Query: "select id from t1 where v3 = 23",
|
||||
Expected: []sql.Row{{10}},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -4428,57 +4469,39 @@ var GeneratedColumnMergeTestScripts = []queries.ScriptTest{
|
||||
Name: "merge a virtual column created on another branch",
|
||||
SetUpScript: []string{
|
||||
"create table t1 (id bigint primary key, v1 bigint, v2 bigint)",
|
||||
"insert into t1 (id, v1, v2) values (1, 1, 1), (2, 2, 2)",
|
||||
"insert into t1 (id, v1, v2) values (1, 2, 3), (4, 5, 6)",
|
||||
"call dolt_commit('-Am', 'first commit')",
|
||||
"call dolt_branch('branch1')",
|
||||
"insert into t1 (id, v1, v2) values (3, 3, 3)",
|
||||
"insert into t1 (id, v1, v2) values (7, 8, 9)",
|
||||
"call dolt_commit('-Am', 'main commit')",
|
||||
"call dolt_checkout('branch1')",
|
||||
"alter table t1 add column v3 bigint as (v1 + v2)",
|
||||
"alter table t1 add key idx_v3 (v3)",
|
||||
"insert into t1 (id, v1, v2) values (4, 4, 4)",
|
||||
"insert into t1 (id, v1, v2) values (10, 11, 12)",
|
||||
"call dolt_commit('-Am', 'branch1 commit')",
|
||||
"call dolt_checkout('main')",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "call dolt_merge('branch1')",
|
||||
SkipResultsCheck: true,
|
||||
Skip: true,
|
||||
Query: "call dolt_merge('branch1')",
|
||||
Expected: []sql.Row{{doltCommit, 0, 0}},
|
||||
},
|
||||
{
|
||||
Query: "select * from t1 order by id",
|
||||
Expected: []sql.Row{
|
||||
{1, 1, 1, 2},
|
||||
{2, 2, 2, 4},
|
||||
{3, 3, 3, 6},
|
||||
{1, 2, 3, 5},
|
||||
{4, 5, 6, 11},
|
||||
{7, 8, 9, 17},
|
||||
{10, 11, 12, 23},
|
||||
},
|
||||
Skip: true,
|
||||
},
|
||||
{
|
||||
Query: "select id from t1 where v3 = 6",
|
||||
Expected: []sql.Row{{3}},
|
||||
Skip: true,
|
||||
Query: "select id from t1 where v3 = 17",
|
||||
Expected: []sql.Row{{7}},
|
||||
},
|
||||
{
|
||||
Query: "call dolt_merge('branch2')",
|
||||
SkipResultsCheck: true,
|
||||
Skip: true,
|
||||
},
|
||||
{
|
||||
Query: "select * from t1 order by id",
|
||||
Expected: []sql.Row{
|
||||
{1, 1, 1, 2},
|
||||
{2, 2, 2, 4},
|
||||
{3, 3, 3, 6},
|
||||
{4, 4, 4, 8},
|
||||
},
|
||||
Skip: true,
|
||||
},
|
||||
{
|
||||
Query: "select id from t1 where v3 = 8",
|
||||
Expected: []sql.Row{{4}},
|
||||
Skip: true,
|
||||
Query: "select id from t1 where v3 = 23",
|
||||
Expected: []sql.Row{{10}},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -324,6 +324,30 @@ var SchemaChangeTestsBasicCases = []MergeScriptTest{
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "right-side adds a column with a default value",
|
||||
AncSetUpScript: []string{
|
||||
"CREATE table t (pk int primary key, c1 varchar(100), c2 varchar(100));",
|
||||
"INSERT into t values ('1', 'BAD', 'hello');",
|
||||
},
|
||||
RightSetUpScript: []string{
|
||||
"alter table t add column c3 varchar(100) default (CONCAT(c2, c1, 'default'));",
|
||||
"insert into t values ('2', 'BAD', 'hello', 'hi');",
|
||||
},
|
||||
LeftSetUpScript: []string{
|
||||
"insert into t values ('3', 'BAD', 'hi');",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "call dolt_merge('right');",
|
||||
Expected: []sql.Row{{doltCommit, 0, 0}},
|
||||
},
|
||||
{
|
||||
Query: "select * from t order by pk;",
|
||||
Expected: []sql.Row{{1, "BAD", "hello", "helloBADdefault"}, {2, "BAD", "hello", "hi"}, {3, "BAD", "hi", "hiBADdefault"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "adding different columns to both sides",
|
||||
AncSetUpScript: []string{
|
||||
@@ -357,6 +381,73 @@ var SchemaChangeTestsBasicCases = []MergeScriptTest{
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "adding columns with default values to both sides",
|
||||
AncSetUpScript: []string{
|
||||
"create table t (pk int primary key);",
|
||||
"insert into t values (1), (2);",
|
||||
"alter table t add index idx1 (pk);",
|
||||
},
|
||||
RightSetUpScript: []string{
|
||||
"alter table t add column col2 varchar(100) default 'abc'",
|
||||
"insert into t values (3, '300'), (4, '400');",
|
||||
},
|
||||
LeftSetUpScript: []string{
|
||||
"alter table t add column col1 int default 101;",
|
||||
"insert into t values (5, 50), (6, 60);",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "call dolt_merge('right');",
|
||||
Expected: []sql.Row{{doltCommit, 0, 0}},
|
||||
},
|
||||
{
|
||||
Query: "select pk, col1, col2 from t;",
|
||||
Expected: []sql.Row{
|
||||
{1, 101, "abc"},
|
||||
{2, 101, "abc"},
|
||||
{3, 101, "300"},
|
||||
{4, 101, "400"},
|
||||
{5, 50, "abc"},
|
||||
{6, 60, "abc"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "adding indexed columns to both sides",
|
||||
AncSetUpScript: []string{
|
||||
"create table t (pk int primary key);",
|
||||
"insert into t values (1), (2);",
|
||||
},
|
||||
RightSetUpScript: []string{
|
||||
"alter table t add column col2 varchar(100);",
|
||||
"insert into t (pk, col2) values (3, '3hello'), (4, '4hello');",
|
||||
"alter table t add index (col2);",
|
||||
},
|
||||
LeftSetUpScript: []string{
|
||||
"alter table t add column col1 int default (pk + 100);",
|
||||
"insert into t (pk) values (5), (6);",
|
||||
"alter table t add index (col1);",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "call dolt_merge('right');",
|
||||
Expected: []sql.Row{{doltCommit, 0, 0}},
|
||||
},
|
||||
{
|
||||
Query: "select pk, col1, col2 from t;",
|
||||
Expected: []sql.Row{
|
||||
{1, 101, nil},
|
||||
{2, 102, nil},
|
||||
{3, 103, "3hello"},
|
||||
{4, 104, "4hello"},
|
||||
{5, 105, nil},
|
||||
{6, 106, nil},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
// TODO: Need another test with a different type for the same column name, and verify it's an error?
|
||||
Name: "dropping and adding a column with the same name",
|
||||
@@ -2003,3 +2094,274 @@ var SchemaChangeTestsSchemaConflicts = []MergeScriptTest{
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var SchemaChangeTestsGeneratedColumns = []MergeScriptTest{
|
||||
{
|
||||
Name: "reordering a column",
|
||||
AncSetUpScript: []string{
|
||||
"CREATE table t (pk int primary key, col1 int, col2 varchar(100) as (concat(col1, 'hello')) stored);",
|
||||
"INSERT into t (pk, col1) values (1, 10), (2, 20);",
|
||||
"alter table t add index idx1 (pk, col1);",
|
||||
"alter table t add index idx2 (col2);",
|
||||
"alter table t add index idx3 (pk, col1, col2);",
|
||||
"alter table t add index idx4 (col1, col2);",
|
||||
"alter table t add index idx5 (col2, col1);",
|
||||
"alter table t add index idx6 (col2, pk, col1);",
|
||||
},
|
||||
RightSetUpScript: []string{
|
||||
"alter table t modify col1 int after col2;",
|
||||
"insert into t (pk, col1) values (3, 30), (4, 40);",
|
||||
},
|
||||
LeftSetUpScript: []string{
|
||||
"insert into t (pk, col1) values (5, 50), (6, 60);",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "call dolt_merge('right');",
|
||||
Expected: []sql.Row{{doltCommit, 0, 0}},
|
||||
},
|
||||
{
|
||||
Query: "select pk, col1, col2 from t;",
|
||||
Expected: []sql.Row{
|
||||
{1, 10, "10hello"}, {2, 20, "20hello"},
|
||||
{3, 30, "30hello"}, {4, 40, "40hello"},
|
||||
{5, 50, "50hello"}, {6, 60, "60hello"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "adding columns to a table with a virtual column",
|
||||
AncSetUpScript: []string{
|
||||
"create table t (pk int primary key, col1 int as (pk + 1));",
|
||||
"insert into t (pk) values (1);",
|
||||
"alter table t add index idx1 (col1, pk);",
|
||||
"alter table t add index idx2 (col1);",
|
||||
},
|
||||
RightSetUpScript: []string{
|
||||
"alter table t add column col2 int;",
|
||||
"alter table t add column col3 int;",
|
||||
"insert into t (pk, col2, col3) values (2, 4, 5);",
|
||||
},
|
||||
LeftSetUpScript: []string{
|
||||
"insert into t (pk) values (3);",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "call dolt_merge('right');",
|
||||
Expected: []sql.Row{{doltCommit, 0, 0}},
|
||||
},
|
||||
{
|
||||
Query: "select pk, col1, col2, col3 from t order by pk",
|
||||
Expected: []sql.Row{
|
||||
{1, 2, nil, nil},
|
||||
{2, 3, 4, 5},
|
||||
{3, 4, nil, nil}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "adding a virtual column to one side, regular columns to other side",
|
||||
AncSetUpScript: []string{
|
||||
"create table t (pk int primary key);",
|
||||
"insert into t (pk) values (1);",
|
||||
},
|
||||
RightSetUpScript: []string{
|
||||
"alter table t add column col1 int as (pk + 1)",
|
||||
"insert into t (pk) values (3);",
|
||||
"alter table t add index idx1 (col1, pk);",
|
||||
"alter table t add index idx2 (col1);",
|
||||
},
|
||||
LeftSetUpScript: []string{
|
||||
"alter table t add column col2 int;",
|
||||
"alter table t add column col3 int;",
|
||||
"insert into t (pk, col2, col3) values (2, 4, 5);",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "call dolt_merge('right');",
|
||||
Expected: []sql.Row{{doltCommit, 0, 0}},
|
||||
},
|
||||
{
|
||||
Query: "select pk, col1, col2, col3 from t;",
|
||||
Expected: []sql.Row{
|
||||
{1, 2, nil, nil},
|
||||
{2, 3, 4, 5},
|
||||
{3, 4, nil, nil},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "adding a virtual column to one side",
|
||||
AncSetUpScript: []string{
|
||||
"create table t (pk int primary key);",
|
||||
"insert into t (pk) values (1);",
|
||||
},
|
||||
RightSetUpScript: []string{
|
||||
"alter table t add column col1 int as (pk + 1)",
|
||||
"insert into t (pk) values (3);",
|
||||
"alter table t add index idx1 (col1, pk);",
|
||||
"alter table t add index idx2 (col1);",
|
||||
},
|
||||
LeftSetUpScript: []string{
|
||||
"insert into t (pk) values (2);",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "call dolt_merge('right');",
|
||||
Expected: []sql.Row{{doltCommit, 0, 0}},
|
||||
},
|
||||
{
|
||||
Query: "select pk, col1 from t;",
|
||||
Expected: []sql.Row{
|
||||
{1, 2},
|
||||
{2, 3},
|
||||
{3, 4},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "adding a stored generated column to one side",
|
||||
AncSetUpScript: []string{
|
||||
"create table t (pk int primary key);",
|
||||
"insert into t (pk) values (1);",
|
||||
},
|
||||
RightSetUpScript: []string{
|
||||
"alter table t add column col1 int as (pk + 1) stored",
|
||||
"insert into t (pk) values (3);",
|
||||
"alter table t add index idx1 (col1, pk);",
|
||||
"alter table t add index idx2 (col1);",
|
||||
},
|
||||
LeftSetUpScript: []string{
|
||||
"insert into t (pk) values (2);",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "call dolt_merge('right');",
|
||||
Expected: []sql.Row{{doltCommit, 0, 0}},
|
||||
},
|
||||
{
|
||||
Query: "select pk, col1 from t;",
|
||||
Expected: []sql.Row{
|
||||
{1, 2},
|
||||
{2, 3},
|
||||
{3, 4},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "adding generated columns to both sides",
|
||||
AncSetUpScript: []string{
|
||||
"create table t (pk int primary key);",
|
||||
"insert into t values (1), (2);",
|
||||
},
|
||||
RightSetUpScript: []string{
|
||||
"alter table t add column col2 varchar(100) as (concat(pk, 'hello'));",
|
||||
"insert into t (pk) values (3), (4);",
|
||||
"alter table t add index (col2);",
|
||||
},
|
||||
LeftSetUpScript: []string{
|
||||
"alter table t add column col1 int as (pk + 100) stored;",
|
||||
"insert into t (pk) values (5), (6);",
|
||||
"alter table t add index (col1);",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "call dolt_merge('right');",
|
||||
Expected: []sql.Row{{doltCommit, 0, 0}},
|
||||
Skip: true, // this fails merging right into left
|
||||
},
|
||||
{
|
||||
Query: "select pk, col1, col2 from t;",
|
||||
Expected: []sql.Row{
|
||||
{1, 101, "1hello"},
|
||||
{2, 102, "2hello"},
|
||||
{3, 103, "3hello"},
|
||||
{4, 104, "4hello"},
|
||||
{5, 105, "5hello"},
|
||||
{6, 106, "6hello"},
|
||||
},
|
||||
Skip: true, // this fails merging right into left
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "adding virtual columns to both sides",
|
||||
AncSetUpScript: []string{
|
||||
"create table t (pk int primary key);",
|
||||
"insert into t values (1), (2);",
|
||||
},
|
||||
RightSetUpScript: []string{
|
||||
"alter table t add column col2 varchar(100) as (concat(pk, 'hello'));",
|
||||
"insert into t (pk) values (3), (4);",
|
||||
"alter table t add index (col2);",
|
||||
},
|
||||
LeftSetUpScript: []string{
|
||||
"alter table t add column col1 int as (pk + 100);",
|
||||
"insert into t (pk) values (5), (6);",
|
||||
"alter table t add index (col1);",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "call dolt_merge('right');",
|
||||
Expected: []sql.Row{{doltCommit, 0, 0}},
|
||||
Skip: true, // this fails merging right into left
|
||||
},
|
||||
{
|
||||
Query: "select pk, col1, col2 from t;",
|
||||
Expected: []sql.Row{
|
||||
{1, 101, "1hello"},
|
||||
{2, 102, "2hello"},
|
||||
{3, 103, "3hello"},
|
||||
{4, 104, "4hello"},
|
||||
{5, 105, "5hello"},
|
||||
{6, 106, "6hello"},
|
||||
},
|
||||
Skip: true, // this fails merging right into left
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "convergent schema changes with virtual columns",
|
||||
AncSetUpScript: []string{
|
||||
"set autocommit = 0;",
|
||||
"CREATE table t (pk int primary key, col1 int);",
|
||||
"INSERT into t values (1, 10);",
|
||||
},
|
||||
RightSetUpScript: []string{
|
||||
"alter table t modify column col1 int not null;",
|
||||
"alter table t add column col3 int as (pk + 1);",
|
||||
"alter table t add index idx1 (col3, col1);",
|
||||
},
|
||||
LeftSetUpScript: []string{
|
||||
"alter table t modify column col1 int not null;",
|
||||
"alter table t add column col3 int as (pk + 1);",
|
||||
"alter table t add index idx1 (col3, col1);",
|
||||
"update t set col1=-1000 where t.pk = 1;",
|
||||
},
|
||||
Assertions: []queries.ScriptTestAssertion{
|
||||
{
|
||||
Query: "call dolt_merge('right');",
|
||||
Expected: []sql.Row{{doltCommit, 0, 0}},
|
||||
},
|
||||
{
|
||||
Query: "show create table t;",
|
||||
Skip: true, // there should be an index on col3, but there isn't
|
||||
Expected: []sql.Row{{"t",
|
||||
"CREATE TABLE `t` (\n" +
|
||||
" `pk` int NOT NULL,\n" +
|
||||
" `col1` int NOT NULL,\n" +
|
||||
" `col3` int GENERATED ALWAYS AS ((pk + 1)),\n" +
|
||||
" PRIMARY KEY (`pk`)\n" +
|
||||
" KEY `idx1` (`col3`,`col1`)\n" +
|
||||
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
|
||||
},
|
||||
{
|
||||
Query: "select * from t;",
|
||||
Expected: []sql.Row{{1, -1000, 2}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -464,9 +464,13 @@ func ordinalMappingsForSecondaryIndex(sch schema.Schema, def schema.Index) (ord
|
||||
}
|
||||
}
|
||||
vals := sch.GetNonPKCols().GetColumns()
|
||||
for j, col := range vals {
|
||||
for _, col := range vals {
|
||||
if col.Name == name {
|
||||
ord[i] = j + len(pks)
|
||||
storedIdx, ok := sch.GetNonPKCols().StoredIndexByTag(col.Tag)
|
||||
if !ok {
|
||||
panic("column " + name + " not found")
|
||||
}
|
||||
ord[i] = storedIdx + len(pks)
|
||||
}
|
||||
}
|
||||
if ord[i] < 0 {
|
||||
|
||||
@@ -405,15 +405,16 @@ func (lb *coveringLookupBuilder) NewRowIter(ctx *sql.Context, part sql.Partition
|
||||
return nil, err
|
||||
}
|
||||
return prollyCoveringIndexIter{
|
||||
idx: lb.idx,
|
||||
indexIter: rangeIter,
|
||||
keyDesc: lb.secKd,
|
||||
valDesc: lb.secVd,
|
||||
keyMap: lb.keyMap,
|
||||
valMap: lb.valMap,
|
||||
ordMap: lb.ordMap,
|
||||
sqlSch: lb.sch.Schema,
|
||||
ns: lb.ns,
|
||||
idx: lb.idx,
|
||||
indexIter: rangeIter,
|
||||
keyDesc: lb.secKd,
|
||||
valDesc: lb.secVd,
|
||||
keyMap: lb.keyMap,
|
||||
valMap: lb.valMap,
|
||||
ordMap: lb.ordMap,
|
||||
sqlSch: lb.sch.Schema,
|
||||
projections: lb.projections,
|
||||
ns: lb.ns,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -437,15 +438,16 @@ func (lb *nonCoveringLookupBuilder) NewRowIter(ctx *sql.Context, part sql.Partit
|
||||
return nil, err
|
||||
}
|
||||
return prollyIndexIter{
|
||||
idx: lb.idx,
|
||||
indexIter: rangeIter,
|
||||
primary: lb.pri,
|
||||
pkBld: lb.pkBld,
|
||||
pkMap: lb.pkMap,
|
||||
keyMap: lb.keyMap,
|
||||
valMap: lb.valMap,
|
||||
ordMap: lb.ordMap,
|
||||
sqlSch: lb.sch.Schema,
|
||||
idx: lb.idx,
|
||||
indexIter: rangeIter,
|
||||
primary: lb.pri,
|
||||
pkBld: lb.pkBld,
|
||||
pkMap: lb.pkMap,
|
||||
keyMap: lb.keyMap,
|
||||
valMap: lb.valMap,
|
||||
ordMap: lb.ordMap,
|
||||
sqlSch: lb.sch.Schema,
|
||||
projections: lb.projections,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -16,9 +16,20 @@ package index
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/dolthub/go-mysql-server/memory"
|
||||
"github.com/dolthub/go-mysql-server/sql"
|
||||
"github.com/dolthub/go-mysql-server/sql/analyzer"
|
||||
"github.com/dolthub/go-mysql-server/sql/expression"
|
||||
"github.com/dolthub/go-mysql-server/sql/plan"
|
||||
"github.com/dolthub/go-mysql-server/sql/planbuilder"
|
||||
"github.com/dolthub/go-mysql-server/sql/transform"
|
||||
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlfmt"
|
||||
"github.com/dolthub/dolt/go/store/pool"
|
||||
"github.com/dolthub/dolt/go/store/prolly"
|
||||
"github.com/dolthub/dolt/go/store/prolly/tree"
|
||||
"github.com/dolthub/dolt/go/store/val"
|
||||
)
|
||||
@@ -26,13 +37,13 @@ import (
|
||||
// NewSecondaryKeyBuilder creates a new SecondaryKeyBuilder instance that can build keys for the secondary index |def|.
|
||||
// The schema of the source table is defined in |sch|, and |idxDesc| describes the tuple layout for the index's keys
|
||||
// (index value tuples are not used).
|
||||
func NewSecondaryKeyBuilder(sch schema.Schema, def schema.Index, idxDesc val.TupleDesc, p pool.BuffPool, nodeStore tree.NodeStore) SecondaryKeyBuilder {
|
||||
func NewSecondaryKeyBuilder(ctx *sql.Context, tableName string, sch schema.Schema, def schema.Index, idxDesc val.TupleDesc, p pool.BuffPool, nodeStore tree.NodeStore) (SecondaryKeyBuilder, error) {
|
||||
b := SecondaryKeyBuilder{
|
||||
builder: val.NewTupleBuilder(idxDesc),
|
||||
pool: p,
|
||||
nodeStore: nodeStore,
|
||||
sch: sch,
|
||||
def: def,
|
||||
indexDef: def,
|
||||
}
|
||||
|
||||
keyless := schema.IsKeyless(sch)
|
||||
@@ -44,10 +55,24 @@ func NewSecondaryKeyBuilder(sch schema.Schema, def schema.Index, idxDesc val.Tup
|
||||
}
|
||||
|
||||
b.mapping = make(val.OrdinalMapping, len(def.AllTags()))
|
||||
var virtualExpressions []sql.Expression
|
||||
for i, tag := range def.AllTags() {
|
||||
j, ok := sch.GetPKCols().TagToIdx[tag]
|
||||
if !ok {
|
||||
if keyless {
|
||||
col := sch.GetNonPKCols().TagToCol[tag]
|
||||
if col.Virtual {
|
||||
if len(virtualExpressions) == 0 {
|
||||
virtualExpressions = make([]sql.Expression, len(def.AllTags()))
|
||||
}
|
||||
|
||||
expr, err := ResolveDefaultExpression(ctx, tableName, sch, col)
|
||||
if err != nil {
|
||||
return SecondaryKeyBuilder{}, err
|
||||
}
|
||||
|
||||
virtualExpressions[i] = expr
|
||||
j = -1
|
||||
} else if keyless {
|
||||
// Skip cardinality column
|
||||
j = b.split + 1 + sch.GetNonPKCols().TagToIdx[tag]
|
||||
} else {
|
||||
@@ -57,20 +82,102 @@ func NewSecondaryKeyBuilder(sch schema.Schema, def schema.Index, idxDesc val.Tup
|
||||
b.mapping[i] = j
|
||||
}
|
||||
|
||||
b.virtualExpressions = virtualExpressions
|
||||
|
||||
if keyless {
|
||||
// last key in index is hash which is the only column in the key
|
||||
b.mapping = append(b.mapping, 0)
|
||||
}
|
||||
return b
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// ResolveDefaultExpression returns a sql.Expression for the column default or generated expression for the
|
||||
// column provided
|
||||
func ResolveDefaultExpression(ctx *sql.Context, tableName string, sch schema.Schema, col schema.Column) (sql.Expression, error) {
|
||||
ct, err := parseCreateTable(ctx, tableName, sch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
colIdx := ct.CreateSchema.Schema.IndexOfColName(col.Name)
|
||||
if colIdx < 0 {
|
||||
return nil, fmt.Errorf("unable to find column %s in analyzed query", col.Name)
|
||||
}
|
||||
|
||||
sqlCol := ct.CreateSchema.Schema[colIdx]
|
||||
expr := sqlCol.Default
|
||||
if expr == nil || expr.Expr == nil {
|
||||
expr = sqlCol.Generated
|
||||
}
|
||||
|
||||
if expr == nil || expr.Expr == nil {
|
||||
return nil, fmt.Errorf("unable to find default or generated expression")
|
||||
}
|
||||
|
||||
return expr.Expr, nil
|
||||
}
|
||||
|
||||
// ResolveCheckExpression returns a sql.Expression for the check provided
|
||||
func ResolveCheckExpression(ctx *sql.Context, tableName string, sch schema.Schema, checkExpr string) (sql.Expression, error) {
|
||||
ct, err := parseCreateTable(ctx, tableName, sch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, check := range ct.Checks() {
|
||||
if stripTableNamesFromExpression(check.Expr).String() == checkExpr {
|
||||
return check.Expr, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unable to find check expression")
|
||||
}
|
||||
|
||||
func stripTableNamesFromExpression(expr sql.Expression) sql.Expression {
|
||||
e, _, _ := transform.Expr(expr, func(e sql.Expression) (sql.Expression, transform.TreeIdentity, error) {
|
||||
if col, ok := e.(*expression.GetField); ok {
|
||||
return col.WithTable(""), transform.NewTree, nil
|
||||
}
|
||||
return e, transform.SameTree, nil
|
||||
})
|
||||
return e
|
||||
}
|
||||
|
||||
func parseCreateTable(ctx *sql.Context, tableName string, sch schema.Schema) (*plan.CreateTable, error) {
|
||||
createTable, err := sqlfmt.GenerateCreateTableStatement(tableName, sch, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
query := createTable
|
||||
|
||||
mockDatabase := memory.NewDatabase("mydb")
|
||||
mockProvider := memory.NewDBProvider(mockDatabase)
|
||||
catalog := analyzer.NewCatalog(mockProvider)
|
||||
parseCtx := sql.NewEmptyContext()
|
||||
parseCtx.SetCurrentDatabase("mydb")
|
||||
|
||||
pseudoAnalyzedQuery, err := planbuilder.Parse(parseCtx, catalog, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ct, ok := pseudoAnalyzedQuery.(*plan.CreateTable)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("expected a *plan.CreateTable node, but got %T", pseudoAnalyzedQuery)
|
||||
}
|
||||
return ct, nil
|
||||
}
|
||||
|
||||
type SecondaryKeyBuilder struct {
|
||||
// sch holds the schema of the table on which the secondary index is created
|
||||
sch schema.Schema
|
||||
// def holds the definition of the secondary index
|
||||
def schema.Index
|
||||
// indexDef holds the definition of the secondary index
|
||||
indexDef schema.Index
|
||||
// mapping defines how to map fields from the source table's schema to this index's tuple layout
|
||||
mapping val.OrdinalMapping
|
||||
// virtualExpressions holds the expressions for virtual columns in the index, nil for non-virtual indexes
|
||||
virtualExpressions []sql.Expression
|
||||
// split marks the index in the secondary index's key tuple that splits the main table's
|
||||
// key fields from the main table's value fields.
|
||||
split int
|
||||
@@ -83,7 +190,30 @@ type SecondaryKeyBuilder struct {
|
||||
func (b SecondaryKeyBuilder) SecondaryKeyFromRow(ctx context.Context, k, v val.Tuple) (val.Tuple, error) {
|
||||
for to := range b.mapping {
|
||||
from := b.mapping.MapOrdinal(to)
|
||||
if from < b.split {
|
||||
if from == -1 {
|
||||
// the "from" field is a virtual column
|
||||
expr := b.virtualExpressions[to]
|
||||
sqlCtx, ok := ctx.(*sql.Context)
|
||||
if !ok {
|
||||
sqlCtx = sql.NewContext(ctx)
|
||||
}
|
||||
|
||||
sqlRow, err := BuildRow(sqlCtx, k, v, b.sch, b.nodeStore)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
value, err := expr.Eval(sqlCtx, sqlRow)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO: type conversion
|
||||
err = PutField(ctx, b.nodeStore, b.builder, to, value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if from < b.split {
|
||||
// the "from" field comes from the key tuple fields
|
||||
// NOTE: Because we are using Tuple.GetField and TupleBuilder.PutRaw, we are not
|
||||
// interpreting the tuple data at all and just copying the bytes. This should work
|
||||
@@ -104,8 +234,8 @@ func (b SecondaryKeyBuilder) SecondaryKeyFromRow(ctx context.Context, k, v val.T
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(b.def.PrefixLengths()) > to {
|
||||
value = val.TrimValueToPrefixLength(value, b.def.PrefixLengths()[to])
|
||||
if len(b.indexDef.PrefixLengths()) > to {
|
||||
value = val.TrimValueToPrefixLength(value, b.indexDef.PrefixLengths()[to])
|
||||
}
|
||||
|
||||
err = PutField(ctx, b.nodeStore, b.builder, to, value)
|
||||
@@ -118,6 +248,13 @@ func (b SecondaryKeyBuilder) SecondaryKeyFromRow(ctx context.Context, k, v val.T
|
||||
return b.builder.Build(b.pool), nil
|
||||
}
|
||||
|
||||
// BuildRow returns a sql.Row for the given key/value tuple pair
|
||||
func BuildRow(ctx *sql.Context, key, value val.Tuple, sch schema.Schema, ns tree.NodeStore) (sql.Row, error) {
|
||||
prollyIter := prolly.NewPointLookup(key, value)
|
||||
rowIter := NewProllyRowIterForSchema(sch, prollyIter, sch.GetKeyDescriptor(), sch.GetValueDescriptor(), sch.GetAllCols().Tags, ns)
|
||||
return rowIter.Next(ctx)
|
||||
}
|
||||
|
||||
// canCopyRawBytes returns true if the bytes for |idxField| can
|
||||
// be copied directly. This is a faster way to populate an index
|
||||
// but requires that no data transformation is needed. For example,
|
||||
@@ -127,7 +264,7 @@ func (b SecondaryKeyBuilder) SecondaryKeyFromRow(ctx context.Context, k, v val.T
|
||||
func (b SecondaryKeyBuilder) canCopyRawBytes(idxField int) bool {
|
||||
if b.builder.Desc.Types[idxField].Enc == val.CellEnc {
|
||||
return false
|
||||
} else if len(b.def.PrefixLengths()) > idxField && b.def.PrefixLengths()[idxField] > 0 {
|
||||
} else if len(b.indexDef.PrefixLengths()) > idxField && b.indexDef.PrefixLengths()[idxField] > 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
|
||||
@@ -155,7 +155,8 @@ func TestGetIndexKeyMapping(t *testing.T) {
|
||||
}
|
||||
allTags := append(idxTags, sch.GetPKCols().Tags...)
|
||||
idx := schema.NewIndex("test_idx", idxTags, allTags, nil, schema.IndexProperties{})
|
||||
b := NewSecondaryKeyBuilder(sch, idx, val.TupleDesc{}, nil, nil)
|
||||
b, err := NewSecondaryKeyBuilder(nil, "", sch, idx, val.TupleDesc{}, nil, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.Split, b.split)
|
||||
require.Equal(t, tt.Mapping, b.mapping)
|
||||
})
|
||||
|
||||
@@ -42,9 +42,10 @@ type prollyIndexIter struct {
|
||||
// keyMap and valMap transform tuples from
|
||||
// primary row storage into sql.Row's
|
||||
keyMap, valMap val.OrdinalMapping
|
||||
//ordMap are output ordinals for |keyMap| and |valMap|
|
||||
ordMap val.OrdinalMapping
|
||||
sqlSch sql.Schema
|
||||
// ordMap are output ordinals for |keyMap| and |valMap| concatenated
|
||||
ordMap val.OrdinalMapping
|
||||
projections []uint64
|
||||
sqlSch sql.Schema
|
||||
}
|
||||
|
||||
var _ sql.RowIter = prollyIndexIter{}
|
||||
@@ -71,15 +72,16 @@ func newProllyIndexIter(
|
||||
keyProj, valProj, ordProj := projectionMappings(idx.Schema(), projections)
|
||||
|
||||
iter := prollyIndexIter{
|
||||
idx: idx,
|
||||
indexIter: indexIter,
|
||||
primary: primary,
|
||||
pkBld: pkBld,
|
||||
pkMap: pkMap,
|
||||
keyMap: keyProj,
|
||||
valMap: valProj,
|
||||
ordMap: ordProj,
|
||||
sqlSch: pkSch.Schema,
|
||||
idx: idx,
|
||||
indexIter: indexIter,
|
||||
primary: primary,
|
||||
pkBld: pkBld,
|
||||
pkMap: pkMap,
|
||||
keyMap: keyProj,
|
||||
valMap: valProj,
|
||||
ordMap: ordProj,
|
||||
projections: projections,
|
||||
sqlSch: pkSch.Schema,
|
||||
}
|
||||
|
||||
return iter, nil
|
||||
@@ -97,7 +99,7 @@ func (p prollyIndexIter) Next(ctx *sql.Context) (sql.Row, error) {
|
||||
}
|
||||
pk := p.pkBld.Build(sharePool)
|
||||
|
||||
r := make(sql.Row, len(p.keyMap)+len(p.valMap))
|
||||
r := make(sql.Row, len(p.projections))
|
||||
err = p.primary.Get(ctx, pk, func(key, value val.Tuple) error {
|
||||
return p.rowFromTuples(ctx, key, value, r)
|
||||
})
|
||||
@@ -167,6 +169,7 @@ type prollyCoveringIndexIter struct {
|
||||
|
||||
// |keyMap| and |valMap| are both of len ==
|
||||
keyMap, valMap, ordMap val.OrdinalMapping
|
||||
projections []uint64
|
||||
sqlSch sql.Schema
|
||||
}
|
||||
|
||||
@@ -195,15 +198,16 @@ func newProllyCoveringIndexIter(
|
||||
}
|
||||
|
||||
return prollyCoveringIndexIter{
|
||||
idx: idx,
|
||||
indexIter: indexIter,
|
||||
keyDesc: keyDesc,
|
||||
valDesc: valDesc,
|
||||
keyMap: keyMap,
|
||||
valMap: valMap,
|
||||
ordMap: ordMap,
|
||||
sqlSch: pkSch.Schema,
|
||||
ns: secondary.NodeStore(),
|
||||
idx: idx,
|
||||
indexIter: indexIter,
|
||||
keyDesc: keyDesc,
|
||||
valDesc: valDesc,
|
||||
keyMap: keyMap,
|
||||
valMap: valMap,
|
||||
ordMap: ordMap,
|
||||
sqlSch: pkSch.Schema,
|
||||
projections: projections,
|
||||
ns: secondary.NodeStore(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -214,7 +218,7 @@ func (p prollyCoveringIndexIter) Next(ctx *sql.Context) (sql.Row, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r := make(sql.Row, len(p.keyMap)+len(p.valMap))
|
||||
r := make(sql.Row, len(p.projections))
|
||||
if err := p.writeRowFromTuples(ctx, k, v, r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -250,7 +254,7 @@ func coveringIndexMapping(d DoltIndex, projections []uint64) (keyMap, ordMap val
|
||||
allMap := make(val.OrdinalMapping, len(projections)*2)
|
||||
var i int
|
||||
for _, p := range projections {
|
||||
if idx, ok := idx.TagToIdx[p]; ok {
|
||||
if idx, ok := idx.StoredIndexByTag(p); ok {
|
||||
allMap[i] = idx
|
||||
allMap[len(projections)+i] = i
|
||||
i++
|
||||
|
||||
@@ -39,25 +39,42 @@ type prollyRowIter struct {
|
||||
|
||||
var _ sql.RowIter = prollyRowIter{}
|
||||
|
||||
func NewProllyRowIter(sch schema.Schema, rows prolly.Map, iter prolly.MapIter, projections []uint64) (sql.RowIter, error) {
|
||||
func NewProllyRowIterForMap(sch schema.Schema, rows prolly.Map, iter prolly.MapIter, projections []uint64) sql.RowIter {
|
||||
if projections == nil {
|
||||
projections = sch.GetAllCols().Tags
|
||||
}
|
||||
|
||||
keyProj, valProj, ordProj := projectionMappings(sch, projections)
|
||||
kd, vd := rows.Descriptors()
|
||||
ns := rows.NodeStore()
|
||||
|
||||
return NewProllyRowIterForSchema(sch, iter, kd, vd, projections, ns)
|
||||
}
|
||||
|
||||
func NewProllyRowIterForSchema(
|
||||
sch schema.Schema,
|
||||
iter prolly.MapIter,
|
||||
kd val.TupleDesc,
|
||||
vd val.TupleDesc,
|
||||
projections []uint64,
|
||||
ns tree.NodeStore,
|
||||
) sql.RowIter {
|
||||
if schema.IsKeyless(sch) {
|
||||
return &prollyKeylessIter{
|
||||
iter: iter,
|
||||
valDesc: vd,
|
||||
valProj: valProj,
|
||||
ordProj: ordProj,
|
||||
rowLen: len(projections),
|
||||
ns: rows.NodeStore(),
|
||||
}, nil
|
||||
return NewKeylessProllyRowIter(sch, iter, vd, projections, ns)
|
||||
}
|
||||
|
||||
return NewKeyedProllyRowIter(sch, iter, kd, vd, projections, ns)
|
||||
}
|
||||
|
||||
func NewKeyedProllyRowIter(
|
||||
sch schema.Schema,
|
||||
iter prolly.MapIter,
|
||||
kd val.TupleDesc,
|
||||
vd val.TupleDesc,
|
||||
projections []uint64,
|
||||
ns tree.NodeStore,
|
||||
) sql.RowIter {
|
||||
keyProj, valProj, ordProj := projectionMappings(sch, projections)
|
||||
|
||||
return prollyRowIter{
|
||||
iter: iter,
|
||||
keyDesc: kd,
|
||||
@@ -66,8 +83,27 @@ func NewProllyRowIter(sch schema.Schema, rows prolly.Map, iter prolly.MapIter, p
|
||||
valProj: valProj,
|
||||
ordProj: ordProj,
|
||||
rowLen: len(projections),
|
||||
ns: rows.NodeStore(),
|
||||
}, nil
|
||||
ns: ns,
|
||||
}
|
||||
}
|
||||
|
||||
func NewKeylessProllyRowIter(
|
||||
sch schema.Schema,
|
||||
iter prolly.MapIter,
|
||||
vd val.TupleDesc,
|
||||
projections []uint64,
|
||||
ns tree.NodeStore,
|
||||
) sql.RowIter {
|
||||
_, valProj, ordProj := projectionMappings(sch, projections)
|
||||
|
||||
return &prollyKeylessIter{
|
||||
iter: iter,
|
||||
valDesc: vd,
|
||||
valProj: valProj,
|
||||
ordProj: ordProj,
|
||||
rowLen: len(projections),
|
||||
ns: ns,
|
||||
}
|
||||
}
|
||||
|
||||
// projectionMappings returns data structures that specify 1) which fields we read
|
||||
|
||||
@@ -197,7 +197,7 @@ func ProllyRowIterFromPartition(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return index.NewProllyRowIter(sch, rows, iter, projections)
|
||||
return index.NewProllyRowIterForMap(sch, rows, iter, projections), nil
|
||||
}
|
||||
|
||||
// SqlTableToRowIter returns a |sql.RowIter| for a full table scan for the given |table|. If
|
||||
@@ -255,7 +255,7 @@ func DoltTablePartitionToRowIter(ctx *sql.Context, name string, table *doltdb.Ta
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
rowIter, err := index.NewProllyRowIter(sch, idx, iter, nil)
|
||||
rowIter := index.NewProllyRowIterForMap(sch, idx, iter, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
@@ -74,7 +74,7 @@ func RowAsInsertStmt(r row.Row, tableName string, tableSch schema.Schema) (strin
|
||||
b.WriteRune(',')
|
||||
}
|
||||
col, _ := tableSch.GetAllCols().GetByTag(tag)
|
||||
sqlString, err := valueAsSqlString(col.TypeInfo, val)
|
||||
sqlString, err := ValueAsSqlString(col.TypeInfo, val)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
@@ -106,7 +106,7 @@ func RowAsDeleteStmt(r row.Row, tableName string, tableSch schema.Schema) (strin
|
||||
if seenOne {
|
||||
b.WriteString(" AND ")
|
||||
}
|
||||
sqlString, err := valueAsSqlString(col.TypeInfo, val)
|
||||
sqlString, err := ValueAsSqlString(col.TypeInfo, val)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
@@ -141,7 +141,7 @@ func RowAsUpdateStmt(r row.Row, tableName string, tableSch schema.Schema, colsTo
|
||||
if seenOne {
|
||||
b.WriteRune(',')
|
||||
}
|
||||
sqlString, err := valueAsSqlString(col.TypeInfo, val)
|
||||
sqlString, err := ValueAsSqlString(col.TypeInfo, val)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
@@ -165,7 +165,7 @@ func RowAsUpdateStmt(r row.Row, tableName string, tableSch schema.Schema, colsTo
|
||||
if seenOne {
|
||||
b.WriteString(" AND ")
|
||||
}
|
||||
sqlString, err := valueAsSqlString(col.TypeInfo, val)
|
||||
sqlString, err := ValueAsSqlString(col.TypeInfo, val)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
@@ -196,7 +196,7 @@ func RowAsTupleString(r row.Row, tableSch schema.Schema) (string, error) {
|
||||
b.WriteRune(',')
|
||||
}
|
||||
col, _ := tableSch.GetAllCols().GetByTag(tag)
|
||||
sqlString, err := valueAsSqlString(col.TypeInfo, val)
|
||||
sqlString, err := ValueAsSqlString(col.TypeInfo, val)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
@@ -481,7 +481,7 @@ func SqlRowAsUpdateStmt(r sql.Row, tableName string, tableSch schema.Schema, col
|
||||
return b.String(), nil
|
||||
}
|
||||
|
||||
func valueAsSqlString(ti typeinfo.TypeInfo, value types.Value) (string, error) {
|
||||
func ValueAsSqlString(ti typeinfo.TypeInfo, value types.Value) (string, error) {
|
||||
if types.IsNull(value) {
|
||||
return "NULL", nil
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package sqlfmt
|
||||
package sqlfmt_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/row"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
|
||||
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlfmt"
|
||||
"github.com/dolthub/dolt/go/libraries/utils/set"
|
||||
"github.com/dolthub/dolt/go/store/types"
|
||||
)
|
||||
@@ -52,38 +53,38 @@ type updateTest struct {
|
||||
}
|
||||
|
||||
func TestTableDropStmt(t *testing.T) {
|
||||
stmt := DropTableStmt("table_name")
|
||||
stmt := sqlfmt.DropTableStmt("table_name")
|
||||
|
||||
assert.Equal(t, expectedDropSql, stmt)
|
||||
}
|
||||
|
||||
func TestTableDropIfExistsStmt(t *testing.T) {
|
||||
stmt := DropTableIfExistsStmt("table_name")
|
||||
stmt := sqlfmt.DropTableIfExistsStmt("table_name")
|
||||
|
||||
assert.Equal(t, expectedDropIfExistsSql, stmt)
|
||||
}
|
||||
|
||||
func TestAlterTableAddColStmt(t *testing.T) {
|
||||
newColDef := "`c0` BIGINT NOT NULL"
|
||||
stmt := AlterTableAddColStmt("table_name", newColDef)
|
||||
stmt := sqlfmt.AlterTableAddColStmt("table_name", newColDef)
|
||||
|
||||
assert.Equal(t, expectedAddColSql, stmt)
|
||||
}
|
||||
|
||||
func TestAlterTableDropColStmt(t *testing.T) {
|
||||
stmt := AlterTableDropColStmt("table_name", "first_name")
|
||||
stmt := sqlfmt.AlterTableDropColStmt("table_name", "first_name")
|
||||
|
||||
assert.Equal(t, expectedDropColSql, stmt)
|
||||
}
|
||||
|
||||
func TestAlterTableRenameColStmt(t *testing.T) {
|
||||
stmt := AlterTableRenameColStmt("table_name", "id", "pk")
|
||||
stmt := sqlfmt.AlterTableRenameColStmt("table_name", "id", "pk")
|
||||
|
||||
assert.Equal(t, expectedRenameColSql, stmt)
|
||||
}
|
||||
|
||||
func TestRenameTableStmt(t *testing.T) {
|
||||
stmt := RenameTableStmt("table_name", "new_table_name")
|
||||
stmt := sqlfmt.RenameTableStmt("table_name", "new_table_name")
|
||||
|
||||
assert.Equal(t, expectedRenameTableSql, stmt)
|
||||
}
|
||||
@@ -158,7 +159,7 @@ func TestRowAsInsertStmt(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
stmt, err := RowAsInsertStmt(tt.row, tableName, tt.sch)
|
||||
stmt, err := sqlfmt.RowAsInsertStmt(tt.row, tableName, tt.sch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.expectedOutput, stmt)
|
||||
})
|
||||
@@ -183,7 +184,7 @@ func TestRowAsDeleteStmt(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
stmt, err := RowAsDeleteStmt(tt.row, tableName, tt.sch)
|
||||
stmt, err := sqlfmt.RowAsDeleteStmt(tt.row, tableName, tt.sch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.expectedOutput, stmt)
|
||||
})
|
||||
@@ -243,7 +244,7 @@ func TestRowAsUpdateStmt(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
stmt, err := RowAsUpdateStmt(tt.row, tableName, tt.sch, tt.collDiff)
|
||||
stmt, err := sqlfmt.RowAsUpdateStmt(tt.row, tableName, tt.sch, tt.collDiff)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.expectedOutput, stmt)
|
||||
})
|
||||
@@ -307,7 +308,7 @@ func TestValueAsSqlString(t *testing.T) {
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
act, err := valueAsSqlString(test.ti, test.val)
|
||||
act, err := sqlfmt.ValueAsSqlString(test.ti, test.val)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, test.exp, act)
|
||||
})
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
package sqlfmt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/dolthub/go-mysql-server/sql"
|
||||
@@ -31,10 +32,13 @@ func GenerateCreateTableColumnDefinition(col schema.Column, tableCollation sql.C
|
||||
|
||||
// GenerateCreateTableIndentedColumnDefinition returns column definition for CREATE TABLE statement with no indentation
|
||||
func GenerateCreateTableIndentedColumnDefinition(col schema.Column, tableCollation sql.CollationID) string {
|
||||
var defaultVal *sql.ColumnDefaultValue
|
||||
var defaultVal, genVal *sql.ColumnDefaultValue
|
||||
if col.Default != "" {
|
||||
defaultVal = sql.NewUnresolvedColumnDefaultValue(col.Default)
|
||||
}
|
||||
if col.Generated != "" {
|
||||
genVal = sql.NewUnresolvedColumnDefaultValue(col.Generated)
|
||||
}
|
||||
|
||||
return sql.GenerateCreateTableColumnDefinition(
|
||||
&sql.Column{
|
||||
@@ -44,9 +48,8 @@ func GenerateCreateTableIndentedColumnDefinition(col schema.Column, tableCollati
|
||||
AutoIncrement: col.AutoIncrement,
|
||||
Nullable: col.IsNullable(),
|
||||
Comment: col.Comment,
|
||||
// TODO
|
||||
// Generated: nil,
|
||||
// Virtual: false,
|
||||
Generated: genVal,
|
||||
Virtual: col.Virtual,
|
||||
}, col.Default, tableCollation)
|
||||
}
|
||||
|
||||
@@ -256,3 +259,64 @@ func AlterTableDropForeignKeyStmt(tableName, fkName string) string {
|
||||
b.WriteRune(';')
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// GenerateCreateTableStatement returns a CREATE TABLE statement for given table. This is a reasonable approximation of
|
||||
// `SHOW CREATE TABLE` in the engine, but may have some differences. Callers are advised to use the engine when
|
||||
// possible.
|
||||
func GenerateCreateTableStatement(tblName string, sch schema.Schema, fks []doltdb.ForeignKey, fksParentSch map[string]schema.Schema) (string, error) {
|
||||
colStmts := make([]string, sch.GetAllCols().Size())
|
||||
|
||||
// Statement creation parts for each column
|
||||
for i, col := range sch.GetAllCols().GetColumns() {
|
||||
colStmts[i] = GenerateCreateTableIndentedColumnDefinition(col, sql.CollationID(sch.GetCollation()))
|
||||
}
|
||||
|
||||
primaryKeyCols := sch.GetPKCols().GetColumnNames()
|
||||
if len(primaryKeyCols) > 0 {
|
||||
primaryKey := sql.GenerateCreateTablePrimaryKeyDefinition(primaryKeyCols)
|
||||
colStmts = append(colStmts, primaryKey)
|
||||
}
|
||||
|
||||
indexes := sch.Indexes().AllIndexes()
|
||||
for _, index := range indexes {
|
||||
// The primary key may or may not be declared as an index by the table. Don't print it twice if it's here.
|
||||
if isPrimaryKeyIndex(index, sch) {
|
||||
continue
|
||||
}
|
||||
colStmts = append(colStmts, GenerateCreateTableIndexDefinition(index))
|
||||
}
|
||||
|
||||
for _, fk := range fks {
|
||||
colStmts = append(colStmts, GenerateCreateTableForeignKeyDefinition(fk, sch, fksParentSch[fk.ReferencedTableName]))
|
||||
}
|
||||
|
||||
for _, check := range sch.Checks().AllChecks() {
|
||||
colStmts = append(colStmts, GenerateCreateTableCheckConstraintClause(check))
|
||||
}
|
||||
|
||||
coll := sql.CollationID(sch.GetCollation())
|
||||
createTableStmt := sql.GenerateCreateTableStatement(tblName, colStmts, coll.CharacterSet().Name(), coll.Name())
|
||||
return fmt.Sprintf("%s;", createTableStmt), nil
|
||||
}
|
||||
|
||||
// isPrimaryKeyIndex returns whether the index given matches the table's primary key columns. Order is not considered.
|
||||
func isPrimaryKeyIndex(index schema.Index, sch schema.Schema) bool {
|
||||
var pks = sch.GetPKCols().GetColumns()
|
||||
var pkMap = make(map[string]struct{})
|
||||
for _, c := range pks {
|
||||
pkMap[c.Name] = struct{}{}
|
||||
}
|
||||
|
||||
indexCols := index.ColumnNames()
|
||||
if len(indexCols) != len(pks) {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, c := range index.ColumnNames() {
|
||||
if _, ok := pkMap[c]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -2311,31 +2311,23 @@ func (t *AlterableDoltTable) createIndex(ctx *sql.Context, idx sql.IndexDef, key
|
||||
}
|
||||
}
|
||||
|
||||
ret, err := creation.CreateIndex(
|
||||
ctx,
|
||||
table,
|
||||
idx.Name,
|
||||
columns,
|
||||
allocatePrefixLengths(idx.Columns),
|
||||
schema.IndexProperties{
|
||||
IsUnique: idx.Constraint == sql.IndexConstraint_Unique,
|
||||
IsSpatial: idx.Constraint == sql.IndexConstraint_Spatial,
|
||||
IsFullText: idx.Constraint == sql.IndexConstraint_Fulltext,
|
||||
IsUserDefined: true,
|
||||
Comment: idx.Comment,
|
||||
FullTextProperties: schema.FullTextProperties{
|
||||
ConfigTable: tableNames.Config,
|
||||
PositionTable: tableNames.Position,
|
||||
DocCountTable: tableNames.DocCount,
|
||||
GlobalCountTable: tableNames.GlobalCount,
|
||||
RowCountTable: tableNames.RowCount,
|
||||
KeyType: uint8(keyCols.Type),
|
||||
KeyName: keyCols.Name,
|
||||
KeyPositions: keyPositions,
|
||||
},
|
||||
ret, err := creation.CreateIndex(ctx, table, t.Name(), idx.Name, columns, allocatePrefixLengths(idx.Columns), schema.IndexProperties{
|
||||
IsUnique: idx.Constraint == sql.IndexConstraint_Unique,
|
||||
IsSpatial: idx.Constraint == sql.IndexConstraint_Spatial,
|
||||
IsFullText: idx.Constraint == sql.IndexConstraint_Fulltext,
|
||||
IsUserDefined: true,
|
||||
Comment: idx.Comment,
|
||||
FullTextProperties: schema.FullTextProperties{
|
||||
ConfigTable: tableNames.Config,
|
||||
PositionTable: tableNames.Position,
|
||||
DocCountTable: tableNames.DocCount,
|
||||
GlobalCountTable: tableNames.GlobalCount,
|
||||
RowCountTable: tableNames.RowCount,
|
||||
KeyType: uint8(keyCols.Type),
|
||||
KeyName: keyCols.Name,
|
||||
KeyPositions: keyPositions,
|
||||
},
|
||||
t.opts,
|
||||
)
|
||||
}, t.opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -2631,21 +2623,13 @@ func (t *AlterableDoltTable) CreateIndexForForeignKey(ctx *sql.Context, idx sql.
|
||||
return err
|
||||
}
|
||||
|
||||
ret, err := creation.CreateIndex(
|
||||
ctx,
|
||||
table,
|
||||
idx.Name,
|
||||
columns,
|
||||
allocatePrefixLengths(idx.Columns),
|
||||
schema.IndexProperties{
|
||||
IsUnique: idx.Constraint == sql.IndexConstraint_Unique,
|
||||
IsSpatial: idx.Constraint == sql.IndexConstraint_Spatial,
|
||||
IsFullText: idx.Constraint == sql.IndexConstraint_Fulltext,
|
||||
IsUserDefined: false,
|
||||
Comment: "",
|
||||
},
|
||||
t.opts,
|
||||
)
|
||||
ret, err := creation.CreateIndex(ctx, table, t.Name(), idx.Name, columns, allocatePrefixLengths(idx.Columns), schema.IndexProperties{
|
||||
IsUnique: idx.Constraint == sql.IndexConstraint_Unique,
|
||||
IsSpatial: idx.Constraint == sql.IndexConstraint_Spatial,
|
||||
IsFullText: idx.Constraint == sql.IndexConstraint_Fulltext,
|
||||
IsUserDefined: false,
|
||||
Comment: "",
|
||||
}, t.opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -275,21 +275,13 @@ func (t *TempTable) CreateIndex(ctx *sql.Context, idx sql.IndexDef) error {
|
||||
cols[i] = c.Name
|
||||
}
|
||||
|
||||
ret, err := creation.CreateIndex(
|
||||
ctx,
|
||||
t.table,
|
||||
idx.Name,
|
||||
cols,
|
||||
allocatePrefixLengths(idx.Columns),
|
||||
schema.IndexProperties{
|
||||
IsUnique: idx.Constraint == sql.IndexConstraint_Unique,
|
||||
IsSpatial: idx.Constraint == sql.IndexConstraint_Spatial,
|
||||
IsFullText: idx.Constraint == sql.IndexConstraint_Fulltext,
|
||||
IsUserDefined: true,
|
||||
Comment: idx.Comment,
|
||||
},
|
||||
t.opts,
|
||||
)
|
||||
ret, err := creation.CreateIndex(ctx, t.table, t.Name(), idx.Name, cols, allocatePrefixLengths(idx.Columns), schema.IndexProperties{
|
||||
IsUnique: idx.Constraint == sql.IndexConstraint_Unique,
|
||||
IsSpatial: idx.Constraint == sql.IndexConstraint_Spatial,
|
||||
IsFullText: idx.Constraint == sql.IndexConstraint_Fulltext,
|
||||
IsUserDefined: true,
|
||||
Comment: idx.Comment,
|
||||
}, t.opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -43,9 +43,9 @@ type CreateIndexReturn struct {
|
||||
|
||||
// CreateIndex creates the given index on the given table with the given schema. Returns the updated table, updated schema, and created index.
|
||||
func CreateIndex(
|
||||
ctx context.Context,
|
||||
ctx *sql.Context,
|
||||
table *doltdb.Table,
|
||||
indexName string,
|
||||
tableName, indexName string,
|
||||
columns []string,
|
||||
prefixLengths []uint16,
|
||||
props schema.IndexProperties,
|
||||
@@ -113,7 +113,7 @@ func CreateIndex(
|
||||
|
||||
// TODO: in the case that we're replacing an implicit index with one the user specified, we could do this more
|
||||
// cheaply in some cases by just renaming it, rather than building it from scratch. But that's harder to get right.
|
||||
indexRows, err := BuildSecondaryIndex(ctx, newTable, index, opts)
|
||||
indexRows, err := BuildSecondaryIndex(ctx, newTable, index, tableName, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -131,7 +131,7 @@ func CreateIndex(
|
||||
}, nil
|
||||
}
|
||||
|
||||
func BuildSecondaryIndex(ctx context.Context, tbl *doltdb.Table, idx schema.Index, opts editor.Options) (durable.Index, error) {
|
||||
func BuildSecondaryIndex(ctx *sql.Context, tbl *doltdb.Table, idx schema.Index, tableName string, opts editor.Options) (durable.Index, error) {
|
||||
switch tbl.Format() {
|
||||
case types.Format_LD_1:
|
||||
m, err := editor.RebuildIndex(ctx, tbl, idx.Name(), opts)
|
||||
@@ -150,7 +150,7 @@ func BuildSecondaryIndex(ctx context.Context, tbl *doltdb.Table, idx schema.Inde
|
||||
return nil, err
|
||||
}
|
||||
primary := durable.ProllyMapFromIndex(m)
|
||||
return BuildSecondaryProllyIndex(ctx, tbl.ValueReadWriter(), tbl.NodeStore(), sch, idx, primary)
|
||||
return BuildSecondaryProllyIndex(ctx, tbl.ValueReadWriter(), tbl.NodeStore(), sch, tableName, idx, primary)
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown NomsBinFormat")
|
||||
@@ -159,10 +159,18 @@ func BuildSecondaryIndex(ctx context.Context, tbl *doltdb.Table, idx schema.Inde
|
||||
|
||||
// BuildSecondaryProllyIndex builds secondary index data for the given primary
|
||||
// index row data |primary|. |sch| is the current schema of the table.
|
||||
func BuildSecondaryProllyIndex(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, sch schema.Schema, idx schema.Index, primary prolly.Map) (durable.Index, error) {
|
||||
func BuildSecondaryProllyIndex(
|
||||
ctx *sql.Context,
|
||||
vrw types.ValueReadWriter,
|
||||
ns tree.NodeStore,
|
||||
sch schema.Schema,
|
||||
tableName string,
|
||||
idx schema.Index,
|
||||
primary prolly.Map,
|
||||
) (durable.Index, error) {
|
||||
if idx.IsUnique() {
|
||||
kd := idx.Schema().GetKeyDescriptor()
|
||||
return BuildUniqueProllyIndex(ctx, vrw, ns, sch, idx, primary, func(ctx context.Context, existingKey, newKey val.Tuple) error {
|
||||
return BuildUniqueProllyIndex(ctx, vrw, ns, sch, tableName, idx, primary, func(ctx context.Context, existingKey, newKey val.Tuple) error {
|
||||
msg := FormatKeyForUniqKeyErr(newKey, kd)
|
||||
return sql.NewUniqueKeyErr(msg, false, nil)
|
||||
})
|
||||
@@ -182,7 +190,10 @@ func BuildSecondaryProllyIndex(ctx context.Context, vrw types.ValueReadWriter, n
|
||||
|
||||
p := primary.Pool()
|
||||
mut := secondary.Mutate()
|
||||
secondaryBld := index.NewSecondaryKeyBuilder(sch, idx, secondary.KeyDesc(), p, secondary.NodeStore())
|
||||
secondaryBld, err := index.NewSecondaryKeyBuilder(ctx, tableName, sch, idx, secondary.KeyDesc(), p, secondary.NodeStore())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
iter, err := primary.IterAll(ctx)
|
||||
if err != nil {
|
||||
@@ -238,7 +249,16 @@ type DupEntryCb func(ctx context.Context, existingKey, newKey val.Tuple) error
|
||||
// BuildUniqueProllyIndex builds a unique index based on the given |primary| row
|
||||
// data. If any duplicate entries are found, they are passed to |cb|. If |cb|
|
||||
// returns a non-nil error then the process is stopped.
|
||||
func BuildUniqueProllyIndex(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, sch schema.Schema, idx schema.Index, primary prolly.Map, cb DupEntryCb) (durable.Index, error) {
|
||||
func BuildUniqueProllyIndex(
|
||||
ctx *sql.Context,
|
||||
vrw types.ValueReadWriter,
|
||||
ns tree.NodeStore,
|
||||
sch schema.Schema,
|
||||
tableName string,
|
||||
idx schema.Index,
|
||||
primary prolly.Map,
|
||||
cb DupEntryCb,
|
||||
) (durable.Index, error) {
|
||||
empty, err := durable.NewEmptyIndex(ctx, vrw, ns, idx.Schema())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -255,7 +275,10 @@ func BuildUniqueProllyIndex(ctx context.Context, vrw types.ValueReadWriter, ns t
|
||||
p := primary.Pool()
|
||||
|
||||
prefixDesc := secondary.KeyDesc().PrefixDesc(idx.Count())
|
||||
secondaryBld := index.NewSecondaryKeyBuilder(sch, idx, secondary.KeyDesc(), p, secondary.NodeStore())
|
||||
secondaryBld, err := index.NewSecondaryKeyBuilder(ctx, tableName, sch, idx, secondary.KeyDesc(), p, secondary.NodeStore())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mut := secondary.Mutate()
|
||||
for {
|
||||
|
||||
@@ -71,7 +71,7 @@ func NewTableIterator(ctx context.Context, sch schema.Schema, idx durable.Index,
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rowItr, err = index.NewProllyRowIter(sch, m, itr, nil)
|
||||
rowItr = index.NewProllyRowIterForMap(sch, m, itr, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -209,16 +209,16 @@ type DiffOp uint16
|
||||
const (
|
||||
DiffOpLeftAdd DiffOp = iota // leftAdd
|
||||
DiffOpRightAdd // rightAdd
|
||||
DiffOpLeftDelete //leftDelete
|
||||
DiffOpRightDelete //rightDelete
|
||||
DiffOpLeftModify //leftModify
|
||||
DiffOpRightModify //rightModify
|
||||
DiffOpConvergentAdd //convergentAdd
|
||||
DiffOpConvergentDelete //convergentDelete
|
||||
DiffOpConvergentModify //convergentModify
|
||||
DiffOpDivergentModifyResolved //divergenModifytResolved
|
||||
DiffOpDivergentDeleteConflict //divergentDeleteConflict
|
||||
DiffOpDivergentModifyConflict //divergentModifyConflict
|
||||
DiffOpLeftDelete // leftDelete
|
||||
DiffOpRightDelete // rightDelete
|
||||
DiffOpLeftModify // leftModify
|
||||
DiffOpRightModify // rightModify
|
||||
DiffOpConvergentAdd // convergentAdd
|
||||
DiffOpConvergentDelete // convergentDelete
|
||||
DiffOpConvergentModify // convergentModify
|
||||
DiffOpDivergentModifyResolved // divergentModifyResolved
|
||||
DiffOpDivergentDeleteConflict // divergentDeleteConflict
|
||||
DiffOpDivergentModifyConflict // divergentModifyConflict
|
||||
)
|
||||
|
||||
// ThreeWayDiff is a generic object for encoding a three way diff.
|
||||
|
||||
Reference in New Issue
Block a user