Merge remote-tracking branch 'origin/main' into andy/turn-on-chunk-journal

This commit is contained in:
Andy Arthur
2023-03-02 14:07:20 -08:00
57 changed files with 1920 additions and 905 deletions
+1 -1
View File
@@ -176,7 +176,7 @@ jobs:
with:
region: us-west-2
toAddresses: '["${{ github.event.client_payload.email_recipient }}"]'
subject: 'System Table Performance Benchmarks: ${{ github.event.client_payload.version }}'
subject: 'Merge Performance Benchmarks: ${{ github.event.client_payload.version }}'
bodyPath: ${{ steps.html.outputs.html }}
template: 'SysbenchTemplate'
+7
View File
@@ -113,6 +113,7 @@ const (
DecorateFlag = "decorate"
OneLineFlag = "oneline"
ShallowFlag = "shallow"
CachedFlag = "cached"
)
const (
@@ -310,6 +311,12 @@ func CreateLogArgParser() *argparser.ArgParser {
return ap
}
func CreatePatchArgParser() *argparser.ArgParser {
ap := argparser.NewArgParser()
ap.SupportsFlag(CachedFlag, "c", "Show only the staged data changes.")
return ap
}
func CreateGCArgParser() *argparser.ArgParser {
ap := argparser.NewArgParser()
ap.SupportsFlag(ShallowFlag, "s", "perform a fast, but incomplete garbage collection pass")
+39 -153
View File
@@ -33,8 +33,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlfmt"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
"github.com/dolthub/dolt/go/libraries/doltcore/table/untyped/tabular"
"github.com/dolthub/dolt/go/libraries/utils/argparser"
@@ -65,7 +63,6 @@ const (
whereParam = "where"
limitParam = "limit"
SQLFlag = "sql"
CachedFlag = "cached"
SkinnyFlag = "skinny"
MergeBase = "merge-base"
DiffMode = "diff-mode"
@@ -148,7 +145,7 @@ func (cmd DiffCmd) ArgParser() *argparser.ArgParser {
ap.SupportsString(FormatFlag, "r", "result output format", "How to format diff output. Valid values are tabular, sql, json. Defaults to tabular.")
ap.SupportsString(whereParam, "", "column", "filters columns based on values in the diff. See {{.EmphasisLeft}}dolt diff --help{{.EmphasisRight}} for details.")
ap.SupportsInt(limitParam, "", "record_count", "limits to the first N diffs.")
ap.SupportsFlag(CachedFlag, "c", "Show only the unstaged data changes.")
ap.SupportsFlag(cli.CachedFlag, "c", "Show only the staged data changes.")
ap.SupportsFlag(SkinnyFlag, "sk", "Shows only primary key columns and any columns with data changes.")
ap.SupportsFlag(MergeBase, "", "Uses merge base of the first commit and second commit (or HEAD if not supplied) as the first commit")
ap.SupportsString(DiffMode, "", "diff mode", "Determines how to display modified rows with tabular output. Valid values are row, line, in-place, context. Defaults to context.")
@@ -234,7 +231,7 @@ func parseDiffArgs(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPar
dArgs.limit, _ = apr.GetInt(limitParam)
dArgs.where = apr.GetValueOrDefault(whereParam, "")
tableNames, err := dArgs.applyDiffRoots(ctx, dEnv, apr.Args, apr.Contains(CachedFlag), apr.Contains(MergeBase))
tableNames, err := dArgs.applyDiffRoots(ctx, dEnv, apr.Args, apr.Contains(cli.CachedFlag), apr.Contains(MergeBase))
if err != nil {
return nil, err
}
@@ -326,8 +323,7 @@ func (dArgs *diffArgs) applyDiffRoots(ctx context.Context, dEnv *env.DoltEnv, ar
}
// treat the first arg as a ref spec
fromRoot, ok := maybeResolve(ctx, dEnv, args[0])
fromRoot, ok := diff.MaybeResolveRoot(ctx, dEnv.RepoStateReader(), dEnv.DoltDB, args[0])
// if it doesn't resolve, treat it as a table name
if !ok {
// `dolt diff table`
@@ -351,8 +347,7 @@ func (dArgs *diffArgs) applyDiffRoots(ctx context.Context, dEnv *env.DoltEnv, ar
return nil, nil
}
toRoot, ok := maybeResolve(ctx, dEnv, args[1])
toRoot, ok := diff.MaybeResolveRoot(ctx, dEnv.RepoStateReader(), dEnv.DoltDB, args[1])
if !ok {
// `dolt diff from_commit [...tables]`
if useMergeBase {
@@ -386,7 +381,7 @@ func (dArgs *diffArgs) applyMergeBase(ctx context.Context, dEnv *env.DoltEnv, le
return err
}
fromRoot, ok := maybeResolve(ctx, dEnv, mergeBaseStr)
fromRoot, ok := diff.MaybeResolveRoot(ctx, dEnv.RepoStateReader(), dEnv.DoltDB, mergeBaseStr)
if !ok {
return fmt.Errorf("merge base invalid %s", mergeBaseStr)
}
@@ -420,7 +415,7 @@ func (dArgs *diffArgs) applyDotRevisions(ctx context.Context, dEnv *env.DoltEnv,
}
if len(refs[1]) > 0 {
if toRoot, ok = maybeResolve(ctx, dEnv, refs[1]); !ok {
if toRoot, ok = diff.MaybeResolveRoot(ctx, dEnv.RepoStateReader(), dEnv.DoltDB, refs[1]); !ok {
return fmt.Errorf("to ref in three dot diff must be valid ref: %s", refs[1])
}
dArgs.toRoot = toRoot
@@ -438,7 +433,7 @@ func (dArgs *diffArgs) applyDotRevisions(ctx context.Context, dEnv *env.DoltEnv,
ok := true
if len(refs[0]) > 0 {
if fromRoot, ok = maybeResolve(ctx, dEnv, refs[0]); !ok {
if fromRoot, ok = diff.MaybeResolveRoot(ctx, dEnv.RepoStateReader(), dEnv.DoltDB, refs[0]); !ok {
return fmt.Errorf("from ref in two dot diff must be valid ref: %s", refs[0])
}
dArgs.fromRoot = fromRoot
@@ -446,7 +441,7 @@ func (dArgs *diffArgs) applyDotRevisions(ctx context.Context, dEnv *env.DoltEnv,
}
if len(refs[1]) > 0 {
if toRoot, ok = maybeResolve(ctx, dEnv, refs[1]); !ok {
if toRoot, ok = diff.MaybeResolveRoot(ctx, dEnv.RepoStateReader(), dEnv.DoltDB, refs[1]); !ok {
return fmt.Errorf("to ref in two dot diff must be valid ref: %s", refs[1])
}
dArgs.toRoot = toRoot
@@ -459,26 +454,6 @@ func (dArgs *diffArgs) applyDotRevisions(ctx context.Context, dEnv *env.DoltEnv,
return nil
}
// todo: distinguish between non-existent CommitSpec and other errors, don't assume non-existent
func maybeResolve(ctx context.Context, dEnv *env.DoltEnv, spec string) (*doltdb.RootValue, bool) {
cs, err := doltdb.NewCommitSpec(spec)
if err != nil {
return nil, false
}
cm, err := dEnv.DoltDB.Resolve(ctx, cs, dEnv.RepoStateReader().CWBHeadRef())
if err != nil {
return nil, false
}
root, err := cm.GetRootValue(ctx)
if err != nil {
return nil, false
}
return root, true
}
var diffSummarySchema = sql.Schema{
&sql.Column{Name: "Table name", Type: types.Text, Nullable: false},
&sql.Column{Name: "Diff type", Type: types.Text, Nullable: false},
@@ -504,8 +479,12 @@ func printDiffSummary(ctx context.Context, tds []diff.TableDelta, dArgs *diffArg
if err != nil {
return errhand.BuildDError("could not get table delta summary").AddCause(err).Build()
}
tableName := summ.TableName
if summ.DiffType == "renamed" {
tableName = fmt.Sprintf("%s -> %s", summ.FromTableName, summ.ToTableName)
}
err = wr.WriteSqlRow(ctx, sql.Row{td.CurName(), summ.DiffType, summ.DataChange, summ.SchemaChange})
err = wr.WriteSqlRow(ctx, sql.Row{tableName, summ.DiffType, summ.DataChange, summ.SchemaChange})
if err != nil {
return errhand.BuildDError("could not write table delta summary").AddCause(err).Build()
}
@@ -615,9 +594,9 @@ func diffUserTable(
}
func writeSqlSchemaDiff(ctx context.Context, td diff.TableDelta, toSchemas map[string]schema.Schema) errhand.VerboseError {
ddlStatements, err := sqlSchemaDiff(ctx, td, toSchemas)
ddlStatements, err := diff.SqlSchemaDiff(ctx, td, toSchemas)
if err != nil {
return err
return errhand.VerboseErrorFromError(err)
}
for _, stmt := range ddlStatements {
@@ -627,99 +606,6 @@ func writeSqlSchemaDiff(ctx context.Context, td diff.TableDelta, toSchemas map[s
return nil
}
// sqlSchemaDiff returns a slice of DDL statements that will transform the schema in the from delta to the schema in
// the to delta.
// TODO: this doesn't handle constraints or triggers
// TODO: this should live in the diff package
func sqlSchemaDiff(ctx context.Context, td diff.TableDelta, toSchemas map[string]schema.Schema) ([]string, errhand.VerboseError) {
fromSch, toSch, err := td.GetSchemas(ctx)
if err != nil {
return nil, errhand.BuildDError("cannot retrieve schema for table %s", td.ToName).AddCause(err).Build()
}
var ddlStatements []string
if td.IsDrop() {
ddlStatements = append(ddlStatements, sqlfmt.DropTableStmt(td.FromName))
} else if td.IsAdd() {
sqlDb := sqle.NewSingleTableDatabase(td.ToName, toSch, td.ToFks, td.ToFksParentSch)
sqlCtx, engine, _ := sqle.PrepareCreateTableStmt(ctx, sqlDb)
stmt, err := sqle.GetCreateTableStmt(sqlCtx, engine, td.ToName)
if err != nil {
return nil, errhand.VerboseErrorFromError(err)
}
ddlStatements = append(ddlStatements, stmt)
} else {
if td.FromName != td.ToName {
ddlStatements = append(ddlStatements, sqlfmt.RenameTableStmt(td.FromName, td.ToName))
}
eq := schema.SchemasAreEqual(fromSch, toSch)
if eq && !td.HasFKChanges() {
return ddlStatements, nil
}
colDiffs, unionTags := diff.DiffSchColumns(fromSch, toSch)
for _, tag := range unionTags {
cd := colDiffs[tag]
switch cd.DiffType {
case diff.SchDiffNone:
case diff.SchDiffAdded:
ddlStatements = append(ddlStatements, sqlfmt.AlterTableAddColStmt(td.ToName, sqlfmt.FmtCol(0, 0, 0, *cd.New)))
case diff.SchDiffRemoved:
ddlStatements = append(ddlStatements, sqlfmt.AlterTableDropColStmt(td.ToName, cd.Old.Name))
case diff.SchDiffModified:
// Ignore any primary key set changes here
if cd.Old.IsPartOfPK != cd.New.IsPartOfPK {
continue
}
if cd.Old.Name != cd.New.Name {
ddlStatements = append(ddlStatements, sqlfmt.AlterTableRenameColStmt(td.ToName, cd.Old.Name, cd.New.Name))
}
}
}
// Print changes between a primary key set change. It contains an ALTER TABLE DROP and an ALTER TABLE ADD
if !schema.ColCollsAreEqual(fromSch.GetPKCols(), toSch.GetPKCols()) {
ddlStatements = append(ddlStatements, sqlfmt.AlterTableDropPks(td.ToName))
if toSch.GetPKCols().Size() > 0 {
ddlStatements = append(ddlStatements, sqlfmt.AlterTableAddPrimaryKeys(td.ToName, toSch.GetPKCols()))
}
}
for _, idxDiff := range diff.DiffSchIndexes(fromSch, toSch) {
switch idxDiff.DiffType {
case diff.SchDiffNone:
case diff.SchDiffAdded:
ddlStatements = append(ddlStatements, sqlfmt.AlterTableAddIndexStmt(td.ToName, idxDiff.To))
case diff.SchDiffRemoved:
ddlStatements = append(ddlStatements, sqlfmt.AlterTableDropIndexStmt(td.FromName, idxDiff.From))
case diff.SchDiffModified:
ddlStatements = append(ddlStatements, sqlfmt.AlterTableDropIndexStmt(td.FromName, idxDiff.From))
ddlStatements = append(ddlStatements, sqlfmt.AlterTableAddIndexStmt(td.ToName, idxDiff.To))
}
}
for _, fkDiff := range diff.DiffForeignKeys(td.FromFks, td.ToFks) {
switch fkDiff.DiffType {
case diff.SchDiffNone:
case diff.SchDiffAdded:
parentSch := toSchemas[fkDiff.To.ReferencedTableName]
ddlStatements = append(ddlStatements, sqlfmt.AlterTableAddForeignKeyStmt(fkDiff.To, toSch, parentSch))
case diff.SchDiffRemoved:
ddlStatements = append(ddlStatements, sqlfmt.AlterTableDropForeignKeyStmt(fkDiff.From))
case diff.SchDiffModified:
ddlStatements = append(ddlStatements, sqlfmt.AlterTableDropForeignKeyStmt(fkDiff.From))
parentSch := toSchemas[fkDiff.To.ReferencedTableName]
ddlStatements = append(ddlStatements, sqlfmt.AlterTableAddForeignKeyStmt(fkDiff.To, toSch, parentSch))
}
}
}
return ddlStatements, nil
}
func diffRows(
ctx *sql.Context,
se *engine.SqlEngine,
@@ -757,7 +643,7 @@ func diffRows(
// can't diff
if !diffable {
// TODO: this messes up some structured output if the user didn't redirect it
cli.PrintErrf("Primary key sets differ between revisions for table %s, skipping data diff\n", td.ToName)
cli.PrintErrf("Primary key sets differ between revisions for table '%s', skipping data diff\n", td.ToName)
err := rowWriter.Close(ctx)
if err != nil {
return errhand.VerboseErrorFromError(err)
@@ -765,7 +651,7 @@ func diffRows(
return nil
} else if dArgs.diffOutput == SQLDiffOutput && !canSqlDiff {
// TODO: this is overly broad, we can absolutely do better
_, _ = fmt.Fprintf(cli.CliErr, "Incompatible schema change, skipping data diff\n")
_, _ = fmt.Fprintf(cli.CliErr, "Incompatible schema change, skipping data diff for table '%s'\n", td.ToName)
err := rowWriter.Close(ctx)
if err != nil {
return errhand.VerboseErrorFromError(err)
@@ -894,7 +780,7 @@ func writeDiffResults(
modifiedColNames map[string]bool,
dArgs *diffArgs,
) error {
ds, err := newDiffSplitter(diffQuerySch, targetSch)
ds, err := diff.NewDiffSplitter(diffQuerySch, targetSch)
if err != nil {
return err
}
@@ -907,25 +793,25 @@ func writeDiffResults(
return err
}
oldRow, newRow, err := ds.splitDiffResultRow(r)
oldRow, newRow, err := ds.SplitDiffResultRow(r)
if err != nil {
return err
}
if dArgs.skinny {
var filteredOldRow, filteredNewRow rowDiff
for i, changeType := range newRow.colDiffs {
var filteredOldRow, filteredNewRow diff.RowDiff
for i, changeType := range newRow.ColDiffs {
if (changeType == diff.Added|diff.Removed) || modifiedColNames[targetSch[i].Name] {
if i < len(oldRow.row) {
filteredOldRow.row = append(filteredOldRow.row, oldRow.row[i])
filteredOldRow.colDiffs = append(filteredOldRow.colDiffs, oldRow.colDiffs[i])
filteredOldRow.rowDiff = oldRow.rowDiff
if i < len(oldRow.Row) {
filteredOldRow.Row = append(filteredOldRow.Row, oldRow.Row[i])
filteredOldRow.ColDiffs = append(filteredOldRow.ColDiffs, oldRow.ColDiffs[i])
filteredOldRow.RowDiff = oldRow.RowDiff
}
if i < len(newRow.row) {
filteredNewRow.row = append(filteredNewRow.row, newRow.row[i])
filteredNewRow.colDiffs = append(filteredNewRow.colDiffs, newRow.colDiffs[i])
filteredNewRow.rowDiff = newRow.rowDiff
if i < len(newRow.Row) {
filteredNewRow.Row = append(filteredNewRow.Row, newRow.Row[i])
filteredNewRow.ColDiffs = append(filteredNewRow.ColDiffs, newRow.ColDiffs[i])
filteredNewRow.RowDiff = newRow.RowDiff
}
}
}
@@ -935,18 +821,18 @@ func writeDiffResults(
}
// We are guaranteed to have "ModeRow" for writers that do not support combined rows
if dArgs.diffMode != diff.ModeRow && oldRow.rowDiff == diff.ModifiedOld && newRow.rowDiff == diff.ModifiedNew {
if err = writer.WriteCombinedRow(ctx, oldRow.row, newRow.row, dArgs.diffMode); err != nil {
if dArgs.diffMode != diff.ModeRow && oldRow.RowDiff == diff.ModifiedOld && newRow.RowDiff == diff.ModifiedNew {
if err = writer.WriteCombinedRow(ctx, oldRow.Row, newRow.Row, dArgs.diffMode); err != nil {
return err
}
} else {
if oldRow.row != nil {
if err = writer.WriteRow(ctx, oldRow.row, oldRow.rowDiff, oldRow.colDiffs); err != nil {
if oldRow.Row != nil {
if err = writer.WriteRow(ctx, oldRow.Row, oldRow.RowDiff, oldRow.ColDiffs); err != nil {
return err
}
}
if newRow.row != nil {
if err = writer.WriteRow(ctx, newRow.row, newRow.rowDiff, newRow.colDiffs); err != nil {
if newRow.Row != nil {
if err = writer.WriteRow(ctx, newRow.Row, newRow.RowDiff, newRow.ColDiffs); err != nil {
return err
}
}
@@ -971,23 +857,23 @@ func getModifiedCols(
break
}
ds, err := newDiffSplitter(diffQuerySch, unionSch)
ds, err := diff.NewDiffSplitter(diffQuerySch, unionSch)
if err != nil {
return modifiedColNames, err
}
oldRow, newRow, err := ds.splitDiffResultRow(r)
oldRow, newRow, err := ds.SplitDiffResultRow(r)
if err != nil {
return modifiedColNames, err
}
for i, changeType := range newRow.colDiffs {
for i, changeType := range newRow.ColDiffs {
if changeType != diff.None || unionSch[i].PrimaryKey {
modifiedColNames[unionSch[i].Name] = true
}
}
for i, changeType := range oldRow.colDiffs {
for i, changeType := range oldRow.ColDiffs {
if changeType != diff.None || unionSch[i].PrimaryKey {
modifiedColNames[unionSch[i].Name] = true
}
+1 -1
View File
@@ -349,7 +349,7 @@ func (j *jsonDiffWriter) WriteSchemaDiff(ctx context.Context, toRoot *doltdb.Roo
return errhand.BuildDError("could not read schemas from toRoot").AddCause(err).Build()
}
stmts, err := sqlSchemaDiff(ctx, td, toSchemas)
stmts, err := diff.SqlSchemaDiff(ctx, td, toSchemas)
if err != nil {
return err
}
+1 -1
View File
@@ -56,7 +56,7 @@ import (
)
const (
Version = "0.53.2"
Version = "0.54.1"
)
var dumpDocsCommand = &commands.DumpDocsCmd{}
@@ -23,13 +23,12 @@
package eventsapi
import (
reflect "reflect"
sync "sync"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
durationpb "google.golang.org/protobuf/types/known/durationpb"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
)
const (
@@ -8,7 +8,6 @@ package eventsapi
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
@@ -23,11 +23,10 @@
package eventsapi
import (
reflect "reflect"
sync "sync"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
@@ -148,6 +147,11 @@ const (
ClientEventType_FILTER_BRANCH ClientEventType = 53
ClientEventType_DUMP ClientEventType = 54
ClientEventType_CHERRY_PICK ClientEventType = 55
ClientEventType_STASH ClientEventType = 56
ClientEventType_STASH_CLEAR ClientEventType = 57
ClientEventType_STASH_DROP ClientEventType = 58
ClientEventType_STASH_LIST ClientEventType = 59
ClientEventType_STASH_POP ClientEventType = 60
)
// Enum value maps for ClientEventType.
@@ -209,6 +213,11 @@ var (
53: "FILTER_BRANCH",
54: "DUMP",
55: "CHERRY_PICK",
56: "STASH",
57: "STASH_CLEAR",
58: "STASH_DROP",
59: "STASH_LIST",
60: "STASH_POP",
}
ClientEventType_value = map[string]int32{
"TYPE_UNSPECIFIED": 0,
@@ -267,6 +276,11 @@ var (
"FILTER_BRANCH": 53,
"DUMP": 54,
"CHERRY_PICK": 55,
"STASH": 56,
"STASH_CLEAR": 57,
"STASH_DROP": 58,
"STASH_LIST": 59,
"STASH_POP": 60,
}
)
@@ -454,7 +468,7 @@ var file_dolt_services_eventsapi_v1alpha1_event_constants_proto_rawDesc = []byte
0x52, 0x4d, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00,
0x12, 0x09, 0x0a, 0x05, 0x4c, 0x49, 0x4e, 0x55, 0x58, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x57,
0x49, 0x4e, 0x44, 0x4f, 0x57, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x41, 0x52, 0x57,
0x49, 0x4e, 0x10, 0x03, 0x2a, 0x9b, 0x07, 0x0a, 0x0f, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x45,
0x49, 0x4e, 0x10, 0x03, 0x2a, 0xe6, 0x07, 0x0a, 0x0f, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x45,
0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45,
0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08,
0x0a, 0x04, 0x49, 0x4e, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x41, 0x54,
@@ -512,26 +526,31 @@ var file_dolt_services_eventsapi_v1alpha1_event_constants_proto_rawDesc = []byte
0x4f, 0x4e, 0x10, 0x34, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x49, 0x4c, 0x54, 0x45, 0x52, 0x5f, 0x42,
0x52, 0x41, 0x4e, 0x43, 0x48, 0x10, 0x35, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x55, 0x4d, 0x50, 0x10,
0x36, 0x12, 0x0f, 0x0a, 0x0b, 0x43, 0x48, 0x45, 0x52, 0x52, 0x59, 0x5f, 0x50, 0x49, 0x43, 0x4b,
0x10, 0x37, 0x2a, 0x6a, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x49, 0x44, 0x12, 0x16,
0x0a, 0x12, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x42, 0x59, 0x54, 0x45, 0x53, 0x5f,
0x44, 0x4f, 0x57, 0x4e, 0x4c, 0x4f, 0x41, 0x44, 0x45, 0x44, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13,
0x44, 0x4f, 0x57, 0x4e, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x4d, 0x53, 0x5f, 0x45, 0x4c, 0x41, 0x50,
0x53, 0x45, 0x44, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x52, 0x45, 0x4d, 0x4f, 0x54, 0x45, 0x41,
0x50, 0x49, 0x5f, 0x52, 0x50, 0x43, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, 0x2a, 0x45,
0x0a, 0x0b, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x49, 0x44, 0x12, 0x19, 0x0a,
0x15, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45,
0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x4d, 0x4f,
0x54, 0x45, 0x5f, 0x55, 0x52, 0x4c, 0x5f, 0x53, 0x43, 0x48, 0x45, 0x4d, 0x45, 0x10, 0x02, 0x22,
0x04, 0x08, 0x01, 0x10, 0x01, 0x2a, 0x2d, 0x0a, 0x05, 0x41, 0x70, 0x70, 0x49, 0x44, 0x12, 0x16,
0x0a, 0x12, 0x41, 0x50, 0x50, 0x5f, 0x49, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x50, 0x50, 0x5f, 0x44, 0x4f,
0x4c, 0x54, 0x10, 0x01, 0x42, 0x51, 0x5a, 0x4f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x64, 0x6f, 0x6c, 0x74, 0x68, 0x75, 0x62, 0x2f, 0x64, 0x6f, 0x6c, 0x74, 0x2f,
0x67, 0x6f, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x64, 0x6f, 0x6c,
0x74, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74,
0x73, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x76,
0x65, 0x6e, 0x74, 0x73, 0x61, 0x70, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x10, 0x37, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x54, 0x41, 0x53, 0x48, 0x10, 0x38, 0x12, 0x0f, 0x0a,
0x0b, 0x53, 0x54, 0x41, 0x53, 0x48, 0x5f, 0x43, 0x4c, 0x45, 0x41, 0x52, 0x10, 0x39, 0x12, 0x0e,
0x0a, 0x0a, 0x53, 0x54, 0x41, 0x53, 0x48, 0x5f, 0x44, 0x52, 0x4f, 0x50, 0x10, 0x3a, 0x12, 0x0e,
0x0a, 0x0a, 0x53, 0x54, 0x41, 0x53, 0x48, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x3b, 0x12, 0x0d,
0x0a, 0x09, 0x53, 0x54, 0x41, 0x53, 0x48, 0x5f, 0x50, 0x4f, 0x50, 0x10, 0x3c, 0x2a, 0x6a, 0x0a,
0x08, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x12, 0x4d, 0x45, 0x54,
0x52, 0x49, 0x43, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
0x00, 0x12, 0x14, 0x0a, 0x10, 0x42, 0x59, 0x54, 0x45, 0x53, 0x5f, 0x44, 0x4f, 0x57, 0x4e, 0x4c,
0x4f, 0x41, 0x44, 0x45, 0x44, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x4f, 0x57, 0x4e, 0x4c,
0x4f, 0x41, 0x44, 0x5f, 0x4d, 0x53, 0x5f, 0x45, 0x4c, 0x41, 0x50, 0x53, 0x45, 0x44, 0x10, 0x02,
0x12, 0x17, 0x0a, 0x13, 0x52, 0x45, 0x4d, 0x4f, 0x54, 0x45, 0x41, 0x50, 0x49, 0x5f, 0x52, 0x50,
0x43, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, 0x2a, 0x45, 0x0a, 0x0b, 0x41, 0x74, 0x74,
0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x49, 0x44, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x54, 0x54, 0x52,
0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
0x44, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x4d, 0x4f, 0x54, 0x45, 0x5f, 0x55, 0x52,
0x4c, 0x5f, 0x53, 0x43, 0x48, 0x45, 0x4d, 0x45, 0x10, 0x02, 0x22, 0x04, 0x08, 0x01, 0x10, 0x01,
0x2a, 0x2d, 0x0a, 0x05, 0x41, 0x70, 0x70, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x50, 0x50,
0x5f, 0x49, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
0x00, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x50, 0x50, 0x5f, 0x44, 0x4f, 0x4c, 0x54, 0x10, 0x01, 0x42,
0x51, 0x5a, 0x4f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x6f,
0x6c, 0x74, 0x68, 0x75, 0x62, 0x2f, 0x64, 0x6f, 0x6c, 0x74, 0x2f, 0x67, 0x6f, 0x2f, 0x67, 0x65,
0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x64, 0x6f, 0x6c, 0x74, 0x2f, 0x73, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x61, 0x70, 0x69, 0x2f,
0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x61,
0x70, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -21,12 +21,11 @@
package remotesapi
import (
reflect "reflect"
sync "sync"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
)
const (
@@ -8,7 +8,6 @@ package remotesapi
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
@@ -21,11 +21,10 @@
package remotesapi
import (
reflect "reflect"
sync "sync"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
@@ -8,7 +8,6 @@ package remotesapi
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
+2 -2
View File
@@ -15,7 +15,7 @@ require (
github.com/dolthub/fslock v0.0.3
github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81
github.com/dolthub/vitess v0.0.0-20230223032306-95d4b04eabad
github.com/dolthub/vitess v0.0.0-20230301224006-436948ebe944
github.com/dustin/go-humanize v1.0.0
github.com/fatih/color v1.13.0
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
@@ -58,7 +58,7 @@ require (
github.com/cenkalti/backoff/v4 v4.1.3
github.com/cespare/xxhash v1.1.0
github.com/creasty/defaults v1.6.0
github.com/dolthub/go-mysql-server v0.14.1-0.20230227175231-786abd289f41
github.com/dolthub/go-mysql-server v0.14.1-0.20230302200901-1fcf1b00e774
github.com/google/flatbuffers v2.0.6+incompatible
github.com/jmoiron/sqlx v1.3.4
github.com/kch42/buzhash v0.0.0-20160816060738-9bdec3dec7c6
+4 -4
View File
@@ -166,16 +166,16 @@ github.com/dolthub/flatbuffers v1.13.0-dh.1 h1:OWJdaPep22N52O/0xsUevxJ6Qfw1M2txC
github.com/dolthub/flatbuffers v1.13.0-dh.1/go.mod h1:CorYGaDmXjHz1Z7i50PYXG1Ricn31GcA2wNOTFIQAKE=
github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
github.com/dolthub/go-mysql-server v0.14.1-0.20230227175231-786abd289f41 h1:8vc9pwtRgqb1RIJyWHsTetx+VZnd7pZlzCewTQIXk7Y=
github.com/dolthub/go-mysql-server v0.14.1-0.20230227175231-786abd289f41/go.mod h1:I2Mu8LSpwUII53EyBXqJMEKTQH5DUetV4ulP88JVsKA=
github.com/dolthub/go-mysql-server v0.14.1-0.20230302200901-1fcf1b00e774 h1:3Tb9SRa8KuS3cHMH6177xaTUCGuk7d/8hO0JLkcbTVg=
github.com/dolthub/go-mysql-server v0.14.1-0.20230302200901-1fcf1b00e774/go.mod h1:3mo5KeTO+4GV3BEgtaaYq/+ghuLYoW3bdSz7D9sKRco=
github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488 h1:0HHu0GWJH0N6a6keStrHhUAK5/o9LVfkh44pvsV4514=
github.com/dolthub/ishell v0.0.0-20221214210346-d7db0b066488/go.mod h1:ehexgi1mPxRTk0Mok/pADALuHbvATulTh6gzr7NzZto=
github.com/dolthub/jsonpath v0.0.0-20210609232853-d49537a30474 h1:xTrR+l5l+1Lfq0NvhiEsctylXinUMFhhsqaEcl414p8=
github.com/dolthub/jsonpath v0.0.0-20210609232853-d49537a30474/go.mod h1:kMz7uXOXq4qRriCEyZ/LUeTqraLJCjf0WVZcUi6TxUY=
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81 h1:7/v8q9XGFa6q5Ap4Z/OhNkAMBaK5YeuEzwJt+NZdhiE=
github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81/go.mod h1:siLfyv2c92W1eN/R4QqG/+RjjX5W2+gCTRjZxBjI3TY=
github.com/dolthub/vitess v0.0.0-20230223032306-95d4b04eabad h1:9FPQtKoqyREEsHfGKNU2DImktOusXTXklLtvTxtIuZ0=
github.com/dolthub/vitess v0.0.0-20230223032306-95d4b04eabad/go.mod h1:oVFIBdqMFEkt4Xz2fzFJBNtzKhDEjwdCF0dzde39iKs=
github.com/dolthub/vitess v0.0.0-20230301224006-436948ebe944 h1:Rlccv6h7kWyJLxc8IiWwjLqwTlNkOvCFbtJzFu2kEcA=
github.com/dolthub/vitess v0.0.0-20230301224006-436948ebe944/go.mod h1:oVFIBdqMFEkt4Xz2fzFJBNtzKhDEjwdCF0dzde39iKs=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@@ -12,18 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package commands
package diff
import (
"context"
"fmt"
"strings"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/libraries/doltcore/diff"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
)
type diffSplitter struct {
type DiffSplitter struct {
diffQuerySch sql.Schema
targetSch sql.Schema
queryToTarget map[int]int
@@ -32,16 +34,16 @@ type diffSplitter struct {
fromLen int
}
type rowDiff struct {
row sql.Row
rowDiff diff.ChangeType
colDiffs []diff.ChangeType
type RowDiff struct {
Row sql.Row
RowDiff ChangeType
ColDiffs []ChangeType
}
// newDiffSplitter returns a splitter that knows how to split unified diff query rows with the schema given into
// NewDiffSplitter returns a splitter that knows how to split unified diff query rows with the schema given into
// |old| and |new| rows in the union schema given. In the diff query schema, all |from| columns are expected to precede
// all |to| columns
func newDiffSplitter(diffQuerySch sql.Schema, targetSch sql.Schema) (*diffSplitter, error) {
func NewDiffSplitter(diffQuerySch sql.Schema, targetSch sql.Schema) (*DiffSplitter, error) {
resultToTarget := make(map[int]int)
fromTo := make(map[int]int)
toFrom := make(map[int]int)
@@ -77,7 +79,7 @@ func newDiffSplitter(diffQuerySch sql.Schema, targetSch sql.Schema) (*diffSplitt
fromLen = len(diffQuerySch) - 1
}
return &diffSplitter{
return &DiffSplitter{
diffQuerySch: diffQuerySch,
targetSch: targetSch,
fromLen: fromLen,
@@ -87,17 +89,17 @@ func newDiffSplitter(diffQuerySch sql.Schema, targetSch sql.Schema) (*diffSplitt
}, nil
}
func newRowDiff(size int) rowDiff {
return rowDiff{
colDiffs: make([]diff.ChangeType, size),
func newRowDiff(size int) RowDiff {
return RowDiff{
ColDiffs: make([]ChangeType, size),
}
}
func (ds diffSplitter) splitDiffResultRow(row sql.Row) (rowDiff, rowDiff, error) {
func (ds DiffSplitter) SplitDiffResultRow(row sql.Row) (RowDiff, RowDiff, error) {
// split rows in the result set into old, new
diffTypeColIdx := ds.diffQuerySch.IndexOfColName("diff_type")
if diffTypeColIdx < 0 {
return rowDiff{}, rowDiff{}, fmt.Errorf("expected a diff_type column")
return RowDiff{}, RowDiff{}, fmt.Errorf("expected a diff_type column")
}
diffType := row[diffTypeColIdx]
@@ -106,58 +108,80 @@ func (ds diffSplitter) splitDiffResultRow(row sql.Row) (rowDiff, rowDiff, error)
diffTypeStr := diffType.(string)
if diffTypeStr == "removed" || diffTypeStr == "modified" {
oldRow.row = make(sql.Row, len(ds.targetSch))
oldRow.Row = make(sql.Row, len(ds.targetSch))
if diffTypeStr == "modified" {
oldRow.rowDiff = diff.ModifiedOld
oldRow.RowDiff = ModifiedOld
} else {
oldRow.rowDiff = diff.Removed
oldRow.RowDiff = Removed
}
for i := 0; i < ds.fromLen; i++ {
cmp := ds.diffQuerySch[i].Type.Compare
oldRow.row[ds.queryToTarget[i]] = row[i]
oldRow.Row[ds.queryToTarget[i]] = row[i]
if diffTypeStr == "modified" {
fromToIndex, ok := ds.fromTo[i]
if ok {
if n, err := cmp(row[i], row[fromToIndex]); err != nil {
return rowDiff{}, rowDiff{}, err
return RowDiff{}, RowDiff{}, err
} else if n != 0 {
oldRow.colDiffs[ds.queryToTarget[i]] = diff.ModifiedOld
oldRow.ColDiffs[ds.queryToTarget[i]] = ModifiedOld
}
} else {
oldRow.colDiffs[ds.queryToTarget[i]] = diff.ModifiedOld
oldRow.ColDiffs[ds.queryToTarget[i]] = ModifiedOld
}
} else {
oldRow.colDiffs[ds.queryToTarget[i]] = diff.Removed
oldRow.ColDiffs[ds.queryToTarget[i]] = Removed
}
}
}
if diffTypeStr == "added" || diffTypeStr == "modified" {
newRow.row = make(sql.Row, len(ds.targetSch))
newRow.Row = make(sql.Row, len(ds.targetSch))
if diffTypeStr == "modified" {
newRow.rowDiff = diff.ModifiedNew
newRow.RowDiff = ModifiedNew
} else {
newRow.rowDiff = diff.Added
newRow.RowDiff = Added
}
for i := ds.fromLen; i < len(ds.diffQuerySch)-1; i++ {
cmp := ds.diffQuerySch[i].Type.Compare
newRow.row[ds.queryToTarget[i]] = row[i]
newRow.Row[ds.queryToTarget[i]] = row[i]
if diffTypeStr == "modified" {
// need this to compare map[string]interface{} and other incomparable result types
if n, err := cmp(row[i], row[ds.toFrom[i]]); err != nil {
return rowDiff{}, rowDiff{}, err
return RowDiff{}, RowDiff{}, err
} else if n != 0 {
newRow.colDiffs[ds.queryToTarget[i]] = diff.ModifiedNew
newRow.ColDiffs[ds.queryToTarget[i]] = ModifiedNew
}
} else {
newRow.colDiffs[ds.queryToTarget[i]] = diff.Added
newRow.ColDiffs[ds.queryToTarget[i]] = Added
}
}
}
return oldRow, newRow, nil
}
// MaybeResolveRoot resolves the given commit spec string against the current working branch head
// and returns the commit's root value with true on success; it returns nil and false when the
// spec cannot be parsed or resolved.
// todo: distinguish between non-existent CommitSpec and other errors, don't assume non-existent
func MaybeResolveRoot(ctx context.Context, rsr env.RepoStateReader, doltDB *doltdb.DoltDB, spec string) (*doltdb.RootValue, bool) {
	commitSpec, err := doltdb.NewCommitSpec(spec)
	if err != nil {
		// a malformed spec is treated the same as a non-existent commit
		return nil, false
	}

	commit, err := doltDB.Resolve(ctx, commitSpec, rsr.CWBHeadRef())
	if err != nil {
		return nil, false
	}

	rootVal, err := commit.GetRootValue(ctx)
	if err != nil {
		return nil, false
	}

	return rootVal, true
}
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package commands
package diff
import (
"testing"
@@ -21,12 +21,10 @@ import (
"github.com/dolthub/go-mysql-server/sql/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/libraries/doltcore/diff"
)
type splitRow struct {
old, new rowDiff
old, new RowDiff
}
func TestDiffSplitter(t *testing.T) {
@@ -60,30 +58,30 @@ func TestDiffSplitter(t *testing.T) {
expectedRows: []splitRow{
{
old: emptyRowDiff(2),
new: rowDiff{
row: sql.Row{1, 2},
rowDiff: diff.Added,
colDiffs: []diff.ChangeType{diff.Added, diff.Added},
new: RowDiff{
Row: sql.Row{1, 2},
RowDiff: Added,
ColDiffs: []ChangeType{Added, Added},
},
},
{
old: rowDiff{
row: sql.Row{3, 4},
rowDiff: diff.Removed,
colDiffs: []diff.ChangeType{diff.Removed, diff.Removed},
old: RowDiff{
Row: sql.Row{3, 4},
RowDiff: Removed,
ColDiffs: []ChangeType{Removed, Removed},
},
new: emptyRowDiff(2),
},
{
old: rowDiff{
row: sql.Row{5, 6},
rowDiff: diff.ModifiedOld,
colDiffs: []diff.ChangeType{diff.None, diff.ModifiedOld},
old: RowDiff{
Row: sql.Row{5, 6},
RowDiff: ModifiedOld,
ColDiffs: []ChangeType{None, ModifiedOld},
},
new: rowDiff{
row: sql.Row{5, 100},
rowDiff: diff.ModifiedNew,
colDiffs: []diff.ChangeType{diff.None, diff.ModifiedNew},
new: RowDiff{
Row: sql.Row{5, 100},
RowDiff: ModifiedNew,
ColDiffs: []ChangeType{None, ModifiedNew},
},
},
},
@@ -110,30 +108,30 @@ func TestDiffSplitter(t *testing.T) {
expectedRows: []splitRow{
{
old: emptyRowDiff(3),
new: rowDiff{
row: sql.Row{nil, 1, 2},
rowDiff: diff.Added,
colDiffs: []diff.ChangeType{diff.None, diff.Added, diff.Added},
new: RowDiff{
Row: sql.Row{nil, 1, 2},
RowDiff: Added,
ColDiffs: []ChangeType{None, Added, Added},
},
},
{
old: rowDiff{
row: sql.Row{3, 4, nil},
rowDiff: diff.Removed,
colDiffs: []diff.ChangeType{diff.Removed, diff.Removed, diff.None},
old: RowDiff{
Row: sql.Row{3, 4, nil},
RowDiff: Removed,
ColDiffs: []ChangeType{Removed, Removed, None},
},
new: emptyRowDiff(3),
},
{
old: rowDiff{
row: sql.Row{5, 6, nil},
rowDiff: diff.ModifiedOld,
colDiffs: []diff.ChangeType{diff.ModifiedOld, diff.None, diff.None},
old: RowDiff{
Row: sql.Row{5, 6, nil},
RowDiff: ModifiedOld,
ColDiffs: []ChangeType{ModifiedOld, None, None},
},
new: rowDiff{
row: sql.Row{nil, 6, 100},
rowDiff: diff.ModifiedNew,
colDiffs: []diff.ChangeType{diff.None, diff.None, diff.ModifiedNew},
new: RowDiff{
Row: sql.Row{nil, 6, 100},
RowDiff: ModifiedNew,
ColDiffs: []ChangeType{None, None, ModifiedNew},
},
},
},
@@ -156,18 +154,18 @@ func TestDiffSplitter(t *testing.T) {
expectedRows: []splitRow{
{
old: emptyRowDiff(2),
new: rowDiff{
row: sql.Row{1, 2},
rowDiff: diff.Added,
colDiffs: []diff.ChangeType{diff.Added, diff.Added},
new: RowDiff{
Row: sql.Row{1, 2},
RowDiff: Added,
ColDiffs: []ChangeType{Added, Added},
},
},
{
old: emptyRowDiff(2),
new: rowDiff{
row: sql.Row{3, 4},
rowDiff: diff.Added,
colDiffs: []diff.ChangeType{diff.Added, diff.Added},
new: RowDiff{
Row: sql.Row{3, 4},
RowDiff: Added,
ColDiffs: []ChangeType{Added, Added},
},
},
},
@@ -190,18 +188,18 @@ func TestDiffSplitter(t *testing.T) {
expectedRows: []splitRow{
{
new: emptyRowDiff(2),
old: rowDiff{
row: sql.Row{1, 2},
rowDiff: diff.Removed,
colDiffs: []diff.ChangeType{diff.Removed, diff.Removed},
old: RowDiff{
Row: sql.Row{1, 2},
RowDiff: Removed,
ColDiffs: []ChangeType{Removed, Removed},
},
},
{
new: emptyRowDiff(2),
old: rowDiff{
row: sql.Row{3, 4},
rowDiff: diff.Removed,
colDiffs: []diff.ChangeType{diff.Removed, diff.Removed},
old: RowDiff{
Row: sql.Row{3, 4},
RowDiff: Removed,
ColDiffs: []ChangeType{Removed, Removed},
},
},
},
@@ -210,12 +208,12 @@ func TestDiffSplitter(t *testing.T) {
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
ds, err := newDiffSplitter(tc.diffQuerySch, tc.tableSch)
ds, err := NewDiffSplitter(tc.diffQuerySch, tc.tableSch)
require.NoError(t, err)
var splitRows []splitRow
for _, row := range tc.diffQueryRows {
old, new, err := ds.splitDiffResultRow(row)
old, new, err := ds.SplitDiffResultRow(row)
require.NoError(t, err)
splitRows = append(splitRows, splitRow{old, new})
}
@@ -225,9 +223,9 @@ func TestDiffSplitter(t *testing.T) {
}
}
func emptyRowDiff(columns int) rowDiff {
return rowDiff{
colDiffs: make([]diff.ChangeType, columns),
func emptyRowDiff(columns int) RowDiff {
return RowDiff{
ColDiffs: make([]ChangeType, columns),
}
}
+230 -19
View File
@@ -19,12 +19,16 @@ import (
"fmt"
"sort"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlfmt"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
"github.com/dolthub/dolt/go/libraries/utils/set"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/store/types"
)
@@ -58,10 +62,12 @@ type TableDelta struct {
}
type TableDeltaSummary struct {
DiffType string
DataChange bool
SchemaChange bool
TableName string
DiffType string
DataChange bool
SchemaChange bool
TableName string
FromTableName string
ToTableName string
}
// GetStagedUnstagedTableDeltas represents staged and unstaged changes as TableDelta slices.
@@ -414,10 +420,11 @@ func (td TableDelta) GetSummary(ctx context.Context) (*TableDeltaSummary, error)
}
return &TableDeltaSummary{
TableName: td.FromName,
DataChange: !isEmpty,
SchemaChange: true,
DiffType: "dropped",
TableName: td.FromName,
FromTableName: td.FromName,
DataChange: !isEmpty,
SchemaChange: true,
DiffType: "dropped",
}, nil
}
@@ -429,10 +436,12 @@ func (td TableDelta) GetSummary(ctx context.Context) (*TableDeltaSummary, error)
}
return &TableDeltaSummary{
TableName: td.ToName,
DataChange: dataChanged,
SchemaChange: true,
DiffType: "renamed",
TableName: td.ToName,
FromTableName: td.FromName,
ToTableName: td.ToName,
DataChange: dataChanged,
SchemaChange: true,
DiffType: "renamed",
}, nil
}
@@ -445,6 +454,7 @@ func (td TableDelta) GetSummary(ctx context.Context) (*TableDeltaSummary, error)
return &TableDeltaSummary{
TableName: td.ToName,
ToTableName: td.ToName,
DataChange: !isEmpty,
SchemaChange: true,
DiffType: "added",
@@ -464,10 +474,12 @@ func (td TableDelta) GetSummary(ctx context.Context) (*TableDeltaSummary, error)
}
return &TableDeltaSummary{
TableName: td.ToName,
DataChange: dataChanged,
SchemaChange: schemaChanged,
DiffType: "modified",
TableName: td.FromName,
FromTableName: td.FromName,
ToTableName: td.ToName,
DataChange: dataChanged,
SchemaChange: schemaChanged,
DiffType: "modified",
}, nil
}
@@ -519,3 +531,202 @@ func fkSlicesAreEqual(from, to []doltdb.ForeignKey) bool {
}
return true
}
// SqlSchemaDiff returns a slice of DDL statements that will transform the schema in the from delta to the schema in
// the to delta. Drops emit a single DROP TABLE, adds emit a full CREATE TABLE, and everything else
// (renames and modifications) is delegated to getNonCreateNonDropTableSqlSchemaDiff.
// TODO: this doesn't handle constraints or triggers
func SqlSchemaDiff(ctx context.Context, td TableDelta, toSchemas map[string]schema.Schema) ([]string, error) {
	fromSch, toSch, err := td.GetSchemas(ctx)
	if err != nil {
		return nil, fmt.Errorf("cannot retrieve schema for table %s, cause: %s", td.ToName, err.Error())
	}

	var ddlStatements []string
	if td.IsDrop() {
		// dropped table: a single DROP TABLE statement is the whole diff
		ddlStatements = append(ddlStatements, sqlfmt.DropTableStmt(td.FromName))
	} else if td.IsAdd() {
		// added table: emit a complete CREATE TABLE, including keys, indexes, FKs and checks
		toPkSch, err := sqlutil.FromDoltSchema(td.ToName, td.ToSch)
		if err != nil {
			return nil, err
		}
		stmt, err := generateCreateTableStatement(td.ToName, td.ToSch, toPkSch, td.ToFks, td.ToFksParentSch)
		if err != nil {
			return nil, errhand.VerboseErrorFromError(err)
		}
		ddlStatements = append(ddlStatements, stmt)
	} else {
		// existing table changed (possibly renamed): emit incremental ALTER statements
		stmts, err := getNonCreateNonDropTableSqlSchemaDiff(td, toSchemas, fromSch, toSch)
		if err != nil {
			return nil, err
		}
		ddlStatements = append(ddlStatements, stmts...)
	}

	return ddlStatements, nil
}
// getNonCreateNonDropTableSqlSchemaDiff emits the ALTER/RENAME statements that transform fromSch
// into toSch for a table that exists in both revisions. Statements are emitted in a fixed order:
// table rename, column changes, primary-key set changes, index changes, then foreign-key changes.
// toSchemas maps table name -> schema in the to revision and is used to look up FK parent schemas.
func getNonCreateNonDropTableSqlSchemaDiff(td TableDelta, toSchemas map[string]schema.Schema, fromSch, toSch schema.Schema) ([]string, error) {
	if td.IsAdd() || td.IsDrop() {
		// use add and drop specific methods
		return nil, nil
	}

	var ddlStatements []string
	if td.FromName != td.ToName {
		ddlStatements = append(ddlStatements, sqlfmt.RenameTableStmt(td.FromName, td.ToName))
	}

	// Fast path: identical schemas with no FK changes need no further statements.
	eq := schema.SchemasAreEqual(fromSch, toSch)
	if eq && !td.HasFKChanges() {
		return ddlStatements, nil
	}

	// Column-level changes: adds, drops, and renames (keyed by column tag).
	colDiffs, unionTags := DiffSchColumns(fromSch, toSch)
	for _, tag := range unionTags {
		cd := colDiffs[tag]
		switch cd.DiffType {
		case SchDiffNone:
		case SchDiffAdded:
			ddlStatements = append(ddlStatements, sqlfmt.AlterTableAddColStmt(td.ToName, sqlfmt.GenerateCreateTableColumnDefinition(*cd.New)))
		case SchDiffRemoved:
			ddlStatements = append(ddlStatements, sqlfmt.AlterTableDropColStmt(td.ToName, cd.Old.Name))
		case SchDiffModified:
			// Ignore any primary key set changes here
			if cd.Old.IsPartOfPK != cd.New.IsPartOfPK {
				continue
			}
			if cd.Old.Name != cd.New.Name {
				ddlStatements = append(ddlStatements, sqlfmt.AlterTableRenameColStmt(td.ToName, cd.Old.Name, cd.New.Name))
			}
		}
	}

	// Print changes between a primary key set change. It contains an ALTER TABLE DROP and an ALTER TABLE ADD
	if !schema.ColCollsAreEqual(fromSch.GetPKCols(), toSch.GetPKCols()) {
		ddlStatements = append(ddlStatements, sqlfmt.AlterTableDropPks(td.ToName))
		if toSch.GetPKCols().Size() > 0 {
			ddlStatements = append(ddlStatements, sqlfmt.AlterTableAddPrimaryKeys(td.ToName, toSch.GetPKCols()))
		}
	}

	// Index changes: a modified index is expressed as a drop followed by a re-add.
	for _, idxDiff := range DiffSchIndexes(fromSch, toSch) {
		switch idxDiff.DiffType {
		case SchDiffNone:
		case SchDiffAdded:
			ddlStatements = append(ddlStatements, sqlfmt.AlterTableAddIndexStmt(td.ToName, idxDiff.To))
		case SchDiffRemoved:
			ddlStatements = append(ddlStatements, sqlfmt.AlterTableDropIndexStmt(td.FromName, idxDiff.From))
		case SchDiffModified:
			ddlStatements = append(ddlStatements, sqlfmt.AlterTableDropIndexStmt(td.FromName, idxDiff.From))
			ddlStatements = append(ddlStatements, sqlfmt.AlterTableAddIndexStmt(td.ToName, idxDiff.To))
		}
	}

	// Foreign key changes: like indexes, a modified FK becomes a drop plus an add. The parent
	// schema is looked up from the to revision's schemas; a missing parent yields a nil schema.
	for _, fkDiff := range DiffForeignKeys(td.FromFks, td.ToFks) {
		switch fkDiff.DiffType {
		case SchDiffNone:
		case SchDiffAdded:
			parentSch := toSchemas[fkDiff.To.ReferencedTableName]
			ddlStatements = append(ddlStatements, sqlfmt.AlterTableAddForeignKeyStmt(fkDiff.To, toSch, parentSch))
		case SchDiffRemoved:
			ddlStatements = append(ddlStatements, sqlfmt.AlterTableDropForeignKeyStmt(fkDiff.From))
		case SchDiffModified:
			ddlStatements = append(ddlStatements, sqlfmt.AlterTableDropForeignKeyStmt(fkDiff.From))
			parentSch := toSchemas[fkDiff.To.ReferencedTableName]
			ddlStatements = append(ddlStatements, sqlfmt.AlterTableAddForeignKeyStmt(fkDiff.To, toSch, parentSch))
		}
	}

	return ddlStatements, nil
}
// GetDataDiffStatement renders a single diffed row as the SQL DML statement that applies it:
// INSERT for added rows, DELETE for removed rows, and UPDATE for the new side of a modification.
// The old side of a modification yields an empty string, as does any row that produces no DML.
func GetDataDiffStatement(tableName string, sch schema.Schema, row sql.Row, rowDiffType ChangeType, colDiffTypes []ChangeType) (string, error) {
	if len(row) != len(colDiffTypes) {
		return "", fmt.Errorf("expected the same size for columns and diff types, got %d and %d", len(row), len(colDiffTypes))
	}

	switch rowDiffType {
	case Added:
		return sqlfmt.SqlRowAsInsertStmt(row, tableName, sch)
	case Removed:
		return sqlfmt.SqlRowAsDeleteStmt(row, tableName, sch, 0)
	case ModifiedOld:
		// do nothing, we only issue UPDATE for ModifiedNew
		return "", nil
	case ModifiedNew:
		// Only columns whose diff type is not None appear in the UPDATE's SET clause.
		changedCols := set.NewEmptyStrSet()
		for idx, ct := range colDiffTypes {
			if ct == None {
				continue
			}
			changedCols.Add(sch.GetAllCols().GetByIndex(idx).Name)
		}
		return sqlfmt.SqlRowAsUpdateStmt(row, tableName, sch, changedCols)
	default:
		return "", fmt.Errorf("unexpected row diff type: %v", rowDiffType)
	}
}
// generateCreateTableStatement returns CREATE TABLE statement for given table. This function was made to share the same
// 'create table' statement logic as GMS. We initially were running `SHOW CREATE TABLE` query to get the statement;
// however, it cannot be done for cases that need this statement in sql shell mode. Dolt uses its own Schema and
// Column and other object types which are not directly compatible with GMS, so we try to use as much shared logic
// as possible with GMS to get 'create table' statement in Dolt.
//
// The returned string is the full CREATE TABLE statement terminated with a semicolon, including
// column definitions, primary key, secondary indexes, foreign keys, and check constraints.
func generateCreateTableStatement(tblName string, sch schema.Schema, pkSchema sql.PrimaryKeySchema, fks []doltdb.ForeignKey, fksParentSch map[string]schema.Schema) (string, error) {
	sqlSch := pkSchema.Schema
	// NOTE(review): colStmts is sized by the GMS schema but indexed by the dolt schema's columns —
	// assumes len(pkSchema.Schema) == sch.GetAllCols() count; TODO confirm callers guarantee this.
	colStmts := make([]string, len(sqlSch))

	// Statement creation parts for each column
	for i, col := range sch.GetAllCols().GetColumns() {
		colStmts[i] = sqlfmt.GenerateCreateTableIndentedColumnDefinition(col)
	}

	primaryKeyCols := sch.GetPKCols().GetColumnNames()
	if len(primaryKeyCols) > 0 {
		primaryKey := sql.GenerateCreateTablePrimaryKeyDefinition(primaryKeyCols)
		colStmts = append(colStmts, primaryKey)
	}

	indexes := sch.Indexes().AllIndexes()
	for _, index := range indexes {
		// The primary key may or may not be declared as an index by the table. Don't print it twice if it's here.
		if isPrimaryKeyIndex(index, sch) {
			continue
		}
		colStmts = append(colStmts, sqlfmt.GenerateCreateTableIndexDefinition(index))
	}

	for _, fk := range fks {
		colStmts = append(colStmts, sqlfmt.GenerateCreateTableForeignKeyDefinition(fk, sch, fksParentSch[fk.ReferencedTableName]))
	}

	for _, check := range sch.Checks().AllChecks() {
		colStmts = append(colStmts, sqlfmt.GenerateCreateTableCheckConstraintClause(check))
	}

	// Assemble the final statement with the table's character set and collation.
	coll := sql.CollationID(sch.GetCollation())
	createTableStmt := sql.GenerateCreateTableStatement(tblName, colStmts, coll.CharacterSet().Name(), coll.Name())
	return fmt.Sprintf("%s;", createTableStmt), nil
}
// isPrimaryKeyIndex returns whether the index given matches the table's primary key columns. Order is not considered.
func isPrimaryKeyIndex(index schema.Index, sch schema.Schema) bool {
	pks := sch.GetPKCols().GetColumns()
	pkMap := make(map[string]struct{}, len(pks))
	for _, c := range pks {
		pkMap[c.Name] = struct{}{}
	}

	// Fetch the index's column names once and reuse the slice for both the length
	// check and the membership loop (the original called ColumnNames() twice).
	indexCols := index.ColumnNames()
	if len(indexCols) != len(pks) {
		return false
	}

	// Same cardinality: the index matches the PK iff every index column is a PK column.
	for _, c := range indexCols {
		if _, ok := pkMap[c]; !ok {
			return false
		}
	}

	return true
}
+8 -2
View File
@@ -106,7 +106,13 @@ func MultiEnvForDirectory(
return false
}
newEnv := Load(ctx, GetCurrentUserHomeDir, newFs, doltdb.LocalDirDoltDB, dEnv.Version)
// TODO: get rid of version altogether
version := ""
if dEnv != nil {
version = dEnv.Version
}
newEnv := Load(ctx, GetCurrentUserHomeDir, newFs, doltdb.LocalDirDoltDB, version)
if newEnv.Valid() {
envSet[dirToDBName(dir)] = newEnv
}
@@ -117,7 +123,7 @@ func MultiEnvForDirectory(
// if the current directory database is in our set, add it first so it will be the current database
var ok bool
if dEnv, ok = envSet[dbName]; ok {
if dEnv, ok = envSet[dbName]; ok && dEnv.Valid() {
mrEnv.addEnv(dbName, dEnv)
delete(envSet, dbName)
}
+20 -24
View File
@@ -684,36 +684,32 @@ func translateTuples(ctx context.Context, kt, vt translator, differ <-chan types
}
}
func writeProllyMap(ctx context.Context, prev prolly.Map, writer <-chan val.Tuple) (prolly.Map, error) {
return prolly.MutateMapWithTupleIter(ctx, prev, channelProvider{tuples: writer})
}
type channelProvider struct {
tuples <-chan val.Tuple
}
var _ prolly.TupleIter = channelProvider{}
func (p channelProvider) Next(ctx context.Context) (val.Tuple, val.Tuple) {
func writeProllyMap(ctx context.Context, prev prolly.Map, writer <-chan val.Tuple) (m prolly.Map, err error) {
var (
k, v val.Tuple
ok bool
)
select {
case k, ok = <-p.tuples:
if !ok {
return nil, nil // done
mut := prev.Mutate()
for {
select {
case k, ok = <-writer:
if !ok {
m, err = mut.Map(ctx)
return // done
}
case <-ctx.Done():
return
}
case _ = <-ctx.Done():
return nil, nil
}
select {
case v, ok = <-p.tuples:
assertTrue(ok)
case _ = <-ctx.Done():
return nil, nil
select {
case v, ok = <-writer:
assertTrue(ok)
case <-ctx.Done():
return
}
if err = mut.Put(ctx, k, v); err != nil {
return
}
}
return k, v
}
@@ -46,6 +46,7 @@ func RunModifyTypeTests(t *testing.T, tests []ModifyTypeTest) {
if len(name) > 200 {
name = name[:200]
}
test := test
t.Run(name, func(t *testing.T) {
t.Parallel()
ctx := context.Background()
@@ -395,6 +395,7 @@ func (a *binlogReplicaApplier) processBinlogEvent(ctx *sql.Context, engine *gms.
ctx.SetSessionVariable(ctx, "unique_checks", 1)
}
ctx.SetCurrentDatabase(query.Database)
executeQueryWithEngine(ctx, engine, query.SQL)
createCommit = strings.ToLower(query.SQL) != "begin"
@@ -908,15 +909,20 @@ func loadReplicaServerId() (uint32, error) {
func executeQueryWithEngine(ctx *sql.Context, engine *gms.Engine, query string) {
if ctx.GetCurrentDatabase() == "" {
ctx.GetLogger().Warn("No current database selected")
ctx.GetLogger().WithFields(logrus.Fields{
"query": query,
}).Warn("No current database selected")
}
_, iter, err := engine.Query(ctx, query)
if err != nil {
// Log any errors, except for commits with "nothing to commit"
if err.Error() != "nothing to commit" {
msg := fmt.Sprintf("ERROR executing query: %v ", err.Error())
ctx.GetLogger().Errorf(msg)
ctx.GetLogger().WithFields(logrus.Fields{
"error": err.Error(),
"query": query,
}).Errorf("Error executing query")
msg := fmt.Sprintf("Error executing query: %v", err.Error())
DoltBinlogReplicaController.setSqlError(mysql.ERUnknownError, msg)
}
return
@@ -29,14 +29,20 @@ func TestBinlogReplicationMultiDb(t *testing.T) {
// Make changes on the primary to db01 and db02
primaryDatabase.MustExec("create database db02;")
primaryDatabase.MustExec("create table db01.t01 (pk int primary key, c1 int default (0))")
primaryDatabase.MustExec("create table db02.t02 (pk int primary key, c1 int default (0))")
primaryDatabase.MustExec("insert into db01.t01 (pk) values (1), (3), (5), (8), (9);")
primaryDatabase.MustExec("insert into db02.t02 (pk) values (2), (4), (6), (7), (10);")
primaryDatabase.MustExec("delete from db01.t01 where pk=9;")
primaryDatabase.MustExec("use db01;")
primaryDatabase.MustExec("create table t01 (pk int primary key, c1 int default (0))")
primaryDatabase.MustExec("use db02;")
primaryDatabase.MustExec("create table t02 (pk int primary key, c1 int default (0))")
primaryDatabase.MustExec("use db01;")
primaryDatabase.MustExec("insert into t01 (pk) values (1), (3), (5), (8), (9);")
primaryDatabase.MustExec("use db02;")
primaryDatabase.MustExec("insert into t02 (pk) values (2), (4), (6), (7), (10);")
primaryDatabase.MustExec("use db01;")
primaryDatabase.MustExec("delete from t01 where pk=9;")
primaryDatabase.MustExec("delete from db02.t02 where pk=10;")
primaryDatabase.MustExec("use db02;")
primaryDatabase.MustExec("update db01.t01 set pk=7 where pk=8;")
primaryDatabase.MustExec("update db02.t02 set pk=8 where pk=7;")
primaryDatabase.MustExec("update t02 set pk=8 where pk=7;")
// Verify the changes in db01 on the replica
waitForReplicaToCatchUp(t)
@@ -41,7 +41,8 @@ type DiffSummaryTableFunction struct {
}
var diffSummaryTableSchema = sql.Schema{
&sql.Column{Name: "table_name", Type: types.LongText, Nullable: false},
&sql.Column{Name: "from_table_name", Type: types.LongText, Nullable: false},
&sql.Column{Name: "to_table_name", Type: types.LongText, Nullable: false},
&sql.Column{Name: "diff_type", Type: types.Text, Nullable: false},
&sql.Column{Name: "data_change", Type: types.Boolean, Nullable: false},
&sql.Column{Name: "schema_change", Type: types.Boolean, Nullable: false},
@@ -268,8 +269,6 @@ func (ds *DiffSummaryTableFunction) RowIter(ctx *sql.Context, row sql.Row) (sql.
summs := []*diff.TableDeltaSummary{}
if summ != nil {
// Old name of renamed table can be matched, use provided name in result
summ.TableName = tableName
summs = []*diff.TableDeltaSummary{summ}
}
@@ -396,9 +395,10 @@ func (d *diffSummaryTableFunctionRowIter) Close(context *sql.Context) error {
func getRowFromSummary(ds *diff.TableDeltaSummary) sql.Row {
return sql.Row{
ds.TableName, // table_name
ds.DiffType, // diff_type
ds.DataChange, // data_change
ds.SchemaChange, // schema_change
ds.FromTableName, // from_table_name
ds.ToTableName, // to_table_name
ds.DiffType, // diff_type
ds.DataChange, // data_change
ds.SchemaChange, // schema_change
}
}
@@ -16,7 +16,6 @@ package sqle
import (
"fmt"
"io"
"strings"
"github.com/dolthub/go-mysql-server/sql"
@@ -169,7 +168,7 @@ func (dtf *DiffTableFunction) RowIter(ctx *sql.Context, _ sql.Row) (sql.RowIter,
ddb := sqledb.DbData().Ddb
dp := dtables.NewDiffPartition(dtf.tableDelta.ToTable, dtf.tableDelta.FromTable, toCommitStr, fromCommitStr, dtf.toDate, dtf.fromDate, dtf.tableDelta.ToSch, dtf.tableDelta.FromSch)
return NewDiffTableFunctionRowIterForSinglePartition(*dp, ddb, dtf.joiner), nil
return dtables.NewDiffPartitionRowIter(*dp, ddb, dtf.joiner), nil
}
// findMatchingDelta returns the best matching table delta for the table name
@@ -551,74 +550,3 @@ func (dtf *DiffTableFunction) String() string {
func (dtf *DiffTableFunction) Name() string {
return "dolt_diff"
}
//------------------------------------
// diffTableFunctionRowIter
//------------------------------------
var _ sql.RowIter = (*diffTableFunctionRowIter)(nil)
type diffTableFunctionRowIter struct {
diffPartitions *dtables.DiffPartitions
ddb *doltdb.DoltDB
joiner *rowconv.Joiner
currentPartition *sql.Partition
currentRowIter *sql.RowIter
}
func NewDiffTableFunctionRowIter(partitions *dtables.DiffPartitions, ddb *doltdb.DoltDB, joiner *rowconv.Joiner) *diffTableFunctionRowIter {
return &diffTableFunctionRowIter{
diffPartitions: partitions,
ddb: ddb,
joiner: joiner,
}
}
func NewDiffTableFunctionRowIterForSinglePartition(partition sql.Partition, ddb *doltdb.DoltDB, joiner *rowconv.Joiner) *diffTableFunctionRowIter {
return &diffTableFunctionRowIter{
currentPartition: &partition,
ddb: ddb,
joiner: joiner,
}
}
func (itr *diffTableFunctionRowIter) Next(ctx *sql.Context) (sql.Row, error) {
for {
if itr.currentPartition == nil {
nextPartition, err := itr.diffPartitions.Next(ctx)
if err != nil {
return nil, err
}
itr.currentPartition = &nextPartition
}
if itr.currentRowIter == nil {
dp := (*itr.currentPartition).(dtables.DiffPartition)
rowIter, err := dp.GetRowIter(ctx, itr.ddb, itr.joiner, sql.IndexLookup{})
if err != nil {
return nil, err
}
itr.currentRowIter = &rowIter
}
row, err := (*itr.currentRowIter).Next(ctx)
if err == io.EOF {
itr.currentPartition = nil
itr.currentRowIter = nil
if itr.diffPartitions == nil {
return nil, err
}
continue
} else if err != nil {
return nil, err
} else {
return row, nil
}
}
}
func (itr *diffTableFunctionRowIter) Close(_ *sql.Context) error {
return nil
}
@@ -0,0 +1,413 @@
// Copyright 2023 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dprocedures
import (
"context"
"fmt"
"io"
"sort"
"strings"
"time"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/go-mysql-server/sql/expression"
"github.com/dolthub/go-mysql-server/sql/plan"
"github.com/dolthub/vitess/go/mysql"
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
"github.com/dolthub/dolt/go/libraries/doltcore/diff"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dtables"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
"github.com/dolthub/dolt/go/libraries/utils/argparser"
"github.com/dolthub/dolt/go/libraries/utils/set"
"github.com/dolthub/dolt/go/store/types"
)
// doltPatch is the stored procedure version for the CLI command `dolt patch` (CLI command not implemented yet).
// It produces the SQL patch statements for the requested revisions and wraps them in a row iterator,
// one statement per row.
func doltPatch(ctx *sql.Context, args ...string) (sql.RowIter, error) {
	statements, err := doDoltPatch(ctx, args)
	if err != nil {
		return nil, err
	}
	return newPatchRowIter(statements), nil
}
// doDoltPatch computes the SQL patch (DDL followed by DML per table) between two revisions of the
// current database. Arguments are parsed dolt-diff style: optional from/to revisions followed by
// optional table names; with no revisions the diff is STAGED..WORKING (or HEAD..STAGED with --cached).
// Returns the ordered list of SQL statements making up the patch.
func doDoltPatch(ctx *sql.Context, args []string) ([]string, error) {
	dbName := ctx.GetCurrentDatabase()
	if len(dbName) == 0 {
		return nil, fmt.Errorf("error: empty database name")
	}

	apr, err := cli.CreatePatchArgParser().Parse(args)
	if err != nil {
		return nil, err
	}

	// Pull the DoltDB, db data and roots for the current database out of the session.
	dSess := dsess.DSessFromSess(ctx.Session)
	doltDB, ok := dSess.GetDoltDB(ctx, dbName)
	if !ok {
		return nil, fmt.Errorf("failed to get DoltDB")
	}
	dbData, ok := dSess.GetDbData(ctx, dbName)
	if !ok {
		return nil, fmt.Errorf("failed to get dbData")
	}
	roots, ok := dSess.GetRoots(ctx, dbName)
	if !ok {
		return nil, sql.ErrDatabaseNotFound.New(dbName)
	}

	// Resolve which args are revisions and which are table names, then validate the tables.
	fromRef, fromRoot, toRef, toRoot, tables := parseRevisionsAndTablesArgs(ctx, dbData, doltDB, roots, apr)
	tableSet, err := validateTablesAndGetTablesSet(ctx, fromRoot, toRoot, tables)
	if err != nil {
		return nil, err
	}

	tableDeltas, err := diff.GetTableDeltas(ctx, fromRoot, toRoot)
	if err != nil {
		return nil, errhand.BuildDError("error: unable to diff tables").AddCause(err).Build()
	}

	// Deterministic output: process tables in name order.
	sort.Slice(tableDeltas, func(i, j int) bool {
		return strings.Compare(tableDeltas[i].ToName, tableDeltas[j].ToName) < 0
	})

	var finalRes []string
	for _, td := range tableDeltas {
		// Skip deltas for tables the user did not ask about (match either the old or new name).
		if !tableSet.Contains(td.FromName) && !tableSet.Contains(td.ToName) {
			continue
		}

		if td.FromTable == nil && td.ToTable == nil {
			return nil, errhand.BuildDError("error: both tables in tableDelta are nil").Build()
		}

		// Schema (DDL) statements come first for each table, then data (DML) statements.
		ddlStatements, err := getSchemaSqlPatch(ctx, toRoot, td)
		if err != nil {
			return nil, err
		}
		finalRes = append(finalRes, ddlStatements...)

		if canGetDataDiff(ctx, td) {
			res, err := getUserTableSqlPatch(ctx, dbData, td, fromRef, toRef)
			if err != nil {
				return nil, err
			}
			finalRes = append(finalRes, res...)
		}
	}

	return finalRes, nil
}
// getSchemaSqlPatch returns the DDL statements for the given table delta, using the to-revision
// root to look up schemas (needed for foreign-key parent tables).
func getSchemaSqlPatch(ctx *sql.Context, toRoot *doltdb.RootValue, td diff.TableDelta) ([]string, error) {
	schemasByTable, err := toRoot.GetAllSchemas(ctx)
	if err != nil {
		return nil, fmt.Errorf("could not read schemas from toRoot, cause: %s", err.Error())
	}
	return diff.SqlSchemaDiff(ctx, td, schemasByTable)
}
// canGetDataDiff reports whether a row-level (DML) diff can be produced for the given table delta.
// Dropped tables, tables whose primary key sets differ between revisions, and incompatible schema
// changes are excluded; the latter two cases emit a session warning explaining the skip.
func canGetDataDiff(ctx *sql.Context, td diff.TableDelta) bool {
	if td.IsDrop() {
		return false // don't output DELETE FROM statements after DROP TABLE
	}

	// not diffable
	if !schema.ArePrimaryKeySetsDiffable(td.Format(), td.FromSch, td.ToSch) {
		ctx.Session.Warn(&sql.Warning{
			Level:   "Warning",
			Code:    mysql.ERNotSupportedYet,
			Message: fmt.Sprintf("Primary key sets differ between revisions for table '%s', skipping data diff", td.ToName),
		})
		return false
	}

	// cannot sql diff
	if td.ToSch == nil || (td.FromSch != nil && !schema.SchemasAreEqual(td.FromSch, td.ToSch)) {
		// TODO(8/24/22 Zach): this is overly broad, we can absolutely do better
		ctx.Session.Warn(&sql.Warning{
			Level:   "Warning",
			Code:    mysql.ERNotSupportedYet,
			Message: fmt.Sprintf("Incompatible schema change, skipping data diff for table '%s'", td.ToName),
		})
		return false
	}

	return true
}
// getUserTableSqlPatch produces the row-level DML statements for a single table delta between the
// two given refs, rendered against the table's to-revision schema.
func getUserTableSqlPatch(ctx *sql.Context, dbData env.DbData, td diff.TableDelta, fromRef, toRef string) ([]string, error) {
	// ToTable is used as target table as cannot be nil at this point
	querySch, projections, rowIter, err := getDiffQuery(ctx, dbData, td, fromRef, toRef)
	if err != nil {
		return nil, err
	}

	toPkSch, err := sqlutil.FromDoltSchema(td.ToName, td.ToSch)
	if err != nil {
		return nil, err
	}

	return getDiffResults(ctx, querySch, toPkSch.Schema, projections, rowIter, td.ToName, td.ToSch)
}
// getDiffQuery returns diff schema for specified columns and array of sql.Expression as projection to be used
// on diff table function row iter. This function attempts to imitate running a query
// fmt.Sprintf("select %s, %s from dolt_diff('%s', '%s', '%s')", columnsWithDiff, "diff_type", fromRef, toRef, tableName)
// on sql engine, which returns the schema and rowIter of the final data diff result.
func getDiffQuery(ctx *sql.Context, dbData env.DbData, td diff.TableDelta, fromRef, toRef string) (sql.Schema, []sql.Expression, sql.RowIter, error) {
	diffTableSchema, j, err := dtables.GetDiffTableSchemaAndJoiner(td.ToTable.Format(), td.FromSch, td.ToSch)
	if err != nil {
		return nil, nil, nil, err
	}
	diffPKSch, err := sqlutil.FromDoltSchema("", diffTableSchema)
	if err != nil {
		return nil, nil, nil, err
	}

	// Build the from_/to_-prefixed column list and project the diff table schema down to it.
	columnsWithDiff := getColumnNamesWithDiff(td.FromSch, td.ToSch)
	diffSqlSch, projections := getDiffSqlSchema(diffPKSch.Schema, columnsWithDiff)

	// using arbitrary time since we do not care about the commit time in the result
	now := time.Now()
	dp := dtables.NewDiffPartition(td.ToTable, td.FromTable, toRef, fromRef, (*types.Timestamp)(&now), (*types.Timestamp)(&now), td.ToSch, td.FromSch)
	ri := dtables.NewDiffPartitionRowIter(*dp, dbData.Ddb, j)

	return diffSqlSch, projections, ri, nil
}
// getColumnNamesWithDiff lists the diff-table column names for the given schemas: every from-side
// column prefixed "from_" followed by every to-side column prefixed "to_". A nil schema on either
// side contributes nothing.
func getColumnNamesWithDiff(fromSch, toSch schema.Schema) []string {
	var names []string

	// collect appends each column of sch to names with the given prefix; no-op for a nil schema.
	collect := func(sch schema.Schema, prefix string) {
		if sch == nil {
			return
		}
		_ = sch.GetAllCols().Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
			names = append(names, fmt.Sprintf("%s%s", prefix, col.Name))
			return false, nil
		})
	}

	collect(fromSch, "from_")
	collect(toSch, "to_")
	return names
}
// getDiffSqlSchema returns the schema of columns with data diff and "diff_type". This is used for diff splitter.
// When extracting the diff schema, the ordering must follow the ordering of given columns
func getDiffSqlSchema(diffTableSch sql.Schema, columns []string) (sql.Schema, []sql.Expression) {
	// column pairs a resolved *sql.Column with its index in diffTableSch, so the projection
	// expressions can reference the correct field position.
	type column struct {
		sqlCol *sql.Column
		idx    int
	}
	// NOTE(review): append may write into the caller's backing array if it has spare capacity —
	// callers currently pass a freshly built slice, but confirm before reusing this elsewhere.
	columns = append(columns, "diff_type")
	colMap := make(map[string]*column)
	for _, c := range columns {
		colMap[c] = nil
	}

	var cols = make([]*sql.Column, len(columns))
	var getFieldCols = make([]sql.Expression, len(columns))

	// First pass: resolve each requested name against the diff table schema.
	for i, c := range diffTableSch {
		if _, ok := colMap[c.Name]; ok {
			colMap[c.Name] = &column{c, i}
		}
	}

	// Second pass: emit columns and GetField projections in the caller's requested order.
	// NOTE(review): assumes every requested column exists in diffTableSch — a missing name
	// leaves colMap[c] nil and panics here; verify callers always derive columns from the
	// same schemas that built diffTableSch.
	for i, c := range columns {
		col := colMap[c].sqlCol
		cols[i] = col
		getFieldCols[i] = expression.NewGetField(colMap[c].idx, col.Type, col.Name, col.Nullable)
	}

	return cols, getFieldCols
}
// getDiffResults drains the diff row iterator and converts each diff row into a SQL DML statement
// (INSERT/DELETE/UPDATE) against table tn with schema tsch, returning the statements in iteration
// order. Iteration ends at io.EOF; any other error aborts the patch.
func getDiffResults(ctx *sql.Context, diffQuerySch, targetSch sql.Schema, projections []sql.Expression, iter sql.RowIter, tn string, tsch schema.Schema) ([]string, error) {
	ds, err := diff.NewDiffSplitter(diffQuerySch, targetSch)
	if err != nil {
		return nil, err
	}

	var res []string
	for {
		r, err := iter.Next(ctx)
		if err == io.EOF {
			return res, nil
		} else if err != nil {
			return nil, err
		}

		// Project the raw diff row down to the requested columns plus diff_type.
		r, err = plan.ProjectRow(ctx, projections, r)
		if err != nil {
			return nil, err
		}

		// Split the combined row into its old-image and new-image halves.
		oldRow, newRow, err := ds.SplitDiffResultRow(r)
		if err != nil {
			return nil, err
		}

		// At most one statement is kept per diff row: for modified rows the old side
		// (ModifiedOld) yields an empty string and the new side's UPDATE takes effect.
		var stmt string
		if oldRow.Row != nil {
			stmt, err = diff.GetDataDiffStatement(tn, tsch, oldRow.Row, oldRow.RowDiff, oldRow.ColDiffs)
			if err != nil {
				return nil, err
			}
		}

		if newRow.Row != nil {
			stmt, err = diff.GetDataDiffStatement(tn, tsch, newRow.Row, newRow.RowDiff, newRow.ColDiffs)
			if err != nil {
				return nil, err
			}
		}

		if stmt != "" {
			res = append(res, stmt)
		}
	}
}
// parseRevisionsAndTablesArgs checks given arguments whether each refers to a revision or a table name.
// It returns from revision name, from root values, to revision name, to root values and potential table names.
// An argument that resolves as a commit spec is treated as a revision; the first argument that fails
// to resolve (and everything after it) is treated as table names.
func parseRevisionsAndTablesArgs(ctx *sql.Context, dbData env.DbData, doltDB *doltdb.DoltDB, roots doltdb.Roots, apr *argparser.ArgParseResults) (string, *doltdb.RootValue, string, *doltdb.RootValue, []string) {
	var fromRef, toRef string
	var fromRoot, toRoot *doltdb.RootValue

	// Defaults mirror `dolt diff`: STAGED..WORKING, or HEAD..STAGED with --cached.
	fromRoot = roots.Staged
	fromRef = "STAGED"
	toRoot = roots.Working
	toRef = "WORKING"
	if apr.Contains(cli.CachedFlag) {
		fromRoot = roots.Head
		fromRef = "HEAD"
		toRoot = roots.Staged
		toRef = "STAGED"
	}

	// `dolt diff`
	if apr.NArg() == 0 {
		return fromRef, fromRoot, toRef, toRoot, apr.Args
	}

	from, ok := diff.MaybeResolveRoot(ctx, dbData.Rsr, doltDB, apr.Args[0])
	if !ok {
		// `dolt diff [...tables]`
		return fromRef, fromRoot, toRef, toRoot, apr.Args
	}
	fromRoot = from
	fromRef = apr.Args[0]

	if apr.NArg() == 1 {
		// `dolt diff from_commit`
		return fromRef, fromRoot, toRef, toRoot, apr.Args[1:]
	}

	to, ok := diff.MaybeResolveRoot(ctx, dbData.Rsr, doltDB, apr.Args[1])
	if !ok {
		// `dolt diff from_commit [...tables]`
		return fromRef, fromRoot, toRef, toRoot, apr.Args[1:]
	}
	toRoot = to
	toRef = apr.Args[1]

	// `dolt diff from_commit to_commit [...tables]`
	return fromRef, fromRoot, toRef, toRoot, apr.Args[2:]
}
// validateTablesAndGetTablesSet takes array of table names or an empty array and returns the table names
// in string set type. If the array is empty, it returns union of table names on from and to roots.
// Every named table must exist in at least one of the two roots; otherwise an error is returned.
func validateTablesAndGetTablesSet(ctx context.Context, fromRoot, toRoot *doltdb.RootValue, tables []string) (*set.StrSet, error) {
	tableSet := set.NewStrSet(nil)

	// if no tables or docs were specified as args, diff all tables and docs
	if len(tables) == 0 {
		utn, err := doltdb.UnionTableNames(ctx, fromRoot, toRoot)
		if err != nil {
			return nil, err
		}
		tableSet.Add(utn...)
		return tableSet, nil
	}

	for _, tableName := range tables {
		// verify table args exist in at least one root
		_, ok, err := fromRoot.GetTable(ctx, tableName)
		if err != nil {
			return nil, err
		}
		if !ok {
			_, ok, err = toRoot.GetTable(ctx, tableName)
			if err != nil {
				return nil, err
			}
			if !ok {
				return nil, fmt.Errorf("table %s does not exist in either revision", tableName)
			}
		}
		// BUGFIX: previously a table present only in toRoot passed validation
		// but was never added to the set, so newly added tables were silently
		// dropped from the diff. Every validated table is now included.
		tableSet.Add(tableName)
	}

	return tableSet, nil
}
var _ sql.RowIter = (*patchRowIter)(nil)

// patchRowIter yields one single-column row per pre-computed SQL patch statement.
type patchRowIter struct {
	stmts []string // statements to emit, one per row
	idx   int      // index of the next statement to return
}

// newPatchRowIter returns a sql.RowIter over the given patch statements.
func newPatchRowIter(stmts []string) sql.RowIter {
	return &patchRowIter{stmts: stmts}
}

// Next returns the next statement wrapped in a one-column row, or io.EOF once
// all statements have been produced (or after Close, since len(nil) == 0).
//
// Fixes over the prior version: idx is only incremented when a row is actually
// returned (the deferred increment grew idx without bound on repeated EOF
// calls), and the redundant stmts == nil check after the length check is gone.
func (p *patchRowIter) Next(ctx *sql.Context) (sql.Row, error) {
	if p.idx >= len(p.stmts) {
		return nil, io.EOF
	}
	stmt := p.stmts[p.idx]
	p.idx++
	return sql.Row{stmt}, nil
}

// Close releases the statement slice and resets the iterator.
func (p *patchRowIter) Close(_ *sql.Context) error {
	p.stmts = nil
	p.idx = 0
	return nil
}
@@ -36,6 +36,7 @@ var DoltProcedures = []sql.ExternalStoredProcedureDetails{
{Name: "dolt_gc", Schema: int64Schema("success"), Function: doltGC},
{Name: "dolt_merge", Schema: int64Schema("fast_forward", "conflicts"), Function: doltMerge},
{Name: "dolt_patch", Schema: stringSchema("statement"), Function: doltPatch},
{Name: "dolt_pull", Schema: int64Schema("fast_forward", "conflicts"), Function: doltPull},
{Name: "dolt_push", Schema: int64Schema("success"), Function: doltPush},
{Name: "dolt_remote", Schema: int64Schema("status"), Function: doltRemote},
@@ -58,6 +59,7 @@ var DoltProcedures = []sql.ExternalStoredProcedureDetails{
// {Name: "dgc", Schema: int64Schema("status"), Function: doltGC},
{Name: "dmerge", Schema: int64Schema("fast_forward", "conflicts"), Function: doltMerge},
{Name: "dpatch", Schema: stringSchema("statement"), Function: doltPatch},
{Name: "dpull", Schema: int64Schema("fast_forward", "conflicts"), Function: doltPull},
{Name: "dpush", Schema: int64Schema("success"), Function: doltPush},
{Name: "dremote", Schema: int64Schema("status"), Function: doltRemote},
@@ -484,3 +484,66 @@ func maybeTime(t *time.Time) interface{} {
}
return nil
}
//------------------------------------
// diffPartitionRowIter
//------------------------------------

var _ sql.RowIter = (*diffPartitionRowIter)(nil)

// diffPartitionRowIter iterates over the rows of one or more diff partitions.
// When diffPartitions is nil (as with NewDiffPartitionRowIter), iteration is
// limited to the single currentPartition; otherwise, once a partition's rows
// are exhausted the next partition is pulled from diffPartitions.
type diffPartitionRowIter struct {
	diffPartitions   *DiffPartitions   // optional source of further partitions; may be nil
	ddb              *doltdb.DoltDB    // database used to materialize partition rows
	joiner           *rowconv.Joiner   // joiner passed through to DiffPartition.GetRowIter
	currentPartition *sql.Partition    // partition currently being iterated; nil when exhausted
	currentRowIter   *sql.RowIter      // row iterator for currentPartition; nil until first Next
}

// NewDiffPartitionRowIter returns an iterator over the rows of exactly one
// partition. diffPartitions is left nil, so Next returns io.EOF once the
// given partition is exhausted.
func NewDiffPartitionRowIter(partition sql.Partition, ddb *doltdb.DoltDB, joiner *rowconv.Joiner) *diffPartitionRowIter {
	return &diffPartitionRowIter{
		currentPartition: &partition,
		ddb:              ddb,
		joiner:           joiner,
	}
}

// Next returns the next diff row, advancing to the next partition (when
// diffPartitions is non-nil) as each partition's rows run out, and io.EOF
// when everything is exhausted.
func (itr *diffPartitionRowIter) Next(ctx *sql.Context) (sql.Row, error) {
	for {
		// Pull the next partition when the current one has been cleared.
		// NOTE(review): if currentPartition and diffPartitions are both nil
		// (e.g. calling Next again after io.EOF on a single-partition
		// iterator), this dereferences a nil diffPartitions — confirm callers
		// never re-call Next after EOF.
		if itr.currentPartition == nil {
			nextPartition, err := itr.diffPartitions.Next(ctx)
			if err != nil {
				return nil, err
			}
			itr.currentPartition = &nextPartition
		}
		// Lazily open a row iterator for the current partition.
		if itr.currentRowIter == nil {
			dp := (*itr.currentPartition).(DiffPartition)
			rowIter, err := dp.GetRowIter(ctx, itr.ddb, itr.joiner, sql.IndexLookup{})
			if err != nil {
				return nil, err
			}
			itr.currentRowIter = &rowIter
		}

		row, err := (*itr.currentRowIter).Next(ctx)
		if err == io.EOF {
			// Current partition is drained; either stop (single-partition
			// mode) or loop around to fetch the next partition.
			itr.currentPartition = nil
			itr.currentRowIter = nil
			if itr.diffPartitions == nil {
				return nil, err
			}
			continue
		} else if err != nil {
			return nil, err
		} else {
			return row, nil
		}
	}
}

// Close is a no-op; per-partition row iterators are not retained here.
func (itr *diffPartitionRowIter) Close(_ *sql.Context) error {
	return nil
}
@@ -130,7 +130,7 @@ func handleStagedUnstagedTables(staged, unstaged []diff.TableDelta, itr *StatusI
itr.statuses[idx] = tblDiffTypeToLabel[diff.RemovedTable]
} else if td.IsRename() {
itr.tables[idx] = fmt.Sprintf("%s -> %s", td.FromName, td.ToName)
itr.statuses[idx] = tblDiffTypeToLabel[diff.RemovedTable]
itr.statuses[idx] = tblDiffTypeToLabel[diff.RenamedTable]
} else {
itr.tables[idx] = td.CurName()
itr.statuses[idx] = tblDiffTypeToLabel[diff.ModifiedTable]
@@ -823,6 +823,53 @@ var DoltScripts = []queries.ScriptTest{
},
},
},
{
Name: "simple tests on DOLT_PATCH() stored procedure",
SetUpScript: []string{
"CREATE TABLE parent (id int PRIMARY KEY, id_ext int, v1 int, v2 text, INDEX v1 (v1));",
"CREATE TABLE child (id int primary key, v1 int);",
"CALL DOLT_COMMIT('-Am','added tables')",
"ALTER TABLE child ADD CONSTRAINT fk_named FOREIGN KEY (v1) REFERENCES parent(v1);",
"insert into parent values (0, 1, 2, NULL);",
"ALTER TABLE parent DROP PRIMARY KEY;",
"ALTER TABLE parent ADD PRIMARY KEY(id, id_ext);",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "CALL DOLT_PATCH()",
Expected: []sql.Row{
{"ALTER TABLE `child` ADD INDEX `v1`(`v1`);"},
{"ALTER TABLE `child` ADD CONSTRAINT `fk_named` FOREIGN KEY (`v1`) REFERENCES `parent` (`v1`);"},
{"ALTER TABLE `parent` DROP PRIMARY KEY;"},
{"ALTER TABLE `parent` ADD PRIMARY KEY (id,id_ext);"}},
},
{
Query: "CALL DOLT_PATCH('HEAD~')",
Expected: []sql.Row{
{"CREATE TABLE `child` (\n `id` int NOT NULL,\n `v1` int,\n PRIMARY KEY (`id`),\n KEY `v1` (`v1`),\n CONSTRAINT `fk_named` FOREIGN KEY (`v1`) REFERENCES `parent` (`v1`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;"},
{"CREATE TABLE `parent` (\n `id` int NOT NULL,\n `id_ext` int NOT NULL,\n `v1` int,\n `v2` text,\n PRIMARY KEY (`id`,`id_ext`),\n KEY `v1` (`v1`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin;"},
{"INSERT INTO `parent` (`id`,`id_ext`,`v1`,`v2`) VALUES (0,1,2,NULL);"}},
},
{
Query: "CALL DOLT_PATCH('child')",
Expected: []sql.Row{
{"ALTER TABLE `child` ADD INDEX `v1`(`v1`);"},
{"ALTER TABLE `child` ADD CONSTRAINT `fk_named` FOREIGN KEY (`v1`) REFERENCES `parent` (`v1`);"}},
},
{
Query: "SHOW WARNINGS;",
Expected: []sql.Row{
{"Warning", 1235, "Incompatible schema change, skipping data diff for table 'child'"}},
},
{
Query: "CALL DOLT_PATCH('HEAD','HEAD~','parent')",
Expected: []sql.Row{
{"DROP TABLE `parent`;"},
},
},
},
},
}
func makeLargeInsert(sz int) string {
@@ -2157,29 +2157,29 @@ var DiffSummaryTableFunctionScriptTests = []queries.ScriptTest{
{
// table is added, no data changes
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit2, 't');",
Expected: []sql.Row{{"t", "added", false, true}},
Expected: []sql.Row{{"", "t", "added", false, true}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit2, @Commit3, 't');",
Expected: []sql.Row{{"t", "modified", true, false}},
Expected: []sql.Row{{"t", "t", "modified", true, false}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit3, @Commit4, 't');",
Expected: []sql.Row{{"t", "modified", true, false}},
Expected: []sql.Row{{"t", "t", "modified", true, false}},
},
{
// change from and to commits
Query: "SELECT * from dolt_diff_summary(@Commit4, @Commit3, 't');",
Expected: []sql.Row{{"t", "modified", true, false}},
Expected: []sql.Row{{"t", "t", "modified", true, false}},
},
{
// table is dropped
Query: "SELECT * from dolt_diff_summary(@Commit4, @Commit5, 't');",
Expected: []sql.Row{{"t", "dropped", true, true}},
Expected: []sql.Row{{"t", "", "dropped", true, true}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit4, 't');",
Expected: []sql.Row{{"t", "added", true, true}},
Expected: []sql.Row{{"", "t", "added", true, true}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit5, 't');",
@@ -2220,28 +2220,28 @@ var DiffSummaryTableFunctionScriptTests = []queries.ScriptTest{
{
// table is added, no data diff, result is empty
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit2, 't');",
Expected: []sql.Row{{"t", "added", false, true}},
Expected: []sql.Row{{"", "t", "added", false, true}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit2, @Commit3, 't');",
Expected: []sql.Row{{"t", "modified", true, false}},
Expected: []sql.Row{{"t", "t", "modified", true, false}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit3, @Commit4, 't');",
Expected: []sql.Row{{"t", "modified", true, false}},
Expected: []sql.Row{{"t", "t", "modified", true, false}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit4, @Commit3, 't');",
Expected: []sql.Row{{"t", "modified", true, false}},
Expected: []sql.Row{{"t", "t", "modified", true, false}},
},
{
// table is dropped
Query: "SELECT * from dolt_diff_summary(@Commit4, @Commit5, 't');",
Expected: []sql.Row{{"t", "dropped", true, true}},
Expected: []sql.Row{{"t", "", "dropped", true, true}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit4, 't');",
Expected: []sql.Row{{"t", "added", true, true}},
Expected: []sql.Row{{"", "t", "added", true, true}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit5, 't');",
@@ -2287,32 +2287,47 @@ var DiffSummaryTableFunctionScriptTests = []queries.ScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT * from dolt_diff_summary(@Commit0, @Commit1);",
Expected: []sql.Row{{"t", "added", true, true}},
Expected: []sql.Row{{"", "t", "added", true, true}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit2);",
Expected: []sql.Row{{"t2", "added", true, true}},
Expected: []sql.Row{{"", "t2", "added", true, true}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit2, @Commit3);",
Expected: []sql.Row{{"t", "modified", true, false}, {"t2", "modified", true, false}},
Query: "SELECT * from dolt_diff_summary(@Commit2, @Commit3);",
Expected: []sql.Row{
{"t", "t", "modified", true, false},
{"t2", "t2", "modified", true, false},
},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit3, @Commit4);",
Expected: []sql.Row{{"t", "modified", true, false}, {"t2", "modified", true, false}},
Query: "SELECT * from dolt_diff_summary(@Commit3, @Commit4);",
Expected: []sql.Row{
{"t", "t", "modified", true, false},
{"t2", "t2", "modified", true, false},
},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit0, @Commit4);",
Expected: []sql.Row{{"t", "added", true, true}, {"t2", "added", true, true}},
Query: "SELECT * from dolt_diff_summary(@Commit0, @Commit4);",
Expected: []sql.Row{
{"", "t", "added", true, true},
{"", "t2", "added", true, true},
},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit4, @Commit2);",
Expected: []sql.Row{{"t", "modified", true, false}, {"t2", "modified", true, false}},
Expected: []sql.Row{
{"t", "t", "modified", true, false},
{"t2", "t2", "modified", true, false},
},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit3, 'WORKING');",
Expected: []sql.Row{{"t", "modified", true, false}, {"t2", "modified", true, false}, {"keyless", "added", false, true}},
Query: "SELECT * from dolt_diff_summary(@Commit3, 'WORKING');",
Expected: []sql.Row{
{"t", "t", "modified", true, false},
{"t2", "t2", "modified", true, false},
{"", "keyless", "added", false, true}},
},
},
},
@@ -2334,19 +2349,19 @@ var DiffSummaryTableFunctionScriptTests = []queries.ScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT * from dolt_diff_summary(@Commit1, 'WORKING', 't')",
Expected: []sql.Row{{"t", "modified", true, false}},
Expected: []sql.Row{{"t", "t", "modified", true, false}},
},
{
Query: "SELECT * from dolt_diff_summary('STAGED', 'WORKING', 't')",
Expected: []sql.Row{{"t", "modified", true, false}},
Expected: []sql.Row{{"t", "t", "modified", true, false}},
},
{
Query: "SELECT * from dolt_diff_summary('STAGED..WORKING', 't')",
Expected: []sql.Row{{"t", "modified", true, false}},
Expected: []sql.Row{{"t", "t", "modified", true, false}},
},
{
Query: "SELECT * from dolt_diff_summary('WORKING', 'STAGED', 't')",
Expected: []sql.Row{{"t", "modified", true, false}},
Expected: []sql.Row{{"t", "t", "modified", true, false}},
},
{
Query: "SELECT * from dolt_diff_summary('WORKING', 'WORKING', 't')",
@@ -2370,7 +2385,7 @@ var DiffSummaryTableFunctionScriptTests = []queries.ScriptTest{
},
{
Query: "SELECT * from dolt_diff_summary('HEAD', 'STAGED', 't')",
Expected: []sql.Row{{"t", "modified", true, false}},
Expected: []sql.Row{{"t", "t", "modified", true, false}},
},
},
},
@@ -2412,70 +2427,70 @@ var DiffSummaryTableFunctionScriptTests = []queries.ScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT * from dolt_diff_summary('main', 'branch1', 't');",
Expected: []sql.Row{{"t", "modified", true, true}},
Expected: []sql.Row{{"t", "t", "modified", true, true}},
},
{
Query: "SELECT * from dolt_diff_summary('main..branch1', 't');",
Expected: []sql.Row{{"t", "modified", true, true}},
Expected: []sql.Row{{"t", "t", "modified", true, true}},
},
{
Query: "SELECT * from dolt_diff_summary('main', 'branch1');",
Expected: []sql.Row{
{"t", "modified", true, true},
{"newtable", "dropped", true, true},
{"t", "t", "modified", true, true},
{"newtable", "", "dropped", true, true},
},
},
{
Query: "SELECT * from dolt_diff_summary('main..branch1');",
Expected: []sql.Row{
{"t", "modified", true, true},
{"newtable", "dropped", true, true},
{"t", "t", "modified", true, true},
{"newtable", "", "dropped", true, true},
},
},
{
Query: "SELECT * from dolt_diff_summary('branch1', 'main', 't');",
Expected: []sql.Row{{"t", "modified", true, true}},
Expected: []sql.Row{{"t", "t", "modified", true, true}},
},
{
Query: "SELECT * from dolt_diff_summary('branch1..main', 't');",
Expected: []sql.Row{{"t", "modified", true, true}},
Expected: []sql.Row{{"t", "t", "modified", true, true}},
},
{
Query: "SELECT * from dolt_diff_summary('main~2', 'branch1', 't');",
Expected: []sql.Row{{"t", "modified", true, true}},
Expected: []sql.Row{{"t", "t", "modified", true, true}},
},
{
Query: "SELECT * from dolt_diff_summary('main~2..branch1', 't');",
Expected: []sql.Row{{"t", "modified", true, true}},
Expected: []sql.Row{{"t", "t", "modified", true, true}},
},
// Three dot
{
Query: "SELECT * from dolt_diff_summary('main...branch1', 't');",
Expected: []sql.Row{{"t", "modified", true, true}},
Expected: []sql.Row{{"t", "t", "modified", true, true}},
},
{
Query: "SELECT * from dolt_diff_summary('main...branch1');",
Expected: []sql.Row{{"t", "modified", true, true}},
Expected: []sql.Row{{"t", "t", "modified", true, true}},
},
{
Query: "SELECT * from dolt_diff_summary('branch1...main', 't');",
Expected: []sql.Row{{"t", "modified", true, false}},
Expected: []sql.Row{{"t", "t", "modified", true, false}},
},
{
Query: "SELECT * from dolt_diff_summary('branch1...main');",
Expected: []sql.Row{
{"t", "modified", true, false},
{"newtable", "added", true, true},
{"t", "t", "modified", true, false},
{"", "newtable", "added", true, true},
},
},
{
Query: "SELECT * from dolt_diff_summary('branch1...main^');",
Expected: []sql.Row{{"t", "modified", true, false}},
Expected: []sql.Row{{"t", "t", "modified", true, false}},
},
{
Query: "SELECT * from dolt_diff_summary('branch1...main', 'newtable');",
Expected: []sql.Row{{"newtable", "added", true, true}},
Expected: []sql.Row{{"", "newtable", "added", true, true}},
},
{
Query: "SELECT * from dolt_diff_summary('main...main', 'newtable');",
@@ -2516,27 +2531,27 @@ var DiffSummaryTableFunctionScriptTests = []queries.ScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit2, 't');",
Expected: []sql.Row{{"t", "modified", true, true}},
Expected: []sql.Row{{"t", "t", "modified", true, true}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit2, @Commit3, 't');",
Expected: []sql.Row{{"t", "modified", true, false}},
Expected: []sql.Row{{"t", "t", "modified", true, false}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit3, 't');",
Expected: []sql.Row{{"t", "modified", true, true}},
Expected: []sql.Row{{"t", "t", "modified", true, true}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit3, @Commit4, 't');",
Expected: []sql.Row{{"t", "modified", true, true}},
Expected: []sql.Row{{"t", "t", "modified", true, true}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit3, @Commit5, 't');",
Expected: []sql.Row{{"t", "modified", true, true}},
Expected: []sql.Row{{"t", "t", "modified", true, true}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit5, 't');",
Expected: []sql.Row{{"t", "modified", true, false}},
Expected: []sql.Row{{"t", "t", "modified", true, false}},
},
},
},
@@ -2573,26 +2588,27 @@ var DiffSummaryTableFunctionScriptTests = []queries.ScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit2, 't');",
Expected: []sql.Row{{"t", "modified", true, false}},
Expected: []sql.Row{{"t", "t", "modified", true, false}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit2, @Commit3, 't');",
Expected: []sql.Row{{"t", "modified", true, true}}, // TODO: Data change should be false for renamed column
Expected: []sql.Row{{"t", "t", "modified", true, true}}, // TODO: Data change should be false for renamed column
},
{
Query: "SELECT * from dolt_diff_summary(@Commit3, @Commit4, 't');",
Expected: []sql.Row{{"t", "modified", true, false}},
Expected: []sql.Row{{"t", "t", "modified", true, false}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit4, @Commit5, 't');",
Expected: []sql.Row{{"t", "modified", true, true}},
Expected: []sql.Row{{"t", "t", "modified", true, true}},
},
{
Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit5, 't');",
Expected: []sql.Row{{"t", "modified", true, false}},
Expected: []sql.Row{{"t", "t", "modified", true, false}},
},
},
},
{
Name: "new table",
SetUpScript: []string{
@@ -2601,11 +2617,11 @@ var DiffSummaryTableFunctionScriptTests = []queries.ScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "select * from dolt_diff_summary('HEAD', 'WORKING')",
Expected: []sql.Row{{"t1", "added", false, true}},
Expected: []sql.Row{{"", "t1", "added", false, true}},
},
{
Query: "select * from dolt_diff_summary('WORKING', 'HEAD')",
Expected: []sql.Row{{"t1", "dropped", false, true}},
Expected: []sql.Row{{"t1", "", "dropped", false, true}},
},
{
Query: "insert into t1 values (1,2)",
@@ -2613,14 +2629,15 @@ var DiffSummaryTableFunctionScriptTests = []queries.ScriptTest{
},
{
Query: "select * from dolt_diff_summary('HEAD', 'WORKING', 't1')",
Expected: []sql.Row{{"t1", "added", true, true}},
Expected: []sql.Row{{"", "t1", "added", true, true}},
},
{
Query: "select * from dolt_diff_summary('WORKING', 'HEAD', 't1')",
Expected: []sql.Row{{"t1", "dropped", true, true}},
Expected: []sql.Row{{"t1", "", "dropped", true, true}},
},
},
},
{
Name: "dropped table",
SetUpScript: []string{
@@ -2634,14 +2651,15 @@ var DiffSummaryTableFunctionScriptTests = []queries.ScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "select * from dolt_diff_summary('HEAD~', 'HEAD', 't1')",
Expected: []sql.Row{{"t1", "dropped", true, true}},
Expected: []sql.Row{{"t1", "", "dropped", true, true}},
},
{
Query: "select * from dolt_diff_summary('HEAD', 'HEAD~', 't1')",
Expected: []sql.Row{{"t1", "added", true, true}},
Expected: []sql.Row{{"", "t1", "added", true, true}},
},
},
},
{
Name: "renamed table",
SetUpScript: []string{
@@ -2657,32 +2675,33 @@ var DiffSummaryTableFunctionScriptTests = []queries.ScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "select * from dolt_diff_summary('HEAD~', 'HEAD', 't2')",
Expected: []sql.Row{{"t2", "renamed", true, true}},
Expected: []sql.Row{{"t1", "t2", "renamed", true, true}},
},
{
Query: "select * from dolt_diff_summary('HEAD~..HEAD', 't2')",
Expected: []sql.Row{{"t2", "renamed", true, true}},
Expected: []sql.Row{{"t1", "t2", "renamed", true, true}},
},
{
Query: "select * from dolt_diff_summary('HEAD~', 'HEAD')",
Expected: []sql.Row{{"t2", "renamed", true, true}},
Expected: []sql.Row{{"t1", "t2", "renamed", true, true}},
},
{
Query: "select * from dolt_diff_summary('HEAD~..HEAD')",
Expected: []sql.Row{{"t2", "renamed", true, true}},
Expected: []sql.Row{{"t1", "t2", "renamed", true, true}},
},
{
// Old table name can be matched as well
Query: "select * from dolt_diff_summary('HEAD~', 'HEAD', 't1')",
Expected: []sql.Row{{"t1", "renamed", true, true}},
Expected: []sql.Row{{"t1", "t2", "renamed", true, true}},
},
{
// Old table name can be matched as well
Query: "select * from dolt_diff_summary('HEAD~..HEAD', 't1')",
Expected: []sql.Row{{"t1", "renamed", true, true}},
Expected: []sql.Row{{"t1", "t2", "renamed", true, true}},
},
},
},
{
Name: "add multiple columns, then set and unset a value. Should not show a diff",
SetUpScript: []string{
@@ -2701,7 +2720,7 @@ var DiffSummaryTableFunctionScriptTests = []queries.ScriptTest{
Assertions: []queries.ScriptTestAssertion{
{
Query: "SELECT * from dolt_diff_summary('HEAD~2', 'HEAD');",
Expected: []sql.Row{{"t", "modified", true, true}},
Expected: []sql.Row{{"t", "t", "modified", true, true}},
},
{
Query: "SELECT * from dolt_diff_summary('HEAD~', 'HEAD');",
@@ -2745,7 +2764,7 @@ var DiffSummaryTableFunctionScriptTests = []queries.ScriptTest{
{
Query: "SELECT * from dolt_diff_summary('HEAD~', 'HEAD')",
Expected: []sql.Row{
{"t2", "modified", true, false},
{"t2", "t2", "modified", true, false},
},
ExpectedWarning: dtables.PrimaryKeyChangeWarningCode,
ExpectedWarningsCount: 1,
+36 -103
View File
@@ -15,137 +15,70 @@
package sqlfmt
import (
"fmt"
"strings"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
)
// FmtCol converts a column to a string with a given indent space count, name width, and type width. If nameWidth or
// typeWidth are 0 or less than the length of the name or type, then the length of the name or type will be used
func FmtCol(indent, nameWidth, typeWidth int, col schema.Column) string {
sqlType := col.TypeInfo.ToSqlType()
return FmtColWithNameAndType(indent, nameWidth, typeWidth, col.Name, sqlType.String(), col)
// GenerateCreateTableColumnDefinition returns column definition for CREATE TABLE statement with no indentation
func GenerateCreateTableColumnDefinition(col schema.Column) string {
	colStr := sql.GenerateCreateTableColumnDefinition(col.Name, col.TypeInfo.ToSqlType(), col.IsNullable(), col.AutoIncrement, col.Default != "", col.Default, col.Comment)
	// Strip the single leading space GMS prepends — presumably intended for
	// use inside an indented CREATE TABLE body; verify against GMS output.
	return strings.TrimPrefix(colStr, " ")
}
// FmtColWithNameAndType creates a string representing a column within a sql create table statement with a given indent
// space count, name width, and type width. If nameWidth or typeWidth are 0 or less than the length of the name or
// type, then the length of the name or type will be used.
func FmtColWithNameAndType(indent, nameWidth, typeWidth int, colName, typeStr string, col schema.Column) string {
colName = QuoteIdentifier(colName)
fmtStr := fmt.Sprintf("%%%ds%%%ds %%%ds", indent, nameWidth, typeWidth)
colStr := fmt.Sprintf(fmtStr, "", colName, typeStr)
for _, cnst := range col.Constraints {
switch cnst.GetConstraintType() {
case schema.NotNullConstraintType:
colStr += " NOT NULL"
default:
panic("FmtColWithNameAndType doesn't know how to format constraint type: " + cnst.GetConstraintType())
}
}
if col.AutoIncrement {
colStr += " AUTO_INCREMENT"
}
if col.Default != "" {
colStr += " DEFAULT " + col.Default
}
if col.Comment != "" {
colStr += " COMMENT " + QuoteComment(col.Comment)
}
return colStr
// GenerateCreateTableIndentedColumnDefinition returns column definition for CREATE TABLE statement,
// keeping the leading indentation produced by GMS (unlike GenerateCreateTableColumnDefinition,
// which trims it).
func GenerateCreateTableIndentedColumnDefinition(col schema.Column) string {
	return sql.GenerateCreateTableColumnDefinition(col.Name, col.TypeInfo.ToSqlType(), col.IsNullable(), col.AutoIncrement, col.Default != "", col.Default, col.Comment)
}
// FmtColPrimaryKey creates a string representing a primary key constraint within a sql create table statement with a
// given indent.
func FmtColPrimaryKey(indent int, colStr string, newline bool) string {
st := "%%%ds PRIMARY KEY (%s)"
if newline {
st += "\n"
}
fmtStr := fmt.Sprintf(st, indent, colStr)
return fmt.Sprintf(fmtStr, "")
// GenerateCreateTableIndexDefinition returns index definition for CREATE TABLE statement with indentation of 2 spaces
func GenerateCreateTableIndexDefinition(index schema.Index) string {
	// Column names are backtick-quoted before being handed to GMS.
	return sql.GenerateCreateTableIndexDefinition(index.IsUnique(), index.IsSpatial(), index.Name(), sql.QuoteIdentifiers(index.ColumnNames()), index.Comment())
}
func FmtIndex(index schema.Index) string {
sb := strings.Builder{}
if index.IsUnique() {
sb.WriteString("UNIQUE ")
}
sb.WriteString("INDEX ")
sb.WriteString(QuoteIdentifier(index.Name()))
sb.WriteString(" (")
for i, indexColName := range index.ColumnNames() {
if i != 0 {
sb.WriteRune(',')
}
sb.WriteString(QuoteIdentifier(indexColName))
}
sb.WriteRune(')')
if len(index.Comment()) > 0 {
sb.WriteString(" COMMENT ")
sb.WriteString(QuoteComment(index.Comment()))
}
return sb.String()
}
func FmtForeignKey(fk doltdb.ForeignKey, sch, parentSch schema.Schema) string {
sb := strings.Builder{}
sb.WriteString("CONSTRAINT ")
sb.WriteString(QuoteIdentifier(fk.Name))
sb.WriteString(" FOREIGN KEY (")
// GenerateCreateTableForeignKeyDefinition returns foreign key definition for CREATE TABLE statement with indentation of 2 spaces
func GenerateCreateTableForeignKeyDefinition(fk doltdb.ForeignKey, sch, parentSch schema.Schema) string {
var fkCols []string
if fk.IsResolved() {
for i, tag := range fk.TableColumns {
if i != 0 {
sb.WriteRune(',')
}
for _, tag := range fk.TableColumns {
c, _ := sch.GetAllCols().GetByTag(tag)
sb.WriteString(QuoteIdentifier(c.Name))
fkCols = append(fkCols, c.Name)
}
} else {
for i, col := range fk.UnresolvedFKDetails.TableColumns {
if i != 0 {
sb.WriteRune(',')
}
sb.WriteString(QuoteIdentifier(col))
for _, col := range fk.UnresolvedFKDetails.TableColumns {
fkCols = append(fkCols, col)
}
}
sb.WriteString(")\n REFERENCES ")
sb.WriteString(QuoteIdentifier(fk.ReferencedTableName))
sb.WriteString(" (")
var parentCols []string
if fk.IsResolved() {
for i, tag := range fk.ReferencedTableColumns {
if i != 0 {
sb.WriteRune(',')
}
for _, tag := range fk.ReferencedTableColumns {
c, _ := parentSch.GetAllCols().GetByTag(tag)
sb.WriteString(QuoteIdentifier(c.Name))
parentCols = append(parentCols, c.Name)
}
} else {
for i, col := range fk.UnresolvedFKDetails.ReferencedTableColumns {
if i != 0 {
sb.WriteRune(',')
}
sb.WriteString(QuoteIdentifier(col))
for _, col := range fk.UnresolvedFKDetails.ReferencedTableColumns {
parentCols = append(parentCols, col)
}
}
sb.WriteRune(')')
onDelete := ""
if fk.OnDelete != doltdb.ForeignKeyReferentialAction_DefaultAction {
sb.WriteString("\n ON DELETE ")
sb.WriteString(fk.OnDelete.String())
onDelete = fk.OnDelete.String()
}
onUpdate := ""
if fk.OnUpdate != doltdb.ForeignKeyReferentialAction_DefaultAction {
sb.WriteString("\n ON UPDATE ")
sb.WriteString(fk.OnUpdate.String())
onUpdate = fk.OnUpdate.String()
}
return sb.String()
return sql.GenerateCreateTableForiegnKeyDefinition(fk.Name, fkCols, fk.ReferencedTableName, parentCols, onDelete, onUpdate)
}
// GenerateCreateTableCheckConstraintClause returns check constraint clause definition for CREATE TABLE statement with indentation of 2 spaces
func GenerateCreateTableCheckConstraintClause(check schema.Check) string {
	// Delegates entirely to GMS; name, expression, and enforcement are passed through.
	return sql.GenerateCreateTableCheckConstraintClause(check.Name(), check.Expression(), check.Enforced())
}
func DropTableStmt(tableName string) string {
@@ -1,70 +0,0 @@
// Copyright 2020 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sqlfmt
import (
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/store/types"
"testing"
"github.com/stretchr/testify/assert"
)
// TestFmtCol verifies that FmtCol renders a column definition honoring the
// requested indent, name width, and type width.
func TestFmtCol(t *testing.T) {
	cases := []struct {
		col       schema.Column
		indent    int
		nameWidth int
		typeWidth int
		want      string
	}{
		{schema.NewColumn("first", 0, types.StringKind, true), 0, 0, 0, "`first` varchar(16383)"},
		{schema.NewColumn("last", 123, types.IntKind, true), 2, 0, 0, "  `last` bigint"},
		{schema.NewColumn("title", 2, types.UintKind, true), 0, 10, 0, "   `title` bigint unsigned"},
		{schema.NewColumn("aoeui", 52, types.UintKind, true), 0, 10, 15, "   `aoeui` bigint unsigned"},
	}
	for _, tc := range cases {
		t.Run(tc.want, func(t *testing.T) {
			got := FmtCol(tc.indent, tc.nameWidth, tc.typeWidth, tc.col)
			assert.Equal(t, tc.want, got)
		})
	}
}
@@ -23,10 +23,8 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/diff"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlfmt"
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
"github.com/dolthub/dolt/go/libraries/utils/iohelp"
"github.com/dolthub/dolt/go/libraries/utils/set"
)
type SqlDiffWriter struct {
@@ -50,51 +48,12 @@ func NewSqlDiffWriter(tableName string, schema schema.Schema, wr io.WriteCloser)
}
}
func (w SqlDiffWriter) WriteRow(
ctx context.Context,
row sql.Row,
rowDiffType diff.ChangeType,
colDiffTypes []diff.ChangeType,
) error {
if len(row) != len(colDiffTypes) {
return fmt.Errorf("expected the same size for columns and diff types, got %d and %d", len(row), len(colDiffTypes))
}
switch rowDiffType {
case diff.Added:
stmt, err := sqlfmt.SqlRowAsInsertStmt(row, w.tableName, w.sch)
if err != nil {
return err
}
return iohelp.WriteLine(w.writeCloser, stmt)
case diff.Removed:
stmt, err := sqlfmt.SqlRowAsDeleteStmt(row, w.tableName, w.sch, 0)
if err != nil {
return err
}
return iohelp.WriteLine(w.writeCloser, stmt)
case diff.ModifiedNew:
updatedCols := set.NewEmptyStrSet()
for i, diffType := range colDiffTypes {
if diffType != diff.None {
updatedCols.Add(w.sch.GetAllCols().GetByIndex(i).Name)
}
}
stmt, err := sqlfmt.SqlRowAsUpdateStmt(row, w.tableName, w.sch, updatedCols)
if err != nil {
return err
}
return iohelp.WriteLine(w.writeCloser, stmt)
case diff.ModifiedOld:
// do nothing, we only issue UPDATE for ModifiedNew
return nil
default:
return fmt.Errorf("unexpected row diff type: %v", rowDiffType)
// WriteRow converts a single diff row into a SQL data-diff statement via
// diff.GetDataDiffStatement and writes it, newline-terminated, to the
// underlying writer. rowDiffType describes the whole-row change and
// colDiffTypes carries one change type per column.
func (w SqlDiffWriter) WriteRow(ctx context.Context, row sql.Row, rowDiffType diff.ChangeType, colDiffTypes []diff.ChangeType) error {
	stmt, err := diff.GetDataDiffStatement(w.tableName, w.sch, row, rowDiffType, colDiffTypes)
	if err != nil {
		return err
	}
	// NOTE(review): GetDataDiffStatement may yield an empty statement for diff
	// types that produce no SQL — confirm an empty line here is acceptable.
	return iohelp.WriteLine(w.writeCloser, stmt)
}
func (w SqlDiffWriter) WriteCombinedRow(ctx context.Context, oldRow, newRow sql.Row, mode diff.Mode) error {
+2 -1
View File
@@ -518,6 +518,7 @@ func (mp manualPart) run(ctx context.Context, buff []byte) error {
if err != nil {
return err
}
defer reader.Close()
_, err = io.ReadFull(reader, buff[mp.start:mp.end])
return err
}
@@ -613,7 +614,7 @@ func (s3p awsTablePersister) uploadPart(ctx context.Context, data []byte, key, u
return
}
func (s3p awsTablePersister) PruneTableFiles(ctx context.Context, contents manifestContents, t time.Time) error {
func (s3p awsTablePersister) PruneTableFiles(ctx context.Context, keeper func() []addr, t time.Time) error {
return chunks.ErrUnsupportedOperation
}
+2 -1
View File
@@ -108,8 +108,9 @@ func (suite *BlockStoreSuite) TestChunkStoreMissingDir() {
func (suite *BlockStoreSuite) TestChunkStoreNotDir() {
existingFile := filepath.Join(suite.dir, "path-exists-but-is-a-file")
_, err := os.Create(existingFile)
f, err := os.Create(existingFile)
suite.NoError(err)
defer f.Close()
_, err = NewLocalStore(context.Background(), constants.FormatDefaultString, existingFile, testMemTableSize, NewUnlimitedMemQuotaProvider())
suite.Error(err)
+1 -1
View File
@@ -166,7 +166,7 @@ func (bsp *blobstorePersister) Exists(ctx context.Context, name addr, chunkCount
return bsp.bs.Exists(ctx, name.String())
}
func (bsp *blobstorePersister) PruneTableFiles(ctx context.Context, contents manifestContents, t time.Time) error {
func (bsp *blobstorePersister) PruneTableFiles(ctx context.Context, keeper func() []addr, t time.Time) error {
return nil
}
+8 -9
View File
@@ -91,7 +91,7 @@ func (c noopConjoiner) chooseConjoinees(sources []tableSpec) (conjoinees, keeper
// process actor has already landed a conjoin of its own. Callers must
// handle this, likely by rebasing against upstream and re-evaluating the
// situation.
func conjoin(ctx context.Context, s conjoinStrategy, upstream manifestContents, mm manifestUpdater, p tablePersister, stats *Stats) (manifestContents, error) {
func conjoin(ctx context.Context, s conjoinStrategy, upstream manifestContents, mm manifestUpdater, p tablePersister, stats *Stats) (manifestContents, cleanupFunc, error) {
var conjoined tableSpec
var conjoinees, keepers, appendixSpecs []tableSpec
var cleanup cleanupFunc
@@ -108,12 +108,12 @@ func conjoin(ctx context.Context, s conjoinStrategy, upstream manifestContents,
var err error
conjoinees, keepers, err = s.chooseConjoinees(upstream.specs)
if err != nil {
return manifestContents{}, err
return manifestContents{}, nil, err
}
conjoined, cleanup, err = conjoinTables(ctx, conjoinees, p, stats)
if err != nil {
return manifestContents{}, err
return manifestContents{}, nil, err
}
}
@@ -137,12 +137,11 @@ func conjoin(ctx context.Context, s conjoinStrategy, upstream manifestContents,
var err error
upstream, err = mm.Update(ctx, upstream.lock, newContents, stats, nil)
if err != nil {
return manifestContents{}, err
return manifestContents{}, nil, err
}
if newContents.lock == upstream.lock {
cleanup()
return upstream, nil
return upstream, cleanup, nil
}
// Optimistic lock failure. Someone else moved to the root, the
@@ -158,11 +157,11 @@ func conjoin(ctx context.Context, s conjoinStrategy, upstream manifestContents,
// and let the client retry
if len(appendixSpecs) > 0 {
if len(upstream.appendix) != len(appendixSpecs) {
return upstream, nil
return upstream, func() {}, nil
}
for i := range upstream.appendix {
if upstream.appendix[i].name != appendixSpecs[i].name {
return upstream, nil
return upstream, func() {}, nil
}
}
@@ -179,7 +178,7 @@ func conjoin(ctx context.Context, s conjoinStrategy, upstream manifestContents,
}
for _, c := range conjoinees {
if _, present := upstreamNames[c.name]; !present {
return upstream, nil // Bail!
return upstream, func() {}, nil // Bail!
}
conjoineeSet[c.name] = struct{}{}
}
+8 -8
View File
@@ -206,7 +206,7 @@ func testConjoin(t *testing.T, factory func(t *testing.T) tablePersister) {
t.Run(c.name, func(t *testing.T) {
fm, p, upstream := setup(startLock, startRoot, c.precompact)
_, err := conjoin(context.Background(), inlineConjoiner{}, upstream, fm, p, stats)
_, _, err := conjoin(context.Background(), inlineConjoiner{}, upstream, fm, p, stats)
require.NoError(t, err)
exists, newUpstream, err := fm.ParseIfExists(context.Background(), stats, nil)
require.NoError(t, err)
@@ -227,7 +227,7 @@ func testConjoin(t *testing.T, factory func(t *testing.T) tablePersister) {
specs := append([]tableSpec{}, upstream.specs...)
fm.set(constants.NomsVersion, computeAddr([]byte("lock2")), startRoot, append(specs, newTable), nil)
}}
_, err := conjoin(context.Background(), inlineConjoiner{}, upstream, u, p, stats)
_, _, err := conjoin(context.Background(), inlineConjoiner{}, upstream, u, p, stats)
require.NoError(t, err)
exists, newUpstream, err := fm.ParseIfExists(context.Background(), stats, nil)
require.NoError(t, err)
@@ -247,7 +247,7 @@ func testConjoin(t *testing.T, factory func(t *testing.T) tablePersister) {
u := updatePreemptManifest{fm, func() {
fm.set(constants.NomsVersion, computeAddr([]byte("lock2")), startRoot, upstream.specs[1:], nil)
}}
_, err := conjoin(context.Background(), inlineConjoiner{}, upstream, u, p, stats)
_, _, err := conjoin(context.Background(), inlineConjoiner{}, upstream, u, p, stats)
require.NoError(t, err)
exists, newUpstream, err := fm.ParseIfExists(context.Background(), stats, nil)
require.NoError(t, err)
@@ -289,7 +289,7 @@ func testConjoin(t *testing.T, factory func(t *testing.T) tablePersister) {
t.Run(c.name, func(t *testing.T) {
fm, p, upstream := setupAppendix(startLock, startRoot, c.precompact, c.appendix)
_, err := conjoin(context.Background(), inlineConjoiner{}, upstream, fm, p, stats)
_, _, err := conjoin(context.Background(), inlineConjoiner{}, upstream, fm, p, stats)
require.NoError(t, err)
exists, newUpstream, err := fm.ParseIfExists(context.Background(), stats, nil)
require.NoError(t, err)
@@ -313,7 +313,7 @@ func testConjoin(t *testing.T, factory func(t *testing.T) tablePersister) {
fm.set(constants.NomsVersion, computeAddr([]byte("lock2")), startRoot, append(specs, newTable), upstream.appendix)
}}
_, err := conjoin(context.Background(), inlineConjoiner{}, upstream, u, p, stats)
_, _, err := conjoin(context.Background(), inlineConjoiner{}, upstream, u, p, stats)
require.NoError(t, err)
exists, newUpstream, err := fm.ParseIfExists(context.Background(), stats, nil)
require.NoError(t, err)
@@ -338,7 +338,7 @@ func testConjoin(t *testing.T, factory func(t *testing.T) tablePersister) {
fm.set(constants.NomsVersion, computeAddr([]byte("lock2")), startRoot, append(specs, upstream.specs...), append(app, newTable))
}}
_, err := conjoin(context.Background(), inlineConjoiner{}, upstream, u, p, stats)
_, _, err := conjoin(context.Background(), inlineConjoiner{}, upstream, u, p, stats)
require.NoError(t, err)
exists, newUpstream, err := fm.ParseIfExists(context.Background(), stats, nil)
require.NoError(t, err)
@@ -362,7 +362,7 @@ func testConjoin(t *testing.T, factory func(t *testing.T) tablePersister) {
u := updatePreemptManifest{fm, func() {
fm.set(constants.NomsVersion, computeAddr([]byte("lock2")), startRoot, upstream.specs[len(c.appendix)+1:], upstream.appendix[:])
}}
_, err := conjoin(context.Background(), inlineConjoiner{}, upstream, u, p, stats)
_, _, err := conjoin(context.Background(), inlineConjoiner{}, upstream, u, p, stats)
require.NoError(t, err)
exists, newUpstream, err := fm.ParseIfExists(context.Background(), stats, nil)
require.NoError(t, err)
@@ -386,7 +386,7 @@ func testConjoin(t *testing.T, factory func(t *testing.T) tablePersister) {
fm.set(constants.NomsVersion, computeAddr([]byte("lock2")), startRoot, specs, append([]tableSpec{}, newTable))
}}
_, err := conjoin(context.Background(), inlineConjoiner{}, upstream, u, p, stats)
_, _, err := conjoin(context.Background(), inlineConjoiner{}, upstream, u, p, stats)
require.NoError(t, err)
exists, newUpstream, err := fm.ParseIfExists(context.Background(), stats, nil)
require.NoError(t, err)
+156 -46
View File
@@ -26,10 +26,12 @@ import (
"context"
"errors"
"io"
"io/fs"
"os"
"path"
"path/filepath"
"strings"
"sync"
"time"
"github.com/dolthub/dolt/go/libraries/utils/file"
@@ -39,12 +41,26 @@ import (
const tempTablePrefix = "nbs_table_"
func newFSTablePersister(dir string, q MemoryQuotaProvider) tablePersister {
return &fsTablePersister{dir, q}
return &fsTablePersister{dir, q, sync.Mutex{}, nil, make(map[string]struct{})}
}
type fsTablePersister struct {
dir string
q MemoryQuotaProvider
// Protects the following two maps.
removeMu sync.Mutex
// While we are running PruneTableFiles, any newly created table files are
// added to this map. The file delete loop will never delete anything which
// appears in this map. Files should be added to this map before they are
// written.
toKeep map[string]struct{}
// Any temp files we are currently writing are always present in this map.
// The logic should be taken before we generate the new temp file, and the
// new temp file should be added to this map. Care should be taken to always
// remove the entry from this map when we are done processing the temp file
// or else this map will grow without bound.
curTmps map[string]struct{}
}
var _ tablePersister = &fsTablePersister{}
@@ -55,6 +71,11 @@ func (ftp *fsTablePersister) Open(ctx context.Context, name addr, chunkCount uin
}
func (ftp *fsTablePersister) Exists(ctx context.Context, name addr, chunkCount uint32, stats *Stats) (bool, error) {
ftp.removeMu.Lock()
defer ftp.removeMu.Unlock()
if ftp.toKeep != nil {
ftp.toKeep[filepath.Join(ftp.dir, name.String())] = struct{}{}
}
return tableFileExists(ctx, ftp.dir, name)
}
@@ -63,7 +84,6 @@ func (ftp *fsTablePersister) Persist(ctx context.Context, mt *memTable, haver ch
defer stats.PersistLatency.SampleTimeSince(t1)
name, data, chunkCount, err := mt.write(haver, stats)
if err != nil {
return emptyChunkSource{}, err
}
@@ -76,7 +96,7 @@ func (ftp *fsTablePersister) Path() string {
}
func (ftp *fsTablePersister) CopyTableFile(ctx context.Context, r io.ReadCloser, fileId string, fileSz uint64, chunkCount uint32) error {
tn, err := func() (n string, err error) {
tn, f, err := func() (n string, cleanup func(), err error) {
defer func() {
cerr := r.Close()
if err == nil {
@@ -84,10 +104,20 @@ func (ftp *fsTablePersister) CopyTableFile(ctx context.Context, r io.ReadCloser,
}
}()
ftp.removeMu.Lock()
var temp *os.File
temp, err = tempfiles.MovableTempFileProvider.NewFile(ftp.dir, tempTablePrefix)
if err != nil {
return "", err
ftp.removeMu.Unlock()
return "", func() {}, err
}
ftp.curTmps[filepath.Clean(temp.Name())] = struct{}{}
ftp.removeMu.Unlock()
cleanup = func() {
ftp.removeMu.Lock()
delete(ftp.curTmps, filepath.Clean(temp.Name()))
ftp.removeMu.Unlock()
}
defer func() {
@@ -99,21 +129,32 @@ func (ftp *fsTablePersister) CopyTableFile(ctx context.Context, r io.ReadCloser,
_, err = io.Copy(temp, r)
if err != nil {
return "", err
return "", cleanup, err
}
return temp.Name(), nil
return temp.Name(), cleanup, nil
}()
defer f()
if err != nil {
return err
}
path := filepath.Join(ftp.dir, fileId)
ftp.removeMu.Lock()
if ftp.toKeep != nil {
ftp.toKeep[filepath.Clean(path)] = struct{}{}
}
defer ftp.removeMu.Unlock()
return file.Rename(tn, path)
}
func (ftp *fsTablePersister) TryMoveCmpChunkTableWriter(ctx context.Context, filename string, w *CmpChunkTableWriter) error {
path := filepath.Join(ftp.dir, filename)
ftp.removeMu.Lock()
if ftp.toKeep != nil {
ftp.toKeep[filepath.Clean(path)] = struct{}{}
}
defer ftp.removeMu.Unlock()
return w.FlushToFile(path)
}
@@ -122,17 +163,25 @@ func (ftp *fsTablePersister) persistTable(ctx context.Context, name addr, data [
return emptyChunkSource{}, nil
}
tempName, err := func() (tempName string, ferr error) {
tempName, f, err := func() (tempName string, cleanup func(), ferr error) {
ftp.removeMu.Lock()
var temp *os.File
temp, ferr = tempfiles.MovableTempFileProvider.NewFile(ftp.dir, tempTablePrefix)
if ferr != nil {
return "", ferr
ftp.removeMu.Unlock()
return "", func() {}, ferr
}
ftp.curTmps[filepath.Clean(temp.Name())] = struct{}{}
ftp.removeMu.Unlock()
cleanup = func() {
ftp.removeMu.Lock()
delete(ftp.curTmps, filepath.Clean(temp.Name()))
ftp.removeMu.Unlock()
}
defer func() {
closeErr := temp.Close()
if ferr == nil {
ferr = closeErr
}
@@ -140,20 +189,23 @@ func (ftp *fsTablePersister) persistTable(ctx context.Context, name addr, data [
_, ferr = io.Copy(temp, bytes.NewReader(data))
if ferr != nil {
return "", ferr
return "", cleanup, ferr
}
return temp.Name(), nil
return temp.Name(), cleanup, nil
}()
defer f()
if err != nil {
return nil, err
}
newName := filepath.Join(ftp.dir, name.String())
ftp.removeMu.Lock()
if ftp.toKeep != nil {
ftp.toKeep[filepath.Clean(newName)] = struct{}{}
}
err = file.Rename(tempName, newName)
ftp.removeMu.Unlock()
if err != nil {
return nil, err
}
@@ -168,16 +220,25 @@ func (ftp *fsTablePersister) ConjoinAll(ctx context.Context, sources chunkSource
}
if plan.chunkCount == 0 {
return emptyChunkSource{}, nil, nil
return emptyChunkSource{}, func() {}, nil
}
name := nameFromSuffixes(plan.suffixes())
tempName, err := func() (tempName string, ferr error) {
tempName, f, err := func() (tempName string, cleanup func(), ferr error) {
ftp.removeMu.Lock()
var temp *os.File
temp, ferr = tempfiles.MovableTempFileProvider.NewFile(ftp.dir, tempTablePrefix)
if ferr != nil {
return "", ferr
ftp.removeMu.Unlock()
return "", func() {}, ferr
}
ftp.curTmps[filepath.Clean(temp.Name())] = struct{}{}
ftp.removeMu.Unlock()
cleanup = func() {
ftp.removeMu.Lock()
delete(ftp.curTmps, filepath.Clean(temp.Name()))
ftp.removeMu.Unlock()
}
defer func() {
@@ -189,40 +250,52 @@ func (ftp *fsTablePersister) ConjoinAll(ctx context.Context, sources chunkSource
}()
for _, sws := range plan.sources.sws {
var r io.Reader
var r io.ReadCloser
r, _, ferr = sws.source.reader(ctx)
if ferr != nil {
return "", ferr
return "", cleanup, ferr
}
n, ferr := io.CopyN(temp, r, int64(sws.dataLen))
if ferr != nil {
return "", ferr
r.Close()
return "", cleanup, ferr
}
if uint64(n) != sws.dataLen {
return "", errors.New("failed to copy all data")
r.Close()
return "", cleanup, errors.New("failed to copy all data")
}
err := r.Close()
if err != nil {
return "", cleanup, err
}
}
_, ferr = temp.Write(plan.mergedIndex)
if ferr != nil {
return "", ferr
return "", cleanup, ferr
}
return temp.Name(), nil
return temp.Name(), cleanup, nil
}()
defer f()
if err != nil {
return nil, nil, err
}
err = file.Rename(tempName, filepath.Join(ftp.dir, name.String()))
path := filepath.Join(ftp.dir, name.String())
ftp.removeMu.Lock()
if ftp.toKeep != nil {
ftp.toKeep[filepath.Clean(path)] = struct{}{}
}
err = file.Rename(tempName, path)
if err != nil {
return nil, nil, err
}
ftp.removeMu.Unlock()
cs, err := ftp.Open(ctx, name, plan.chunkCount, stats)
if err != nil {
@@ -235,16 +308,42 @@ func (ftp *fsTablePersister) ConjoinAll(ctx context.Context, sources chunkSource
}, nil
}
func (ftp *fsTablePersister) PruneTableFiles(ctx context.Context, contents manifestContents, mtime time.Time) error {
ss := contents.getSpecSet()
func (ftp *fsTablePersister) PruneTableFiles(ctx context.Context, keeper func() []addr, mtime time.Time) error {
ftp.removeMu.Lock()
if ftp.toKeep != nil {
ftp.removeMu.Unlock()
return errors.New("shallow gc already in progress")
}
ftp.toKeep = make(map[string]struct{})
ftp.removeMu.Unlock()
defer func() {
ftp.removeMu.Lock()
ftp.toKeep = nil
ftp.removeMu.Unlock()
}()
toKeep := make(map[string]struct{})
for _, k := range keeper() {
toKeep[filepath.Clean(filepath.Join(ftp.dir, k.String()))] = struct{}{}
}
ftp.removeMu.Lock()
for f := range toKeep {
ftp.toKeep[f] = struct{}{}
}
ftp.removeMu.Unlock()
fileInfos, err := os.ReadDir(ftp.dir)
if err != nil {
return err
}
ea := make(gcErrAccum)
unfilteredTableFiles := make([]string, 0)
unfilteredTempFiles := make([]string, 0)
for _, info := range fileInfos {
if info.IsDir() {
continue
@@ -253,10 +352,7 @@ func (ftp *fsTablePersister) PruneTableFiles(ctx context.Context, contents manif
filePath := path.Join(ftp.dir, info.Name())
if strings.HasPrefix(info.Name(), tempTablePrefix) {
err = file.Remove(filePath)
if err != nil {
ea.add(filePath, err)
}
unfilteredTempFiles = append(unfilteredTempFiles, filePath)
continue
}
@@ -264,31 +360,45 @@ func (ftp *fsTablePersister) PruneTableFiles(ctx context.Context, contents manif
continue // not a table file
}
addy, err := parseAddr(info.Name())
_, err := parseAddr(info.Name())
if err != nil {
continue // not a table file
}
if _, ok := ss[addy]; ok {
continue // file is referenced in the manifest
}
i, err := info.Info()
if err != nil {
ea.add(filePath, err)
continue
}
ctime := i.ModTime()
if ctime.After(mtime) {
continue // file has been updated more recently than manifest
continue // file has been updated more recently than our cutoff time
}
err = file.Remove(filePath)
if err != nil {
ea.add(filePath, err)
unfilteredTableFiles = append(unfilteredTableFiles, filePath)
}
for _, p := range unfilteredTempFiles {
ftp.removeMu.Lock()
if _, ok := ftp.curTmps[filepath.Clean(p)]; !ok {
err := file.Remove(p)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
ea.add(p, err)
}
}
ftp.removeMu.Unlock()
}
for _, p := range unfilteredTableFiles {
ftp.removeMu.Lock()
if _, ok := ftp.toKeep[filepath.Clean(p)]; !ok {
err := file.Remove(p)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
ea.add(p, err)
}
}
ftp.removeMu.Unlock()
}
if !ea.isEmpty() {
+8 -1
View File
@@ -183,10 +183,17 @@ func TestFSTablePersisterConjoinAllDups(t *testing.T) {
sources[2], err = sources[0].clone()
require.NoError(t, err)
src, _, err := fts.ConjoinAll(ctx, sources, &Stats{})
src, cleanup, err := fts.ConjoinAll(ctx, sources, &Stats{})
require.NoError(t, err)
defer src.close()
// After ConjoinAll runs, we can close the sources and
// call the cleanup func.
for _, s := range sources {
s.close()
}
cleanup()
if assert.True(mustUint32(src.count()) > 0) {
buff, err := os.ReadFile(filepath.Join(dir, src.hash().String()))
require.NoError(t, err)
+2 -2
View File
@@ -218,8 +218,8 @@ func (j *chunkJournal) Exists(ctx context.Context, name addr, chunkCount uint32,
}
// PruneTableFiles implements tablePersister.
func (j *chunkJournal) PruneTableFiles(ctx context.Context, contents manifestContents, mtime time.Time) error {
return j.persister.PruneTableFiles(ctx, contents, mtime)
func (j *chunkJournal) PruneTableFiles(ctx context.Context, keeper func() []addr, mtime time.Time) error {
return j.persister.PruneTableFiles(ctx, keeper, mtime)
}
func (j *chunkJournal) Path() string {
+1 -1
View File
@@ -117,7 +117,7 @@ func (s journalChunkSource) hash() addr {
// reader implements chunkSource.
func (s journalChunkSource) reader(context.Context) (io.ReadCloser, uint64, error) {
rdr, sz, err := s.journal.snapshot()
return io.NopCloser(rdr), uint64(sz), err
return rdr, uint64(sz), err
}
func (s journalChunkSource) getRecordRanges(requests []getRecord) (map[hash.Hash]Range, error) {
+4
View File
@@ -24,6 +24,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/dolthub/dolt/go/libraries/utils/file"
"github.com/dolthub/dolt/go/store/chunks"
"github.com/dolthub/dolt/go/store/types"
)
@@ -33,6 +34,7 @@ func makeTestChunkJournal(t *testing.T) *chunkJournal {
ctx := context.Background()
dir, err := os.MkdirTemp("", "")
require.NoError(t, err)
t.Cleanup(func() { file.RemoveAll(dir) })
m, err := getFileManifest(ctx, dir, syncFlush)
require.NoError(t, err)
q := NewUnlimitedMemQuotaProvider()
@@ -40,6 +42,7 @@ func makeTestChunkJournal(t *testing.T) *chunkJournal {
nbf := types.Format_Default.VersionString()
j, err := newChunkJournal(ctx, nbf, dir, m, p.(*fsTablePersister))
require.NoError(t, err)
t.Cleanup(func() { j.Close() })
return j
}
@@ -98,6 +101,7 @@ func TestReadRecordRanges(t *testing.T) {
rdr, sz, err := jcs.(journalChunkSource).journal.snapshot()
require.NoError(t, err)
defer rdr.Close()
buf = make([]byte, sz)
n, err := rdr.Read(buf)
+19 -4
View File
@@ -335,8 +335,6 @@ func (wr *journalWriter) flushIndexRecord(root hash.Hash, end int64) (err error)
// readAt reads len(p) bytes from the journal at offset |off|.
func (wr *journalWriter) readAt(p []byte, off int64) (n int, err error) {
wr.lock.RLock()
defer wr.lock.RUnlock()
var bp []byte
if off < wr.off {
// fill some or all of |p| from |wr.file|
@@ -399,9 +397,18 @@ func (wr *journalWriter) maybeFlush() (err error) {
return wr.flush()
}
type journalWriterSnapshot struct {
io.Reader
closer func() error
}
func (s journalWriterSnapshot) Close() error {
return s.closer()
}
// snapshot returns an io.Reader with a consistent view of
// the current state of the journal file.
func (wr *journalWriter) snapshot() (io.Reader, int64, error) {
func (wr *journalWriter) snapshot() (io.ReadCloser, int64, error) {
wr.lock.Lock()
defer wr.lock.Unlock()
if err := wr.flush(); err != nil {
@@ -413,7 +420,12 @@ func (wr *journalWriter) snapshot() (io.Reader, int64, error) {
if err != nil {
return nil, 0, err
}
return io.LimitReader(f, wr.off), wr.off, nil
return journalWriterSnapshot{
io.LimitReader(f, wr.off),
func() error {
return f.Close()
},
}, wr.off, nil
}
func (wr *journalWriter) offset() int64 {
@@ -444,6 +456,9 @@ func (wr *journalWriter) Close() (err error) {
if err = wr.flush(); err != nil {
return err
}
if wr.index != nil {
wr.index.Close()
}
if cerr := wr.journal.Sync(); cerr != nil {
err = cerr
}
+1 -1
View File
@@ -631,7 +631,7 @@ func (ftp fakeTablePersister) Exists(ctx context.Context, name addr, chunkCount
return true, nil
}
func (ftp fakeTablePersister) PruneTableFiles(_ context.Context, _ manifestContents, _ time.Time) error {
func (ftp fakeTablePersister) PruneTableFiles(_ context.Context, _ func() []addr, _ time.Time) error {
return chunks.ErrUnsupportedOperation
}
+2 -3
View File
@@ -44,8 +44,10 @@ func TestStats(t *testing.T) {
dir, err := os.MkdirTemp("", "")
require.NoError(t, err)
defer file.RemoveAll(dir)
store, err := NewLocalStore(context.Background(), constants.FormatDefaultString, dir, testMemTableSize, NewUnlimitedMemQuotaProvider())
require.NoError(t, err)
defer store.Close()
assert.EqualValues(1, stats(store).OpenLatency.Samples())
@@ -147,7 +149,4 @@ func TestStats(t *testing.T) {
assert.Equal(uint64(1), stats(store).ConjoinLatency.Samples())
// TODO: Once random conjoin hack is out, test other conjoin stats
defer store.Close()
defer file.RemoveAll(dir)
}
+43 -69
View File
@@ -165,6 +165,8 @@ func (nbs *NomsBlockStore) UpdateManifest(ctx context.Context, updates map[hash.
defer nbs.mu.Unlock()
nbs.waitForGC()
nbs.checkAllManifestUpdatesExist(ctx, updates)
nbs.mm.LockForUpdate()
defer func() {
unlockErr := nbs.mm.UnlockForUpdate()
@@ -211,11 +213,6 @@ func (nbs *NomsBlockStore) UpdateManifest(ctx context.Context, updates map[hash.
}
}
err = nbs.tables.checkAllTablesExist(ctx, contents.specs, nbs.stats)
if err != nil {
return manifestContents{}, err
}
updatedContents, err = nbs.mm.Update(ctx, originalLock, contents, nbs.stats, nil)
if err != nil {
return manifestContents{}, err
@@ -247,6 +244,8 @@ func (nbs *NomsBlockStore) UpdateManifestWithAppendix(ctx context.Context, updat
defer nbs.mu.Unlock()
nbs.waitForGC()
nbs.checkAllManifestUpdatesExist(ctx, updates)
nbs.mm.LockForUpdate()
defer func() {
unlockErr := nbs.mm.UnlockForUpdate()
@@ -294,11 +293,6 @@ func (nbs *NomsBlockStore) UpdateManifestWithAppendix(ctx context.Context, updat
return manifestContents{}, err
}
err = nbs.tables.checkAllTablesExist(ctx, contents.specs, nbs.stats)
if err != nil {
return manifestContents{}, err
}
updatedContents, err = nbs.mm.Update(ctx, originalLock, contents, nbs.stats, nil)
if err != nil {
return manifestContents{}, err
@@ -324,6 +318,27 @@ func (nbs *NomsBlockStore) UpdateManifestWithAppendix(ctx context.Context, updat
return updatedContents, nil
}
func (nbs *NomsBlockStore) checkAllManifestUpdatesExist(ctx context.Context, updates map[hash.Hash]uint32) error {
eg, ctx := errgroup.WithContext(ctx)
eg.SetLimit(128)
for h, c := range updates {
h := h
c := c
eg.Go(func() error {
a := addr(h)
ok, err := nbs.p.Exists(ctx, a, c, nbs.stats)
if err != nil {
return err
}
if !ok {
return fmt.Errorf("missing table file referenced in UpdateManifest call: %v", a)
}
return nil
})
}
return eg.Wait()
}
func fromManifestAppendixOptionNewContents(upstream manifestContents, appendixSpecs []tableSpec, option ManifestAppendixOption) (manifestContents, error) {
contents, upstreamAppendixSpecs := upstream.removeAppendixSpecs()
switch option {
@@ -1105,7 +1120,7 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has
}
if nbs.c.conjoinRequired(nbs.tables) {
newUpstream, err := conjoin(ctx, nbs.c, nbs.upstream, nbs.mm, nbs.p, nbs.stats)
newUpstream, cleanup, err := conjoin(ctx, nbs.c, nbs.upstream, nbs.mm, nbs.p, nbs.stats)
if err != nil {
return err
}
@@ -1122,6 +1137,7 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has
if err != nil {
return err
}
cleanup()
return errOptimisticLockFailedTables
}
@@ -1301,32 +1317,14 @@ func (nbs *NomsBlockStore) Size(ctx context.Context) (uint64, error) {
nbs.mu.Lock()
defer nbs.mu.Unlock()
exists, contents, err := nbs.mm.m.ParseIfExists(ctx, nbs.stats, nil)
if err != nil {
return uint64(0), err
}
if !exists {
return uint64(0), nil
}
css, err := nbs.chunkSourcesByAddr()
if err != nil {
return uint64(0), err
}
numSpecs := contents.NumTableSpecs()
size := uint64(0)
for i := 0; i < numSpecs; i++ {
info := contents.getSpec(i)
cs, ok := css[info.name]
if !ok {
return uint64(0), errors.New("manifest referenced table file for which there is no chunkSource.")
}
for _, cs := range nbs.tables.upstream {
size += cs.currentSize()
}
for _, cs := range nbs.tables.novel {
size += cs.currentSize()
}
return size, nil
}
@@ -1409,44 +1407,20 @@ func (nbs *NomsBlockStore) PruneTableFiles(ctx context.Context) (err error) {
}
func (nbs *NomsBlockStore) pruneTableFiles(ctx context.Context, checker refCheck) (err error) {
nbs.mu.Lock()
defer nbs.mu.Unlock()
nbs.waitForGC()
mtime := time.Now()
nbs.mm.LockForUpdate()
defer func() {
unlockErr := nbs.mm.UnlockForUpdate()
if err == nil {
err = unlockErr
return nbs.p.PruneTableFiles(ctx, func() []addr {
nbs.mu.Lock()
defer nbs.mu.Unlock()
keepers := make([]addr, 0, len(nbs.tables.novel)+len(nbs.tables.upstream))
for a, _ := range nbs.tables.novel {
keepers = append(keepers, a)
}
}()
for {
// flush all tables and update manifest
err = nbs.updateManifest(ctx, nbs.upstream.root, nbs.upstream.root, checker)
if err == nil {
break
} else if err == errOptimisticLockFailedTables {
continue
} else {
return err
for a, _ := range nbs.tables.upstream {
keepers = append(keepers, a)
}
// Same behavior as Commit
// infinitely retries without backoff in the case off errOptimisticLockFailedTables
}
ok, contents, t, err := nbs.mm.Fetch(ctx, &Stats{})
if err != nil {
return err
}
if !ok {
return nil // no manifest exists
}
return nbs.p.PruneTableFiles(ctx, contents, t)
return keepers
}, mtime)
}
func (nbs *NomsBlockStore) setGCInProgress(inProgress bool) bool {
+4 -2
View File
@@ -58,8 +58,10 @@ type tablePersister interface {
// Exists checks if a table named |name| exists.
Exists(ctx context.Context, name addr, chunkCount uint32, stats *Stats) (bool, error)
// PruneTableFiles deletes old table files that are no longer referenced in the manifest.
PruneTableFiles(ctx context.Context, contents manifestContents, mtime time.Time) error
// PruneTableFiles deletes table files which the persister would normally be responsible for and
// which are not in the included |keeper| set and have not be written or modified more recently
// than the provided |mtime|.
PruneTableFiles(ctx context.Context, keeper func() []addr, mtime time.Time) error
io.Closer
}
+6 -29
View File
@@ -332,29 +332,6 @@ func (ts tableSet) flatten(ctx context.Context) (tableSet, error) {
return flattened, nil
}
func (ts tableSet) checkAllTablesExist(ctx context.Context, specs []tableSpec, stats *Stats) error {
eg, ectx := errgroup.WithContext(ctx)
eg.SetLimit(128)
for _, s := range specs {
// if the table file already exists in our upstream chunkSourceSet, we do not need to
// check with the upstream if it still exists.
if _, ok := ts.upstream[s.name]; ok {
continue
}
spec := s
eg.Go(func() error {
exists, err := ts.p.Exists(ectx, spec.name, spec.chunkCount, stats)
if err != nil {
return err
} else if !exists {
return fmt.Errorf("table spec does not exist")
}
return nil
})
}
return eg.Wait()
}
// rebase returns a new tableSet holding the novel tables managed by |ts| and
// those specified by |specs|.
func (ts tableSet) rebase(ctx context.Context, specs []tableSpec, stats *Stats) (tableSet, error) {
@@ -389,10 +366,6 @@ func (ts tableSet) rebase(ctx context.Context, specs []tableSpec, stats *Stats)
novel[t2.hash()] = t2
}
// newly opened tables are unowned, we must
// close them if the rebase operation fails
opened := make(chunkSourceSet, len(specs))
eg, ctx := errgroup.WithContext(ctx)
mu := new(sync.Mutex)
upstream := make(chunkSourceSet, len(specs))
@@ -401,6 +374,11 @@ func (ts tableSet) rebase(ctx context.Context, specs []tableSpec, stats *Stats)
if cs, ok := ts.upstream[s.name]; ok {
cl, err := cs.clone()
if err != nil {
_ = eg.Wait()
for _, cs := range upstream {
// close any opened chunkSources
_ = cs.close()
}
return tableSet{}, err
}
mu.Lock()
@@ -417,14 +395,13 @@ func (ts tableSet) rebase(ctx context.Context, specs []tableSpec, stats *Stats)
}
mu.Lock()
upstream[cs.hash()] = cs
opened[cs.hash()] = cs
mu.Unlock()
return nil
})
}
if err := eg.Wait(); err != nil {
for _, cs := range opened {
for _, cs := range upstream {
// close any opened chunkSources
_ = cs.close()
}
+37
View File
@@ -22,6 +22,8 @@ teardown() {
teardown_common
}
@test "diff-stat: stat/summary comparing working table to last commit" {
dolt sql -q "insert into test values (0, 0, 0, 0, 0, 0)"
dolt sql -q "insert into test values (1, 1, 1, 1, 1, 1)"
@@ -338,4 +340,39 @@ SQL
run dolt diff --stat
[ $status -eq 0 ]
[[ $output =~ "1 Row Modified (100.00%)" ]]
}
@test "diff-stat: stat/summary for renamed table" {
dolt sql -q "insert into test values (0, 0, 0, 0, 0, 0)"
dolt sql -q "insert into test values (1, 1, 1, 1, 1, 1)"
dolt add test
dolt commit -m "table created"
dolt sql -q "alter table test rename to test2"
run dolt diff --stat
[ "$status" -eq 0 ]
[[ "$output" =~ "No data changes. See schema changes by using -s or --schema." ]] || false
run dolt diff --summary
[ "$status" -eq 0 ]
[[ "$output" =~ "| Table name | Diff type | Data change | Schema change |" ]] || false
[[ "$output" =~ "| test -> test2 | renamed | false | true |" ]] || false
dolt sql -q "insert into test2 values (2, 2, 2, 2, 2, 2)"
run dolt diff --stat
[ "$status" -eq 0 ]
echo "$output"
[[ "$output" =~ "2 Rows Unmodified (100.00%)" ]] || false
[[ "$output" =~ "1 Row Added (50.00%)" ]] || false
[[ "$output" =~ "0 Rows Deleted (0.00%)" ]] || false
[[ "$output" =~ "0 Rows Modified (0.00%)" ]] || false
[[ "$output" =~ "6 Cells Added (50.00%)" ]] || false
[[ "$output" =~ "0 Cells Deleted (0.00%)" ]] || false
[[ "$output" =~ "0 Cells Modified (0.00%)" ]] || false
[[ "$output" =~ "(2 Row Entries vs 3 Row Entries)" ]] || false
run dolt diff --summary
[ "$status" -eq 0 ]
[[ "$output" =~ "| Table name | Diff type | Data change | Schema change |" ]] || false
[[ "$output" =~ "| test -> test2 | renamed | true | true |" ]] || false
}
+9 -9
View File
@@ -1017,7 +1017,7 @@ SQL
dolt sql -q "UPDATE t SET val1=2 where pk=1"
run dolt diff -r sql
[ $status -eq 0 ]
[[ "$output" = 'UPDATE `t` SET `val1`=2 WHERE `pk`=1;' ]] || false
[[ "$output" =~ 'UPDATE `t` SET `val1`=2 WHERE `pk`=1;' ]] || false
dolt commit -am "cm2"
@@ -1025,7 +1025,7 @@ SQL
dolt diff -r sql
run dolt diff -r sql
[ $status -eq 0 ]
[[ "$output" = 'UPDATE `t` SET `val1`=3,`val2`=4 WHERE `pk`=1;' ]] || false
[[ "$output" =~ 'UPDATE `t` SET `val1`=3,`val2`=4 WHERE `pk`=1;' ]] || false
dolt commit -am "cm3"
@@ -1040,7 +1040,7 @@ SQL
dolt sql -q "update t set val1=30,val3=4 where pk=1"
run dolt diff -r sql
[ $status -eq 0 ]
[[ "$output" = 'UPDATE `t` SET `val1`=30,`val3`=4 WHERE `pk`=1;' ]] || false
[[ "$output" =~ 'UPDATE `t` SET `val1`=30,`val3`=4 WHERE `pk`=1;' ]] || false
}
@test "diff: skinny flag only shows row changed without schema changes" {
@@ -1188,7 +1188,7 @@ SQL
[ $status -eq 0 ]
[ "${lines[0]}" = 'ALTER TABLE `t` DROP PRIMARY KEY;' ]
[ "${lines[1]}" = 'ALTER TABLE `t` ADD PRIMARY KEY (pk);' ]
[ "${lines[2]}" = 'Primary key sets differ between revisions for table t, skipping data diff' ]
[ "${lines[2]}" = "Primary key sets differ between revisions for table 't', skipping data diff" ]
dolt commit -am "cm6"
@@ -1200,7 +1200,7 @@ SQL
[ "${lines[0]}" = 'ALTER TABLE `t` ADD `pk2` int;' ]
[ "${lines[1]}" = 'ALTER TABLE `t` DROP PRIMARY KEY;' ]
[ "${lines[2]}" = 'ALTER TABLE `t` ADD PRIMARY KEY (pk,val);' ]
[ "${lines[3]}" = 'Primary key sets differ between revisions for table t, skipping data diff' ]
[ "${lines[3]}" = "Primary key sets differ between revisions for table 't', skipping data diff" ]
}
@test "diff: adding and removing primary key" {
@@ -1217,13 +1217,13 @@ SQL
[ $status -eq 0 ]
[ "${lines[0]}" = 'ALTER TABLE `t` DROP PRIMARY KEY;' ]
[ "${lines[1]}" = 'ALTER TABLE `t` ADD PRIMARY KEY (pk);' ]
[ "${lines[2]}" = 'Primary key sets differ between revisions for table t, skipping data diff' ]
[ "${lines[2]}" = "Primary key sets differ between revisions for table 't', skipping data diff" ]
dolt diff
run dolt diff
[ $status -eq 0 ]
[[ "$output" =~ '+ PRIMARY KEY (`pk`)' ]] || false
[[ "$output" =~ 'Primary key sets differ between revisions for table t, skipping data diff' ]] || false
[[ "$output" =~ "Primary key sets differ between revisions for table 't', skipping data diff" ]] || false
dolt commit -am 'added primary key'
@@ -1234,13 +1234,13 @@ SQL
run dolt diff -r sql
[ $status -eq 0 ]
[ "${lines[0]}" = 'ALTER TABLE `t` DROP PRIMARY KEY;' ]
[[ "$output" =~ 'Primary key sets differ between revisions for table t, skipping data diff' ]] || false
[[ "$output" =~ "Primary key sets differ between revisions for table 't', skipping data diff" ]] || false
dolt diff
run dolt diff
[ $status -eq 0 ]
[[ "$output" =~ '- PRIMARY KEY (`pk`)' ]] || false
[[ "$output" =~ 'Primary key sets differ between revisions for table t, skipping data diff' ]] || false
[[ "$output" =~ "Primary key sets differ between revisions for table 't', skipping data diff" ]] || false
}
@test "diff: created and dropped tables include schema and data changes in results" {
+3 -3
View File
@@ -273,13 +273,13 @@ SQL
run no_stdout dolt diff -r json
[ $status -eq 0 ]
[ "$output" = 'Primary key sets differ between revisions for table t, skipping data diff' ]
[ "$output" = "Primary key sets differ between revisions for table 't', skipping data diff" ]
dolt diff -r json
run dolt diff -r json
[ $status -eq 0 ]
[[ "$output" =~ '{"tables":[{"name":"t","schema_diff":["ALTER TABLE `t` DROP PRIMARY KEY;","ALTER TABLE `t` ADD PRIMARY KEY (pk);"]' ]] || false
[[ "$output" =~ 'Primary key sets differ between revisions for table t, skipping data diff' ]] || false
[[ "$output" =~ "Primary key sets differ between revisions for table 't', skipping data diff" ]] || false
dolt commit -am 'added primary key'
@@ -289,7 +289,7 @@ SQL
run dolt diff -r json
[ $status -eq 0 ]
[[ "$output" =~ '{"tables":[{"name":"t","schema_diff":["ALTER TABLE `t` DROP PRIMARY KEY;"]' ]] || false
[[ "$output" =~ 'Primary key sets differ between revisions for table t, skipping data diff' ]] || false
[[ "$output" =~ "Primary key sets differ between revisions for table 't', skipping data diff" ]] || false
}
function no_stderr {
@@ -21,7 +21,7 @@ teardown() {
run dolt tag -v
[ "$status" -eq 0 ]
[[ "$output" =~ "r9jv07tf9un3fm1fg72v7ad9er89oeo7" ]] || false
[[ ! "$output" =~ "euna1i8brh95lo9mcg05s3m8h781fr8a" ]] || false
[[ ! "$output" =~ "d0q6hb3vcq1oe178usc6rd28db1cnh26" ]] || false
dolt migrate
[[ $(cat ./.dolt/noms/manifest | cut -f 2 -d :) = "$TARGET_NBF" ]] || false
@@ -29,7 +29,7 @@ teardown() {
dolt tag -v
run dolt tag -v
[ "$status" -eq 0 ]
[[ "$output" =~ "euna1i8brh95lo9mcg05s3m8h781fr8a" ]] || false
[[ "$output" =~ "d0q6hb3vcq1oe178usc6rd28db1cnh26" ]] || false
[[ ! "$output" =~ "r9jv07tf9un3fm1fg72v7ad9er89oeo7" ]] || false
# validate TEXT migration
@@ -47,7 +47,7 @@ teardown() {
run dolt tag -v
[ "$status" -eq 0 ]
[[ "$output" =~ "r9jv07tf9un3fm1fg72v7ad9er89oeo7" ]] || false
[[ ! "$output" =~ "euna1i8brh95lo9mcg05s3m8h781fr8a" ]] || false
[[ ! "$output" =~ "d0q6hb3vcq1oe178usc6rd28db1cnh26" ]] || false
dolt migrate
[[ $(cat ./.dolt/noms/manifest | cut -f 2 -d :) = "$TARGET_NBF" ]] || false
@@ -55,7 +55,7 @@ teardown() {
dolt tag -v
run dolt tag -v
[ "$status" -eq 0 ]
[[ "$output" =~ "euna1i8brh95lo9mcg05s3m8h781fr8a" ]] || false
[[ "$output" =~ "d0q6hb3vcq1oe178usc6rd28db1cnh26" ]] || false
[[ ! "$output" =~ "r9jv07tf9un3fm1fg72v7ad9er89oeo7" ]] || false
# validate TEXT migration
@@ -311,7 +311,7 @@ teardown() {
dolt diff --data
run dolt diff --data
[ "$status" -eq 0 ]
[[ "$output" =~ "Primary key sets differ between revisions for table t, skipping data diff" ]] || false
[[ "$output" =~ "Primary key sets differ between revisions for table 't', skipping data diff" ]] || false
}
@test "primary-key-changes: diff on composite schema" {
@@ -338,7 +338,7 @@ teardown() {
dolt diff --data
run dolt diff --data
[ "$status" -eq 0 ]
[[ "$output" =~ "Primary key sets differ between revisions for table t, skipping data diff" ]] || false
[[ "$output" =~ "Primary key sets differ between revisions for table 't', skipping data diff" ]] || false
run dolt diff --stat
[ "$status" -eq 1 ]
+412
View File
@@ -0,0 +1,412 @@
#!/usr/bin/env bats
load $BATS_TEST_DIRNAME/helper/common.bash
# Per-test setup: create a fresh dolt repository (helper from common.bash).
setup() {
setup_common
}
# Per-test teardown: verify the repo's feature version was not unexpectedly
# bumped, then remove the scratch repository (helpers from common.bash).
teardown() {
assert_feature_version
teardown_common
}
# DOLT_PATCH('--cached') should emit DDL only for staged (added) changes.
@test "sql-patch: --cached flag shows staged changes" {
dolt sql <<SQL
CREATE TABLE test (id INT PRIMARY KEY, col1 TEXT);
SQL
dolt add test
run dolt sql -q "CALL DOLT_PATCH('--cached')"
[ "$status" -eq 0 ]
# Expect a single-column tabular result whose rows are the CREATE TABLE
# statement for the staged table, checked line by line.
[ "${lines[0]}" = "+-------------------------------------------------------------------+" ]
[ "${lines[1]}" = "| statement |" ]
[ "${lines[2]}" = "+-------------------------------------------------------------------+" ]
[ "${lines[3]}" = "| CREATE TABLE \`test\` ( |" ]
[ "${lines[4]}" = "| \`id\` int NOT NULL, |" ]
[ "${lines[5]}" = "| \`col1\` text, |" ]
[ "${lines[6]}" = "| PRIMARY KEY (\`id\`) |" ]
[ "${lines[7]}" = "| ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin; |" ]
[ "${lines[8]}" = "+-------------------------------------------------------------------+" ]
}
# DOLT_PATCH between two branches must emit the same SQL statements as
# `dolt diff -r sql` after a simple row INSERT on the second branch.
@test "sql-patch: output reconciles INSERT query" {
    dolt checkout -b firstbranch
    dolt sql <<SQL
CREATE TABLE test (
    pk BIGINT NOT NULL COMMENT 'tag:0',
    c1 BIGINT COMMENT 'tag:1',
    c2 BIGINT COMMENT 'tag:2',
    c3 BIGINT COMMENT 'tag:3',
    c4 BIGINT COMMENT 'tag:4',
    c5 BIGINT COMMENT 'tag:5',
    PRIMARY KEY (pk)
);
SQL
    # $(...) is preferred over legacy backtick command substitution (SC2006).
    dolt table import -u test $(batshelper 1pk5col-ints.csv)
    dolt add test
    dolt commit -m "Added two initial row"
    dolt checkout -b newbranch
    dolt sql -q 'INSERT INTO test (pk, c1, c2, c3, c4, c5) VALUES (2, 11, 0, 0, 0, 0)'
    dolt add test
    dolt commit -m "Added a third row"
    run dolt sql -q "CALL DOLT_PATCH('firstbranch','newbranch')"
    [ "$status" -eq 0 ]
    # 3 tabular border lines + 1 header line + 1 statement row = 5 lines total
    [ "${#lines[@]}" -eq 5 ]
    match_diff_and_patch_results firstbranch newbranch
}
# DOLT_PATCH between two branches must emit the same SQL statements as
# `dolt diff -r sql` after an UPDATE to non-key columns on the second branch.
@test "sql-patch: output reconciles UPDATE query" {
    dolt checkout -b firstbranch
    dolt sql <<SQL
CREATE TABLE test (
    pk BIGINT NOT NULL COMMENT 'tag:0',
    c1 BIGINT COMMENT 'tag:1',
    c2 BIGINT COMMENT 'tag:2',
    c3 BIGINT COMMENT 'tag:3',
    c4 BIGINT COMMENT 'tag:4',
    c5 BIGINT COMMENT 'tag:5',
    PRIMARY KEY (pk)
);
SQL
    # $(...) is preferred over legacy backtick command substitution (SC2006).
    dolt table import -u test $(batshelper 1pk5col-ints.csv)
    dolt add test
    dolt commit -m "Added one initial row"
    dolt checkout -b newbranch
    dolt sql -q 'UPDATE test SET c1=11, c5=6 WHERE pk=0'
    dolt add test
    dolt commit -m "modified first row"
    run dolt sql -q "CALL DOLT_PATCH('firstbranch','newbranch')"
    [ "$status" -eq 0 ]
    # 3 tabular border lines + 1 header line + 1 statement row = 5 lines total
    [ "${#lines[@]}" -eq 5 ]
    match_diff_and_patch_results firstbranch newbranch
}
# DOLT_PATCH between two branches must emit the same SQL statements as
# `dolt diff -r sql` after a row DELETE on the second branch.
@test "sql-patch: output reconciles DELETE query" {
    dolt checkout -b firstbranch
    dolt sql <<SQL
CREATE TABLE test (
    pk BIGINT NOT NULL COMMENT 'tag:0',
    c1 BIGINT COMMENT 'tag:1',
    c2 BIGINT COMMENT 'tag:2',
    c3 BIGINT COMMENT 'tag:3',
    c4 BIGINT COMMENT 'tag:4',
    c5 BIGINT COMMENT 'tag:5',
    PRIMARY KEY (pk)
);
SQL
    # $(...) is preferred over legacy backtick command substitution (SC2006).
    dolt table import -u test $(batshelper 1pk5col-ints.csv)
    dolt add test
    dolt commit -m "Added one initial row"
    dolt checkout -b newbranch
    dolt sql -q 'DELETE FROM test WHERE pk=0'
    dolt add test
    dolt commit -m "deleted first row"
    run dolt sql -q "CALL DOLT_PATCH('firstbranch','newbranch')"
    [ "$status" -eq 0 ]
    # 3 tabular border lines + 1 header line + 1 statement row = 5 lines total
    [ "${#lines[@]}" -eq 5 ]
    match_diff_and_patch_results firstbranch newbranch
}
# Changing a row's primary-key value is a delete+insert pair; DOLT_PATCH must
# still reconcile with `dolt diff -r sql` (hence 2 statement rows below).
@test "sql-patch: output reconciles change to PRIMARY KEY field in row " {
    dolt checkout -b firstbranch
    dolt sql <<SQL
CREATE TABLE test (
    pk BIGINT NOT NULL COMMENT 'tag:0',
    c1 BIGINT COMMENT 'tag:1',
    c2 BIGINT COMMENT 'tag:2',
    c3 BIGINT COMMENT 'tag:3',
    c4 BIGINT COMMENT 'tag:4',
    c5 BIGINT COMMENT 'tag:5',
    PRIMARY KEY (pk)
);
SQL
    # $(...) is preferred over legacy backtick command substitution (SC2006).
    dolt table import -u test $(batshelper 1pk5col-ints.csv)
    dolt add test
    dolt commit -m "Added one initial row"
    dolt checkout -b newbranch
    dolt sql -q 'UPDATE test SET pk=2 WHERE pk=1'
    dolt add test
    dolt commit -m "modified first row"
    # First invocation prints the output into the test log for debugging.
    dolt sql -q "CALL DOLT_PATCH('firstbranch','newbranch')"
    run dolt sql -q "CALL DOLT_PATCH('firstbranch','newbranch')"
    [ "$status" -eq 0 ]
    # 3 tabular border lines + 1 header line + 2 statement rows = 6 lines total
    [ "${#lines[@]}" -eq 6 ]
    match_diff_and_patch_results firstbranch newbranch
}
# Schema-only changes (rename/drop/add column) must produce matching
# statements from DOLT_PATCH and `dolt diff -r sql`.
@test "sql-patch: output reconciles RENAME, DROP and ADD column" {
dolt checkout -b firstbranch
dolt sql <<SQL
CREATE TABLE test (
pk BIGINT NOT NULL COMMENT 'tag:0',
c1 BIGINT COMMENT 'tag:1',
c2 BIGINT COMMENT 'tag:2',
c3 BIGINT COMMENT 'tag:3',
c4 BIGINT COMMENT 'tag:4',
c5 BIGINT COMMENT 'tag:5',
PRIMARY KEY (pk)
);
SQL
dolt sql -q 'insert into test values (1,1,1,1,1,1)'
dolt add .
dolt commit -m "added row"
dolt checkout -b newbranch
dolt sql -q "alter table test rename column c1 to c0"
dolt sql -q "alter table test drop column c4"
dolt sql -q "alter table test add c6 bigint"
dolt add .
dolt commit -m "renamed column"
# First invocation prints the output into the test log for debugging.
dolt sql -q "CALL DOLT_PATCH('firstbranch','newbranch')"
run dolt sql -q "CALL DOLT_PATCH('firstbranch','newbranch')"
[ "$status" -eq 0 ]
# 3 tabular border lines + 1 header + 3 statement rows = 7 lines total
[ "${#lines[@]}" -eq 7 ]
match_diff_and_patch_results firstbranch newbranch
}
# A brand-new table plus rows: patch output should contain the CREATE TABLE
# DDL and the row INSERTs, matching `dolt diff -r sql`.
@test "sql-patch: reconciles CREATE TABLE with row INSERTS" {
dolt checkout -b firstbranch
dolt checkout -b newbranch
dolt sql <<SQL
CREATE TABLE test (
pk BIGINT NOT NULL COMMENT 'tag:0',
c1 BIGINT COMMENT 'tag:1',
PRIMARY KEY (pk)
);
SQL
dolt sql -q 'insert into test values (1,1)'
dolt sql -q 'insert into test values (2,2)'
dolt commit -Am "created new table"
# First invocation prints the output into the test log for debugging.
dolt sql -q "CALL DOLT_PATCH('firstbranch','newbranch')"
run dolt sql -q "CALL DOLT_PATCH('firstbranch','newbranch')"
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 11 ]
match_diff_and_patch_results firstbranch newbranch
}
# Dropping a table should emit a DROP TABLE statement only — no per-row
# DELETE statements for the rows that disappear with the table.
@test "sql-patch: reconciles DROP TABLE" {
dolt checkout -b firstbranch
dolt sql <<SQL
CREATE TABLE test (
pk BIGINT NOT NULL COMMENT 'tag:0',
c1 BIGINT COMMENT 'tag:1',
c2 BIGINT COMMENT 'tag:2',
PRIMARY KEY (pk)
);
SQL
dolt sql -q 'insert into test values (1,1,1)'
dolt add .
dolt commit -m "setup table"
dolt checkout -b newbranch
dolt sql -q 'drop table test'
dolt add .
dolt commit -m "removed table"
# First invocation prints the output into the test log for debugging.
dolt sql -q "CALL DOLT_PATCH('firstbranch','newbranch')"
run dolt sql -q "CALL DOLT_PATCH('firstbranch','newbranch')"
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 5 ]
# Row deletions must be subsumed by the table drop.
[[ ! "$output" =~ "DELETE FROM" ]] || false
match_diff_and_patch_results firstbranch newbranch
}
# Combined table rename + column changes + data change on one branch must
# still reconcile between DOLT_PATCH and `dolt diff -r sql`.
@test "sql-patch: reconciles RENAME TABLE with schema changes" {
dolt checkout -b firstbranch
dolt sql <<SQL
CREATE TABLE test (
pk BIGINT NOT NULL COMMENT 'tag:0',
c1 BIGINT COMMENT 'tag:1',
c2 BIGINT COMMENT 'tag:2',
c3 BIGINT COMMENT 'tag:3',
c4 BIGINT COMMENT 'tag:4',
c5 BIGINT COMMENT 'tag:5',
PRIMARY KEY (pk)
);
SQL
dolt sql -q 'insert into test values (1,1,1,1,1,1)'
dolt add .
dolt commit -m "created table"
dolt checkout -b newbranch
dolt sql -q 'ALTER TABLE test RENAME COLUMN c2 to col2'
dolt sql -q 'ALTER TABLE test ADD COLUMN c6 int'
# NOTE(review): `-q=` differs from the `-q ` style used everywhere else in
# this file — presumably both are accepted by the arg parser; confirm and
# normalize.
dolt sql -q='RENAME TABLE test TO newname'
dolt sql -q 'ALTER TABLE newname DROP COLUMN c3'
dolt sql -q 'insert into newname values (2,1,1,1,1,1)'
dolt add .
dolt commit -m "renamed table and added data"
# First invocation prints the output into the test log for debugging.
dolt sql -q "CALL DOLT_PATCH('firstbranch','newbranch')"
run dolt sql -q "CALL DOLT_PATCH('firstbranch','newbranch')"
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 8 ]
match_diff_and_patch_results firstbranch newbranch
}
# A table covering many column types: the generated CREATE TABLE must match
# between DOLT_PATCH and `dolt diff -r sql`.
@test "sql-patch: diff sql recreates tables with all types" {
dolt checkout -b firstbranch
dolt checkout -b newbranch
dolt sql <<SQL
CREATE TABLE test (
\`pk\` BIGINT NOT NULL COMMENT 'tag:0',
\`int\` BIGINT COMMENT 'tag:1',
\`string\` LONGTEXT COMMENT 'tag:2',
\`boolean\` BOOLEAN COMMENT 'tag:3',
\`float\` DOUBLE COMMENT 'tag:4',
\`uint\` BIGINT UNSIGNED COMMENT 'tag:5',
\`uuid\` CHAR(36) CHARACTER SET ascii COLLATE ascii_bin COMMENT 'tag:6',
PRIMARY KEY (pk)
);
SQL
# Data import intentionally disabled; the expected 14-line count below
# covers the schema-only patch output.
# dolt table import -u test `batshelper 1pksupportedtypes.csv`
dolt add .
dolt commit -m "created new table"
# First invocation prints the output into the test log for debugging.
dolt sql -q "CALL DOLT_PATCH('firstbranch','newbranch')"
run dolt sql -q "CALL DOLT_PATCH('firstbranch','newbranch')"
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 14 ]
match_diff_and_patch_results firstbranch newbranch
}
# Working-set patch (no revision arguments) over composite-PK and FK
# changes must reconcile with `dolt diff -r sql`.
@test "sql-patch: reconciles multi PRIMARY KEY and FOREIGN KEY" {
dolt sql <<SQL
CREATE TABLE parent (
id int PRIMARY KEY,
id_ext int,
v1 int,
v2 text COMMENT 'tag:1',
INDEX v1 (v1)
);
CREATE TABLE child (
id int primary key,
v1 int
);
SQL
dolt sql -q "ALTER TABLE child ADD CONSTRAINT fk_named FOREIGN KEY (v1) REFERENCES parent(v1);"
dolt sql -q "insert into parent values (0, 1, 2, NULL);"
dolt sql -q "ALTER TABLE parent DROP PRIMARY KEY;"
dolt sql -q "ALTER TABLE parent ADD PRIMARY KEY(id, id_ext);"
# First invocation prints the output into the test log for debugging.
dolt sql -q "CALL DOLT_PATCH()"
run dolt sql -q "CALL DOLT_PATCH()"
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 20 ]
match_diff_and_patch_results
}
# A new table carrying a CHECK constraint: working-set patch must reconcile
# with `dolt diff -r sql`.
@test "sql-patch: reconciles CHECK CONSTRAINTS" {
dolt sql <<SQL
create table foo (
pk int,
c1 int,
CHECK (c1 > 3),
PRIMARY KEY (pk)
);
SQL
# First invocation prints the output into the test log for debugging.
dolt sql -q "CALL DOLT_PATCH()"
run dolt sql -q "CALL DOLT_PATCH()"
[ "$status" -eq 0 ]
[ "${#lines[@]}" -eq 10 ]
match_diff_and_patch_results
}
# When a diff cannot produce data statements (incompatible schema change,
# changed primary key set), DOLT_PATCH should still succeed and surface the
# skip message via SHOW WARNINGS, matching what `dolt diff -r sql` prints.
@test "sql-patch: any error causing no data diff is shown as warnings." {
dolt sql <<SQL
CREATE TABLE parent (
id int PRIMARY KEY,
id_ext int,
v1 int,
v2 text COMMENT 'tag:1',
INDEX v1 (v1)
);
CREATE TABLE child (
id int primary key,
v1 int
);
SQL
dolt commit -Am "add tables"
dolt sql -q "ALTER TABLE child ADD CONSTRAINT fk_named FOREIGN KEY (v1) REFERENCES parent(v1);"
dolt sql -q "insert into parent values (0, 1, 2, NULL);"
dolt sql -q "ALTER TABLE parent DROP PRIMARY KEY"
dolt sql -q "ALTER TABLE parent ADD PRIMARY KEY(id, id_ext);"
# child: FK addition alongside the parent PK change is an incompatible
# schema change — diff warns and skips the data diff.
run dolt diff -r sql child
[ "$status" -eq 0 ]
[[ "$output" =~ "Incompatible schema change, skipping data diff for table 'child'" ]] || false
# Capture the first two diff statements to compare against patch rows.
diff_output_0=${lines[0]}
diff_output_1=${lines[1]}
run dolt sql -q "CALL DOLT_PATCH('child'); SHOW WARNINGS;"
[ "$status" -eq 0 ]
[[ "${lines[1]}" =~ "statement" ]] || false
[[ "${lines[3]}" =~ "$diff_output_0" ]] || false
[[ "${lines[4]}" =~ "$diff_output_1" ]] || false
[[ "$output" =~ "Incompatible schema change, skipping data diff for table 'child'" ]] || false
# parent: primary key set changed — diff warns and skips the data diff.
run dolt diff -r sql parent
[ "$status" -eq 0 ]
[[ "$output" =~ "Primary key sets differ between revisions for table 'parent', skipping data diff" ]] || false
diff_output_0=${lines[0]}
diff_output_1=${lines[1]}
run dolt sql -q "CALL DOLT_PATCH('parent'); SHOW WARNINGS;"
[ "$status" -eq 0 ]
[[ "${lines[1]}" =~ "statement" ]] || false
[[ "${lines[3]}" =~ "$diff_output_0" ]] || false
[[ "${lines[4]}" =~ "$diff_output_1" ]] || false
[[ "$output" =~ "Primary key sets differ between revisions for table 'parent', skipping data diff" ]] || false
}
# Assert that `CALL DOLT_PATCH(...)` yields the same SQL statements as
# `dolt diff -r sql`. Call with no arguments to compare the working set, or
# with two commit revisions ($1, $2).
match_diff_and_patch_results() {
    # Scope work variables to this helper so they don't leak into the
    # test's global environment between assertions.
    local diff_array patch_array idx i

    if [ -z "$1" ] && [ -z "$2" ]; then
        run dolt diff -r sql
    else
        run dolt diff -r sql $1 $2
    fi
    [ "$status" -eq 0 ]
    diff_array=( "${lines[@]}" )

    if [ -z "$1" ] && [ -z "$2" ]; then
        run dolt sql -q "CALL DOLT_PATCH()"
    else
        run dolt sql -q "CALL DOLT_PATCH('$1','$2')"
    fi
    [ "$status" -eq 0 ]
    [[ "${lines[1]}" =~ "statement" ]] || false
    # Skip the three tabular header lines of the DOLT_PATCH output.
    patch_array=( "${lines[@]:3}" )

    # Iterate over row values only: exclude patch_array's final element (the
    # closing tabular border line), which also drops the last diff_array
    # line — that one can be a warning/error message rather than a statement.
    idx=$(( ${#patch_array[@]} - 2 ))
    # Arithmetic for-loop avoids spawning an external `seq` process and
    # behaves identically (no iterations when idx < 0).
    for (( i = 0; i <= idx; i++ )); do
        # Each patch row (`| <stmt> ... |`) must contain the corresponding
        # raw diff statement; a quoted =~ pattern matches literally.
        [[ "${patch_array[i]}" =~ "${diff_array[i]}" ]] || false
    done
}
+24
View File
@@ -34,6 +34,30 @@ teardown() {
[[ "$output" =~ 'test,true,new table' ]] || false
}
# dolt_status should track a staged new table through a rename: the old name
# stays listed as staged while the rename shows as an unstaged change, and
# re-adding collapses it to a single staged entry under the new name.
@test "sql-status: status properly works with table rename" {
# Test is staged
dolt add test
run dolt sql -r csv -q "select * from dolt_status"
[ "$status" -eq 0 ]
[[ "$output" =~ 'test,true,new table' ]] || false
# Rename test to test2
run dolt sql -r csv -q "alter table test rename to test2"
[ "$status" -eq 0 ]
# Confirm table is now marked as renamed, test still staged
run dolt sql -r csv -q "select * from dolt_status"
[ "$status" -eq 0 ]
[[ "$output" =~ 'test,true,new table' ]] || false
[[ "$output" =~ 'test -> test2,false,renamed' ]] || false
# Confirm table is now marked as staged
dolt add test2
run dolt sql -r csv -q "select * from dolt_status"
[ "$status" -eq 0 ]
[[ "$output" =~ 'test2,true,new table' ]] || false
}
@test "sql-status: table that has staged and unstaged changes shows up twice" {
# Stage one set of changes.
dolt add test
@@ -85,6 +85,11 @@ enum ClientEventType {
FILTER_BRANCH = 53;
DUMP = 54;
CHERRY_PICK = 55;
STASH = 56;
STASH_CLEAR = 57;
STASH_DROP = 58;
STASH_LIST = 59;
STASH_POP = 60;
}
enum MetricID {