Merge remote-tracking branch 'origin/main' into andy/filtered-reader

This commit is contained in:
Andy Arthur
2022-07-06 12:36:15 -07:00
116 changed files with 1354 additions and 927 deletions
+4
View File
@@ -75,6 +75,10 @@ func (cmd AddCmd) Exec(ctx context.Context, commandStr string, args []string, dE
allFlag := apr.Contains(cli.AllFlag)
if dEnv.IsLocked() {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(env.ErrActiveServerLock.New(dEnv.LockFile())), helpPr)
}
roots, err := dEnv.Roots(ctx)
if err != nil {
return handleStageError(err)
+3
View File
@@ -82,6 +82,9 @@ func (cmd CheckoutCmd) Exec(ctx context.Context, commandStr string, args []strin
ap := cli.CreateCheckoutArgParser()
helpPrt, usagePrt := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, checkoutDocs, ap))
apr := cli.ParseArgsOrDie(ap, args, helpPrt)
if dEnv.IsLocked() {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(env.ErrActiveServerLock.New(dEnv.LockFile())), helpPrt)
}
if (apr.Contains(cli.CheckoutCoBranch) && apr.NArg() > 1) || (!apr.Contains(cli.CheckoutCoBranch) && apr.NArg() == 0) {
usagePrt()
+4 -2
View File
@@ -77,7 +77,9 @@ func (cmd CherryPickCmd) Exec(ctx context.Context, commandStr string, args []str
ap := cli.CreateCherryPickArgParser()
help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, cherryPickDocs, ap))
apr := cli.ParseArgsOrDie(ap, args, help)
if dEnv.IsLocked() {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(env.ErrActiveServerLock.New(dEnv.LockFile())), help)
}
// This command creates a commit, so we need user identity
if !cli.CheckUserNameAndEmail(dEnv) {
return 1
@@ -254,7 +256,7 @@ func getParentAndCherryRoots(ctx context.Context, ddb *doltdb.DoltDB, cherryComm
return nil, nil, err
}
} else {
parentRoot, err = doltdb.EmptyRootValue(ctx, ddb.ValueReadWriter())
parentRoot, err = doltdb.EmptyRootValue(ctx, ddb.ValueReadWriter(), ddb.NodeStore())
if err != nil {
return nil, nil, err
}
+4
View File
@@ -70,6 +70,10 @@ func (cmd CleanCmd) Exec(ctx context.Context, commandStr string, args []string,
help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, cleanDocContent, ap))
apr := cli.ParseArgsOrDie(ap, args, help)
if dEnv.IsLocked() {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(env.ErrActiveServerLock.New(dEnv.LockFile())), help)
}
roots, err := dEnv.Roots(ctx)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
+86 -56
View File
@@ -16,22 +16,23 @@ package cnfcmds
import (
"context"
"fmt"
"io"
"strings"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/cmd/dolt/commands"
"github.com/dolthub/dolt/go/cmd/dolt/commands/engine"
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
eventsapi "github.com/dolthub/dolt/go/gen/proto/dolt/services/eventsapi/v1alpha1"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/merge"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/table/pipeline"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/sqlutil"
"github.com/dolthub/dolt/go/libraries/doltcore/table/untyped"
"github.com/dolthub/dolt/go/libraries/doltcore/table/untyped/fwt"
"github.com/dolthub/dolt/go/libraries/doltcore/table/untyped/nullprinter"
"github.com/dolthub/dolt/go/libraries/doltcore/table/untyped/tabular"
"github.com/dolthub/dolt/go/libraries/utils/argparser"
"github.com/dolthub/dolt/go/libraries/utils/iohelp"
)
@@ -56,10 +57,6 @@ func (cmd CatCmd) Description() string {
return "Writes out the table conflicts."
}
func (cmd CatCmd) GatedForNBF(nbf *types.NomsBinFormat) bool {
return types.IsFormat_DOLT_1(nbf)
}
func (cmd CatCmd) Docs() *cli.CommandDocumentation {
ap := cmd.ArgParser()
return cli.NewCommandDocumentation(catDocs, ap)
@@ -103,7 +100,7 @@ func (cmd CatCmd) Exec(ctx context.Context, commandStr string, args []string, dE
// If no commit was resolved from the first argument, assume the args are all table names and print the conflicts
if cm == nil {
if verr := printConflicts(ctx, root, args); verr != nil {
if verr := printConflicts(ctx, dEnv, root, args); verr != nil {
return exitWithVerr(verr)
}
@@ -122,7 +119,7 @@ func (cmd CatCmd) Exec(ctx context.Context, commandStr string, args []string, dE
return exitWithVerr(errhand.BuildDError("unable to get the root value").AddCause(err).Build())
}
if verr = printConflicts(ctx, root, tblNames); verr != nil {
if verr = printConflicts(ctx, dEnv, root, tblNames); verr != nil {
return exitWithVerr(verr)
}
@@ -134,7 +131,7 @@ func exitWithVerr(verr errhand.VerboseError) int {
return 1
}
func printConflicts(ctx context.Context, root *doltdb.RootValue, tblNames []string) errhand.VerboseError {
func printConflicts(ctx context.Context, dEnv *env.DoltEnv, root *doltdb.RootValue, tblNames []string) errhand.VerboseError {
if len(tblNames) == 1 && tblNames[0] == "." {
var err error
tblNames, err = root.GetTableNames(ctx)
@@ -143,6 +140,11 @@ func printConflicts(ctx context.Context, root *doltdb.RootValue, tblNames []stri
}
}
eng, err := engine.NewSqlEngineForEnv(ctx, dEnv)
if err != nil {
return errhand.VerboseErrorFromError(err)
}
for _, tblName := range tblNames {
verr := func() errhand.VerboseError {
if has, err := root.HasTable(ctx, tblName); err != nil {
@@ -152,70 +154,46 @@ func printConflicts(ctx context.Context, root *doltdb.RootValue, tblNames []stri
}
tbl, _, err := root.GetTable(ctx, tblName)
if err != nil {
return errhand.BuildDError("error: unable to read database").AddCause(err).Build()
}
has, err := root.HasConflicts(ctx)
has, err := tbl.HasConflicts(ctx)
if err != nil {
return errhand.BuildDError("failed to read conflicts").AddCause(err).Build()
return errhand.BuildDError("error: unable to read database").AddCause(err).Build()
}
if !has {
return nil
}
cnfRd, err := merge.NewConflictReader(ctx, tbl)
baseSch, sch, mergeSch, err := tbl.GetConflictSchemas(ctx, tblName)
if err != nil {
return errhand.BuildDError("failed to read conflicts").AddCause(err).Build()
return errhand.BuildDError("failed to fetch conflicts").AddCause(err).Build()
}
unionSch, err := untyped.UntypedSchemaUnion(baseSch, sch, mergeSch)
if err != nil {
return errhand.BuildDError("failed to fetch conflicts").AddCause(err).Build()
}
sqlUnionSch, err := sqlutil.FromDoltSchema(tblName, unionSch)
if err != nil {
return errhand.BuildDError("failed to fetch conflicts").AddCause(err).Build()
}
defer cnfRd.Close()
splitter, err := merge.NewConflictSplitter(ctx, tbl.ValueReadWriter(), cnfRd.GetJoiner())
sqlCtx, err := engine.NewLocalSqlContext(ctx, eng)
if err != nil {
return errhand.BuildDError("error: unable to handle schemas").AddCause(err).Build()
return errhand.BuildDError("failed to fetch conflicts").AddCause(err).Build()
}
cnfWr, err := merge.NewConflictSink(iohelp.NopWrCloser(cli.CliOut), splitter.GetSchema(), " | ")
defer cnfWr.Close()
confSqlSch, rowItr, err := eng.Query(sqlCtx, buildConflictQuery(baseSch, sch, mergeSch, tblName))
if err != nil {
return errhand.BuildDError("error: unable to read database").AddCause(err).Build()
return errhand.BuildDError("failed to fetch conflicts").AddCause(err).Build()
}
nullPrinter := nullprinter.NewNullPrinter(splitter.GetSchema())
fwtTr := fwt.NewAutoSizingFWTTransformer(splitter.GetSchema(), fwt.HashFillWhenTooLong, 1000)
transforms := pipeline.NewTransformCollection(
pipeline.NewNamedTransform("split", splitter.SplitConflicts),
pipeline.NewNamedTransform(nullprinter.NullPrintingStage, nullPrinter.ProcessRow),
pipeline.NamedTransform{Name: "fwt", Func: fwtTr.TransformToFWT},
)
// TODO: Pipeline should be contextified.
srcProcFunc := pipeline.ProcFuncForSourceFunc(func() (row.Row, pipeline.ImmutableProperties, error) { return cnfRd.NextConflict(ctx) })
sinkProcFunc := pipeline.ProcFuncForSinkFunc(cnfWr.ProcRowWithProps)
p := pipeline.NewAsyncPipeline(srcProcFunc, sinkProcFunc, transforms, func(failure *pipeline.TransformRowFailure) (quit bool) {
panic("")
})
colNames, err := schema.ExtractAllColNames(splitter.GetSchema())
tw := tabular.NewFixedWidthConflictTableWriter(sqlUnionSch.Schema, iohelp.NopWrCloser(cli.CliOut), 100)
err = writeConflictResults(sqlCtx, confSqlSch, sqlUnionSch.Schema, rowItr, tw)
if err != nil {
return errhand.BuildDError("error: failed to read columns from schema").AddCause(err).Build()
return errhand.BuildDError("failed to print conflicts").AddCause(err).Build()
}
r, err := untyped.NewRowFromTaggedStrings(tbl.Format(), splitter.GetSchema(), colNames)
if err != nil {
return errhand.BuildDError("error: failed to create header row for printing").AddCause(err).Build()
}
p.InjectRow("fwt", r)
p.Start()
p.Wait()
return nil
}()
@@ -227,3 +205,55 @@ func printConflicts(ctx context.Context, root *doltdb.RootValue, tblNames []stri
return nil
}
// writeConflictResults drains iter, splitting every conflict-query row into
// its per-version rows (base / ours / theirs) and streaming each one to
// writer. On io.EOF the writer is closed (flushing any buffered output) and
// its Close error, if any, is returned; any other error aborts immediately.
func writeConflictResults(
	ctx *sql.Context,
	resultSch sql.Schema,
	targetSch sql.Schema,
	iter sql.RowIter,
	writer *tabular.FixedWidthConflictTableWriter) error {
	splitter, err := newConflictSplitter(resultSch, targetSch)
	if err != nil {
		return err
	}

	for {
		next, err := iter.Next(ctx)
		if err == io.EOF {
			// End of results: closing the writer completes the output.
			return writer.Close(ctx)
		}
		if err != nil {
			return err
		}

		versions, err := splitter.splitConflictRow(next)
		if err != nil {
			return err
		}
		for _, v := range versions {
			if err := writer.WriteRow(ctx, v.version, v.row, v.diffType); err != nil {
				return err
			}
		}
	}
}
// buildConflictQuery builds the SELECT statement that reads the
// dolt_conflicts_<tblName> system table, projecting every base_, our_, and
// their_ column (cast to char via castColumnWithPrefix) plus the two
// diff-type columns consumed by the conflict splitter.
func buildConflictQuery(base, sch, mergeSch schema.Schema, tblName string) string {
	var selections []string
	selections = append(selections, castColumnWithPrefix(base.GetAllCols().GetColumnNames(), "base_")...)
	selections = append(selections, castColumnWithPrefix(sch.GetAllCols().GetColumnNames(), "our_")...)
	selections = append(selections, castColumnWithPrefix(mergeSch.GetAllCols().GetColumnNames(), "their_")...)
	return fmt.Sprintf("SELECT %s, our_diff_type, their_diff_type from dolt_conflicts_%s", strings.Join(selections, ", "), tblName)
}
// castColumnWithPrefix returns one SELECT expression per name in arr,
// casting the prefixed column to char and aliasing it to the same prefixed
// name, e.g. "cast (`base_id` as char) as `base_id`".
//
// The cast operand is backtick-quoted to match the alias: the original form
// quoted only the alias, so a column name containing characters that require
// quoting would have produced invalid SQL in the operand position.
func castColumnWithPrefix(arr []string, prefix string) []string {
	out := make([]string, len(arr))
	for i := range arr {
		n := prefix + arr[i]
		out[i] = fmt.Sprintf("cast (`%s` as char) as `%s`", n, n)
	}
	return out
}
@@ -0,0 +1,156 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cnfcmds
import (
"fmt"
"strings"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/libraries/doltcore/diff"
"github.com/dolthub/dolt/go/libraries/doltcore/merge"
)
// Column-name prefixes used by the dolt_conflicts_<table> system table to
// distinguish the three versions of each conflicting row.
const (
	basePrefix  = "base_"
	theirPrefix = "their_"
	ourPrefix   = "our_"
)

// conflictSplitter translates rows of a dolt_conflicts_<table> query result
// into per-version rows laid out on a single target schema.
type conflictSplitter struct {
	// conflictQuerySch is the schema of the conflict query results; its
	// columns carry the base_/our_/their_ prefixes plus the two diff-type
	// columns.
	conflictQuerySch sql.Schema
	// targetSch is the unified schema the split rows are projected onto.
	targetSch sql.Schema
	// baseToTarget, ourToTarget, theirToTarget map a column index in
	// conflictQuerySch to the corresponding index in targetSch for each
	// version's columns.
	baseToTarget, ourToTarget, theirToTarget map[int]int
	// ourToBase and theirToBase map an our_/their_ column index to the index
	// of its base_ counterpart in conflictQuerySch, when one exists.
	ourToBase, theirToBase map[int]int
	// ourDiffTypeIdx and theirDiffTypeIdx are the indexes of the
	// our_diff_type and their_diff_type columns in conflictQuerySch.
	ourDiffTypeIdx, theirDiffTypeIdx int
}
// newConflictSplitter inspects the schema of a dolt_conflicts_<table> query
// result and builds the index mappings needed to split each result row into
// base/ours/theirs rows on targetSch.
//
// It returns an error if a prefixed column has no counterpart in targetSch,
// or if either of the our_diff_type / their_diff_type columns is missing.
//
// Fix: the original populated ourToBase and theirToBase but never assigned
// them to the returned struct, leaving those fields nil; they are now set.
// The prefix-stripping offsets are also derived from the prefix constants
// instead of hard-coded slice indexes.
func newConflictSplitter(conflictQuerySch sql.Schema, targetSch sql.Schema) (*conflictSplitter, error) {
	baseToTarget, ourToTarget, theirToTarget := make(map[int]int), make(map[int]int), make(map[int]int)
	ourToBase, theirToBase := make(map[int]int), make(map[int]int)
	ourDiffTypeIdx, theirDiffTypeIdx := -1, -1

	for i := 0; i < len(conflictQuerySch); i++ {
		name := conflictQuerySch[i].Name
		if name == "our_diff_type" {
			ourDiffTypeIdx = i
			continue
		}
		if name == "their_diff_type" {
			theirDiffTypeIdx = i
			continue
		}

		var colName string
		var mapper map[int]int
		switch {
		case strings.HasPrefix(name, basePrefix):
			colName = name[len(basePrefix):]
			mapper = baseToTarget
		case strings.HasPrefix(name, ourPrefix):
			colName = name[len(ourPrefix):]
			mapper = ourToTarget
			if base := conflictQuerySch.IndexOfColName(basePrefix + colName); base >= 0 {
				ourToBase[i] = base
			}
		case strings.HasPrefix(name, theirPrefix):
			colName = name[len(theirPrefix):]
			mapper = theirToTarget
			if base := conflictQuerySch.IndexOfColName(basePrefix + colName); base >= 0 {
				theirToBase[i] = base
			}
		}

		// A column matching no prefix leaves colName empty, which fails the
		// target lookup below and surfaces as an error (as in the original).
		targetIdx := targetSch.IndexOfColName(colName)
		if targetIdx < 0 {
			return nil, fmt.Errorf("couldn't find a column named %s", colName)
		}
		mapper[i] = targetIdx
	}

	if ourDiffTypeIdx == -1 || theirDiffTypeIdx == -1 {
		return nil, fmt.Errorf("our_diff_type or their_diff_type missing from conflict sql results")
	}

	return &conflictSplitter{
		conflictQuerySch: conflictQuerySch,
		targetSch:        targetSch,
		baseToTarget:     baseToTarget,
		ourToTarget:      ourToTarget,
		theirToTarget:    theirToTarget,
		ourToBase:        ourToBase,
		theirToBase:      theirToBase,
		ourDiffTypeIdx:   ourDiffTypeIdx,
		theirDiffTypeIdx: theirDiffTypeIdx,
	}, nil
}
// conflictRow is one printable version of a conflicting row.
type conflictRow struct {
	// version labels which side this row represents: "base", "ours", or
	// "theirs".
	version string
	// row holds the column values projected onto the splitter's target
	// schema.
	row sql.Row
	// diffType describes how this version differs from the base row.
	diffType diff.ChangeType
}
// splitConflictRow expands one row of the conflict query into its printable
// versions. When either side's diff type is Added, only the "ours" and
// "theirs" rows are emitted; otherwise a "base" row is included as well.
// A Removed side is rendered with the base values.
func (cs conflictSplitter) splitConflictRow(row sql.Row) ([]conflictRow, error) {
	width := len(cs.targetSch)
	baseRow := make(sql.Row, width)
	ourRow := make(sql.Row, width)
	theirRow := make(sql.Row, width)

	ourDiffType := changeTypeFromString(row[cs.ourDiffTypeIdx].(string))
	theirDiffType := changeTypeFromString(row[cs.theirDiffTypeIdx].(string))

	for src, dst := range cs.baseToTarget {
		baseRow[dst] = row[src]
	}

	// A removed side shows its pre-merge values, so reuse the base row
	// (shared slice — rows are only read downstream).
	if ourDiffType == diff.Removed {
		ourRow = baseRow
	} else {
		for src, dst := range cs.ourToTarget {
			ourRow[dst] = row[src]
		}
	}

	if theirDiffType == diff.Removed {
		theirRow = baseRow
	} else {
		for src, dst := range cs.theirToTarget {
			theirRow[dst] = row[src]
		}
	}

	if ourDiffType == diff.Added || theirDiffType == diff.Added {
		// The row did not exist in the base, so there is no base version.
		return []conflictRow{
			{version: "ours", row: ourRow, diffType: ourDiffType},
			{version: "theirs", row: theirRow, diffType: theirDiffType},
		}, nil
	}

	return []conflictRow{
		{version: "base", row: baseRow, diffType: diff.None},
		{version: "ours", row: ourRow, diffType: ourDiffType},
		{version: "theirs", row: theirRow, diffType: theirDiffType},
	}, nil
}
// changeTypeFromString converts a diff-type string from the
// dolt_conflicts_<table> system table into its diff.ChangeType value.
// It panics on an unrecognized string, which indicates a programmer error
// in the conflict query rather than a user-facing condition.
func changeTypeFromString(str string) diff.ChangeType {
	if str == merge.ConflictDiffTypeAdded {
		return diff.Added
	}
	if str == merge.ConflictDiffTypeRemoved {
		return diff.Removed
	}
	if str == merge.ConflictDiffTypeModified {
		return diff.ModifiedNew
	}
	panic(fmt.Sprintf("unhandled diff type string %s", str))
}
+4
View File
@@ -107,6 +107,10 @@ func (cmd ResolveCmd) Exec(ctx context.Context, commandStr string, args []string
help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, resDocumentation, ap))
apr := cli.ParseArgsOrDie(ap, args, help)
if dEnv.IsLocked() {
return commands.HandleVErrAndExitCode(errhand.VerboseErrorFromError(env.ErrActiveServerLock.New(dEnv.LockFile())), usage)
}
var verr errhand.VerboseError
if apr.ContainsAny(autoResolverParams...) {
verr = autoResolve(ctx, apr, dEnv)
+4
View File
@@ -82,6 +82,10 @@ func (cmd CommitCmd) Exec(ctx context.Context, commandStr string, args []string,
allFlag := apr.Contains(cli.AllFlag)
if dEnv.IsLocked() {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(env.ErrActiveServerLock.New(dEnv.LockFile())), help)
}
roots, err := dEnv.Roots(ctx)
if err != nil {
return HandleVErrAndExitCode(errhand.BuildDError("Couldn't get working root").AddCause(err).Build(), usage)
@@ -92,7 +92,7 @@ func (cmd VerifyConstraintsCmd) Exec(ctx context.Context, commandStr string, arg
return commands.HandleVErrAndExitCode(errhand.BuildDError("Unable to get head root.").AddCause(err).Build(), nil)
}
if verifyAllRows {
comparingRoot, err = doltdb.EmptyRootValue(ctx, comparingRoot.VRW())
comparingRoot, err = doltdb.EmptyRootValue(ctx, comparingRoot.VRW(), comparingRoot.NodeStore())
if err != nil {
return commands.HandleVErrAndExitCode(errhand.BuildDError("Unable to create an empty root.").AddCause(err).Build(), nil)
}
+4
View File
@@ -104,6 +104,10 @@ func (cmd FilterBranchCmd) Exec(ctx context.Context, commandStr string, args []s
return HandleVErrAndExitCode(verr, usage)
}
if dEnv.IsLocked() {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(env.ErrActiveServerLock.New(dEnv.LockFile())), help)
}
query := apr.Arg(0)
notFound := make(missingTbls)
replay := func(ctx context.Context, commit, _, _ *doltdb.Commit) (*doltdb.RootValue, error) {
+4
View File
@@ -99,6 +99,10 @@ func (cmd GarbageCollectionCmd) Exec(ctx context.Context, commandStr string, arg
help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, gcDocs, ap))
apr := cli.ParseArgsOrDie(ap, args, help)
if dEnv.IsLocked() {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(env.ErrActiveServerLock.New(dEnv.LockFile())), help)
}
var err error
if apr.Contains(gcShallowFlag) {
err = dEnv.DoltDB.ShallowGC(ctx)
+10 -9
View File
@@ -22,6 +22,7 @@ import (
"github.com/fatih/color"
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/cmd/dolt/commands"
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
@@ -95,7 +96,7 @@ func (cmd CatCmd) Exec(ctx context.Context, commandStr string, args []string, dE
usage()
return 0
} else if apr.NArg() != 2 {
return HandleErr(errhand.BuildDError("Both the table and index names must be provided.").Build(), usage)
return commands.HandleVErrAndExitCode(errhand.BuildDError("Both the table and index names must be provided.").Build(), usage)
}
cmd.resultFormat = formatTabular
@@ -108,13 +109,13 @@ func (cmd CatCmd) Exec(ctx context.Context, commandStr string, args []string, dE
case "json":
cmd.resultFormat = formatJson
default:
return HandleErr(errhand.BuildDError("Invalid argument for --result-format. Valid values are tabular, csv, json").Build(), usage)
return commands.HandleVErrAndExitCode(errhand.BuildDError("Invalid argument for --result-format. Valid values are tabular, csv, json").Build(), usage)
}
}
working, err := dEnv.WorkingRoot(context.Background())
if err != nil {
return HandleErr(errhand.BuildDError("Unable to get working.").AddCause(err).Build(), nil)
return commands.HandleVErrAndExitCode(errhand.BuildDError("Unable to get working.").AddCause(err).Build(), nil)
}
tableName := apr.Arg(0)
@@ -122,27 +123,27 @@ func (cmd CatCmd) Exec(ctx context.Context, commandStr string, args []string, dE
table, ok, err := working.GetTable(ctx, tableName)
if err != nil {
return HandleErr(errhand.BuildDError("Unable to get table `%s`.", tableName).AddCause(err).Build(), nil)
return commands.HandleVErrAndExitCode(errhand.BuildDError("Unable to get table `%s`.", tableName).AddCause(err).Build(), nil)
}
if !ok {
return HandleErr(errhand.BuildDError("The table `%s` does not exist.", tableName).Build(), nil)
return commands.HandleVErrAndExitCode(errhand.BuildDError("The table `%s` does not exist.", tableName).Build(), nil)
}
tblSch, err := table.GetSchema(ctx)
if err != nil {
return HandleErr(errhand.BuildDError("Unable to get schema for `%s`.", tableName).AddCause(err).Build(), nil)
return commands.HandleVErrAndExitCode(errhand.BuildDError("Unable to get schema for `%s`.", tableName).AddCause(err).Build(), nil)
}
index := tblSch.Indexes().GetByName(indexName)
if index == nil {
return HandleErr(errhand.BuildDError("The index `%s` does not exist on table `%s`.", indexName, tableName).Build(), nil)
return commands.HandleVErrAndExitCode(errhand.BuildDError("The index `%s` does not exist on table `%s`.", indexName, tableName).Build(), nil)
}
indexRowData, err := table.GetNomsIndexRowData(ctx, index.Name())
if err != nil {
return HandleErr(errhand.BuildDError("The index `%s` does not have a data map.", indexName).Build(), nil)
return commands.HandleVErrAndExitCode(errhand.BuildDError("The index `%s` does not have a data map.", indexName).Build(), nil)
}
err = cmd.prettyPrintResults(ctx, index.Schema(), indexRowData)
if err != nil {
return HandleErr(errhand.BuildDError("Unable to display data for `%s`.", indexName).AddCause(err).Build(), nil)
return commands.HandleVErrAndExitCode(errhand.BuildDError("Unable to display data for `%s`.", indexName).AddCause(err).Build(), nil)
}
return 0
+6 -2
View File
@@ -17,13 +17,13 @@ package indexcmds
import (
"context"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/cmd/dolt/commands"
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
"github.com/dolthub/dolt/go/libraries/utils/argparser"
"github.com/dolthub/dolt/go/store/types"
)
var rebuildDocs = cli.CommandDocumentationContent{
@@ -74,6 +74,10 @@ func (cmd RebuildCmd) Exec(ctx context.Context, commandStr string, args []string
return HandleErr(errhand.BuildDError("Both the table and index names must be provided.").Build(), usage)
}
if dEnv.IsLocked() {
return commands.HandleVErrAndExitCode(errhand.VerboseErrorFromError(env.ErrActiveServerLock.New(dEnv.LockFile())), usage)
}
working, err := dEnv.WorkingRoot(context.Background())
if err != nil {
return HandleErr(errhand.BuildDError("Unable to get working.").AddCause(err).Build(), nil)
+4
View File
@@ -92,6 +92,10 @@ func (cmd MergeCmd) Exec(ctx context.Context, commandStr string, args []string,
return 1
}
if dEnv.IsLocked() {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(env.ErrActiveServerLock.New(dEnv.LockFile())), help)
}
var verr errhand.VerboseError
if apr.Contains(cli.AbortParam) {
mergeActive, err := dEnv.IsMergeActive(ctx)
+4
View File
@@ -76,6 +76,10 @@ func (cmd MergeBaseCmd) Exec(ctx context.Context, commandStr string, args []stri
return HandleVErrAndExitCode(verr, usage)
}
if dEnv.IsLocked() {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(env.ErrActiveServerLock.New(dEnv.LockFile())), help)
}
left, verr := ResolveCommitWithVErr(dEnv, apr.Arg(0))
if verr != nil {
return HandleVErrAndExitCode(verr, usage)
+5
View File
@@ -85,7 +85,12 @@ func (cmd PullCmd) Exec(ctx context.Context, commandStr string, args []string, d
remoteName = apr.Arg(0)
}
if dEnv.IsLocked() {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(env.ErrActiveServerLock.New(dEnv.LockFile())), help)
}
pullSpec, err := env.NewPullSpec(ctx, dEnv.RepoStateReader(), remoteName, apr.Contains(cli.SquashParam), apr.Contains(cli.NoFFParam), apr.Contains(cli.ForceFlag), apr.NArg() == 1)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
}
+4
View File
@@ -90,6 +90,10 @@ func (cmd ResetCmd) Exec(ctx context.Context, commandStr string, args []string,
return HandleDocTableVErrAndExitCode()
}
if dEnv.IsLocked() {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(env.ErrActiveServerLock.New(dEnv.LockFile())), help)
}
roots, err := dEnv.Roots(ctx)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
+5
View File
@@ -78,6 +78,11 @@ func (cmd RevertCmd) Exec(ctx context.Context, commandStr string, args []string,
usage()
return 1
}
if dEnv.IsLocked() {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(env.ErrActiveServerLock.New(dEnv.LockFile())), help)
}
headCommit, err := dEnv.HeadCommit(ctx)
if err != nil {
return HandleVErrAndExitCode(errhand.VerboseErrorFromError(err), usage)
+7 -3
View File
@@ -169,6 +169,10 @@ func (cmd ImportCmd) Exec(ctx context.Context, commandStr string, args []string,
return 1
}
if dEnv.IsLocked() {
return commands.HandleVErrAndExitCode(errhand.VerboseErrorFromError(env.ErrActiveServerLock.New(dEnv.LockFile())), usage)
}
return commands.HandleVErrAndExitCode(importSchema(ctx, dEnv, apr), usage)
}
@@ -296,7 +300,7 @@ func importSchema(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPars
return errhand.BuildDError("error: failed to get table.").AddCause(err).Build()
}
empty, err := durable.NewEmptyIndex(ctx, root.VRW(), sch)
empty, err := durable.NewEmptyIndex(ctx, root.VRW(), root.NodeStore(), sch)
if err != nil {
return errhand.BuildDError("error: failed to get table.").AddCause(err).Build()
}
@@ -308,13 +312,13 @@ func importSchema(ctx context.Context, dEnv *env.DoltEnv, apr *argparser.ArgPars
return errhand.BuildDError("error: failed to create table.").AddCause(err).Build()
}
} else {
indexSet, err = durable.NewIndexSetWithEmptyIndexes(ctx, root.VRW(), sch)
indexSet, err = durable.NewIndexSetWithEmptyIndexes(ctx, root.VRW(), root.NodeStore(), sch)
if err != nil {
return errhand.BuildDError("error: failed to get table.").AddCause(err).Build()
}
}
tbl, err = doltdb.NewTable(ctx, root.VRW(), sch, empty, indexSet, nil)
tbl, err = doltdb.NewTable(ctx, root.VRW(), root.NodeStore(), sch, empty, indexSet, nil)
if err != nil {
return errhand.BuildDError("error: failed to get table.").AddCause(err).Build()
}
+1 -1
View File
@@ -185,7 +185,7 @@ func Serve(
}
if ok, f := mrEnv.IsLocked(); ok {
startError = fmt.Errorf("%w: '%s'", env.ErrActiveServerLock, f)
startError = env.ErrActiveServerLock.New(f)
return
}
if err = mrEnv.Lock(); err != nil {
+1
View File
@@ -84,6 +84,7 @@ func (cmd MvCmd) Exec(ctx context.Context, commandStr string, args []string, dEn
oldName := apr.Arg(0)
newName := apr.Arg(1)
queryStr := ""
if force := apr.Contains(forceParam); force {
queryStr = fmt.Sprintf("DROP TABLE IF EXISTS `%s`;", newName)
+1 -1
View File
@@ -62,7 +62,7 @@ require (
)
require (
github.com/dolthub/go-mysql-server v0.12.1-0.20220701205010-9c7a0775ad68
github.com/dolthub/go-mysql-server v0.12.1-0.20220706193244-e3d0b36b4bdd
github.com/google/flatbuffers v2.0.6+incompatible
github.com/gosuri/uilive v0.0.4
github.com/kch42/buzhash v0.0.0-20160816060738-9bdec3dec7c6
+2 -2
View File
@@ -175,8 +175,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
github.com/dolthub/go-mysql-server v0.12.1-0.20220701205010-9c7a0775ad68 h1:IA2EKBtQyEor56O03p3XqH+dODo27b9bWnmvVUioJNw=
github.com/dolthub/go-mysql-server v0.12.1-0.20220701205010-9c7a0775ad68/go.mod h1:E7d38ZJM2YloaxJl+f7P3MYwfAA+NxAxBEKzNkzlUag=
github.com/dolthub/go-mysql-server v0.12.1-0.20220706193244-e3d0b36b4bdd h1:2klfowOpzFJp87HKTWnJkqVm6oAHhqfEEnV/LEFWH9E=
github.com/dolthub/go-mysql-server v0.12.1-0.20220706193244-e3d0b36b4bdd/go.mod h1:E7d38ZJM2YloaxJl+f7P3MYwfAA+NxAxBEKzNkzlUag=
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371 h1:oyPHJlzumKta1vnOQqUnfdz+pk3EmnHS3Nd0cCT0I2g=
github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371/go.mod h1:dhGBqcCEfK5kuFmeO5+WOx3hqc1k3M29c1oS/R7N4ms=
github.com/dolthub/jsonpath v0.0.0-20210609232853-d49537a30474 h1:xTrR+l5l+1Lfq0NvhiEsctylXinUMFhhsqaEcl414p8=
+6 -4
View File
@@ -30,6 +30,7 @@ import (
"github.com/dolthub/dolt/go/store/chunks"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/nbs"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -107,18 +108,19 @@ type AWSFactory struct {
}
// CreateDB creates an AWS backed database
func (fact AWSFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, types.ValueReadWriter, error) {
func (fact AWSFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, types.ValueReadWriter, tree.NodeStore, error) {
var db datas.Database
cs, err := fact.newChunkStore(ctx, nbf, urlObj, params)
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
vrw := types.NewValueStore(cs)
db = datas.NewTypesDatabase(vrw)
ns := tree.NewNodeStore(cs)
db = datas.NewTypesDatabase(vrw, ns)
return db, vrw, nil
return db, vrw, ns, nil
}
func (fact AWSFactory) newChunkStore(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (chunks.ChunkStore, error) {
+5 -4
View File
@@ -22,6 +22,7 @@ import (
"github.com/dolthub/dolt/go/libraries/utils/earl"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -53,7 +54,7 @@ const (
// DBFactory is an interface for creating concrete datas.Database instances which may have different backing stores.
type DBFactory interface {
CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, types.ValueReadWriter, error)
CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, types.ValueReadWriter, tree.NodeStore, error)
}
// DBFactories is a map from url scheme name to DBFactory. Additional factories can be added to the DBFactories map
@@ -70,11 +71,11 @@ var DBFactories = map[string]DBFactory{
// CreateDB creates a database based on the supplied urlStr, and creation params. The DBFactory used for creation is
// determined by the scheme of the url. Naked urls will use https by default.
func CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlStr string, params map[string]interface{}) (datas.Database, types.ValueReadWriter, error) {
func CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlStr string, params map[string]interface{}) (datas.Database, types.ValueReadWriter, tree.NodeStore, error) {
urlObj, err := earl.Parse(urlStr)
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
scheme := urlObj.Scheme
@@ -86,5 +87,5 @@ func CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlStr string, para
return fact.CreateDB(ctx, nbf, urlObj, params)
}
return nil, nil, fmt.Errorf("unknown url scheme: '%s'", urlObj.Scheme)
return nil, nil, nil, fmt.Errorf("unknown url scheme: '%s'", urlObj.Scheme)
}
@@ -54,9 +54,10 @@ func TestCreateFileDB(t *testing.T) {
func TestCreateMemDB(t *testing.T) {
ctx := context.Background()
db, vrw, err := CreateDB(ctx, types.Format_Default, "mem://", nil)
db, vrw, ns, err := CreateDB(ctx, types.Format_Default, "mem://", nil)
assert.NoError(t, err)
assert.NotNil(t, db)
assert.NotNil(t, vrw)
assert.NotNil(t, ns)
}
+10 -8
View File
@@ -24,6 +24,7 @@ import (
"github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/nbs"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -43,11 +44,11 @@ type FileFactory struct {
}
// CreateDB creates an local filesys backed database
func (fact FileFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, types.ValueReadWriter, error) {
func (fact FileFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, types.ValueReadWriter, tree.NodeStore, error) {
path, err := url.PathUnescape(urlObj.Path)
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
path = filepath.FromSlash(path)
@@ -55,40 +56,41 @@ func (fact FileFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat,
err = validateDir(path)
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
q := nbs.NewUnlimitedMemQuotaProvider()
newGenSt, err := nbs.NewLocalStore(ctx, nbf.VersionString(), path, defaultMemTableSize, q)
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
oldgenPath := filepath.Join(path, "oldgen")
err = validateDir(oldgenPath)
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
return nil, nil, err
return nil, nil, nil, err
}
err = os.Mkdir(oldgenPath, os.ModePerm)
if err != nil && !errors.Is(err, os.ErrExist) {
return nil, nil, err
return nil, nil, nil, err
}
}
oldGenSt, err := nbs.NewLocalStore(ctx, newGenSt.Version(), oldgenPath, defaultMemTableSize, q)
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
st := nbs.NewGenerationalCS(oldGenSt, newGenSt)
// metrics?
vrw := types.NewValueStore(st)
ns := tree.NewNodeStore(st)
return datas.NewTypesDatabase(vrw), vrw, nil
return datas.NewTypesDatabase(vrw, ns), vrw, ns, nil
}
func validateDir(path string) error {
+8 -6
View File
@@ -28,6 +28,7 @@ import (
"github.com/dolthub/dolt/go/libraries/events"
"github.com/dolthub/dolt/go/store/chunks"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -51,28 +52,29 @@ func NewDoltRemoteFactory(insecure bool) DoltRemoteFactory {
// CreateDB creates a database backed by a remote server that implements the GRPC rpcs defined by
// remoteapis.ChunkStoreServiceClient
func (fact DoltRemoteFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, types.ValueReadWriter, error) {
func (fact DoltRemoteFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, types.ValueReadWriter, tree.NodeStore, error) {
var db datas.Database
dpi, ok := params[GRPCDialProviderParam]
if dpi == nil || !ok {
return nil, nil, errors.New("DoltRemoteFactory.CreateDB must provide a GRPCDialProvider param through GRPCDialProviderParam")
return nil, nil, nil, errors.New("DoltRemoteFactory.CreateDB must provide a GRPCDialProvider param through GRPCDialProviderParam")
}
dp, ok := dpi.(GRPCDialProvider)
if !ok {
return nil, nil, errors.New("DoltRemoteFactory.CreateDB must provide a GRPCDialProvider param through GRPCDialProviderParam")
return nil, nil, nil, errors.New("DoltRemoteFactory.CreateDB must provide a GRPCDialProvider param through GRPCDialProviderParam")
}
cs, err := fact.newChunkStore(ctx, nbf, urlObj, params, dp)
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
vrw := types.NewValueStore(cs)
db = datas.NewTypesDatabase(vrw)
ns := tree.NewNodeStore(cs)
db = datas.NewTypesDatabase(vrw, ns)
return db, vrw, err
return db, vrw, ns, err
}
var NoCachingParameter = "__dolt__NO_CACHING"
+13 -10
View File
@@ -24,6 +24,7 @@ import (
"github.com/dolthub/dolt/go/store/blobstore"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/nbs"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -32,12 +33,12 @@ type GSFactory struct {
}
// CreateDB creates an GCS backed database
func (fact GSFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, types.ValueReadWriter, error) {
func (fact GSFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, types.ValueReadWriter, tree.NodeStore, error) {
var db datas.Database
gcs, err := storage.NewClient(ctx)
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
bs := blobstore.NewGCSBlobstore(gcs, urlObj.Host, urlObj.Path)
@@ -45,13 +46,14 @@ func (fact GSFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, ur
gcsStore, err := nbs.NewBSStore(ctx, nbf.VersionString(), bs, defaultMemTableSize, q)
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
vrw := types.NewValueStore(gcsStore)
db = datas.NewTypesDatabase(vrw)
ns := tree.NewNodeStore(gcsStore)
db = datas.NewTypesDatabase(vrw, ns)
return db, vrw, nil
return db, vrw, ns, nil
}
// LocalBSFactory is a DBFactory implementation for creating a local filesystem blobstore backed databases for testing
@@ -59,12 +61,12 @@ type LocalBSFactory struct {
}
// CreateDB creates a local filesystem blobstore backed database
func (fact LocalBSFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, types.ValueReadWriter, error) {
func (fact LocalBSFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, types.ValueReadWriter, tree.NodeStore, error) {
var db datas.Database
absPath, err := filepath.Abs(filepath.Join(urlObj.Host, urlObj.Path))
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
bs := blobstore.NewLocalBlobstore(absPath)
@@ -72,11 +74,12 @@ func (fact LocalBSFactory) CreateDB(ctx context.Context, nbf *types.NomsBinForma
bsStore, err := nbs.NewBSStore(ctx, nbf.VersionString(), bs, defaultMemTableSize, q)
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
vrw := types.NewValueStore(bsStore)
db = datas.NewTypesDatabase(vrw)
ns := tree.NewNodeStore(bsStore)
db = datas.NewTypesDatabase(vrw, ns)
return db, vrw, err
return db, vrw, ns, err
}
+5 -3
View File
@@ -20,6 +20,7 @@ import (
"github.com/dolthub/dolt/go/store/chunks"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -28,12 +29,13 @@ type MemFactory struct {
}
// CreateDB creates an in memory backed database
func (fact MemFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, types.ValueReadWriter, error) {
func (fact MemFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, types.ValueReadWriter, tree.NodeStore, error) {
var db datas.Database
storage := &chunks.MemoryStorage{}
cs := storage.NewViewWithDefaultFormat()
vrw := types.NewValueStore(cs)
db = datas.NewTypesDatabase(vrw)
ns := tree.NewNodeStore(cs)
db = datas.NewTypesDatabase(vrw, ns)
return db, vrw, nil
return db, vrw, ns, nil
}
+6 -5
View File
@@ -26,6 +26,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
filesys2 "github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -57,7 +58,7 @@ func TestDocDiff(t *testing.T) {
sch := createTestDocsSchema()
licRow := makeDocRow(t, sch, doltdocs.LicenseDoc, types.String("license row"))
m, _ := createTestRows(t, ddb.ValueReadWriter(), sch, []row.Row{licRow})
tbl1, err := CreateTestTable(ddb.ValueReadWriter(), sch, m)
tbl1, err := CreateTestTable(ddb.ValueReadWriter(), ddb.NodeStore(), sch, m)
assert.NoError(t, err)
// Create root2 with tbl1 on it (one doc: license)
@@ -75,7 +76,7 @@ func TestDocDiff(t *testing.T) {
// Create tbl2 with one readme row
readmeRow := makeDocRow(t, sch, doltdocs.ReadmeDoc, types.String("readme row"))
m, _ = createTestRows(t, ddb.ValueReadWriter(), sch, []row.Row{readmeRow})
tbl2, err := CreateTestTable(ddb.ValueReadWriter(), sch, m)
tbl2, err := CreateTestTable(ddb.ValueReadWriter(), ddb.NodeStore(), sch, m)
assert.NoError(t, err)
// Create root3 with tbl2 on it (one doc: readme)
@@ -93,7 +94,7 @@ func TestDocDiff(t *testing.T) {
// Create tbl3 with 2 doc rows (readme, license)
readmeRowUpdated := makeDocRow(t, sch, doltdocs.ReadmeDoc, types.String("a different readme"))
m, _ = createTestRows(t, ddb.ValueReadWriter(), sch, []row.Row{readmeRowUpdated, licRow})
tbl3, err := CreateTestTable(ddb.ValueReadWriter(), sch, m)
tbl3, err := CreateTestTable(ddb.ValueReadWriter(), ddb.NodeStore(), sch, m)
assert.NoError(t, err)
// Create root4 with tbl3 on it (two docs: readme and license)
@@ -117,8 +118,8 @@ func TestDocDiff(t *testing.T) {
}
}
func CreateTestTable(vrw types.ValueReadWriter, tSchema schema.Schema, rowData types.Map) (*doltdb.Table, error) {
tbl, err := doltdb.NewNomsTable(context.Background(), vrw, tSchema, rowData, nil, nil)
func CreateTestTable(vrw types.ValueReadWriter, ns tree.NodeStore, tSchema schema.Schema, rowData types.Map) (*doltdb.Table, error) {
tbl, err := doltdb.NewNomsTable(context.Background(), vrw, ns, tSchema, rowData, nil, nil)
if err != nil {
return nil, err
+2 -2
View File
@@ -367,7 +367,7 @@ func (td TableDelta) GetRowData(ctx context.Context) (from, to durable.Index, er
return from, to, err
}
} else {
from, _ = durable.NewEmptyIndex(ctx, td.ToTable.ValueReadWriter(), td.ToSch)
from, _ = durable.NewEmptyIndex(ctx, td.ToTable.ValueReadWriter(), td.ToTable.NodeStore(), td.ToSch)
}
if td.ToTable != nil {
@@ -376,7 +376,7 @@ func (td TableDelta) GetRowData(ctx context.Context) (from, to durable.Index, er
return from, to, err
}
} else {
to, _ = durable.NewEmptyIndex(ctx, td.FromTable.ValueReadWriter(), td.FromSch)
to, _ = durable.NewEmptyIndex(ctx, td.FromTable.ValueReadWriter(), td.FromTable.NodeStore(), td.FromSch)
}
return from, to, nil
+10 -8
View File
@@ -21,6 +21,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -30,16 +31,17 @@ var errHasNoRootValue = errors.New("no root value")
// Commit contains information on a commit that was written to noms
type Commit struct {
vrw types.ValueReadWriter
ns tree.NodeStore
parents []*datas.Commit
dCommit *datas.Commit
}
func NewCommit(ctx context.Context, vrw types.ValueReadWriter, commit *datas.Commit) (*Commit, error) {
func NewCommit(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, commit *datas.Commit) (*Commit, error) {
parents, err := datas.GetCommitParents(ctx, vrw, commit.NomsValue())
if err != nil {
return nil, err
}
return &Commit{vrw, parents, commit}, nil
return &Commit{vrw, ns, parents, commit}, nil
}
// HashOf returns the hash of the commit
@@ -84,17 +86,17 @@ func (c *Commit) GetRootValue(ctx context.Context) (*RootValue, error) {
if rootV == nil {
return nil, errHasNoRootValue
}
return newRootValue(c.vrw, rootV)
return newRootValue(c.vrw, c.ns, rootV)
}
func (c *Commit) GetParent(ctx context.Context, idx int) (*Commit, error) {
return NewCommit(ctx, c.vrw, c.parents[idx])
return NewCommit(ctx, c.vrw, c.ns, c.parents[idx])
}
var ErrNoCommonAncestor = errors.New("no common ancestor")
func GetCommitAncestor(ctx context.Context, cm1, cm2 *Commit) (*Commit, error) {
addr, err := getCommitAncestorAddr(ctx, cm1.dCommit, cm2.dCommit, cm1.vrw, cm2.vrw)
addr, err := getCommitAncestorAddr(ctx, cm1.dCommit, cm2.dCommit, cm1.vrw, cm2.vrw, cm1.ns, cm2.ns)
if err != nil {
return nil, err
}
@@ -104,11 +106,11 @@ func GetCommitAncestor(ctx context.Context, cm1, cm2 *Commit) (*Commit, error) {
return nil, err
}
return NewCommit(ctx, cm1.vrw, targetCommit)
return NewCommit(ctx, cm1.vrw, cm1.ns, targetCommit)
}
func getCommitAncestorAddr(ctx context.Context, c1, c2 *datas.Commit, vrw1, vrw2 types.ValueReadWriter) (hash.Hash, error) {
ancestorAddr, ok, err := datas.FindCommonAncestor(ctx, c1, c2, vrw1, vrw2)
func getCommitAncestorAddr(ctx context.Context, c1, c2 *datas.Commit, vrw1, vrw2 types.ValueReadWriter, ns1, ns2 tree.NodeStore) (hash.Hash, error) {
ancestorAddr, ok, err := datas.FindCommonAncestor(ctx, c1, c2, vrw1, vrw2, ns1, ns2)
if err != nil {
return hash.Hash{}, err
}
@@ -106,7 +106,7 @@ func TestPushOnWriteHook(t *testing.T) {
tSchema := createTestSchema(t)
rowData, _ := createTestRowData(t, ddb.vrw, tSchema)
tbl, err := CreateTestTable(ddb.vrw, tSchema, rowData)
tbl, err := CreateTestTable(ddb.vrw, ddb.ns, tSchema, rowData)
if err != nil {
t.Fatal("Failed to create test table with data")
@@ -244,7 +244,7 @@ func TestAsyncPushOnWrite(t *testing.T) {
tSchema := createTestSchema(t)
rowData, _ := createTestRowData(t, ddb.vrw, tSchema)
tbl, err := CreateTestTable(ddb.vrw, tSchema, rowData)
tbl, err := CreateTestTable(ddb.vrw, ddb.ns, tSchema, rowData)
if err != nil {
t.Fatal("Failed to create test table with data")
+4 -3
View File
@@ -20,6 +20,7 @@ import (
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -132,7 +133,7 @@ func (cmItr *commitItr) Next(ctx context.Context) (hash.Hash, *Commit, error) {
next := cmItr.unprocessed[numUnprocessed-1]
cmItr.unprocessed = cmItr.unprocessed[:numUnprocessed-1]
cmItr.curr, err = hashToCommit(ctx, cmItr.ddb.ValueReadWriter(), next)
cmItr.curr, err = hashToCommit(ctx, cmItr.ddb.ValueReadWriter(), cmItr.ddb.ns, next)
if err != nil {
return hash.Hash{}, nil, err
@@ -141,12 +142,12 @@ func (cmItr *commitItr) Next(ctx context.Context) (hash.Hash, *Commit, error) {
return next, cmItr.curr, nil
}
func hashToCommit(ctx context.Context, vrw types.ValueReadWriter, h hash.Hash) (*Commit, error) {
func hashToCommit(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, h hash.Hash) (*Commit, error) {
dc, err := datas.LoadCommitAddr(ctx, vrw, h)
if err != nil {
return nil, err
}
return NewCommit(ctx, vrw, dc)
return NewCommit(ctx, vrw, ns, dc)
}
// CommitFilter is a function that returns true if a commit should be filtered out, and false if it should be kept
+21 -14
View File
@@ -30,6 +30,7 @@ import (
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/datas/pull"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/store/types/edits"
)
@@ -66,14 +67,16 @@ var ErrCannotDeleteLastBranch = errors.New("cannot delete the last branch")
type DoltDB struct {
db hooksDatabase
vrw types.ValueReadWriter
ns tree.NodeStore
}
// DoltDBFromCS creates a DoltDB from a noms chunks.ChunkStore
func DoltDBFromCS(cs chunks.ChunkStore) *DoltDB {
vrw := types.NewValueStore(cs)
db := datas.NewTypesDatabase(vrw)
ns := tree.NewNodeStore(cs)
db := datas.NewTypesDatabase(vrw, ns)
return &DoltDB{hooksDatabase{Database: db}, vrw}
return &DoltDB{hooksDatabase{Database: db}, vrw, ns}
}
// LoadDoltDB will acquire a reference to the underlying noms db. If the Location is InMemDoltDB then a reference
@@ -101,13 +104,13 @@ func LoadDoltDBWithParams(ctx context.Context, nbf *types.NomsBinFormat, urlStr
urlStr = fmt.Sprintf("file://%s", filepath.ToSlash(absPath))
}
db, vrw, err := dbfactory.CreateDB(ctx, nbf, urlStr, params)
db, vrw, ns, err := dbfactory.CreateDB(ctx, nbf, urlStr, params)
if err != nil {
return nil, err
}
return &DoltDB{hooksDatabase{Database: db}, vrw}, nil
return &DoltDB{hooksDatabase{Database: db}, vrw, ns}, nil
}
// NomsRoot returns the hash of the noms dataset map
@@ -162,7 +165,7 @@ func (ddb *DoltDB) WriteEmptyRepoWithCommitTimeAndDefaultBranch(
return errors.New("database already exists")
}
rv, err := EmptyRootValue(ctx, ddb.vrw)
rv, err := EmptyRootValue(ctx, ddb.vrw, ddb.ns)
if err != nil {
return err
@@ -318,7 +321,7 @@ func (ddb *DoltDB) Resolve(ctx context.Context, cs *CommitSpec, cwb ref.DoltRef)
return nil, err
}
commit, err := NewCommit(ctx, ddb.vrw, commitVal)
commit, err := NewCommit(ctx, ddb.vrw, ddb.ns, commitVal)
if err != nil {
return nil, err
}
@@ -332,7 +335,7 @@ func (ddb *DoltDB) ResolveCommitRef(ctx context.Context, ref ref.DoltRef) (*Comm
if err != nil {
return nil, err
}
return NewCommit(ctx, ddb.vrw, commitVal)
return NewCommit(ctx, ddb.vrw, ddb.ns, commitVal)
}
// ResolveTag takes a TagRef and returns the corresponding Tag object.
@@ -350,7 +353,7 @@ func (ddb *DoltDB) ResolveTag(ctx context.Context, tagRef ref.TagRef) (*Tag, err
return nil, fmt.Errorf("tagRef head is not a tag")
}
return NewTag(ctx, tagRef.GetPath(), ds, ddb.vrw)
return NewTag(ctx, tagRef.GetPath(), ds, ddb.vrw, ddb.ns)
}
// ResolveWorkingSet takes a WorkingSetRef and returns the corresponding WorkingSet object.
@@ -369,7 +372,7 @@ func (ddb *DoltDB) ResolveWorkingSet(ctx context.Context, workingSetRef ref.Work
return nil, fmt.Errorf("workingSetRef head is not a workingSetRef")
}
return NewWorkingSet(ctx, workingSetRef.GetPath(), ddb.vrw, ds)
return NewWorkingSet(ctx, workingSetRef.GetPath(), ddb.vrw, ddb.ns, ds)
}
// TODO: convenience method to resolve the head commit of a branch.
@@ -407,7 +410,7 @@ func (ddb *DoltDB) ReadRootValue(ctx context.Context, h hash.Hash) (*RootValue,
if err != nil {
return nil, err
}
return decodeRootNomsValue(ddb.vrw, val)
return decodeRootNomsValue(ddb.vrw, ddb.ns, val)
}
// Commit will update a branch's head value to be that of a previously committed root value hash
@@ -548,7 +551,7 @@ func (ddb *DoltDB) CommitWithParentCommits(ctx context.Context, valHash hash.Has
return nil, err
}
return NewCommit(ctx, ddb.vrw, dc)
return NewCommit(ctx, ddb.vrw, ddb.ns, dc)
}
// dangling commits are unreferenced by any branch or ref. They are created in the course of programmatic updates
@@ -572,7 +575,7 @@ func (ddb *DoltDB) CommitDanglingWithParentCommits(ctx context.Context, valHash
}
commitOpts := datas.CommitOptions{Parents: parents, Meta: cm}
dcommit, err := datas.NewCommitForValue(ctx, datas.ChunkStoreFromDatabase(ddb.db), ddb.vrw, val, commitOpts)
dcommit, err := datas.NewCommitForValue(ctx, datas.ChunkStoreFromDatabase(ddb.db), ddb.vrw, ddb.ns, val, commitOpts)
if err != nil {
return nil, err
}
@@ -582,7 +585,7 @@ func (ddb *DoltDB) CommitDanglingWithParentCommits(ctx context.Context, valHash
return nil, err
}
return NewCommit(ctx, ddb.vrw, dcommit)
return NewCommit(ctx, ddb.vrw, ddb.ns, dcommit)
}
// ValueReadWriter returns the underlying noms database as a types.ValueReadWriter.
@@ -590,6 +593,10 @@ func (ddb *DoltDB) ValueReadWriter() types.ValueReadWriter {
return ddb.vrw
}
func (ddb *DoltDB) NodeStore() tree.NodeStore {
return ddb.ns
}
func (ddb *DoltDB) Format() *types.NomsBinFormat {
return ddb.vrw.Format()
}
@@ -1011,7 +1018,7 @@ func (ddb *DoltDB) CommitWithWorkingSet(
return nil, err
}
return NewCommit(ctx, ddb.vrw, dc)
return NewCommit(ctx, ddb.vrw, ddb.ns, dc)
}
// DeleteWorkingSet deletes the working set given
+4 -3
View File
@@ -32,6 +32,7 @@ import (
"github.com/dolthub/dolt/go/libraries/utils/test"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -69,8 +70,8 @@ func createTestSchema(t *testing.T) schema.Schema {
return sch
}
func CreateTestTable(vrw types.ValueReadWriter, tSchema schema.Schema, rowData types.Map) (*Table, error) {
tbl, err := NewNomsTable(context.Background(), vrw, tSchema, rowData, nil, nil)
func CreateTestTable(vrw types.ValueReadWriter, ns tree.NodeStore, tSchema schema.Schema, rowData types.Map) (*Table, error) {
tbl, err := NewNomsTable(context.Background(), vrw, ns, tSchema, rowData, nil, nil)
if err != nil {
return nil, err
@@ -288,7 +289,7 @@ func TestLDNoms(t *testing.T) {
tSchema := createTestSchema(t)
rowData, _ := createTestRowData(t, ddb.vrw, tSchema)
tbl, err = CreateTestTable(ddb.vrw, tSchema, rowData)
tbl, err = CreateTestTable(ddb.vrw, ddb.ns, tSchema, rowData)
if err != nil {
t.Fatal("Failed to create test table with data")
@@ -55,14 +55,13 @@ func RefFromArtifactIndex(ctx context.Context, vrw types.ValueReadWriter, idx Ar
}
// NewEmptyArtifactIndex returns an ArtifactIndex with no artifacts.
func NewEmptyArtifactIndex(ctx context.Context, vrw types.ValueReadWriter, tableSch schema.Schema) (ArtifactIndex, error) {
func NewEmptyArtifactIndex(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, tableSch schema.Schema) (ArtifactIndex, error) {
switch vrw.Format() {
case types.Format_LD_1, types.Format_7_18, types.Format_DOLT_DEV:
panic("TODO")
case types.Format_DOLT_1:
kd := shim.KeyDescriptorFromSchema(tableSch)
ns := tree.NewNodeStore(shim.ChunkStoreFromVRW(vrw))
m, err := prolly.NewArtifactMapFromTuples(ctx, ns, kd)
if err != nil {
return nil, err
@@ -84,11 +83,11 @@ func ProllyMapFromArtifactIndex(i ArtifactIndex) prolly.ArtifactMap {
return i.(prollyArtifactIndex).index
}
func artifactIndexFromRef(ctx context.Context, vrw types.ValueReadWriter, tableSch schema.Schema, r types.Ref) (ArtifactIndex, error) {
return artifactIndexFromAddr(ctx, vrw, tableSch, r.TargetHash())
func artifactIndexFromRef(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, tableSch schema.Schema, r types.Ref) (ArtifactIndex, error) {
return artifactIndexFromAddr(ctx, vrw, ns, tableSch, r.TargetHash())
}
func artifactIndexFromAddr(ctx context.Context, vrw types.ValueReadWriter, tableSch schema.Schema, addr hash.Hash) (ArtifactIndex, error) {
func artifactIndexFromAddr(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, tableSch schema.Schema, addr hash.Hash) (ArtifactIndex, error) {
v, err := vrw.ReadValue(ctx, addr)
if err != nil {
return nil, err
@@ -101,7 +100,6 @@ func artifactIndexFromAddr(ctx context.Context, vrw types.ValueReadWriter, table
case types.Format_DOLT_1:
root := shim.NodeFromValue(v)
kd := shim.KeyDescriptorFromSchema(tableSch)
ns := tree.NewNodeStore(shim.ChunkStoreFromVRW(vrw))
m := prolly.NewArtifactMap(root, ns, kd)
return ArtifactIndexFromProllyMap(m), nil
@@ -47,7 +47,7 @@ func RefFromConflictIndex(ctx context.Context, vrw types.ValueReadWriter, idx Co
}
// NewEmptyConflictIndex returns an ConflictIndex with no rows.
func NewEmptyConflictIndex(ctx context.Context, vrw types.ValueReadWriter, oursSch, theirsSch, baseSch schema.Schema) (ConflictIndex, error) {
func NewEmptyConflictIndex(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, oursSch, theirsSch, baseSch schema.Schema) (ConflictIndex, error) {
switch vrw.Format() {
case types.Format_LD_1, types.Format_7_18, types.Format_DOLT_DEV:
m, err := types.NewMap(ctx, vrw)
@@ -60,7 +60,6 @@ func NewEmptyConflictIndex(ctx context.Context, vrw types.ValueReadWriter, oursS
kd, oursVD := shim.MapDescriptorsFromSchema(oursSch)
theirsVD := shim.ValueDescriptorFromSchema(theirsSch)
baseVD := shim.ValueDescriptorFromSchema(baseSch)
ns := tree.NewNodeStore(shim.ChunkStoreFromVRW(vrw))
m := prolly.NewEmptyConflictMap(ns, kd, oursVD, theirsVD, baseVD)
@@ -92,11 +91,11 @@ func ProllyMapFromConflictIndex(i ConflictIndex) prolly.ConflictMap {
return i.(prollyConflictIndex).index
}
func conflictIndexFromRef(ctx context.Context, vrw types.ValueReadWriter, ourSch, theirSch, baseSch schema.Schema, r types.Ref) (ConflictIndex, error) {
return conflictIndexFromAddr(ctx, vrw, ourSch, theirSch, baseSch, r.TargetHash())
func conflictIndexFromRef(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, ourSch, theirSch, baseSch schema.Schema, r types.Ref) (ConflictIndex, error) {
return conflictIndexFromAddr(ctx, vrw, ns, ourSch, theirSch, baseSch, r.TargetHash())
}
func conflictIndexFromAddr(ctx context.Context, vrw types.ValueReadWriter, ourSch, theirSch, baseSch schema.Schema, addr hash.Hash) (ConflictIndex, error) {
func conflictIndexFromAddr(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, ourSch, theirSch, baseSch schema.Schema, addr hash.Hash) (ConflictIndex, error) {
v, err := vrw.ReadValue(ctx, addr)
if err != nil {
return nil, err
@@ -107,7 +106,7 @@ func conflictIndexFromAddr(ctx context.Context, vrw types.ValueReadWriter, ourSc
return ConflictIndexFromNomsMap(v.(types.Map), vrw), nil
case types.Format_DOLT_1:
m := shim.ConflictMapFromValue(v, ourSch, theirSch, baseSch, vrw)
m := shim.ConflictMapFromValue(v, ourSch, theirSch, baseSch, ns)
return ConflictIndexFromProllyMap(m), nil
default:
+27 -25
View File
@@ -90,11 +90,11 @@ func RefFromIndex(ctx context.Context, vrw types.ValueReadWriter, idx Index) (ty
}
// indexFromRef reads the types.Ref from storage and returns the Index it points to.
func indexFromRef(ctx context.Context, vrw types.ValueReadWriter, sch schema.Schema, r types.Ref) (Index, error) {
return indexFromAddr(ctx, vrw, sch, r.TargetHash())
func indexFromRef(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, sch schema.Schema, r types.Ref) (Index, error) {
return indexFromAddr(ctx, vrw, ns, sch, r.TargetHash())
}
func indexFromAddr(ctx context.Context, vrw types.ValueReadWriter, sch schema.Schema, addr hash.Hash) (Index, error) {
func indexFromAddr(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, sch schema.Schema, addr hash.Hash) (Index, error) {
v, err := vrw.ReadValue(ctx, addr)
if err != nil {
return nil, err
@@ -102,10 +102,10 @@ func indexFromAddr(ctx context.Context, vrw types.ValueReadWriter, sch schema.Sc
switch vrw.Format() {
case types.Format_LD_1, types.Format_7_18, types.Format_DOLT_DEV:
return IndexFromNomsMap(v.(types.Map), vrw), nil
return IndexFromNomsMap(v.(types.Map), vrw, ns), nil
case types.Format_DOLT_1:
pm := shim.MapFromValue(v, sch, vrw)
pm := shim.MapFromValue(v, sch, ns)
return IndexFromProllyMap(pm), nil
default:
@@ -114,18 +114,17 @@ func indexFromAddr(ctx context.Context, vrw types.ValueReadWriter, sch schema.Sc
}
// NewEmptyIndex returns an index with no rows.
func NewEmptyIndex(ctx context.Context, vrw types.ValueReadWriter, sch schema.Schema) (Index, error) {
func NewEmptyIndex(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, sch schema.Schema) (Index, error) {
switch vrw.Format() {
case types.Format_LD_1, types.Format_7_18, types.Format_DOLT_DEV:
m, err := types.NewMap(ctx, vrw)
if err != nil {
return nil, err
}
return IndexFromNomsMap(m, vrw), nil
return IndexFromNomsMap(m, vrw, ns), nil
case types.Format_DOLT_1:
kd, vd := shim.MapDescriptorsFromSchema(sch)
ns := tree.NewNodeStore(shim.ChunkStoreFromVRW(vrw))
m, err := prolly.NewMapFromTuples(ctx, ns, kd, vd)
if err != nil {
return nil, err
@@ -140,6 +139,7 @@ func NewEmptyIndex(ctx context.Context, vrw types.ValueReadWriter, sch schema.Sc
type nomsIndex struct {
index types.Map
vrw types.ValueReadWriter
ns tree.NodeStore
}
var _ Index = nomsIndex{}
@@ -168,10 +168,11 @@ func NomsMapFromIndex(i Index) types.Map {
}
// IndexFromNomsMap wraps a types.Map and returns it as an Index.
func IndexFromNomsMap(m types.Map, vrw types.ValueReadWriter) Index {
func IndexFromNomsMap(m types.Map, vrw types.ValueReadWriter, ns tree.NodeStore) Index {
return nomsIndex{
index: m,
vrw: vrw,
ns: ns,
}
}
@@ -318,11 +319,10 @@ func (i prollyIndex) AddColumnToRows(ctx context.Context, newCol string, newSche
}
// NewIndexSet returns an empty IndexSet.
func NewIndexSet(ctx context.Context, vrw types.ValueReadWriter) IndexSet {
func NewIndexSet(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore) IndexSet {
if vrw.Format().UsesFlatbuffers() {
ns := tree.NewNodeStore(shim.ChunkStoreFromVRW(vrw))
emptyam := prolly.NewEmptyAddressMap(ns)
return doltDevIndexSet{vrw, emptyam}
return doltDevIndexSet{vrw, ns, emptyam}
}
empty, _ := types.NewMap(ctx, vrw)
@@ -332,10 +332,10 @@ func NewIndexSet(ctx context.Context, vrw types.ValueReadWriter) IndexSet {
}
}
func NewIndexSetWithEmptyIndexes(ctx context.Context, vrw types.ValueReadWriter, sch schema.Schema) (IndexSet, error) {
s := NewIndexSet(ctx, vrw)
func NewIndexSetWithEmptyIndexes(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, sch schema.Schema) (IndexSet, error) {
s := NewIndexSet(ctx, vrw, ns)
for _, index := range sch.Indexes().AllIndexes() {
empty, err := NewEmptyIndex(ctx, vrw, index.Schema())
empty, err := NewEmptyIndex(ctx, vrw, ns, index.Schema())
if err != nil {
return nil, err
}
@@ -350,6 +350,7 @@ func NewIndexSetWithEmptyIndexes(ctx context.Context, vrw types.ValueReadWriter,
type nomsIndexSet struct {
indexes types.Map
vrw types.ValueReadWriter
ns tree.NodeStore
}
var _ IndexSet = nomsIndexSet{}
@@ -374,12 +375,12 @@ func (s nomsIndexSet) GetIndex(ctx context.Context, sch schema.Schema, name stri
return nil, fmt.Errorf("index not found: %s", name)
}
return indexFromRef(ctx, s.vrw, idx.Schema(), v.(types.Ref))
return indexFromRef(ctx, s.vrw, s.ns, idx.Schema(), v.(types.Ref))
}
// PutIndex implements IndexSet.
func (s nomsIndexSet) PutNomsIndex(ctx context.Context, name string, idx types.Map) (IndexSet, error) {
return s.PutIndex(ctx, name, IndexFromNomsMap(idx, s.vrw))
return s.PutIndex(ctx, name, IndexFromNomsMap(idx, s.vrw, s.ns))
}
// PutIndex implements IndexSet.
@@ -394,7 +395,7 @@ func (s nomsIndexSet) PutIndex(ctx context.Context, name string, idx Index) (Ind
return nil, err
}
return nomsIndexSet{indexes: im, vrw: s.vrw}, nil
return nomsIndexSet{indexes: im, vrw: s.vrw, ns: s.ns}, nil
}
// DropIndex implements IndexSet.
@@ -404,7 +405,7 @@ func (s nomsIndexSet) DropIndex(ctx context.Context, name string) (IndexSet, err
return nil, err
}
return nomsIndexSet{indexes: im, vrw: s.vrw}, nil
return nomsIndexSet{indexes: im, vrw: s.vrw, ns: s.ns}, nil
}
func (s nomsIndexSet) RenameIndex(ctx context.Context, oldName, newName string) (IndexSet, error) {
@@ -422,7 +423,7 @@ func (s nomsIndexSet) RenameIndex(ctx context.Context, oldName, newName string)
return nil, err
}
return nomsIndexSet{indexes: im, vrw: s.vrw}, nil
return nomsIndexSet{indexes: im, vrw: s.vrw, ns: s.ns}, nil
}
func mapFromIndexSet(ic IndexSet) types.Map {
@@ -431,6 +432,7 @@ func mapFromIndexSet(ic IndexSet) types.Map {
type doltDevIndexSet struct {
vrw types.ValueReadWriter
ns tree.NodeStore
am prolly.AddressMap
}
@@ -452,7 +454,7 @@ func (is doltDevIndexSet) GetIndex(ctx context.Context, sch schema.Schema, name
if idxSch == nil {
return nil, fmt.Errorf("index schema not found: %s", name)
}
return indexFromAddr(ctx, is.vrw, idxSch.Schema(), addr)
return indexFromAddr(ctx, is.vrw, is.ns, idxSch.Schema(), addr)
}
func (is doltDevIndexSet) PutIndex(ctx context.Context, name string, idx Index) (IndexSet, error) {
@@ -471,11 +473,11 @@ func (is doltDevIndexSet) PutIndex(ctx context.Context, name string, idx Index)
return nil, err
}
return doltDevIndexSet{is.vrw, am}, nil
return doltDevIndexSet{is.vrw, is.ns, am}, nil
}
func (is doltDevIndexSet) PutNomsIndex(ctx context.Context, name string, idx types.Map) (IndexSet, error) {
return is.PutIndex(ctx, name, IndexFromNomsMap(idx, is.vrw))
return is.PutIndex(ctx, name, IndexFromNomsMap(idx, is.vrw, is.ns))
}
func (is doltDevIndexSet) DropIndex(ctx context.Context, name string) (IndexSet, error) {
@@ -488,7 +490,7 @@ func (is doltDevIndexSet) DropIndex(ctx context.Context, name string) (IndexSet,
if err != nil {
return nil, err
}
return doltDevIndexSet{is.vrw, am}, nil
return doltDevIndexSet{is.vrw, is.ns, am}, nil
}
func (is doltDevIndexSet) RenameIndex(ctx context.Context, oldName, newName string) (IndexSet, error) {
@@ -522,5 +524,5 @@ func (is doltDevIndexSet) RenameIndex(ctx context.Context, oldName, newName stri
return nil, err
}
return doltDevIndexSet{is.vrw, am}, nil
return doltDevIndexSet{is.vrw, is.ns, am}, nil
}
+59 -47
View File
@@ -112,6 +112,7 @@ type Table interface {
type nomsTable struct {
vrw types.ValueReadWriter
ns tree.NodeStore
tableStruct types.Struct
}
@@ -120,14 +121,14 @@ var _ Table = nomsTable{}
var sharePool = pool.NewBuffPool()
// NewNomsTable makes a new Table.
func NewNomsTable(ctx context.Context, vrw types.ValueReadWriter, sch schema.Schema, rows types.Map, indexes IndexSet, autoIncVal types.Value) (Table, error) {
return NewTable(ctx, vrw, sch, nomsIndex{index: rows, vrw: vrw}, indexes, autoIncVal)
func NewNomsTable(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, sch schema.Schema, rows types.Map, indexes IndexSet, autoIncVal types.Value) (Table, error) {
return NewTable(ctx, vrw, ns, sch, nomsIndex{index: rows, vrw: vrw}, indexes, autoIncVal)
}
// NewTable returns a new Table.
func NewTable(ctx context.Context, vrw types.ValueReadWriter, sch schema.Schema, rows Index, indexes IndexSet, autoIncVal types.Value) (Table, error) {
func NewTable(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, sch schema.Schema, rows Index, indexes IndexSet, autoIncVal types.Value) (Table, error) {
if vrw.Format().UsesFlatbuffers() {
return newDoltDevTable(ctx, vrw, sch, rows, indexes, autoIncVal)
return newDoltDevTable(ctx, vrw, ns, sch, rows, indexes, autoIncVal)
}
schVal, err := encoding.MarshalSchemaAsNomsValue(ctx, vrw, sch)
@@ -146,7 +147,7 @@ func NewTable(ctx context.Context, vrw types.ValueReadWriter, sch schema.Schema,
}
if indexes == nil {
indexes = NewIndexSet(ctx, vrw)
indexes = NewIndexSet(ctx, vrw, ns)
}
indexesRef, err := refFromNomsValue(ctx, vrw, mapFromIndexSet(indexes))
@@ -169,11 +170,11 @@ func NewTable(ctx context.Context, vrw types.ValueReadWriter, sch schema.Schema,
return nil, err
}
return nomsTable{vrw, tableStruct}, nil
return nomsTable{vrw, ns, tableStruct}, nil
}
// TableFromAddr deserializes the table in the chunk at |addr|.
func TableFromAddr(ctx context.Context, vrw types.ValueReadWriter, addr hash.Hash) (Table, error) {
func TableFromAddr(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, addr hash.Hash) (Table, error) {
val, err := vrw.ReadValue(ctx, addr)
if err != nil {
return nil, err
@@ -186,7 +187,7 @@ func TableFromAddr(ctx context.Context, vrw types.ValueReadWriter, addr hash.Has
return nil, err
}
return nomsTable{vrw: vrw, tableStruct: st}, nil
return nomsTable{vrw: vrw, tableStruct: st, ns: ns}, nil
} else {
sm, ok := val.(types.SerialMessage)
if !ok {
@@ -197,7 +198,7 @@ func TableFromAddr(ctx context.Context, vrw types.ValueReadWriter, addr hash.Has
err = errors.New("table ref is unexpected noms value; GetFileID == " + serial.GetFileID([]byte(sm)))
return nil, err
}
return doltDevTable{vrw, serial.GetRootAsTable([]byte(sm), 0)}, nil
return doltDevTable{vrw, ns, serial.GetRootAsTable([]byte(sm), 0)}, nil
}
}
@@ -224,6 +225,15 @@ func VrwFromTable(t Table) types.ValueReadWriter {
}
}
func NodeStoreFromTable(t Table) tree.NodeStore {
if nt, ok := t.(nomsTable); ok {
return nt.ns
} else {
ddt := t.(doltDevTable)
return ddt.ns
}
}
// valueReadWriter returns the valueReadWriter for this table.
func (t nomsTable) valueReadWriter() types.ValueReadWriter {
return t.vrw
@@ -277,7 +287,7 @@ func (t nomsTable) SetSchema(ctx context.Context, sch schema.Schema) (Table, err
return nil, err
}
return nomsTable{t.vrw, newTableStruct}, nil
return nomsTable{t.vrw, t.ns, newTableStruct}, nil
}
// SetTableRows implements Table.
@@ -292,7 +302,7 @@ func (t nomsTable) SetTableRows(ctx context.Context, updatedRows Index) (Table,
return nil, err
}
return nomsTable{t.vrw, updatedSt}, nil
return nomsTable{t.vrw, t.ns, updatedSt}, nil
}
// GetTableRows implements Table.
@@ -307,7 +317,7 @@ func (t nomsTable) GetTableRows(ctx context.Context) (Index, error) {
return nil, err
}
return indexFromRef(ctx, t.vrw, sch, val.(types.Ref))
return indexFromRef(ctx, t.vrw, t.ns, sch, val.(types.Ref))
}
// GetIndexes implements Table.
@@ -317,7 +327,7 @@ func (t nomsTable) GetIndexes(ctx context.Context) (IndexSet, error) {
return nil, err
}
if !ok {
return NewIndexSet(ctx, t.vrw), nil
return NewIndexSet(ctx, t.vrw, t.ns), nil
}
im, err := iv.(types.Ref).TargetValue(ctx, t.vrw)
@@ -328,13 +338,14 @@ func (t nomsTable) GetIndexes(ctx context.Context) (IndexSet, error) {
return nomsIndexSet{
indexes: im.(types.Map),
vrw: t.vrw,
ns: t.ns,
}, nil
}
// SetIndexes implements Table.
func (t nomsTable) SetIndexes(ctx context.Context, indexes IndexSet) (Table, error) {
if indexes == nil {
indexes = NewIndexSet(ctx, t.vrw)
indexes = NewIndexSet(ctx, t.vrw, t.ns)
}
indexesRef, err := refFromNomsValue(ctx, t.vrw, mapFromIndexSet(indexes))
@@ -347,7 +358,7 @@ func (t nomsTable) SetIndexes(ctx context.Context, indexes IndexSet) (Table, err
return nil, err
}
return nomsTable{t.vrw, newTableStruct}, nil
return nomsTable{t.vrw, t.ns, newTableStruct}, nil
}
// GetArtifacts implements Table.
@@ -366,10 +377,10 @@ func (t nomsTable) GetArtifacts(ctx context.Context) (ArtifactIndex, error) {
return nil, err
}
if !ok {
return NewEmptyArtifactIndex(ctx, t.vrw, sch)
return NewEmptyArtifactIndex(ctx, t.vrw, t.ns, sch)
}
return artifactIndexFromRef(ctx, t.vrw, sch, val.(types.Ref))
return artifactIndexFromRef(ctx, t.vrw, t.ns, sch, val.(types.Ref))
}
// SetArtifacts implements Table.
@@ -388,7 +399,7 @@ func (t nomsTable) SetArtifacts(ctx context.Context, artifacts ArtifactIndex) (T
return nil, err
}
return nomsTable{t.vrw, updated}, nil
return nomsTable{t.vrw, t.ns, updated}, nil
}
// HasConflicts implements Table.
@@ -408,7 +419,7 @@ func (t nomsTable) GetConflicts(ctx context.Context) (conflict.ConflictSchema, C
if err != nil {
return conflict.ConflictSchema{}, nil, err
}
empty, err := NewEmptyConflictIndex(ctx, t.vrw, sch, sch, sch)
empty, err := NewEmptyConflictIndex(ctx, t.vrw, t.ns, sch, sch, sch)
if err != nil {
return conflict.ConflictSchema{}, nil, err
}
@@ -426,14 +437,14 @@ func (t nomsTable) GetConflicts(ctx context.Context) (conflict.ConflictSchema, C
}
if conflictsVal == nil {
confIndex, err := NewEmptyConflictIndex(ctx, t.vrw, schemas.Schema, schemas.MergeSchema, schemas.Base)
confIndex, err := NewEmptyConflictIndex(ctx, t.vrw, t.ns, schemas.Schema, schemas.MergeSchema, schemas.Base)
if err != nil {
return conflict.ConflictSchema{}, nil, err
}
return conflict.ConflictSchema{}, confIndex, nil
}
i, err := conflictIndexFromRef(ctx, t.vrw, schemas.Schema, schemas.MergeSchema, schemas.Base, conflictsVal.(types.Ref))
i, err := conflictIndexFromRef(ctx, t.vrw, t.ns, schemas.Schema, schemas.MergeSchema, schemas.Base, conflictsVal.(types.Ref))
if err != nil {
return conflict.ConflictSchema{}, nil, err
}
@@ -467,7 +478,7 @@ func (t nomsTable) SetConflicts(ctx context.Context, schemas conflict.ConflictSc
return nil, err
}
return nomsTable{t.vrw, updatedSt}, nil
return nomsTable{t.vrw, t.ns, updatedSt}, nil
}
// GetConflictSchemas implements Table.
@@ -519,7 +530,7 @@ func (t nomsTable) ClearConflicts(ctx context.Context) (Table, error) {
return nil, err
}
return nomsTable{t.vrw, tSt}, nil
return nomsTable{t.vrw, t.ns, tSt}, nil
}
// GetConstraintViolations implements Table.
@@ -547,7 +558,7 @@ func (t nomsTable) SetConstraintViolations(ctx context.Context, violationsMap ty
if err != nil {
return nil, err
}
return nomsTable{t.vrw, updatedStruct}, nil
return nomsTable{t.vrw, t.ns, updatedStruct}, nil
}
constraintViolationsRef, err := refFromNomsValue(ctx, t.vrw, violationsMap)
if err != nil {
@@ -557,7 +568,7 @@ func (t nomsTable) SetConstraintViolations(ctx context.Context, violationsMap ty
if err != nil {
return nil, err
}
return nomsTable{t.vrw, updatedStruct}, nil
return nomsTable{t.vrw, t.ns, updatedStruct}, nil
}
// GetAutoIncrement implements Table.
@@ -590,7 +601,7 @@ func (t nomsTable) SetAutoIncrement(ctx context.Context, val uint64) (Table, err
if err != nil {
return nil, err
}
return nomsTable{t.vrw, st}, nil
return nomsTable{t.vrw, t.ns, st}, nil
}
func (t nomsTable) DebugString(ctx context.Context) string {
@@ -689,6 +700,7 @@ func schemaFromAddr(ctx context.Context, vrw types.ValueReadWriter, addr hash.Ha
type doltDevTable struct {
vrw types.ValueReadWriter
ns tree.NodeStore
msg *serial.Table
}
@@ -764,7 +776,7 @@ func (fields serialTableFields) write() *serial.Table {
return serial.GetRootAsTable(builder.FinishedBytes(), 0)
}
func newDoltDevTable(ctx context.Context, vrw types.ValueReadWriter, sch schema.Schema, rows Index, indexes IndexSet, autoIncVal types.Value) (Table, error) {
func newDoltDevTable(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, sch schema.Schema, rows Index, indexes IndexSet, autoIncVal types.Value) (Table, error) {
schVal, err := encoding.MarshalSchemaAsNomsValue(ctx, vrw, sch)
if err != nil {
return nil, err
@@ -782,7 +794,7 @@ func newDoltDevTable(ctx context.Context, vrw types.ValueReadWriter, sch schema.
}
if indexes == nil {
indexes = NewIndexSet(ctx, vrw)
indexes = NewIndexSet(ctx, vrw, ns)
}
var autoInc uint64
@@ -804,7 +816,7 @@ func newDoltDevTable(ctx context.Context, vrw types.ValueReadWriter, sch schema.
autoincval: autoInc,
}.write()
return doltDevTable{vrw, msg}, nil
return doltDevTable{vrw, ns, msg}, nil
}
func (t doltDevTable) nomsValue() types.Value {
@@ -842,7 +854,7 @@ func (t doltDevTable) SetSchema(ctx context.Context, sch schema.Schema) (Table,
addr := schRef.TargetHash()
msg := t.clone()
copy(msg.SchemaBytes(), addr[:])
return doltDevTable{t.vrw, msg}, nil
return doltDevTable{t.vrw, t.ns, msg}, nil
}
func (t doltDevTable) GetTableRows(ctx context.Context) (Index, error) {
@@ -853,13 +865,13 @@ func (t doltDevTable) GetTableRows(ctx context.Context) (Index, error) {
if err != nil {
return nil, err
}
return IndexFromNomsMap(tv.(types.Map), t.vrw), nil
return IndexFromNomsMap(tv.(types.Map), t.vrw, t.ns), nil
} else {
sch, err := t.GetSchema(ctx)
if err != nil {
return nil, err
}
m := shim.MapFromValue(types.TupleRowStorage(rowbytes), sch, t.vrw)
m := shim.MapFromValue(types.TupleRowStorage(rowbytes), sch, t.ns)
return IndexFromProllyMap(m), nil
}
}
@@ -874,21 +886,21 @@ func (t doltDevTable) SetTableRows(ctx context.Context, rows Index) (Table, erro
fields.rows = rowsbytes
msg := fields.write()
return doltDevTable{t.vrw, msg}, nil
return doltDevTable{t.vrw, t.ns, msg}, nil
}
func (t doltDevTable) GetIndexes(ctx context.Context) (IndexSet, error) {
ambytes := t.msg.SecondaryIndexesBytes()
node := tree.NodeFromBytes(ambytes)
ns := tree.NewNodeStore(shim.ChunkStoreFromVRW(t.vrw))
return doltDevIndexSet{t.vrw, prolly.NewAddressMap(node, ns)}, nil
ns := t.ns
return doltDevIndexSet{t.vrw, t.ns, prolly.NewAddressMap(node, ns)}, nil
}
func (t doltDevTable) SetIndexes(ctx context.Context, indexes IndexSet) (Table, error) {
fields := t.fields()
fields.indexes = indexes.(doltDevIndexSet).am
msg := fields.write()
return doltDevTable{t.vrw, msg}, nil
return doltDevTable{t.vrw, t.ns, msg}, nil
}
func (t doltDevTable) GetConflicts(ctx context.Context) (conflict.ConflictSchema, ConflictIndex, error) {
@@ -903,7 +915,7 @@ func (t doltDevTable) GetConflicts(ctx context.Context) (conflict.ConflictSchema
if err != nil {
return conflict.ConflictSchema{}, nil, err
}
empty, err := NewEmptyConflictIndex(ctx, t.vrw, sch, sch, sch)
empty, err := NewEmptyConflictIndex(ctx, t.vrw, t.ns, sch, sch, sch)
if err != nil {
return conflict.ConflictSchema{}, nil, err
}
@@ -932,12 +944,12 @@ func (t doltDevTable) GetConflicts(ctx context.Context) (conflict.ConflictSchema
mapaddr := hash.New(conflicts.DataBytes())
var conflictIdx ConflictIndex
if mapaddr.IsEmpty() {
conflictIdx, err = NewEmptyConflictIndex(ctx, t.vrw, ourschema, theirschema, baseschema)
conflictIdx, err = NewEmptyConflictIndex(ctx, t.vrw, t.ns, ourschema, theirschema, baseschema)
if err != nil {
return conflict.ConflictSchema{}, nil, err
}
} else {
conflictIdx, err = conflictIndexFromAddr(ctx, t.vrw, ourschema, theirschema, baseschema, mapaddr)
conflictIdx, err = conflictIndexFromAddr(ctx, t.vrw, t.ns, ourschema, theirschema, baseschema, mapaddr)
if err != nil {
return conflict.ConflictSchema{}, nil, err
}
@@ -959,10 +971,10 @@ func (t doltDevTable) GetArtifacts(ctx context.Context) (ArtifactIndex, error) {
addr := hash.New(t.msg.ArtifactsBytes())
if addr.IsEmpty() {
return NewEmptyArtifactIndex(ctx, t.vrw, sch)
return NewEmptyArtifactIndex(ctx, t.vrw, t.ns, sch)
}
return artifactIndexFromAddr(ctx, t.vrw, sch, addr)
return artifactIndexFromAddr(ctx, t.vrw, t.ns, sch, addr)
}
// SetArtifacts implements Table.
@@ -981,7 +993,7 @@ func (t doltDevTable) SetArtifacts(ctx context.Context, artifacts ArtifactIndex)
}
msg := t.clone()
copy(msg.ArtifactsBytes(), addr[:])
return doltDevTable{t.vrw, msg}, nil
return doltDevTable{t.vrw, t.ns, msg}, nil
}
func (t doltDevTable) HasConflicts(ctx context.Context) (bool, error) {
@@ -1018,7 +1030,7 @@ func (t doltDevTable) SetConflicts(ctx context.Context, sch conflict.ConflictSch
copy(cmsg.TheirSchemaBytes(), theiraddr[:])
copy(cmsg.AncestorSchemaBytes(), baseaddr[:])
return doltDevTable{t.vrw, msg}, nil
return doltDevTable{t.vrw, t.ns, msg}, nil
}
func (t doltDevTable) ClearConflicts(ctx context.Context) (Table, error) {
@@ -1029,7 +1041,7 @@ func (t doltDevTable) ClearConflicts(ctx context.Context) (Table, error) {
copy(conflicts.OurSchemaBytes(), emptyhash[:])
copy(conflicts.TheirSchemaBytes(), emptyhash[:])
copy(conflicts.AncestorSchemaBytes(), emptyhash[:])
return doltDevTable{t.vrw, msg}, nil
return doltDevTable{t.vrw, t.ns, msg}, nil
}
func (t doltDevTable) GetConstraintViolations(ctx context.Context) (types.Map, error) {
@@ -1055,7 +1067,7 @@ func (t doltDevTable) SetConstraintViolations(ctx context.Context, violations ty
}
msg := t.clone()
copy(msg.ViolationsBytes(), addr[:])
return doltDevTable{t.vrw, msg}, nil
return doltDevTable{t.vrw, t.ns, msg}, nil
}
func (t doltDevTable) GetAutoIncrement(ctx context.Context) (uint64, error) {
@@ -1074,7 +1086,7 @@ func (t doltDevTable) SetAutoIncrement(ctx context.Context, val uint64) (Table,
fields.autoincval = val
msg = fields.write()
}
return doltDevTable{t.vrw, msg}, nil
return doltDevTable{t.vrw, t.ns, msg}, nil
}
func (t doltDevTable) clone() *serial.Table {
@@ -1088,7 +1100,7 @@ func (t doltDevTable) clone() *serial.Table {
func (t doltDevTable) fields() serialTableFields {
ambytes := t.msg.SecondaryIndexesBytes()
node := tree.NodeFromBytes(ambytes)
ns := tree.NewNodeStore(shim.ChunkStoreFromVRW(t.vrw))
ns := t.ns
conflicts := t.msg.Conflicts(nil)
return serialTableFields{
+35 -32
View File
@@ -54,6 +54,7 @@ var DoltFeatureVersion FeatureVersion = 3 // last bumped when storing creation t
// RootValue is the value of the Database and is the committed value in every Dolt commit.
type RootValue struct {
vrw types.ValueReadWriter
ns tree.NodeStore
st rvStorage
fkc *ForeignKeyCollection // cache the first load
}
@@ -81,7 +82,7 @@ func tmIterAll(ctx context.Context, tm tableMap, cb func(name string, addr hash.
type rvStorage interface {
GetFeatureVersion() (FeatureVersion, bool, error)
GetTablesMap(ctx context.Context, vr types.ValueReadWriter) (tableMap, error)
GetTablesMap(ctx context.Context, vr types.ValueReadWriter, ns tree.NodeStore) (tableMap, error)
GetSuperSchemaMap(ctx context.Context, vr types.ValueReader) (types.Map, bool, error)
GetForeignKeys(ctx context.Context, vr types.ValueReader) (types.Value, bool, error)
@@ -89,7 +90,7 @@ type rvStorage interface {
SetForeignKeyMap(ctx context.Context, vrw types.ValueReadWriter, m types.Value) (rvStorage, error)
SetFeatureVersion(v FeatureVersion) (rvStorage, error)
EditTablesMap(ctx context.Context, vrw types.ValueReadWriter, edits []tableEdit) (rvStorage, error)
EditTablesMap(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, edits []tableEdit) (rvStorage, error)
DebugString(ctx context.Context) string
nomsValue() types.Value
@@ -111,7 +112,7 @@ func (r nomsRvStorage) GetFeatureVersion() (FeatureVersion, bool, error) {
}
}
func (r nomsRvStorage) GetTablesMap(context.Context, types.ValueReadWriter) (tableMap, error) {
func (r nomsRvStorage) GetTablesMap(context.Context, types.ValueReadWriter, tree.NodeStore) (tableMap, error) {
v, found, err := r.valueSt.MaybeGet(tablesKey)
if err != nil {
return nil, err
@@ -175,8 +176,8 @@ func (r nomsRvStorage) SetSuperSchemaMap(ctx context.Context, vrw types.ValueRea
return nomsRvStorage{st}, nil
}
func (r nomsRvStorage) EditTablesMap(ctx context.Context, vrw types.ValueReadWriter, edits []tableEdit) (rvStorage, error) {
m, err := r.GetTablesMap(ctx, vrw)
func (r nomsRvStorage) EditTablesMap(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, edits []tableEdit) (rvStorage, error) {
m, err := r.GetTablesMap(ctx, vrw, ns)
if err != nil {
return nil, err
}
@@ -250,7 +251,7 @@ func (r nomsRvStorage) nomsValue() types.Value {
return r.valueSt
}
func newRootValue(vrw types.ValueReadWriter, v types.Value) (*RootValue, error) {
func newRootValue(vrw types.ValueReadWriter, ns tree.NodeStore, v types.Value) (*RootValue, error) {
var storage rvStorage
if vrw.Format().UsesFlatbuffers() {
@@ -277,20 +278,20 @@ func newRootValue(vrw types.ValueReadWriter, v types.Value) (*RootValue, error)
}
}
return &RootValue{vrw, storage, nil}, nil
return &RootValue{vrw, ns, storage, nil}, nil
}
// LoadRootValueFromRootIshAddr takes the hash of the commit or the hash of a
// working set and returns the corresponding RootValue.
func LoadRootValueFromRootIshAddr(ctx context.Context, vrw types.ValueReadWriter, h hash.Hash) (*RootValue, error) {
func LoadRootValueFromRootIshAddr(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, h hash.Hash) (*RootValue, error) {
val, err := datas.LoadRootNomsValueFromRootIshAddr(ctx, vrw, h)
if err != nil {
return nil, err
}
return decodeRootNomsValue(vrw, val)
return decodeRootNomsValue(vrw, ns, val)
}
func decodeRootNomsValue(vrw types.ValueReadWriter, val types.Value) (*RootValue, error) {
func decodeRootNomsValue(vrw types.ValueReadWriter, ns tree.NodeStore, val types.Value) (*RootValue, error) {
if val == nil {
return nil, ErrNoRootValAtHash
}
@@ -299,7 +300,7 @@ func decodeRootNomsValue(vrw types.ValueReadWriter, val types.Value) (*RootValue
return nil, ErrNoRootValAtHash
}
return newRootValue(vrw, val)
return newRootValue(vrw, ns, val)
}
func isRootValue(nbf *types.NomsBinFormat, val types.Value) bool {
@@ -315,11 +316,10 @@ func isRootValue(nbf *types.NomsBinFormat, val types.Value) bool {
return false
}
func EmptyRootValue(ctx context.Context, vrw types.ValueReadWriter) (*RootValue, error) {
func EmptyRootValue(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore) (*RootValue, error) {
if vrw.Format().UsesFlatbuffers() {
builder := flatbuffers.NewBuilder(80)
ns := tree.NewNodeStore(shim.ChunkStoreFromVRW(vrw))
emptyam := prolly.NewEmptyAddressMap(ns)
ambytes := []byte(tree.ValueFromNode(emptyam.Node()).(types.TupleRowStorage))
tablesoff := builder.CreateByteVector(ambytes)
@@ -334,7 +334,7 @@ func EmptyRootValue(ctx context.Context, vrw types.ValueReadWriter) (*RootValue,
serial.RootValueAddSuperSchemasAddr(builder, ssoff)
builder.FinishWithFileIdentifier(serial.RootValueEnd(builder), []byte(serial.RootValueFileID))
bs := builder.FinishedBytes()
return newRootValue(vrw, types.SerialMessage(bs))
return newRootValue(vrw, ns, types.SerialMessage(bs))
}
empty, err := types.NewMap(ctx, vrw)
@@ -354,13 +354,17 @@ func EmptyRootValue(ctx context.Context, vrw types.ValueReadWriter) (*RootValue,
return nil, err
}
return newRootValue(vrw, st)
return newRootValue(vrw, ns, st)
}
func (root *RootValue) VRW() types.ValueReadWriter {
return root.vrw
}
func (root *RootValue) NodeStore() tree.NodeStore {
return root.ns
}
// GetFeatureVersion returns the feature version of this root, if one is written
func (root *RootValue) GetFeatureVersion(ctx context.Context) (ver FeatureVersion, ok bool, err error) {
return root.st.GetFeatureVersion()
@@ -375,7 +379,7 @@ func (root *RootValue) setFeatureVersion(v FeatureVersion) (*RootValue, error) {
}
func (root *RootValue) HasTable(ctx context.Context, tName string) (bool, error) {
tableMap, err := root.st.GetTablesMap(ctx, root.vrw)
tableMap, err := root.st.GetTablesMap(ctx, root.vrw, root.ns)
if err != nil {
return false, err
}
@@ -671,7 +675,7 @@ func (root *RootValue) GetTable(ctx context.Context, tName string) (*Table, bool
return nil, false, nil
}
table, err := durable.TableFromAddr(ctx, root.VRW(), addr)
table, err := durable.TableFromAddr(ctx, root.VRW(), root.ns, addr)
if err != nil {
return nil, false, err
}
@@ -745,7 +749,7 @@ func (root *RootValue) GetTableNames(ctx context.Context) ([]string, error) {
}
func (root *RootValue) getTableMap(ctx context.Context) (tableMap, error) {
return root.st.GetTablesMap(ctx, root.vrw)
return root.st.GetTablesMap(ctx, root.vrw, root.ns)
}
func (root *RootValue) TablesInConflict(ctx context.Context) ([]string, error) {
@@ -827,7 +831,7 @@ func (root *RootValue) IterTables(ctx context.Context, cb func(name string, tabl
}
return tm.Iter(ctx, func(name string, addr hash.Hash) (bool, error) {
nt, err := durable.TableFromAddr(ctx, root.VRW(), addr)
nt, err := durable.TableFromAddr(ctx, root.VRW(), root.ns, addr)
if err != nil {
return true, err
}
@@ -896,7 +900,7 @@ func (root *RootValue) PutSuperSchema(ctx context.Context, tName string, ss *sch
}
func (root *RootValue) withStorage(st rvStorage) *RootValue {
return &RootValue{root.vrw, st, nil}
return &RootValue{root.vrw, root.ns, st, nil}
}
func (root *RootValue) nomsValue() types.Value {
@@ -923,7 +927,7 @@ func putTable(ctx context.Context, root *RootValue, tName string, ref types.Ref)
panic("Don't attempt to put a table with a name that fails the IsValidTableName check")
}
newStorage, err := root.st.EditTablesMap(ctx, root.vrw, []tableEdit{{name: tName, ref: &ref}})
newStorage, err := root.st.EditTablesMap(ctx, root.vrw, root.ns, []tableEdit{{name: tName, ref: &ref}})
if err != nil {
return nil, err
}
@@ -933,12 +937,12 @@ func putTable(ctx context.Context, root *RootValue, tName string, ref types.Ref)
// CreateEmptyTable creates an empty table in this root with the name and schema given, returning the new root value.
func (root *RootValue) CreateEmptyTable(ctx context.Context, tName string, sch schema.Schema) (*RootValue, error) {
empty, err := durable.NewEmptyIndex(ctx, root.vrw, sch)
empty, err := durable.NewEmptyIndex(ctx, root.vrw, root.ns, sch)
if err != nil {
return nil, err
}
indexes := durable.NewIndexSet(ctx, root.VRW())
indexes := durable.NewIndexSet(ctx, root.VRW(), root.ns)
err = sch.Indexes().Iter(func(index schema.Index) (stop bool, err error) {
// create an empty map for every index
indexes, err = indexes.PutIndex(ctx, index.Name(), empty)
@@ -948,7 +952,7 @@ func (root *RootValue) CreateEmptyTable(ctx context.Context, tName string, sch s
return nil, err
}
tbl, err := NewTable(ctx, root.VRW(), sch, empty, indexes, nil)
tbl, err := NewTable(ctx, root.VRW(), root.ns, sch, empty, indexes, nil)
if err != nil {
return nil, err
}
@@ -1040,7 +1044,7 @@ func (root *RootValue) UpdateSuperSchemasFromOther(ctx context.Context, tblNames
// column tag information, use this method instead of a table drop + add.
func (root *RootValue) RenameTable(ctx context.Context, oldName, newName string) (*RootValue, error) {
newStorage, err := root.st.EditTablesMap(ctx, root.vrw, []tableEdit{{old_name: oldName, name: newName}})
newStorage, err := root.st.EditTablesMap(ctx, root.vrw, root.ns, []tableEdit{{old_name: oldName, name: newName}})
if err != nil {
return nil, err
}
@@ -1089,7 +1093,7 @@ func (root *RootValue) RemoveTables(ctx context.Context, skipFKHandling bool, al
edits[i].name = name
}
newStorage, err := root.st.EditTablesMap(ctx, root.vrw, edits)
newStorage, err := root.st.EditTablesMap(ctx, root.vrw, root.ns, edits)
if err != nil {
return nil, err
}
@@ -1387,15 +1391,14 @@ func (r fbRvStorage) GetFeatureVersion() (FeatureVersion, bool, error) {
return FeatureVersion(r.srv.FeatureVersion()), true, nil
}
func (r fbRvStorage) getAddressMap(vrw types.ValueReadWriter) prolly.AddressMap {
func (r fbRvStorage) getAddressMap(vrw types.ValueReadWriter, ns tree.NodeStore) prolly.AddressMap {
tbytes := r.srv.TablesBytes()
node := shim.NodeFromValue(types.TupleRowStorage(tbytes))
ns := tree.NewNodeStore(shim.ChunkStoreFromVRW(vrw))
return prolly.NewAddressMap(node, ns)
}
func (r fbRvStorage) GetTablesMap(ctx context.Context, vrw types.ValueReadWriter) (tableMap, error) {
am := r.getAddressMap(vrw)
func (r fbRvStorage) GetTablesMap(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore) (tableMap, error) {
am := r.getAddressMap(vrw, ns)
return fbTableMap{am}, nil
}
@@ -1457,10 +1460,10 @@ func (r fbRvStorage) SetSuperSchemaMap(ctx context.Context, vrw types.ValueReadW
return ret, nil
}
func (r fbRvStorage) EditTablesMap(ctx context.Context, vrw types.ValueReadWriter, edits []tableEdit) (rvStorage, error) {
func (r fbRvStorage) EditTablesMap(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, edits []tableEdit) (rvStorage, error) {
builder := flatbuffers.NewBuilder(80)
am := r.getAddressMap(vrw)
am := r.getAddressMap(vrw, ns)
ae := am.Editor()
for _, e := range edits {
if e.old_name != "" {
+10 -15
View File
@@ -29,7 +29,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/prolly/shim"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -79,8 +78,8 @@ type Table struct {
}
// NewNomsTable creates a noms Struct which stores row data, index data, and schema.
func NewNomsTable(ctx context.Context, vrw types.ValueReadWriter, sch schema.Schema, rows types.Map, indexes durable.IndexSet, autoIncVal types.Value) (*Table, error) {
dt, err := durable.NewNomsTable(ctx, vrw, sch, rows, indexes, autoIncVal)
func NewNomsTable(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, sch schema.Schema, rows types.Map, indexes durable.IndexSet, autoIncVal types.Value) (*Table, error) {
dt, err := durable.NewNomsTable(ctx, vrw, ns, sch, rows, indexes, autoIncVal)
if err != nil {
return nil, err
}
@@ -89,8 +88,8 @@ func NewNomsTable(ctx context.Context, vrw types.ValueReadWriter, sch schema.Sch
}
// NewTable creates a durable object which stores row data, index data, and schema.
func NewTable(ctx context.Context, vrw types.ValueReadWriter, sch schema.Schema, rows durable.Index, indexes durable.IndexSet, autoIncVal types.Value) (*Table, error) {
dt, err := durable.NewTable(ctx, vrw, sch, rows, indexes, autoIncVal)
func NewTable(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, sch schema.Schema, rows durable.Index, indexes durable.IndexSet, autoIncVal types.Value) (*Table, error) {
dt, err := durable.NewTable(ctx, vrw, ns, sch, rows, indexes, autoIncVal)
if err != nil {
return nil, err
}
@@ -108,12 +107,8 @@ func (t *Table) ValueReadWriter() types.ValueReadWriter {
return durable.VrwFromTable(t.table)
}
// NodeStore returns the NodeStore for this table.
func (t *Table) NodeStore() tree.NodeStore {
if t == nil {
return nil
}
return tree.NewNodeStore(shim.ChunkStoreFromVRW(t.ValueReadWriter()))
return durable.NodeStoreFromTable(t.table)
}
// SetConflicts sets the merge conflicts for this table.
@@ -279,11 +274,11 @@ func (t *Table) getProllyConflictSchemas(ctx context.Context, tblName string) (b
return nil, nil, nil, err
}
baseTbl, err := tableFromRootIsh(ctx, t.ValueReadWriter(), art.Metadata.BaseRootIsh, tblName)
baseTbl, err := tableFromRootIsh(ctx, t.ValueReadWriter(), t.NodeStore(), art.Metadata.BaseRootIsh, tblName)
if err != nil {
return nil, nil, nil, err
}
theirTbl, err := tableFromRootIsh(ctx, t.ValueReadWriter(), art.TheirRootIsh, tblName)
theirTbl, err := tableFromRootIsh(ctx, t.ValueReadWriter(), t.NodeStore(), art.TheirRootIsh, tblName)
if err != nil {
return nil, nil, nil, err
}
@@ -300,8 +295,8 @@ func (t *Table) getProllyConflictSchemas(ctx context.Context, tblName string) (b
return baseSch, ourSch, theirSch, nil
}
func tableFromRootIsh(ctx context.Context, vrw types.ValueReadWriter, h hash.Hash, tblName string) (*Table, error) {
rv, err := LoadRootValueFromRootIshAddr(ctx, vrw, h)
func tableFromRootIsh(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, h hash.Hash, tblName string) (*Table, error) {
rv, err := LoadRootValueFromRootIshAddr(ctx, vrw, ns, h)
if err != nil {
return nil, err
}
@@ -410,7 +405,7 @@ func (t *Table) HashOf() (hash.Hash, error) {
// Calls to UpdateNomsRows will not be written to the database. The root must
// be updated with the updated table, and the root must be committed or written.
func (t *Table) UpdateNomsRows(ctx context.Context, updatedRows types.Map) (*Table, error) {
table, err := t.table.SetTableRows(ctx, durable.IndexFromNomsMap(updatedRows, t.ValueReadWriter()))
table, err := t.table.SetTableRows(ctx, durable.IndexFromNomsMap(updatedRows, t.ValueReadWriter(), t.NodeStore()))
if err != nil {
return nil, err
}
+3 -2
View File
@@ -20,6 +20,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -32,7 +33,7 @@ type Tag struct {
}
// NewTag creates a new Tag object.
func NewTag(ctx context.Context, name string, ds datas.Dataset, vrw types.ValueReadWriter) (*Tag, error) {
func NewTag(ctx context.Context, name string, ds datas.Dataset, vrw types.ValueReadWriter, ns tree.NodeStore) (*Tag, error) {
meta, commitAddr, err := ds.HeadTag()
if err != nil {
return nil, err
@@ -41,7 +42,7 @@ func NewTag(ctx context.Context, name string, ds datas.Dataset, vrw types.ValueR
if err != nil {
return nil, err
}
commit, err := NewCommit(ctx, vrw, dc)
commit, err := NewCommit(ctx, vrw, ns, dc)
if err != nil {
return nil, err
}
+6 -5
View File
@@ -22,6 +22,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -128,7 +129,7 @@ func (ws WorkingSet) Meta() *datas.WorkingSetMeta {
}
// NewWorkingSet creates a new WorkingSet object.
func NewWorkingSet(ctx context.Context, name string, vrw types.ValueReadWriter, ds datas.Dataset) (*WorkingSet, error) {
func NewWorkingSet(ctx context.Context, name string, vrw types.ValueReadWriter, ns tree.NodeStore, ds datas.Dataset) (*WorkingSet, error) {
dsws, err := ds.HeadWorkingSet()
if err != nil {
return nil, err
@@ -148,7 +149,7 @@ func NewWorkingSet(ctx context.Context, name string, vrw types.ValueReadWriter,
if err != nil {
return nil, err
}
workingRoot, err := newRootValue(vrw, workingRootVal)
workingRoot, err := newRootValue(vrw, ns, workingRootVal)
if err != nil {
return nil, err
}
@@ -160,7 +161,7 @@ func NewWorkingSet(ctx context.Context, name string, vrw types.ValueReadWriter,
return nil, err
}
stagedRoot, err = newRootValue(vrw, stagedRootVal)
stagedRoot, err = newRootValue(vrw, ns, stagedRootVal)
if err != nil {
return nil, err
}
@@ -177,7 +178,7 @@ func NewWorkingSet(ctx context.Context, name string, vrw types.ValueReadWriter,
return nil, err
}
commit, err := NewCommit(ctx, vrw, fromDCommit)
commit, err := NewCommit(ctx, vrw, ns, fromDCommit)
if err != nil {
return nil, err
}
@@ -187,7 +188,7 @@ func NewWorkingSet(ctx context.Context, name string, vrw types.ValueReadWriter,
return nil, err
}
preMergeWorkingRoot, err := newRootValue(vrw, preMergeWorkingV)
preMergeWorkingRoot, err := newRootValue(vrw, ns, preMergeWorkingV)
if err != nil {
return nil, err
}
+4 -3
View File
@@ -23,6 +23,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/table"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -80,7 +81,7 @@ func updateDocsTable(ctx context.Context, docTbl *doltdb.Table, docs Docs) (*dol
}
// createDocsTable creates a new in memory table that stores the given doc details.
func createDocsTable(ctx context.Context, vrw types.ValueReadWriter, docs Docs) (*doltdb.Table, error) {
func createDocsTable(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, docs Docs) (*doltdb.Table, error) {
rows := make([]row.Row, 0, len(docs))
@@ -121,7 +122,7 @@ func createDocsTable(ctx context.Context, vrw types.ValueReadWriter, docs Docs)
return nil, err
}
newDocsTbl, err := doltdb.NewNomsTable(ctx, vrw, DocsSchema, rowMap, nil, nil)
newDocsTbl, err := doltdb.NewNomsTable(ctx, vrw, ns, DocsSchema, rowMap, nil, nil)
if err != nil {
return nil, err
}
@@ -139,7 +140,7 @@ func CreateOrUpdateDocsTable(ctx context.Context, root *doltdb.RootValue, docs D
if found {
return updateDocsTable(ctx, docsTbl, docs)
} else {
return createDocsTable(ctx, root.VRW(), docs)
return createDocsTable(ctx, root.VRW(), root.NodeStore(), docs)
}
}
+5 -4
View File
@@ -25,6 +25,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
filesys2 "github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -46,7 +47,7 @@ func TestAddNewerTextAndValueFromTable(t *testing.T) {
sch := createTestDocsSchema()
rows := []row.Row{}
m, _ := createTestRows(t, ddb.ValueReadWriter(), sch, rows)
tbl, err := CreateTestTable(ddb.ValueReadWriter(), sch, m)
tbl, err := CreateTestTable(ddb.ValueReadWriter(), ddb.NodeStore(), sch, m)
require.NoError(t, err)
// If a table doesn't have doc row, doc Text and Value should remain nil
@@ -66,7 +67,7 @@ func TestAddNewerTextAndValueFromTable(t *testing.T) {
// Update tbl to have 2 doc rows, readme and license
rows = getDocRows(t, sch, types.String("text in doc_text"))
m, _ = createTestRows(t, ddb.ValueReadWriter(), sch, rows)
tbl, err = CreateTestTable(ddb.ValueReadWriter(), sch, m)
tbl, err = CreateTestTable(ddb.ValueReadWriter(), ddb.NodeStore(), sch, m)
require.NoError(t, err)
// If a table has a doc row, Text and Value and should be updated to the `doc_text` value in that row.
@@ -133,8 +134,8 @@ func TestAddNewerTextAndDocPkFromRow(t *testing.T) {
assert.Equal(t, LicenseDoc, doc3.DocPk)
}
func CreateTestTable(vrw types.ValueReadWriter, tSchema schema.Schema, rowData types.Map) (*doltdb.Table, error) {
tbl, err := doltdb.NewNomsTable(context.Background(), vrw, tSchema, rowData, nil, nil)
func CreateTestTable(vrw types.ValueReadWriter, ns tree.NodeStore, tSchema schema.Schema, rowData types.Map) (*doltdb.Table, error) {
tbl, err := doltdb.NewNomsTable(context.Background(), vrw, ns, tSchema, rowData, nil, nil)
if err != nil {
return nil, err
@@ -70,6 +70,7 @@ func CreateEnvWithSeedData(t *testing.T) *env.DoltEnv {
ctx := context.Background()
vrw := dEnv.DoltDB.ValueReadWriter()
ns := dEnv.DoltDB.NodeStore()
rowMap, err := types.NewMap(ctx, vrw)
require.NoError(t, err)
@@ -86,7 +87,7 @@ func CreateEnvWithSeedData(t *testing.T) *env.DoltEnv {
ai := sch.Indexes().AllIndexes()
sch.Indexes().Merge(ai...)
tbl, err := doltdb.NewNomsTable(ctx, vrw, sch, rowMap, nil, nil)
tbl, err := doltdb.NewNomsTable(ctx, vrw, ns, sch, rowMap, nil, nil)
require.NoError(t, err)
tbl, err = editor.RebuildAllIndexes(ctx, tbl, editor.TestEditorOptions(vrw))
require.NoError(t, err)
+4 -2
View File
@@ -102,6 +102,7 @@ func CreateTestTable(t *testing.T, dEnv *env.DoltEnv, tableName string, sch sche
ctx := context.Background()
vrw := dEnv.DoltDB.ValueReadWriter()
ns := dEnv.DoltDB.NodeStore()
rowMap, err := types.NewMap(ctx, vrw)
require.NoError(t, err)
@@ -115,7 +116,7 @@ func CreateTestTable(t *testing.T, dEnv *env.DoltEnv, tableName string, sch sche
rowMap, err = me.Map(ctx)
require.NoError(t, err)
tbl, err := doltdb.NewNomsTable(ctx, vrw, sch, rowMap, nil, nil)
tbl, err := doltdb.NewNomsTable(ctx, vrw, ns, sch, rowMap, nil, nil)
require.NoError(t, err)
tbl, err = editor.RebuildAllIndexes(ctx, tbl, editor.TestEditorOptions(vrw))
require.NoError(t, err)
@@ -137,7 +138,8 @@ func putTableToWorking(ctx context.Context, dEnv *env.DoltEnv, sch schema.Schema
}
vrw := dEnv.DoltDB.ValueReadWriter()
tbl, err := doltdb.NewNomsTable(ctx, vrw, sch, rows, indexData, autoVal)
ns := dEnv.DoltDB.NodeStore()
tbl, err := doltdb.NewNomsTable(ctx, vrw, ns, sch, rows, indexData, autoVal)
if err != nil {
return err
}
@@ -330,6 +330,7 @@ func (mr *MultiRepoTestSetup) PushToRemote(dbName, remoteName, branchName string
func createTestTable(dEnv *env.DoltEnv, tableName string, sch schema.Schema, errhand func(args ...interface{}), rs ...row.Row) {
ctx := context.Background()
vrw := dEnv.DoltDB.ValueReadWriter()
ns := dEnv.DoltDB.NodeStore()
rowMap, err := types.NewMap(ctx, vrw)
if err != nil {
@@ -346,7 +347,7 @@ func createTestTable(dEnv *env.DoltEnv, tableName string, sch schema.Schema, err
errhand(err)
}
tbl, err := doltdb.NewNomsTable(ctx, vrw, sch, rowMap, nil, nil)
tbl, err := doltdb.NewNomsTable(ctx, vrw, ns, sch, rowMap, nil, nil)
if err != nil {
errhand(err)
}
@@ -380,7 +381,8 @@ func putTableToWorking(ctx context.Context, dEnv *env.DoltEnv, sch schema.Schema
}
vrw := dEnv.DoltDB.ValueReadWriter()
tbl, err := doltdb.NewNomsTable(ctx, vrw, sch, rows, indexData, autoVal)
ns := dEnv.DoltDB.NodeStore()
tbl, err := doltdb.NewNomsTable(ctx, vrw, ns, sch, rows, indexData, autoVal)
if err != nil {
return err
}
+7 -6
View File
@@ -1281,25 +1281,26 @@ func (dEnv *DoltEnv) BulkDbEaFactory() editor.DbEaFactory {
return editor.NewBulkImportTEAFactory(dEnv.DoltDB.Format(), dEnv.DoltDB.ValueReadWriter(), dEnv.TempTableFilesDir())
}
func (dEnv *DoltEnv) lockFile() string {
return filepath.Join(dbfactory.DoltDir, ServerLockFile)
func (dEnv *DoltEnv) LockFile() string {
f, _ := dEnv.FS.Abs(filepath.Join(dbfactory.DoltDir, ServerLockFile))
return f
}
// IsLocked returns true if this database's lockfile exists
func (dEnv *DoltEnv) IsLocked() bool {
ok, _ := dEnv.FS.Exists(dEnv.lockFile())
ok, _ := dEnv.FS.Exists(dEnv.LockFile())
return ok
}
// Lock writes this database's lockfile or errors if it already exists
func (dEnv *DoltEnv) Lock() error {
if dEnv.IsLocked() {
return ErrActiveServerLock
return ErrActiveServerLock.New(dEnv.LockFile())
}
return dEnv.FS.WriteFile(dEnv.lockFile(), []byte{})
return dEnv.FS.WriteFile(dEnv.LockFile(), []byte{})
}
// Unlock deletes this database's lockfile
func (dEnv *DoltEnv) Unlock() error {
return dEnv.FS.DeleteFile(dEnv.lockFile())
return dEnv.FS.DeleteFile(dEnv.LockFile())
}
+7 -7
View File
@@ -16,7 +16,6 @@ package env
import (
"context"
"errors"
"fmt"
"net/url"
"os"
@@ -24,16 +23,17 @@ import (
"strings"
"unicode"
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
"github.com/dolthub/dolt/go/libraries/utils/config"
"github.com/dolthub/dolt/go/libraries/utils/earl"
"gopkg.in/src-d/go-errors.v1"
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
"github.com/dolthub/dolt/go/libraries/doltcore/dbfactory"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/utils/config"
"github.com/dolthub/dolt/go/libraries/utils/earl"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
)
var ErrActiveServerLock = errors.New("database locked by another sql-server; either clone the database to run a second server, or delete the '.dolt/sql-server.lock' if no other sql-servers are active")
var ErrActiveServerLock = errors.NewKind("database locked by another sql-server; either clone the database to run a second server, or delete the '%s' if no other sql-servers are active")
// EnvNameAndPath is a simple tuple of the name of an environment and the path to where it is on disk
type EnvNameAndPath struct {
@@ -143,7 +143,7 @@ func (mrEnv *MultiRepoEnv) GetWorkingRoots(ctx context.Context) (map[string]*dol
func (mrEnv *MultiRepoEnv) IsLocked() (bool, string) {
for _, e := range mrEnv.envs {
if e.env.IsLocked() {
return true, e.env.lockFile()
return true, e.env.LockFile()
}
}
return false, ""
@@ -153,7 +153,7 @@ func (mrEnv *MultiRepoEnv) IsLocked() (bool, string) {
// child envs will be returned with their initial lock state.
func (mrEnv *MultiRepoEnv) Lock() error {
if ok, f := mrEnv.IsLocked(); ok {
return fmt.Errorf("%w: '%s'", ErrActiveServerLock, f)
return ErrActiveServerLock.New(f)
}
var err error
@@ -31,9 +31,6 @@ import (
)
const (
mergeVersionProp = "merge_version"
mergeRowOperation = "row_operation"
oursStr = "our"
theirsStr = "their"
baseStr = "base"
@@ -111,16 +108,6 @@ func NewConflictReader(ctx context.Context, tbl *doltdb.Table) (*ConflictReader,
return &ConflictReader{confItr: confItr, joiner: joiner, sch: readerSch, nbf: tbl.Format()}, nil
}
func tagMappingConverter(ctx context.Context, vrw types.ValueReadWriter, src, dest schema.Schema) (*rowconv.RowConverter, error) {
mapping, err := rowconv.TagMapping(src, dest)
if err != nil {
return nil, err
}
return rowconv.NewRowConverter(ctx, vrw, mapping)
}
// GetSchema gets the schema of the rows that this reader will return
func (cr *ConflictReader) GetSchema() schema.Schema {
return cr.sch
@@ -1,154 +0,0 @@
// Copyright 2019 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package merge
import (
"context"
"errors"
"fmt"
"io"
"github.com/fatih/color"
"github.com/dolthub/dolt/go/libraries/doltcore/diff"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/table/pipeline"
"github.com/dolthub/dolt/go/libraries/doltcore/table/untyped"
"github.com/dolthub/dolt/go/libraries/doltcore/table/untyped/tabular"
"github.com/dolthub/dolt/go/store/types"
)
var WriteBufSize = 256 * 1024
var mergeVersionToLabel = map[MergeVersion]string{
OurVersion: "ours ",
TheirVersion: "theirs",
BaseVersion: "base ",
Blank: " ",
}
var diffTypeToOpLabel = map[types.DiffChangeType]string{
types.DiffChangeAdded: " + ",
types.DiffChangeRemoved: " - ",
types.DiffChangeModified: " * ",
}
var deleteColor = color.New(color.FgRed, color.CrossedOut)
var modifiedColor = color.New(color.FgYellow)
var addedColor = color.New(color.FgGreen)
var diffTypeToColor = map[types.DiffChangeType]diff.ColorFunc{
types.DiffChangeAdded: addedColor.Sprint,
types.DiffChangeModified: modifiedColor.Sprint,
types.DiffChangeRemoved: deleteColor.Sprint,
}
type ConflictSink struct {
sch schema.Schema
ttw *tabular.TextTableWriter
}
const (
opColTag = schema.ReservedTagMin
sourceColTag = schema.ReservedTagMin + 1
)
func NewConflictSink(wr io.WriteCloser, sch schema.Schema, colSep string) (*ConflictSink, error) {
_, additionalCols := untyped.NewUntypedSchemaWithFirstTag(opColTag, "op", "source")
outSch, err := untyped.UntypedSchemaUnion(additionalCols, sch)
if err != nil {
return nil, err
}
ttw, err := tabular.NewTextTableWriter(wr, outSch)
if err != nil {
return nil, err
}
return &ConflictSink{outSch, ttw}, nil
}
// GetSchema gets the schema of the rows that this writer writes
func (cs *ConflictSink) GetSchema() schema.Schema {
return cs.sch
}
var noColorFunc = func(i ...interface{}) string {
if len(i) == 0 {
return ""
} else {
return fmt.Sprint(i...)
}
}
func (cs *ConflictSink) ProcRowWithProps(r row.Row, props pipeline.ReadableMap) error {
taggedVals := make(row.TaggedValues)
colorFunc := noColorFunc
mergeVersion, ok := props.Get(mergeVersionProp)
// The column header row won't have properties to read
if !ok {
mergeVersion = Blank
}
taggedVals[opColTag] = types.String(" ")
taggedVals[sourceColTag] = types.String(mergeVersionToLabel[mergeVersion.(MergeVersion)])
if mergeVersion != BaseVersion {
mergeRowOp, ok := props.Get(mergeRowOperation)
// The column header row won't have properties to read
if ok {
dt := mergeRowOp.(types.DiffChangeType)
taggedVals[opColTag] = types.String(diffTypeToOpLabel[dt])
colorFunc = diffTypeToColor[dt]
} else {
taggedVals[opColTag] = types.String(" ")
}
}
err := cs.sch.GetAllCols().Iter(func(tag uint64, col schema.Column) (stop bool, err error) {
if val, ok := r.GetColVal(tag); ok {
taggedVals[tag] = types.String(colorFunc(string(val.(types.String))))
}
return false, nil
})
if err != nil {
return err
}
r, err = row.New(r.Format(), cs.sch, taggedVals)
if err != nil {
return err
}
return cs.ttw.WriteRow(context.TODO(), r)
}
// Close should release resources being held
func (cs *ConflictSink) Close() error {
if cs.ttw != nil {
if err := cs.ttw.Close(context.TODO()); err != nil {
return err
}
cs.ttw = nil
return nil
} else {
return errors.New("already closed")
}
}
@@ -1,142 +0,0 @@
// Copyright 2020 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package merge
import (
"context"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/rowconv"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/table/pipeline"
"github.com/dolthub/dolt/go/libraries/doltcore/table/untyped"
"github.com/dolthub/dolt/go/store/types"
)
// MergeVersion defines which version a value of a row corresponds to
type MergeVersion int
const (
// BaseVersion represents the state of a row at the most recent ancestor
BaseVersion MergeVersion = iota
// OurVersion represents the state of a row on our branch that is being merged into
OurVersion
// TheirVersion represents the state of a row on their branch which we are merging
TheirVersion
// Blank is used for displaying a row without a version label
Blank
)
var typeToMergeVersion = map[string]MergeVersion{
oursStr: OurVersion,
theirsStr: TheirVersion,
baseStr: BaseVersion,
}
// ConflictsSplitter splits a conflict into base, ours, and their version of a row
type ConflictSplitter struct {
joiner *rowconv.Joiner
sch schema.Schema
converters map[string]*rowconv.RowConverter
}
// NewConflictSplitter creates a new ConflictSplitter
func NewConflictSplitter(ctx context.Context, vrw types.ValueReadWriter, joiner *rowconv.Joiner) (ConflictSplitter, error) {
baseSch := joiner.SchemaForName(baseStr)
ourSch := joiner.SchemaForName(oursStr)
theirSch := joiner.SchemaForName(theirsStr)
sch, err := untyped.UntypedSchemaUnion(baseSch, ourSch, theirSch)
if err != nil {
return ConflictSplitter{}, err
}
converters := make(map[string]*rowconv.RowConverter)
converters[oursStr], err = tagMappingConverter(ctx, vrw, ourSch, sch)
if err != nil {
return ConflictSplitter{}, err
}
converters[theirsStr], err = tagMappingConverter(ctx, vrw, theirSch, sch)
if err != nil {
return ConflictSplitter{}, err
}
converters[baseStr], err = tagMappingConverter(ctx, vrw, baseSch, sch)
if err != nil {
return ConflictSplitter{}, err
}
return ConflictSplitter{joiner: joiner, sch: sch, converters: converters}, nil
}
// GetSchema returns the common schema which all rows will share
func (ds ConflictSplitter) GetSchema() schema.Schema {
return ds.sch
}
// SplitConflicts takes a conflict row and splits it into ours, theirs, and base versions and provides pipeline properties
// which can be used to distinguished which is which and what type of conflict occurred.
func (ds ConflictSplitter) SplitConflicts(inRow row.Row, _ pipeline.ReadableMap) (rowData []*pipeline.TransformedRowResult, badRowDetails string) {
rows, err := ds.joiner.Split(inRow)
if err != nil {
return nil, err.Error()
}
var baseRow row.Row
has := make(map[string]bool)
baseRow, has[baseStr] = rows[baseStr]
_, has[oursStr] = rows[oursStr]
_, has[theirsStr] = rows[theirsStr]
if has[baseStr] {
baseRow, err = ds.converters[baseStr].Convert(baseRow)
if err != nil {
return nil, err.Error()
}
}
rowData = make([]*pipeline.TransformedRowResult, 0, 3)
for _, rowType := range []string{baseStr, oursStr, theirsStr} {
row, ok := rows[rowType]
props := map[string]interface{}{mergeVersionProp: typeToMergeVersion[rowType]}
if ok {
converted, err := ds.converters[rowType].Convert(row)
if err != nil {
return nil, err.Error()
}
if !has[baseStr] {
props[mergeRowOperation] = types.DiffChangeAdded
} else {
props[mergeRowOperation] = types.DiffChangeModified
}
rowData = append(rowData, &pipeline.TransformedRowResult{RowData: converted, PropertyUpdates: props})
} else if rowType != baseStr {
props[mergeRowOperation] = types.DiffChangeRemoved
rowData = append(rowData, &pipeline.TransformedRowResult{RowData: baseRow, PropertyUpdates: props})
}
}
return rowData, ""
}
@@ -31,6 +31,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/pool"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/store/val"
)
@@ -335,8 +336,8 @@ func assertProllyConflicts(t *testing.T, ctx context.Context, tbl *doltdb.Table,
c++
ours := mustGetRowValueFromTable(t, ctx, tbl, conf.Key)
theirs := mustGetRowValueFromRootIsh(t, ctx, tbl.ValueReadWriter(), conf.TheirRootIsh, tblName, conf.Key)
base := mustGetRowValueFromRootIsh(t, ctx, tbl.ValueReadWriter(), conf.Metadata.BaseRootIsh, tblName, conf.Key)
theirs := mustGetRowValueFromRootIsh(t, ctx, tbl.ValueReadWriter(), tbl.NodeStore(), conf.TheirRootIsh, tblName, conf.Key)
base := mustGetRowValueFromRootIsh(t, ctx, tbl.ValueReadWriter(), tbl.NodeStore(), conf.Metadata.BaseRootIsh, tblName, conf.Key)
copy(h[:], conf.Key.GetField(0))
expectedConf, ok := expectedSet[h]
@@ -401,8 +402,8 @@ func mustGetRowValueFromTable(t *testing.T, ctx context.Context, tbl *doltdb.Tab
return value
}
func mustGetRowValueFromRootIsh(t *testing.T, ctx context.Context, vrw types.ValueReadWriter, rootIsh hash.Hash, tblName string, key val.Tuple) val.Tuple {
rv, err := doltdb.LoadRootValueFromRootIshAddr(ctx, vrw, rootIsh)
func mustGetRowValueFromRootIsh(t *testing.T, ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, rootIsh hash.Hash, tblName string, key val.Tuple) val.Tuple {
rv, err := doltdb.LoadRootValueFromRootIshAddr(ctx, vrw, ns, rootIsh)
require.NoError(t, err)
tbl, ok, err := rv.GetTable(ctx, tblName)
require.NoError(t, err)
+8 -5
View File
@@ -34,6 +34,7 @@ import (
"github.com/dolthub/dolt/go/libraries/utils/valutil"
"github.com/dolthub/dolt/go/store/atomicerr"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -60,11 +61,12 @@ type Merger struct {
mergeRoot *doltdb.RootValue
ancRoot *doltdb.RootValue
vrw types.ValueReadWriter
ns tree.NodeStore
}
// NewMerger creates a new merger utility object.
func NewMerger(ctx context.Context, theirRootIsh, ancRootIsh hash.Hash, root, mergeRoot, ancRoot *doltdb.RootValue, vrw types.ValueReadWriter) *Merger {
return &Merger{theirRootIsh, ancRootIsh, root, mergeRoot, ancRoot, vrw}
func NewMerger(ctx context.Context, theirRootIsh, ancRootIsh hash.Hash, root, mergeRoot, ancRoot *doltdb.RootValue, vrw types.ValueReadWriter, ns tree.NodeStore) *Merger {
return &Merger{theirRootIsh, ancRootIsh, root, mergeRoot, ancRoot, vrw, ns}
}
// MergeTable merges schema and table data for the table tblName.
@@ -118,11 +120,11 @@ func (merger *Merger) MergeTable(ctx context.Context, tblName string, opts edito
// If both added the same table, pretend it was in the ancestor all along with no data
// Don't touch ancHash to avoid triggering other short-circuit logic below
ancHasTable, ancSchema, ancTbl = true, rootSchema, tbl
ancRows, err = durable.NewEmptyIndex(ctx, merger.vrw, ancSchema)
ancRows, err = durable.NewEmptyIndex(ctx, merger.vrw, merger.ns, ancSchema)
if err != nil {
return nil, nil, err
}
ancIndexSet, err = durable.NewIndexSetWithEmptyIndexes(ctx, merger.vrw, ancSchema)
ancIndexSet, err = durable.NewIndexSetWithEmptyIndexes(ctx, merger.vrw, merger.ns, ancSchema)
if err != nil {
return nil, nil, err
}
@@ -208,6 +210,7 @@ func (merger *Merger) MergeTable(ctx context.Context, tblName string, opts edito
updatedTbl, stats, err = mergeTableData(
ctx,
merger.vrw,
merger.ns,
tblName,
postMergeSchema, rootSchema, mergeSchema, ancSchema,
tbl, mergeTbl, updatedTbl,
@@ -996,7 +999,7 @@ func MergeRoots(ctx context.Context, theirRootIsh, ancRootIsh hash.Hash, ourRoot
// Merge tables one at a time. This is done based on name. With table names from ourRoot being merged first,
// renaming a table will return delete/modify conflict error consistently.
// TODO: merge based on a more durable table identity that persists across renames
merger := NewMerger(ctx, theirRootIsh, ancRootIsh, ourRoot, theirRoot, ancRoot, ourRoot.VRW())
merger := NewMerger(ctx, theirRootIsh, ancRootIsh, ourRoot, theirRoot, ancRoot, ourRoot.VRW(), ourRoot.NodeStore())
for _, tblName := range tblNames {
mergedTable, stats, err := merger.MergeTable(ctx, tblName, opts, isCherryPick)
if err != nil {
@@ -49,6 +49,7 @@ import (
func mergeTableData(
ctx context.Context,
vrw types.ValueReadWriter,
ns tree.NodeStore,
tblName string,
postMergeSchema, rootSchema, mergeSchema, ancSchema schema.Schema,
tbl, mergeTbl, updatedTbl *doltdb.Table,
@@ -130,6 +131,7 @@ func mergeTableData(
updatedTbl, err = mergeProllySecondaryIndexes(
ctx,
vrw,
ns,
postMergeSchema, rootSchema, mergeSchema, ancSchema,
mergedData,
tbl, mergeTbl, updatedTbl,
@@ -106,6 +106,7 @@ type confVals struct {
func mergeProllySecondaryIndexes(
ctx context.Context,
vrw types.ValueReadWriter,
ns tree.NodeStore,
postMergeSchema, rootSch, mergeSch, ancSch schema.Schema,
mergedData durable.Index,
tbl, mergeTbl, tableToUpdate *doltdb.Table,
@@ -125,6 +126,7 @@ func mergeProllySecondaryIndexes(
mergedSet, err := mergeProllyIndexSets(
ctx,
vrw,
ns,
postMergeSchema, rootSch, mergeSch, ancSch,
mergedData,
rootSet, mergeSet, ancSet,
@@ -146,13 +148,14 @@ func mergeProllySecondaryIndexes(
func mergeProllyIndexSets(
ctx context.Context,
vrw types.ValueReadWriter,
ns tree.NodeStore,
postMergeSchema, rootSch, mergeSch, ancSch schema.Schema,
mergedData durable.Index,
root, merge, anc durable.IndexSet,
artEditor prolly.ArtifactsEditor,
theirRootIsh hash.Hash,
tblName string) (durable.IndexSet, error) {
mergedIndexSet := durable.NewIndexSet(ctx, vrw)
mergedIndexSet := durable.NewIndexSet(ctx, vrw, ns)
mergedM := durable.ProllyMapFromIndex(mergedData)
@@ -191,7 +194,7 @@ func mergeProllyIndexSets(
mergedIndex, err := func() (durable.Index, error) {
if !rootOK || !mergeOK || !ancOK {
return buildIndex(ctx, vrw, postMergeSchema, index, mergedM, artEditor, theirRootIsh, tblName)
return buildIndex(ctx, vrw, ns, postMergeSchema, index, mergedM, artEditor, theirRootIsh, tblName)
}
if index.IsUnique() {
@@ -232,7 +235,7 @@ func mergeProllyIndexSets(
return mergedIndexSet, nil
}
func buildIndex(ctx context.Context, vrw types.ValueReadWriter, postMergeSchema schema.Schema, index schema.Index, m prolly.Map, artEditor prolly.ArtifactsEditor, theirRootIsh hash.Hash, tblName string) (durable.Index, error) {
func buildIndex(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, postMergeSchema schema.Schema, index schema.Index, m prolly.Map, artEditor prolly.ArtifactsEditor, theirRootIsh hash.Hash, tblName string) (durable.Index, error) {
if index.IsUnique() {
meta, err := makeUniqViolMeta(postMergeSchema, index)
if err != nil {
@@ -249,6 +252,7 @@ func buildIndex(ctx context.Context, vrw types.ValueReadWriter, postMergeSchema
mergedMap, err := creation.BuildUniqueProllyIndex(
ctx,
vrw,
ns,
postMergeSchema,
index,
m,
@@ -271,7 +275,7 @@ func buildIndex(ctx context.Context, vrw types.ValueReadWriter, postMergeSchema
return mergedMap, nil
}
mergedIndex, err := creation.BuildSecondaryProllyIndex(ctx, vrw, postMergeSchema, index, m)
mergedIndex, err := creation.BuildSecondaryProllyIndex(ctx, vrw, ns, postMergeSchema, index, m)
if err != nil {
return nil, err
}
+20 -20
View File
@@ -307,8 +307,8 @@ func TestMergeCommits(t *testing.T) {
t.Skip()
}
vrw, rightCommitHash, ancCommitHash, root, mergeRoot, ancRoot, expectedRows, expectedArtifacts := setupMergeTest(t)
merger := NewMerger(context.Background(), rightCommitHash, ancCommitHash, root, mergeRoot, ancRoot, vrw)
vrw, ns, rightCommitHash, ancCommitHash, root, mergeRoot, ancRoot, expectedRows, expectedArtifacts := setupMergeTest(t)
merger := NewMerger(context.Background(), rightCommitHash, ancCommitHash, root, mergeRoot, ancRoot, vrw, ns)
opts := editor.TestEditorOptions(vrw)
// TODO: stats
merged, _, err := merger.MergeTable(context.Background(), tableName, opts, false)
@@ -320,7 +320,7 @@ func TestMergeCommits(t *testing.T) {
assert.NoError(t, err)
sch, err := tbl.GetSchema(context.Background())
assert.NoError(t, err)
expected, err := doltdb.NewTable(context.Background(), vrw, sch, expectedRows, nil, nil)
expected, err := doltdb.NewTable(context.Background(), vrw, ns, sch, expectedRows, nil, nil)
assert.NoError(t, err)
expected, err = rebuildAllProllyIndexes(context.Background(), expected)
assert.NoError(t, err)
@@ -357,9 +357,9 @@ func TestNomsMergeCommits(t *testing.T) {
t.Skip()
}
vrw, rightCmHash, ancCmHash, root, mergeRoot, ancRoot, expectedRows, expectedConflicts, expectedStats := setupNomsMergeTest(t)
vrw, ns, rightCmHash, ancCmHash, root, mergeRoot, ancRoot, expectedRows, expectedConflicts, expectedStats := setupNomsMergeTest(t)
merger := NewMerger(context.Background(), rightCmHash, ancCmHash, root, mergeRoot, ancRoot, vrw)
merger := NewMerger(context.Background(), rightCmHash, ancCmHash, root, mergeRoot, ancRoot, vrw, ns)
opts := editor.TestEditorOptions(vrw)
merged, stats, err := merger.MergeTable(context.Background(), tableName, opts, false)
if err != nil {
@@ -371,7 +371,7 @@ func TestNomsMergeCommits(t *testing.T) {
assert.NoError(t, err)
sch, err := tbl.GetSchema(context.Background())
assert.NoError(t, err)
expected, err := doltdb.NewNomsTable(context.Background(), vrw, sch, expectedRows, nil, nil)
expected, err := doltdb.NewNomsTable(context.Background(), vrw, ns, sch, expectedRows, nil, nil)
assert.NoError(t, err)
expected, err = editor.RebuildAllIndexes(context.Background(), expected, editor.TestEditorOptions(vrw))
assert.NoError(t, err)
@@ -418,13 +418,12 @@ func sortTests(t []testRow) {
})
}
func setupMergeTest(t *testing.T) (types.ValueReadWriter, hash.Hash, hash.Hash, *doltdb.RootValue, *doltdb.RootValue, *doltdb.RootValue, durable.Index, prolly.ArtifactMap) {
func setupMergeTest(t *testing.T) (types.ValueReadWriter, tree.NodeStore, hash.Hash, hash.Hash, *doltdb.RootValue, *doltdb.RootValue, *doltdb.RootValue, durable.Index, prolly.ArtifactMap) {
ddb := mustMakeEmptyRepo(t)
vrw := ddb.ValueReadWriter()
ns := ddb.NodeStore()
sortTests(testRows)
ns := tree.NewNodeStore(shim.ChunkStoreFromVRW(vrw))
var initialKVs []val.Tuple
var expectedKVs []val.Tuple
@@ -474,17 +473,17 @@ func setupMergeTest(t *testing.T) (types.ValueReadWriter, hash.Hash, hash.Hash,
mergeRows, err := rightMut.Map(context.Background())
require.NoError(t, err)
rootTbl, err := doltdb.NewTable(context.Background(), vrw, sch, durable.IndexFromProllyMap(updatedRows), nil, nil)
rootTbl, err := doltdb.NewTable(context.Background(), vrw, ns, sch, durable.IndexFromProllyMap(updatedRows), nil, nil)
require.NoError(t, err)
rootTbl, err = rebuildAllProllyIndexes(context.Background(), rootTbl)
require.NoError(t, err)
mergeTbl, err := doltdb.NewTable(context.Background(), vrw, sch, durable.IndexFromProllyMap(mergeRows), nil, nil)
mergeTbl, err := doltdb.NewTable(context.Background(), vrw, ns, sch, durable.IndexFromProllyMap(mergeRows), nil, nil)
require.NoError(t, err)
mergeTbl, err = rebuildAllProllyIndexes(context.Background(), mergeTbl)
require.NoError(t, err)
ancTbl, err := doltdb.NewTable(context.Background(), vrw, sch, durable.IndexFromProllyMap(initialRows), nil, nil)
ancTbl, err := doltdb.NewTable(context.Background(), vrw, ns, sch, durable.IndexFromProllyMap(initialRows), nil, nil)
require.NoError(t, err)
ancTbl, err = rebuildAllProllyIndexes(context.Background(), ancTbl)
require.NoError(t, err)
@@ -511,12 +510,13 @@ func setupMergeTest(t *testing.T) (types.ValueReadWriter, hash.Hash, hash.Hash,
expectedArtifacts, err := artEditor.Flush(context.Background())
require.NoError(t, err)
return vrw, rightCmHash, baseCmHash, root, mergeRoot, ancRoot, durable.IndexFromProllyMap(expectedRows), expectedArtifacts
return vrw, ns, rightCmHash, baseCmHash, root, mergeRoot, ancRoot, durable.IndexFromProllyMap(expectedRows), expectedArtifacts
}
func setupNomsMergeTest(t *testing.T) (types.ValueReadWriter, hash.Hash, hash.Hash, *doltdb.RootValue, *doltdb.RootValue, *doltdb.RootValue, types.Map, types.Map, *MergeStats) {
func setupNomsMergeTest(t *testing.T) (types.ValueReadWriter, tree.NodeStore, hash.Hash, hash.Hash, *doltdb.RootValue, *doltdb.RootValue, *doltdb.RootValue, types.Map, types.Map, *MergeStats) {
ddb := mustMakeEmptyRepo(t)
vrw := ddb.ValueReadWriter()
ns := ddb.NodeStore()
sortTests(testRows)
var initalKVs []types.Value
@@ -575,29 +575,29 @@ func setupNomsMergeTest(t *testing.T) (types.ValueReadWriter, hash.Hash, hash.Ha
mergeRows, err := rightE.Map(context.Background())
require.NoError(t, err)
tbl, err := doltdb.NewNomsTable(context.Background(), vrw, sch, initialRows, nil, nil)
tbl, err := doltdb.NewNomsTable(context.Background(), vrw, ns, sch, initialRows, nil, nil)
require.NoError(t, err)
tbl, err = editor.RebuildAllIndexes(context.Background(), tbl, editor.TestEditorOptions(vrw))
require.NoError(t, err)
updatedTbl, err := doltdb.NewNomsTable(context.Background(), vrw, sch, updatedRows, nil, nil)
updatedTbl, err := doltdb.NewNomsTable(context.Background(), vrw, ns, sch, updatedRows, nil, nil)
require.NoError(t, err)
updatedTbl, err = editor.RebuildAllIndexes(context.Background(), updatedTbl, editor.TestEditorOptions(vrw))
require.NoError(t, err)
mergeTbl, err := doltdb.NewNomsTable(context.Background(), vrw, sch, mergeRows, nil, nil)
mergeTbl, err := doltdb.NewNomsTable(context.Background(), vrw, ns, sch, mergeRows, nil, nil)
require.NoError(t, err)
mergeTbl, err = editor.RebuildAllIndexes(context.Background(), mergeTbl, editor.TestEditorOptions(vrw))
require.NoError(t, err)
ancTable, err := doltdb.NewNomsTable(context.Background(), vrw, sch, initialRows, nil, nil)
ancTable, err := doltdb.NewNomsTable(context.Background(), vrw, ns, sch, initialRows, nil, nil)
require.NoError(t, err)
ancTable, err = editor.RebuildAllIndexes(context.Background(), ancTable, editor.TestEditorOptions(vrw))
require.NoError(t, err)
rightCmHash, ancCommitHash, root, mergeRoot, ancRoot := buildLeftRightAncCommitsAndBranches(t, ddb, updatedTbl, mergeTbl, ancTable)
return vrw, rightCmHash, ancCommitHash, root, mergeRoot, ancRoot, expectedRows, expectedConflicts, calcExpectedStats(t)
return vrw, ns, rightCmHash, ancCommitHash, root, mergeRoot, ancRoot, expectedRows, expectedConflicts, calcExpectedStats(t)
}
// rebuildAllProllyIndexes builds the data for the secondary indexes in |tbl|'s
@@ -624,7 +624,7 @@ func rebuildAllProllyIndexes(ctx context.Context, tbl *doltdb.Table) (*doltdb.Ta
primary := durable.ProllyMapFromIndex(tableRowData)
for _, index := range sch.Indexes().AllIndexes() {
rebuiltIndexRowData, err := creation.BuildSecondaryProllyIndex(ctx, tbl.ValueReadWriter(), sch, index, primary)
rebuiltIndexRowData, err := creation.BuildSecondaryProllyIndex(ctx, tbl.ValueReadWriter(), tbl.NodeStore(), sch, index, primary)
if err != nil {
return nil, err
}
+4 -3
View File
@@ -28,6 +28,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
"github.com/dolthub/dolt/go/libraries/utils/set"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -41,7 +42,7 @@ func Theirs(key types.Value, cnf conflict.Conflict) (types.Value, error) {
return cnf.MergeValue, nil
}
func ResolveTable(ctx context.Context, vrw types.ValueReadWriter, tblName string, root *doltdb.RootValue, autoResFunc AutoResolver, opts editor.Options) (*doltdb.RootValue, error) {
func ResolveTable(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, tblName string, root *doltdb.RootValue, autoResFunc AutoResolver, opts editor.Options) (*doltdb.RootValue, error) {
tbl, ok, err := root.GetTable(ctx, tblName)
if err != nil {
return nil, err
@@ -75,7 +76,7 @@ func ResolveTable(ctx context.Context, vrw types.ValueReadWriter, tblName string
return nil, err
}
confIdx, err := durable.NewEmptyConflictIndex(ctx, vrw, schemas.Schema, schemas.MergeSchema, schemas.Base)
confIdx, err := durable.NewEmptyConflictIndex(ctx, vrw, ns, schemas.Schema, schemas.MergeSchema, schemas.Base)
if err != nil {
return nil, err
}
@@ -291,7 +292,7 @@ func autoResolve(ctx context.Context, dEnv *env.DoltEnv, root *doltdb.RootValue,
var err error
opts := editor.Options{Deaf: dEnv.DbEaFactory(), Tempdir: dEnv.TempTableFilesDir()}
for _, tblName := range tbls {
root, err = ResolveTable(ctx, root.VRW(), tblName, root, autoResolver, opts)
root, err = ResolveTable(ctx, root.VRW(), root.NodeStore(), tblName, root, autoResolver, opts)
if err != nil {
return err
}
+1 -1
View File
@@ -72,7 +72,7 @@ func Revert(ctx context.Context, ddb *doltdb.DoltDB, root *doltdb.RootValue, hea
return nil, "", err
}
} else {
theirRoot, err = doltdb.EmptyRootValue(ctx, ddb.ValueReadWriter())
theirRoot, err = doltdb.EmptyRootValue(ctx, ddb.ValueReadWriter(), ddb.NodeStore())
if err != nil {
return nil, "", err
}
+2 -2
View File
@@ -99,7 +99,7 @@ func AddForeignKeyViolations(ctx context.Context, newRoot, baseRoot *doltdb.Root
return nil, nil, err
}
// Parent does not exist in the ancestor so we use an empty map
emptyIdx, err := durable.NewEmptyIndex(ctx, postParent.Table.ValueReadWriter(), postParent.Schema)
emptyIdx, err := durable.NewEmptyIndex(ctx, postParent.Table.ValueReadWriter(), postParent.Table.NodeStore(), postParent.Schema)
if err != nil {
return nil, nil, err
}
@@ -122,7 +122,7 @@ func AddForeignKeyViolations(ctx context.Context, newRoot, baseRoot *doltdb.Root
}
innerFoundViolations := false
// Child does not exist in the ancestor so we use an empty map
emptyIdx, err := durable.NewEmptyIndex(ctx, postChild.Table.ValueReadWriter(), postChild.Schema)
emptyIdx, err := durable.NewEmptyIndex(ctx, postChild.Table.ValueReadWriter(), postChild.Table.NodeStore(), postChild.Schema)
if err != nil {
return nil, nil, err
}
@@ -52,7 +52,7 @@ func createTestSchema() schema.Schema {
func TestNomsMarshalling(t *testing.T) {
tSchema := createTestSchema()
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
_, vrw, _, err := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
if err != nil {
t.Fatal("Could not create in mem noms db.")
@@ -158,7 +158,7 @@ func TestTypeInfoMarshalling(t *testing.T) {
nbf, err := types.GetFormatForVersionString(constants.FormatDefaultString)
require.NoError(t, err)
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), nbf, nil, nil)
_, vrw, _, err := dbfactory.MemFactory{}.CreateDB(context.Background(), nbf, nil, nil)
require.NoError(t, err)
val, err := MarshalSchemaAsNomsValue(context.Background(), vrw, originalSch)
require.NoError(t, err)
@@ -108,7 +108,12 @@ func (ti *decimalType) ConvertValueToNomsValue(ctx context.Context, vrw types.Va
if !decVal.Valid {
return nil, fmt.Errorf(`"%v" has unexpectedly encountered a null value from embedded type`, ti.String())
}
return types.Decimal(decVal.Decimal), nil
dec, err := ti.sqlDecimalType.BoundsCheck(decVal.Decimal)
if err != nil {
return nil, err
}
return types.Decimal(dec), nil
}
// Equals implements TypeInfo interface.
+1 -1
View File
@@ -427,7 +427,7 @@ func insertKeyedData(ctx context.Context, nbf *types.NomsBinFormat, oldTable *do
}
// Create the new Table and rebuild all the indexes
newTable, err := doltdb.NewNomsTable(ctx, oldTable.ValueReadWriter(), newSchema, empty, nil, nil)
newTable, err := doltdb.NewNomsTable(ctx, oldTable.ValueReadWriter(), oldTable.NodeStore(), newSchema, empty, nil, nil)
if err != nil {
return nil, err
}
@@ -81,7 +81,7 @@ func DoDoltConstraintsVerify(ctx *sql.Context, args []string) (int, error) {
var comparingRoot *doltdb.RootValue
if verifyAll {
comparingRoot, err = doltdb.EmptyRootValue(ctx, workingRoot.VRW())
comparingRoot, err = doltdb.EmptyRootValue(ctx, workingRoot.VRW(), workingRoot.NodeStore())
if err != nil {
return 1, err
}
@@ -437,7 +437,7 @@ func (tx *DoltTransaction) stompConflicts(ctx *sql.Context, mergedRoot *doltdb.R
var err error
root := mergedRoot
for _, tblName := range tablesWithConflicts {
root, err = merge.ResolveTable(ctx, mergedRoot.VRW(), tblName, root, merge.Theirs, tx.mergeEditOpts)
root, err = merge.ResolveTable(ctx, mergedRoot.VRW(), mergedRoot.NodeStore(), tblName, root, merge.Theirs, tx.mergeEditOpts)
if err != nil {
return nil, err
}
@@ -30,6 +30,7 @@ import (
"github.com/dolthub/dolt/go/store/pool"
"github.com/dolthub/dolt/go/store/prolly"
"github.com/dolthub/dolt/go/store/prolly/shim"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/store/val"
)
@@ -107,6 +108,7 @@ type prollyConflictRowIter struct {
itr prolly.ConflictArtifactIter
tblName string
vrw types.ValueReadWriter
ns tree.NodeStore
ourRows prolly.Map
keyless bool
@@ -159,6 +161,7 @@ func newProllyConflictRowIter(ctx *sql.Context, ct ProllyConflictsTable) (*proll
itr: itr,
tblName: ct.tblName,
vrw: ct.tbl.ValueReadWriter(),
ns: ct.tbl.NodeStore(),
ourRows: ourRows,
keyless: keyless,
kd: kd,
@@ -342,7 +345,7 @@ func (itr *prollyConflictRowIter) nextConflictVals(ctx *sql.Context) (c conf, er
// the currently loaded maps. |baseHash| and |theirHash| are table hashes.
func (itr *prollyConflictRowIter) loadTableMaps(ctx context.Context, baseHash, theirHash hash.Hash) error {
if itr.baseHash.Compare(baseHash) != 0 {
rv, err := doltdb.LoadRootValueFromRootIshAddr(ctx, itr.vrw, baseHash)
rv, err := doltdb.LoadRootValueFromRootIshAddr(ctx, itr.vrw, itr.ns, baseHash)
if err != nil {
return err
}
@@ -363,7 +366,7 @@ func (itr *prollyConflictRowIter) loadTableMaps(ctx context.Context, baseHash, t
}
if itr.theirHash.Compare(theirHash) != 0 {
rv, err := doltdb.LoadRootValueFromRootIshAddr(ctx, itr.vrw, theirHash)
rv, err := doltdb.LoadRootValueFromRootIshAddr(ctx, itr.vrw, itr.ns, theirHash)
if err != nil {
return err
}
@@ -256,11 +256,6 @@ func newProllyDiffIter(ctx *sql.Context, dp DiffPartition, ddb *doltdb.DoltDB, t
}
to := durable.ProllyMapFromIndex(t)
fromConverter, err := NewProllyRowConverter(fSch, targetFromSchema, ctx.Warn, dp.from.NodeStore())
if err != nil {
return prollyDiffIter{}, err
}
var nodeStore tree.NodeStore
if dp.to != nil {
nodeStore = dp.to.NodeStore()
@@ -268,6 +263,11 @@ func newProllyDiffIter(ctx *sql.Context, dp DiffPartition, ddb *doltdb.DoltDB, t
nodeStore = dp.from.NodeStore()
}
fromConverter, err := NewProllyRowConverter(fSch, targetFromSchema, ctx.Warn, nodeStore)
if err != nil {
return prollyDiffIter{}, err
}
toConverter, err := NewProllyRowConverter(tSch, targetToSchema, ctx.Warn, nodeStore)
if err != nil {
return prollyDiffIter{}, err
@@ -229,7 +229,7 @@ func tableData(ctx *sql.Context, tbl *doltdb.Table, ddb *doltdb.DoltDB) (durable
var err error
if tbl == nil {
data, err = durable.NewEmptyIndex(ctx, ddb.ValueReadWriter(), schema.EmptySchema)
data, err = durable.NewEmptyIndex(ctx, ddb.ValueReadWriter(), ddb.NodeStore(), schema.EmptySchema)
if err != nil {
return nil, nil, err
}
@@ -16,14 +16,19 @@ package dtables
import (
"context"
"io"
"github.com/google/uuid"
"gopkg.in/src-d/go-errors.v1"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/store/prolly"
"github.com/dolthub/dolt/go/store/prolly/shim"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/store/val"
)
var queryCatalogCols = schema.NewColCollection(
@@ -50,7 +55,34 @@ type SavedQuery struct {
Order uint64
}
func savedQueryFromKV(id string, valTuple types.Tuple) (SavedQuery, error) {
// savedQueryFromKVProlly decodes a saved-query row stored in the new (prolly)
// row format. |id| is the primary key; |value| carries the remaining columns
// at the value-descriptor positions established by catalogVd: order (0),
// name (1), query (2), description (3). Any field absent from the tuple
// falls back to its zero value.
func savedQueryFromKVProlly(id string, value val.Tuple) (SavedQuery, error) {
	orderVal, ok := catalogVd.GetUint64(0, value)
	if !ok {
		orderVal = 0
	}

	nameVal, ok := catalogVd.GetString(1, value)
	if !ok {
		nameVal = ""
	}

	queryVal, ok := catalogVd.GetString(2, value)
	if !ok {
		// BUG FIX: previously reset nameVal here, leaving queryVal unset
		// and clobbering the already-decoded name.
		queryVal = ""
	}

	descVal, ok := catalogVd.GetString(3, value)
	if !ok {
		descVal = ""
	}

	return SavedQuery{
		ID:          id,
		Name:        nameVal,
		Query:       queryVal,
		Description: descVal,
		Order:       orderVal,
	}, nil
}
func savedQueryFromKVNoms(id string, valTuple types.Tuple) (SavedQuery, error) {
tv, err := row.ParseTaggedValues(valTuple)
if err != nil {
@@ -83,6 +115,8 @@ func (sq SavedQuery) asRow(nbf *types.NomsBinFormat) (row.Row, error) {
}
var DoltQueryCatalogSchema = schema.MustSchemaFromCols(queryCatalogCols)
var catalogKd = shim.KeyDescriptorFromSchema(DoltQueryCatalogSchema)
var catalogVd = shim.ValueDescriptorFromSchema(DoltQueryCatalogSchema)
// Creates the query catalog table if it doesn't exist.
func createQueryCatalogIfNotExists(ctx context.Context, root *doltdb.RootValue) (*doltdb.RootValue, error) {
@@ -130,13 +164,34 @@ func newQueryCatalogEntry(ctx context.Context, root *doltdb.RootValue, id, name,
return SavedQuery{}, nil, err
}
var sq SavedQuery
var newTable *doltdb.Table
if types.IsFormat_DOLT_1(tbl.Format()) {
sq, newTable, err = newQueryCatalogEntryProlly(ctx, tbl, id, name, query, description)
} else {
sq, newTable, err = newQueryCatalogEntryNoms(ctx, tbl, id, name, query, description)
}
if err != nil {
return SavedQuery{}, nil, err
}
root, err = root.PutTable(ctx, doltdb.DoltQueryCatalogTableName, newTable)
if err != nil {
return SavedQuery{}, nil, err
}
return sq, root, err
}
func newQueryCatalogEntryNoms(ctx context.Context, tbl *doltdb.Table, id, name, query, description string) (SavedQuery, *doltdb.Table, error) {
data, err := tbl.GetNomsRowData(ctx)
if err != nil {
return SavedQuery{}, nil, err
}
order := getMaxQueryOrder(data, ctx) + 1
existingSQ, err := RetrieveFromQueryCatalog(ctx, root, id)
order := getMaxQueryOrderNoms(data, ctx) + 1
existingSQ, err := retrieveFromQueryCatalogNoms(ctx, tbl, id)
if err != nil {
if !ErrQueryNotFound.Is(err) {
@@ -154,7 +209,7 @@ func newQueryCatalogEntry(ctx context.Context, root *doltdb.RootValue, id, name,
Order: order,
}
r, err := sq.asRow(root.VRW().Format())
r, err := sq.asRow(tbl.Format())
if err != nil {
return SavedQuery{}, nil, err
}
@@ -172,13 +227,66 @@ func newQueryCatalogEntry(ctx context.Context, root *doltdb.RootValue, id, name,
return SavedQuery{}, nil, err
}
root, err = root.PutTable(ctx, doltdb.DoltQueryCatalogTableName, newTable)
return sq, newTable, nil
}
// newQueryCatalogEntryProlly inserts or updates the saved query |id| in the
// query catalog table |tbl| using the new (prolly) storage format. If the id
// does not already exist, the entry is assigned an order one greater than the
// current maximum; otherwise the existing entry's order is preserved. Returns
// the resulting SavedQuery and the updated table.
func newQueryCatalogEntryProlly(ctx context.Context, tbl *doltdb.Table, id, name, query, description string) (SavedQuery, *doltdb.Table, error) {
	idx, err := tbl.GetRowData(ctx)
	if err != nil {
		return SavedQuery{}, nil, err
	}
	m := durable.ProllyMapFromIndex(idx)

	// A not-found error is expected for new entries; anything else is fatal.
	existingSQ, err := retrieveFromQueryCatalogProlly(ctx, tbl, id)
	if err != nil && !ErrQueryNotFound.Is(err) {
		return SavedQuery{}, nil, err
	}

	var order uint64
	if ErrQueryNotFound.Is(err) {
		order, err = getMaxQueryOrderProlly(ctx, m)
		if err != nil {
			return SavedQuery{}, nil, err
		}
		order++
	} else {
		order = existingSQ.Order
	}

	// Build the key and value tuples per the catalog descriptors:
	// key = (id); value = (order, name, query, description).
	kb := val.NewTupleBuilder(catalogKd)
	vb := val.NewTupleBuilder(catalogVd)

	kb.PutString(0, id)
	k := kb.Build(m.Pool())

	vb.PutUint64(0, order)
	vb.PutString(1, name)
	vb.PutString(2, query)
	vb.PutString(3, description)
	v := vb.Build(m.Pool())

	mut := m.Mutate()
	if err = mut.Put(ctx, k, v); err != nil {
		return SavedQuery{}, nil, err
	}
	m, err = mut.Map(ctx)
	if err != nil {
		return SavedQuery{}, nil, err
	}

	idx = durable.IndexFromProllyMap(m)
	tbl, err = tbl.UpdateRows(ctx, idx)
	if err != nil {
		return SavedQuery{}, nil, err
	}

	// FIX: removed a stray leftover `return sq, root, err` line that
	// referenced identifiers not in scope in this function.
	return SavedQuery{
		ID:          id,
		Name:        name,
		Query:       query,
		Description: description,
		Order:       order,
	}, tbl, nil
}
func RetrieveFromQueryCatalog(ctx context.Context, root *doltdb.RootValue, id string) (SavedQuery, error) {
@@ -190,13 +298,43 @@ func RetrieveFromQueryCatalog(ctx context.Context, root *doltdb.RootValue, id st
return SavedQuery{}, doltdb.ErrTableNotFound
}
if types.IsFormat_DOLT_1(tbl.Format()) {
return retrieveFromQueryCatalogProlly(ctx, tbl, id)
}
return retrieveFromQueryCatalogNoms(ctx, tbl, id)
}
// retrieveFromQueryCatalogProlly looks up the saved query |id| in |tbl|
// using the new (prolly) storage format. Returns ErrQueryNotFound when no
// entry exists for the id.
func retrieveFromQueryCatalogProlly(ctx context.Context, tbl *doltdb.Table, id string) (SavedQuery, error) {
	idx, err := tbl.GetRowData(ctx)
	if err != nil {
		return SavedQuery{}, err
	}
	m := durable.ProllyMapFromIndex(idx)

	kb := val.NewTupleBuilder(catalogKd)
	kb.PutString(0, id)
	k := kb.Build(m.Pool())

	var value val.Tuple
	// FIX: propagate Get errors instead of discarding them; previously a
	// storage read failure was misreported as ErrQueryNotFound.
	err = m.Get(ctx, k, func(_, v val.Tuple) error {
		value = v
		return nil
	})
	if err != nil {
		return SavedQuery{}, err
	}

	if value == nil {
		return SavedQuery{}, ErrQueryNotFound.New(id)
	}

	return savedQueryFromKVProlly(id, value)
}
func retrieveFromQueryCatalogNoms(ctx context.Context, tbl *doltdb.Table, id string) (SavedQuery, error) {
m, err := tbl.GetNomsRowData(ctx)
if err != nil {
return SavedQuery{}, err
}
k, err := types.NewTuple(root.VRW().Format(), types.Uint(schema.QueryCatalogIdTag), types.String(id))
k, err := types.NewTuple(tbl.Format(), types.Uint(schema.QueryCatalogIdTag), types.String(id))
if err != nil {
return SavedQuery{}, err
@@ -210,11 +348,11 @@ func RetrieveFromQueryCatalog(ctx context.Context, root *doltdb.RootValue, id st
return SavedQuery{}, ErrQueryNotFound.New(id)
}
return savedQueryFromKV(id, val.(types.Tuple))
return savedQueryFromKVNoms(id, val.(types.Tuple))
}
// Returns the largest order entry in the catalog
func getMaxQueryOrder(data types.Map, ctx context.Context) uint64 {
func getMaxQueryOrderNoms(data types.Map, ctx context.Context) uint64 {
maxOrder := uint64(0)
data.IterAll(ctx, func(key, value types.Value) error {
r, _ := row.FromNoms(DoltQueryCatalogSchema, key.(types.Tuple), value.(types.Tuple))
@@ -229,3 +367,27 @@ func getMaxQueryOrder(data types.Map, ctx context.Context) uint64 {
})
return maxOrder
}
// getMaxQueryOrderProlly scans every row of |data| and returns the largest
// order value stored at value-descriptor position 0. Rows without a valid
// order field are skipped; an empty map yields 0.
func getMaxQueryOrderProlly(ctx context.Context, data prolly.Map) (uint64, error) {
	itr, err := data.IterAll(ctx)
	if err != nil {
		return 0, err
	}

	var maxOrder uint64
	for {
		_, v, err := itr.Next(ctx)
		if err == io.EOF {
			// Exhausted the iterator; return the running maximum.
			return maxOrder, nil
		}
		if err != nil {
			return 0, err
		}
		if order, ok := catalogVd.GetUint64(0, v); ok && order > maxOrder {
			maxOrder = order
		}
	}
}
@@ -30,7 +30,6 @@ import (
func TestInsertIntoQueryCatalogTable(t *testing.T) {
dEnv := dtestutils.CreateTestEnv()
sqle.CreateTestDatabase(dEnv, t)
ctx := context.Background()
root, _ := dEnv.WorkingRoot(ctx)
@@ -29,7 +29,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/table/typed/noms"
"github.com/dolthub/dolt/go/store/pool"
"github.com/dolthub/dolt/go/store/prolly"
"github.com/dolthub/dolt/go/store/prolly/shim"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/store/val"
@@ -93,6 +92,7 @@ func DoltDiffIndexesFromTable(ctx context.Context, db, tbl string, t *doltdb.Tab
unique: true,
comment: "",
vrw: t.ValueReadWriter(),
ns: t.NodeStore(),
keyBld: keyBld,
order: sql.IndexOrderAsc,
constrainedToLookupExpression: false,
@@ -216,6 +216,7 @@ func getPrimaryKeyIndex(ctx context.Context, db, tbl string, t *doltdb.Table, sc
isPk: true,
comment: "",
vrw: t.ValueReadWriter(),
ns: t.NodeStore(),
keyBld: keyBld,
order: sql.IndexOrderAsc,
constrainedToLookupExpression: true,
@@ -245,6 +246,7 @@ func getSecondaryIndex(ctx context.Context, db, tbl string, t *doltdb.Table, sch
isPk: false,
comment: idx.Comment(),
vrw: t.ValueReadWriter(),
ns: t.NodeStore(),
keyBld: keyBld,
order: sql.IndexOrderAsc,
constrainedToLookupExpression: true,
@@ -357,6 +359,7 @@ type doltIndex struct {
constrainedToLookupExpression bool
vrw types.ValueReadWriter
ns tree.NodeStore
keyBld *val.TupleBuilder
cache cachedDurableIndexes
@@ -383,7 +386,7 @@ func (di *doltIndex) NewLookup(ctx *sql.Context, ranges ...sql.Range) (sql.Index
}
if types.IsFormat_DOLT_1(di.vrw.Format()) {
return di.newProllyLookup(ctx, tree.NewNodeStore(shim.ChunkStoreFromVRW(di.vrw)), ranges...)
return di.newProllyLookup(ctx, di.ns, ranges...)
}
return di.newNomsLookup(ctx, ranges...)
+2 -2
View File
@@ -657,7 +657,7 @@ func (t *WritableDoltTable) Truncate(ctx *sql.Context) (int, error) {
// truncate returns an empty copy of the table given by setting the rows and indexes to empty. The schema can be
// updated at the same time.
func truncate(ctx *sql.Context, table *doltdb.Table, sch schema.Schema) (*doltdb.Table, error) {
empty, err := durable.NewEmptyIndex(ctx, table.ValueReadWriter(), sch)
empty, err := durable.NewEmptyIndex(ctx, table.ValueReadWriter(), table.NodeStore(), sch)
if err != nil {
return nil, err
}
@@ -675,7 +675,7 @@ func truncate(ctx *sql.Context, table *doltdb.Table, sch schema.Schema) (*doltdb
}
// truncate table resets auto-increment value
return doltdb.NewTable(ctx, table.ValueReadWriter(), sch, empty, idxSet, nil)
return doltdb.NewTable(ctx, table.ValueReadWriter(), table.NodeStore(), sch, empty, idxSet, nil)
}
// Updater implements sql.UpdatableTable
+4 -3
View File
@@ -84,14 +84,15 @@ func NewTempTable(
return nil, err
}
vrw := ddb.ValueReadWriter()
ns := ddb.NodeStore()
idx, err := durable.NewEmptyIndex(ctx, vrw, sch)
idx, err := durable.NewEmptyIndex(ctx, vrw, ns, sch)
if err != nil {
return nil, err
}
set := durable.NewIndexSet(ctx, vrw)
set := durable.NewIndexSet(ctx, vrw, ns)
tbl, err := doltdb.NewTable(ctx, ddb.ValueReadWriter(), sch, idx, set, nil)
tbl, err := doltdb.NewTable(ctx, vrw, ns, sch, idx, set, nil)
if err != nil {
return nil, err
}
+1 -1
View File
@@ -537,7 +537,7 @@ func UpdateTables(t *testing.T, ctx context.Context, root *doltdb.RootValue, tbl
require.NoError(t, err)
}
tbl, err = doltdb.NewNomsTable(ctx, root.VRW(), sch, rowData, indexData, nil)
tbl, err = doltdb.NewNomsTable(ctx, root.VRW(), root.NodeStore(), sch, rowData, indexData, nil)
require.NoError(t, err)
root, err = root.PutTable(ctx, tblName, tbl)
@@ -29,6 +29,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
"github.com/dolthub/dolt/go/store/prolly"
"github.com/dolthub/dolt/go/store/prolly/shim"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/store/val"
)
@@ -141,7 +142,7 @@ func BuildSecondaryIndex(ctx context.Context, tbl *doltdb.Table, idx schema.Inde
if err != nil {
return nil, err
}
return durable.IndexFromNomsMap(m, tbl.ValueReadWriter()), nil
return durable.IndexFromNomsMap(m, tbl.ValueReadWriter(), tbl.NodeStore()), nil
case types.Format_DOLT_1:
sch, err := tbl.GetSchema(ctx)
@@ -153,7 +154,7 @@ func BuildSecondaryIndex(ctx context.Context, tbl *doltdb.Table, idx schema.Inde
return nil, err
}
primary := durable.ProllyMapFromIndex(m)
return BuildSecondaryProllyIndex(ctx, tbl.ValueReadWriter(), sch, idx, primary)
return BuildSecondaryProllyIndex(ctx, tbl.ValueReadWriter(), tbl.NodeStore(), sch, idx, primary)
default:
return nil, fmt.Errorf("unknown NomsBinFormat")
@@ -162,15 +163,15 @@ func BuildSecondaryIndex(ctx context.Context, tbl *doltdb.Table, idx schema.Inde
// BuildSecondaryProllyIndex builds secondary index data for the given primary
// index row data |primary|. |sch| is the current schema of the table.
func BuildSecondaryProllyIndex(ctx context.Context, vrw types.ValueReadWriter, sch schema.Schema, idx schema.Index, primary prolly.Map) (durable.Index, error) {
func BuildSecondaryProllyIndex(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, sch schema.Schema, idx schema.Index, primary prolly.Map) (durable.Index, error) {
if idx.IsUnique() {
kd := shim.KeyDescriptorFromSchema(idx.Schema())
return BuildUniqueProllyIndex(ctx, vrw, sch, idx, primary, func(ctx context.Context, existingKey, newKey val.Tuple) error {
return BuildUniqueProllyIndex(ctx, vrw, ns, sch, idx, primary, func(ctx context.Context, existingKey, newKey val.Tuple) error {
return sql.ErrDuplicateEntry.Wrap(&prollyUniqueKeyErr{k: newKey, kd: kd, IndexName: idx.Name()}, idx.Name())
})
}
empty, err := durable.NewEmptyIndex(ctx, vrw, idx.Schema())
empty, err := durable.NewEmptyIndex(ctx, vrw, ns, idx.Schema())
if err != nil {
return nil, err
}
@@ -233,8 +234,8 @@ type DupEntryCb func(ctx context.Context, existingKey, newKey val.Tuple) error
// BuildUniqueProllyIndex builds a unique index based on the given |primary| row
// data. If any duplicate entries are found, they are passed to |cb|. If |cb|
// returns a non-nil error then the process is stopped.
func BuildUniqueProllyIndex(ctx context.Context, vrw types.ValueReadWriter, sch schema.Schema, idx schema.Index, primary prolly.Map, cb DupEntryCb) (durable.Index, error) {
empty, err := durable.NewEmptyIndex(ctx, vrw, idx.Schema())
func BuildUniqueProllyIndex(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, sch schema.Schema, idx schema.Index, primary prolly.Map, cb DupEntryCb) (durable.Index, error) {
empty, err := durable.NewEmptyIndex(ctx, vrw, ns, idx.Schema())
if err != nil {
return nil, err
}
@@ -27,6 +27,7 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -52,7 +53,7 @@ var id3, _ = uuid.NewRandom()
func TestIndexEditorConcurrency(t *testing.T) {
format := types.Format_Default
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, _, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
colColl := schema.NewColCollection(
schema.NewColumn("pk", 0, types.IntKind, true),
@@ -147,7 +148,7 @@ func TestIndexEditorConcurrency(t *testing.T) {
func TestIndexEditorConcurrencyPostInsert(t *testing.T) {
format := types.Format_Default
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, _, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
colColl := schema.NewColCollection(
schema.NewColumn("pk", 0, types.IntKind, true),
@@ -239,7 +240,7 @@ func TestIndexEditorConcurrencyPostInsert(t *testing.T) {
func TestIndexEditorUniqueMultipleNil(t *testing.T) {
format := types.Format_Default
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, _, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
colColl := schema.NewColCollection(
schema.NewColumn("pk", 0, types.IntKind, true),
@@ -284,7 +285,7 @@ func TestIndexEditorUniqueMultipleNil(t *testing.T) {
func TestIndexEditorWriteAfterFlush(t *testing.T) {
format := types.Format_Default
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, _, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
colColl := schema.NewColCollection(
schema.NewColumn("pk", 0, types.IntKind, true),
@@ -352,7 +353,7 @@ func TestIndexEditorWriteAfterFlush(t *testing.T) {
func TestIndexEditorUniqueErrorDoesntPersist(t *testing.T) {
format := types.Format_Default
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, _, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
colColl := schema.NewColCollection(
schema.NewColumn("pk", 0, types.IntKind, true),
@@ -394,7 +395,7 @@ func TestIndexEditorUniqueErrorDoesntPersist(t *testing.T) {
}
func TestIndexRebuildingWithZeroIndexes(t *testing.T) {
_, vrw, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
_, vrw, ns, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
tSchema := createTestSchema(t)
_, err := tSchema.Indexes().RemoveIndex(testSchemaIndexName)
require.NoError(t, err)
@@ -402,7 +403,7 @@ func TestIndexRebuildingWithZeroIndexes(t *testing.T) {
require.NoError(t, err)
rowData, _ := createTestRowData(t, vrw, tSchema)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), vrw, tSchema, rowData)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), vrw, ns, tSchema, rowData)
require.NoError(t, err)
opts := TestEditorOptions(vrw)
@@ -416,7 +417,7 @@ func TestIndexRebuildingWithZeroIndexes(t *testing.T) {
}
func TestIndexRebuildingWithOneIndex(t *testing.T) {
_, vrw, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
_, vrw, ns, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
tSchema := createTestSchema(t)
_, err := tSchema.Indexes().RemoveIndex(testSchemaIndexAge)
require.NoError(t, err)
@@ -437,7 +438,7 @@ func TestIndexRebuildingWithOneIndex(t *testing.T) {
require.NoError(t, err)
}
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), vrw, tSchema, rowData)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), vrw, ns, tSchema, rowData)
require.NoError(t, err)
var indexRows []row.Row
@@ -468,7 +469,7 @@ func TestIndexRebuildingWithOneIndex(t *testing.T) {
}
func TestIndexRebuildingWithTwoIndexes(t *testing.T) {
_, vrw, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
_, vrw, ns, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
tSchema := createTestSchema(t)
indexName := tSchema.Indexes().GetByName(testSchemaIndexName)
@@ -482,7 +483,7 @@ func TestIndexRebuildingWithTwoIndexes(t *testing.T) {
rowData, rows := createTestRowData(t, vrw, tSchema)
indexNameExpectedRows, indexAgeExpectedRows := rowsToIndexRows(t, rows, indexName, indexAge)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), vrw, tSchema, rowData)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), vrw, ns, tSchema, rowData)
require.NoError(t, err)
opts := TestEditorOptions(vrw)
@@ -592,7 +593,7 @@ func TestIndexRebuildingWithTwoIndexes(t *testing.T) {
}
func TestIndexRebuildingUniqueSuccessOneCol(t *testing.T) {
_, vrw, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
_, vrw, ns, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
colColl := schema.NewColCollection(
schema.NewColumn("pk1", 1, types.IntKind, true, schema.NotNullConstraint{}),
schema.NewColumn("v1", 2, types.IntKind, false),
@@ -605,7 +606,7 @@ func TestIndexRebuildingUniqueSuccessOneCol(t *testing.T) {
row.TaggedValues{1: types.Int(2), 2: types.Int(2), 3: types.Int(2)},
row.TaggedValues{1: types.Int(3), 2: types.Int(3), 3: types.Int(3)},
)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), vrw, sch, rowData)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), vrw, ns, sch, rowData)
require.NoError(t, err)
index, err := sch.Indexes().AddIndexByColTags("idx_v1", []uint64{2}, schema.IndexProperties{IsUnique: true, Comment: ""})
@@ -622,7 +623,7 @@ func TestIndexRebuildingUniqueSuccessOneCol(t *testing.T) {
}
func TestIndexRebuildingUniqueSuccessTwoCol(t *testing.T) {
_, vrw, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
_, vrw, ns, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
colColl := schema.NewColCollection(
schema.NewColumn("pk1", 1, types.IntKind, true, schema.NotNullConstraint{}),
schema.NewColumn("v1", 2, types.IntKind, false),
@@ -635,7 +636,7 @@ func TestIndexRebuildingUniqueSuccessTwoCol(t *testing.T) {
row.TaggedValues{1: types.Int(2), 2: types.Int(1), 3: types.Int(2)},
row.TaggedValues{1: types.Int(3), 2: types.Int(2), 3: types.Int(2)},
)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), vrw, sch, rowData)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), vrw, ns, sch, rowData)
require.NoError(t, err)
index, err := sch.Indexes().AddIndexByColTags("idx_v1", []uint64{2, 3}, schema.IndexProperties{IsUnique: true, Comment: ""})
@@ -652,7 +653,7 @@ func TestIndexRebuildingUniqueSuccessTwoCol(t *testing.T) {
}
func TestIndexRebuildingUniqueFailOneCol(t *testing.T) {
_, vrw, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
_, vrw, ns, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
colColl := schema.NewColCollection(
schema.NewColumn("pk1", 1, types.IntKind, true, schema.NotNullConstraint{}),
schema.NewColumn("v1", 2, types.IntKind, false),
@@ -665,7 +666,7 @@ func TestIndexRebuildingUniqueFailOneCol(t *testing.T) {
row.TaggedValues{1: types.Int(2), 2: types.Int(2), 3: types.Int(2)},
row.TaggedValues{1: types.Int(3), 2: types.Int(2), 3: types.Int(3)},
)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), vrw, sch, rowData)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), vrw, ns, sch, rowData)
require.NoError(t, err)
index, err := sch.Indexes().AddIndexByColTags("idx_v1", []uint64{2}, schema.IndexProperties{IsUnique: true, Comment: ""})
@@ -682,7 +683,7 @@ func TestIndexRebuildingUniqueFailOneCol(t *testing.T) {
}
func TestIndexRebuildingUniqueFailTwoCol(t *testing.T) {
_, vrw, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
_, vrw, ns, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
colColl := schema.NewColCollection(
schema.NewColumn("pk1", 1, types.IntKind, true, schema.NotNullConstraint{}),
schema.NewColumn("v1", 2, types.IntKind, false),
@@ -696,7 +697,7 @@ func TestIndexRebuildingUniqueFailTwoCol(t *testing.T) {
row.TaggedValues{1: types.Int(3), 2: types.Int(2), 3: types.Int(2)},
row.TaggedValues{1: types.Int(4), 2: types.Int(1), 3: types.Int(2)},
)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), vrw, sch, rowData)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), vrw, ns, sch, rowData)
require.NoError(t, err)
index, err := sch.Indexes().AddIndexByColTags("idx_v1", []uint64{2, 3}, schema.IndexProperties{IsUnique: true, Comment: ""})
@@ -717,7 +718,7 @@ func TestIndexEditorCapacityExceeded(t *testing.T) {
// In the event that we reach the iea capacity on Undo, we need to verify that all code paths fail and remain failing
ctx := context.Background()
format := types.Format_Default
_, vrw, err := dbfactory.MemFactory{}.CreateDB(ctx, format, nil, nil)
_, vrw, _, err := dbfactory.MemFactory{}.CreateDB(ctx, format, nil, nil)
require.NoError(t, err)
colColl := schema.NewColCollection(
schema.NewColumn("pk", 0, types.IntKind, true),
@@ -830,8 +831,8 @@ func createTestSchema(t *testing.T) schema.Schema {
return sch
}
func createTableWithoutIndexRebuilding(ctx context.Context, vrw types.ValueReadWriter, sch schema.Schema, rowData types.Map) (*doltdb.Table, error) {
return doltdb.NewNomsTable(ctx, vrw, sch, rowData, nil, nil)
func createTableWithoutIndexRebuilding(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, sch schema.Schema, rowData types.Map) (*doltdb.Table, error) {
return doltdb.NewNomsTable(ctx, vrw, ns, sch, rowData, nil, nil)
}
func rowsToIndexRows(t *testing.T, rows []row.Row, indexName schema.Index, indexAge schema.Index) (indexNameExpectedRows []row.Row, indexAgeExpectedRows []row.Row) {
@@ -32,7 +32,7 @@ import (
func TestKeylessTableEditorConcurrency(t *testing.T) {
format := types.Format_Default
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, ns, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
colColl := schema.NewColCollection(
schema.NewColumn("v0", 0, types.IntKind, false),
@@ -42,7 +42,7 @@ func TestKeylessTableEditorConcurrency(t *testing.T) {
require.NoError(t, err)
emptyMap, err := types.NewMap(context.Background(), vrw)
require.NoError(t, err)
table, err := doltdb.NewNomsTable(context.Background(), vrw, tableSch, emptyMap, nil, nil)
table, err := doltdb.NewNomsTable(context.Background(), vrw, ns, tableSch, emptyMap, nil, nil)
require.NoError(t, err)
opts := TestEditorOptions(vrw)
@@ -140,7 +140,7 @@ func TestKeylessTableEditorConcurrency(t *testing.T) {
func TestKeylessTableEditorConcurrencyPostInsert(t *testing.T) {
format := types.Format_Default
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, ns, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
colColl := schema.NewColCollection(
schema.NewColumn("v0", 0, types.IntKind, false),
@@ -150,7 +150,7 @@ func TestKeylessTableEditorConcurrencyPostInsert(t *testing.T) {
require.NoError(t, err)
emptyMap, err := types.NewMap(context.Background(), vrw)
require.NoError(t, err)
table, err := doltdb.NewNomsTable(context.Background(), vrw, tableSch, emptyMap, nil, nil)
table, err := doltdb.NewNomsTable(context.Background(), vrw, ns, tableSch, emptyMap, nil, nil)
require.NoError(t, err)
opts := TestEditorOptions(vrw)
@@ -247,7 +247,7 @@ func TestKeylessTableEditorConcurrencyPostInsert(t *testing.T) {
func TestKeylessTableEditorWriteAfterFlush(t *testing.T) {
format := types.Format_Default
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, ns, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
colColl := schema.NewColCollection(
schema.NewColumn("v0", 0, types.IntKind, false),
@@ -257,7 +257,7 @@ func TestKeylessTableEditorWriteAfterFlush(t *testing.T) {
require.NoError(t, err)
emptyMap, err := types.NewMap(context.Background(), vrw)
require.NoError(t, err)
table, err := doltdb.NewNomsTable(context.Background(), vrw, tableSch, emptyMap, nil, nil)
table, err := doltdb.NewNomsTable(context.Background(), vrw, ns, tableSch, emptyMap, nil, nil)
require.NoError(t, err)
opts := TestEditorOptions(vrw)
@@ -328,7 +328,7 @@ func TestKeylessTableEditorWriteAfterFlush(t *testing.T) {
func TestKeylessTableEditorDuplicateKeyHandling(t *testing.T) {
format := types.Format_Default
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, ns, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
colColl := schema.NewColCollection(
schema.NewColumn("v0", 0, types.IntKind, false),
@@ -338,7 +338,7 @@ func TestKeylessTableEditorDuplicateKeyHandling(t *testing.T) {
require.NoError(t, err)
emptyMap, err := types.NewMap(context.Background(), vrw)
require.NoError(t, err)
table, err := doltdb.NewNomsTable(context.Background(), vrw, tableSch, emptyMap, nil, nil)
table, err := doltdb.NewNomsTable(context.Background(), vrw, ns, tableSch, emptyMap, nil, nil)
require.NoError(t, err)
opts := TestEditorOptions(vrw)
@@ -418,7 +418,7 @@ func TestKeylessTableEditorDuplicateKeyHandling(t *testing.T) {
func TestKeylessTableEditorMultipleIndexErrorHandling(t *testing.T) {
ctx := context.Background()
format := types.Format_Default
_, vrw, err := dbfactory.MemFactory{}.CreateDB(ctx, format, nil, nil)
_, vrw, ns, err := dbfactory.MemFactory{}.CreateDB(ctx, format, nil, nil)
require.NoError(t, err)
opts := TestEditorOptions(vrw)
colColl := schema.NewColCollection(
@@ -437,7 +437,7 @@ func TestKeylessTableEditorMultipleIndexErrorHandling(t *testing.T) {
require.NoError(t, err)
emptyMap, err := types.NewMap(ctx, vrw)
require.NoError(t, err)
table, err := doltdb.NewNomsTable(ctx, vrw, tableSch, emptyMap, nil, nil)
table, err := doltdb.NewNomsTable(ctx, vrw, ns, tableSch, emptyMap, nil, nil)
require.NoError(t, err)
table, err = RebuildAllIndexes(ctx, table, opts)
require.NoError(t, err)
@@ -575,7 +575,7 @@ func TestKeylessTableEditorMultipleIndexErrorHandling(t *testing.T) {
func TestKeylessTableEditorIndexCardinality(t *testing.T) {
ctx := context.Background()
format := types.Format_Default
_, vrw, err := dbfactory.MemFactory{}.CreateDB(ctx, format, nil, nil)
_, vrw, ns, err := dbfactory.MemFactory{}.CreateDB(ctx, format, nil, nil)
require.NoError(t, err)
opts := TestEditorOptions(vrw)
colColl := schema.NewColCollection(
@@ -590,7 +590,7 @@ func TestKeylessTableEditorIndexCardinality(t *testing.T) {
require.NoError(t, err)
emptyMap, err := types.NewMap(ctx, vrw)
require.NoError(t, err)
table, err := doltdb.NewNomsTable(ctx, vrw, tableSch, emptyMap, nil, nil)
table, err := doltdb.NewNomsTable(ctx, vrw, ns, tableSch, emptyMap, nil, nil)
require.NoError(t, err)
table, err = RebuildAllIndexes(ctx, table, opts)
require.NoError(t, err)
@@ -42,7 +42,7 @@ const (
func TestTableEditorConcurrency(t *testing.T) {
format := types.Format_Default
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, ns, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
opts := TestEditorOptions(vrw)
colColl := schema.NewColCollection(
@@ -53,7 +53,7 @@ func TestTableEditorConcurrency(t *testing.T) {
require.NoError(t, err)
emptyMap, err := types.NewMap(context.Background(), vrw)
require.NoError(t, err)
table, err := doltdb.NewNomsTable(context.Background(), vrw, tableSch, emptyMap, nil, nil)
table, err := doltdb.NewNomsTable(context.Background(), vrw, ns, tableSch, emptyMap, nil, nil)
require.NoError(t, err)
for i := 0; i < tableEditorConcurrencyIterations; i++ {
@@ -137,7 +137,7 @@ func TestTableEditorConcurrency(t *testing.T) {
func TestTableEditorConcurrencyPostInsert(t *testing.T) {
format := types.Format_Default
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, ns, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
opts := TestEditorOptions(vrw)
colColl := schema.NewColCollection(
@@ -148,7 +148,7 @@ func TestTableEditorConcurrencyPostInsert(t *testing.T) {
require.NoError(t, err)
emptyMap, err := types.NewMap(context.Background(), vrw)
require.NoError(t, err)
table, err := doltdb.NewNomsTable(context.Background(), vrw, tableSch, emptyMap, nil, nil)
table, err := doltdb.NewNomsTable(context.Background(), vrw, ns, tableSch, emptyMap, nil, nil)
require.NoError(t, err)
tableEditor, err := newPkTableEditor(context.Background(), table, tableSch, tableName, opts)
@@ -230,7 +230,7 @@ func TestTableEditorConcurrencyPostInsert(t *testing.T) {
func TestTableEditorWriteAfterFlush(t *testing.T) {
format := types.Format_Default
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, ns, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
opts := TestEditorOptions(vrw)
colColl := schema.NewColCollection(
@@ -241,7 +241,7 @@ func TestTableEditorWriteAfterFlush(t *testing.T) {
require.NoError(t, err)
emptyMap, err := types.NewMap(context.Background(), vrw)
require.NoError(t, err)
table, err := doltdb.NewNomsTable(context.Background(), vrw, tableSch, emptyMap, nil, nil)
table, err := doltdb.NewNomsTable(context.Background(), vrw, ns, tableSch, emptyMap, nil, nil)
require.NoError(t, err)
tableEditor, err := newPkTableEditor(context.Background(), table, tableSch, tableName, opts)
@@ -300,7 +300,7 @@ func TestTableEditorWriteAfterFlush(t *testing.T) {
func TestTableEditorDuplicateKeyHandling(t *testing.T) {
format := types.Format_Default
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, ns, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
opts := TestEditorOptions(vrw)
colColl := schema.NewColCollection(
@@ -311,7 +311,7 @@ func TestTableEditorDuplicateKeyHandling(t *testing.T) {
require.NoError(t, err)
emptyMap, err := types.NewMap(context.Background(), vrw)
require.NoError(t, err)
table, err := doltdb.NewNomsTable(context.Background(), vrw, tableSch, emptyMap, nil, nil)
table, err := doltdb.NewNomsTable(context.Background(), vrw, ns, tableSch, emptyMap, nil, nil)
require.NoError(t, err)
tableEditor, err := newPkTableEditor(context.Background(), table, tableSch, tableName, opts)
@@ -379,7 +379,7 @@ func TestTableEditorDuplicateKeyHandling(t *testing.T) {
func TestTableEditorMultipleIndexErrorHandling(t *testing.T) {
ctx := context.Background()
format := types.Format_Default
_, vrw, err := dbfactory.MemFactory{}.CreateDB(ctx, format, nil, nil)
_, vrw, ns, err := dbfactory.MemFactory{}.CreateDB(ctx, format, nil, nil)
require.NoError(t, err)
opts := TestEditorOptions(vrw)
colColl := schema.NewColCollection(
@@ -398,7 +398,7 @@ func TestTableEditorMultipleIndexErrorHandling(t *testing.T) {
require.NoError(t, err)
emptyMap, err := types.NewMap(ctx, vrw)
require.NoError(t, err)
table, err := doltdb.NewNomsTable(ctx, vrw, tableSch, emptyMap, nil, nil)
table, err := doltdb.NewNomsTable(ctx, vrw, ns, tableSch, emptyMap, nil, nil)
require.NoError(t, err)
table, err = RebuildAllIndexes(ctx, table, opts)
require.NoError(t, err)
@@ -115,6 +115,7 @@ func TestKeylessTableReader(t *testing.T) {
dEnv := dtu.CreateTestEnv()
ctx := context.Background()
vrw := dEnv.DoltDB.ValueReadWriter()
ns := dEnv.DoltDB.NodeStore()
compareRows := func(t *testing.T, expected []sql.Row, rdr table.SqlTableReader) {
for _, exp := range expected {
@@ -130,7 +131,7 @@ func TestKeylessTableReader(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
rowMap := makeBag(vrw, sch, test.rows...)
tbl, err := doltdb.NewNomsTable(ctx, vrw, sch, rowMap, nil, nil)
tbl, err := doltdb.NewNomsTable(ctx, vrw, ns, sch, rowMap, nil, nil)
require.NoError(t, err)
rdr, err := table.NewTableReader(ctx, tbl)
require.NoError(t, err)
@@ -138,7 +139,7 @@ func TestKeylessTableReader(t *testing.T) {
})
t.Run(test.name+"_buffered", func(t *testing.T) {
rowMap := makeBag(vrw, sch, test.rows...)
tbl, err := doltdb.NewNomsTable(ctx, vrw, sch, rowMap, nil, nil)
tbl, err := doltdb.NewNomsTable(ctx, vrw, ns, sch, rowMap, nil, nil)
require.NoError(t, err)
rdr, err := table.NewBufferedTableReader(ctx, tbl)
require.NoError(t, err)
@@ -93,11 +93,11 @@ func TestEndToEnd(t *testing.T) {
empty, err := types.NewMap(ctx, root.VRW())
require.NoError(t, err)
indexes := durable.NewIndexSet(ctx, root.VRW())
indexes := durable.NewIndexSet(ctx, root.VRW(), root.NodeStore())
indexes, err = indexes.PutNomsIndex(ctx, dtestutils.IndexName, empty)
require.NoError(t, err)
tbl, err := doltdb.NewNomsTable(ctx, root.VRW(), tt.sch, empty, indexes, nil)
tbl, err := doltdb.NewNomsTable(ctx, root.VRW(), root.NodeStore(), tt.sch, empty, indexes, nil)
require.NoError(t, err)
root, err = root.PutTable(ctx, tableName, tbl)
require.NoError(t, err)
@@ -0,0 +1,99 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tabular
import (
"context"
"io"
"github.com/dolthub/go-mysql-server/sql"
"github.com/fatih/color"
"github.com/dolthub/dolt/go/libraries/doltcore/diff"
)
// ConflictVersion identifies which side of a conflict a printed row belongs
// to (e.g. base, ours, theirs).
type ConflictVersion string

// FixedWidthConflictTableWriter prints conflict rows, delegating layout to an
// underlying FixedWidthTableWriter that is constructed with two extra leading
// columns (diff marker and version name).
type FixedWidthConflictTableWriter struct {
	tableWriter *FixedWidthTableWriter
}
// NewFixedWidthConflictTableWriter returns a table writer that prints
// conflicts. |schema| is the schema of the table that the conflicts are being
// printed for; two leading columns are prepended to it — one for the
// diff-type marker (*, -, +) and one for the version name (base, ours,
// theirs). |numSamples| is forwarded to the underlying fixed-width writer.
func NewFixedWidthConflictTableWriter(schema sql.Schema, wr io.WriteCloser, numSamples int) *FixedWidthConflictTableWriter {
	// Build the full schema: marker column, version column, then the table's
	// own columns.
	full := make(sql.Schema, 0, len(schema)+2)
	full = append(full,
		// diff type: *, -, +
		&sql.Column{Name: " ", Type: sql.Text},
		// version name: base, ours, theirs
		&sql.Column{Name: " ", Type: sql.Text},
	)
	full = append(full, schema...)
	return &FixedWidthConflictTableWriter{
		tableWriter: NewFixedWidthTableWriter(full, wr, numSamples),
	}
}
// WriteRow writes one conflict row. |version| names the conflict side the row
// came from, and |rowDiffType| selects both the leading marker and the row
// color. Change types without a dedicated marker (e.g. diff.None) get an
// empty marker.
func (w FixedWidthConflictTableWriter) WriteRow(
	ctx context.Context,
	version string,
	row sql.Row,
	rowDiffType diff.ChangeType,
) error {
	marker := ""
	switch rowDiffType {
	case diff.Added:
		marker = " + "
	case diff.Removed:
		marker = " - "
	case diff.ModifiedNew:
		marker = " * "
	}
	// Prefix the marker and version columns ahead of the row's own values.
	out := make(sql.Row, 0, len(row)+2)
	out = append(out, marker, version)
	out = append(out, row...)
	return w.tableWriter.WriteRow(ctx, out, rowColorsForDiffType(rowDiffType, 2, len(row)))
}
// Close closes the underlying table writer, flushing any buffered rows.
func (w FixedWidthConflictTableWriter) Close(ctx context.Context) error {
	return w.tableWriter.Close(ctx)
}
// rowColorsForDiffType returns a per-column color slice for one row: the
// first |n| entries are nil (uncolored) and the remaining |m| entries carry
// the color associated with |diffType|.
func rowColorsForDiffType(diffType diff.ChangeType, n int, m int) []*color.Color {
	colors := make([]*color.Color, n+m)
	// The leading n entries stay at their zero value (nil); only the trailing
	// m entries are colored.
	c := rowConflictColors[diffType]
	for i := n; i < n+m; i++ {
		colors[i] = c
	}
	return colors
}
// rowConflictColors maps each row change type to the terminal color it is
// printed with. A nil entry (diff.None, or any type absent from the map)
// leaves the row uncolored.
var rowConflictColors = map[diff.ChangeType]*color.Color{
	diff.Added:       color.New(color.FgGreen),
	diff.ModifiedNew: color.New(color.FgYellow),
	diff.Removed:     color.New(color.FgRed, color.CrossedOut),
	diff.None:        nil,
}
+3 -1
View File
@@ -22,6 +22,7 @@ import (
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/nbs"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -47,7 +48,8 @@ func newNBSProllyStore(dir string) keyValStore {
func newProllyStore(ctx context.Context, cs chunks.ChunkStore) keyValStore {
vrw := types.NewValueStore(cs)
db := datas.NewTypesDatabase(vrw)
ns := tree.NewNodeStore(cs)
db := datas.NewTypesDatabase(vrw, ns)
m, err := types.NewMap(ctx, vrw)
if err != nil {
panic(err)
+1 -1
View File
@@ -73,7 +73,7 @@ func runDs(ctx context.Context, args []string) int {
if len(args) >= 1 {
dbSpec = args[0]
}
store, _, err := cfg.GetDatabase(ctx, dbSpec)
store, _, _, err := cfg.GetDatabase(ctx, dbSpec)
util.CheckError(err)
defer store.Close()
+7 -6
View File
@@ -37,6 +37,7 @@ import (
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/merge"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/store/util/status"
"github.com/dolthub/dolt/go/store/util/verbose"
@@ -75,7 +76,7 @@ func runMerge(ctx context.Context, args []string) int {
if len(args) != 4 {
util.CheckErrorNoUsage(fmt.Errorf("incorrect number of arguments"))
}
db, vrw, err := cfg.GetDatabase(ctx, args[0])
db, vrw, ns, err := cfg.GetDatabase(ctx, args[0])
util.CheckError(err)
defer db.Close()
@@ -86,7 +87,7 @@ func runMerge(ctx context.Context, args []string) int {
return 1
}
left, right, ancestor, err := getMergeCandidates(ctx, db, vrw, leftDS, rightDS)
left, right, ancestor, err := getMergeCandidates(ctx, db, vrw, ns, leftDS, rightDS)
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
@@ -149,14 +150,14 @@ func resolveDatasets(ctx context.Context, db datas.Database, leftName, rightName
return
}
func getMergeCandidates(ctx context.Context, db datas.Database, vrw types.ValueReadWriter, leftDS, rightDS datas.Dataset) (left, right, ancestor types.Value, err error) {
func getMergeCandidates(ctx context.Context, db datas.Database, vrw types.ValueReadWriter, ns tree.NodeStore, leftDS, rightDS datas.Dataset) (left, right, ancestor types.Value, err error) {
leftRef, ok, err := leftDS.MaybeHeadRef()
d.PanicIfError(err)
checkIfTrue(!ok, "Dataset %s has no data", leftDS.ID())
rightRef, ok, err := rightDS.MaybeHeadRef()
d.PanicIfError(err)
checkIfTrue(!ok, "Dataset %s has no data", rightDS.ID())
ancestorCommit, ok := getCommonAncestor(ctx, leftRef, rightRef, vrw)
ancestorCommit, ok := getCommonAncestor(ctx, leftRef, rightRef, vrw, ns)
checkIfTrue(!ok, "Datasets %s and %s have no common ancestor", leftDS.ID(), rightDS.ID())
leftHead, ok, err := leftDS.MaybeHeadValue()
@@ -180,12 +181,12 @@ func getMergeCandidates(ctx context.Context, db datas.Database, vrw types.ValueR
}
func getCommonAncestor(ctx context.Context, r1, r2 types.Ref, vr types.ValueReader) (a types.Struct, found bool) {
func getCommonAncestor(ctx context.Context, r1, r2 types.Ref, vr types.ValueReader, ns tree.NodeStore) (a types.Struct, found bool) {
c1, err := datas.LoadCommitRef(ctx, vr, r1)
d.PanicIfError(err)
c2, err := datas.LoadCommitRef(ctx, vr, r2)
d.PanicIfError(err)
aaddr, found, err := datas.FindCommonAncestor(ctx, c1, c2, vr, vr)
aaddr, found, err := datas.FindCommonAncestor(ctx, c1, c2, vr, vr, ns, ns)
d.PanicIfError(err)
if !found {
return
+1 -1
View File
@@ -85,7 +85,7 @@ func runRoot(ctx context.Context, args []string) int {
}
// If BUG 3407 is correct, we might be able to just take cs and make a Database directly from that.
db, vrw, err := cfg.GetDatabase(ctx, args[0])
db, vrw, _, err := cfg.GetDatabase(ctx, args[0])
util.CheckErrorNoUsage(err)
defer db.Close()
v, err := vrw.ReadValue(ctx, h)
+1 -1
View File
@@ -37,7 +37,7 @@ func nomsStats(ctx context.Context, noms *kingpin.Application) (*kingpin.CmdClau
return stats, func(input string) int {
cfg := config.NewResolver()
store, _, err := cfg.GetDatabase(ctx, *database)
store, _, _, err := cfg.GetDatabase(ctx, *database)
util.CheckError(err)
defer store.Close()
+4 -3
View File
@@ -28,6 +28,7 @@ import (
"github.com/dolthub/dolt/go/store/chunks"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/spec"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/store/util/verbose"
@@ -130,13 +131,13 @@ func (r *Resolver) ResolvePathSpec(str string) string {
// Resolve string to database spec. If a config is present,
// - resolve a db alias to its db spec
// - resolve "" to the default db spec
func (r *Resolver) GetDatabase(ctx context.Context, str string) (datas.Database, types.ValueReadWriter, error) {
func (r *Resolver) GetDatabase(ctx context.Context, str string) (datas.Database, types.ValueReadWriter, tree.NodeStore, error) {
dbc := r.DbConfigForDbSpec(str)
sp, err := spec.ForDatabaseOpts(r.verbose(ctx, str, dbc.Url), specOptsForConfig(r.config, dbc))
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
return sp.GetDatabase(ctx), sp.GetVRW(ctx), nil
return sp.GetDatabase(ctx), sp.GetVRW(ctx), sp.GetNodeStore(ctx), nil
}
// Resolve string to a chunkstore. Like ResolveDatabase, but returns the underlying ChunkStore
+11 -10
View File
@@ -34,6 +34,7 @@ import (
"github.com/dolthub/dolt/go/store/d"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/nomdl"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/store/val"
)
@@ -127,12 +128,12 @@ func newCommit(ctx context.Context, value types.Value, parentsList types.List, p
}
}
func NewCommitForValue(ctx context.Context, cs chunks.ChunkStore, vrw types.ValueReadWriter, v types.Value, opts CommitOptions) (*Commit, error) {
func NewCommitForValue(ctx context.Context, cs chunks.ChunkStore, vrw types.ValueReadWriter, ns tree.NodeStore, v types.Value, opts CommitOptions) (*Commit, error) {
if opts.Parents == nil || len(opts.Parents) == 0 {
return nil, errors.New("cannot create commit without parents")
}
return newCommitForValue(ctx, cs, vrw, v, opts)
return newCommitForValue(ctx, cs, vrw, ns, v, opts)
}
func commit_flatbuffer(vaddr hash.Hash, opts CommitOptions, heights []uint64, parentsClosureAddr hash.Hash) ([]byte, uint64) {
@@ -183,7 +184,7 @@ var commitKeyTupleDesc = val.NewTupleDescriptor(
)
var commitValueTupleDesc = val.NewTupleDescriptor()
func newCommitForValue(ctx context.Context, cs chunks.ChunkStore, vrw types.ValueReadWriter, v types.Value, opts CommitOptions) (*Commit, error) {
func newCommitForValue(ctx context.Context, cs chunks.ChunkStore, vrw types.ValueReadWriter, ns tree.NodeStore, v types.Value, opts CommitOptions) (*Commit, error) {
if opts.Meta == nil {
opts.Meta = &CommitMeta{}
}
@@ -203,7 +204,7 @@ func newCommitForValue(ctx context.Context, cs chunks.ChunkStore, vrw types.Valu
parents[i] = serial.GetRootAsCommit([]byte(parentValues[i].(types.SerialMessage)), 0)
heights[i] = parents[i].Height()
}
parentClosureAddr, err := writeFbCommitParentClosure(ctx, cs, vrw, parents, opts.Parents)
parentClosureAddr, err := writeFbCommitParentClosure(ctx, cs, vrw, ns, parents, opts.Parents)
if err != nil {
return nil, err
}
@@ -317,7 +318,7 @@ func LoadCommitAddr(ctx context.Context, vr types.ValueReader, addr hash.Hash) (
return commitFromValue(vr.Format(), v)
}
func findCommonAncestorUsingParentsList(ctx context.Context, c1, c2 *Commit, vr1, vr2 types.ValueReader) (hash.Hash, bool, error) {
func findCommonAncestorUsingParentsList(ctx context.Context, c1, c2 *Commit, vr1, vr2 types.ValueReader, ns1, ns2 tree.NodeStore) (hash.Hash, bool, error) {
c1Q, c2Q := CommitByHeightHeap{c1}, CommitByHeightHeap{c2}
for !c1Q.Empty() && !c2Q.Empty() {
c1Ht, c2Ht := c1Q.MaxHeight(), c2Q.MaxHeight()
@@ -358,21 +359,21 @@ func findCommonAncestorUsingParentsList(ctx context.Context, c1, c2 *Commit, vr1
// This implementation makes use of the parents_closure field on the commit
// struct. If the commit does not have a materialized parents_closure, this
// implementation delegates to findCommonAncestorUsingParentsList.
func FindCommonAncestor(ctx context.Context, c1, c2 *Commit, vr1, vr2 types.ValueReader) (hash.Hash, bool, error) {
pi1, err := newParentsClosureIterator(ctx, c1, vr1)
func FindCommonAncestor(ctx context.Context, c1, c2 *Commit, vr1, vr2 types.ValueReader, ns1, ns2 tree.NodeStore) (hash.Hash, bool, error) {
pi1, err := newParentsClosureIterator(ctx, c1, vr1, ns1)
if err != nil {
return hash.Hash{}, false, err
}
if pi1 == nil {
return findCommonAncestorUsingParentsList(ctx, c1, c2, vr1, vr2)
return findCommonAncestorUsingParentsList(ctx, c1, c2, vr1, vr2, ns1, ns2)
}
pi2, err := newParentsClosureIterator(ctx, c2, vr2)
pi2, err := newParentsClosureIterator(ctx, c2, vr2, ns2)
if err != nil {
return hash.Hash{}, false, err
}
if pi2 == nil {
return findCommonAncestorUsingParentsList(ctx, c1, c2, vr1, vr2)
return findCommonAncestorUsingParentsList(ctx, c1, c2, vr1, vr2, ns1, ns2)
}
for {
+2 -14
View File
@@ -28,17 +28,7 @@ import (
"github.com/dolthub/dolt/go/store/types"
)
// hackVRToCS recovers the underlying chunks.ChunkStore from a ValueReader by
// switching on the concrete implementations known to this package (Database
// and *types.ValueStore). It panics for any other implementation — callers
// must only pass readers backed by one of these two types.
func hackVRToCS(vr types.ValueReader) chunks.ChunkStore {
	switch v := vr.(type) {
	case Database:
		return ChunkStoreFromDatabase(v)
	case *types.ValueStore:
		return v.ChunkStore()
	}
	panic("unknown ValueReader implementation...")
}
func newParentsClosureIterator(ctx context.Context, c *Commit, vr types.ValueReader) (parentsClosureIter, error) {
func newParentsClosureIterator(ctx context.Context, c *Commit, vr types.ValueReader, ns tree.NodeStore) (parentsClosureIter, error) {
sv := c.NomsValue()
if _, ok := sv.(types.SerialMessage); ok {
@@ -55,7 +45,6 @@ func newParentsClosureIterator(ctx context.Context, c *Commit, vr types.ValueRea
return nil, fmt.Errorf("internal error or data loss: dangling commit parent closure for addr %s or commit %s", addr.String(), c.Addr().String())
}
node := tree.NodeFromBytes(v.(types.TupleRowStorage))
ns := tree.NewNodeStore(hackVRToCS(vr))
cc := prolly.NewCommitClosure(node, ns)
ci, err := cc.IterAllReverse(ctx)
if err != nil {
@@ -384,7 +373,7 @@ func writeTypesCommitParentClosure(ctx context.Context, vrw types.ValueReadWrite
return r, true, nil
}
func writeFbCommitParentClosure(ctx context.Context, cs chunks.ChunkStore, vrw types.ValueReadWriter, parents []*serial.Commit, parentAddrs []hash.Hash) (hash.Hash, error) {
func writeFbCommitParentClosure(ctx context.Context, cs chunks.ChunkStore, vrw types.ValueReadWriter, ns tree.NodeStore, parents []*serial.Commit, parentAddrs []hash.Hash) (hash.Hash, error) {
if len(parents) == 0 {
// We write an empty hash for parent-less commits of height 1.
return hash.Hash{}, nil
@@ -399,7 +388,6 @@ func writeFbCommitParentClosure(ctx context.Context, cs chunks.ChunkStore, vrw t
return hash.Hash{}, fmt.Errorf("writeCommitParentClosure: ReadManyValues: %w", err)
}
// Load them as ProllyTrees.
ns := tree.NewNodeStore(cs)
closures := make([]prolly.CommitClosure, len(parents))
for i := range addrs {
if !types.IsNull(vs[i]) {
+8 -7
View File
@@ -33,6 +33,7 @@ import (
"github.com/dolthub/dolt/go/store/d"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/nomdl"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -289,7 +290,7 @@ func toRefList(vrw types.ValueReadWriter, commits ...types.Struct) (types.List,
return le.List(context.Background())
}
func commonAncWithSetClosure(ctx context.Context, c1, c2 *Commit, vr1, vr2 types.ValueReader) (a hash.Hash, ok bool, err error) {
func commonAncWithSetClosure(ctx context.Context, c1, c2 *Commit, vr1, vr2 types.ValueReader, ns1, ns2 tree.NodeStore) (a hash.Hash, ok bool, err error) {
closure, err := NewSetCommitClosure(ctx, vr1, c1)
if err != nil {
return hash.Hash{}, false, err
@@ -297,14 +298,14 @@ func commonAncWithSetClosure(ctx context.Context, c1, c2 *Commit, vr1, vr2 types
return FindClosureCommonAncestor(ctx, closure, c2, vr2)
}
func commonAncWithLazyClosure(ctx context.Context, c1, c2 *Commit, vr1, vr2 types.ValueReader) (a hash.Hash, ok bool, err error) {
func commonAncWithLazyClosure(ctx context.Context, c1, c2 *Commit, vr1, vr2 types.ValueReader, ns1, ns2 tree.NodeStore) (a hash.Hash, ok bool, err error) {
closure := NewLazyCommitClosure(c1, vr1)
return FindClosureCommonAncestor(ctx, closure, c2, vr2)
}
// Assert that c is the common ancestor of a and b, using multiple common ancestor methods.
func assertCommonAncestor(t *testing.T, expected, a, b types.Value, ldb, rdb *database, desc string) {
type caFinder func(ctx context.Context, c1, c2 *Commit, vr1, vr2 types.ValueReader) (a hash.Hash, ok bool, err error)
type caFinder func(ctx context.Context, c1, c2 *Commit, vr1, vr2 types.ValueReader, ns1, ns2 tree.NodeStore) (a hash.Hash, ok bool, err error)
methods := map[string]caFinder{
"FindCommonAncestor": FindCommonAncestor,
@@ -324,7 +325,7 @@ func assertCommonAncestor(t *testing.T, expected, a, b types.Value, ldb, rdb *da
t.Run(fmt.Sprintf("%s/%s", name, desc), func(t *testing.T) {
assert := assert.New(t)
ctx := context.Background()
found, ok, err := method(ctx, ac, bc, ldb, rdb)
found, ok, err := method(ctx, ac, bc, ldb, rdb, ldb.ns, rdb.ns)
assert.NoError(err)
if assert.True(ok) {
tv, err := ldb.ReadValue(context.Background(), found)
@@ -424,7 +425,7 @@ func TestCommitParentsClosure(t *testing.T) {
if !assert.NoError(err) {
return
}
iter, err := newParentsClosureIterator(ctx, c, db)
iter, err := newParentsClosureIterator(ctx, c, db, db.ns)
if !assert.NoError(err) {
return
}
@@ -555,7 +556,7 @@ func TestFindCommonAncestor(t *testing.T) {
require.NoError(t, err)
a6c, err := LoadCommitRef(ctx, db, mustRef(types.NewRef(a6, db.Format())))
require.NoError(t, err)
found, ok, err := FindCommonAncestor(ctx, d2c, a6c, db, db)
found, ok, err := FindCommonAncestor(ctx, d2c, a6c, db, db, db.ns, db.ns)
require.NoError(t, err)
if !assert.False(ok) {
@@ -637,7 +638,7 @@ func TestFindCommonAncestor(t *testing.T) {
require.NoError(t, err)
ra9c, err := commitFromValue(rdb.Format(), ra9)
require.NoError(t, err)
_, _, err = FindCommonAncestor(context.Background(), ra9c, a9c, db, rdb)
_, _, err = FindCommonAncestor(context.Background(), ra9c, a9c, db, rdb, db.ns, rdb.ns)
assert.Error(err)
})
}
+6 -3
View File
@@ -30,6 +30,7 @@ import (
"github.com/dolthub/dolt/go/store/nbs"
"github.com/dolthub/dolt/go/store/chunks"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -143,11 +144,13 @@ type Database interface {
}
func NewDatabase(cs chunks.ChunkStore) Database {
return newDatabase(types.NewValueStore(cs))
vs := types.NewValueStore(cs)
ns := tree.NewNodeStore(cs)
return newDatabase(vs, ns)
}
func NewTypesDatabase(vs *types.ValueStore) Database {
return newDatabase(vs)
func NewTypesDatabase(vs *types.ValueStore, ns tree.NodeStore) Database {
return newDatabase(vs, ns)
}
// GarbageCollector provides a method to remove unreferenced data from a store.
+11 -5
View File
@@ -36,6 +36,7 @@ import (
type database struct {
*types.ValueStore
rt rootTracker
ns tree.NodeStore
}
var (
@@ -50,10 +51,11 @@ type rootTracker interface {
Commit(ctx context.Context, current, last hash.Hash) (bool, error)
}
func newDatabase(vs *types.ValueStore) *database {
func newDatabase(vs *types.ValueStore, ns tree.NodeStore) *database {
return &database{
ValueStore: vs, // ValueStore is responsible for closing |cs|
rt: vs,
ns: ns,
}
}
@@ -67,6 +69,10 @@ func (db *database) chunkStore() chunks.ChunkStore {
return db.ChunkStore()
}
func (db *database) nodeStore() tree.NodeStore {
return db.ns
}
func (db *database) Stats() interface{} {
return db.ChunkStore().Stats()
}
@@ -95,7 +101,7 @@ func (db *database) loadDatasetsNomsMap(ctx context.Context, rootHash hash.Hash)
func (db *database) loadDatasetsRefmap(ctx context.Context, rootHash hash.Hash) (prolly.AddressMap, error) {
if rootHash == (hash.Hash{}) {
return prolly.NewEmptyAddressMap(tree.NewNodeStore(db.chunkStore())), nil
return prolly.NewEmptyAddressMap(db.ns), nil
}
val, err := db.ReadValue(ctx, rootHash)
@@ -107,7 +113,7 @@ func (db *database) loadDatasetsRefmap(ctx context.Context, rootHash hash.Hash)
return prolly.AddressMap{}, errors.New("Root hash doesn't exist")
}
return parse_storeroot([]byte(val.(types.SerialMessage)), db.chunkStore()), nil
return parse_storeroot([]byte(val.(types.SerialMessage)), db.nodeStore()), nil
}
type refmapDatasetsMap struct {
@@ -385,7 +391,7 @@ func (db *database) doFastForward(ctx context.Context, ds Dataset, newHeadAddr h
if err != nil {
return err
}
ancestorHash, found, err := FindCommonAncestor(ctx, currCommit, newCommit, db, db)
ancestorHash, found, err := FindCommonAncestor(ctx, currCommit, newCommit, db, db, db.ns, db.ns)
if err != nil {
return err
}
@@ -873,7 +879,7 @@ func buildNewCommit(ctx context.Context, ds Dataset, v types.Value, opts CommitO
}
}
return newCommitForValue(ctx, ds.db.chunkStore(), ds.db, v, opts)
return newCommitForValue(ctx, ds.db.chunkStore(), ds.db, ds.db.nodeStore(), v, opts)
}
func (db *database) doHeadUpdate(ctx context.Context, ds Dataset, updateFunc func(ds Dataset) error) (Dataset, error) {
+22 -6
View File
@@ -39,6 +39,7 @@ import (
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/nbs"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
)
@@ -66,6 +67,8 @@ type PullSuite struct {
sourceCS *chunks.TestStoreView
sinkVRW types.ValueReadWriter
sourceVRW types.ValueReadWriter
sinkDB datas.Database
sourceDB datas.Database
commitReads int // The number of reads triggered by commit differs across chunk store impls
}
@@ -80,7 +83,11 @@ type LocalToLocalSuite struct {
func (suite *LocalToLocalSuite) SetupTest() {
suite.sinkCS, suite.sourceCS = makeTestStoreViews()
suite.sinkVRW, suite.sourceVRW = types.NewValueStore(suite.sinkCS), types.NewValueStore(suite.sourceCS)
sinkVRW, sourceVRW := types.NewValueStore(suite.sinkCS), types.NewValueStore(suite.sourceCS)
suite.sinkVRW, suite.sourceVRW = sinkVRW, sourceVRW
suite.sourceDB = datas.NewTypesDatabase(sourceVRW, tree.NewNodeStore(suite.sourceCS))
suite.sinkDB = datas.NewTypesDatabase(sinkVRW, tree.NewNodeStore(suite.sinkCS))
}
type RemoteToLocalSuite struct {
@@ -89,7 +96,10 @@ type RemoteToLocalSuite struct {
func (suite *RemoteToLocalSuite) SetupTest() {
suite.sinkCS, suite.sourceCS = makeTestStoreViews()
suite.sinkVRW, suite.sourceVRW = types.NewValueStore(suite.sinkCS), types.NewValueStore(suite.sourceCS)
sinkVRW, sourceVRW := types.NewValueStore(suite.sinkCS), types.NewValueStore(suite.sourceCS)
suite.sinkVRW, suite.sourceVRW = sinkVRW, sourceVRW
suite.sourceDB = datas.NewTypesDatabase(sourceVRW, tree.NewNodeStore(suite.sourceCS))
suite.sinkDB = datas.NewTypesDatabase(sinkVRW, tree.NewNodeStore(suite.sinkCS))
}
type LocalToRemoteSuite struct {
@@ -98,7 +108,10 @@ type LocalToRemoteSuite struct {
func (suite *LocalToRemoteSuite) SetupTest() {
suite.sinkCS, suite.sourceCS = makeTestStoreViews()
suite.sinkVRW, suite.sourceVRW = types.NewValueStore(suite.sinkCS), types.NewValueStore(suite.sourceCS)
sinkVRW, sourceVRW := types.NewValueStore(suite.sinkCS), types.NewValueStore(suite.sourceCS)
suite.sinkVRW, suite.sourceVRW = sinkVRW, sourceVRW
suite.sourceDB = datas.NewTypesDatabase(sourceVRW, tree.NewNodeStore(suite.sourceCS))
suite.sinkDB = datas.NewTypesDatabase(sinkVRW, tree.NewNodeStore(suite.sinkCS))
suite.commitReads = 1
}
@@ -108,7 +121,10 @@ type RemoteToRemoteSuite struct {
// SetupTest creates a fresh pair of in-memory chunk-store views and wires
// them into value stores and typed databases for the sink and source sides
// of each remote-to-remote pull test.
func (suite *RemoteToRemoteSuite) SetupTest() {
	suite.sinkCS, suite.sourceCS = makeTestStoreViews()
	// Build the value stores once and share them between the suite fields
	// and the databases so both sides observe the same caches.
	sinkVRW, sourceVRW := types.NewValueStore(suite.sinkCS), types.NewValueStore(suite.sourceCS)
	suite.sinkVRW, suite.sourceVRW = sinkVRW, sourceVRW
	suite.sourceDB = datas.NewTypesDatabase(sourceVRW, tree.NewNodeStore(suite.sourceCS))
	suite.sinkDB = datas.NewTypesDatabase(sinkVRW, tree.NewNodeStore(suite.sinkCS))
	// Remote stores trigger an extra read during commit; see the suite's
	// commitReads field comment.
	suite.commitReads = 1
}
@@ -337,7 +353,7 @@ func (suite *PullSuite) TestPullUpdates() {
}
func (suite *PullSuite) commitToSource(v types.Value, p []hash.Hash) hash.Hash {
db := datas.NewTypesDatabase(suite.sourceVRW.(*types.ValueStore))
db := suite.sourceDB
ds, err := db.GetDataset(context.Background(), datasetID)
suite.NoError(err)
ds, err = db.Commit(context.Background(), ds, v, datas.CommitOptions{Parents: p})
@@ -346,7 +362,7 @@ func (suite *PullSuite) commitToSource(v types.Value, p []hash.Hash) hash.Hash {
}
func (suite *PullSuite) commitToSink(v types.Value, p []hash.Hash) hash.Hash {
db := datas.NewTypesDatabase(suite.sinkVRW.(*types.ValueStore))
db := suite.sinkDB
ds, err := db.GetDataset(context.Background(), datasetID)
suite.NoError(err)
ds, err = db.Commit(context.Background(), ds, v, datas.CommitOptions{Parents: p})
+3 -1
View File
@@ -31,6 +31,7 @@ import (
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/nbs"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/store/util/clienttest"
)
@@ -136,9 +137,10 @@ func tempDirDB(ctx context.Context) (types.ValueReadWriter, datas.Database, erro
return nil, nil, err
}
ns := tree.NewNodeStore(st)
vs := types.NewValueStore(st)
return vs, datas.NewTypesDatabase(vs), nil
return vs, datas.NewTypesDatabase(vs, ns), nil
}
func TestPuller(t *testing.T) {
+2 -3
View File
@@ -20,7 +20,6 @@ import (
flatbuffers "github.com/google/flatbuffers/go"
"github.com/dolthub/dolt/go/gen/fb/serial"
"github.com/dolthub/dolt/go/store/chunks"
"github.com/dolthub/dolt/go/store/prolly"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
@@ -36,12 +35,12 @@ func storeroot_flatbuffer(am prolly.AddressMap) []byte {
return builder.FinishedBytes()
}
func parse_storeroot(bs []byte, cs chunks.ChunkStore) prolly.AddressMap {
func parse_storeroot(bs []byte, ns tree.NodeStore) prolly.AddressMap {
if !bytes.Equal([]byte(serial.StoreRootFileID), bs[4:8]) {
panic("expected store root file id, got: " + string(bs[4:8]))
}
sr := serial.GetRootAsStoreRoot(bs, 0)
mapbytes := sr.AddressMapBytes()
node := tree.NodeFromBytes(mapbytes)
return prolly.NewAddressMap(node, tree.NewNodeStore(cs))
return prolly.NewAddressMap(node, ns)
}
+13 -6
View File
@@ -33,6 +33,7 @@ import (
"github.com/dolthub/dolt/go/store/chunks"
"github.com/dolthub/dolt/go/store/d"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/store/util/profile"
)
@@ -72,8 +73,10 @@ func main() {
// Build One-Time
storage := &chunks.MemoryStorage{}
vrw := types.NewValueStore(storage.NewViewWithDefaultFormat())
db := datas.NewTypesDatabase(vrw)
cs := storage.NewViewWithDefaultFormat()
ns := tree.NewNodeStore(cs)
vrw := types.NewValueStore(cs)
db := datas.NewTypesDatabase(vrw, ns)
ds, err := db.GetDataset(context.Background(), "test")
d.Chk.NoError(err)
t1 := time.Now()
@@ -93,8 +96,10 @@ func main() {
// Build Incrementally
storage = &chunks.MemoryStorage{}
vrw = types.NewValueStore(storage.NewViewWithDefaultFormat())
db = datas.NewTypesDatabase(vrw)
cs = storage.NewViewWithDefaultFormat()
ns = tree.NewNodeStore(cs)
vrw = types.NewValueStore(cs)
db = datas.NewTypesDatabase(vrw, ns)
ds, err = db.GetDataset(context.Background(), "test")
d.Chk.NoError(err)
t1 = time.Now()
@@ -115,8 +120,10 @@ func main() {
fmt.Printf("Testing Blob: \t\tbuild %d MB\t\t\tscan %d MB\n", *blobSize/1000000, *blobSize/1000000)
storage := &chunks.MemoryStorage{}
vrw := types.NewValueStore(storage.NewViewWithDefaultFormat())
db := datas.NewTypesDatabase(vrw)
cs := storage.NewViewWithDefaultFormat()
ns := tree.NewNodeStore(cs)
vrw := types.NewValueStore(cs)
db := datas.NewTypesDatabase(vrw, ns)
ds, err := db.GetDataset(context.Background(), "test")
d.Chk.NoError(err)
+3 -1
View File
@@ -115,6 +115,7 @@ import (
"github.com/dolthub/dolt/go/store/chunks"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/marshal"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/spec"
"github.com/dolthub/dolt/go/store/types"
)
@@ -304,7 +305,8 @@ func Run(datasetID string, t *testing.T, suiteT perfSuiteT) {
memCS := storage.NewView()
suite.DatabaseSpec = "mem://"
suite.VS = types.NewValueStore(memCS)
suite.Database = datas.NewTypesDatabase(suite.VS)
ns := tree.NewNodeStore(memCS)
suite.Database = datas.NewTypesDatabase(suite.VS, ns)
defer suite.Database.Close()
if t, ok := suiteT.(SetupRepSuite); ok {

Some files were not shown because too many files have changed in this diff Show More