Merge pull request #5496 from dolthub/aaron/types-value-Less-ctx

go/store/types: Thread context on Less and Compare.
Aaron Son
2023-03-06 15:38:42 -08:00
committed by GitHub
116 changed files with 845 additions and 777 deletions
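The shape of the change, in brief: the comparison entry points in `go/store/types` (`Value.Less`, `Tuple.Compare`, `LesserValuable`, and the call sites threaded through the rest of the tree below) gain a leading `context.Context`, so comparisons that may read values out of storage can take a caller context. A minimal, illustrative sketch of the new call shape (not taken from the PR), assuming the post-merge `go/store/types` API and an in-memory store:

```go
package main

import (
	"context"
	"fmt"

	"github.com/dolthub/dolt/go/store/types"
)

func main() {
	ctx := context.Background()
	vrw := types.NewMemoryValueStore() // a types.ValueReadWriter; Format() yields the NomsBinFormat

	// Before: types.Uint(1).Less(vrw.Format(), types.Uint(2))
	// After:  the context is threaded through as the first argument.
	lt, err := types.Uint(1).Less(ctx, vrw.Format(), types.Uint(2))
	if err != nil {
		panic(err)
	}
	fmt.Println(lt) // true
}
```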
@@ -434,6 +434,8 @@ func TestReadReplica(t *testing.T) {
}
defer os.Chdir(cwd)
ctx := context.Background()
multiSetup := testcommands.NewMultiRepoTestSetup(t.Fatal)
defer multiSetup.Close()
defer os.RemoveAll(multiSetup.Root)
@@ -471,7 +473,7 @@ func TestReadReplica(t *testing.T) {
defer sc.StopServer()
replicatedTable := "new_table"
multiSetup.CreateTable(sourceDbName, replicatedTable)
multiSetup.CreateTable(ctx, sourceDbName, replicatedTable)
multiSetup.StageAll(sourceDbName)
_ = multiSetup.CommitWithWorkingSet(sourceDbName)
multiSetup.PushToRemote(sourceDbName, "remote1", "main")
@@ -146,7 +146,7 @@ func TestAsyncDiffer(t *testing.T) {
ad := NewAsyncDiffer(4)
end := types.Uint(27)
ad.StartWithRange(ctx, m1, m2, types.NullValue, func(ctx context.Context, value types.Value) (bool, bool, error) {
valid, err := value.Less(m1.Format(), end)
valid, err := value.Less(ctx, vrw.Format(), end)
return valid, false, err
})
return ad
@@ -164,7 +164,7 @@ func TestAsyncDiffer(t *testing.T) {
ad := NewAsyncDiffer(4)
end := types.Uint(15)
ad.StartWithRange(ctx, m1, m2, types.NullValue, func(ctx context.Context, value types.Value) (bool, bool, error) {
valid, err := value.Less(m1.Format(), end)
valid, err := value.Less(ctx, vrw.Format(), end)
return valid, false, err
})
return ad
@@ -183,7 +183,7 @@ func TestAsyncDiffer(t *testing.T) {
start := types.Uint(10)
end := types.Uint(15)
ad.StartWithRange(ctx, m1, m2, start, func(ctx context.Context, value types.Value) (bool, bool, error) {
valid, err := value.Less(m1.Format(), end)
valid, err := value.Less(ctx, vrw.Format(), end)
return valid, false, err
})
return ad
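All three `StartWithRange` callbacks above follow one pattern: remain valid while the current value sorts below the end key, with the format now sourced from a value reader (`vrw`) rather than from one of the maps being diffed. The pattern in isolation, as a hedged sketch with `ad`, `m1`, `m2`, and `vrw` standing in for the differ, the two maps, and the reader:

```go
end := types.Uint(15)
ad.StartWithRange(ctx, m1, m2, types.NullValue, func(ctx context.Context, value types.Value) (bool, bool, error) {
	// valid while value < end under the context-aware Less
	valid, err := value.Less(ctx, vrw.Format(), end)
	return valid, false, err
})
```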
@@ -280,7 +280,7 @@ func (mr *MultiRepoTestSetup) CommitWithWorkingSet(dbName string) *doltdb.Commit
return commit
}
func createTestDataTable() (*table.InMemTable, schema.Schema) {
func createTestDataTable(ctx context.Context, ddb *doltdb.DoltDB) (*table.InMemTable, schema.Schema) {
rows, sch, err := dtestutils.RowsAndSchema()
if err != nil {
panic(err)
@@ -289,7 +289,7 @@ func createTestDataTable() (*table.InMemTable, schema.Schema) {
imt := table.NewInMemTable(sch)
for _, r := range rows {
err := imt.AppendRow(r)
err := imt.AppendRow(ctx, ddb.ValueReadWriter(), r)
if err != nil {
panic(err)
}
@@ -298,10 +298,10 @@ func createTestDataTable() (*table.InMemTable, schema.Schema) {
return imt, sch
}
func (mr *MultiRepoTestSetup) CreateTable(dbName, tblName string) {
func (mr *MultiRepoTestSetup) CreateTable(ctx context.Context, dbName, tblName string) {
dEnv := mr.envs[dbName]
imt, sch := createTestDataTable()
imt, sch := createTestDataTable(ctx, dEnv.DoltDB)
rows := make([]row.Row, imt.NumRows())
for i := 0; i < imt.NumRows(); i++ {
r, err := imt.GetRow(i)
@@ -1155,7 +1155,7 @@ func (dEnv *DoltEnv) BulkDbEaFactory() editor.DbEaFactory {
if err != nil {
return nil
}
return editor.NewBulkImportTEAFactory(dEnv.DoltDB.Format(), dEnv.DoltDB.ValueReadWriter(), tmpDir)
return editor.NewBulkImportTEAFactory(dEnv.DoltDB.ValueReadWriter(), tmpDir)
}
func (dEnv *DoltEnv) LockFile() string {
@@ -114,7 +114,7 @@ func mergeNomsTableData(
if key != nil {
mkNilOrKeyLess := mergeKey == nil
if !mkNilOrKeyLess {
mkNilOrKeyLess, err = key.Less(vrw.Format(), mergeKey)
mkNilOrKeyLess, err = key.Less(ctx, vrw.Format(), mergeKey)
if err != nil {
return err
}
@@ -132,7 +132,7 @@ func mergeNomsTableData(
if !processed && mergeKey != nil {
keyNilOrMKLess := key == nil
if !keyNilOrMKLess {
keyNilOrMKLess, err = mergeKey.Less(vrw.Format(), key)
keyNilOrMKLess, err = mergeKey.Less(ctx, vrw.Format(), key)
if err != nil {
return err
}
@@ -111,13 +111,13 @@ func GetForeignKeyViolations(ctx context.Context, newRoot, baseRoot *doltdb.Root
if err != nil {
return err
}
err = parentFkConstraintViolations(ctx, foreignKey, postParent, postChild, postParent.Schema, emptyIdx, receiver)
err = parentFkConstraintViolations(ctx, baseRoot.VRW(), foreignKey, postParent, postChild, postParent.Schema, emptyIdx, receiver)
if err != nil {
return err
}
} else {
// Parent exists in the ancestor
err = parentFkConstraintViolations(ctx, foreignKey, postParent, postChild, preParent.Schema, preParent.RowData, receiver)
err = parentFkConstraintViolations(ctx, baseRoot.VRW(), foreignKey, postParent, postChild, preParent.Schema, preParent.RowData, receiver)
if err != nil {
return err
}
@@ -134,12 +134,12 @@ func GetForeignKeyViolations(ctx context.Context, newRoot, baseRoot *doltdb.Root
return err
}
err = childFkConstraintViolations(ctx, foreignKey, postParent, postChild, postChild, emptyIdx, receiver)
err = childFkConstraintViolations(ctx, baseRoot.VRW(), foreignKey, postParent, postChild, postChild, emptyIdx, receiver)
if err != nil {
return err
}
} else {
err = childFkConstraintViolations(ctx, foreignKey, postParent, postChild, preChild, preChild.RowData, receiver)
err = childFkConstraintViolations(ctx, baseRoot.VRW(), foreignKey, postParent, postChild, preChild, preChild.RowData, receiver)
if err != nil {
return err
}
@@ -350,6 +350,7 @@ var _ FKViolationReceiver = (*foreignKeyViolationWriter)(nil)
// parentFkConstraintViolations processes foreign key constraint violations for the parent in a foreign key.
func parentFkConstraintViolations(
ctx context.Context,
vr types.ValueReader,
foreignKey doltdb.ForeignKey,
postParent, postChild *constraintViolationsLoadedTable,
preParentSch schema.Schema,
@@ -361,13 +362,14 @@ func parentFkConstraintViolations(
return prollyParentFkConstraintViolations(ctx, foreignKey, postParent, postChild, m, receiver)
}
m := durable.NomsMapFromIndex(preParentRowData)
return nomsParentFkConstraintViolations(ctx, foreignKey, postParent, postChild, preParentSch, m, receiver)
return nomsParentFkConstraintViolations(ctx, vr, foreignKey, postParent, postChild, preParentSch, m, receiver)
}
// childFkConstraintViolations handles processing the reference options on a child, or creating a violation if
// necessary.
func childFkConstraintViolations(
ctx context.Context,
vr types.ValueReader,
foreignKey doltdb.ForeignKey,
postParent, postChild, preChild *constraintViolationsLoadedTable,
preChildRowData durable.Index,
@@ -375,7 +377,7 @@ func childFkConstraintViolations(
) error {
if preChildRowData.Format() != types.Format_DOLT {
m := durable.NomsMapFromIndex(preChildRowData)
return nomsChildFkConstraintViolations(ctx, foreignKey, postParent, postChild, preChild.Schema, m, receiver)
return nomsChildFkConstraintViolations(ctx, vr, foreignKey, postParent, postChild, preChild.Schema, m, receiver)
}
if preChild.IndexData == nil || postChild.Schema.GetPKCols().Size() == 0 || preChild.Schema.GetPKCols().Size() == 0 {
m := durable.ProllyMapFromIndex(preChildRowData)
@@ -400,6 +402,7 @@ func childFkConstraintViolations(
func nomsParentFkConstraintViolations(
ctx context.Context,
vr types.ValueReader,
foreignKey doltdb.ForeignKey,
postParent, postChild *constraintViolationsLoadedTable,
preParentSch schema.Schema,
@@ -448,6 +451,7 @@ func nomsParentFkConstraintViolations(
shouldContinue, err := func() (bool, error) {
var mapIter table.ReadCloser = noms.NewNomsRangeReader(
vr,
postParent.IndexSchema,
durable.NomsMapFromIndex(postParent.IndexData),
[]*noms.ReadRange{{Start: postParentIndexPartialKey, Inclusive: true, Reverse: false, Check: noms.InRangeCheckPartial(postParentIndexPartialKey)}})
@@ -478,7 +482,7 @@ func nomsParentFkConstraintViolations(
if err != nil {
return err
}
err = nomsParentFkConstraintViolationsProcess(ctx, foreignKey, postChild, postChildIndexPartialKey, receiver)
err = nomsParentFkConstraintViolationsProcess(ctx, vr, foreignKey, postChild, postChildIndexPartialKey, receiver)
if err != nil {
return err
}
@@ -497,6 +501,7 @@ func nomsParentFkConstraintViolations(
func nomsParentFkConstraintViolationsProcess(
ctx context.Context,
vr types.ValueReader,
foreignKey doltdb.ForeignKey,
postChild *constraintViolationsLoadedTable,
postChildIndexPartialKey types.Tuple,
@@ -506,6 +511,7 @@ func nomsParentFkConstraintViolationsProcess(
rowData := durable.NomsMapFromIndex(postChild.RowData)
mapIter := noms.NewNomsRangeReader(
vr,
postChild.IndexSchema,
indexData,
[]*noms.ReadRange{{Start: postChildIndexPartialKey, Inclusive: true, Reverse: false, Check: noms.InRangeCheckPartial(postChildIndexPartialKey)}})
@@ -543,6 +549,7 @@ func nomsParentFkConstraintViolationsProcess(
// nomsChildFkConstraintViolations processes foreign key constraint violations for the child in a foreign key.
func nomsChildFkConstraintViolations(
ctx context.Context,
vr types.ValueReader,
foreignKey doltdb.ForeignKey,
postParent, postChild *constraintViolationsLoadedTable,
preChildSch schema.Schema,
@@ -605,7 +612,7 @@ func nomsChildFkConstraintViolations(
if err != nil {
return err
}
err = childFkConstraintViolationsProcess(ctx, postParent, rowDiff, parentPartialKey, receiver)
err = childFkConstraintViolationsProcess(ctx, vr, postParent, rowDiff, parentPartialKey, receiver)
if err != nil {
return err
}
@@ -625,12 +632,14 @@ func nomsChildFkConstraintViolations(
// childFkConstraintViolationsProcess handles processing the constraint violations for the child of a foreign key.
func childFkConstraintViolationsProcess(
ctx context.Context,
vr types.ValueReader,
postParent *constraintViolationsLoadedTable,
rowDiff *diff2.Difference,
parentPartialKey types.Tuple,
receiver FKViolationReceiver,
) error {
var mapIter table.ReadCloser = noms.NewNomsRangeReader(
vr,
postParent.IndexSchema,
durable.NomsMapFromIndex(postParent.IndexData),
[]*noms.ReadRange{{Start: parentPartialKey, Inclusive: true, Reverse: false, Check: noms.InRangeCheckPartial(parentPartialKey)}})
@@ -37,7 +37,7 @@ func (tvs TupleVals) Value(ctx context.Context) (types.Value, error) {
return types.NewTuple(tvs.nbf, tvs.vs...)
}
func (tvs TupleVals) Less(nbf *types.NomsBinFormat, other types.LesserValuable) (bool, error) {
func (tvs TupleVals) Less(ctx context.Context, nbf *types.NomsBinFormat, other types.LesserValuable) (bool, error) {
if other.Kind() == types.TupleKind {
if otherTVs, ok := other.(TupleVals); ok {
for i, val := range tvs.vs {
@@ -49,7 +49,7 @@ func (tvs TupleVals) Less(nbf *types.NomsBinFormat, other types.LesserValuable)
otherVal := otherTVs.vs[i]
if !val.Equals(otherVal) {
return val.Less(nbf, otherVal)
return val.Less(ctx, nbf, otherVal)
}
}
@@ -95,13 +95,14 @@ func TestTupleValsLess(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ctx := context.Background()
vrw := types.NewMemoryValueStore()
lesserTplVals := test.lesserTVs.nomsTupleForTags(types.Format_Default, test.tags, true)
greaterTplVals := test.greaterTVs.nomsTupleForTags(types.Format_Default, test.tags, true)
lesserTplVals := test.lesserTVs.nomsTupleForTags(vrw.Format(), test.tags, true)
greaterTplVals := test.greaterTVs.nomsTupleForTags(vrw.Format(), test.tags, true)
lessLTGreater, err := lesserTplVals.Less(types.Format_Default, greaterTplVals)
lessLTGreater, err := lesserTplVals.Less(ctx, vrw.Format(), greaterTplVals)
require.NoError(t, err)
greaterLTLess, err := greaterTplVals.Less(types.Format_Default, lesserTplVals)
greaterLTLess, err := greaterTplVals.Less(ctx, vrw.Format(), lesserTplVals)
require.NoError(t, err)
assert.True(t, test.areEqual && !lessLTGreater || !test.areEqual && lessLTGreater)
assert.True(t, !greaterLTLess)
@@ -111,9 +112,9 @@ func TestTupleValsLess(t *testing.T) {
greaterTpl, err := greaterTplVals.Value(ctx)
require.NoError(t, err)
lesserLess, err := lesserTpl.Less(types.Format_Default, greaterTpl)
lesserLess, err := lesserTpl.Less(ctx, vrw.Format(), greaterTpl)
require.NoError(t, err)
greaterLess, err := greaterTpl.Less(types.Format_Default, lesserTpl)
greaterLess, err := greaterTpl.Less(ctx, vrw.Format(), lesserTpl)
require.NoError(t, err)
// needs to match the types.Tuple Less implementation.
@@ -102,7 +102,7 @@ func newNomsDiffIter(ctx *sql.Context, ddb *doltdb.DoltDB, joiner *rowconv.Joine
if !ok {
return false, false, nil
}
return ranges[0].Check.Check(ctx, v)
return ranges[0].Check.Check(ctx, ddb.ValueReadWriter(), v)
}
rd.StartWithRange(ctx, durable.NomsMapFromIndex(fromData), durable.NomsMapFromIndex(toData), ranges[0].Start, rangeFunc)
}
@@ -142,7 +142,7 @@ func (dt *DiffTable) Collation() sql.CollationID {
func (dt *DiffTable) Partitions(ctx *sql.Context) (sql.PartitionIter, error) {
cmItr := doltdb.CommitItrForRoots(dt.ddb, dt.head)
sf, err := SelectFuncForFilters(dt.ddb.Format(), dt.partitionFilters)
sf, err := SelectFuncForFilters(dt.ddb.ValueReadWriter(), dt.partitionFilters)
if err != nil {
return nil, err
}
@@ -329,7 +329,7 @@ func (dt *DiffTable) fromCommitLookupPartitions(ctx *sql.Context, hashes []hash.
return sql.PartitionsToPartitionIter(), nil
}
sf, err := SelectFuncForFilters(dt.ddb.Format(), dt.partitionFilters)
sf, err := SelectFuncForFilters(dt.ddb.ValueReadWriter(), dt.partitionFilters)
if err != nil {
return nil, err
}
@@ -523,7 +523,7 @@ func (dt *DiffTable) toCommitLookupPartitions(ctx *sql.Context, hashes []hash.Ha
return sql.PartitionsToPartitionIter(), nil
}
sf, err := SelectFuncForFilters(dt.ddb.Format(), dt.partitionFilters)
sf, err := SelectFuncForFilters(dt.ddb.ValueReadWriter(), dt.partitionFilters)
if err != nil {
return nil, err
}
@@ -674,7 +674,7 @@ func (dp *DiffPartition) isDiffablePartition(ctx *sql.Context) (bool, error) {
type partitionSelectFunc func(*sql.Context, DiffPartition) (bool, error)
func SelectFuncForFilters(nbf *types.NomsBinFormat, filters []sql.Expression) (partitionSelectFunc, error) {
func SelectFuncForFilters(vr types.ValueReader, filters []sql.Expression) (partitionSelectFunc, error) {
const (
toCommitTag uint64 = iota
fromCommitTag
@@ -689,7 +689,7 @@ func SelectFuncForFilters(nbf *types.NomsBinFormat, filters []sql.Expression) (p
schema.NewColumn(fromCommitDate, fromCommitDateTag, types.TimestampKind, false),
)
expFunc, err := expreval.ExpressionFuncFromSQLExpressions(nbf, schema.UnkeyedSchemaFromCols(colColl), filters)
expFunc, err := expreval.ExpressionFuncFromSQLExpressions(vr, schema.UnkeyedSchemaFromCols(colColl), filters)
if err != nil {
return nil, err
@@ -15,6 +15,8 @@
package expreval
import (
"context"
"github.com/dolthub/go-mysql-server/sql/expression"
"github.com/dolthub/dolt/go/store/types"
@@ -29,7 +31,7 @@ type CompareOp interface {
// CompareLiterals compares two go-mysql-server literals
CompareLiterals(l1, l2 *expression.Literal) (bool, error)
// CompareNomsValues compares two noms values
CompareNomsValues(v1, v2 types.Value) (bool, error)
CompareNomsValues(ctx context.Context, v1, v2 types.Value) (bool, error)
// CompareToNil compares a noms value to nil using sql logic rules
CompareToNil(v2 types.Value) (bool, error)
}
@@ -49,7 +51,7 @@ func (op EqualsOp) CompareLiterals(l1, l2 *expression.Literal) (bool, error) {
}
// CompareNomsValues compares two noms values for equality
func (op EqualsOp) CompareNomsValues(v1, v2 types.Value) (bool, error) {
func (op EqualsOp) CompareNomsValues(_ context.Context, v1, v2 types.Value) (bool, error) {
return v1.Equals(v2), nil
}
@@ -64,7 +66,7 @@ func (op EqualsOp) CompareToNil(v types.Value) (bool, error) {
// GreaterOp implements the CompareOp interface implementing greater than logic
type GreaterOp struct {
NBF *types.NomsBinFormat
vr types.ValueReader
}
// CompareLiterals compares two go-mysql-server literals returning true if the value of the first
@@ -81,14 +83,14 @@ func (op GreaterOp) CompareLiterals(l1, l2 *expression.Literal) (bool, error) {
// CompareNomsValues compares two noms values returning true if the value of the first
// is greater than the second.
func (op GreaterOp) CompareNomsValues(v1, v2 types.Value) (bool, error) {
func (op GreaterOp) CompareNomsValues(ctx context.Context, v1, v2 types.Value) (bool, error) {
eq := v1.Equals(v2)
if eq {
return false, nil
}
lt, err := v1.Less(op.NBF, v2)
lt, err := v1.Less(ctx, op.vr.Format(), v2)
if err != nil {
return false, nil
@@ -104,7 +106,7 @@ func (op GreaterOp) CompareToNil(types.Value) (bool, error) {
// GreaterEqualOp implements the CompareOp interface implementing greater than or equal to logic
type GreaterEqualOp struct {
NBF *types.NomsBinFormat
vr types.ValueReader
}
// CompareLiterals compares two go-mysql-server literals returning true if the value of the first
@@ -121,8 +123,8 @@ func (op GreaterEqualOp) CompareLiterals(l1, l2 *expression.Literal) (bool, erro
// CompareNomsValues compares two noms values returning true if the value of the first
// is greater than or equal to the second.
func (op GreaterEqualOp) CompareNomsValues(v1, v2 types.Value) (bool, error) {
res, err := v1.Less(op.NBF, v2)
func (op GreaterEqualOp) CompareNomsValues(ctx context.Context, v1, v2 types.Value) (bool, error) {
res, err := v1.Less(ctx, op.vr.Format(), v2)
if err != nil {
return false, err
@@ -138,7 +140,7 @@ func (op GreaterEqualOp) CompareToNil(types.Value) (bool, error) {
// LessOp implements the CompareOp interface implementing less than logic
type LessOp struct {
NBF *types.NomsBinFormat
vr types.ValueReader
}
// CompareLiterals compares two go-mysql-server literals returning true if the value of the first
@@ -155,8 +157,8 @@ func (op LessOp) CompareLiterals(l1, l2 *expression.Literal) (bool, error) {
// CompareNomsValues compares two noms values returning true if the value of the first
// is less than the second.
func (op LessOp) CompareNomsValues(v1, v2 types.Value) (bool, error) {
return v1.Less(op.NBF, v2)
func (op LessOp) CompareNomsValues(ctx context.Context, v1, v2 types.Value) (bool, error) {
return v1.Less(ctx, op.vr.Format(), v2)
}
// CompareToNil always returns false as values are neither greater than, less than, nor equal to nil
@@ -166,7 +168,7 @@ func (op LessOp) CompareToNil(types.Value) (bool, error) {
// LessEqualOp implements the CompareOp interface implementing less than or equal to logic
type LessEqualOp struct {
NBF *types.NomsBinFormat
vr types.ValueReader
}
// CompareLiterals compares two go-mysql-server literals returning true if the value of the first
@@ -183,14 +185,14 @@ func (op LessEqualOp) CompareLiterals(l1, l2 *expression.Literal) (bool, error)
// CompareNomsValues compares two noms values returning true if the value of the first
// is less than or equal to the second.
func (op LessEqualOp) CompareNomsValues(v1, v2 types.Value) (bool, error) {
func (op LessEqualOp) CompareNomsValues(ctx context.Context, v1, v2 types.Value) (bool, error) {
eq := v1.Equals(v2)
if eq {
return true, nil
}
return v1.Less(op.NBF, v2)
return v1.Less(ctx, op.vr.Format(), v2)
}
// CompareToNil always returns false as values are neither greater than, less than, nor equal to nil
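Each comparison op now embeds a `types.ValueReader` (field `vr`) where it previously cached `NBF *types.NomsBinFormat`, deriving the format on demand with `vr.Format()`. A short usage sketch mirroring the updated tests further down (inside package expreval), with an in-memory store as the reader:

```go
ctx := context.Background()
vrw := types.NewMemoryValueStore()

gt := GreaterOp{vrw}
lte := LessEqualOp{vrw}

isGt, err := gt.CompareNomsValues(ctx, types.Uint(2), types.Uint(1)) // true
if err != nil {
	return err
}
isLte, err := lte.CompareNomsValues(ctx, types.Uint(2), types.Uint(1)) // false
if err != nil {
	return err
}
_, _ = isGt, isLte
```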
@@ -15,6 +15,7 @@
package expreval
import (
"context"
"testing"
"time"
@@ -76,20 +77,22 @@ func TestCompareNomsValues(t *testing.T) {
},
}
vrw := types.NewMemoryValueStore()
eqOp := EqualsOp{}
gtOp := GreaterOp{}
gteOp := GreaterEqualOp{}
ltOp := LessOp{}
lteOp := LessEqualOp{}
gtOp := GreaterOp{vrw}
gteOp := GreaterEqualOp{vrw}
ltOp := LessOp{vrw}
lteOp := LessEqualOp{vrw}
ctx := context.Background()
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
mustBool := getMustBool(t)
resEq := mustBool(eqOp.CompareNomsValues(test.v1, test.v2))
resGt := mustBool(gtOp.CompareNomsValues(test.v1, test.v2))
resGte := mustBool(gteOp.CompareNomsValues(test.v1, test.v2))
resLt := mustBool(ltOp.CompareNomsValues(test.v1, test.v2))
resLte := mustBool(lteOp.CompareNomsValues(test.v1, test.v2))
resEq := mustBool(eqOp.CompareNomsValues(ctx, test.v1, test.v2))
resGt := mustBool(gtOp.CompareNomsValues(ctx, test.v1, test.v2))
resGte := mustBool(gteOp.CompareNomsValues(ctx, test.v1, test.v2))
resLt := mustBool(ltOp.CompareNomsValues(ctx, test.v1, test.v2))
resLte := mustBool(lteOp.CompareNomsValues(ctx, test.v1, test.v2))
assert.True(t, resEq == test.eq, "equals failure. Expected: %t Actual %t", test.eq, resEq)
assert.True(t, resGt == test.gt, "greater failure. Expected: %t Actual %t", test.gt, resGt)
@@ -36,10 +36,10 @@ var errNotImplemented = errors.NewKind("Not Implemented: %s")
type ExpressionFunc func(ctx context.Context, vals map[uint64]types.Value) (bool, error)
// ExpressionFuncFromSQLExpressions returns an ExpressionFunc which represents the slice of sql.Expressions passed in
func ExpressionFuncFromSQLExpressions(nbf *types.NomsBinFormat, sch schema.Schema, expressions []sql.Expression) (ExpressionFunc, error) {
func ExpressionFuncFromSQLExpressions(vr types.ValueReader, sch schema.Schema, expressions []sql.Expression) (ExpressionFunc, error) {
var root ExpressionFunc
for _, exp := range expressions {
expFunc, err := getExpFunc(nbf, sch, exp)
expFunc, err := getExpFunc(vr, sch, exp)
if err != nil {
return nil, err
@@ -61,26 +61,26 @@ func ExpressionFuncFromSQLExpressions(nbf *types.NomsBinFormat, sch schema.Schem
return root, nil
}
func getExpFunc(nbf *types.NomsBinFormat, sch schema.Schema, exp sql.Expression) (ExpressionFunc, error) {
func getExpFunc(vr types.ValueReader, sch schema.Schema, exp sql.Expression) (ExpressionFunc, error) {
switch typedExpr := exp.(type) {
case *expression.Equals:
return newComparisonFunc(EqualsOp{}, typedExpr.BinaryExpression, sch)
case *expression.GreaterThan:
return newComparisonFunc(GreaterOp{nbf}, typedExpr.BinaryExpression, sch)
return newComparisonFunc(GreaterOp{vr}, typedExpr.BinaryExpression, sch)
case *expression.GreaterThanOrEqual:
return newComparisonFunc(GreaterEqualOp{nbf}, typedExpr.BinaryExpression, sch)
return newComparisonFunc(GreaterEqualOp{vr}, typedExpr.BinaryExpression, sch)
case *expression.LessThan:
return newComparisonFunc(LessOp{nbf}, typedExpr.BinaryExpression, sch)
return newComparisonFunc(LessOp{vr}, typedExpr.BinaryExpression, sch)
case *expression.LessThanOrEqual:
return newComparisonFunc(LessEqualOp{nbf}, typedExpr.BinaryExpression, sch)
return newComparisonFunc(LessEqualOp{vr}, typedExpr.BinaryExpression, sch)
case *expression.Or:
leftFunc, err := getExpFunc(nbf, sch, typedExpr.Left)
leftFunc, err := getExpFunc(vr, sch, typedExpr.Left)
if err != nil {
return nil, err
}
rightFunc, err := getExpFunc(nbf, sch, typedExpr.Right)
rightFunc, err := getExpFunc(vr, sch, typedExpr.Right)
if err != nil {
return nil, err
@@ -88,13 +88,13 @@ func getExpFunc(nbf *types.NomsBinFormat, sch schema.Schema, exp sql.Expression)
return newOrFunc(leftFunc, rightFunc), nil
case *expression.And:
leftFunc, err := getExpFunc(nbf, sch, typedExpr.Left)
leftFunc, err := getExpFunc(vr, sch, typedExpr.Left)
if err != nil {
return nil, err
}
rightFunc, err := getExpFunc(nbf, sch, typedExpr.Right)
rightFunc, err := getExpFunc(vr, sch, typedExpr.Right)
if err != nil {
return nil, err
@@ -104,7 +104,7 @@ func getExpFunc(nbf *types.NomsBinFormat, sch schema.Schema, exp sql.Expression)
case *expression.InTuple:
return newComparisonFunc(EqualsOp{}, typedExpr.BinaryExpression, sch)
case *expression.Not:
expFunc, err := getExpFunc(nbf, sch, typedExpr.Child)
expFunc, err := getExpFunc(vr, sch, typedExpr.Child)
if err != nil {
return nil, err
}
@@ -260,7 +260,7 @@ func newComparisonFunc(op CompareOp, exp expression.BinaryExpression, sch schema
colVal, ok := vals[tag]
if ok && !types.IsNull(colVal) {
return compareNomsValues(colVal, nomsVal)
return compareNomsValues(ctx, colVal, nomsVal)
} else {
return compareToNil(nomsVal)
}
@@ -291,7 +291,7 @@ func newComparisonFunc(op CompareOp, exp expression.BinaryExpression, sch schema
if types.IsNull(v1) {
return compareToNull(v2)
} else {
return compareNomsValues(v1, v2)
return compareNomsValues(ctx, v1, v2)
}
}, nil
} else if compType == VariableInLiteralList {
@@ -323,7 +323,7 @@ func newComparisonFunc(op CompareOp, exp expression.BinaryExpression, sch schema
for _, nv := range nomsVals {
var lb bool
if ok && !types.IsNull(colVal) {
lb, err = compareNomsValues(colVal, nv)
lb, err = compareNomsValues(ctx, colVal, nv)
} else {
lb, err = compareToNil(nv)
}
@@ -219,12 +219,14 @@ func TestNewComparisonFunc(t *testing.T) {
lte = "lte"
)
vrw := types.NewMemoryValueStore()
ops := make(map[string]CompareOp)
ops[eq] = EqualsOp{}
ops[gt] = GreaterOp{}
ops[gte] = GreaterEqualOp{}
ops[lt] = LessOp{}
ops[lte] = LessEqualOp{}
ops[gt] = GreaterOp{vrw}
ops[gte] = GreaterEqualOp{vrw}
ops[lt] = LessOp{vrw}
ops[lte] = LessEqualOp{vrw}
type funcTestVal struct {
name string
@@ -56,6 +56,8 @@ type DoltIndex interface {
Format() *types.NomsBinFormat
IsPrimaryKey() bool
valueReadWriter() types.ValueReadWriter
getDurableState(*sql.Context, DoltTableable) (*durableIndexState, error)
coversColumns(s *durableIndexState, columns []uint64) bool
sqlRowConverter(*durableIndexState, []uint64) *KVToSqlRowConverter
@@ -928,6 +930,10 @@ func (di *doltIndex) trimRangeCutValue(to int, keyPart interface{}) interface{}
return keyPart
}
func (di *doltIndex) valueReadWriter() types.ValueReadWriter {
return di.vrw
}
func (di *doltIndex) prollySpatialRanges(ranges []sql.Range) ([]prolly.Range, error) {
// should be exactly one range
rng := ranges[0][0]
@@ -80,7 +80,7 @@ func RowIterForNomsRanges(ctx *sql.Context, idx DoltIndex, ranges []*noms.ReadRa
columns = idx.Schema().GetAllCols().Tags
}
m := durable.NomsMapFromIndex(durableState.Secondary)
nrr := noms.NewNomsRangeReader(idx.IndexSchema(), m, ranges)
nrr := noms.NewNomsRangeReader(idx.valueReadWriter(), idx.IndexSchema(), m, ranges)
covers := idx.coversColumns(durableState, columns)
if covers || idx.ID() == "PRIMARY" {
@@ -515,7 +515,7 @@ var _ noms.InRangeCheck = nomsRangeCheck{}
// Between returns whether the given types.Value is between the bounds. In addition, it returns whether the value is
// outside the bounds and above the upper bound.
func (cb columnBounds) Between(ctx context.Context, nbf *types.NomsBinFormat, val types.Value) (ok bool, over bool, err error) {
func (cb columnBounds) Between(ctx context.Context, vr types.ValueReader, val types.Value) (ok bool, over bool, err error) {
// Only boundCase_isNull matches NULL values,
// otherwise we terminate the range scan.
// This is checked early to bypass unpredictable
@@ -529,58 +529,58 @@ func (cb columnBounds) Between(ctx context.Context, nbf *types.NomsBinFormat, va
case boundsCase_infinity_infinity:
return true, false, nil
case boundsCase_infinity_lessEquals:
ok, err := cb.upperbound.Less(nbf, val)
ok, err := cb.upperbound.Less(ctx, vr.Format(), val)
if err != nil || ok {
return false, true, err
}
case boundsCase_infinity_less:
ok, err := val.Less(nbf, cb.upperbound)
ok, err := val.Less(ctx, vr.Format(), cb.upperbound)
if err != nil || !ok {
return false, true, err
}
case boundsCase_greaterEquals_infinity:
ok, err := val.Less(nbf, cb.lowerbound)
ok, err := val.Less(ctx, vr.Format(), cb.lowerbound)
if err != nil || ok {
return false, false, err
}
case boundsCase_greaterEquals_lessEquals:
ok, err := val.Less(nbf, cb.lowerbound)
ok, err := val.Less(ctx, vr.Format(), cb.lowerbound)
if err != nil || ok {
return false, false, err
}
ok, err = cb.upperbound.Less(nbf, val)
ok, err = cb.upperbound.Less(ctx, vr.Format(), val)
if err != nil || ok {
return false, true, err
}
case boundsCase_greaterEquals_less:
ok, err := val.Less(nbf, cb.lowerbound)
ok, err := val.Less(ctx, vr.Format(), cb.lowerbound)
if err != nil || ok {
return false, false, err
}
ok, err = val.Less(nbf, cb.upperbound)
ok, err = val.Less(ctx, vr.Format(), cb.upperbound)
if err != nil || !ok {
return false, true, err
}
case boundsCase_greater_infinity:
ok, err := cb.lowerbound.Less(nbf, val)
ok, err := cb.lowerbound.Less(ctx, vr.Format(), val)
if err != nil || !ok {
return false, false, err
}
case boundsCase_greater_lessEquals:
ok, err := cb.lowerbound.Less(nbf, val)
ok, err := cb.lowerbound.Less(ctx, vr.Format(), val)
if err != nil || !ok {
return false, false, err
}
ok, err = cb.upperbound.Less(nbf, val)
ok, err = cb.upperbound.Less(ctx, vr.Format(), val)
if err != nil || ok {
return false, true, err
}
case boundsCase_greater_less:
ok, err := cb.lowerbound.Less(nbf, val)
ok, err := cb.lowerbound.Less(ctx, vr.Format(), val)
if err != nil || !ok {
return false, false, err
}
ok, err = val.Less(nbf, cb.upperbound)
ok, err = val.Less(ctx, vr.Format(), cb.upperbound)
if err != nil || !ok {
return false, true, err
}
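Every arm of the switch above reduces to one or two context-aware `Less` calls. As a worked example, the half-open case `(lower, upper]` can be read as the standalone helper below, a hedged restatement of the `boundsCase_greater_lessEquals` arm with a hypothetical helper name:

```go
// betweenGreaterLessEquals reports (ok, over): ok iff lowerbound < val && !(upperbound < val);
// over iff val lies above the upper bound.
func betweenGreaterLessEquals(ctx context.Context, vr types.ValueReader, cb columnBounds, val types.Value) (ok bool, over bool, err error) {
	ok, err = cb.lowerbound.Less(ctx, vr.Format(), val)
	if err != nil || !ok {
		return false, false, err // at or below the lower bound
	}
	ok, err = cb.upperbound.Less(ctx, vr.Format(), val)
	if err != nil || ok {
		return false, true, err // above the upper bound
	}
	return true, false, nil
}
```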
@@ -616,14 +616,13 @@ func (cb columnBounds) Equals(otherBounds columnBounds) bool {
}
// Check implements the interface noms.InRangeCheck.
func (nrc nomsRangeCheck) Check(ctx context.Context, tuple types.Tuple) (valid bool, skip bool, err error) {
func (nrc nomsRangeCheck) Check(ctx context.Context, vr types.ValueReader, tuple types.Tuple) (valid bool, skip bool, err error) {
itr := types.TupleItrPool.Get().(*types.TupleIterator)
defer types.TupleItrPool.Put(itr)
err = itr.InitForTuple(tuple)
if err != nil {
return false, false, err
}
nbf := tuple.Format()
for i := 0; i < len(nrc) && itr.HasMore(); i++ {
if err := itr.Skip(); err != nil {
@@ -637,7 +636,7 @@ func (nrc nomsRangeCheck) Check(ctx context.Context, tuple types.Tuple) (valid b
break
}
ok, over, err := nrc[i].Between(ctx, nbf, val)
ok, over, err := nrc[i].Between(ctx, vr, val)
if err != nil {
return false, false, err
}
@@ -202,7 +202,7 @@ func (v NomsJSON) Compare(ctx *sql.Context, other gmstypes.JSONValue) (cmp int,
return doc.Compare(ctx, other)
}
return types.JSON(v).Compare(types.JSON(noms))
return types.JSON(v).Compare(ctx, types.JSON(noms))
}
// ToString implements the sql.JSONValue interface.
@@ -38,6 +38,7 @@ type BulkImportTEA struct {
capMon remotestorage.CapacityMonitor
emptyTuple types.Tuple
vr types.ValueReader
ea types.EditAccumulator
rowData types.Map
@@ -108,7 +109,7 @@ func (tea *BulkImportTEA) Get(ctx context.Context, keyHash hash.Hash, key types.
func (tea *BulkImportTEA) HasPartial(ctx context.Context, idxSch schema.Schema, partialKeyHash hash.Hash, partialKey types.Tuple) ([]hashedTuple, error) {
var err error
var matches []hashedTuple
var mapIter table.ReadCloser = noms.NewNomsRangeReader(idxSch, tea.rowData, []*noms.ReadRange{
var mapIter table.ReadCloser = noms.NewNomsRangeReader(tea.vr, idxSch, tea.rowData, []*noms.ReadRange{
{Start: partialKey, Inclusive: true, Reverse: false, Check: noms.InRangeCheckPartial(partialKey)}})
defer mapIter.Close(ctx)
var r row.Row
@@ -158,7 +159,7 @@ func (tea *BulkImportTEA) MaterializeEdits(ctx context.Context, nbf *types.NomsB
ea := tea.ea
defer ea.Close(ctx)
itr, err := ea.FinishedEditing()
itr, err := ea.FinishedEditing(ctx)
if err != nil {
return types.EmptyMap, err
}
@@ -184,6 +185,7 @@ type BulkImportIEA struct {
capMon remotestorage.CapacityMonitor
emptyTuple types.Tuple
vr types.ValueReader
ea types.EditAccumulator
rowData types.Map
@@ -264,7 +266,7 @@ func (iea *BulkImportIEA) HasPartial(ctx context.Context, idxSch schema.Schema,
var err error
var matches []hashedTuple
var mapIter table.ReadCloser = noms.NewNomsRangeReader(idxSch, iea.rowData, []*noms.ReadRange{
var mapIter table.ReadCloser = noms.NewNomsRangeReader(iea.vr, idxSch, iea.rowData, []*noms.ReadRange{
{Start: partialKey, Inclusive: true, Reverse: false, Check: noms.InRangeCheckPartial(partialKey)}})
defer mapIter.Close(ctx)
var r row.Row
@@ -319,7 +321,7 @@ func (iea *BulkImportIEA) MaterializeEdits(ctx context.Context, nbf *types.NomsB
ea := iea.ea
defer ea.Close(ctx)
itr, err := ea.FinishedEditing()
itr, err := ea.FinishedEditing(ctx)
if err != nil {
return types.EmptyMap, err
}
@@ -339,14 +341,12 @@ func (iea *BulkImportIEA) MaterializeEdits(ctx context.Context, nbf *types.NomsB
var _ DbEaFactory = (*BulkImportTEAFactory)(nil)
type BulkImportTEAFactory struct {
nbf *types.NomsBinFormat
vrw types.ValueReadWriter
directory string
}
func NewBulkImportTEAFactory(nbf *types.NomsBinFormat, vrw types.ValueReadWriter, directory string) *BulkImportTEAFactory {
func NewBulkImportTEAFactory(vrw types.ValueReadWriter, directory string) *BulkImportTEAFactory {
return &BulkImportTEAFactory{
nbf: nbf,
vrw: vrw,
directory: directory,
}
@@ -356,10 +356,10 @@ func (b *BulkImportTEAFactory) NewTableEA(ctx context.Context, rowData types.Map
const flushInterval = 256 * 1024
createMapEA := func() types.EditAccumulator {
return types.CreateEditAccForMapEdits(b.nbf)
return types.CreateEditAccForMapEdits(b.vrw)
}
ea := edits.NewDiskBackedEditAcc(ctx, b.nbf, b.vrw, flushInterval, b.directory, createMapEA)
ea := edits.NewDiskBackedEditAcc(ctx, b.vrw, flushInterval, b.directory, createMapEA)
return &BulkImportTEA{
teaf: b,
capMon: remotestorage.NewUncappedCapacityMonitor(),
@@ -367,7 +367,7 @@ func (b *BulkImportTEAFactory) NewTableEA(ctx context.Context, rowData types.Map
ea: ea,
adds: make(map[hash.Hash]bool),
deletes: make(map[hash.Hash]bool),
emptyTuple: types.EmptyTuple(b.nbf),
emptyTuple: types.EmptyTuple(b.vrw.Format()),
}
}
@@ -375,10 +375,10 @@ func (b *BulkImportTEAFactory) NewIndexEA(ctx context.Context, rowData types.Map
const flushInterval = 256 * 1024
createMapEA := func() types.EditAccumulator {
return types.CreateEditAccForMapEdits(b.nbf)
return types.CreateEditAccForMapEdits(b.vrw)
}
ea := edits.NewDiskBackedEditAcc(ctx, b.nbf, b.vrw, flushInterval, b.directory, createMapEA)
ea := edits.NewDiskBackedEditAcc(ctx, b.vrw, flushInterval, b.directory, createMapEA)
return &BulkImportIEA{
teaf: b,
capMon: remotestorage.NewUncappedCapacityMonitor(),
@@ -387,18 +387,18 @@ func (b *BulkImportTEAFactory) NewIndexEA(ctx context.Context, rowData types.Map
adds: make(map[hash.Hash]struct{}),
deletes: make(map[hash.Hash]struct{}),
partialAdds: make(map[hash.Hash]hashedTuple),
emptyTuple: types.EmptyTuple(b.nbf),
emptyTuple: types.EmptyTuple(b.vrw.Format()),
}
}
var _ DbEaFactory = (*InMemDEAF)(nil)
type InMemDEAF struct {
nbf *types.NomsBinFormat
vr types.ValueReader
capMon remotestorage.CapacityMonitor
}
func NewInMemDeafWithMaxCapacity(nbf *types.NomsBinFormat, maxCapacity int64) DbEaFactory {
func NewInMemDeafWithMaxCapacity(vr types.ValueReader, maxCapacity int64) DbEaFactory {
var capMon remotestorage.CapacityMonitor
if maxCapacity > 0 {
capMon = remotestorage.NewFixedCapacityMonitor(maxCapacity)
@@ -406,15 +406,15 @@ func NewInMemDeafWithMaxCapacity(nbf *types.NomsBinFormat, maxCapacity int64) Db
capMon = remotestorage.NewUncappedCapacityMonitor()
}
return &InMemDEAF{nbf: nbf, capMon: capMon}
return &InMemDEAF{vr: vr, capMon: capMon}
}
func NewInMemDeaf(nbf *types.NomsBinFormat) DbEaFactory {
return NewInMemDeafWithMaxCapacity(nbf, -1)
func NewInMemDeaf(vr types.ValueReader) DbEaFactory {
return NewInMemDeafWithMaxCapacity(vr, -1)
}
func (i *InMemDEAF) NewTableEA(ctx context.Context, rowData types.Map) TableEditAccumulator {
ea := edits.NewAsyncSortedEditsWithDefaults(i.nbf)
ea := edits.NewAsyncSortedEditsWithDefaults(i.vr)
return &BulkImportTEA{
teaf: i,
capMon: i.capMon,
@@ -422,12 +422,12 @@ func (i *InMemDEAF) NewTableEA(ctx context.Context, rowData types.Map) TableEdit
ea: ea,
adds: make(map[hash.Hash]bool),
deletes: make(map[hash.Hash]bool),
emptyTuple: types.EmptyTuple(i.nbf),
emptyTuple: types.EmptyTuple(i.vr.Format()),
}
}
func (i *InMemDEAF) NewIndexEA(ctx context.Context, rowData types.Map) IndexEditAccumulator {
ea := edits.NewAsyncSortedEditsWithDefaults(i.nbf)
ea := edits.NewAsyncSortedEditsWithDefaults(i.vr)
return &BulkImportIEA{
teaf: i,
capMon: i.capMon,
@@ -436,6 +436,6 @@ func (i *InMemDEAF) NewIndexEA(ctx context.Context, rowData types.Map) IndexEdit
adds: make(map[hash.Hash]struct{}),
deletes: make(map[hash.Hash]struct{}),
partialAdds: make(map[hash.Hash]hashedTuple),
emptyTuple: types.EmptyTuple(i.nbf),
emptyTuple: types.EmptyTuple(i.vr.Format()),
}
}
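The edit-accumulator factories now take only the reader/writer and recover the format from it, rather than carrying a separate `*types.NomsBinFormat` that could drift out of sync. A construction sketch, with `tmpDir` a hypothetical temp directory for the disk-backed variant:

```go
vrw := types.NewMemoryValueStore()

// In-memory edit accumulators (see TestEditorOptions below, which now passes vrw directly):
inMem := NewInMemDeaf(vrw)

// Disk-backed accumulators for bulk imports:
bulk := NewBulkImportTEAFactory(vrw, tmpDir)
_, _ = inMem, bulk
```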
@@ -128,7 +128,7 @@ func (edits *inMemIndexEdits) Has(keyHash hash.Hash) (added, deleted bool) {
// for the uncommitted changes to become so large that they need to be flushed to disk. At this point we change modes to write all edits
// to a separate map edit accumulator as they occur until the next commit occurs.
type indexEditAccumulatorImpl struct {
nbf *types.NomsBinFormat
vr types.ValueReader
// state of the index last time edits were applied
rowData types.Map
@@ -184,7 +184,7 @@ func (iea *indexEditAccumulatorImpl) flushUncommitted() {
iea.commitEAId = invalidEaId
}
iea.uncommittedEA = edits.NewAsyncSortedEditsWithDefaults(iea.nbf)
iea.uncommittedEA = edits.NewAsyncSortedEditsWithDefaults(iea.vr)
iea.uncommittedEAId = iea.accumulatorIdx
iea.accumulatorIdx++
@@ -203,7 +203,7 @@ func (iea *indexEditAccumulatorImpl) flushUncommitted() {
iea.flusher.Flush(iea.uncommittedEA, iea.uncommittedEAId)
// initialize a new types.EditAccumulator for additional uncommitted edits to be written to.
iea.uncommittedEA = edits.NewAsyncSortedEditsWithDefaults(iea.nbf)
iea.uncommittedEA = edits.NewAsyncSortedEditsWithDefaults(iea.vr)
iea.uncommittedEAId = iea.accumulatorIdx
iea.accumulatorIdx++
}
@@ -286,7 +286,7 @@ func (iea *indexEditAccumulatorImpl) HasPartial(ctx context.Context, idxSch sche
var err error
var matches []hashedTuple
var mapIter table.ReadCloser = noms.NewNomsRangeReader(idxSch, iea.rowData, []*noms.ReadRange{
var mapIter table.ReadCloser = noms.NewNomsRangeReader(iea.vr, idxSch, iea.rowData, []*noms.ReadRange{
{Start: partialKey, Inclusive: true, Reverse: false, Check: noms.InRangeCheckPartial(partialKey)}})
defer mapIter.Close(ctx)
var r row.Row
@@ -398,7 +398,7 @@ func (iea *indexEditAccumulatorImpl) MaterializeEdits(ctx context.Context, nbf *
return iea.rowData, nil
}
committedEP, err := iea.commitEA.FinishedEditing()
committedEP, err := iea.commitEA.FinishedEditing(ctx)
iea.commitEA = nil
if err != nil {
return types.EmptyMap, err
@@ -421,7 +421,7 @@ func (iea *indexEditAccumulatorImpl) MaterializeEdits(ctx context.Context, nbf *
}
}()
accEdits, err := edits.NewEPMerger(ctx, nbf, eps)
accEdits, err := edits.NewEPMerger(ctx, iea.vr, eps)
if err != nil {
return types.EmptyMap, err
}
@@ -436,7 +436,7 @@ func (iea *indexEditAccumulatorImpl) MaterializeEdits(ctx context.Context, nbf *
iea.committed = newInMemIndexEdits()
iea.commitEAId = iea.accumulatorIdx
iea.accumulatorIdx++
iea.commitEA = edits.NewAsyncSortedEditsWithDefaults(iea.nbf)
iea.commitEA = edits.NewAsyncSortedEditsWithDefaults(iea.vr)
iea.committedEaIds = set.NewUint64Set(nil)
iea.uncommittedEaIds = set.NewUint64Set(nil)
@@ -312,7 +312,7 @@ func RebuildIndex(ctx context.Context, tbl *doltdb.Table, indexName string, opts
tf.Reset(tbl.Format())
defer tupleFactories.Put(tf)
opts = opts.WithDeaf(NewBulkImportTEAFactory(tbl.Format(), tbl.ValueReadWriter(), opts.Tempdir))
opts = opts.WithDeaf(NewBulkImportTEAFactory(tbl.ValueReadWriter(), opts.Tempdir))
rebuiltIndexData, err := rebuildIndexRowData(ctx, tbl.ValueReadWriter(), sch, tableRowData, index, opts, tf)
if err != nil {
return types.EmptyMap, err
@@ -344,7 +344,7 @@ func RebuildAllIndexes(ctx context.Context, t *doltdb.Table, opts Options) (*dol
tf.Reset(t.Format())
defer tupleFactories.Put(tf)
opts = opts.WithDeaf(NewBulkImportTEAFactory(t.Format(), t.ValueReadWriter(), opts.Tempdir))
opts = opts.WithDeaf(NewBulkImportTEAFactory(t.ValueReadWriter(), opts.Tempdir))
for _, index := range sch.Indexes().AllIndexes() {
rebuiltIndexRowData, err := rebuildIndexRowData(ctx, t.ValueReadWriter(), sch, tableRowData, index, opts, tf)
if err != nil {
@@ -744,7 +744,7 @@ func TestIndexEditorCapacityExceeded(t *testing.T) {
emptyMap, err := types.NewMap(ctx, vrw)
require.NoError(t, err)
opts := Options{Deaf: NewInMemDeafWithMaxCapacity(format, 224)}
opts := Options{Deaf: NewInMemDeafWithMaxCapacity(vrw, 224)}
indexEditor := NewIndexEditor(ctx, index, emptyMap, tableSch, opts)
for i := 0; i < 3; i++ {
dRow, err := row.New(format, indexSch, row.TaggedValues{
@@ -160,7 +160,7 @@ func (kte *keylessTableEditor) GetIndexedRows(ctx context.Context, key types.Tup
return nil, err
}
indexIter := noms.NewNomsRangeReader(idxSch, idxMap,
indexIter := noms.NewNomsRangeReader(kte.tbl.ValueReadWriter(), idxSch, idxMap,
[]*noms.ReadRange{{Start: key, Inclusive: true, Reverse: false, Check: noms.InRangeCheckPartial(key)}},
)
@@ -456,7 +456,7 @@ func applyEdits(ctx context.Context, tbl *doltdb.Table, acc keylessEditAcc, inde
idx++
}
err = types.SortWithErroringLess(types.TupleSort{Tuples: keys, Nbf: acc.nbf})
err = types.SortWithErroringLess(ctx, tbl.ValueReadWriter().Format(), types.TupleSort{Tuples: keys})
if err != nil {
return nil, err
}
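`types.SortWithErroringLess` now takes the context and format explicitly, and `types.TupleSort` drops its `Nbf` field. A minimal sketch of the updated call, assuming applyEdits-like code where `keys` is the `[]types.Tuple` being sorted and `tbl` exposes the table's value reader as above:

```go
err := types.SortWithErroringLess(ctx, tbl.ValueReadWriter().Format(), types.TupleSort{Tuples: keys})
if err != nil {
	return nil, err
}
// keys is now in ascending order under the context-aware Tuple.Less.
```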
@@ -103,7 +103,7 @@ func (o Options) WithDeaf(deaf DbEaFactory) Options {
func TestEditorOptions(vrw types.ValueReadWriter) Options {
return Options{
ForeignKeyChecksDisabled: false,
Deaf: NewInMemDeaf(vrw.Format()),
Deaf: NewInMemDeaf(vrw),
}
}
@@ -118,7 +118,7 @@ func (mods *inMemModifications) Get(keyHash hash.Hash) (kvp *doltKVP, added, del
// for the uncommitted changes to become so large that they need to be flushed to disk. At this point we change modes to write all edits
// to a separate map edit accumulator as they occur until the next commit occurs.
type tableEditAccumulatorImpl struct {
nbf *types.NomsBinFormat
vr types.ValueReader
// initial state of the map
rowData types.Map
@@ -185,7 +185,7 @@ func (tea *tableEditAccumulatorImpl) Get(ctx context.Context, keyHash hash.Hash,
func (tea *tableEditAccumulatorImpl) HasPartial(ctx context.Context, idxSch schema.Schema, partialKeyHash hash.Hash, partialKey types.Tuple) ([]hashedTuple, error) {
var err error
var matches []hashedTuple
var mapIter table.ReadCloser = noms.NewNomsRangeReader(idxSch, tea.rowData, []*noms.ReadRange{
var mapIter table.ReadCloser = noms.NewNomsRangeReader(tea.vr, idxSch, tea.rowData, []*noms.ReadRange{
{Start: partialKey, Inclusive: true, Reverse: false, Check: noms.InRangeCheckPartial(partialKey)}})
defer mapIter.Close(ctx)
var r row.Row
@@ -243,7 +243,7 @@ func (tea *tableEditAccumulatorImpl) flushUncommitted() {
tea.commitEAId = invalidEaId
}
tea.uncommittedEA = edits.NewAsyncSortedEditsWithDefaults(tea.nbf)
tea.uncommittedEA = edits.NewAsyncSortedEditsWithDefaults(tea.vr)
tea.uncommittedEAId = tea.accumulatorIdx
tea.accumulatorIdx++
@@ -262,7 +262,7 @@ func (tea *tableEditAccumulatorImpl) flushUncommitted() {
tea.flusher.Flush(tea.uncommittedEA, tea.uncommittedEAId)
// initialize a new types.EditAccumulator for additional uncommitted edits to be written to.
tea.uncommittedEA = edits.NewAsyncSortedEditsWithDefaults(tea.nbf)
tea.uncommittedEA = edits.NewAsyncSortedEditsWithDefaults(tea.vr)
tea.uncommittedEAId = tea.accumulatorIdx
tea.accumulatorIdx++
}
@@ -378,7 +378,7 @@ func (tea *tableEditAccumulatorImpl) MaterializeEdits(ctx context.Context, nbf *
return tea.rowData, nil
}
committedEP, err := tea.commitEA.FinishedEditing()
committedEP, err := tea.commitEA.FinishedEditing(ctx)
tea.commitEA = nil
if err != nil {
return types.EmptyMap, err
@@ -401,7 +401,7 @@ func (tea *tableEditAccumulatorImpl) MaterializeEdits(ctx context.Context, nbf *
}
}()
accEdits, err := edits.NewEPMerger(ctx, nbf, eps)
accEdits, err := edits.NewEPMerger(ctx, tea.vr, eps)
if err != nil {
return types.EmptyMap, err
}
@@ -416,7 +416,7 @@ func (tea *tableEditAccumulatorImpl) MaterializeEdits(ctx context.Context, nbf *
tea.committed = newInMemModifications()
tea.commitEAId = tea.accumulatorIdx
tea.accumulatorIdx++
tea.commitEA = edits.NewAsyncSortedEditsWithDefaults(nbf)
tea.commitEA = edits.NewAsyncSortedEditsWithDefaults(tea.vr)
tea.committedEaIds = set.NewUint64Set(nil)
tea.uncommittedEaIds = set.NewUint64Set(nil)
@@ -447,15 +447,15 @@ func NewDbEaFactory(directory string, vrw types.ValueReadWriter) DbEaFactory {
// NewTableEA creates a TableEditAccumulator
func (deaf *dbEaFactory) NewTableEA(ctx context.Context, rowData types.Map) TableEditAccumulator {
return &tableEditAccumulatorImpl{
nbf: rowData.Format(),
vr: deaf.vrw,
rowData: rowData,
committed: newInMemModifications(),
uncommitted: newInMemModifications(),
accumulatorIdx: 1,
flusher: edits.NewDiskEditFlusher(ctx, deaf.directory, rowData.Format(), deaf.vrw),
flusher: edits.NewDiskEditFlusher(ctx, deaf.directory, deaf.vrw),
committedEaIds: set.NewUint64Set(nil),
uncommittedEaIds: set.NewUint64Set(nil),
commitEA: edits.NewAsyncSortedEditsWithDefaults(rowData.Format()),
commitEA: edits.NewAsyncSortedEditsWithDefaults(deaf.vrw),
commitEAId: 0,
flushingUncommitted: false,
lastFlush: 0,
@@ -467,14 +467,14 @@ func (deaf *dbEaFactory) NewTableEA(ctx context.Context, rowData types.Map) Tabl
// NewIndexEA creates an IndexEditAccumulator
func (deaf *dbEaFactory) NewIndexEA(ctx context.Context, rowData types.Map) IndexEditAccumulator {
return &indexEditAccumulatorImpl{
nbf: rowData.Format(),
vr: deaf.vrw,
rowData: rowData,
committed: newInMemIndexEdits(),
uncommitted: newInMemIndexEdits(),
commitEA: edits.NewAsyncSortedEditsWithDefaults(rowData.Format()),
commitEA: edits.NewAsyncSortedEditsWithDefaults(deaf.vrw),
commitEAId: 0,
accumulatorIdx: 1,
flusher: edits.NewDiskEditFlusher(ctx, deaf.directory, rowData.Format(), deaf.vrw),
flusher: edits.NewDiskEditFlusher(ctx, deaf.directory, deaf.vrw),
committedEaIds: set.NewUint64Set(nil),
uncommittedEaIds: set.NewUint64Set(nil),
flushingUncommitted: false,
@@ -49,7 +49,7 @@ func NewInMemTableWithDataAndValidationType(sch schema.Schema, rows []row.Row) *
}
// AppendRow appends a row. Appended rows must be valid for the table's schema. Rows are kept sorted as they are inserted.
func (imt *InMemTable) AppendRow(r row.Row) error {
func (imt *InMemTable) AppendRow(ctx context.Context, vr types.ValueReader, r row.Row) error {
if isv, err := row.IsValid(r, imt.sch); err != nil {
return err
} else if !isv {
@@ -87,7 +87,7 @@ func (imt *InMemTable) AppendRow(r row.Row) error {
jRow := imt.rows[j]
isLess := false
isLess, err = iRow.NomsMapKey(imt.sch).Less(r.Format(), jRow.NomsMapKey(imt.sch))
isLess, err = iRow.NomsMapKey(imt.sch).Less(ctx, vr.Format(), jRow.NomsMapKey(imt.sch))
return isLess
})
@@ -73,11 +73,13 @@ var rows = []row.Row{
}
func TestInMemTable(t *testing.T) {
vrw := types.NewMemoryValueStore()
ctx := context.Background()
imt := NewInMemTable(rowSch)
func() {
for _, r := range rows {
err := imt.AppendRow(r)
err := imt.AppendRow(ctx, vrw, r)
if err != nil {
t.Fatal("Failed to write row")
@@ -31,13 +31,13 @@ import (
type InRangeCheck interface {
// Check is called as the reader iterates through values, reporting whether the next value read is valid
// and whether it should be skipped or returned.
Check(ctx context.Context, tuple types.Tuple) (valid bool, skip bool, err error)
Check(ctx context.Context, vr types.ValueReader, tuple types.Tuple) (valid bool, skip bool, err error)
}
// InRangeCheckAlways will always return that the given tuple is valid and not to be skipped.
type InRangeCheckAlways struct{}
func (InRangeCheckAlways) Check(context.Context, types.Tuple) (valid bool, skip bool, err error) {
func (InRangeCheckAlways) Check(context.Context, types.ValueReader, types.Tuple) (valid bool, skip bool, err error) {
return true, false, nil
}
@@ -48,7 +48,7 @@ func (InRangeCheckAlways) String() string {
// InRangeCheckNever will always return that the given tuple is not valid.
type InRangeCheckNever struct{}
func (InRangeCheckNever) Check(context.Context, types.Tuple) (valid bool, skip bool, err error) {
func (InRangeCheckNever) Check(context.Context, types.ValueReader, types.Tuple) (valid bool, skip bool, err error) {
return false, false, nil
}
@@ -59,7 +59,7 @@ func (InRangeCheckNever) String() string {
// InRangeCheckPartial will check if the given tuple contains the aliased tuple as a partial key.
type InRangeCheckPartial types.Tuple
func (ircp InRangeCheckPartial) Check(_ context.Context, t types.Tuple) (valid bool, skip bool, err error) {
func (ircp InRangeCheckPartial) Check(_ context.Context, vr types.ValueReader, t types.Tuple) (valid bool, skip bool, err error) {
return t.StartsWith(types.Tuple(ircp)), false, nil
}
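Custom range checks implement the widened signature the same way. Here is a hypothetical check that accepts every tuple but skips those whose first column value is an odd `types.Uint`, ignoring the `ValueReader` as checks that need no value reads may do (compare the `greaterThanCheck`/`lessThanCheck` test helpers further down):

```go
type skipOddCheck struct{}

func (skipOddCheck) Check(ctx context.Context, _ types.ValueReader, t types.Tuple) (valid bool, skip bool, err error) {
	v, err := t.Get(1) // index 1: noms key tuples interleave tags and values
	if err != nil {
		return false, false, err
	}
	u, ok := v.(types.Uint)
	if !ok {
		return false, false, nil // unexpected type: end the range scan
	}
	return true, uint64(u)%2 == 1, nil // stay in range; skip odd values
}
```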
@@ -125,6 +125,7 @@ func NewRangeStartingAfter(key types.Tuple, inRangeCheck InRangeCheck) *ReadRang
// NomsRangeReader reads values in one or more ranges from a map
type NomsRangeReader struct {
vr types.ValueReader
sch schema.Schema
m types.Map
ranges []*ReadRange
@@ -135,8 +136,9 @@ type NomsRangeReader struct {
}
// NewNomsRangeReader creates a NomsRangeReader
func NewNomsRangeReader(sch schema.Schema, m types.Map, ranges []*ReadRange) *NomsRangeReader {
func NewNomsRangeReader(vr types.ValueReader, sch schema.Schema, m types.Map, ranges []*ReadRange) *NomsRangeReader {
return &NomsRangeReader{
vr,
sch,
m,
ranges,
@@ -172,8 +174,6 @@ func (nrr *NomsRangeReader) ReadKey(ctx context.Context) (types.Tuple, error) {
}
func (nrr *NomsRangeReader) ReadKV(ctx context.Context) (types.Tuple, types.Tuple, error) {
nbf := nrr.m.Format()
var err error
var k types.Tuple
var v types.Tuple
@@ -205,7 +205,7 @@ func (nrr *NomsRangeReader) ReadKV(ctx context.Context) (types.Tuple, types.Tupl
if err == nil && !r.Inclusive {
var res int
res, err = r.Start.Compare(nbf, k)
res, err = r.Start.Compare(ctx, nrr.vr.Format(), k)
if err == nil && res == 0 {
k, v, err = nrr.itr.NextTuple(ctx)
}
@@ -219,7 +219,7 @@ func (nrr *NomsRangeReader) ReadKV(ctx context.Context) (types.Tuple, types.Tupl
}
if err != io.EOF {
valid, skip, err := nrr.currCheck.Check(ctx, k)
valid, skip, err := nrr.currCheck.Check(ctx, nrr.vr, k)
if err != nil {
return types.Tuple{}, types.Tuple{}, err
}
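The reader requires the `types.ValueReader` up front: `ReadKV` uses it both for the non-inclusive start `Compare` and as the new argument passed through to each range's `Check`. A usage sketch mirroring the tests below, with `vrw`, `sch`, `m`, and `startKey` standing in for a store, schema, map, and partial key:

```go
rd := NewNomsRangeReader(vrw, sch, m, []*ReadRange{
	{Start: startKey, Inclusive: true, Reverse: false, Check: InRangeCheckPartial(startKey)},
})
defer rd.Close(ctx)

for {
	k, v, err := rd.ReadKV(ctx)
	if err == io.EOF {
		break
	}
	if err != nil {
		return err
	}
	_, _ = k, v // consume the key/value tuples
}
```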
@@ -128,7 +128,7 @@ func TestRangeReader(t *testing.T) {
for _, test := range rangeReaderTests {
t.Run(test.name, func(t *testing.T) {
ctx := context.Background()
rd := NewNomsRangeReader(sch, m, test.ranges)
rd := NewNomsRangeReader(vrw, sch, m, test.ranges)
var keys []int64
for {
@@ -170,7 +170,7 @@ func TestRangeReaderOnEmptyMap(t *testing.T) {
for _, test := range rangeReaderTests {
t.Run(test.name, func(t *testing.T) {
ctx := context.Background()
rd := NewNomsRangeReader(sch, m, test.ranges)
rd := NewNomsRangeReader(vrw, sch, m, test.ranges)
r, err := rd.ReadRow(ctx)
assert.Equal(t, io.EOF, err)
@@ -181,7 +181,7 @@ func TestRangeReaderOnEmptyMap(t *testing.T) {
type greaterThanCheck int64
func (n greaterThanCheck) Check(ctx context.Context, k types.Tuple) (valid bool, skip bool, err error) {
func (n greaterThanCheck) Check(ctx context.Context, _ types.ValueReader, k types.Tuple) (valid bool, skip bool, err error) {
col0, err := k.Get(1)
if err != nil {
@@ -193,7 +193,7 @@ func (n greaterThanCheck) Check(ctx context.Context, k types.Tuple) (valid bool,
type lessThanCheck int64
func (n lessThanCheck) Check(ctx context.Context, k types.Tuple) (valid bool, skip bool, err error) {
func (n lessThanCheck) Check(ctx context.Context, _ types.ValueReader, k types.Tuple) (valid bool, skip bool, err error) {
col0, err := k.Get(1)
if err != nil {
@@ -160,7 +160,7 @@ func mustList(l types.List, err error) types.List {
func validate(ctx context.Context, nbf *types.NomsBinFormat, r types.Value) bool {
rootType := mustType(types.MakeMapType(types.PrimitiveTypeMap[types.StringKind], mustType(types.MakeRefType(types.PrimitiveTypeMap[types.ValueKind]))))
if isSub, err := types.IsValueSubtypeOf(nbf, r, rootType); err != nil {
if isSub, err := types.IsValueSubtypeOf(ctx, nbf, r, rootType); err != nil {
panic(err)
} else if !isSub {
fmt.Fprintf(os.Stderr, "Root of database must be %s, but you specified: %s\n", mustString(rootType.Describe(ctx)), mustString(mustType(types.TypeOf(r)).Describe(ctx)))
@@ -395,7 +395,7 @@ func FindCommonAncestor(ctx context.Context, c1, c2 *Commit, vr1, vr2 types.Valu
}
return h1, true, nil
}
if pi1.Less(vr1.Format(), pi2) {
if pi1.Less(ctx, vr1.Format(), pi2) {
// TODO: Should pi2.Seek(pi1.curr), but MapIterator does not expose Seek yet.
if !pi2.Next(ctx) {
return hash.Hash{}, false, firstError(pi1.Err(), pi2.Err())
@@ -695,9 +695,9 @@ func firstError(l, r error) error {
return r
}
func IsCommit(v types.Value) (bool, error) {
func IsCommit(ctx context.Context, v types.Value) (bool, error) {
if s, ok := v.(types.Struct); ok {
return types.IsValueSubtypeOf(s.Format(), v, valueCommitType)
return types.IsValueSubtypeOf(ctx, s.Format(), v, valueCommitType)
} else if sm, ok := v.(types.SerialMessage); ok {
data := []byte(sm)
return serial.GetFileID(data) == serial.CommitFileID, nil
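`IsCommit` (and `IsTag`, at the end of this diff) now take a context because the struct path delegates to `types.IsValueSubtypeOf`, which is itself context-aware after this change. A hedged caller sketch, with `vr` a `types.ValueReader` and `addr` the address of a purported commit:

```go
v, err := vr.ReadValue(ctx, addr)
if err != nil {
	return err
}
isCommit, err := datas.IsCommit(ctx, v)
if err != nil {
	return err
}
if !isCommit {
	return fmt.Errorf("%s does not point at a commit", addr)
}
```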
@@ -130,7 +130,7 @@ type parentsClosureIter interface {
Err() error
Hash() hash.Hash
Height() uint64
Less(*types.NomsBinFormat, parentsClosureIter) bool
Less(ctx context.Context, nbf *types.NomsBinFormat, itr parentsClosureIter) bool
Next(context.Context) bool
}
@@ -175,12 +175,12 @@ func (i *parentsClosureIterator) Hash() hash.Hash {
return h
}
func (i *parentsClosureIterator) Less(f *types.NomsBinFormat, otherI parentsClosureIter) bool {
func (i *parentsClosureIterator) Less(ctx context.Context, nbf *types.NomsBinFormat, otherI parentsClosureIter) bool {
other := otherI.(*parentsClosureIterator)
if i.err != nil || other.err != nil {
return false
}
ret, err := i.curr.Less(f, other.curr)
ret, err := i.curr.Less(ctx, nbf, other.curr)
if err != nil {
i.err = err
other.err = err
@@ -246,7 +246,7 @@ func (i *fbParentsClosureIterator) Next(ctx context.Context) bool {
return true
}
func (i *fbParentsClosureIterator) Less(f *types.NomsBinFormat, otherI parentsClosureIter) bool {
func (i *fbParentsClosureIterator) Less(ctx context.Context, nbf *types.NomsBinFormat, otherI parentsClosureIter) bool {
other := otherI.(*fbParentsClosureIterator)
return i.curr.Less(other.curr)
}
@@ -246,24 +246,25 @@ func TestNewCommit(t *testing.T) {
func TestCommitWithoutMetaField(t *testing.T) {
assert := assert.New(t)
ctx := context.Background()
storage := &chunks.TestStorage{}
db := NewDatabase(storage.NewViewWithDefaultFormat()).(*database)
defer db.Close()
metaCommit, err := types.NewStruct(db.Format(), "Commit", types.StructData{
"value": types.Float(9),
"parents": mustSet(types.NewSet(context.Background(), db)),
"parents": mustSet(types.NewSet(ctx, db)),
"meta": types.EmptyStruct(db.Format()),
})
assert.NoError(err)
assert.True(IsCommit(metaCommit))
assert.True(IsCommit(ctx, metaCommit))
noMetaCommit, err := types.NewStruct(db.Format(), "Commit", types.StructData{
"value": types.Float(9),
"parents": mustSet(types.NewSet(context.Background(), db)),
"parents": mustSet(types.NewSet(ctx, db)),
})
assert.NoError(err)
assert.False(IsCommit(noMetaCommit))
assert.False(IsCommit(ctx, noMetaCommit))
}
func mustCommitToTargetHashes(vrw types.ValueReadWriter, commits ...types.Value) []hash.Hash {
@@ -221,7 +221,7 @@ func (db *database) datasetFromMap(ctx context.Context, datasetID string, dsmap
return Dataset{}, err
}
}
return newDataset(db, datasetID, head, headAddr)
return newDataset(ctx, db, datasetID, head, headAddr)
} else if rmdsmap, ok := dsmap.(refmapDatasetsMap); ok {
var err error
curr, err := rmdsmap.am.Get(ctx, datasetID)
@@ -235,7 +235,7 @@ func (db *database) datasetFromMap(ctx context.Context, datasetID string, dsmap
return Dataset{}, err
}
}
return newDataset(db, datasetID, head, curr)
return newDataset(ctx, db, datasetID, head, curr)
} else {
return Dataset{}, errors.New("unimplemented or unsupported DatasetsMap type")
}
@@ -246,7 +246,7 @@ func (db *database) readHead(ctx context.Context, addr hash.Hash) (dsHead, error
if err != nil {
return nil, err
}
return newHead(head, addr)
return newHead(ctx, head, addr)
}
func (db *database) Close() error {
@@ -268,7 +268,7 @@ func (db *database) doSetHead(ctx context.Context, ds Dataset, addr hash.Hash) e
headType := newHead.TypeName()
switch headType {
case commitName:
iscommit, err := IsCommit(newVal)
iscommit, err := IsCommit(ctx, newVal)
if err != nil {
return err
}
@@ -276,7 +276,7 @@ func (db *database) doSetHead(ctx context.Context, ds Dataset, addr hash.Hash) e
return fmt.Errorf("SetHead failed: referred-to value is not a commit")
}
case tagName:
istag, err := IsTag(newVal)
istag, err := IsTag(ctx, newVal)
if err != nil {
return err
}
@@ -291,7 +291,7 @@ func (db *database) doSetHead(ctx context.Context, ds Dataset, addr hash.Hash) e
if err != nil {
return err
}
iscommit, err := IsCommit(commitval)
iscommit, err := IsCommit(ctx, commitval)
if err != nil {
return err
}
@@ -376,7 +376,7 @@ func (db *database) doFastForward(ctx context.Context, ds Dataset, newHeadAddr h
}
v := newHead.value()
iscommit, err := IsCommit(v)
iscommit, err := IsCommit(ctx, v)
if err != nil {
return err
}
@@ -883,7 +883,7 @@ func (db *database) validateRefAsCommit(ctx context.Context, r types.Ref) (types
var v types.Value
v = rHead.(nomsHead).st
is, err := IsCommit(v)
is, err := IsCommit(ctx, v)
if err != nil {
return types.Struct{}, err
@@ -103,19 +103,21 @@ func (suite *DatabaseSuite) TestTolerateUngettableRefs() {
}
func (suite *DatabaseSuite) TestCompletenessCheck() {
ctx := context.Background()
datasetID := "ds1"
ds1, err := suite.db.GetDataset(context.Background(), datasetID)
ds1, err := suite.db.GetDataset(ctx, datasetID)
suite.NoError(err)
s, err := types.NewSet(context.Background(), suite.db)
s, err := types.NewSet(ctx, suite.db)
suite.NoError(err)
se := s.Edit()
for i := 0; i < 100; i++ {
ref, err := suite.db.WriteValue(context.Background(), types.Float(100))
ref, err := suite.db.WriteValue(ctx, types.Float(100))
suite.NoError(err)
se.Insert(ref)
se.Insert(ctx, ref)
}
s, err = se.Set(context.Background())
s, err = se.Set(ctx)
suite.NoError(err)
ds1, err = CommitValue(context.Background(), suite.db, ds1, s)
@@ -124,11 +126,11 @@ func (suite *DatabaseSuite) TestCompletenessCheck() {
s = mustHeadValue(ds1).(types.Set)
ref, err := types.NewRef(types.Float(1000), suite.db.Format())
suite.NoError(err)
se, err = s.Edit().Insert(ref)
se, err = s.Edit().Insert(ctx, ref)
suite.NoError(err)
s, err = se.Set(context.Background()) // dangling ref
s, err = se.Set(ctx) // dangling ref
suite.NoError(err)
_, err = CommitValue(context.Background(), suite.db, ds1, s)
_, err = CommitValue(ctx, suite.db, ds1, s)
suite.Error(err)
}
@@ -334,7 +336,7 @@ func assertMapOfStringToRefOfCommit(ctx context.Context, proposed, datasets type
}
if targetValue, err := ref.TargetValue(ctx, vr); err != nil {
d.PanicIfError(err)
} else if is, err := IsCommit(targetValue); err != nil {
} else if is, err := IsCommit(ctx, targetValue); err != nil {
d.PanicIfError(err)
} else if !is {
d.Panic("Root of a Database must be a Map<String, Ref<Commit>>, but the ref at key %s points to a %s", change.Key.(types.String), mustString(mustType(types.TypeOf(targetValue)).Describe(ctx)))
+6 -6
@@ -447,7 +447,7 @@ func LoadRootNomsValueFromRootIshAddr(ctx context.Context, vr types.ValueReader,
if err != nil {
return nil, err
}
h, err := newHead(v, addr)
h, err := newHead(ctx, v, addr)
if err != nil {
return nil, err
}
@@ -470,7 +470,7 @@ func LoadRootNomsValueFromRootIshAddr(ctx context.Context, vr types.ValueReader,
}
}
func newHead(head types.Value, addr hash.Hash) (dsHead, error) {
func newHead(ctx context.Context, head types.Value, addr hash.Hash) (dsHead, error) {
if head == nil {
return nil, nil
}
@@ -492,12 +492,12 @@ func newHead(head types.Value, addr hash.Hash) (dsHead, error) {
}
}
matched, err := IsCommit(head)
matched, err := IsCommit(ctx, head)
if err != nil {
return nil, err
}
if !matched {
matched, err = IsTag(head)
matched, err = IsTag(ctx, head)
if err != nil {
return nil, err
}
@@ -521,8 +521,8 @@ func newHead(head types.Value, addr hash.Hash) (dsHead, error) {
return nomsHead{head.(types.Struct), addr}, nil
}
func newDataset(db *database, id string, head types.Value, addr hash.Hash) (Dataset, error) {
h, err := newHead(head, addr)
func newDataset(ctx context.Context, db *database, id string, head types.Value, addr hash.Hash) (Dataset, error) {
h, err := newHead(ctx, head, addr)
if err != nil {
return Dataset{}, err
}
+1 -1
@@ -38,7 +38,7 @@ func NewStash(ctx context.Context, nbf *types.NomsBinFormat, vrw types.ValueRead
return hash.Hash{}, types.Ref{}, err
}
isCommit, err := IsCommit(headCommit)
isCommit, err := IsCommit(ctx, headCommit)
if err != nil {
return hash.Hash{}, types.Ref{}, err
}
+3 -3
@@ -57,7 +57,7 @@ func newTag(ctx context.Context, db *database, commitAddr hash.Hash, meta *TagMe
if err != nil {
return hash.Hash{}, types.Ref{}, err
}
iscommit, err := IsCommit(commitSt)
iscommit, err := IsCommit(ctx, commitSt)
if err != nil {
return hash.Hash{}, types.Ref{}, err
}
@@ -132,9 +132,9 @@ func tag_flatbuffer(commitAddr hash.Hash, meta *TagMeta) serial.Message {
return serial.FinishMessage(builder, serial.TagEnd(builder), []byte(serial.TagFileID))
}
func IsTag(v types.Value) (bool, error) {
func IsTag(ctx context.Context, v types.Value) (bool, error) {
if s, ok := v.(types.Struct); ok {
return types.IsValueSubtypeOf(s.Format(), v, valueTagType)
return types.IsValueSubtypeOf(ctx, s.Format(), v, valueTagType)
} else if sm, ok := v.(types.SerialMessage); ok {
data := []byte(sm)
return serial.GetFileID(data) == serial.TagFileID, nil
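For orientation, a minimal sketch of checking a resolved head value against both predicates under the new signatures, as doSetHead does above; validateHead is a hypothetical helper, and the surrounding datas package imports (context, fmt) are assumed:

func validateHead(ctx context.Context, v types.Value) error {
	// both predicates now take the caller's context
	if ok, err := IsCommit(ctx, v); err != nil {
		return err
	} else if ok {
		return nil
	}
	if ok, err := IsTag(ctx, v); err != nil {
		return err
	} else if ok {
		return nil
	}
	return fmt.Errorf("head value is neither a commit nor a tag")
}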
+8 -8
@@ -44,14 +44,14 @@ import (
// one is applied in order. When done in combination with the stack, this enables
// all Differences that change a particular node to be applied to that node
// before it gets assigned back to its parent.
func Apply(ctx context.Context, nbf *types.NomsBinFormat, root types.Value, patch Patch) (types.Value, error) {
func Apply(ctx context.Context, vr types.ValueReader, root types.Value, patch Patch) (types.Value, error) {
if len(patch) == 0 {
return root, nil
}
var lastPath types.Path
stack := patchStack{}
types.SortWithErroringLess(PatchSort{patch, nbf})
types.SortWithErroringLess(ctx, vr.Format(), PatchSort{patch})
// Push the element on the stack that corresponds to the root
// node.
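For call sites, a minimal sketch of the updated entry point; applyPatch is a hypothetical wrapper, and a types.ValueReadWriter satisfies the new types.ValueReader parameter:

func applyPatch(ctx context.Context, vrw types.ValueReadWriter, root types.Value, p Patch) (types.Value, error) {
	// Apply now threads ctx and derives the format from the ValueReader
	// instead of taking a *NomsBinFormat directly
	return Apply(ctx, vrw, root, p)
}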
@@ -198,7 +198,7 @@ func (stack *patchStack) updateNode(ctx context.Context, top *stackElem, parent
}
case types.Set:
if top.oldValue != nil {
se, err := el.Edit().Remove(top.oldValue)
se, err := el.Edit().Remove(ctx, top.oldValue)
if err != nil {
return nil, err
@@ -212,7 +212,7 @@ func (stack *patchStack) updateNode(ctx context.Context, top *stackElem, parent
}
if top.newValue != nil {
se, err := el.Edit().Insert(top.newValue)
se, err := el.Edit().Insert(ctx, top.newValue)
if err != nil {
return nil, err
@@ -232,7 +232,7 @@ func (stack *patchStack) updateNode(ctx context.Context, top *stackElem, parent
case types.Set:
switch top.changeType {
case types.DiffChangeAdded:
se, err := el.Edit().Insert(top.newValue)
se, err := el.Edit().Insert(ctx, top.newValue)
if err != nil {
return nil, err
@@ -240,7 +240,7 @@ func (stack *patchStack) updateNode(ctx context.Context, top *stackElem, parent
return se.Set(ctx)
case types.DiffChangeRemoved:
se, err := el.Edit().Remove(top.oldValue)
se, err := el.Edit().Remove(ctx, top.oldValue)
if err != nil {
return nil, err
@@ -248,13 +248,13 @@ func (stack *patchStack) updateNode(ctx context.Context, top *stackElem, parent
return se.Set(ctx)
case types.DiffChangeModified:
se, err := el.Edit().Remove(top.oldValue)
se, err := el.Edit().Remove(ctx, top.oldValue)
if err != nil {
return nil, err
}
se, err = se.Insert(top.newValue)
se, err = se.Insert(ctx, top.newValue)
if err != nil {
return nil, err
+2 -2
@@ -169,7 +169,7 @@ func getPatch(g1, g2 types.Value) (Patch, error) {
func checkApplyPatch(assert *assert.Assertions, vrw types.ValueReadWriter, g1, expectedG2 types.Value, k1, k2 string) {
patch, err := getPatch(g1, expectedG2)
assert.NoError(err)
g2, err := Apply(context.Background(), vrw.Format(), g1, patch)
g2, err := Apply(context.Background(), vrw, g1, patch)
assert.NoError(err)
assert.True(expectedG2.Equals(g2), "failed to apply diffs for k1: %s and k2: %s", k1, k2)
}
@@ -298,7 +298,7 @@ func checkApplyDiffs(a *assert.Assertions, vrw types.ValueReadWriter, n1, n2 typ
a.NoError(derr)
res, err := Apply(context.Background(), vrw.Format(), n1, difs)
res, err := Apply(context.Background(), vrw, n1, difs)
a.NoError(err)
a.True(n2.Equals(res))
}
+9 -9
@@ -23,6 +23,7 @@ package diff
import (
"bytes"
"context"
"github.com/dolthub/dolt/go/store/types"
)
@@ -34,7 +35,6 @@ type Patch []Difference
type PatchSort struct {
patch Patch
nbf *types.NomsBinFormat
}
func (ps PatchSort) Swap(i, j int) {
@@ -47,22 +47,22 @@ func (ps PatchSort) Len() int {
var vals = map[types.DiffChangeType]int{types.DiffChangeRemoved: 0, types.DiffChangeModified: 1, types.DiffChangeAdded: 2}
func (ps PatchSort) Less(i, j int) (bool, error) {
func (ps PatchSort) Less(ctx context.Context, nbf *types.NomsBinFormat, i, j int) (bool, error) {
if ps.patch[i].Path.Equals(ps.patch[j].Path) {
return vals[ps.patch[i].ChangeType] < vals[ps.patch[j].ChangeType], nil
}
return pathIsLess(ps.nbf, ps.patch[i].Path, ps.patch[j].Path)
return pathIsLess(ctx, nbf, ps.patch[i].Path, ps.patch[j].Path)
}
// Utility methods on path
// TODO: Should these be on types.Path & types.PathPart?
func pathIsLess(nbf *types.NomsBinFormat, p1, p2 types.Path) (bool, error) {
func pathIsLess(ctx context.Context, nbf *types.NomsBinFormat, p1, p2 types.Path) (bool, error) {
for i, pp1 := range p1 {
if len(p2) == i {
return false, nil // p1 > p2
}
idx, err := pathPartCompare(nbf, pp1, p2[i])
idx, err := pathPartCompare(ctx, nbf, pp1, p2[i])
if err != nil {
return false, err
@@ -97,7 +97,7 @@ func fieldPathCompare(pp types.FieldPath, o types.PathPart) int {
panic("unreachable")
}
func indexPathCompare(nbf *types.NomsBinFormat, pp types.IndexPath, o types.PathPart) (int, error) {
func indexPathCompare(ctx context.Context, nbf *types.NomsBinFormat, pp types.IndexPath, o types.PathPart) (int, error) {
switch opp := o.(type) {
case types.FieldPath:
return 1, nil
@@ -111,7 +111,7 @@ func indexPathCompare(nbf *types.NomsBinFormat, pp types.IndexPath, o types.Path
}
return 1, nil
}
if isLess, err := pp.Index.Less(nbf, opp.Index); err != nil {
if isLess, err := pp.Index.Less(ctx, nbf, opp.Index); err != nil {
return 0, err
} else if isLess {
return -1, nil
@@ -148,12 +148,12 @@ func hashIndexPathCompare(pp types.HashIndexPath, o types.PathPart) int {
panic("unreachable")
}
func pathPartCompare(nbf *types.NomsBinFormat, pp, pp2 types.PathPart) (int, error) {
func pathPartCompare(ctx context.Context, nbf *types.NomsBinFormat, pp, pp2 types.PathPart) (int, error) {
switch pp1 := pp.(type) {
case types.FieldPath:
return fieldPathCompare(pp1, pp2), nil
case types.IndexPath:
return indexPathCompare(nbf, pp1, pp2)
return indexPathCompare(ctx, nbf, pp1, pp2)
case types.HashIndexPath:
return hashIndexPathCompare(pp1, pp2), nil
}
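A minimal sketch of sorting a Patch after this change; sortPatch is a hypothetical wrapper showing that the format moved from the PatchSort struct to the SortWithErroringLess call:

func sortPatch(ctx context.Context, nbf *types.NomsBinFormat, p Patch) error {
	// PatchSort no longer carries the format; ctx and nbf are supplied here
	return types.SortWithErroringLess(ctx, nbf, PatchSort{p})
}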
+11 -7
@@ -22,6 +22,7 @@
package diff
import (
"context"
"math/rand"
"testing"
@@ -33,6 +34,7 @@ import (
func TestPatchPathPartCompare(t *testing.T) {
assert := assert.New(t)
ctx := context.Background()
vs := newTestValueStore()
defer vs.Close()
@@ -62,11 +64,11 @@ func TestPatchPathPartCompare(t *testing.T) {
}
for i, tc := range testCases {
res01, err := pathPartCompare(vs.Format(), tc[0], tc[1])
res01, err := pathPartCompare(ctx, vs.Format(), tc[0], tc[1])
require.NoError(t, err)
res00, err := pathPartCompare(vs.Format(), tc[0], tc[0])
res00, err := pathPartCompare(ctx, vs.Format(), tc[0], tc[0])
require.NoError(t, err)
res10, err := pathPartCompare(vs.Format(), tc[1], tc[0])
res10, err := pathPartCompare(ctx, vs.Format(), tc[1], tc[0])
require.NoError(t, err)
assert.Equal(-1, res01, "test case %d failed, pp0: %s, pp1: %s", i, tc[0], tc[1])
@@ -77,6 +79,7 @@ func TestPatchPathPartCompare(t *testing.T) {
func TestPatchPathIsLess(t *testing.T) {
assert := assert.New(t)
ctx := context.Background()
vs := newTestValueStore()
defer vs.Close()
@@ -92,11 +95,11 @@ func TestPatchPathIsLess(t *testing.T) {
for i, tc := range testCases {
p0 := mustParsePath(assert, tc[0])
p1 := mustParsePath(assert, tc[1])
zeroLTOne, err := pathIsLess(vs.Format(), p0, p1)
zeroLTOne, err := pathIsLess(ctx, vs.Format(), p0, p1)
require.NoError(t, err)
zeroLTZero, err := pathIsLess(vs.Format(), p0, p0)
zeroLTZero, err := pathIsLess(ctx, vs.Format(), p0, p0)
require.NoError(t, err)
oneLTZero, err := pathIsLess(vs.Format(), p1, p0)
oneLTZero, err := pathIsLess(ctx, vs.Format(), p1, p0)
require.NoError(t, err)
assert.True(zeroLTOne, "test case %d failed", i)
assert.False(zeroLTZero, "test case %d failed", i)
@@ -108,6 +111,7 @@ func TestPatchPathIsLess(t *testing.T) {
func TestPatchSort(t *testing.T) {
assert := assert.New(t)
ctx := context.Background()
vs := newTestValueStore()
defer vs.Close()
@@ -127,6 +131,6 @@ func TestPatchSort(t *testing.T) {
shuffledPaths = append(shuffledPaths, sortedPaths[idx])
}
types.SortWithErroringLess(PatchSort{shuffledPaths, vs.Format()})
types.SortWithErroringLess(ctx, vs.Format(), PatchSort{shuffledPaths})
assert.Equal(sortedPaths, shuffledPaths)
}
+2 -2
@@ -327,7 +327,7 @@ func (m *merger) threeWaySetMerge(ctx context.Context, a, b, parent types.Set, p
defer updateProgress(m.progress)
switch change.ChangeType {
case types.DiffChangeAdded, types.DiffChangeModified:
se, err := target.getValue().(types.Set).Edit().Insert(newVal)
se, err := target.getValue().(types.Set).Edit().Insert(ctx, newVal)
if err != nil {
return nil, err
@@ -341,7 +341,7 @@ func (m *merger) threeWaySetMerge(ctx context.Context, a, b, parent types.Set, p
return setCandidate{s}, nil
case types.DiffChangeRemoved:
se, err := target.getValue().(types.Set).Edit().Remove(newVal)
se, err := target.getValue().(types.Set).Edit().Remove(ctx, newVal)
if err != nil {
return nil, err
+2 -2
@@ -95,7 +95,7 @@ func (m *merger) threeWayOrderedSequenceMerge(ctx context.Context, a, b, parent
var err error
noBOrALessB := bChange.Key == nil
if !noBOrALessB {
noBOrALessB, err = aChange.Key.Less(m.vrw.Format(), bChange.Key)
noBOrALessB, err = aChange.Key.Less(ctx, m.vrw.Format(), bChange.Key)
if err != nil {
return err
}
@@ -122,7 +122,7 @@ func (m *merger) threeWayOrderedSequenceMerge(ctx context.Context, a, b, parent
noAOrBLessA := aChange.Key == nil
if !noAOrBLessA {
noAOrBLessA, err = bChange.Key.Less(m.vrw.Format(), aChange.Key)
noAOrBLessA, err = bChange.Key.Less(ctx, m.vrw.Format(), aChange.Key)
if err != nil {
return err
}
+1 -1
@@ -514,7 +514,7 @@ func (p *Parser) parseSet(ctx context.Context) (types.Set, error) {
return types.EmptySet, err
}
se, err = se.Insert(v)
se, err = se.Insert(ctx, v)
if err != nil {
return types.EmptySet, err
+38 -36
@@ -65,6 +65,8 @@ func main() {
elementSizes := []uint64{numberSize, stringSize, structSize}
valueFns := []createValueFn{createNumber, createString, createStruct}
ctx := context.Background()
for i, colType := range collectionTypes {
fmt.Printf("Testing %s: \t\tbuild %d\t\t\tscan %d\t\t\tinsert %d\n", colType, buildCount, buildCount, insertCount)
@@ -77,11 +79,11 @@ func main() {
ns := tree.NewNodeStore(cs)
vrw := types.NewValueStore(cs)
db := datas.NewTypesDatabase(vrw, ns)
ds, err := db.GetDataset(context.Background(), "test")
ds, err := db.GetDataset(ctx, "test")
d.Chk.NoError(err)
t1 := time.Now()
col := buildFns[i](vrw, buildCount, valueFn)
ds, err = datas.CommitValue(context.Background(), db, ds, col)
col := buildFns[i](ctx, vrw, buildCount, valueFn)
ds, err = datas.CommitValue(ctx, db, ds, col)
d.Chk.NoError(err)
buildDuration := time.Since(t1)
@@ -91,7 +93,7 @@ func main() {
d.Chk.NoError(err)
d.Chk.True(ok)
col = val.(types.Collection)
readFns[i](col)
readFns[i](ctx, col)
readDuration := time.Since(t1)
// Build Incrementally
@@ -100,11 +102,11 @@ func main() {
ns = tree.NewNodeStore(cs)
vrw = types.NewValueStore(cs)
db = datas.NewTypesDatabase(vrw, ns)
ds, err = db.GetDataset(context.Background(), "test")
ds, err = db.GetDataset(ctx, "test")
d.Chk.NoError(err)
t1 = time.Now()
col = buildIncrFns[i](vrw, insertCount, valueFn)
ds, err = datas.CommitValue(context.Background(), db, ds, col)
col = buildIncrFns[i](ctx, vrw, insertCount, valueFn)
ds, err = datas.CommitValue(ctx, db, ds, col)
d.Chk.NoError(err)
incrDuration := time.Since(t1)
@@ -124,19 +126,19 @@ func main() {
ns := tree.NewNodeStore(cs)
vrw := types.NewValueStore(cs)
db := datas.NewTypesDatabase(vrw, ns)
ds, err := db.GetDataset(context.Background(), "test")
ds, err := db.GetDataset(ctx, "test")
d.Chk.NoError(err)
blobBytes := makeBlobBytes(*blobSize)
t1 := time.Now()
blob, err := types.NewBlob(context.Background(), vrw, bytes.NewReader(blobBytes))
blob, err := types.NewBlob(ctx, vrw, bytes.NewReader(blobBytes))
d.Chk.NoError(err)
_, err = datas.CommitValue(context.Background(), db, ds, blob)
_, err = datas.CommitValue(ctx, db, ds, blob)
d.Chk.NoError(err)
buildDuration := time.Since(t1)
db = datas.NewDatabase(storage.NewViewWithDefaultFormat())
ds, err = db.GetDataset(context.Background(), "test")
ds, err = db.GetDataset(ctx, "test")
d.Chk.NoError(err)
t1 = time.Now()
blobVal, ok, err := ds.MaybeHeadValue()
@@ -144,7 +146,7 @@ func main() {
d.Chk.True(ok)
blob = blobVal.(types.Blob)
buff := &bytes.Buffer{}
blob.Copy(context.Background(), buff)
blob.Copy(ctx, buff)
outBytes := buff.Bytes()
readDuration := time.Since(t1)
d.PanicIfFalse(bytes.Equal(blobBytes, outBytes))
@@ -156,8 +158,8 @@ func rate(d time.Duration, size uint64) string {
}
type createValueFn func(i uint64) types.Value
type buildCollectionFn func(vrw types.ValueReadWriter, count uint64, createFn createValueFn) types.Collection
type readCollectionFn func(value types.Collection)
type buildCollectionFn func(ctx context.Context, vrw types.ValueReadWriter, count uint64, createFn createValueFn) types.Collection
type readCollectionFn func(ctx context.Context, value types.Collection)
func makeBlobBytes(byteLength uint64) []byte {
buff := &bytes.Buffer{}
@@ -192,21 +194,21 @@ func createStruct(i uint64) types.Value {
return st
}
func buildList(vrw types.ValueReadWriter, count uint64, createFn createValueFn) types.Collection {
func buildList(ctx context.Context, vrw types.ValueReadWriter, count uint64, createFn createValueFn) types.Collection {
values := make([]types.Value, count)
for i := uint64(0); i < count; i++ {
values[i] = createFn(i)
}
l, err := types.NewList(context.Background(), vrw, values...)
l, err := types.NewList(ctx, vrw, values...)
d.Chk.NoError(err)
return l
}
func buildListIncrementally(vrw types.ValueReadWriter, count uint64, createFn createValueFn) types.Collection {
l, err := types.NewList(context.Background(), vrw)
func buildListIncrementally(ctx context.Context, vrw types.ValueReadWriter, count uint64, createFn createValueFn) types.Collection {
l, err := types.NewList(ctx, vrw)
d.Chk.NoError(err)
@@ -216,68 +218,68 @@ func buildListIncrementally(vrw types.ValueReadWriter, count uint64, createFn cr
le.Append(createFn(i))
}
l, err = le.List(context.Background())
l, err = le.List(ctx)
d.Chk.NoError(err)
return l
}
func readList(c types.Collection) {
_ = c.(types.List).IterAll(context.Background(), func(v types.Value, idx uint64) error {
func readList(ctx context.Context, c types.Collection) {
_ = c.(types.List).IterAll(ctx, func(v types.Value, idx uint64) error {
return nil
})
}
func buildSet(vrw types.ValueReadWriter, count uint64, createFn createValueFn) types.Collection {
func buildSet(ctx context.Context, vrw types.ValueReadWriter, count uint64, createFn createValueFn) types.Collection {
values := make([]types.Value, count)
for i := uint64(0); i < count; i++ {
values[i] = createFn(i)
}
s, err := types.NewSet(context.Background(), vrw, values...)
s, err := types.NewSet(ctx, vrw, values...)
d.Chk.NoError(err)
return s
}
func buildSetIncrementally(vrw types.ValueReadWriter, count uint64, createFn createValueFn) types.Collection {
s, err := types.NewSet(context.Background(), vrw)
func buildSetIncrementally(ctx context.Context, vrw types.ValueReadWriter, count uint64, createFn createValueFn) types.Collection {
s, err := types.NewSet(ctx, vrw)
d.Chk.NoError(err)
se := s.Edit()
for i := uint64(0); i < count; i++ {
se.Insert(createFn(i))
se.Insert(ctx, createFn(i))
}
s, err = se.Set(context.Background())
s, err = se.Set(ctx)
d.Chk.NoError(err)
return s
}
func readSet(c types.Collection) {
_ = c.(types.Set).IterAll(context.Background(), func(v types.Value) error {
func readSet(ctx context.Context, c types.Collection) {
_ = c.(types.Set).IterAll(ctx, func(v types.Value) error {
return nil
})
}
func buildMap(vrw types.ValueReadWriter, count uint64, createFn createValueFn) types.Collection {
func buildMap(ctx context.Context, vrw types.ValueReadWriter, count uint64, createFn createValueFn) types.Collection {
values := make([]types.Value, count*2)
for i := uint64(0); i < count*2; i++ {
values[i] = createFn(i)
}
m, err := types.NewMap(context.Background(), vrw, values...)
m, err := types.NewMap(ctx, vrw, values...)
d.Chk.NoError(err)
return m
}
func buildMapIncrementally(vrw types.ValueReadWriter, count uint64, createFn createValueFn) types.Collection {
m, err := types.NewMap(context.Background(), vrw)
func buildMapIncrementally(ctx context.Context, vrw types.ValueReadWriter, count uint64, createFn createValueFn) types.Collection {
m, err := types.NewMap(ctx, vrw)
d.Chk.NoError(err)
me := m.Edit()
@@ -286,14 +288,14 @@ func buildMapIncrementally(vrw types.ValueReadWriter, count uint64, createFn cre
me.Set(createFn(i), createFn(i+1))
}
m, err = me.Map(context.Background())
m, err = me.Map(ctx)
d.Chk.NoError(err)
return m
}
func readMap(c types.Collection) {
_ = c.(types.Map).IterAll(context.Background(), func(k types.Value, v types.Value) error {
func readMap(ctx context.Context, c types.Collection) {
_ = c.(types.Map).IterAll(ctx, func(k types.Value, v types.Value) error {
return nil
})
}
+8 -8
@@ -26,7 +26,7 @@ import (
type EditProvider interface {
// Next returns the next KVP representing the next edit to be applied. Next will always return KVPs
// in key-sorted order. Once all KVPs have been read, io.EOF will be returned.
Next() (*KVP, error)
Next(ctx context.Context) (*KVP, error)
// ReachedEOF returns true once all data is exhausted. If ReachedEOF returns false that does not mean that there
// is more data, only that io.EOF has not been returned previously. If ReachedEOF returns true then all edits have
@@ -40,7 +40,7 @@ type EditProvider interface {
type EmptyEditProvider struct{}
// Next will always return nil, io.EOF
func (eep EmptyEditProvider) Next() (*KVP, error) {
func (eep EmptyEditProvider) Next(ctx context.Context) (*KVP, error) {
return nil, io.EOF
}
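A minimal sketch of the consumption loop implied by this contract; drainEdits is a hypothetical helper in the types package, assuming the "context" and "io" imports:

func drainEdits(ctx context.Context, ep EditProvider) ([]*KVP, error) {
	var kvps []*KVP
	for {
		kvp, err := ep.Next(ctx) // Next now takes the caller's context
		if err == io.EOF {
			return kvps, nil // io.EOF still signals exhaustion
		} else if err != nil {
			return nil, err
		}
		kvps = append(kvps, kvp)
	}
}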
@@ -136,7 +136,7 @@ func ApplyNEdits(ctx context.Context, edits EditProvider, m Map, numEdits int64)
}
// asynchronously add mapWork to be done by the workers
go buildBatches(m.Format(), ae, rc, wc, edits, numEdits)
go buildBatches(ctx, m.valueReadWriter(), ae, rc, wc, edits, numEdits)
// wait for workers to return results and then process them
var ch *sequenceChunker
@@ -279,7 +279,7 @@ func doWork(ctx context.Context, seq orderedSequence, work mapWork) (mapWorkResu
createCur := cur == nil
if cur != nil {
isLess, err := ordKey.Less(seq.format(), curKey)
isLess, err := ordKey.Less(ctx, seq.format(), curKey)
if err != nil {
return mapWorkResult{}, err
@@ -333,12 +333,12 @@ func doWork(ctx context.Context, seq orderedSequence, work mapWork) (mapWorkResu
}
// buildBatches iterates over the sorted edits building batches of work to be completed by the worker threads.
func buildBatches(nbf *NomsBinFormat, ae *atomicerr.AtomicError, rc chan chan mapWorkResult, wc chan mapWork, edits EditProvider, numEdits int64) {
func buildBatches(ctx context.Context, vr ValueReader, ae *atomicerr.AtomicError, rc chan chan mapWorkResult, wc chan mapWork, edits EditProvider, numEdits int64) {
defer close(rc)
defer close(wc)
batchSize := batchSizeStart
nextEdit, err := edits.Next()
nextEdit, err := edits.Next(ctx)
if err == io.EOF {
return
@@ -353,7 +353,7 @@ func buildBatches(nbf *NomsBinFormat, ae *atomicerr.AtomicError, rc chan chan ma
for len(batch) < batchSize {
edit := nextEdit
nextEdit, err = edits.Next()
nextEdit, err = edits.Next(ctx)
if err == io.EOF {
if edit != nil {
batch = append(batch, edit)
@@ -363,7 +363,7 @@ func buildBatches(nbf *NomsBinFormat, ae *atomicerr.AtomicError, rc chan chan ma
return
}
isLess, err := edit.Key.Less(nbf, nextEdit.Key)
isLess, err := edit.Key.Less(ctx, vr.Format(), nextEdit.Key)
if ae.SetIfError(err) {
return
+4 -5
@@ -55,8 +55,8 @@ func NewEmptyBlob(vrw ValueReadWriter) (Blob, error) {
}
// Less implements the LesserValuable interface.
func (b Blob) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
res, err := b.Compare(nbf, other)
func (b Blob) Less(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (bool, error) {
res, err := b.Compare(ctx, nbf, other)
if err != nil {
return false, err
}
@@ -64,10 +64,9 @@ func (b Blob) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
return res < 0, nil
}
func (b Blob) Compare(nbf *NomsBinFormat, other LesserValuable) (int, error) {
func (b Blob) Compare(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (int, error) {
if b2, ok := other.(Blob); ok {
// Blobs can have an arbitrary length, so we compare in chunks rather than loading them entirely
ctx := context.Background()
b1Length := b.Len()
b2Length := b2.Len()
b1Reader := b.Reader(ctx)
@@ -348,7 +347,7 @@ func newEmptyBlobChunker(ctx context.Context, vrw ValueReadWriter) (*sequenceChu
}
func makeBlobLeafChunkFn(vrw ValueReadWriter) makeChunkFn {
return func(level uint64, items []sequenceItem) (Collection, orderedKey, uint64, error) {
return func(ctx context.Context, level uint64, items []sequenceItem) (Collection, orderedKey, uint64, error) {
d.PanicIfFalse(level == 0)
buff := make([]byte, len(items))
+1 -1
@@ -40,7 +40,7 @@ func (b Bool) Equals(other Value) bool {
return b == other
}
func (b Bool) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
func (b Bool) Less(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (bool, error) {
if b2, ok := other.(Bool); ok {
return !bool(b) && bool(b2), nil
}
+10 -7
@@ -317,16 +317,18 @@ func TestCompareTotalOrdering(t *testing.T) {
// Union - values cannot be unions
}
ctx := context.Background()
for i, vi := range values {
for j, vj := range values {
if i == j {
assert.True(vi.Equals(vj))
} else if i < j {
x, err := vi.Less(vrw.Format(), vj)
x, err := vi.Less(ctx, vrw.Format(), vj)
require.NoError(t, err)
assert.True(x)
} else {
x, err := vi.Less(vrw.Format(), vj)
x, err := vi.Less(ctx, vrw.Format(), vj)
require.NoError(t, err)
assert.False(x)
}
@@ -336,27 +338,28 @@ func TestCompareTotalOrdering(t *testing.T) {
func TestCompareDifferentPrimitiveTypes(t *testing.T) {
assert := assert.New(t)
ctx := context.Background()
vrw := newTestValueStore()
defer vrw.Close()
nums := ValueSlice{Float(1), Float(2), Float(3)}
words := ValueSlice{String("k1"), String("v1")}
blob, err := NewBlob(context.Background(), vrw, bytes.NewBuffer([]byte{1, 2, 3}))
blob, err := NewBlob(ctx, vrw, bytes.NewBuffer([]byte{1, 2, 3}))
require.NoError(t, err)
nList, err := NewList(context.Background(), vrw, nums...)
nList, err := NewList(ctx, vrw, nums...)
require.NoError(t, err)
nMap, err := NewMap(context.Background(), vrw, words...)
nMap, err := NewMap(ctx, vrw, words...)
require.NoError(t, err)
nRef, err := NewRef(blob, vrw.Format())
require.NoError(t, err)
nSet, err := NewSet(context.Background(), vrw, nums...)
nSet, err := NewSet(ctx, vrw, nums...)
require.NoError(t, err)
nStruct, err := NewStruct(vrw.Format(), "teststruct", map[string]Value{"f1": Float(1)})
require.NoError(t, err)
vals := ValueSlice{Bool(true), Float(19), String("hellow"), blob, nList, nMap, nRef, nSet, nStruct}
err = SortWithErroringLess(ValueSort{vals, vrw.Format()})
err = SortWithErroringLess(ctx, vrw.Format(), ValueSort{vals})
require.NoError(t, err)
for i, v1 := range vals {
+1 -1
@@ -37,7 +37,7 @@ func (v Decimal) Equals(other Value) bool {
return decimal.Decimal(v).Equal(decimal.Decimal(v2))
}
func (v Decimal) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
func (v Decimal) Less(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (bool, error) {
if v2, ok := other.(Decimal); ok {
return decimal.Decimal(v).LessThan(decimal.Decimal(v2)), nil
}
+6 -6
@@ -26,12 +26,12 @@ type DumbEditAccumulator struct {
pos int
reachedEOF bool
edits KVPSlice
nbf *NomsBinFormat
vr ValueReader
}
// NewDumbEditAccumulator is a factory method for creation of DumbEditAccumulators
func NewDumbEditAccumulator(nbf *NomsBinFormat) EditAccumulator {
return &DumbEditAccumulator{nbf: nbf}
func NewDumbEditAccumulator(vr ValueReader) EditAccumulator {
return &DumbEditAccumulator{vr: vr}
}
// EditsAdded returns the number of edits that have been added to this EditAccumulator
@@ -46,8 +46,8 @@ func (dumb *DumbEditAccumulator) AddEdit(k LesserValuable, v Valuable) {
// FinishedEditing should be called when all edits have been added to get an EditProvider which provides the
// edits in sorted order. Adding more edits after calling FinishedEditing is an error.
func (dumb *DumbEditAccumulator) FinishedEditing() (EditProvider, error) {
err := SortWithErroringLess(KVPSort{dumb.edits, dumb.nbf})
func (dumb *DumbEditAccumulator) FinishedEditing(ctx context.Context) (EditProvider, error) {
err := SortWithErroringLess(ctx, dumb.vr.Format(), KVPSort{dumb.edits})
if err != nil {
return nil, err
@@ -63,7 +63,7 @@ func (dumb *DumbEditAccumulator) Close(ctx context.Context) error {
// Next returns the next KVP representing the next edit to be applied. Next will always return KVPs
// in key-sorted order. Once all KVPs have been read, io.EOF will be returned.
func (dumb *DumbEditAccumulator) Next() (*KVP, error) {
func (dumb *DumbEditAccumulator) Next(ctx context.Context) (*KVP, error) {
if dumb.pos < len(dumb.edits) {
curr := &dumb.edits[dumb.pos]
dumb.pos++
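A minimal sketch of the accumulator lifecycle under the new API; sortedProvider is a hypothetical helper showing the ValueReader-based constructor and the context-threaded FinishedEditing:

func sortedProvider(ctx context.Context, vr ValueReader, kvps []KVP) (EditProvider, error) {
	acc := NewDumbEditAccumulator(vr) // the format is derived from vr
	for _, kvp := range kvps {
		acc.AddEdit(kvp.Key, kvp.Val)
	}
	// sorts the accumulated edits and returns them as an EditProvider
	return acc.FinishedEditing(ctx)
}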
+22 -22
@@ -32,11 +32,11 @@ type AsyncSortedEdits struct {
sortConcurrency int
closed bool
vr types.ValueReader
accumulating []types.KVP
sortedColls []*KVPCollection
nbf *types.NomsBinFormat
sortWork chan types.KVPSort
sortGroup *errgroup.Group
sortCtx context.Context
@@ -44,14 +44,14 @@ type AsyncSortedEdits struct {
}
// NewAsyncSortedEditsWithDefaults creates a new AsyncSortedEdits instance with default concurrency and buffer size values
func NewAsyncSortedEditsWithDefaults(nbf *types.NomsBinFormat) types.EditAccumulator {
return NewAsyncSortedEdits(nbf, 16*1024, 4, 2)
func NewAsyncSortedEditsWithDefaults(vr types.ValueReader) types.EditAccumulator {
return NewAsyncSortedEdits(vr, 16*1024, 4, 2)
}
// NewAsyncSortedEdits creates an AsyncSortedEdits object that creates batches of size 'sliceSize' and kicks off
// 'asyncConcurrency' goroutines for background sorting of batches. The final Sort call is processed with
// 'sortConcurrency' goroutines
func NewAsyncSortedEdits(nbf *types.NomsBinFormat, sliceSize, asyncConcurrency, sortConcurrency int) *AsyncSortedEdits {
func NewAsyncSortedEdits(vr types.ValueReader, sliceSize, asyncConcurrency, sortConcurrency int) *AsyncSortedEdits {
group, groupCtx := errgroup.WithContext(context.TODO())
sortCh := make(chan types.KVPSort, asyncConcurrency*4)
return &AsyncSortedEdits{
@@ -60,7 +60,7 @@ func NewAsyncSortedEdits(nbf *types.NomsBinFormat, sliceSize, asyncConcurrency,
sortConcurrency: sortConcurrency,
accumulating: nil, // lazy alloc
sortedColls: nil,
nbf: nbf,
vr: vr,
sortWork: sortCh,
sortGroup: group,
sortCtx: groupCtx,
@@ -83,17 +83,17 @@ func (ase *AsyncSortedEdits) AddEdit(k types.LesserValuable, v types.Valuable) {
ase.accumulating = append(ase.accumulating, types.KVP{Key: k, Val: v})
if len(ase.accumulating) == ase.sliceSize {
coll := NewKVPCollection(ase.nbf, ase.accumulating)
coll := NewKVPCollection(ase.vr, ase.accumulating)
// ase.accumulating is getting sorted asynchronously and
// in-place down below. We add it to |sortedColls| here. By
// the time |sortedColls| is used, it will be sorted.
ase.sortedColls = append(ase.sortedColls, coll)
toSort := types.KVPSort{Values: ase.accumulating, NBF: ase.nbf}
toSort := types.KVPSort{Values: ase.accumulating}
select {
case ase.sortWork <- toSort:
break
default:
if err := types.SortWithErroringLess(toSort); err != nil {
if err := types.SortWithErroringLess(ase.sortCtx, ase.vr.Format(), toSort); err != nil {
ase.sortGroup.Go(func() error {
return err
})
@@ -116,7 +116,7 @@ func (ase *AsyncSortedEdits) sortWorker() error {
if !ok {
return nil
}
if err := types.SortWithErroringLess(toSort); err != nil {
if err := types.SortWithErroringLess(ase.sortCtx, ase.vr.Format(), toSort); err != nil {
return err
}
case <-ase.sortCtx.Done():
@@ -129,20 +129,20 @@ func (ase *AsyncSortedEdits) sortWorker() error {
// FinishedEditing should be called once all edits have been added. Once FinishedEditing is called adding more edits
// will have undefined behavior.
func (ase *AsyncSortedEdits) FinishedEditing() (types.EditProvider, error) {
func (ase *AsyncSortedEdits) FinishedEditing(ctx context.Context) (types.EditProvider, error) {
ase.closed = true
if len(ase.accumulating) > 0 {
toSort := types.KVPSort{Values: ase.accumulating, NBF: ase.nbf}
toSort := types.KVPSort{Values: ase.accumulating}
select {
case ase.sortWork <- toSort:
break
default:
if err := types.SortWithErroringLess(toSort); err != nil {
if err := types.SortWithErroringLess(ase.sortCtx, ase.vr.Format(), toSort); err != nil {
return nil, err
}
}
coll := NewKVPCollection(ase.nbf, ase.accumulating)
coll := NewKVPCollection(ase.vr, ase.accumulating)
ase.sortedColls = append(ase.sortedColls, coll)
ase.accumulating = nil
}
@@ -151,7 +151,7 @@ func (ase *AsyncSortedEdits) FinishedEditing() (types.EditProvider, error) {
// Calling thread helps work through remaining |sortWork| until it's sorted.
for toSort := range ase.sortWork {
if err := types.SortWithErroringLess(toSort); err != nil {
if err := types.SortWithErroringLess(ctx, ase.vr.Format(), toSort); err != nil {
return nil, err
}
}
@@ -160,7 +160,7 @@ func (ase *AsyncSortedEdits) FinishedEditing() (types.EditProvider, error) {
return nil, err
}
if err := ase.mergeCollections(); err != nil {
if err := ase.mergeCollections(ctx); err != nil {
return nil, err
}
@@ -171,7 +171,7 @@ func (ase *AsyncSortedEdits) FinishedEditing() (types.EditProvider, error) {
// and thus external synchronization is required.
func (ase *AsyncSortedEdits) Close(ctx context.Context) error {
if !ase.closed {
itr, err := ase.FinishedEditing()
itr, err := ase.FinishedEditing(ctx)
itrCloseErr := itr.Close(ctx)
if err != nil {
@@ -187,12 +187,12 @@ func (ase *AsyncSortedEdits) Close(ctx context.Context) error {
// mergeCollections performs a concurrent sorted-merge of |sortedColls|. Must be called after |sortGroup| is complete.
// Once this completes, use the |iterator| method to get a types.EditProvider which can be used to iterate over all the
// KVPs in order.
func (ase *AsyncSortedEdits) mergeCollections() error {
func (ase *AsyncSortedEdits) mergeCollections(ctx context.Context) error {
sema := semaphore.NewWeighted(int64(ase.sortConcurrency))
for len(ase.sortedColls) > 2 {
pairs := pairCollections(ase.sortedColls)
ase.sortedColls = make([]*KVPCollection, len(pairs))
mergeGroup, ctx := errgroup.WithContext(context.TODO())
mergeGroup, ctx := errgroup.WithContext(ctx)
for i := range pairs {
colls := pairs[i]
@@ -209,7 +209,7 @@ func (ase *AsyncSortedEdits) mergeCollections() error {
mergeGroup.Go(func() error {
defer sema.Release(1)
var err error
ase.sortedColls[capi], err = colls[0].DestructiveMerge(colls[1])
ase.sortedColls[capi], err = colls[0].DestructiveMerge(ctx, colls[1])
return err
})
}
@@ -248,9 +248,9 @@ func (ase *AsyncSortedEdits) iterator() types.EditProvider {
case 0:
return types.EmptyEditProvider{}
case 1:
return NewItr(ase.nbf, ase.sortedColls[0])
return NewItr(ase.vr, ase.sortedColls[0])
case 2:
return NewSortedEditItr(ase.nbf, ase.sortedColls[0], ase.sortedColls[1])
return NewSortedEditItr(ase.vr, ase.sortedColls[0], ase.sortedColls[1])
}
panic("Sort needs to be called prior to getting an Iterator.")
+22 -17
@@ -28,13 +28,13 @@ import (
"github.com/dolthub/dolt/go/store/types"
)
func createKVPs(t *testing.T, nbf *types.NomsBinFormat, rng *rand.Rand, size int) types.KVPSlice {
func createKVPs(t *testing.T, vrw types.ValueReadWriter, rng *rand.Rand, size int) types.KVPSlice {
kvps := make(types.KVPSlice, size)
v, err := types.NewTuple(nbf, types.NullValue)
v, err := types.NewTuple(vrw.Format(), types.NullValue)
require.NoError(t, err)
for i := 0; i < size; i++ {
k, err := types.NewTuple(nbf, types.Uint(rng.Uint64()%10000))
k, err := types.NewTuple(vrw.Format(), types.Uint(rng.Uint64()%10000))
require.NoError(t, err)
kvps[i] = types.KVP{Key: k, Val: v}
}
@@ -55,7 +55,10 @@ func TestAsyncSortedEdits(t *testing.T) {
}
func TestAsyncSortedEditsStable(t *testing.T) {
ase := NewAsyncSortedEdits(types.Format_Default, 2, 1, 1)
ctx := context.Background()
vrw := types.NewMemoryValueStore()
ase := NewAsyncSortedEdits(vrw, 2, 1, 1)
assert.NotNil(t, ase)
ase.AddEdit(types.Int(0), nil)
ase.AddEdit(types.Int(1), nil)
@@ -64,43 +67,43 @@ func TestAsyncSortedEditsStable(t *testing.T) {
ase.AddEdit(types.Int(1), types.Int(0))
ase.AddEdit(types.Int(2), types.Int(0))
ep, err := ase.FinishedEditing()
ep, err := ase.FinishedEditing(ctx)
assert.NoError(t, err)
err = ase.Close(context.Background())
assert.NoError(t, err)
kvp, err := ep.Next()
kvp, err := ep.Next(ctx)
assert.NoError(t, err)
assert.NotNil(t, kvp)
assert.Equal(t, types.Int(0), kvp.Key)
assert.Nil(t, kvp.Val)
kvp, err = ep.Next()
kvp, err = ep.Next(ctx)
assert.NoError(t, err)
assert.NotNil(t, kvp)
assert.Equal(t, types.Int(0), kvp.Key)
assert.Equal(t, types.Int(0), kvp.Val)
kvp, err = ep.Next()
kvp, err = ep.Next(ctx)
assert.NoError(t, err)
assert.NotNil(t, kvp)
assert.Equal(t, types.Int(1), kvp.Key)
assert.Nil(t, kvp.Val)
kvp, err = ep.Next()
kvp, err = ep.Next(ctx)
assert.NoError(t, err)
assert.NotNil(t, kvp)
assert.Equal(t, types.Int(1), kvp.Key)
assert.Equal(t, types.Int(0), kvp.Val)
kvp, err = ep.Next()
kvp, err = ep.Next(ctx)
assert.NoError(t, err)
assert.NotNil(t, kvp)
assert.Equal(t, types.Int(2), kvp.Key)
assert.Nil(t, kvp.Val)
kvp, err = ep.Next()
kvp, err = ep.Next(ctx)
assert.NoError(t, err)
assert.NotNil(t, kvp)
assert.Equal(t, types.Int(2), kvp.Key)
assert.Equal(t, types.Int(0), kvp.Val)
_, err = ep.Next()
_, err = ep.Next(ctx)
assert.Equal(t, io.EOF, err)
}
@@ -125,17 +128,19 @@ func testASE(t *testing.T, rng *rand.Rand) {
sortConcurrency := int(minSortCon + rng.Int31n(maxSortCon-minSortCon))
name := fmt.Sprintf("kvps_%d_bs_%d_asc_%d_sc_%d", numKVPs, buffSize, asyncSortConcurrency, sortConcurrency)
nbf := types.Format_Default
t.Run(name, func(t *testing.T) {
kvps := createKVPs(t, nbf, rng, numKVPs)
asyncSorted := NewAsyncSortedEdits(types.Format_Default, buffSize, asyncSortConcurrency, sortConcurrency)
ctx := context.Background()
vrw := types.NewMemoryValueStore()
kvps := createKVPs(t, vrw, rng, numKVPs)
asyncSorted := NewAsyncSortedEdits(vrw, buffSize, asyncSortConcurrency, sortConcurrency)
for _, kvp := range kvps {
asyncSorted.AddEdit(kvp.Key, kvp.Val)
}
itr, err := asyncSorted.FinishedEditing()
itr, err := asyncSorted.FinishedEditing(ctx)
assert.NoError(t, err)
@@ -143,7 +148,7 @@ func testASE(t *testing.T, rng *rand.Rand) {
t.Error("Invalid count", asyncSorted.Size(), "!=", numKVPs)
}
inOrder, count, err := IsInOrder(nbf, itr)
inOrder, count, err := IsInOrder(ctx, vrw, itr)
assert.NoError(t, err)
+8 -8
@@ -25,7 +25,7 @@ var _ types.EditAccumulator = (*DiskBackedEditAcc)(nil)
// DiskBackedEditAcc is an EditAccumulator implementation that flushes the edits to disk at regular intervals
type DiskBackedEditAcc struct {
ctx context.Context
nbf *types.NomsBinFormat
vrw types.ValueReadWriter
flusher *DiskEditFlusher
@@ -40,11 +40,11 @@ type DiskBackedEditAcc struct {
}
// NewDiskBackedEditAcc returns a new DiskBackedEditAccumulator instance
func NewDiskBackedEditAcc(ctx context.Context, nbf *types.NomsBinFormat, vrw types.ValueReadWriter, flushInterval int64, directory string, newEditAcc func() types.EditAccumulator) *DiskBackedEditAcc {
func NewDiskBackedEditAcc(ctx context.Context, vrw types.ValueReadWriter, flushInterval int64, directory string, newEditAcc func() types.EditAccumulator) *DiskBackedEditAcc {
return &DiskBackedEditAcc{
ctx: ctx,
nbf: nbf,
flusher: NewDiskEditFlusher(ctx, directory, nbf, vrw),
vrw: vrw,
flusher: NewDiskEditFlusher(ctx, directory, vrw),
newEditAcc: newEditAcc,
backing: newEditAcc(),
flushInterval: flushInterval,
@@ -71,10 +71,10 @@ func (dbea *DiskBackedEditAcc) AddEdit(key types.LesserValuable, val types.Valua
// FinishedEditing should be called when all edits have been added to get an EditProvider which provides the
// edits in sorted order. Adding more edits after calling FinishedEditing is an error.
func (dbea *DiskBackedEditAcc) FinishedEditing() (types.EditProvider, error) {
func (dbea *DiskBackedEditAcc) FinishedEditing(ctx context.Context) (types.EditProvider, error) {
// If we never flushed to disk then there is no need. Just return the data from the backing edit accumulator
if dbea.flushCount == 0 {
return dbea.backing.FinishedEditing()
return dbea.backing.FinishedEditing(ctx)
}
// flush any data we haven't flushed yet before processing
@@ -85,7 +85,7 @@ func (dbea *DiskBackedEditAcc) FinishedEditing() (types.EditProvider, error) {
dbea.backing = nil
}
results, err := dbea.flusher.Wait(dbea.ctx)
results, err := dbea.flusher.Wait(ctx)
if err != nil {
return nil, err
}
@@ -95,7 +95,7 @@ func (dbea *DiskBackedEditAcc) FinishedEditing() (types.EditProvider, error) {
eps[i] = results[i].Edits
}
return NewEPMerger(dbea.ctx, dbea.nbf, eps)
return NewEPMerger(ctx, dbea.vrw, eps)
}
// Close ensures that the accumulator is closed. Repeat calls are allowed. Not guaranteed to be thread-safe, thus external synchronization is required.
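A minimal construction sketch mirroring the test below; newDiskBacked is a hypothetical helper, and the flush interval and buffer sizes are illustrative:

func newDiskBacked(ctx context.Context, vrw types.ValueReadWriter, tmpDir string) *DiskBackedEditAcc {
	// the in-memory accumulator used between disk flushes
	newAcc := func() types.EditAccumulator {
		return NewAsyncSortedEdits(vrw, 64, 2, 2)
	}
	// the format now comes from vrw rather than a separate *NomsBinFormat
	return NewDiskBackedEditAcc(ctx, vrw, 2*1024, tmpDir, newAcc)
}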
+9 -10
@@ -33,11 +33,12 @@ func TestDiskBackedEdits(t *testing.T) {
)
size := maxKVPs
vrw := types.NewMemoryValueStore()
rng := rand.New(rand.NewSource(0))
kvps := createKVPs(t, types.Format_Default, rng, maxKVPs)
kvps := createKVPs(t, vrw, rng, maxKVPs)
for i := 0; i < 8; i++ {
t.Run(fmt.Sprintf("size_%d", size), func(t *testing.T) {
testDBE(t, kvps[:size])
testDBE(t, vrw, kvps[:size])
})
size = rng.Intn(maxKVPs)
}
@@ -45,30 +46,28 @@ func TestDiskBackedEdits(t *testing.T) {
// test something smaller than the flush interval
size = 4
t.Run(fmt.Sprintf("size_%d", size), func(t *testing.T) {
testDBE(t, kvps[:size])
testDBE(t, vrw, kvps[:size])
})
}
func testDBE(t *testing.T, kvps []types.KVP) {
func testDBE(t *testing.T, vrw types.ValueReadWriter, kvps []types.KVP) {
ctx := context.Background()
nbf := types.Format_Default
vrw := types.NewMemoryValueStore()
tmpDir, err := os.MkdirTemp("", "TestDiskBackedEdits")
require.NoError(t, err)
newEA := func() types.EditAccumulator {
return NewAsyncSortedEdits(nbf, 64, 2, 2)
return NewAsyncSortedEdits(vrw, 64, 2, 2)
}
dbe := NewDiskBackedEditAcc(ctx, nbf, vrw, 2*1024, tmpDir, newEA)
dbe := NewDiskBackedEditAcc(ctx, vrw, 2*1024, tmpDir, newEA)
for _, kvp := range kvps {
dbe.AddEdit(kvp.Key, kvp.Val)
}
itr, err := dbe.FinishedEditing()
itr, err := dbe.FinishedEditing(ctx)
assert.NoError(t, err)
inOrder, count, err := IsInOrder(nbf, itr)
inOrder, count, err := IsInOrder(ctx, vrw, itr)
assert.NoError(t, err)
require.Equal(t, len(kvps), count, "Invalid count %d != %d", count, len(kvps))
+8 -10
@@ -54,7 +54,6 @@ func (res FlushResults) Sort() {
type DiskEditFlusher struct {
ctx context.Context
directory string
nbf *types.NomsBinFormat
vrw types.ValueReadWriter
eg *errgroup.Group
@@ -63,12 +62,11 @@ type DiskEditFlusher struct {
}
// NewDiskEditFlusher returns a new DiskEditFlusher instance
func NewDiskEditFlusher(ctx context.Context, directory string, nbf *types.NomsBinFormat, vrw types.ValueReadWriter) *DiskEditFlusher {
func NewDiskEditFlusher(ctx context.Context, directory string, vrw types.ValueReadWriter) *DiskEditFlusher {
eg, egCtx := errgroup.WithContext(ctx)
return &DiskEditFlusher{
ctx: egCtx,
directory: directory,
nbf: nbf,
vrw: vrw,
eg: eg,
mu: &sync.Mutex{},
@@ -96,7 +94,7 @@ func (ef *DiskEditFlusher) Flush(accumulator types.EditAccumulator, id uint64) {
func (ef *DiskEditFlusher) resultsFromEntries(ctx context.Context, entries []flusherEntry) (FlushResults, error) {
eps := make(FlushResults, 0, len(entries))
for _, entry := range entries {
ep, err := EditProviderFromDisk(ef.nbf, ef.vrw, entry.path)
ep, err := EditProviderFromDisk(ef.vrw, entry.path)
if err != nil {
for i := range eps {
_ = eps[i].Edits.Close(ctx)
@@ -160,20 +158,20 @@ func (ef *DiskEditFlusher) WaitForIDs(ctx context.Context, idFilter *set.Uint64S
}
// EditProviderFromDisk returns a types.EditProvider instance which reads data from the specified file
func EditProviderFromDisk(nbf *types.NomsBinFormat, vrw types.ValueReadWriter, path string) (types.EditProvider, error) {
func EditProviderFromDisk(vrw types.ValueReadWriter, path string) (types.EditProvider, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
ep := types.TupleReaderAsEditProvider(types.NewTupleReader(nbf, vrw, f))
ep := types.TupleReaderAsEditProvider(types.NewTupleReader(vrw.Format(), vrw, f))
return &deleteOnCloseEP{EditProvider: ep, path: path}, nil
}
// FlushEditsToDisk writes the contents of a types.EditAccumulator to disk and returns the path where the
// associated file exists.
func FlushEditsToDisk(ctx context.Context, directory string, ea types.EditAccumulator) (string, error) {
itr, err := ea.FinishedEditing()
itr, err := ea.FinishedEditing(ctx)
if err != nil {
return "", err
}
@@ -183,7 +181,7 @@ func FlushEditsToDisk(ctx context.Context, directory string, ea types.EditAccumu
return "", err
}
err = flushKVPs(wr, itr)
err = flushKVPs(ctx, wr, itr)
if err != nil {
return "", err
}
@@ -207,10 +205,10 @@ func openTupleWriter(directory string) (string, types.TupleWriteCloser, error) {
return absPath, types.NewTupleWriter(f), nil
}
func flushKVPs(wr types.TupleWriter, itr types.EditProvider) error {
func flushKVPs(ctx context.Context, wr types.TupleWriter, itr types.EditProvider) error {
// iterate over all kvps writing the key followed by the value
for {
kvp, err := itr.Next()
kvp, err := itr.Next(ctx)
if err == io.EOF {
return nil
@@ -26,7 +26,8 @@ import (
func TestFlushingNoEdits(t *testing.T) {
ctx := context.Background()
ef := NewDiskEditFlusher(ctx, "", types.Format_Default, nil)
vrw := types.NewMemoryValueStore()
ef := NewDiskEditFlusher(ctx, "", vrw)
eps, err := ef.Wait(ctx)
require.NoError(t, err)
require.Zero(t, len(eps))
@@ -35,11 +36,11 @@ func TestFlushingNoEdits(t *testing.T) {
func TestEditFlusher(t *testing.T) {
const numEditors = 10
ctx := context.Background()
nbf := types.Format_Default
vrw := types.NewMemoryValueStore()
ef := NewDiskEditFlusher(ctx, os.TempDir(), nbf, vrw)
nbf := vrw.Format()
ef := NewDiskEditFlusher(ctx, os.TempDir(), vrw)
for i := 0; i < numEditors; i++ {
ea := types.NewDumbEditAccumulator(nbf)
ea := types.NewDumbEditAccumulator(vrw)
for j := 0; j < 100; j++ {
k, err := types.NewTuple(nbf, types.Int(i))
require.NoError(t, err)
@@ -57,7 +58,7 @@ func TestEditFlusher(t *testing.T) {
for i := 0; i < numEditors; i++ {
require.Equal(t, uint64(i), eps[i].ID)
kvp, err := eps[i].Edits.Next()
kvp, err := eps[i].Edits.Next(ctx)
require.NoError(t, err)
key, err := kvp.Key.Value(ctx)
require.NoError(t, err)
+12 -12
@@ -32,7 +32,7 @@ var _ types.EditProvider = (*EPMerger)(nil)
type EPMerger struct {
ctx context.Context
nbf *types.NomsBinFormat
vr types.ValueReader
reachedEOF bool
editsRead int64
@@ -44,10 +44,10 @@ type EPMerger struct {
// NewEPMerger takes a slice of EditProviders, whose contents should be key-sorted key/value tuple
// pairs, and returns an *EPMerger
func NewEPMerger(ctx context.Context, nbf *types.NomsBinFormat, eps []types.EditProvider) (*EPMerger, error) {
func NewEPMerger(ctx context.Context, vr types.ValueReader, eps []types.EditProvider) (*EPMerger, error) {
fep := &EPMerger{
ctx: ctx,
nbf: nbf,
vr: vr,
numEPs: len(eps),
eps: eps,
nextKVPS: make([]entry, 0, len(eps)),
@@ -55,7 +55,7 @@ func NewEPMerger(ctx context.Context, nbf *types.NomsBinFormat, eps []types.Edit
// read in the initial values from each stream and put them into the nextKVPS slice in sorted order.
for i := range eps {
kvp, err := fep.eps[i].Next()
kvp, err := fep.eps[i].Next(ctx)
if err == io.EOF {
continue
} else if err != nil {
@@ -71,7 +71,7 @@ func NewEPMerger(ctx context.Context, nbf *types.NomsBinFormat, eps []types.Edit
newEntry := entry{key: key, val: val, readerIdx: i}
// binary search for where this entry should be inserted within the slice
insIdx, err := search(nbf, i, key, fep.nextKVPS)
insIdx, err := search(ctx, vr, i, key, fep.nextKVPS)
if err != nil {
return nil, err
}
@@ -103,7 +103,7 @@ func keyAndValForKVP(ctx context.Context, kvp *types.KVP) (key types.Value, val
// Next returns the next KVP representing the next edit to be applied. Next will always return KVPs
// in key-sorted order. Once all KVPs have been read, io.EOF will be returned.
func (fep *EPMerger) Next() (*types.KVP, error) {
func (fep *EPMerger) Next(ctx context.Context) (*types.KVP, error) {
if fep.epsWithData == 0 {
return nil, io.EOF
}
@@ -112,7 +112,7 @@ func (fep *EPMerger) Next() (*types.KVP, error) {
nextKVP := fep.nextKVPS[0]
// read the next tuple from the TupleStream that the next kvp was read from
kvp, err := fep.eps[nextKVP.readerIdx].Next()
kvp, err := fep.eps[nextKVP.readerIdx].Next(ctx)
if err == io.EOF {
// shrink the slice to only hold valid ordered data
fep.nextKVPS = fep.nextKVPS[1:]
@@ -130,7 +130,7 @@ func (fep *EPMerger) Next() (*types.KVP, error) {
}
// search for the location where the item should be placed
insPos, err := search(fep.nbf, nextKVP.readerIdx, key, fep.nextKVPS[1:])
insPos, err := search(ctx, fep.vr, nextKVP.readerIdx, key, fep.nextKVPS[1:])
if err != nil {
return nil, err
}
@@ -159,12 +159,12 @@ func (fep *EPMerger) ReachedEOF() bool {
}
type comparableValue interface {
Compare(nbf *types.NomsBinFormat, other types.LesserValuable) (int, error)
Compare(ctx context.Context, nbf *types.NomsBinFormat, other types.LesserValuable) (int, error)
}
// search does a binary search over a sorted []entry and returns an integer representing the insertion index where the
// item should be placed in order to keep the vals sorted.
func search(nbf *types.NomsBinFormat, readerIdx int, key types.Value, vals []entry) (int, error) {
func search(ctx context.Context, vr types.ValueReader, readerIdx int, key types.Value, vals []entry) (int, error) {
var err error
var n int
if comparable, ok := key.(comparableValue); ok {
@@ -174,7 +174,7 @@ func search(nbf *types.NomsBinFormat, readerIdx int, key types.Value, vals []ent
}
var res int
res, err = comparable.Compare(nbf, vals[i].key)
res, err = comparable.Compare(ctx, vr.Format(), vals[i].key)
if err != nil {
return false
} else if res < 0 {
@@ -192,7 +192,7 @@ func search(nbf *types.NomsBinFormat, readerIdx int, key types.Value, vals []ent
}
var isLess bool
isLess, err = key.Less(nbf, vals[i].key)
isLess, err = key.Less(ctx, vr.Format(), vals[i].key)
if err != nil {
return false
} else if isLess {
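To make the contract concrete, a simplified sketch of the insertion-index semantics using the standard library on scalar keys; the real search compares types.Value keys through the context-threaded Compare/Less and breaks ties on readerIdx so earlier readers sort first:

func insertionIndex(sorted []float64, key float64) int {
	// smallest i such that key < sorted[i]; inserting at i keeps the
	// slice sorted, placing equal keys after existing ones
	return sort.Search(len(sorted), func(i int) bool {
		return key < sorted[i]
	})
}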
@@ -32,28 +32,30 @@ func TestBinarySearch(t *testing.T) {
entryCounts := []int{11, 15, 16, 19, 31, 1024, 32151}
for _, count := range entryCounts {
t.Run(strconv.Itoa(count), func(t *testing.T) {
ctx := context.Background()
vrw := types.NewMemoryValueStore()
vals := make([]entry, count)
for i := 0; i < count; i++ {
vals[i] = entry{key: types.Float(float64(i + 1))}
}
for i := 0; i < count+1; i++ {
idx, err := search(types.Format_Default, 0, types.Float(float64(i)), vals)
idx, err := search(ctx, vrw, 0, types.Float(float64(i)), vals)
require.NoError(t, err)
require.Equal(t, i, idx)
idx, err = search(types.Format_Default, 0, types.Float(float64(i)+0.5), vals)
idx, err = search(ctx, vrw, 0, types.Float(float64(i)+0.5), vals)
require.NoError(t, err)
require.Equal(t, i, idx)
}
// test that, in the case of equality, an earlier reader index returns as being less
for i := 1; i < count+1; i++ {
idx, err := search(types.Format_Default, -1, types.Float(float64(i)), vals)
idx, err := search(ctx, vrw, -1, types.Float(float64(i)), vals)
require.NoError(t, err)
require.Equal(t, i-1, idx)
idx, err = search(types.Format_Default, -1, types.Float(float64(i)+0.5), vals)
idx, err = search(ctx, vrw, -1, types.Float(float64(i)+0.5), vals)
require.NoError(t, err)
require.Equal(t, i, idx)
}
@@ -61,11 +63,11 @@ func TestBinarySearch(t *testing.T) {
}
}
func readerForTuples(t *testing.T, nbf *types.NomsBinFormat, vrw types.ValueReadWriter, tuples ...types.Tuple) types.TupleReadCloser {
func readerForTuples(t *testing.T, ctx context.Context, vrw types.ValueReadWriter, tuples ...types.Tuple) types.TupleReadCloser {
require.True(t, len(tuples)%2 == 0)
prev := tuples[0]
for i := 2; i < len(tuples); i += 2 {
isLess, err := prev.Less(nbf, tuples[i])
isLess, err := prev.Less(ctx, vrw.Format(), tuples[i])
require.NoError(t, err)
require.True(t, isLess)
prev = tuples[i]
@@ -77,7 +79,7 @@ func readerForTuples(t *testing.T, nbf *types.NomsBinFormat, vrw types.ValueRead
err := wr.WriteTuples(tuples...)
require.NoError(t, err)
return types.NewTupleReader(nbf, vrw, io.NopCloser(bytes.NewBuffer(buf.Bytes())))
return types.NewTupleReader(vrw.Format(), vrw, io.NopCloser(bytes.NewBuffer(buf.Bytes())))
}
func newTuple(t *testing.T, nbf *types.NomsBinFormat, vals ...types.Value) types.Tuple {
@@ -92,21 +94,21 @@ func TestComparableBinarySearch(t *testing.T) {
vrw := types.NewMemoryValueStore()
readers := []types.EditProvider{
types.TupleReaderAsEditProvider(readerForTuples(t, nbf, vrw, []types.Tuple{
types.TupleReaderAsEditProvider(readerForTuples(t, ctx, vrw, []types.Tuple{
newTuple(t, nbf), newTuple(t, nbf, types.Int(0)),
newTuple(t, nbf, types.Bool(false)), newTuple(t, nbf, types.Int(2)),
newTuple(t, nbf, types.Float(1.0)), newTuple(t, nbf, types.Int(5)),
newTuple(t, nbf, types.String("zz")), newTuple(t, nbf, types.Int(9)),
newTuple(t, nbf, types.UUID{}), newTuple(t, nbf, types.Int(11)),
}...)),
types.TupleReaderAsEditProvider(readerForTuples(t, nbf, vrw, []types.Tuple{
types.TupleReaderAsEditProvider(readerForTuples(t, ctx, vrw, []types.Tuple{
newTuple(t, nbf), newTuple(t, nbf, types.Int(1)),
newTuple(t, nbf, types.Bool(true)), newTuple(t, nbf, types.Int(4)),
newTuple(t, nbf, types.Float(2.0)), newTuple(t, nbf, types.Int(6)),
newTuple(t, nbf, types.String("zz")), newTuple(t, nbf, types.Int(10)),
newTuple(t, nbf, types.UUID{}), newTuple(t, nbf, types.Int(12)),
}...)),
types.TupleReaderAsEditProvider(readerForTuples(t, nbf, vrw, []types.Tuple{
types.TupleReaderAsEditProvider(readerForTuples(t, ctx, vrw, []types.Tuple{
newTuple(t, nbf, types.Bool(false)), newTuple(t, nbf, types.Int(3)),
newTuple(t, nbf, types.Float(2.0)), newTuple(t, nbf, types.Int(7)),
newTuple(t, nbf, types.String("aaa")), newTuple(t, nbf, types.Int(8)),
@@ -118,10 +120,10 @@ func TestComparableBinarySearch(t *testing.T) {
// create a merger and iterate through all values validating that every value is less than
// the next value read, and that we retrieved all of the data.
merger, err := NewEPMerger(ctx, nbf, readers)
merger, err := NewEPMerger(ctx, vrw, readers)
require.NoError(t, err)
items := testMergeOrder(t, ctx, nbf, merger)
items := testMergeOrder(t, ctx, vrw, merger)
require.Equal(t, numItems, len(items))
for i := 0; i < len(items); i++ {
@@ -195,17 +197,17 @@ func TestTupleStreamMerger(t *testing.T) {
// create a merger and iterate through all values validating that every value is less than
// the next value read, and that we retrieved all of the data.
merger, err := NewEPMerger(ctx, nbf, readers)
merger, err := NewEPMerger(ctx, vrw, readers)
require.NoError(t, err)
items := testMergeOrder(t, ctx, nbf, merger)
items := testMergeOrder(t, ctx, vrw, merger)
require.Equal(t, numItems, int64(len(items)))
})
}
}
func testMergeOrder(t *testing.T, ctx context.Context, nbf *types.NomsBinFormat, merger types.EditProvider) []*types.KVP {
curr, err := merger.Next()
func testMergeOrder(t *testing.T, ctx context.Context, vr types.ValueReader, merger types.EditProvider) []*types.KVP {
curr, err := merger.Next(ctx)
require.NoError(t, err)
require.NotNil(t, curr)
@@ -215,7 +217,7 @@ func testMergeOrder(t *testing.T, ctx context.Context, nbf *types.NomsBinFormat,
var items []*types.KVP
items = append(items, curr)
for {
curr, err = merger.Next()
curr, err = merger.Next(ctx)
if err == io.EOF {
break
}
@@ -224,7 +226,7 @@ func testMergeOrder(t *testing.T, ctx context.Context, nbf *types.NomsBinFormat,
currKeyVal, err := curr.Key.Value(ctx)
require.NoError(t, err)
isLess, err := prevKeyVal.Less(nbf, currKeyVal)
isLess, err := prevKeyVal.Less(ctx, vr.Format(), currKeyVal)
require.NoError(t, err)
require.True(t, isLess || prevKeyVal.Equals(currKeyVal))
+4 -4
@@ -20,7 +20,7 @@ import "github.com/dolthub/dolt/go/store/types"
// is filled the target buffer is changed for subsequent adds. New buffers can be added to the builder so that
// buffers of other KVPCollections can be reused.
type KVPCollBuilder struct {
nbf *types.NomsBinFormat
vr types.ValueReader
filled []types.KVPSlice
toFill []types.KVPSlice
currSl types.KVPSlice
@@ -31,11 +31,11 @@ type KVPCollBuilder struct {
}
// NewKVPCollBuilder creates a builder which can be used to build a KVPCollection.
func NewKVPCollBuilder(buffSize int, nbf *types.NomsBinFormat) *KVPCollBuilder {
func NewKVPCollBuilder(vr types.ValueReader, buffSize int) *KVPCollBuilder {
buffs := []types.KVPSlice{make(types.KVPSlice, buffSize)}
currSl := make(types.KVPSlice, buffSize)
return &KVPCollBuilder{nbf, nil, buffs, currSl, buffSize, 0, 0, buffSize}
return &KVPCollBuilder{vr, nil, buffs, currSl, buffSize, 0, 0, buffSize}
}
// AddBuffer adds a buffer of KVPs that can be filled.
@@ -106,6 +106,6 @@ func (cb *KVPCollBuilder) Build() *KVPCollection {
numSlices: len(cb.filled),
totalSize: cb.numItems,
slices: cb.filled,
nbf: cb.nbf,
vr: cb.vr,
}
}
+18 -12
@@ -15,6 +15,7 @@
package edits
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
@@ -23,8 +24,9 @@ import (
)
func TestAddKVP(t *testing.T) {
nbf := types.Format_Default
builder := NewKVPCollBuilder(2, nbf)
vrw := types.NewMemoryValueStore()
ctx := context.Background()
builder := NewKVPCollBuilder(vrw, 2)
builder.AddKVP(types.KVP{Key: types.Uint(0), Val: types.NullValue})
builder.AddKVP(types.KVP{Key: types.Uint(1), Val: types.NullValue})
builder.AddKVP(types.KVP{Key: types.Uint(2), Val: types.NullValue})
@@ -33,7 +35,7 @@ func TestAddKVP(t *testing.T) {
itr := coll.Iterator()
for i := int64(0); i < coll.Size(); i++ {
kvp, err := itr.Next()
kvp, err := itr.Next(ctx)
assert.NoError(t, err)
if uint(kvp.Key.(types.Uint)) != uint(i) {
@@ -43,7 +45,9 @@ func TestAddKVP(t *testing.T) {
}
func TestMoveRemaining(t *testing.T) {
nbf := types.Format_Default
ctx := context.Background()
vrw := types.NewMemoryValueStore()
sl1 := types.KVPSlice{{Key: types.Uint(0), Val: types.NullValue}, {Key: types.Uint(1), Val: types.NullValue}}
sl2 := types.KVPSlice{{Key: types.Uint(2), Val: types.NullValue}, {}}
coll := &KVPCollection{
@@ -51,17 +55,17 @@ func TestMoveRemaining(t *testing.T) {
2,
3,
[]types.KVPSlice{sl1, sl2[:1]},
nbf,
vrw,
}
builder := NewKVPCollBuilder(2, nbf)
builder := NewKVPCollBuilder(vrw, 2)
builder.MoveRemaining(coll.Iterator())
result := builder.Build()
itr := result.Iterator()
for i := int64(0); i < result.Size(); i++ {
kvp, err := itr.Next()
kvp, err := itr.Next(ctx)
assert.NoError(t, err)
if uint(kvp.Key.(types.Uint)) != uint(i) {
@@ -71,11 +75,13 @@ func TestMoveRemaining(t *testing.T) {
}
func TestAddKVPAndMoveRemaining(t *testing.T) {
nbf := types.Format_Default
sl := types.KVPSlice{{Key: types.Uint(1), Val: types.NullValue}, {Key: types.Uint(2), Val: types.NullValue}}
coll := NewKVPCollection(nbf, sl)
ctx := context.Background()
vrw := types.NewMemoryValueStore()
builder := NewKVPCollBuilder(2, nbf)
sl := types.KVPSlice{{Key: types.Uint(1), Val: types.NullValue}, {Key: types.Uint(2), Val: types.NullValue}}
coll := NewKVPCollection(vrw, sl)
builder := NewKVPCollBuilder(vrw, 2)
builder.AddKVP(types.KVP{Key: types.Uint(0), Val: types.NullValue})
builder.MoveRemaining(coll.Iterator())
@@ -83,7 +89,7 @@ func TestAddKVPAndMoveRemaining(t *testing.T) {
itr := result.Iterator()
for i := int64(0); i < result.Size(); i++ {
kvp, err := itr.Next()
kvp, err := itr.Next(ctx)
assert.NoError(t, err)
if uint(kvp.Key.(types.Uint)) != uint(i) {
+6 -6
@@ -30,22 +30,22 @@ type KVPCollItr struct {
currSl types.KVPSlice
currSlSize int
currKey types.LesserValuable
nbf *types.NomsBinFormat
vr types.ValueReader
read int64
}
// NewItr creates a new KVPCollItr from a KVPCollection
func NewItr(nbf *types.NomsBinFormat, coll *KVPCollection) *KVPCollItr {
func NewItr(vr types.ValueReader, coll *KVPCollection) *KVPCollItr {
firstSl := coll.slices[0]
firstKey := firstSl[0].Key
slSize := len(firstSl)
return &KVPCollItr{coll: coll, currSl: firstSl, currSlSize: slSize, currKey: firstKey, nbf: nbf}
return &KVPCollItr{coll: coll, currSl: firstSl, currSlSize: slSize, currKey: firstKey, vr: vr}
}
// Less returns whether this iterator's current key is less than the other iterator's current key
func (itr *KVPCollItr) Less(other *KVPCollItr) (bool, error) {
func (itr *KVPCollItr) Less(ctx context.Context, other *KVPCollItr) (bool, error) {
if other.currKey == nil {
return true, nil
}
@@ -54,7 +54,7 @@ func (itr *KVPCollItr) Less(other *KVPCollItr) (bool, error) {
return false, nil
}
return itr.currKey.Less(itr.nbf, other.currKey)
return itr.currKey.Less(ctx, itr.vr.Format(), other.currKey)
}
// nextForDestructiveMerge returns the next kvp, the source slice once that slice has been exhausted, and whether or not iteration is complete.
@@ -92,7 +92,7 @@ func (itr *KVPCollItr) nextForDestructiveMerge() (nextKVP *types.KVP, sliceIfExh
// Next returns the next KVP representing the next edit to be applied. Next will always return KVPs
// in key sorted order. Once all KVPs have been read io.EOF will be returned.
func (itr *KVPCollItr) Next() (*types.KVP, error) {
func (itr *KVPCollItr) Next(ctx context.Context) (*types.KVP, error) {
kvp, _, _ := itr.nextForDestructiveMerge()
if kvp == nil {
+3 -3
@@ -55,11 +55,11 @@ func TestKVPCollItr(t *testing.T) {
},
}
nbf := types.Format_Default
vrw := types.NewMemoryValueStore()
for _, test := range tests {
coll := &KVPCollection{test.buffSize, len(test.slices), test.totalSize, test.slices, nbf}
itr := NewItr(nbf, coll)
coll := &KVPCollection{test.buffSize, len(test.slices), test.totalSize, test.slices, vrw}
itr := NewItr(vrw, coll)
for i := 0; i < 2; i++ {
for _, expRes := range test.itrResults {
+14 -10
@@ -14,7 +14,11 @@
package edits
import "github.com/dolthub/dolt/go/store/types"
import (
"context"
"github.com/dolthub/dolt/go/store/types"
)
// KVPCollection is a collection of sorted KVPs
type KVPCollection struct {
@@ -22,20 +26,20 @@ type KVPCollection struct {
numSlices int
totalSize int64
slices []types.KVPSlice
nbf *types.NomsBinFormat
vr types.ValueReader
}
// NewKVPCollection creates a new KVPCollection from a sorted KVPSlice
func NewKVPCollection(nbf *types.NomsBinFormat, sl types.KVPSlice) *KVPCollection {
return newKVPColl(nbf, cap(sl), 1, int64(len(sl)), []types.KVPSlice{sl})
func NewKVPCollection(vr types.ValueReader, sl types.KVPSlice) *KVPCollection {
return newKVPColl(vr, cap(sl), 1, int64(len(sl)), []types.KVPSlice{sl})
}
func newKVPColl(nbf *types.NomsBinFormat, maxSize, numSlices int, totalSize int64, slices []types.KVPSlice) *KVPCollection {
func newKVPColl(vr types.ValueReader, maxSize, numSlices int, totalSize int64, slices []types.KVPSlice) *KVPCollection {
if slices == nil {
panic("invalid params")
}
return &KVPCollection{maxSize, numSlices, totalSize, slices, nbf}
return &KVPCollection{maxSize, numSlices, totalSize, slices, vr}
}
// Size returns the total number of elements in the collection
@@ -45,20 +49,20 @@ func (coll *KVPCollection) Size() int64 {
// Iterator returns an iterator that will iterate over the KVPs in the collection in order.
func (coll *KVPCollection) Iterator() *KVPCollItr {
return NewItr(coll.nbf, coll)
return NewItr(coll.vr, coll)
}
// DestructiveMerge merges two KVPCollections into a new collection. This KVPCollection and the
// collection it is being merged with will no longer be valid once this method is called. A
// new KVPCollection will be returned which holds the merged collections.
func (left *KVPCollection) DestructiveMerge(right *KVPCollection) (*KVPCollection, error) {
func (left *KVPCollection) DestructiveMerge(ctx context.Context, right *KVPCollection) (*KVPCollection, error) {
if left.buffSize != right.buffSize {
panic("Cannot merge collections with varying buffer sizes.")
}
lItr := left.Iterator()
rItr := right.Iterator()
resBuilder := NewKVPCollBuilder(left.buffSize, left.nbf)
resBuilder := NewKVPCollBuilder(left.vr, left.buffSize)
var done bool
var kvp *types.KVP
@@ -68,7 +72,7 @@ func (left *KVPCollection) DestructiveMerge(right *KVPCollection) (*KVPCollectio
for !done {
currItr, otherItr = lItr, rItr
isLess, err := rItr.Less(lItr)
isLess, err := rItr.Less(ctx, lItr)
if err != nil {
return nil, err
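DestructiveMerge now threads the context down into the key comparisons performed while merging. A sketch, under the same assumed edits import path:

package main

import (
	"context"
	"fmt"

	"github.com/dolthub/dolt/go/store/types"
	"github.com/dolthub/dolt/go/store/types/edits"
)

func main() {
	ctx := context.Background()
	vrw := types.NewMemoryValueStore()

	left := edits.NewKVPCollection(vrw, types.KVPSlice{
		{Key: types.Int(0), Val: types.NullValue},
		{Key: types.Int(2), Val: types.NullValue},
	})
	right := edits.NewKVPCollection(vrw, types.KVPSlice{
		{Key: types.Int(1), Val: types.NullValue},
		{Key: types.Int(3), Val: types.NullValue},
	})

	// Key comparisons during the merge need the context; both inputs
	// are invalidated, and only the returned collection remains usable.
	merged, err := left.DestructiveMerge(ctx, right)
	if err != nil {
		panic(err)
	}
	fmt.Println(merged.Size()) // 4
}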
+28 -24
@@ -31,7 +31,7 @@ func (coll *KVPCollection) String() string {
ctx := context.Background()
itr := coll.Iterator()
val, err := itr.Next()
val, err := itr.Next(ctx)
d.PanicIfError(err)
keys := make([]types.Value, coll.totalSize)
@@ -40,11 +40,11 @@ func (coll *KVPCollection) String() string {
keys[i], err = val.Key.Value(ctx)
d.PanicIfError(err)
val, err = itr.Next()
val, err = itr.Next(ctx)
d.PanicIfError(err)
}
tpl, err := types.NewTuple(coll.nbf, keys...)
tpl, err := types.NewTuple(coll.vr.Format(), keys...)
d.PanicIfError(err)
str, err := types.EncodedValue(ctx, tpl)
@@ -55,62 +55,66 @@ func (coll *KVPCollection) String() string {
func TestKVPCollection(t *testing.T) {
rng := rand.New(rand.NewSource(0))
nbf := types.Format_Default
testKVPCollection(t, nbf, rng)
vrw := types.NewMemoryValueStore()
ctx := context.Background()
testKVPCollection(t, ctx, vrw, rng)
for i := 0; i < 64; i++ {
seed := time.Now().UnixNano()
t.Log(seed)
rng := rand.New(rand.NewSource(seed))
testKVPCollection(t, nbf, rng)
testKVPCollection(t, ctx, vrw, rng)
}
}
func TestKVPCollectionDestructiveMergeStable(t *testing.T) {
left := NewKVPCollection(types.Format_Default, types.KVPSlice{
vrw := types.NewMemoryValueStore()
ctx := context.Background()
left := NewKVPCollection(vrw, types.KVPSlice{
types.KVP{Key: types.Int(0)},
types.KVP{Key: types.Int(1)},
types.KVP{Key: types.Int(2)},
})
right := NewKVPCollection(types.Format_Default, types.KVPSlice{
right := NewKVPCollection(vrw, types.KVPSlice{
types.KVP{Key: types.Int(0), Val: types.Int(0)},
types.KVP{Key: types.Int(1), Val: types.Int(0)},
types.KVP{Key: types.Int(2), Val: types.Int(0)},
})
var err error
left, err = left.DestructiveMerge(right)
left, err = left.DestructiveMerge(ctx, right)
assert.NoError(t, err)
i := left.Iterator()
var v *types.KVP
v, err = i.Next()
v, err = i.Next(ctx)
assert.NoError(t, err)
assert.Equal(t, types.Int(0), v.Key)
assert.Nil(t, v.Val)
v, err = i.Next()
v, err = i.Next(ctx)
assert.NoError(t, err)
assert.Equal(t, types.Int(0), v.Key)
assert.NotNil(t, v.Val)
v, err = i.Next()
v, err = i.Next(ctx)
assert.NoError(t, err)
assert.Equal(t, types.Int(1), v.Key)
assert.Nil(t, v.Val)
v, err = i.Next()
v, err = i.Next(ctx)
assert.NoError(t, err)
assert.Equal(t, types.Int(1), v.Key)
assert.NotNil(t, v.Val)
v, err = i.Next()
v, err = i.Next(ctx)
assert.NoError(t, err)
assert.Equal(t, types.Int(2), v.Key)
assert.Nil(t, v.Val)
v, err = i.Next()
v, err = i.Next(ctx)
assert.NoError(t, err)
assert.Equal(t, types.Int(2), v.Key)
assert.NotNil(t, v.Val)
_, err = i.Next()
_, err = i.Next(ctx)
assert.Equal(t, io.EOF, err)
}
func testKVPCollection(t *testing.T, nbf *types.NomsBinFormat, rng *rand.Rand) {
func testKVPCollection(t *testing.T, ctx context.Context, vr types.ValueReader, rng *rand.Rand) {
const (
maxSize = 1024
minSize = 4
@@ -126,12 +130,12 @@ func testKVPCollection(t *testing.T, nbf *types.NomsBinFormat, rng *rand.Rand) {
t.Log("num collections:", numColls, "- buffer size", size)
for i := 0; i < numColls; i++ {
colls[i] = createKVPColl(nbf, rng, size)
colls[i] = createKVPColl(vr, rng, size)
}
for len(colls) > 1 {
for i, coll := range colls {
inOrder, _, err := IsInOrder(nbf, NewItr(nbf, coll))
inOrder, _, err := IsInOrder(ctx, vr, NewItr(vr, coll))
assert.NoError(t, err)
if !inOrder {
t.Fatal(i, "not in order")
@@ -146,7 +150,7 @@ func testKVPCollection(t *testing.T, nbf *types.NomsBinFormat, rng *rand.Rand) {
s1 := colls[i].Size()
s2 := colls[j].Size()
//fmt.Print(colls[i].String(), "+", colls[j].String())
mergedColl, err := colls[i].DestructiveMerge(colls[j])
mergedColl, err := colls[i].DestructiveMerge(ctx, colls[j])
assert.NoError(t, err)
ms := mergedColl.Size()
@@ -163,7 +167,7 @@ func testKVPCollection(t *testing.T, nbf *types.NomsBinFormat, rng *rand.Rand) {
colls = newColls
}
inOrder, numItems, err := IsInOrder(nbf, NewItr(nbf, colls[0]))
inOrder, numItems, err := IsInOrder(ctx, vr, NewItr(vr, colls[0]))
assert.NoError(t, err)
if !inOrder {
t.Fatal("collection not in order")
@@ -172,14 +176,14 @@ func testKVPCollection(t *testing.T, nbf *types.NomsBinFormat, rng *rand.Rand) {
}
}
func createKVPColl(nbf *types.NomsBinFormat, rng *rand.Rand, size int) *KVPCollection {
func createKVPColl(vr types.ValueReader, rng *rand.Rand, size int) *KVPCollection {
kvps := make(types.KVPSlice, size)
for i := 0; i < size; i++ {
kvps[i] = types.KVP{Key: types.Uint(rng.Uint64() % 10000), Val: types.NullValue}
}
types.SortWithErroringLess(types.KVPSort{Values: kvps, NBF: nbf})
types.SortWithErroringLess(context.Background(), vr.Format(), types.KVPSort{Values: kvps})
return NewKVPCollection(nbf, kvps)
return NewKVPCollection(vr, kvps)
}
+8 -7
@@ -25,8 +25,8 @@ import (
)
// IsInOrder iterates over every value and validates that they are returned in key order. This is intended for testing.
func IsInOrder(nbf *types.NomsBinFormat, itr types.EditProvider) (bool, int, error) {
prev, err := itr.Next()
func IsInOrder(ctx context.Context, vr types.ValueReader, itr types.EditProvider) (bool, int, error) {
prev, err := itr.Next(ctx)
if err == io.EOF {
return true, 0, nil
@@ -37,7 +37,7 @@ func IsInOrder(nbf *types.NomsBinFormat, itr types.EditProvider) (bool, int, err
count := 1
for {
curr, err := itr.Next()
curr, err := itr.Next(ctx)
if err == io.EOF {
return true, count, nil
@@ -45,7 +45,7 @@ func IsInOrder(nbf *types.NomsBinFormat, itr types.EditProvider) (bool, int, err
return false, 0, err
}
isLess, err := curr.Key.Less(nbf, prev.Key)
isLess, err := curr.Key.Less(ctx, vr.Format(), prev.Key)
if err != nil {
return false, 0, err
@@ -62,7 +62,8 @@ func IsInOrder(nbf *types.NomsBinFormat, itr types.EditProvider) (bool, int, err
func TestKVPSliceSort(t *testing.T) {
ctx := context.Background()
nbf := types.Format_Default
vrw := types.NewMemoryValueStore()
tests := []struct {
kvps types.KVPSlice
@@ -85,9 +86,9 @@ func TestKVPSliceSort(t *testing.T) {
}
for _, test := range tests {
_, _, err := IsInOrder(nbf, NewItr(nbf, NewKVPCollection(nbf, test.kvps)))
_, _, err := IsInOrder(ctx, vrw, NewItr(vrw, NewKVPCollection(vrw, test.kvps)))
assert.NoError(t, err)
err = types.SortWithErroringLess(types.KVPSort{Values: test.kvps, NBF: nbf})
err = types.SortWithErroringLess(ctx, vrw.Format(), types.KVPSort{Values: test.kvps})
assert.NoError(t, err)
if len(test.kvps) != len(test.expSorted) {
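Outside the tests, the reworked sort entry point is called the same way: the format and context are arguments to SortWithErroringLess rather than a field on KVPSort. A sketch:

package main

import (
	"context"

	"github.com/dolthub/dolt/go/store/types"
)

func main() {
	ctx := context.Background()
	nbf := types.Format_Default

	kvps := types.KVPSlice{
		{Key: types.Uint(2), Val: types.NullValue},
		{Key: types.Uint(0), Val: types.NullValue},
		{Key: types.Uint(1), Val: types.NullValue},
	}

	// The format is no longer stored on KVPSort; it travels as an
	// argument alongside the context.
	if err := types.SortWithErroringLess(ctx, nbf, types.KVPSort{Values: kvps}); err != nil {
		panic(err)
	}
}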
+8 -8
@@ -31,22 +31,22 @@ type SortedEditItr struct {
// NewSortedEditItr creates an iterator from two KVPCollection references. As the iterator advances it
// merges the collections and yields their KVPs in key order.
func NewSortedEditItr(nbf *types.NomsBinFormat, left, right *KVPCollection) *SortedEditItr {
leftItr := NewItr(nbf, left)
rightItr := NewItr(nbf, right)
func NewSortedEditItr(vr types.ValueReader, left, right *KVPCollection) *SortedEditItr {
leftItr := NewItr(vr, left)
rightItr := NewItr(vr, right)
return &SortedEditItr{leftItr: leftItr, rightItr: rightItr}
}
// Next returns the next KVP representing the next edit to be applied. Next will always return KVPs
// in key sorted order. Once all KVPs have been read io.EOF will be returned.
func (itr *SortedEditItr) Next() (*types.KVP, error) {
func (itr *SortedEditItr) Next(ctx context.Context) (*types.KVP, error) {
if itr.done {
return nil, io.EOF
}
lesser := itr.leftItr
isLess, err := itr.rightItr.Less(itr.leftItr)
isLess, err := itr.rightItr.Less(ctx, itr.leftItr)
if err != nil {
return nil, err
@@ -56,7 +56,7 @@ func (itr *SortedEditItr) Next() (*types.KVP, error) {
lesser = itr.rightItr
}
kvp, err := lesser.Next()
kvp, err := lesser.Next(ctx)
if err != nil {
return nil, err
@@ -81,13 +81,13 @@ func (itr *SortedEditItr) ReachedEOF() bool {
}
// Peek returns the next KVP without advancing
func (itr *SortedEditItr) Peek() (*types.KVP, error) {
func (itr *SortedEditItr) Peek(ctx context.Context) (*types.KVP, error) {
if itr.done {
return nil, nil
}
lesser := itr.leftItr
isLess, err := itr.rightItr.Less(itr.leftItr)
isLess, err := itr.rightItr.Less(ctx, itr.leftItr)
if err != nil {
return nil, err
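SortedEditItr performs the same merge lazily, one Next(ctx) call at a time. A sketch, again assuming the go/store/types/edits import path:

package main

import (
	"context"
	"fmt"
	"io"

	"github.com/dolthub/dolt/go/store/types"
	"github.com/dolthub/dolt/go/store/types/edits"
)

func main() {
	ctx := context.Background()
	vrw := types.NewMemoryValueStore()

	left := edits.NewKVPCollection(vrw, types.KVPSlice{{Key: types.Int(0)}, {Key: types.Int(2)}})
	right := edits.NewKVPCollection(vrw, types.KVPSlice{{Key: types.Int(1)}, {Key: types.Int(3)}})

	// Unlike DestructiveMerge, the merge happens incrementally as Next is called.
	itr := edits.NewSortedEditItr(vrw, left, right)
	for {
		kvp, err := itr.Next(ctx)
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(kvp.Key) // 0, 1, 2, 3
	}
}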
+14 -10
@@ -15,6 +15,7 @@
package edits
import (
"context"
"io"
"testing"
@@ -24,47 +25,50 @@ import (
)
func TestSortedEditItrStable(t *testing.T) {
left := NewKVPCollection(types.Format_Default, types.KVPSlice{
ctx := context.Background()
vrw := types.NewMemoryValueStore()
left := NewKVPCollection(vrw, types.KVPSlice{
types.KVP{Key: types.Int(0)},
types.KVP{Key: types.Int(1)},
types.KVP{Key: types.Int(2)},
})
right := NewKVPCollection(types.Format_Default, types.KVPSlice{
right := NewKVPCollection(vrw, types.KVPSlice{
types.KVP{Key: types.Int(0), Val: types.Int(0)},
types.KVP{Key: types.Int(1), Val: types.Int(0)},
types.KVP{Key: types.Int(2), Val: types.Int(0)},
})
assert.NotNil(t, left)
assert.NotNil(t, right)
i := NewSortedEditItr(types.Format_Default, left, right)
i := NewSortedEditItr(vrw, left, right)
assert.NotNil(t, i)
var err error
var v *types.KVP
v, err = i.Next()
v, err = i.Next(ctx)
assert.NoError(t, err)
assert.Equal(t, types.Int(0), v.Key)
assert.Nil(t, v.Val)
v, err = i.Next()
v, err = i.Next(ctx)
assert.NoError(t, err)
assert.Equal(t, types.Int(0), v.Key)
assert.NotNil(t, v.Val)
v, err = i.Next()
v, err = i.Next(ctx)
assert.NoError(t, err)
assert.Equal(t, types.Int(1), v.Key)
assert.Nil(t, v.Val)
v, err = i.Next()
v, err = i.Next(ctx)
assert.NoError(t, err)
assert.Equal(t, types.Int(1), v.Key)
assert.NotNil(t, v.Val)
v, err = i.Next()
v, err = i.Next(ctx)
assert.NoError(t, err)
assert.Equal(t, types.Int(2), v.Key)
assert.Nil(t, v.Val)
v, err = i.Next()
v, err = i.Next(ctx)
assert.NoError(t, err)
assert.Equal(t, types.Int(2), v.Key)
assert.NotNil(t, v.Val)
_, err = i.Next()
_, err = i.Next(ctx)
assert.Equal(t, io.EOF, err)
}
+1 -1
@@ -40,7 +40,7 @@ func (v Float) Equals(other Value) bool {
return v == other
}
func (v Float) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
func (v Float) Less(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (bool, error) {
if v2, ok := other.(Float); ok {
return v < v2, nil
}
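All Value implementations now share the context-first Less signature, so a direct call site looks like this:

package main

import (
	"context"
	"fmt"

	"github.com/dolthub/dolt/go/store/types"
)

func main() {
	ctx := context.Background()
	isLess, err := types.Float(1.5).Less(ctx, types.Format_Default, types.Float(2.5))
	if err != nil {
		panic(err)
	}
	fmt.Println(isLess) // true
}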
+2 -2
@@ -42,8 +42,8 @@ func (v Geometry) Equals(other Value) bool {
return v.Inner.Equals(other)
}
func (v Geometry) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
return v.Inner.Less(nbf, other)
func (v Geometry) Less(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (bool, error) {
return v.Inner.Less(ctx, nbf, other)
}
func (v Geometry) Hash(nbf *NomsBinFormat) (hash.Hash, error) {
+2 -2
@@ -58,7 +58,7 @@ func (v GeomColl) Equals(other Value) bool {
return true
}
func (v GeomColl) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
func (v GeomColl) Less(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (bool, error) {
// Compare types
v2, ok := other.(GeomColl)
if !ok {
@@ -80,7 +80,7 @@ func (v GeomColl) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
// Compare each polygon until there is one that is less
for i := 0; i < n; i++ {
if !v.Geometries[i].Equals(v2.Geometries[i]) {
return v.Geometries[i].Less(nbf, v2.Geometries[i])
return v.Geometries[i].Less(ctx, nbf, v2.Geometries[i])
}
}
// Determine based off length
+1 -1
@@ -85,7 +85,7 @@ func advanceCursorToOffset(cur *sequenceCursor, idx uint64) (uint64, error) {
}
func newIndexedMetaSequenceChunkFn(kind NomsKind, vrw ValueReadWriter) makeChunkFn {
return func(level uint64, items []sequenceItem) (Collection, orderedKey, uint64, error) {
return func(ctx context.Context, level uint64, items []sequenceItem) (Collection, orderedKey, uint64, error) {
tuples := make([]metaTuple, len(items))
numLeaves := uint64(0)
+1 -1
@@ -40,7 +40,7 @@ func (v InlineBlob) Equals(other Value) bool {
return bytes.Equal(v, v2)
}
func (v InlineBlob) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
func (v InlineBlob) Less(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (bool, error) {
if v2, ok := other.(InlineBlob); ok {
return bytes.Compare(v, v2) == -1, nil
}
+1 -1
@@ -40,7 +40,7 @@ func (v Int) Equals(other Value) bool {
return v == other
}
func (v Int) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
func (v Int) Less(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (bool, error) {
if v2, ok := other.(Int); ok {
return v < v2, nil
}
+13 -13
@@ -164,13 +164,13 @@ func (t JSON) isPrimitive() bool {
}
// Less implements the LesserValuable interface.
func (t JSON) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
func (t JSON) Less(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (bool, error) {
otherJSONDoc, ok := other.(JSON)
if !ok {
return JSONKind < other.Kind(), nil
}
cmp, err := t.Compare(otherJSONDoc)
cmp, err := t.Compare(ctx, otherJSONDoc)
if err != nil {
return false, err
}
@@ -179,7 +179,7 @@ func (t JSON) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
}
// Compare implements MySQL JSON type compare semantics.
func (t JSON) Compare(other JSON) (int, error) {
func (t JSON) Compare(ctx context.Context, other JSON) (int, error) {
left, err := t.Inner()
if err != nil {
return 0, err
@@ -190,7 +190,7 @@ func (t JSON) Compare(other JSON) (int, error) {
return 0, err
}
return compareJSON(left, right)
return compareJSON(ctx, left, right)
}
func (t JSON) readFrom(nbf *NomsBinFormat, b *binaryNomsReader) (Value, error) {
@@ -214,7 +214,7 @@ func (t JSON) HumanReadableString() string {
return fmt.Sprintf("JSON(%s)", h.String())
}
func compareJSON(a, b Value) (int, error) {
func compareJSON(ctx context.Context, a, b Value) (int, error) {
aNull := a.Kind() == NullKind
bNull := b.Kind() == NullKind
if aNull && bNull {
@@ -229,9 +229,9 @@ func compareJSON(a, b Value) (int, error) {
case Bool:
return compareJSONBool(a, b)
case List:
return compareJSONArray(a, b)
return compareJSONArray(ctx, a, b)
case Map:
return compareJSONObject(a, b)
return compareJSONObject(ctx, a, b)
case String:
return compareJSONString(a, b)
case Float:
@@ -262,7 +262,7 @@ func compareJSONBool(a Bool, b Value) (int, error) {
}
}
func compareJSONArray(a List, b Value) (int, error) {
func compareJSONArray(ctx context.Context, a List, b Value) (int, error) {
switch b := b.(type) {
case Bool:
// a is lower precedence
@@ -274,7 +274,7 @@ func compareJSONArray(a List, b Value) (int, error) {
// where there is a difference. The array with the smaller value in that position is ordered first.
// TODO(andy): this diverges from GMS
aLess, err := a.Less(a.format(), b)
aLess, err := a.Less(ctx, a.format(), b)
if err != nil {
return 0, err
}
@@ -282,7 +282,7 @@ func compareJSONArray(a List, b Value) (int, error) {
return -1, nil
}
bLess, err := b.Less(b.format(), a)
bLess, err := b.Less(ctx, a.format(), a)
if err != nil {
return 0, err
}
@@ -298,7 +298,7 @@ func compareJSONArray(a List, b Value) (int, error) {
}
}
func compareJSONObject(a Map, b Value) (int, error) {
func compareJSONObject(ctx context.Context, a Map, b Value) (int, error) {
switch b := b.(type) {
case
Bool,
@@ -311,7 +311,7 @@ func compareJSONObject(a Map, b Value) (int, error) {
// objects. The order of two objects that are not equal is unspecified but deterministic.
// TODO(andy): this diverges from GMS
aLess, err := a.Less(a.format(), b)
aLess, err := a.Less(ctx, a.format(), b)
if err != nil {
return 0, err
}
@@ -319,7 +319,7 @@ func compareJSONObject(a Map, b Value) (int, error) {
return -1, nil
}
bLess, err := b.Less(b.format(), a)
bLess, err := b.Less(ctx, b.format(), a)
if err != nil {
return 0, err
}
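The NullKind branch of compareJSON follows MySQL's type precedence, under which SQL NULL orders below any JSON value; the diff only shows the equal case, so the -1/+1 directions below are an assumption. A self-contained restatement of just that rule, as a hypothetical helper rather than part of the package:

package example

// jsonNullCompare restates the NullKind branch of compareJSON: two nulls
// compare equal, and a null orders before any non-null value (direction
// assumed from MySQL's type precedence). The boolean reports whether the
// null rule decided the comparison.
func jsonNullCompare(aNull, bNull bool) (cmp int, decided bool) {
	switch {
	case aNull && bNull:
		return 0, true
	case aNull:
		return -1, true
	case bNull:
		return 1, true
	default:
		return 0, false // neither is null; fall through to kind-based compare
	}
}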
+4 -3
@@ -14,6 +14,8 @@
package types
import "context"
// KVP is a simple key value pair
type KVP struct {
// Key is the key
@@ -28,7 +30,6 @@ type KVPSlice []KVP
type KVPSort struct {
Values []KVP
NBF *NomsBinFormat
}
// Len returns the size of the slice
@@ -37,8 +38,8 @@ func (kvps KVPSort) Len() int {
}
// Less returns a bool representing whether the key at index i is less than the key at index j
func (kvps KVPSort) Less(i, j int) (bool, error) {
return kvps.Values[i].Key.Less(kvps.NBF, kvps.Values[j].Key)
func (kvps KVPSort) Less(ctx context.Context, nbf *NomsBinFormat, i, j int) (bool, error) {
return kvps.Values[i].Key.Less(ctx, nbf, kvps.Values[j].Key)
}
// Swap swaps the KVP at index i with the KVP at index j
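With the NBF field removed from KVPSort, anything handed to SortWithErroringLess is assumed to implement this shape of Len, Swap, and a Less that receives the context and format as arguments (the exact interface is internal; this mirrors KVPSort above). A sketch of a custom sortable:

package example

import (
	"context"

	"github.com/dolthub/dolt/go/store/types"
)

// byKey sorts KVPs by key under the new interface: Less receives the
// context and format from SortWithErroringLess instead of storing them.
type byKey struct{ kvps types.KVPSlice }

func (s byKey) Len() int      { return len(s.kvps) }
func (s byKey) Swap(i, j int) { s.kvps[i], s.kvps[j] = s.kvps[j], s.kvps[i] }

func (s byKey) Less(ctx context.Context, nbf *types.NomsBinFormat, i, j int) (bool, error) {
	return s.kvps[i].Key.Less(ctx, nbf, s.kvps[j].Key)
}

Passing byKey{kvps} to types.SortWithErroringLess(ctx, nbf, ...) then sorts the slice in place.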
+2 -2
@@ -62,7 +62,7 @@ func (v LineString) Equals(other Value) bool {
return true
}
func (v LineString) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
func (v LineString) Less(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (bool, error) {
// Compare types
v2, ok := other.(LineString)
if !ok {
@@ -86,7 +86,7 @@ func (v LineString) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error)
// Compare each point until there's one that is less than
for i := 0; i < n; i++ {
if !v.Points[i].Equals(v2.Points[i]) {
return v.Points[i].Less(nbf, v2.Points[i])
return v.Points[i].Less(ctx, nbf, v2.Points[i])
}
}
+2 -2
@@ -82,7 +82,7 @@ func (l List) ToSet(ctx context.Context) (Set, error) {
}
e := s.Edit()
err = l.IterAll(ctx, func(v Value, idx uint64) error {
se, err := e.Insert(v)
se, err := e.Insert(ctx, v)
e = se
return err
})
@@ -477,7 +477,7 @@ func newListChunker(nbf *NomsBinFormat, salt byte) sequenceSplitter {
}
func makeListLeafChunkFn(vrw ValueReadWriter) makeChunkFn {
return func(level uint64, items []sequenceItem) (Collection, orderedKey, uint64, error) {
return func(ctx context.Context, level uint64, items []sequenceItem) (Collection, orderedKey, uint64, error) {
d.PanicIfFalse(level == 0)
values := make([]Value, len(items))
+10 -13
@@ -69,7 +69,7 @@ func newMapChunker(nbf *NomsBinFormat, salt byte) sequenceSplitter {
}
func NewMap(ctx context.Context, vrw ValueReadWriter, kv ...Value) (Map, error) {
entries, err := buildMapData(vrw.Format(), kv)
entries, err := buildMapData(ctx, vrw, kv)
if err != nil {
return EmptyMap, err
@@ -159,7 +159,7 @@ LOOP:
k = v
if lastK != nil {
isLess, err := lastK.Less(vrw.Format(), k)
isLess, err := lastK.Less(ctx, vrw.Format(), k)
if err != nil {
return EmptyMap, err
}
@@ -514,7 +514,7 @@ func (m Map) Edit() *MapEditor {
return NewMapEditor(m)
}
func buildMapData(nbf *NomsBinFormat, values []Value) (mapEntrySlice, error) {
func buildMapData(ctx context.Context, vr ValueReader, values []Value) (mapEntrySlice, error) {
if len(values) == 0 {
return mapEntrySlice{}, nil
}
@@ -524,7 +524,6 @@ func buildMapData(nbf *NomsBinFormat, values []Value) (mapEntrySlice, error) {
}
kvs := mapEntrySlice{
make([]mapEntry, len(values)/2),
nbf,
}
for i := 0; i < len(values); i += 2 {
@@ -536,10 +535,9 @@ func buildMapData(nbf *NomsBinFormat, values []Value) (mapEntrySlice, error) {
uniqueSorted := mapEntrySlice{
make([]mapEntry, 0, len(kvs.entries)),
nbf,
}
err := SortWithErroringLess(kvs)
err := SortWithErroringLess(ctx, vr.Format(), kvs)
if err != nil {
return mapEntrySlice{}, err
@@ -557,12 +555,11 @@ func buildMapData(nbf *NomsBinFormat, values []Value) (mapEntrySlice, error) {
return mapEntrySlice{
append(uniqueSorted.entries, last),
uniqueSorted.nbf,
}, nil
}
func makeMapLeafChunkFn(vrw ValueReadWriter) makeChunkFn {
return func(level uint64, items []sequenceItem) (Collection, orderedKey, uint64, error) {
return func(ctx context.Context, level uint64, items []sequenceItem) (Collection, orderedKey, uint64, error) {
d.PanicIfFalse(level == 0)
mapData := make([]mapEntry, len(items))
@@ -571,7 +568,7 @@ func makeMapLeafChunkFn(vrw ValueReadWriter) makeChunkFn {
entry := v.(mapEntry)
if lastKey != nil {
isLess, err := lastKey.Less(vrw.Format(), entry.key)
isLess, err := lastKey.Less(ctx, vrw.Format(), entry.key)
if err != nil {
return nil, orderedKey{}, 0, err
@@ -760,7 +757,7 @@ func (m Map) IndexForKey(ctx context.Context, key Value) (int64, error) {
if metaSeq, ok := m.orderedSequence.(metaSequence); ok {
return indexForKeyWithinSubtree(ctx, orderedKey, metaSeq, m.valueReadWriter())
} else if leaf, ok := m.orderedSequence.(mapLeafSequence); ok {
leafIdx, err := leaf.search(orderedKey)
leafIdx, err := leaf.search(ctx, orderedKey)
if err != nil {
return 0, err
}
@@ -784,7 +781,7 @@ func indexForKeyWithinSubtree(ctx context.Context, key orderedKey, metaSeq metaS
return 0, err
}
isLess, err := key.Less(vrw.Format(), tupleKey)
isLess, err := key.Less(ctx, vrw.Format(), tupleKey)
if err != nil {
return 0, err
}
@@ -808,7 +805,7 @@ func indexForKeyWithinSubtree(ctx context.Context, key orderedKey, metaSeq metaS
}
return idx + subtreeIdx, nil
} else if leaf, ok := child.(mapLeafSequence); ok {
leafIdx, err := leaf.search(key)
leafIdx, err := leaf.search(ctx, key)
if err != nil {
return 0, err
}
@@ -852,7 +849,7 @@ func UnionMaps(ctx context.Context, a Map, b Map, cb MapUnionConflictCB) (Map, e
for aKey != nil && bKey != nil {
aLess, err := aKey.Less(a.Format(), bKey)
aLess, err := aKey.Less(ctx, a.format(), bKey)
if err != nil {
return EmptyMap, err
}
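NewMap needs the context because buildMapData sorts and de-duplicates the key/value arguments with Less. A sketch of the call site, using the First accessor exercised in the tests below:

package main

import (
	"context"
	"fmt"

	"github.com/dolthub/dolt/go/store/types"
)

func main() {
	ctx := context.Background()
	vrw := types.NewMemoryValueStore()

	// buildMapData sorts and de-duplicates these pairs by key, which is
	// why map construction now threads the context into Less.
	m, err := types.NewMap(ctx, vrw,
		types.String("b"), types.Int(2),
		types.String("a"), types.Int(1),
	)
	if err != nil {
		panic(err)
	}

	k, v, err := m.First(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(k, v) // a 1
}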
+4 -5
@@ -28,7 +28,7 @@ import (
)
// CreateEditAcc defines a factory method for EditAccumulator creation
type CreateEditAcc func(nbf *NomsBinFormat) EditAccumulator
type CreateEditAcc func(ValueReader) EditAccumulator
// CreateEditAccForMapEdits allows users to define the EditAccumulator that should be used when creating a MapEditor via
// the Map.Edit method. In most cases you should call:
@@ -51,7 +51,7 @@ type EditAccumulator interface {
// FinishedEditing should be called when all edits have been added to get an EditProvider which provides the
// edits in sorted order. Adding more edits after calling FinishedEditing is an error.
FinishedEditing() (EditProvider, error)
FinishedEditing(context.Context) (EditProvider, error)
// Close ensures that the accumulator is closed. Repeat calls are allowed. Not guaranteed to be thread-safe, thus
// requires external synchronization.
@@ -66,13 +66,12 @@ type MapEditor struct {
}
func NewMapEditor(m Map) *MapEditor {
return &MapEditor{m, 0, CreateEditAccForMapEdits(m.format())}
return &MapEditor{m, 0, CreateEditAccForMapEdits(m.valueReadWriter())}
}
// Map applies all edits and returns a newly updated Map
func (med *MapEditor) Map(ctx context.Context) (Map, error) {
edits, err := med.acc.FinishedEditing()
edits, err := med.acc.FinishedEditing(ctx)
if err != nil {
return EmptyMap, err
}
+11 -14
@@ -93,15 +93,14 @@ func (entry mapEntry) equals(other mapEntry) bool {
type mapEntrySlice struct {
entries []mapEntry
nbf *NomsBinFormat
}
func (mes mapEntrySlice) Len() int { return len(mes.entries) }
func (mes mapEntrySlice) Swap(i, j int) {
mes.entries[i], mes.entries[j] = mes.entries[j], mes.entries[i]
}
func (mes mapEntrySlice) Less(i, j int) (bool, error) {
return mes.entries[i].key.Less(mes.nbf, mes.entries[j].key)
func (mes mapEntrySlice) Less(ctx context.Context, nbf *NomsBinFormat, i, j int) (bool, error) {
return mes.entries[i].key.Less(ctx, nbf, mes.entries[j].key)
}
func (mes mapEntrySlice) Equals(other mapEntrySlice) bool {
if mes.Len() != other.Len() {
@@ -175,11 +174,10 @@ func (ml mapLeafSequence) walkRefs(nbf *NomsBinFormat, cb RefCallback) error {
return walkRefs(w.buff[:w.offset], ml.format(), cb)
}
func (ml mapLeafSequence) entries() (mapEntrySlice, error) {
func (ml mapLeafSequence) entries(ctx context.Context) (mapEntrySlice, error) {
dec, count := ml.decoderSkipToValues()
entries := mapEntrySlice{
make([]mapEntry, count),
ml.format(),
}
for i := uint64(0); i < count; i++ {
k, err := dec.readValue(ml.format())
@@ -337,7 +335,7 @@ func (ml mapLeafSequence) getKey(idx int) (orderedKey, error) {
return newOrderedKey(v, ml.format())
}
func (ml mapLeafSequence) search(key orderedKey) (int, error) {
func (ml mapLeafSequence) search(ctx context.Context, key orderedKey) (int, error) {
n, err := SearchWithErroringLess(int(ml.Len()), func(i int) (bool, error) {
k, err := ml.getKey(i)
@@ -345,7 +343,7 @@ func (ml mapLeafSequence) search(key orderedKey) (int, error) {
return false, err
}
isLess, err := k.Less(ml.format(), key)
isLess, err := k.Less(ctx, ml.format(), key)
if err != nil {
return false, err
@@ -372,24 +370,23 @@ var _ sequence = (*mapEntrySequence)(nil)
var _ orderedSequence = (*mapEntrySequence)(nil)
type mapEntrySequence struct {
nbf *NomsBinFormat
vrw ValueReadWriter
entries []mapEntry
}
func newMapEntrySequence(vrw ValueReadWriter, data ...mapEntry) (sequence, error) {
return mapEntrySequence{nbf: vrw.Format(), vrw: vrw, entries: data}, nil
return mapEntrySequence{vrw: vrw, entries: data}, nil
}
func (mes mapEntrySequence) getKey(idx int) (orderedKey, error) {
return newOrderedKey(mes.entries[idx].key, mes.nbf)
return newOrderedKey(mes.entries[idx].key, mes.vrw.Format())
}
func (mes mapEntrySequence) getValue(idx int) (Value, error) {
return mes.entries[idx].value, nil
}
func (mes mapEntrySequence) search(key orderedKey) (int, error) {
func (mes mapEntrySequence) search(ctx context.Context, key orderedKey) (int, error) {
n, err := SearchWithErroringLess(len(mes.entries), func(i int) (bool, error) {
ordKey, err := mes.getKey(i)
@@ -397,7 +394,7 @@ func (mes mapEntrySequence) search(key orderedKey) (int, error) {
return false, err
}
isLess, err := ordKey.Less(mes.nbf, key)
isLess, err := ordKey.Less(ctx, mes.vrw.Format(), key)
if err != nil {
return false, err
@@ -561,11 +558,11 @@ func (mes mapEntrySequence) writeTo(writer nomsWriter, format *NomsBinFormat) er
panic("not implemented")
}
func (mes mapEntrySequence) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
func (mes mapEntrySequence) Less(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (bool, error) {
panic("not implemented")
}
func (mes mapEntrySequence) Compare(nbf *NomsBinFormat, other LesserValuable) (int, error) {
func (mes mapEntrySequence) Compare(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (int, error) {
panic("not implemented")
}
+28 -24
@@ -55,14 +55,14 @@ func (tm testMap) SetValue(i int, v Value) testMap {
entries := make([]mapEntry, 0, len(tm.entries.entries))
entries = append(entries, tm.entries.entries...)
entries[i].value = v
return testMap{mapEntrySlice{entries, tm.entries.nbf}, tm.knownBadKey}
return testMap{mapEntrySlice{entries}, tm.knownBadKey}
}
func (tm testMap) Remove(from, to int) testMap {
entries := make([]mapEntry, 0, len(tm.entries.entries)-(to-from))
entries = append(entries, tm.entries.entries[:from]...)
entries = append(entries, tm.entries.entries[to:]...)
return testMap{mapEntrySlice{entries, tm.entries.nbf}, tm.knownBadKey}
return testMap{mapEntrySlice{entries}, tm.knownBadKey}
}
func (tm testMap) MaybeGet(key Value) (v Value, ok bool) {
@@ -141,13 +141,13 @@ func (tm testMap) FlattenAll() []Value {
return tm.Flatten(0, len(tm.entries.entries))
}
func newSortedTestMap(nbf *NomsBinFormat, length int, gen genValueFn) testMap {
func newSortedTestMap(ctx context.Context, vr ValueReader, length int, gen genValueFn) testMap {
keys := make(ValueSlice, 0, length)
for i := 0; i < length; i++ {
keys = append(keys, mustValue(gen(nbf, i)))
keys = append(keys, mustValue(gen(vr.Format(), i)))
}
err := SortWithErroringLess(ValueSort{keys, nbf})
err := SortWithErroringLess(ctx, vr.Format(), ValueSort{keys})
d.PanicIfError(err)
entries := make([]mapEntry, 0, len(keys))
@@ -155,7 +155,7 @@ func newSortedTestMap(nbf *NomsBinFormat, length int, gen genValueFn) testMap {
entries = append(entries, mapEntry{k, Float(i * 2)})
}
return testMap{mapEntrySlice{entries, nbf}, Float(length + 2)}
return testMap{mapEntrySlice{entries}, Float(length + 2)}
}
func newTestMapFromMap(m Map) testMap {
@@ -167,10 +167,10 @@ func newTestMapFromMap(m Map) testMap {
d.PanicIfError(err)
return testMap{mapEntrySlice{entries, m.Format()}, Float(-0)}
return testMap{mapEntrySlice{entries}, Float(-0)}
}
func newRandomTestMap(nbf *NomsBinFormat, length int, gen genValueFn) testMap {
func newRandomTestMap(ctx context.Context, vr ValueReader, length int, gen genValueFn) testMap {
s := rand.NewSource(4242)
used := map[int]bool{}
@@ -179,13 +179,13 @@ func newRandomTestMap(nbf *NomsBinFormat, length int, gen genValueFn) testMap {
for len(entries) < length {
v := int(s.Int63()) & mask
if _, ok := used[v]; !ok {
entry := mapEntry{mustValue(gen(nbf, v)), mustValue(gen(nbf, v*2))}
entry := mapEntry{mustValue(gen(vr.Format(), v)), mustValue(gen(vr.Format(), v*2))}
entries = append(entries, entry)
used[v] = true
}
}
return testMap{mapEntrySlice{entries, nbf}, mustValue(gen(nbf, mask+1))}
return testMap{mapEntrySlice{entries}, mustValue(gen(vr.Format(), mask+1))}
}
func validateMap(t *testing.T, vrw ValueReadWriter, m Map, entries mapEntrySlice) {
@@ -213,7 +213,7 @@ func newMapTestSuite(size uint, expectChunkCount int, expectPrependChunkDiff int
length := 1 << size
keyType, err := TypeOf(mustValue(gen(vrw.Format(), 0)))
d.PanicIfError(err)
elems := newSortedTestMap(vrw.Format(), length, gen)
elems := newSortedTestMap(context.Background(), vrw, length, gen)
tr, err := MakeMapType(keyType, PrimitiveTypeMap[FloatKind])
d.PanicIfError(err)
tmap, err := NewMap(context.Background(), vrw, elems.FlattenAll()...)
@@ -346,7 +346,7 @@ func (suite *mapTestSuite) TestStreamingMapOrder() {
vs := newTestValueStore()
defer vs.Close()
entries := mapEntrySlice{make([]mapEntry, len(suite.elems.entries.entries)), vs.Format()}
entries := mapEntrySlice{make([]mapEntry, len(suite.elems.entries.entries))}
copy(entries.entries, suite.elems.entries.entries)
entries.entries[0], entries.entries[1] = entries.entries[1], entries.entries[0]
@@ -400,21 +400,21 @@ func newNumberStruct(nbf *NomsBinFormat, i int) (Value, error) {
}
func getTestNativeOrderMap(scale int, vrw ValueReadWriter) testMap {
return newRandomTestMap(vrw.Format(), 64*scale, newNumber)
return newRandomTestMap(context.Background(), vrw, 64*scale, newNumber)
}
func getTestRefValueOrderMap(scale int, vrw ValueReadWriter) testMap {
return newRandomTestMap(vrw.Format(), 64*scale, newNumber)
return newRandomTestMap(context.Background(), vrw, 64*scale, newNumber)
}
func getTestRefToNativeOrderMap(scale int, vrw ValueReadWriter) testMap {
return newRandomTestMap(vrw.Format(), 64*scale, func(nbf *NomsBinFormat, i int) (Value, error) {
return newRandomTestMap(context.Background(), vrw, 64*scale, func(nbf *NomsBinFormat, i int) (Value, error) {
return vrw.WriteValue(context.Background(), Float(i))
})
}
func getTestRefToValueOrderMap(scale int, vrw ValueReadWriter) testMap {
return newRandomTestMap(vrw.Format(), 64*scale, func(nbf *NomsBinFormat, i int) (Value, error) {
return newRandomTestMap(context.Background(), vrw, 64*scale, func(nbf *NomsBinFormat, i int) (Value, error) {
s, err := NewSet(context.Background(), vrw, Float(i))
if err != nil {
@@ -470,8 +470,8 @@ func TestMapDiff(t *testing.T) {
vrw := newTestValueStore()
testMap1 := newRandomTestMap(vrw.Format(), 64*2, newNumber)
testMap2 := newRandomTestMap(vrw.Format(), 64*2, newNumber)
testMap1 := newRandomTestMap(context.Background(), vrw, 64*2, newNumber)
testMap2 := newRandomTestMap(context.Background(), vrw, 64*2, newNumber)
testMapAdded, testMapRemoved, testMapModified := testMap1.Diff(testMap2)
map1 := testMap1.toMap(vrw)
map2 := testMap2.toMap(vrw)
@@ -865,9 +865,10 @@ func TestMapFirst2(t *testing.T) {
doTest := func(toTestMap toTestMapFunc, scale int) {
vrw := newTestValueStore()
ctx := context.Background()
tm := toTestMap(scale, vrw)
m := tm.toMap(vrw)
err := SortWithErroringLess(tm.entries)
err := SortWithErroringLess(ctx, vrw.Format(), tm.entries)
require.NoError(t, err)
actualKey, actualValue, err := m.First(context.Background())
require.NoError(t, err)
@@ -923,9 +924,10 @@ func TestMapLast2(t *testing.T) {
doTest := func(toTestMap toTestMapFunc, scale int) {
vrw := newTestValueStore()
ctx := context.Background()
tm := toTestMap(scale, vrw)
m := tm.toMap(vrw)
err := SortWithErroringLess(tm.entries)
err := SortWithErroringLess(ctx, vrw.Format(), tm.entries)
require.NoError(t, err)
actualKey, actualValue, err := m.Last(context.Background())
require.NoError(t, err)
@@ -1042,7 +1044,7 @@ func TestMapValidateInsertAscending(t *testing.T) {
defer normalProductionChunks()
vrw := newTestValueStore()
validateMapInsertion(t, vrw, newSortedTestMap(vrw.Format(), 300, newNumber))
validateMapInsertion(t, vrw, newSortedTestMap(context.Background(), vrw, 300, newNumber))
}
func TestMapSet(t *testing.T) {
@@ -1239,9 +1241,10 @@ func TestMapIter2(t *testing.T) {
doTest := func(toTestMap toTestMapFunc, scale int) {
vrw := newTestValueStore()
ctx := context.Background()
tm := toTestMap(scale, vrw)
m := tm.toMap(vrw)
err := SortWithErroringLess(tm.entries)
err := SortWithErroringLess(ctx, vrw.Format(), tm.entries)
require.NoError(t, err)
idx := uint64(0)
endAt := uint64(64)
@@ -1292,9 +1295,10 @@ func TestMapIterAll(t *testing.T) {
doTest := func(toTestMap toTestMapFunc, scale int) {
vrw := newTestValueStore()
ctx := context.Background()
tm := toTestMap(scale, vrw)
m := tm.toMap(vrw)
err := SortWithErroringLess(tm.entries)
err := SortWithErroringLess(ctx, vrw.Format(), tm.entries)
require.NoError(t, err)
idx := uint64(0)
@@ -1992,7 +1996,7 @@ func TestMapIterFrom(t *testing.T) {
test := func(m Map, start, end Value) ValueSlice {
res := ValueSlice{}
err := m.IterFrom(context.Background(), start, func(k, v Value) (bool, error) {
isLess, err := end.Less(vrw.Format(), k)
isLess, err := end.Less(context.Background(), vrw.Format(), k)
if err != nil {
return false, err
+8 -8
@@ -147,10 +147,10 @@ func orderedKeyFromUint64(n uint64, nbf *NomsBinFormat) (orderedKey, error) {
return newOrderedKey(Float(n), nbf)
}
func (key orderedKey) Less(nbf *NomsBinFormat, mk2 orderedKey) (bool, error) {
func (key orderedKey) Less(ctx context.Context, nbf *NomsBinFormat, mk2 orderedKey) (bool, error) {
switch {
case key.isOrderedByValue && mk2.isOrderedByValue:
return key.v.Less(nbf, mk2.v)
return key.v.Less(ctx, nbf, mk2.v)
case key.isOrderedByValue:
return true, nil
case mk2.isOrderedByValue:
@@ -246,7 +246,7 @@ func (ms metaSequence) getKey(idx int) (orderedKey, error) {
return dec.readOrderedKey(ms.format())
}
func (ms metaSequence) search(key orderedKey) (int, error) {
func (ms metaSequence) search(ctx context.Context, key orderedKey) (int, error) {
res, err := SearchWithErroringLess(int(ms.seqLen()), func(i int) (bool, error) {
ordKey, err := ms.getKey(i)
@@ -254,7 +254,7 @@ func (ms metaSequence) search(key orderedKey) (int, error) {
return false, err
}
isLess, err := ordKey.Less(ms.format(), key)
isLess, err := ordKey.Less(ctx, ms.format(), key)
if err != nil {
return false, err
@@ -491,7 +491,7 @@ func (ms metaSequence) getCompositeChildSequence(ctx context.Context, start uint
var valueItems []mapEntry
for _, seq := range output {
entries, err := seq.(mapLeafSequence).entries()
entries, err := seq.(mapLeafSequence).entries(ctx)
if err != nil {
return nil, err
@@ -606,7 +606,7 @@ func (es emptySequence) getKey(idx int) (orderedKey, error) {
panic("empty sequence")
}
func (es emptySequence) search(key orderedKey) (int, error) {
func (es emptySequence) search(ctx context.Context, key orderedKey) (int, error) {
panic("empty sequence")
}
@@ -649,11 +649,11 @@ func (es emptySequence) Equals(other Value) bool {
panic("empty sequence")
}
func (es emptySequence) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
func (es emptySequence) Less(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (bool, error) {
panic("empty sequence")
}
func (es emptySequence) Compare(nbf *NomsBinFormat, other LesserValuable) (int, error) {
func (es emptySequence) Compare(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (int, error) {
panic("empty sequence")
}
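Each of the search methods above funnels into types.SearchWithErroringLess, a binary search whose predicate may fail. Per the seekTo comment below, passing an "element < key" predicate yields the smallest index whose element is greater than or equal to the key. A sketch:

package main

import (
	"context"
	"fmt"

	"github.com/dolthub/dolt/go/store/types"
)

func main() {
	ctx := context.Background()
	nbf := types.Format_Default

	xs := types.ValueSlice{types.Int(1), types.Int(3), types.Int(5)}
	target := types.Int(4)

	// The predicate is "element < target", matching the search() methods
	// above; the result is the first index whose element is >= target.
	idx, err := types.SearchWithErroringLess(len(xs), func(i int) (bool, error) {
		return xs[i].Less(ctx, nbf, target)
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(idx) // 2
}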
+2 -2
@@ -58,7 +58,7 @@ func (v MultiLineString) Equals(other Value) bool {
return true
}
func (v MultiLineString) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
func (v MultiLineString) Less(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (bool, error) {
// Compare types
v2, ok := other.(MultiLineString)
if !ok {
@@ -80,7 +80,7 @@ func (v MultiLineString) Less(nbf *NomsBinFormat, other LesserValuable) (bool, e
// Compare each line until there is one that is less
for i := 0; i < n; i++ {
if !v.Lines[i].Equals(v2.Lines[i]) {
return v.Lines[i].Less(nbf, v2.Lines[i])
return v.Lines[i].Less(ctx, nbf, v2.Lines[i])
}
}
// Determine based off length
+2 -2
@@ -54,7 +54,7 @@ func (v MultiPoint) Equals(other Value) bool {
return true
}
func (v MultiPoint) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
func (v MultiPoint) Less(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (bool, error) {
v2, ok := other.(MultiPoint)
if !ok {
return MultiPointKind < other.Kind(), nil
@@ -73,7 +73,7 @@ func (v MultiPoint) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error)
for i := 0; i < n; i++ {
if !v.Points[i].Equals(v2.Points[i]) {
return v.Points[i].Less(nbf, v2.Points[i])
return v.Points[i].Less(ctx, nbf, v2.Points[i])
}
}
+2 -2
@@ -58,7 +58,7 @@ func (v MultiPolygon) Equals(other Value) bool {
return true
}
func (v MultiPolygon) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
func (v MultiPolygon) Less(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (bool, error) {
// Compare types
v2, ok := other.(MultiPolygon)
if !ok {
@@ -80,7 +80,7 @@ func (v MultiPolygon) Less(nbf *NomsBinFormat, other LesserValuable) (bool, erro
// Compare each polygon until there is one that is less
for i := 0; i < n; i++ {
if !v.Polygons[i].Equals(v2.Polygons[i]) {
return v.Polygons[i].Less(nbf, v2.Polygons[i])
return v.Polygons[i].Less(ctx, nbf, v2.Polygons[i])
}
}
// Determine based off length
+1 -1
@@ -46,7 +46,7 @@ func (v Null) Equals(other Value) bool {
return other == nil || other.Kind() == NullKind
}
func (v Null) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
func (v Null) Less(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (bool, error) {
return NullKind < other.Kind(), nil
}
+8 -8
@@ -30,7 +30,7 @@ import (
type orderedSequence interface {
sequence
getKey(idx int) (orderedKey, error)
search(key orderedKey) (int, error)
search(ctx context.Context, key orderedKey) (int, error)
}
func newSetMetaSequence(level uint64, tuples []metaTuple, vrw ValueReadWriter) (metaSequence, error) {
@@ -63,7 +63,7 @@ func newCursorAt(ctx context.Context, seq orderedSequence, key orderedKey, forIn
}
cur = newSequenceCursor(cur, seq, idx)
if key != emptyKey {
ok, err := seekTo(cur, key, forInsertion && !seq.isLeaf())
ok, err := seekTo(ctx, cur, key, forInsertion && !seq.isLeaf())
if err != nil {
return nil, err
@@ -110,7 +110,7 @@ func newCursorBackFrom(ctx context.Context, seq orderedSequence, key orderedKey)
cur = newReverseSequenceCursor(cur, seq, idx)
if key != emptyKey {
// If we run off the end of the sequence, start the cursor at the last element.
ok, err := seekTo(cur, key, true)
ok, err := seekTo(ctx, cur, key, true)
if err != nil {
return nil, err
@@ -140,7 +140,7 @@ func newCursorBackFrom(ctx context.Context, seq orderedSequence, key orderedKey)
return nil, err
}
isLess, err := key.Less(cur.seq.format(), currKey)
isLess, err := key.Less(ctx, cur.seq.format(), currKey)
if err != nil {
return nil, err
}
@@ -157,12 +157,12 @@ func newCursorBackFrom(ctx context.Context, seq orderedSequence, key orderedKey)
return cur, nil
}
func seekTo(cur *sequenceCursor, key orderedKey, lastPositionIfNotFound bool) (bool, error) {
func seekTo(ctx context.Context, cur *sequenceCursor, key orderedKey, lastPositionIfNotFound bool) (bool, error) {
seq := cur.seq.(orderedSequence)
var err error
// Find smallest idx in seq where key(idx) >= key
cur.idx, err = seq.search(key)
cur.idx, err = seq.search(ctx, key)
if err != nil {
return false, err
@@ -201,7 +201,7 @@ func getMapValue(cur *sequenceCursor) (Value, error) {
// If |vrw| is not nil, chunks will be eagerly written as they're created. Otherwise they are
// written when the root is written.
func newOrderedMetaSequenceChunkFn(kind NomsKind, vrw ValueReadWriter) makeChunkFn {
return func(level uint64, items []sequenceItem) (Collection, orderedKey, uint64, error) {
return func(ctx context.Context, level uint64, items []sequenceItem) (Collection, orderedKey, uint64, error) {
tuples := make([]metaTuple, len(items))
numLeaves := uint64(0)
@@ -215,7 +215,7 @@ func newOrderedMetaSequenceChunkFn(kind NomsKind, vrw ValueReadWriter) makeChunk
}
if lastKey != emptyKey {
isLess, err := lastKey.Less(vrw.Format(), key)
isLess, err := lastKey.Less(ctx, vrw.Format(), key)
if err != nil {
return nil, orderedKey{}, 0, err
+2 -2
@@ -96,7 +96,7 @@ VALIDRANGES:
return err
}
if isLess, err := currentKey.Less(last.format(), lastKey); err != nil {
if isLess, err := currentKey.Less(ctx, last.format(), lastKey); err != nil {
return err
} else if isLess {
valid, skip, err := inRange(ctx, currentKey.v)
@@ -145,7 +145,7 @@ VALIDRANGES:
continue
}
if isLess, err := lastKey.Less(last.format(), currentKey); err != nil {
if isLess, err := lastKey.Less(ctx, last.format(), currentKey); err != nil {
return err
} else if isLess {
mv, err := getMapValue(lastCur)
+2 -2
@@ -49,7 +49,7 @@ func (t *testOrderedSequence) getChildSequence(_ context.Context, idx int) (sequ
}
}
func (t *testOrderedSequence) search(key orderedKey) (int, error) {
func (t *testOrderedSequence) search(ctx context.Context, key orderedKey) (int, error) {
idx, err := SearchWithErroringLess(int(t.Len()), func(i int) (bool, error) {
k, err := t.getKey(i)
@@ -57,7 +57,7 @@ func (t *testOrderedSequence) search(key orderedKey) (int, error) {
return false, err
}
isLess, err := k.Less(t.format(), key)
isLess, err := k.Less(ctx, t.format(), key)
if err != nil {
return false, err
+1 -1
@@ -42,7 +42,7 @@ func (v Point) Equals(other Value) bool {
return false
}
func (v Point) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
func (v Point) Less(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (bool, error) {
if v2, ok := other.(Point); ok {
return v.SRID < v2.SRID || v.X < v2.X || v.Y < v2.Y, nil
}
+2 -2
@@ -58,7 +58,7 @@ func (v Polygon) Equals(other Value) bool {
return true
}
func (v Polygon) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
func (v Polygon) Less(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (bool, error) {
// Compare types
v2, ok := other.(Polygon)
if !ok {
@@ -80,7 +80,7 @@ func (v Polygon) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
// Compare each point until there is one that is less
for i := 0; i < n; i++ {
if !v.Lines[i].Equals(v2.Lines[i]) {
return v.Lines[i].Less(nbf, v2.Lines[i])
return v.Lines[i].Less(ctx, nbf, v2.Lines[i])
}
}
// Determine based off length
+1 -1
@@ -54,7 +54,7 @@ func TestRefInSet(t *testing.T) {
require.NoError(t, err)
r, err := NewRef(s, vs.Format())
require.NoError(t, err)
se, err := s.Edit().Insert(r)
se, err := s.Edit().Insert(context.Background(), r)
require.NoError(t, err)
s, err = se.Set(context.Background())
require.NoError(t, err)
+2 -2
@@ -46,8 +46,8 @@ type sequence interface {
isLeaf() bool
Kind() NomsKind
Len() uint64
Less(nbf *NomsBinFormat, other LesserValuable) (bool, error)
Compare(nbf *NomsBinFormat, other LesserValuable) (int, error)
Less(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (bool, error)
Compare(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (int, error)
numLeaves() uint64
seqLen() int
treeLevel() uint64
+2 -2
@@ -61,7 +61,7 @@ type hashValueBytesFn func(item sequenceItem, sp sequenceSplitter) error
// makeChunkFn takes a sequence of items to chunk, and returns the resulting chunked collection,
// the ordered key of that chunk, and the number of leaves beneath it.
type makeChunkFn func(level uint64, values []sequenceItem) (Collection, orderedKey, uint64, error)
type makeChunkFn func(ctx context.Context, level uint64, values []sequenceItem) (Collection, orderedKey, uint64, error)
type sequenceChunker struct {
cur *sequenceCursor
@@ -351,7 +351,7 @@ func (sc *sequenceChunker) createParent(ctx context.Context) error {
// unnecessarily writing a chunk - the canonical root. However, this is a fair
// tradeoff for simplicity of the chunking algorithm.
func (sc *sequenceChunker) createSequence(ctx context.Context, write bool) (sequence, metaTuple, error) {
col, key, numLeaves, err := sc.makeChunk(sc.level, sc.current)
col, key, numLeaves, err := sc.makeChunk(ctx, sc.level, sc.current)
if err != nil {
return nil, metaTuple{}, err
+2 -2
@@ -108,11 +108,11 @@ func (ts testSequence) kvTuples(from, to uint64, dest []Tuple) ([]Tuple, error)
panic("not reached")
}
func (ts testSequence) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
func (ts testSequence) Less(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (bool, error) {
panic("not reached")
}
func (ts testSequence) Compare(nbf *NomsBinFormat, other LesserValuable) (int, error) {
func (ts testSequence) Compare(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (int, error) {
panic("not reached")
}
+1 -1
@@ -179,7 +179,7 @@ func (sm SerialMessage) HumanReadableString() string {
}
}
func (sm SerialMessage) Less(nbf *NomsBinFormat, other LesserValuable) (bool, error) {
func (sm SerialMessage) Less(ctx context.Context, nbf *NomsBinFormat, other LesserValuable) (bool, error) {
if v2, ok := other.(SerialMessage); ok {
return bytes.Compare(sm, v2) == -1, nil
}
+6 -6
@@ -40,7 +40,7 @@ func newSet(seq orderedSequence) Set {
}
func NewSet(ctx context.Context, vrw ValueReadWriter, v ...Value) (Set, error) {
data := buildSetData(vrw.Format(), v)
data := buildSetData(ctx, vrw, v)
ch, err := newEmptySetSequenceChunker(ctx, vrw)
if err != nil {
@@ -96,7 +96,7 @@ func readSetInput(ctx context.Context, vrw ValueReadWriter, ae *atomicerr.Atomic
var lastV Value
for v := range vChan {
if lastV != nil {
isLess, err := lastV.Less(vrw.Format(), v)
isLess, err := lastV.Less(ctx, vrw.Format(), v)
if ae.SetIfErrAndCheck(err) {
return
@@ -270,12 +270,12 @@ func (s Set) Edit() *SetEditor {
return NewSetEditor(s)
}
func buildSetData(nbf *NomsBinFormat, values ValueSlice) ValueSlice {
func buildSetData(ctx context.Context, vr ValueReader, values ValueSlice) ValueSlice {
if len(values) == 0 {
return ValueSlice{}
}
SortWithErroringLess(ValueSort{values, nbf})
SortWithErroringLess(ctx, vr.Format(), ValueSort{values})
uniqueSorted := make(ValueSlice, 0, len(values))
last := values[0]
@@ -291,7 +291,7 @@ func buildSetData(nbf *NomsBinFormat, values ValueSlice) ValueSlice {
}
func makeSetLeafChunkFn(vrw ValueReadWriter) makeChunkFn {
return func(level uint64, items []sequenceItem) (Collection, orderedKey, uint64, error) {
return func(ctx context.Context, level uint64, items []sequenceItem) (Collection, orderedKey, uint64, error) {
d.PanicIfFalse(level == 0)
setData := make([]Value, len(items))
@@ -300,7 +300,7 @@ func makeSetLeafChunkFn(vrw ValueReadWriter) makeChunkFn {
v := item.(Value)
if lastValue != nil {
isLess, err := lastValue.Less(vrw.Format(), v)
isLess, err := lastValue.Less(ctx, vrw.Format(), v)
if err != nil {
return nil, orderedKey{}, 0, err
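NewSet mirrors NewMap: buildSetData sorts and de-duplicates the inputs with Less, hence the context parameter. A sketch:

package main

import (
	"context"
	"fmt"

	"github.com/dolthub/dolt/go/store/types"
)

func main() {
	ctx := context.Background()
	vrw := types.NewMemoryValueStore()

	// buildSetData sorts and de-duplicates the inputs with Less, so
	// the duplicate 2 collapses to a single element.
	s, err := types.NewSet(ctx, vrw, types.Int(2), types.Int(1), types.Int(2))
	if err != nil {
		panic(err)
	}

	ok, err := s.Has(ctx, types.Int(1))
	if err != nil {
		panic(err)
	}
	fmt.Println(s.Len(), ok) // 2 true
}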
+17 -19
@@ -60,7 +60,7 @@ func (se *SetEditor) Set(ctx context.Context) (Set, error) {
seq := se.s.orderedSequence
vrw := seq.valueReadWriter()
err := se.normalize()
err := se.normalize(ctx)
if err != nil {
return EmptySet, err
@@ -175,11 +175,11 @@ func (se *SetEditor) Set(ctx context.Context) (Set, error) {
return newSet(chSeq.(orderedSequence)), nil
}
func (se *SetEditor) Insert(vs ...Value) (*SetEditor, error) {
SortWithErroringLess(ValueSort{vs, se.s.format()})
func (se *SetEditor) Insert(ctx context.Context, vs ...Value) (*SetEditor, error) {
SortWithErroringLess(ctx, se.s.format(), ValueSort{vs})
for _, v := range vs {
d.PanicIfTrue(v == nil)
err := se.edit(v, true)
err := se.edit(ctx, v, true)
if err != nil {
return nil, err
@@ -188,11 +188,11 @@ func (se *SetEditor) Insert(vs ...Value) (*SetEditor, error) {
return se, nil
}
func (se *SetEditor) Remove(vs ...Value) (*SetEditor, error) {
SortWithErroringLess(ValueSort{vs, se.s.format()})
func (se *SetEditor) Remove(ctx context.Context, vs ...Value) (*SetEditor, error) {
SortWithErroringLess(ctx, se.s.format(), ValueSort{vs})
for _, v := range vs {
d.PanicIfTrue(v == nil)
err := se.edit(v, false)
err := se.edit(ctx, v, false)
if err != nil {
return nil, err
@@ -202,7 +202,7 @@ func (se *SetEditor) Remove(vs ...Value) (*SetEditor, error) {
}
func (se *SetEditor) Has(ctx context.Context, v Value) (bool, error) {
if idx, found, err := se.findEdit(v); err != nil {
if idx, found, err := se.findEdit(ctx, v); err != nil {
return false, err
} else if found {
return se.edits.edits[idx].insert, nil
@@ -211,7 +211,7 @@ func (se *SetEditor) Has(ctx context.Context, v Value) (bool, error) {
return se.s.Has(ctx, v)
}
func (se *SetEditor) edit(v Value, insert bool) error {
func (se *SetEditor) edit(ctx context.Context, v Value, insert bool) error {
if len(se.edits.edits) == 0 {
se.edits.edits = append(se.edits.edits, setEdit{v, insert})
return nil
@@ -225,7 +225,7 @@ func (se *SetEditor) edit(v Value, insert bool) error {
se.edits.edits = append(se.edits.edits, setEdit{v, insert})
isLess, err := final.value.Less(se.s.format(), v)
isLess, err := final.value.Less(ctx, se.s.format(), v)
if err != nil {
return err
@@ -242,8 +242,8 @@ func (se *SetEditor) edit(v Value, insert bool) error {
}
// Find the edit position of the last edit for a given key
func (se *SetEditor) findEdit(v Value) (int, bool, error) {
err := se.normalize()
func (se *SetEditor) findEdit(ctx context.Context, v Value) (int, bool, error) {
err := se.normalize(ctx)
if err != nil {
return 0, false, err
@@ -251,7 +251,7 @@ func (se *SetEditor) findEdit(v Value) (int, bool, error) {
var found bool
idx, err := SearchWithErroringLess(len(se.edits.edits), func(i int) (bool, error) {
return se.edits.edits[i].value.Less(se.s.format(), v)
return se.edits.edits[i].value.Less(ctx, se.s.format(), v)
})
if err != nil {
@@ -276,13 +276,12 @@ func (se *SetEditor) findEdit(v Value) (int, bool, error) {
return idx, found, nil
}
func (se *SetEditor) normalize() error {
func (se *SetEditor) normalize(ctx context.Context) error {
if se.normalized {
return nil
}
err := SortWithErroringLess(se.edits)
err := SortWithErroringLess(ctx, se.s.format(), se.edits)
if err != nil {
return err
}
@@ -299,11 +298,10 @@ type setEdit struct {
type setEditSlice struct {
edits []setEdit
nbf *NomsBinFormat
}
func (ses setEditSlice) Len() int { return len(ses.edits) }
func (ses setEditSlice) Swap(i, j int) { ses.edits[i], ses.edits[j] = ses.edits[j], ses.edits[i] }
func (ses setEditSlice) Less(i, j int) (bool, error) {
return ses.edits[i].value.Less(ses.nbf, ses.edits[j].value)
func (ses setEditSlice) Less(ctx context.Context, nbf *NomsBinFormat, i, j int) (bool, error) {
return ses.edits[i].value.Less(ctx, nbf, ses.edits[j].value)
}
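The setEditSlice change implies that the interface SortWithErroringLess sorts over gained the same two parameters on Less. A sketch of that interface as it plausibly looks after this patch (the name erroringLessSort is an assumption; only the method shapes come from the hunks above, and the sketch assumes the same package and imports as the earlier one):

// Assumed shape, inferred from setEditSlice and the
// SortWithErroringLess(ctx, nbf, ...) call sites above; the real
// interface name in the package may differ.
type erroringLessSort interface {
	Len() int
	Swap(i, j int)
	Less(ctx context.Context, nbf *NomsBinFormat, i, j int) (bool, error)
}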
@@ -79,7 +79,7 @@ func (si *setIterator) Next(ctx context.Context) (Value, error) {
func (si *setIterator) SkipTo(ctx context.Context, v Value) (Value, error) {
d.PanicIfTrue(v == nil)
if si.sequenceIter.valid() {
if cmp, err := compareValue(si.s.format(), v, si.currentValue); err != nil {
if cmp, err := compareValue(ctx, si.s.valueReadWriter(), v, si.currentValue); err != nil {
return nil, err
} else if cmp <= 0 {
return si.Next(ctx)
@@ -156,32 +156,30 @@ func (st *iterState) SkipTo(ctx context.Context, v Value) (Value, error) {
type UnionIterator struct {
aState iterState
bState iterState
nbf *NomsBinFormat
vr ValueReader
}
// NewUnionIterator creates a union iterator from two other SetIterators.
func NewUnionIterator(ctx context.Context, nbf *NomsBinFormat, iterA, iterB SetIterator) (SetIterator, error) {
func NewUnionIterator(ctx context.Context, vr ValueReader, iterA, iterB SetIterator) (SetIterator, error) {
d.PanicIfTrue(iterA == nil)
d.PanicIfTrue(iterB == nil)
aVal, err := iterA.Next(ctx)
if err != nil {
return nil, err
}
bVal, err := iterB.Next(ctx)
if err != nil {
return nil, err
}
a := iterState{i: iterA, v: aVal}
b := iterState{i: iterB, v: bVal}
return &UnionIterator{aState: a, bState: b, nbf: nbf}, nil
return &UnionIterator{aState: a, bState: b, vr: vr}, nil
}
func (u *UnionIterator) Next(ctx context.Context) (Value, error) {
cmp, err := compareValue(u.nbf, u.aState.v, u.bState.v)
cmp, err := compareValue(ctx, u.vr, u.aState.v, u.bState.v)
if err != nil {
return nil, err
@@ -207,7 +205,7 @@ func (u *UnionIterator) Next(ctx context.Context) (Value, error) {
func (u *UnionIterator) SkipTo(ctx context.Context, v Value) (Value, error) {
d.PanicIfTrue(v == nil)
didAdvance := false
if cmp, err := compareValue(u.nbf, u.aState.v, v); err != nil {
if cmp, err := compareValue(ctx, u.vr, u.aState.v, v); err != nil {
return nil, err
} else if cmp < 0 {
didAdvance = true
@@ -217,7 +215,7 @@ func (u *UnionIterator) SkipTo(ctx context.Context, v Value) (Value, error) {
return nil, err
}
}
if cmp, err := compareValue(u.nbf, u.bState.v, v); err != nil {
if cmp, err := compareValue(ctx, u.vr, u.bState.v, v); err != nil {
return nil, err
} else if cmp < 0 {
didAdvance = true
@@ -230,7 +228,7 @@ func (u *UnionIterator) SkipTo(ctx context.Context, v Value) (Value, error) {
if !didAdvance {
return u.Next(ctx)
}
cmp, err := compareValue(u.nbf, u.aState.v, u.bState.v)
cmp, err := compareValue(ctx, u.vr, u.aState.v, u.bState.v)
if err != nil {
return nil, err
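The iterator constructors now carry a ValueReader rather than a bare format, which is what lets compareValue derive the format via vr.Format() with a context in hand. A hypothetical call site using only the signatures shown above, and assuming, as the nil handling in compareValue suggests, that Next reports exhaustion with a nil Value:

// drainUnion is illustrative; a and b are any two SetIterators.
func drainUnion(ctx context.Context, vr ValueReader, a, b SetIterator) ([]Value, error) {
	it, err := NewUnionIterator(ctx, vr, a, b)
	if err != nil {
		return nil, err
	}
	var out []Value
	for {
		v, err := it.Next(ctx)
		if err != nil {
			return nil, err
		}
		if v == nil { // assumed exhaustion signal
			return out, nil
		}
		out = append(out, v)
	}
}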
@@ -258,11 +256,11 @@ func (u *UnionIterator) SkipTo(ctx context.Context, v Value) (Value, error) {
type IntersectionIterator struct {
aState iterState
bState iterState
nbf *NomsBinFormat
vr ValueReader
}
// NewIntersectionIterator creates an intersection iterator from two other SetIterators.
func NewIntersectionIterator(ctx context.Context, nbf *NomsBinFormat, iterA, iterB SetIterator) (SetIterator, error) {
func NewIntersectionIterator(ctx context.Context, vr ValueReader, iterA, iterB SetIterator) (SetIterator, error) {
d.PanicIfTrue(iterA == nil)
d.PanicIfTrue(iterB == nil)
aVal, err := iterA.Next(ctx)
@@ -279,12 +277,12 @@ func NewIntersectionIterator(ctx context.Context, nbf *NomsBinFormat, iterA, iterB SetIterator) (SetIterator, error) {
a := iterState{i: iterA, v: aVal}
b := iterState{i: iterB, v: bVal}
return &IntersectionIterator{aState: a, bState: b, nbf: nbf}, nil
return &IntersectionIterator{aState: a, bState: b, vr: vr}, nil
}
func (i *IntersectionIterator) Next(ctx context.Context) (Value, error) {
for cont := true; cont; {
cmp, err := compareValue(i.nbf, i.aState.v, i.bState.v)
cmp, err := compareValue(ctx, i.vr, i.aState.v, i.bState.v)
if err != nil {
return nil, err
@@ -326,7 +324,7 @@ func (i *IntersectionIterator) Next(ctx context.Context) (Value, error) {
func (i *IntersectionIterator) SkipTo(ctx context.Context, v Value) (Value, error) {
d.PanicIfTrue(v == nil)
if cmp, err := compareValue(i.nbf, v, i.aState.v); err != nil {
if cmp, err := compareValue(ctx, i.vr, v, i.aState.v); err != nil {
return nil, err
} else if cmp >= 0 {
_, err := i.aState.SkipTo(ctx, v)
@@ -336,7 +334,7 @@ func (i *IntersectionIterator) SkipTo(ctx context.Context, v Value) (Value, error) {
}
}
if cmp, err := compareValue(i.nbf, v, i.bState.v); err != nil {
if cmp, err := compareValue(ctx, i.vr, v, i.bState.v); err != nil {
return nil, err
} else if cmp >= 0 {
_, err := i.bState.SkipTo(ctx, v)
@@ -350,7 +348,7 @@ func (i *IntersectionIterator) SkipTo(ctx context.Context, v Value) (Value, error) {
}
// treats nil as the maximum value; returns -1 if v1 < v2, 0 if v1 == v2, 1 if v1 > v2
func compareValue(nbf *NomsBinFormat, v1, v2 Value) (int, error) {
func compareValue(ctx context.Context, vr ValueReader, v1, v2 Value) (int, error) {
if v1 == nil && v2 == nil {
return 0, nil
}
@@ -360,7 +358,7 @@ func compareValue(nbf *NomsBinFormat, v1, v2 Value) (int, error) {
}
if v1 != nil {
if isLess, err := v1.Less(nbf, v2); err != nil {
if isLess, err := v1.Less(ctx, vr.Format(), v2); err != nil {
return 0, err
} else if isLess {
return -1, nil
@@ -371,7 +369,7 @@ func compareValue(nbf *NomsBinFormat, v1, v2 Value) (int, error) {
return 1, nil
}
if isLess, err := v2.Less(nbf, v1); err != nil {
if isLess, err := v2.Less(ctx, vr.Format(), v1); err != nil {
return 0, err
} else if isLess {
return 1, nil
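Stitched together, the updated comparator reads roughly as below. The two nil branches not visible in the hunks are filled in from the nil-as-max contract stated in the comment, so treat this as an approximation rather than the verbatim function:

// compareValueSketch: approximate reconstruction of compareValue after
// the patch. nil compares greater than any concrete value.
func compareValueSketch(ctx context.Context, vr ValueReader, v1, v2 Value) (int, error) {
	if v1 == nil && v2 == nil {
		return 0, nil
	}
	if v2 == nil {
		return -1, nil // v2 is the maximum, so v1 < v2
	}
	if v1 == nil {
		return 1, nil // v1 is the maximum, so v1 > v2
	}
	if isLess, err := v1.Less(ctx, vr.Format(), v2); err != nil {
		return 0, err
	} else if isLess {
		return -1, nil
	}
	if isLess, err := v2.Less(ctx, vr.Format(), v1); err != nil {
		return 0, err
	} else if isLess {
		return 1, nil
	}
	return 0, nil
}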