Remove ValueReadWriter interface from Database.

This commit is contained in:
Aaron Son
2022-02-01 17:20:06 -08:00
parent 7b8f6cde09
commit 52fc803365
67 changed files with 546 additions and 484 deletions
+2 -2
View File
@@ -116,9 +116,9 @@ func (cmd RootsCmd) processTableFile(ctx context.Context, path string, modified
return nbs.IterChunks(rdCloser.(io.ReadSeeker), func(chunk chunks.Chunk) (stop bool, err error) {
//Want a clean db every loop
sp, _ := spec.ForDatabase("mem")
db := sp.GetDatabase(ctx)
vrw := sp.GetVRW(ctx)
value, err := types.DecodeValue(chunk, db)
value, err := types.DecodeValue(chunk, vrw)
if err != nil {
return false, err
+5 -4
View File
@@ -105,17 +105,18 @@ type AWSFactory struct {
}
// CreateDB creates an AWS backed database
func (fact AWSFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, error) {
func (fact AWSFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, types.ValueReadWriter, error) {
var db datas.Database
cs, err := fact.newChunkStore(ctx, nbf, urlObj, params)
if err != nil {
return nil, err
return nil, nil, err
}
db = datas.NewDatabase(cs)
vrw := types.NewValueStore(cs)
db = datas.NewTypesDatabase(vrw)
return db, nil
return db, vrw, nil
}
func (fact AWSFactory) newChunkStore(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (chunks.ChunkStore, error) {
+4 -4
View File
@@ -53,7 +53,7 @@ const (
// DBFactory is an interface for creating concrete datas.Database instances which may have different backing stores.
type DBFactory interface {
CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, error)
CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, types.ValueReadWriter, error)
}
// DBFactories is a map from url scheme name to DBFactory. Additional factories can be added to the DBFactories map
@@ -70,11 +70,11 @@ var DBFactories = map[string]DBFactory{
// CreateDB creates a database based on the supplied urlStr, and creation params. The DBFactory used for creation is
// determined by the scheme of the url. Naked urls will use https by default.
func CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlStr string, params map[string]interface{}) (datas.Database, error) {
func CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlStr string, params map[string]interface{}) (datas.Database, types.ValueReadWriter, error) {
urlObj, err := earl.Parse(urlStr)
if err != nil {
return nil, err
return nil, nil, err
}
scheme := urlObj.Scheme
@@ -86,5 +86,5 @@ func CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlStr string, para
return fact.CreateDB(ctx, nbf, urlObj, params)
}
return nil, fmt.Errorf("unknown url scheme: '%s'", urlObj.Scheme)
return nil, nil, fmt.Errorf("unknown url scheme: '%s'", urlObj.Scheme)
}
@@ -54,8 +54,9 @@ func TestCreateFileDB(t *testing.T) {
func TestCreateMemDB(t *testing.T) {
ctx := context.Background()
db, err := CreateDB(ctx, types.Format_Default, "mem://", nil)
db, vrw, err := CreateDB(ctx, types.Format_Default, "mem://", nil)
assert.NoError(t, err)
assert.NotNil(t, db)
assert.NotNil(t, vrw)
}
+10 -8
View File
@@ -43,11 +43,11 @@ type FileFactory struct {
}
// CreateDB creates an local filesys backed database
func (fact FileFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, error) {
func (fact FileFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, types.ValueReadWriter, error) {
path, err := url.PathUnescape(urlObj.Path)
if err != nil {
return nil, err
return nil, nil, err
}
path = filepath.FromSlash(path)
@@ -55,38 +55,40 @@ func (fact FileFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat,
err = validateDir(path)
if err != nil {
return nil, err
return nil, nil, err
}
newGenSt, err := nbs.NewLocalStore(ctx, nbf.VersionString(), path, defaultMemTableSize)
if err != nil {
return nil, err
return nil, nil, err
}
oldgenPath := filepath.Join(path, "oldgen")
err = validateDir(oldgenPath)
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
return nil, err
return nil, nil, err
}
err = os.Mkdir(oldgenPath, os.ModePerm)
if err != nil && !errors.Is(err, os.ErrExist) {
return nil, err
return nil, nil, err
}
}
oldGenSt, err := nbs.NewLocalStore(ctx, nbf.VersionString(), oldgenPath, defaultMemTableSize)
if err != nil {
return nil, err
return nil, nil, err
}
st := nbs.NewGenerationalCS(oldGenSt, newGenSt)
// metrics?
return datas.NewDatabase(st), nil
vrw := types.NewValueStore(st)
return datas.NewTypesDatabase(vrw), vrw, nil
}
func validateDir(path string) error {
+7 -6
View File
@@ -51,27 +51,28 @@ func NewDoltRemoteFactory(insecure bool) DoltRemoteFactory {
// CreateDB creates a database backed by a remote server that implements the GRPC rpcs defined by
// remoteapis.ChunkStoreServiceClient
func (fact DoltRemoteFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, error) {
func (fact DoltRemoteFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, types.ValueReadWriter, error) {
var db datas.Database
dpi, ok := params[GRPCDialProviderParam]
if dpi == nil || !ok {
return nil, errors.New("DoltRemoteFactory.CreateDB must provide a GRPCDialProvider param through GRPCDialProviderParam")
return nil, nil, errors.New("DoltRemoteFactory.CreateDB must provide a GRPCDialProvider param through GRPCDialProviderParam")
}
dp, ok := dpi.(GRPCDialProvider)
if !ok {
return nil, errors.New("DoltRemoteFactory.CreateDB must provide a GRPCDialProvider param through GRPCDialProviderParam")
return nil, nil, errors.New("DoltRemoteFactory.CreateDB must provide a GRPCDialProvider param through GRPCDialProviderParam")
}
cs, err := fact.newChunkStore(ctx, nbf, urlObj, params, dp)
if err != nil {
return nil, err
return nil, nil, err
}
db = datas.NewDatabase(cs)
vrw := types.NewValueStore(cs)
db = datas.NewTypesDatabase(vrw)
return db, err
return db, vrw, err
}
var NoCachingParameter = "__dolt__NO_CACHING"
+12 -10
View File
@@ -32,24 +32,25 @@ type GSFactory struct {
}
// CreateDB creates an GCS backed database
func (fact GSFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, error) {
func (fact GSFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, types.ValueReadWriter, error) {
var db datas.Database
gcs, err := storage.NewClient(ctx)
if err != nil {
return nil, err
return nil, nil, err
}
bs := blobstore.NewGCSBlobstore(gcs, urlObj.Host, urlObj.Path)
gcsStore, err := nbs.NewBSStore(ctx, nbf.VersionString(), bs, defaultMemTableSize)
if err != nil {
return nil, err
return nil, nil, err
}
db = datas.NewDatabase(gcsStore)
vrw := types.NewValueStore(gcsStore)
db = datas.NewTypesDatabase(vrw)
return db, err
return db, vrw, nil
}
// LocalBSFactory is a DBFactory implementation for creating a local filesystem blobstore backed databases for testing
@@ -57,22 +58,23 @@ type LocalBSFactory struct {
}
// CreateDB creates a local filesystem blobstore backed database
func (fact LocalBSFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, error) {
func (fact LocalBSFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, types.ValueReadWriter, error) {
var db datas.Database
absPath, err := filepath.Abs(filepath.Join(urlObj.Host, urlObj.Path))
if err != nil {
return nil, err
return nil, nil, err
}
bs := blobstore.NewLocalBlobstore(absPath)
bsStore, err := nbs.NewBSStore(ctx, nbf.VersionString(), bs, defaultMemTableSize)
if err != nil {
return nil, err
return nil, nil, err
}
db = datas.NewDatabase(bsStore)
vrw := types.NewValueStore(bsStore)
db = datas.NewTypesDatabase(vrw)
return db, err
return db, vrw, err
}
+5 -3
View File
@@ -28,10 +28,12 @@ type MemFactory struct {
}
// CreateDB creates an in memory backed database
func (fact MemFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, error) {
func (fact MemFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, types.ValueReadWriter, error) {
var db datas.Database
storage := &chunks.MemoryStorage{}
db = datas.NewDatabase(storage.NewViewWithDefaultFormat())
cs := storage.NewViewWithDefaultFormat()
vrw := types.NewValueStore(cs)
db = datas.NewTypesDatabase(vrw)
return db, nil
return db, vrw, nil
}
@@ -26,14 +26,13 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/store/chunks"
"github.com/dolthub/dolt/go/store/constants"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/types"
)
func TestAsyncDiffer(t *testing.T) {
ctx := context.Background()
storage := &chunks.MemoryStorage{}
db := datas.NewDatabase(storage.NewView())
vrw := types.NewValueStore(storage.NewView())
vals := []types.Value{
types.Uint(0), types.String("a"),
@@ -56,7 +55,7 @@ func TestAsyncDiffer(t *testing.T) {
types.Uint(25), types.String("z"),
}
m1, err := types.NewMap(ctx, db, vals...)
m1, err := types.NewMap(ctx, vrw, vals...)
require.NoError(t, err)
vals = []types.Value{
@@ -87,7 +86,7 @@ func TestAsyncDiffer(t *testing.T) {
types.Uint(24), types.String("y2"), // changed
//types.Uint(25), types.String("z"), // deleted
}
m2, err := types.NewMap(ctx, db, vals...)
m2, err := types.NewMap(ctx, vrw, vals...)
require.NoError(t, err)
tests := []struct {
@@ -245,20 +244,20 @@ func TestAsyncDiffer(t *testing.T) {
})
k1Row1Vals := []types.Value{c1Tag, types.Uint(3), c2Tag, types.String("d")}
k1Vals, err := getKeylessRow(ctx, db, k1Row1Vals)
k1Vals, err := getKeylessRow(ctx, k1Row1Vals)
assert.NoError(t, err)
k1, err := types.NewMap(ctx, db, k1Vals...)
k1, err := types.NewMap(ctx, vrw, k1Vals...)
assert.NoError(t, err)
// Delete one row, add two rows
k2Row1Vals := []types.Value{c1Tag, types.Uint(4), c2Tag, types.String("d")}
k2Vals1, err := getKeylessRow(ctx, db, k2Row1Vals)
k2Vals1, err := getKeylessRow(ctx, k2Row1Vals)
assert.NoError(t, err)
k2Row2Vals := []types.Value{c1Tag, types.Uint(1), c2Tag, types.String("e")}
k2Vals2, err := getKeylessRow(ctx, db, k2Row2Vals)
k2Vals2, err := getKeylessRow(ctx, k2Row2Vals)
assert.NoError(t, err)
k2Vals := append(k2Vals1, k2Vals2...)
k2, err := types.NewMap(ctx, db, k2Vals...)
k2, err := types.NewMap(ctx, vrw, k2Vals...)
require.NoError(t, err)
t.Run("can diff and filter keyless tables", func(t *testing.T) {
@@ -312,7 +311,7 @@ var c2Tag = types.Uint(2)
var cardTag = types.Uint(schema.KeylessRowCardinalityTag)
var rowIdTag = types.Uint(schema.KeylessRowIdTag)
func getKeylessRow(ctx context.Context, db datas.Database, vals []types.Value) ([]types.Value, error) {
func getKeylessRow(ctx context.Context, vals []types.Value) ([]types.Value, error) {
nbf, err := types.GetFormatForVersionString(constants.FormatDefaultString)
if err != nil {
return []types.Value{}, err
+3 -3
View File
@@ -353,7 +353,7 @@ func (ddb *DoltDB) NewPendingCommit(
return nil, err
}
parents, err := types.NewList(ctx, ddb.db)
parents, err := types.NewList(ctx, ddb.vrw)
if err != nil {
return nil, err
}
@@ -364,7 +364,7 @@ func (ddb *DoltDB) NewPendingCommit(
}
for _, pc := range parentCommits {
rf, err := types.NewRef(pc.commitSt, ddb.db.Format())
rf, err := types.NewRef(pc.commitSt, ddb.vrw.Format())
if err != nil {
return nil, err
}
@@ -377,7 +377,7 @@ func (ddb *DoltDB) NewPendingCommit(
return nil, err
}
st, err := cm.toNomsStruct(ddb.db.Format())
st, err := cm.toNomsStruct(ddb.vrw.Format())
if err != nil {
return nil, err
}
@@ -104,8 +104,8 @@ func TestPushOnWriteHook(t *testing.T) {
}
tSchema := createTestSchema(t)
rowData, _ := createTestRowData(t, ddb.db, tSchema)
tbl, err := CreateTestTable(ddb.db, tSchema, rowData)
rowData, _ := createTestRowData(t, ddb.vrw, tSchema)
tbl, err := CreateTestTable(ddb.vrw, tSchema, rowData)
if err != nil {
t.Fatal("Failed to create test table with data")
@@ -237,8 +237,8 @@ func TestAsyncPushOnWrite(t *testing.T) {
assert.NoError(t, err)
tSchema := createTestSchema(t)
rowData, _ := createTestRowData(t, ddb.db, tSchema)
tbl, err := CreateTestTable(ddb.db, tSchema, rowData)
rowData, _ := createTestRowData(t, ddb.vrw, tSchema)
tbl, err := CreateTestTable(ddb.vrw, tSchema, rowData)
if err != nil {
t.Fatal("Failed to create test table with data")
+49 -47
View File
@@ -59,14 +59,16 @@ var ErrCannotDeleteLastBranch = errors.New("cannot delete the last branch")
// Additionally the noms codebase uses panics in a way that is non idiomatic and We've opted to recover and return
// errors in many cases.
type DoltDB struct {
db hooksDatabase
db hooksDatabase
vrw types.ValueReadWriter
}
// DoltDBFromCS creates a DoltDB from a noms chunks.ChunkStore
func DoltDBFromCS(cs chunks.ChunkStore) *DoltDB {
db := datas.NewDatabase(cs)
vrw := types.NewValueStore(cs)
db := datas.NewTypesDatabase(vrw)
return &DoltDB{hooksDatabase{Database: db}}
return &DoltDB{hooksDatabase{Database: db}, vrw}
}
// LoadDoltDB will acquire a reference to the underlying noms db. If the Location is InMemDoltDB then a reference
@@ -94,13 +96,13 @@ func LoadDoltDBWithParams(ctx context.Context, nbf *types.NomsBinFormat, urlStr
urlStr = fmt.Sprintf("file://%s", filepath.ToSlash(absPath))
}
db, err := dbfactory.CreateDB(ctx, nbf, urlStr, params)
db, vrw, err := dbfactory.CreateDB(ctx, nbf, urlStr, params)
if err != nil {
return nil, err
}
return &DoltDB{hooksDatabase{Database: db}}, nil
return &DoltDB{hooksDatabase{Database: db}, vrw}, nil
}
// NomsRoot returns the hash of the noms dataset map
@@ -151,7 +153,7 @@ func (ddb *DoltDB) WriteEmptyRepoWithCommitTimeAndDefaultBranch(
return errors.New("database already exists")
}
rv, err := EmptyRootValue(ctx, ddb.db)
rv, err := EmptyRootValue(ctx, ddb.vrw)
if err != nil {
return err
@@ -165,12 +167,12 @@ func (ddb *DoltDB) WriteEmptyRepoWithCommitTimeAndDefaultBranch(
cm, _ := NewCommitMetaWithUserTS(name, email, "Initialize data repository", t)
parents, err := types.NewList(ctx, ddb.db)
parents, err := types.NewList(ctx, ddb.vrw)
if err != nil {
return err
}
meta, err := cm.toNomsStruct(ddb.db.Format())
meta, err := cm.toNomsStruct(ddb.vrw.Format())
if err != nil {
return err
@@ -212,21 +214,21 @@ func (ddb *DoltDB) WriteEmptyRepoWithCommitTimeAndDefaultBranch(
return err
}
func getCommitStForRefStr(ctx context.Context, db datas.Database, ref string) (types.Struct, error) {
func getCommitStForRefStr(ctx context.Context, db datas.Database, vrw types.ValueReadWriter, ref string) (types.Struct, error) {
if !datas.DatasetFullRe.MatchString(ref) {
return types.EmptyStruct(db.Format()), fmt.Errorf("invalid ref format: %s", ref)
return types.Struct{}, fmt.Errorf("invalid ref format: %s", ref)
}
ds, err := db.GetDataset(ctx, ref)
if err != nil {
return types.EmptyStruct(db.Format()), err
return types.Struct{}, err
}
dsHead, hasHead := ds.MaybeHead()
if !hasHead {
return types.EmptyStruct(db.Format()), ErrBranchNotFound
return types.Struct{}, ErrBranchNotFound
}
if dsHead.Name() == datas.CommitName {
@@ -236,23 +238,23 @@ func getCommitStForRefStr(ctx context.Context, db datas.Database, ref string) (t
if dsHead.Name() == datas.TagName {
commitRef, ok, err := dsHead.MaybeGet(datas.TagCommitRefField)
if err != nil {
return types.EmptyStruct(db.Format()), err
return types.Struct{}, err
}
if !ok {
err = fmt.Errorf("tag struct does not have field %s", datas.TagCommitRefField)
return types.EmptyStruct(db.Format()), err
return types.Struct{}, err
}
commitSt, err := commitRef.(types.Ref).TargetValue(ctx, db)
commitSt, err := commitRef.(types.Ref).TargetValue(ctx, vrw)
if err != nil {
return types.EmptyStruct(db.Format()), err
return types.Struct{}, err
}
return commitSt.(types.Struct), nil
}
err = fmt.Errorf("dataset head is neither commit nor tag")
return types.EmptyStruct(db.Format()), err
return types.Struct{}, err
}
func getCommitStForHash(ctx context.Context, vr types.ValueReader, c string) (types.Struct, error) {
@@ -336,7 +338,7 @@ func (ddb *DoltDB) Resolve(ctx context.Context, cs *CommitSpec, cwb ref.DoltRef)
var err error
switch cs.csType {
case hashCommitSpec:
commitSt, err = getCommitStForHash(ctx, ddb.db, cs.baseSpec)
commitSt, err = getCommitStForHash(ctx, ddb.vrw, cs.baseSpec)
case refCommitSpec:
// For a ref in a CommitSpec, we have the following behavior.
// If it starts with `refs/`, we look for an exact match before
@@ -359,7 +361,7 @@ func (ddb *DoltDB) Resolve(ctx context.Context, cs *CommitSpec, cwb ref.DoltRef)
}
}
for _, candidate := range candidates {
commitSt, err = getCommitStForRefStr(ctx, ddb.db, candidate)
commitSt, err = getCommitStForRefStr(ctx, ddb.db, ddb.vrw, candidate)
if err == nil {
break
}
@@ -371,7 +373,7 @@ func (ddb *DoltDB) Resolve(ctx context.Context, cs *CommitSpec, cwb ref.DoltRef)
if cwb == nil {
return nil, fmt.Errorf("cannot use a nil current working branch with a HEAD commit spec")
}
commitSt, err = getCommitStForRefStr(ctx, ddb.db, cwb.String())
commitSt, err = getCommitStForRefStr(ctx, ddb.db, ddb.vrw, cwb.String())
default:
panic("unrecognized commit spec csType: " + cs.csType)
}
@@ -380,23 +382,23 @@ func (ddb *DoltDB) Resolve(ctx context.Context, cs *CommitSpec, cwb ref.DoltRef)
return nil, err
}
commitSt, err = getAncestor(ctx, ddb.db, commitSt, cs.aSpec)
commitSt, err = getAncestor(ctx, ddb.vrw, commitSt, cs.aSpec)
if err != nil {
return nil, err
}
return NewCommit(ddb.db, commitSt), nil
return NewCommit(ddb.vrw, commitSt), nil
}
// ResolveCommitRef takes a DoltRef and returns a Commit, or an error if the commit cannot be found. The ref given must
// point to a Commit.
func (ddb *DoltDB) ResolveCommitRef(ctx context.Context, ref ref.DoltRef) (*Commit, error) {
commitSt, err := getCommitStForRefStr(ctx, ddb.db, ref.String())
commitSt, err := getCommitStForRefStr(ctx, ddb.db, ddb.vrw, ref.String())
if err != nil {
return nil, err
}
return NewCommit(ddb.db, commitSt), nil
return NewCommit(ddb.vrw, commitSt), nil
}
// ResolveTag takes a TagRef and returns the corresponding Tag object.
@@ -417,7 +419,7 @@ func (ddb *DoltDB) ResolveTag(ctx context.Context, tagRef ref.TagRef) (*Tag, err
return nil, fmt.Errorf("tagRef head is not a tag")
}
return NewTag(ctx, tagRef.GetPath(), ddb.db, tagSt)
return NewTag(ctx, tagRef.GetPath(), ddb.vrw, tagSt)
}
// ResolveWorkingSet takes a WorkingSetRef and returns the corresponding WorkingSet object.
@@ -438,7 +440,7 @@ func (ddb *DoltDB) ResolveWorkingSet(ctx context.Context, workingSetRef ref.Work
return nil, fmt.Errorf("workingSetRef head is not a workingSetRef")
}
return NewWorkingSet(ctx, workingSetRef.GetPath(), ddb.db, wsSt)
return NewWorkingSet(ctx, workingSetRef.GetPath(), ddb.vrw, wsSt)
}
// TODO: convenience method to resolve the head commit of a branch.
@@ -472,13 +474,13 @@ func (ddb *DoltDB) writeRootValue(ctx context.Context, rv *RootValue) (types.Ref
return types.Ref{}, err
}
return ddb.db.WriteValue(ctx, rv.valueSt)
return ddb.vrw.WriteValue(ctx, rv.valueSt)
}
// ReadRootValue reads the RootValue associated with the hash given and returns it. Returns an error if the value cannot
// be read, or if the hash given doesn't represent a dolt RootValue.
func (ddb *DoltDB) ReadRootValue(ctx context.Context, h hash.Hash) (*RootValue, error) {
val, err := ddb.db.ReadValue(ctx, h)
val, err := ddb.vrw.ReadValue(ctx, h)
if err != nil {
return nil, err
@@ -492,7 +494,7 @@ func (ddb *DoltDB) ReadRootValue(ctx context.Context, h hash.Hash) (*RootValue,
return nil, ErrNoRootValAtHash
}
return newRootValue(ddb.db, rootSt)
return newRootValue(ddb.vrw, rootSt)
}
// Commit will update a branch's head value to be that of a previously committed root value hash
@@ -512,7 +514,7 @@ func (ddb *DoltDB) FastForward(ctx context.Context, branch ref.DoltRef, commit *
return err
}
rf, err := types.NewRef(commit.commitSt, ddb.db.Format())
rf, err := types.NewRef(commit.commitSt, ddb.vrw.Format())
if err != nil {
return err
@@ -541,7 +543,7 @@ func (ddb *DoltDB) CanFastForward(ctx context.Context, branch ref.DoltRef, new *
// SetHeadToCommit sets the given ref to point at the given commit. It is used in the course of 'force' updates.
func (ddb *DoltDB) SetHeadToCommit(ctx context.Context, ref ref.DoltRef, cm *Commit) error {
stRef, err := types.NewRef(cm.commitSt, ddb.db.Format())
stRef, err := types.NewRef(cm.commitSt, ddb.vrw.Format())
if err != nil {
return err
@@ -577,7 +579,7 @@ func (ddb *DoltDB) CommitWithParentSpecs(ctx context.Context, valHash hash.Hash,
}
func (ddb *DoltDB) CommitWithParentCommits(ctx context.Context, valHash hash.Hash, dref ref.DoltRef, parentCommits []*Commit, cm *CommitMeta) (*Commit, error) {
val, err := ddb.db.ReadValue(ctx, valHash)
val, err := ddb.vrw.ReadValue(ctx, valHash)
if err != nil {
return nil, err
@@ -593,7 +595,7 @@ func (ddb *DoltDB) CommitWithParentCommits(ctx context.Context, valHash hash.Has
return nil, err
}
l, err := types.NewList(ctx, ddb.db)
l, err := types.NewList(ctx, ddb.vrw)
if err != nil {
return nil, err
@@ -612,7 +614,7 @@ func (ddb *DoltDB) CommitWithParentCommits(ctx context.Context, valHash hash.Has
}
for _, cm := range parentCommits {
rf, err := types.NewRef(cm.commitSt, ddb.db.Format())
rf, err := types.NewRef(cm.commitSt, ddb.vrw.Format())
if err != nil {
return nil, err
@@ -627,7 +629,7 @@ func (ddb *DoltDB) CommitWithParentCommits(ctx context.Context, valHash hash.Has
return nil, err
}
st, err := cm.toNomsStruct(ddb.db.Format())
st, err := cm.toNomsStruct(ddb.vrw.Format())
if err != nil {
return nil, err
@@ -651,14 +653,14 @@ func (ddb *DoltDB) CommitWithParentCommits(ctx context.Context, valHash hash.Has
return nil, errors.New("Commit has no head but commit succeeded. This is a bug.")
}
return NewCommit(ddb.db, commitSt), nil
return NewCommit(ddb.vrw, commitSt), nil
}
// dangling commits are unreferenced by any branch or ref. They are created in the course of programmatic updates
// such as rebase. You must create a ref to a dangling commit for it to be reachable
func (ddb *DoltDB) CommitDanglingWithParentCommits(ctx context.Context, valHash hash.Hash, parentCommits []*Commit, cm *CommitMeta) (*Commit, error) {
var commitSt types.Struct
val, err := ddb.db.ReadValue(ctx, valHash)
val, err := ddb.vrw.ReadValue(ctx, valHash)
if err != nil {
return nil, err
}
@@ -666,7 +668,7 @@ func (ddb *DoltDB) CommitDanglingWithParentCommits(ctx context.Context, valHash
return nil, errors.New("can't commit a value that is not a valid root value")
}
l, err := types.NewList(ctx, ddb.db)
l, err := types.NewList(ctx, ddb.vrw)
if err != nil {
return nil, err
}
@@ -674,7 +676,7 @@ func (ddb *DoltDB) CommitDanglingWithParentCommits(ctx context.Context, valHash
parentEditor := l.Edit()
for _, cm := range parentCommits {
rf, err := types.NewRef(cm.commitSt, ddb.db.Format())
rf, err := types.NewRef(cm.commitSt, ddb.vrw.Format())
if err != nil {
return nil, err
}
@@ -687,7 +689,7 @@ func (ddb *DoltDB) CommitDanglingWithParentCommits(ctx context.Context, valHash
return nil, err
}
st, err := cm.toNomsStruct(ddb.db.Format())
st, err := cm.toNomsStruct(ddb.vrw.Format())
if err != nil {
return nil, err
}
@@ -698,16 +700,16 @@ func (ddb *DoltDB) CommitDanglingWithParentCommits(ctx context.Context, valHash
return nil, err
}
return NewCommit(ddb.db, commitSt), nil
return NewCommit(ddb.vrw, commitSt), nil
}
// ValueReadWriter returns the underlying noms database as a types.ValueReadWriter.
func (ddb *DoltDB) ValueReadWriter() types.ValueReadWriter {
return ddb.db
return ddb.vrw
}
func (ddb *DoltDB) Format() *types.NomsBinFormat {
return ddb.db.Format()
return ddb.vrw.Format()
}
func WriteValAndGetRef(ctx context.Context, vrw types.ValueReadWriter, val types.Value) (types.Ref, error) {
@@ -869,7 +871,7 @@ func (ddb *DoltDB) NewBranchAtCommit(ctx context.Context, branchRef ref.DoltRef,
return err
}
rf, err := types.NewRef(commit.commitSt, ddb.db.Format())
rf, err := types.NewRef(commit.commitSt, ddb.vrw.Format())
if err != nil {
return err
}
@@ -995,7 +997,7 @@ func (ddb *DoltDB) NewTagAtCommit(ctx context.Context, tagRef ref.DoltRef, c *Co
return err
}
st, err := meta.toNomsStruct(ddb.db.Format())
st, err := meta.toNomsStruct(ddb.vrw.Format())
if err != nil {
return err
@@ -1071,7 +1073,7 @@ func (ddb *DoltDB) CommitWithWorkingSet(
}
var metaSt types.Struct
metaSt, err = meta.toNomsStruct(ddb.db.Format())
metaSt, err = meta.toNomsStruct(ddb.vrw.Format())
if err != nil {
return nil, err
}
@@ -1092,7 +1094,7 @@ func (ddb *DoltDB) CommitWithWorkingSet(
return nil, errors.New("Commit has no head but commit succeeded. This is a bug.")
}
return NewCommit(ddb.db, commitSt), nil
return NewCommit(ddb.vrw, commitSt), nil
}
// DeleteWorkingSet deletes the working set given
+2 -2
View File
@@ -281,8 +281,8 @@ func TestLDNoms(t *testing.T) {
}
tSchema := createTestSchema(t)
rowData, _ := createTestRowData(t, ddb.db, tSchema)
tbl, err = CreateTestTable(ddb.db, tSchema, rowData)
rowData, _ := createTestRowData(t, ddb.vrw, tSchema)
tbl, err = CreateTestTable(ddb.vrw, tSchema, rowData)
if err != nil {
t.Fatal("Failed to create test table with data")
+1 -1
View File
@@ -367,7 +367,7 @@ func (ws *WorkingSet) writeValues(ctx context.Context, db *DoltDB) (
return types.Ref{}, types.Ref{}, nil, err
}
mergeStateRef, err = db.db.WriteValue(ctx, mergeStateRefSt)
mergeStateRef, err = db.vrw.WriteValue(ctx, mergeStateRefSt)
if err != nil {
return types.Ref{}, types.Ref{}, nil, err
}
@@ -50,13 +50,13 @@ func createTestSchema() schema.Schema {
func TestNomsMarshalling(t *testing.T) {
tSchema := createTestSchema()
db, err := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
if err != nil {
t.Fatal("Could not create in mem noms db.")
}
val, err := MarshalSchemaAsNomsValue(context.Background(), db, tSchema)
val, err := MarshalSchemaAsNomsValue(context.Background(), vrw, tSchema)
if err != nil {
t.Fatal("Failed to marshal Schema as a types.Value.")
@@ -85,7 +85,7 @@ func TestNomsMarshalling(t *testing.T) {
tSuperSchema, err := schema.NewSuperSchema(tSchema)
require.NoError(t, err)
ssVal, err := MarshalSuperSchemaAsNomsValue(context.Background(), db, tSuperSchema)
ssVal, err := MarshalSuperSchemaAsNomsValue(context.Background(), vrw, tSuperSchema)
require.NoError(t, err)
unMarshalledSS, err := UnmarshalSuperSchemaNomsValue(context.Background(), types.Format_Default, ssVal)
@@ -154,9 +154,9 @@ func TestTypeInfoMarshalling(t *testing.T) {
nbf, err := types.GetFormatForVersionString(constants.FormatDefaultString)
require.NoError(t, err)
db, err := dbfactory.MemFactory{}.CreateDB(context.Background(), nbf, nil, nil)
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), nbf, nil, nil)
require.NoError(t, err)
val, err := MarshalSchemaAsNomsValue(context.Background(), db, originalSch)
val, err := MarshalSchemaAsNomsValue(context.Background(), vrw, originalSch)
require.NoError(t, err)
unmarshalledSch, err := UnmarshalSchemaNomsValue(context.Background(), nbf, val)
require.NoError(t, err)
@@ -457,18 +457,18 @@ func TestFilteredReader(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ctx := context.Background()
memDB, err := dbfactory.MemFactory{}.CreateDB(ctx, types.Format_Default, nil, nil)
_, vrw, err := dbfactory.MemFactory{}.CreateDB(ctx, types.Format_Default, nil, nil)
require.NoError(t, err)
createFunc, err := CreateReaderFuncLimitedByExpressions(types.Format_Default, test.sch, test.filters)
require.NoError(t, err)
tblData, err := mapFromRows(ctx, memDB, test.sch, test.rowData...)
tblData, err := mapFromRows(ctx, vrw, test.sch, test.rowData...)
require.NoError(t, err)
rd, err := createFunc(ctx, tblData)
require.NoError(t, err)
resMap, err := types.NewMap(ctx, memDB)
resMap, err := types.NewMap(ctx, vrw)
require.NoError(t, err)
me := resMap.Edit()
@@ -486,7 +486,7 @@ func TestFilteredReader(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, uint64(me.NumEdits()), resMap.Len())
expectedMap, err := mapFromRows(ctx, memDB, test.sch, test.expectedRows...)
expectedMap, err := mapFromRows(ctx, vrw, test.sch, test.expectedRows...)
require.NoError(t, err)
assert.Equal(t, expectedMap.Len(), resMap.Len())
@@ -182,9 +182,10 @@ func TestJSONStructuralSharing(t *testing.T) {
ctx := context.Background()
ts := &chunks.TestStorage{}
db := datas.NewDatabase(ts.NewViewWithDefaultFormat())
vrw := types.NewValueStore(ts.NewViewWithDefaultFormat())
db := datas.NewTypesDatabase(vrw)
val := MustNomsJSONWithVRW(db, sb.String())
val := MustNomsJSONWithVRW(vrw, sb.String())
err := db.Flush(ctx)
require.NoError(t, err)
@@ -199,7 +200,7 @@ func TestJSONStructuralSharing(t *testing.T) {
for i < tuples {
tup, err := types.NewTuple(types.Format_Default, types.Int(i), types.JSON(val))
require.NoError(t, err)
_, err = db.WriteValue(ctx, tup)
_, err = vrw.WriteValue(ctx, tup)
require.NoError(t, err)
i++
}
@@ -55,7 +55,7 @@ var id3, _ = uuid.NewRandom()
func TestIndexEditorConcurrency(t *testing.T) {
format := types.Format_Default
db, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
colColl := schema.NewColCollection(
schema.NewColumn("pk", 0, types.IntKind, true),
@@ -66,10 +66,10 @@ func TestIndexEditorConcurrency(t *testing.T) {
index, err := tableSch.Indexes().AddIndexByColNames("idx_concurrency", []string{"v1"}, schema.IndexProperties{IsUnique: false, Comment: ""})
require.NoError(t, err)
indexSch := index.Schema()
emptyMap, err := types.NewMap(context.Background(), db)
emptyMap, err := types.NewMap(context.Background(), vrw)
require.NoError(t, err)
opts := TestEditorOptions(db)
opts := TestEditorOptions(vrw)
for i := 0; i < indexEditorConcurrencyIterations; i++ {
indexEditor := NewIndexEditor(context.Background(), index, emptyMap, tableSch, opts)
wg := &sync.WaitGroup{}
@@ -152,7 +152,7 @@ func TestIndexEditorConcurrency(t *testing.T) {
func TestIndexEditorConcurrencyPostInsert(t *testing.T) {
format := types.Format_Default
db, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
colColl := schema.NewColCollection(
schema.NewColumn("pk", 0, types.IntKind, true),
@@ -163,10 +163,10 @@ func TestIndexEditorConcurrencyPostInsert(t *testing.T) {
index, err := tableSch.Indexes().AddIndexByColNames("idx_concurrency", []string{"v1"}, schema.IndexProperties{IsUnique: false, Comment: ""})
require.NoError(t, err)
indexSch := index.Schema()
emptyMap, err := types.NewMap(context.Background(), db)
emptyMap, err := types.NewMap(context.Background(), vrw)
require.NoError(t, err)
opts := TestEditorOptions(db)
opts := TestEditorOptions(vrw)
indexEditor := NewIndexEditor(context.Background(), index, emptyMap, tableSch, opts)
for i := 0; i < indexEditorConcurrencyFinalCount*2; i++ {
dRow, err := row.New(format, indexSch, row.TaggedValues{
@@ -246,7 +246,7 @@ func TestIndexEditorConcurrencyPostInsert(t *testing.T) {
func TestIndexEditorUniqueMultipleNil(t *testing.T) {
format := types.Format_Default
db, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
colColl := schema.NewColCollection(
schema.NewColumn("pk", 0, types.IntKind, true),
@@ -256,10 +256,10 @@ func TestIndexEditorUniqueMultipleNil(t *testing.T) {
index, err := tableSch.Indexes().AddIndexByColNames("idx_unique", []string{"v1"}, schema.IndexProperties{IsUnique: true, Comment: ""})
require.NoError(t, err)
indexSch := index.Schema()
emptyMap, err := types.NewMap(context.Background(), db)
emptyMap, err := types.NewMap(context.Background(), vrw)
require.NoError(t, err)
opts := TestEditorOptions(db)
opts := TestEditorOptions(vrw)
indexEditor := NewIndexEditor(context.Background(), index, emptyMap, tableSch, opts)
for i := 0; i < 3; i++ {
dRow, err := row.New(format, indexSch, row.TaggedValues{
@@ -291,7 +291,7 @@ func TestIndexEditorUniqueMultipleNil(t *testing.T) {
func TestIndexEditorWriteAfterFlush(t *testing.T) {
format := types.Format_Default
db, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
colColl := schema.NewColCollection(
schema.NewColumn("pk", 0, types.IntKind, true),
@@ -302,10 +302,10 @@ func TestIndexEditorWriteAfterFlush(t *testing.T) {
index, err := tableSch.Indexes().AddIndexByColNames("idx_concurrency", []string{"v1"}, schema.IndexProperties{IsUnique: false, Comment: ""})
require.NoError(t, err)
indexSch := index.Schema()
emptyMap, err := types.NewMap(context.Background(), db)
emptyMap, err := types.NewMap(context.Background(), vrw)
require.NoError(t, err)
opts := TestEditorOptions(db)
opts := TestEditorOptions(vrw)
indexEditor := NewIndexEditor(context.Background(), index, emptyMap, tableSch, opts)
require.NoError(t, err)
@@ -359,7 +359,7 @@ func TestIndexEditorWriteAfterFlush(t *testing.T) {
func TestIndexEditorUniqueErrorDoesntPersist(t *testing.T) {
format := types.Format_Default
db, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
colColl := schema.NewColCollection(
schema.NewColumn("pk", 0, types.IntKind, true),
@@ -369,10 +369,10 @@ func TestIndexEditorUniqueErrorDoesntPersist(t *testing.T) {
index, err := tableSch.Indexes().AddIndexByColNames("idx_unq", []string{"v1"}, schema.IndexProperties{IsUnique: true, Comment: ""})
require.NoError(t, err)
indexSch := index.Schema()
emptyMap, err := types.NewMap(context.Background(), db)
emptyMap, err := types.NewMap(context.Background(), vrw)
require.NoError(t, err)
opts := TestEditorOptions(db)
opts := TestEditorOptions(vrw)
indexEditor := NewIndexEditor(context.Background(), index, emptyMap, tableSch, opts)
dRow, err := row.New(format, indexSch, row.TaggedValues{
0: types.Int(1),
@@ -401,18 +401,18 @@ func TestIndexEditorUniqueErrorDoesntPersist(t *testing.T) {
}
func TestIndexRebuildingWithZeroIndexes(t *testing.T) {
db, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
_, vrw, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
tSchema := createTestSchema(t)
_, err := tSchema.Indexes().RemoveIndex(testSchemaIndexName)
require.NoError(t, err)
_, err = tSchema.Indexes().RemoveIndex(testSchemaIndexAge)
require.NoError(t, err)
rowData, _ := createTestRowData(t, db, tSchema)
rowData, _ := createTestRowData(t, vrw, tSchema)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), db, tSchema, rowData)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), vrw, tSchema, rowData)
require.NoError(t, err)
opts := TestEditorOptions(db)
opts := TestEditorOptions(vrw)
rebuildAllTable, err := RebuildAllIndexes(context.Background(), originalTable, opts)
require.NoError(t, err)
_, err = rebuildAllTable.GetNomsIndexRowData(context.Background(), testSchemaIndexName)
@@ -423,14 +423,14 @@ func TestIndexRebuildingWithZeroIndexes(t *testing.T) {
}
func TestIndexRebuildingWithOneIndex(t *testing.T) {
db, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
_, vrw, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
tSchema := createTestSchema(t)
_, err := tSchema.Indexes().RemoveIndex(testSchemaIndexAge)
require.NoError(t, err)
index := tSchema.Indexes().GetByName(testSchemaIndexName)
require.NotNil(t, index)
indexSch := index.Schema()
rowData, rows := createTestRowData(t, db, tSchema)
rowData, rows := createTestRowData(t, vrw, tSchema)
indexExpectedRows := make([]row.Row, len(rows))
for i, r := range rows {
@@ -444,12 +444,12 @@ func TestIndexRebuildingWithOneIndex(t *testing.T) {
require.NoError(t, err)
}
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), db, tSchema, rowData)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), vrw, tSchema, rowData)
require.NoError(t, err)
var indexRows []row.Row
opts := TestEditorOptions(db)
opts := TestEditorOptions(vrw)
rebuildAllTable, err := RebuildAllIndexes(context.Background(), originalTable, opts)
require.NoError(t, err)
indexRowData, err := rebuildAllTable.GetNomsIndexRowData(context.Background(), testSchemaIndexName)
@@ -475,7 +475,7 @@ func TestIndexRebuildingWithOneIndex(t *testing.T) {
}
func TestIndexRebuildingWithTwoIndexes(t *testing.T) {
db, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
_, vrw, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
tSchema := createTestSchema(t)
indexName := tSchema.Indexes().GetByName(testSchemaIndexName)
@@ -486,13 +486,13 @@ func TestIndexRebuildingWithTwoIndexes(t *testing.T) {
indexNameSch := indexName.Schema()
indexAgeSch := indexAge.Schema()
rowData, rows := createTestRowData(t, db, tSchema)
rowData, rows := createTestRowData(t, vrw, tSchema)
indexNameExpectedRows, indexAgeExpectedRows := rowsToIndexRows(t, rows, indexName, indexAge)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), db, tSchema, rowData)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), vrw, tSchema, rowData)
require.NoError(t, err)
opts := TestEditorOptions(db)
opts := TestEditorOptions(vrw)
rebuildAllTable := originalTable
var indexRows []row.Row
@@ -547,7 +547,7 @@ func TestIndexRebuildingWithTwoIndexes(t *testing.T) {
}
// change the underlying data and verify that rebuild changes the data as well
rowData, rows = createUpdatedTestRowData(t, db, tSchema)
rowData, rows = createUpdatedTestRowData(t, vrw, tSchema)
indexNameExpectedRows, indexAgeExpectedRows = rowsToIndexRows(t, rows, indexName, indexAge)
updatedTable, err := rebuildAllTable.UpdateNomsRows(context.Background(), rowData)
require.NoError(t, err)
@@ -599,7 +599,7 @@ func TestIndexRebuildingWithTwoIndexes(t *testing.T) {
}
func TestIndexRebuildingUniqueSuccessOneCol(t *testing.T) {
db, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
_, vrw, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
colColl := schema.NewColCollection(
schema.NewColumn("pk1", 1, types.IntKind, true, schema.NotNullConstraint{}),
schema.NewColumn("v1", 2, types.IntKind, false),
@@ -607,12 +607,12 @@ func TestIndexRebuildingUniqueSuccessOneCol(t *testing.T) {
)
sch, err := schema.SchemaFromCols(colColl)
require.NoError(t, err)
rowData, _ := createTestRowDataFromTaggedValues(t, db, sch,
rowData, _ := createTestRowDataFromTaggedValues(t, vrw, sch,
row.TaggedValues{1: types.Int(1), 2: types.Int(1), 3: types.Int(1)},
row.TaggedValues{1: types.Int(2), 2: types.Int(2), 3: types.Int(2)},
row.TaggedValues{1: types.Int(3), 2: types.Int(3), 3: types.Int(3)},
)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), db, sch, rowData)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), vrw, sch, rowData)
require.NoError(t, err)
index, err := sch.Indexes().AddIndexByColTags("idx_v1", []uint64{2}, schema.IndexProperties{IsUnique: true, Comment: ""})
@@ -620,7 +620,7 @@ func TestIndexRebuildingUniqueSuccessOneCol(t *testing.T) {
updatedTable, err := originalTable.UpdateSchema(context.Background(), sch)
require.NoError(t, err)
opts := TestEditorOptions(db)
opts := TestEditorOptions(vrw)
_, err = RebuildAllIndexes(context.Background(), updatedTable, opts)
require.NoError(t, err)
@@ -629,7 +629,7 @@ func TestIndexRebuildingUniqueSuccessOneCol(t *testing.T) {
}
func TestIndexRebuildingUniqueSuccessTwoCol(t *testing.T) {
db, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
_, vrw, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
colColl := schema.NewColCollection(
schema.NewColumn("pk1", 1, types.IntKind, true, schema.NotNullConstraint{}),
schema.NewColumn("v1", 2, types.IntKind, false),
@@ -637,12 +637,12 @@ func TestIndexRebuildingUniqueSuccessTwoCol(t *testing.T) {
)
sch, err := schema.SchemaFromCols(colColl)
require.NoError(t, err)
rowData, _ := createTestRowDataFromTaggedValues(t, db, sch,
rowData, _ := createTestRowDataFromTaggedValues(t, vrw, sch,
row.TaggedValues{1: types.Int(1), 2: types.Int(1), 3: types.Int(1)},
row.TaggedValues{1: types.Int(2), 2: types.Int(1), 3: types.Int(2)},
row.TaggedValues{1: types.Int(3), 2: types.Int(2), 3: types.Int(2)},
)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), db, sch, rowData)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), vrw, sch, rowData)
require.NoError(t, err)
index, err := sch.Indexes().AddIndexByColTags("idx_v1", []uint64{2, 3}, schema.IndexProperties{IsUnique: true, Comment: ""})
@@ -650,7 +650,7 @@ func TestIndexRebuildingUniqueSuccessTwoCol(t *testing.T) {
updatedTable, err := originalTable.UpdateSchema(context.Background(), sch)
require.NoError(t, err)
opts := TestEditorOptions(db)
opts := TestEditorOptions(vrw)
_, err = RebuildAllIndexes(context.Background(), updatedTable, opts)
require.NoError(t, err)
@@ -659,7 +659,7 @@ func TestIndexRebuildingUniqueSuccessTwoCol(t *testing.T) {
}
func TestIndexRebuildingUniqueFailOneCol(t *testing.T) {
db, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
_, vrw, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
colColl := schema.NewColCollection(
schema.NewColumn("pk1", 1, types.IntKind, true, schema.NotNullConstraint{}),
schema.NewColumn("v1", 2, types.IntKind, false),
@@ -667,12 +667,12 @@ func TestIndexRebuildingUniqueFailOneCol(t *testing.T) {
)
sch, err := schema.SchemaFromCols(colColl)
require.NoError(t, err)
rowData, _ := createTestRowDataFromTaggedValues(t, db, sch,
rowData, _ := createTestRowDataFromTaggedValues(t, vrw, sch,
row.TaggedValues{1: types.Int(1), 2: types.Int(1), 3: types.Int(1)},
row.TaggedValues{1: types.Int(2), 2: types.Int(2), 3: types.Int(2)},
row.TaggedValues{1: types.Int(3), 2: types.Int(2), 3: types.Int(3)},
)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), db, sch, rowData)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), vrw, sch, rowData)
require.NoError(t, err)
index, err := sch.Indexes().AddIndexByColTags("idx_v1", []uint64{2}, schema.IndexProperties{IsUnique: true, Comment: ""})
@@ -680,7 +680,7 @@ func TestIndexRebuildingUniqueFailOneCol(t *testing.T) {
updatedTable, err := originalTable.UpdateSchema(context.Background(), sch)
require.NoError(t, err)
opts := TestEditorOptions(db)
opts := TestEditorOptions(vrw)
_, err = RebuildAllIndexes(context.Background(), updatedTable, opts)
require.Error(t, err)
@@ -689,7 +689,7 @@ func TestIndexRebuildingUniqueFailOneCol(t *testing.T) {
}
func TestIndexRebuildingUniqueFailTwoCol(t *testing.T) {
db, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
_, vrw, _ := dbfactory.MemFactory{}.CreateDB(context.Background(), types.Format_Default, nil, nil)
colColl := schema.NewColCollection(
schema.NewColumn("pk1", 1, types.IntKind, true, schema.NotNullConstraint{}),
schema.NewColumn("v1", 2, types.IntKind, false),
@@ -697,13 +697,13 @@ func TestIndexRebuildingUniqueFailTwoCol(t *testing.T) {
)
sch, err := schema.SchemaFromCols(colColl)
require.NoError(t, err)
rowData, _ := createTestRowDataFromTaggedValues(t, db, sch,
rowData, _ := createTestRowDataFromTaggedValues(t, vrw, sch,
row.TaggedValues{1: types.Int(1), 2: types.Int(1), 3: types.Int(1)},
row.TaggedValues{1: types.Int(2), 2: types.Int(1), 3: types.Int(2)},
row.TaggedValues{1: types.Int(3), 2: types.Int(2), 3: types.Int(2)},
row.TaggedValues{1: types.Int(4), 2: types.Int(1), 3: types.Int(2)},
)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), db, sch, rowData)
originalTable, err := createTableWithoutIndexRebuilding(context.Background(), vrw, sch, rowData)
require.NoError(t, err)
index, err := sch.Indexes().AddIndexByColTags("idx_v1", []uint64{2, 3}, schema.IndexProperties{IsUnique: true, Comment: ""})
@@ -711,7 +711,7 @@ func TestIndexRebuildingUniqueFailTwoCol(t *testing.T) {
updatedTable, err := originalTable.UpdateSchema(context.Background(), sch)
require.NoError(t, err)
opts := TestEditorOptions(db)
opts := TestEditorOptions(vrw)
_, err = RebuildAllIndexes(context.Background(), updatedTable, opts)
require.Error(t, err)
@@ -724,7 +724,7 @@ func TestIndexEditorCapacityExceeded(t *testing.T) {
// In the event that we reach the iea capacity on Undo, we need to verify that all code paths fail and remain failing
ctx := context.Background()
format := types.Format_Default
db, err := dbfactory.MemFactory{}.CreateDB(ctx, format, nil, nil)
_, vrw, err := dbfactory.MemFactory{}.CreateDB(ctx, format, nil, nil)
require.NoError(t, err)
colColl := schema.NewColCollection(
schema.NewColumn("pk", 0, types.IntKind, true),
@@ -734,7 +734,7 @@ func TestIndexEditorCapacityExceeded(t *testing.T) {
index, err := tableSch.Indexes().AddIndexByColNames("idx_cap", []string{"v1"}, schema.IndexProperties{IsUnique: false, Comment: ""})
require.NoError(t, err)
indexSch := index.Schema()
emptyMap, err := types.NewMap(ctx, db)
emptyMap, err := types.NewMap(ctx, vrw)
require.NoError(t, err)
opts := Options{Deaf: NewInMemDeafWithMaxCapacity(format, 224)}
@@ -32,7 +32,7 @@ import (
func TestKeylessTableEditorConcurrency(t *testing.T) {
format := types.Format_Default
db, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
colColl := schema.NewColCollection(
schema.NewColumn("v0", 0, types.IntKind, false),
@@ -40,12 +40,12 @@ func TestKeylessTableEditorConcurrency(t *testing.T) {
schema.NewColumn("v2", 2, types.IntKind, false))
tableSch, err := schema.SchemaFromCols(colColl)
require.NoError(t, err)
emptyMap, err := types.NewMap(context.Background(), db)
emptyMap, err := types.NewMap(context.Background(), vrw)
require.NoError(t, err)
table, err := doltdb.NewNomsTable(context.Background(), db, tableSch, emptyMap, nil, nil)
table, err := doltdb.NewNomsTable(context.Background(), vrw, tableSch, emptyMap, nil, nil)
require.NoError(t, err)
opts := TestEditorOptions(db)
opts := TestEditorOptions(vrw)
for i := 0; i < tableEditorConcurrencyIterations; i++ {
tableEditor, err := newKeylessTableEditor(context.Background(), table, tableSch, tableName, opts)
require.NoError(t, err)
@@ -140,7 +140,7 @@ func TestKeylessTableEditorConcurrency(t *testing.T) {
func TestKeylessTableEditorConcurrencyPostInsert(t *testing.T) {
format := types.Format_Default
db, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
colColl := schema.NewColCollection(
schema.NewColumn("v0", 0, types.IntKind, false),
@@ -148,12 +148,12 @@ func TestKeylessTableEditorConcurrencyPostInsert(t *testing.T) {
schema.NewColumn("v2", 2, types.IntKind, false))
tableSch, err := schema.SchemaFromCols(colColl)
require.NoError(t, err)
emptyMap, err := types.NewMap(context.Background(), db)
emptyMap, err := types.NewMap(context.Background(), vrw)
require.NoError(t, err)
table, err := doltdb.NewNomsTable(context.Background(), db, tableSch, emptyMap, nil, nil)
table, err := doltdb.NewNomsTable(context.Background(), vrw, tableSch, emptyMap, nil, nil)
require.NoError(t, err)
opts := TestEditorOptions(db)
opts := TestEditorOptions(vrw)
tableEditor, err := newKeylessTableEditor(context.Background(), table, tableSch, tableName, opts)
require.NoError(t, err)
for i := 0; i < tableEditorConcurrencyFinalCount*2; i++ {
@@ -247,7 +247,7 @@ func TestKeylessTableEditorConcurrencyPostInsert(t *testing.T) {
func TestKeylessTableEditorWriteAfterFlush(t *testing.T) {
format := types.Format_Default
db, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
colColl := schema.NewColCollection(
schema.NewColumn("v0", 0, types.IntKind, false),
@@ -255,12 +255,12 @@ func TestKeylessTableEditorWriteAfterFlush(t *testing.T) {
schema.NewColumn("v2", 2, types.IntKind, false))
tableSch, err := schema.SchemaFromCols(colColl)
require.NoError(t, err)
emptyMap, err := types.NewMap(context.Background(), db)
emptyMap, err := types.NewMap(context.Background(), vrw)
require.NoError(t, err)
table, err := doltdb.NewNomsTable(context.Background(), db, tableSch, emptyMap, nil, nil)
table, err := doltdb.NewNomsTable(context.Background(), vrw, tableSch, emptyMap, nil, nil)
require.NoError(t, err)
opts := TestEditorOptions(db)
opts := TestEditorOptions(vrw)
tableEditor, err := newKeylessTableEditor(context.Background(), table, tableSch, tableName, opts)
require.NoError(t, err)
@@ -328,7 +328,7 @@ func TestKeylessTableEditorWriteAfterFlush(t *testing.T) {
func TestKeylessTableEditorDuplicateKeyHandling(t *testing.T) {
format := types.Format_Default
db, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
colColl := schema.NewColCollection(
schema.NewColumn("v0", 0, types.IntKind, false),
@@ -336,12 +336,12 @@ func TestKeylessTableEditorDuplicateKeyHandling(t *testing.T) {
schema.NewColumn("v2", 2, types.IntKind, false))
tableSch, err := schema.SchemaFromCols(colColl)
require.NoError(t, err)
emptyMap, err := types.NewMap(context.Background(), db)
emptyMap, err := types.NewMap(context.Background(), vrw)
require.NoError(t, err)
table, err := doltdb.NewNomsTable(context.Background(), db, tableSch, emptyMap, nil, nil)
table, err := doltdb.NewNomsTable(context.Background(), vrw, tableSch, emptyMap, nil, nil)
require.NoError(t, err)
opts := TestEditorOptions(db)
opts := TestEditorOptions(vrw)
tableEditor, err := newKeylessTableEditor(context.Background(), table, tableSch, tableName, opts)
require.NoError(t, err)
@@ -418,9 +418,9 @@ func TestKeylessTableEditorDuplicateKeyHandling(t *testing.T) {
func TestKeylessTableEditorMultipleIndexErrorHandling(t *testing.T) {
ctx := context.Background()
format := types.Format_Default
db, err := dbfactory.MemFactory{}.CreateDB(ctx, format, nil, nil)
_, vrw, err := dbfactory.MemFactory{}.CreateDB(ctx, format, nil, nil)
require.NoError(t, err)
opts := TestEditorOptions(db)
opts := TestEditorOptions(vrw)
colColl := schema.NewColCollection(
schema.NewColumn("v0", 0, types.IntKind, false),
schema.NewColumn("v1", 1, types.IntKind, false),
@@ -435,9 +435,9 @@ func TestKeylessTableEditorMultipleIndexErrorHandling(t *testing.T) {
IsUnique: false,
})
require.NoError(t, err)
emptyMap, err := types.NewMap(ctx, db)
emptyMap, err := types.NewMap(ctx, vrw)
require.NoError(t, err)
table, err := doltdb.NewNomsTable(ctx, db, tableSch, emptyMap, nil, nil)
table, err := doltdb.NewNomsTable(ctx, vrw, tableSch, emptyMap, nil, nil)
require.NoError(t, err)
table, err = RebuildAllIndexes(ctx, table, opts)
require.NoError(t, err)
@@ -575,9 +575,9 @@ func TestKeylessTableEditorMultipleIndexErrorHandling(t *testing.T) {
func TestKeylessTableEditorIndexCardinality(t *testing.T) {
ctx := context.Background()
format := types.Format_Default
db, err := dbfactory.MemFactory{}.CreateDB(ctx, format, nil, nil)
_, vrw, err := dbfactory.MemFactory{}.CreateDB(ctx, format, nil, nil)
require.NoError(t, err)
opts := TestEditorOptions(db)
opts := TestEditorOptions(vrw)
colColl := schema.NewColCollection(
schema.NewColumn("v0", 0, types.IntKind, false),
schema.NewColumn("v1", 1, types.IntKind, false),
@@ -588,9 +588,9 @@ func TestKeylessTableEditorIndexCardinality(t *testing.T) {
IsUnique: false,
})
require.NoError(t, err)
emptyMap, err := types.NewMap(ctx, db)
emptyMap, err := types.NewMap(ctx, vrw)
require.NoError(t, err)
table, err := doltdb.NewNomsTable(ctx, db, tableSch, emptyMap, nil, nil)
table, err := doltdb.NewNomsTable(ctx, vrw, tableSch, emptyMap, nil, nil)
require.NoError(t, err)
table, err = RebuildAllIndexes(ctx, table, opts)
require.NoError(t, err)
@@ -42,18 +42,18 @@ const (
func TestTableEditorConcurrency(t *testing.T) {
format := types.Format_Default
db, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
opts := TestEditorOptions(db)
opts := TestEditorOptions(vrw)
colColl := schema.NewColCollection(
schema.NewColumn("pk", 0, types.IntKind, true),
schema.NewColumn("v1", 1, types.IntKind, false),
schema.NewColumn("v2", 2, types.IntKind, false))
tableSch, err := schema.SchemaFromCols(colColl)
require.NoError(t, err)
emptyMap, err := types.NewMap(context.Background(), db)
emptyMap, err := types.NewMap(context.Background(), vrw)
require.NoError(t, err)
table, err := doltdb.NewNomsTable(context.Background(), db, tableSch, emptyMap, nil, nil)
table, err := doltdb.NewNomsTable(context.Background(), vrw, tableSch, emptyMap, nil, nil)
require.NoError(t, err)
for i := 0; i < tableEditorConcurrencyIterations; i++ {
@@ -137,18 +137,18 @@ func TestTableEditorConcurrency(t *testing.T) {
func TestTableEditorConcurrencyPostInsert(t *testing.T) {
format := types.Format_Default
db, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
opts := TestEditorOptions(db)
opts := TestEditorOptions(vrw)
colColl := schema.NewColCollection(
schema.NewColumn("pk", 0, types.IntKind, true),
schema.NewColumn("v1", 1, types.IntKind, false),
schema.NewColumn("v2", 2, types.IntKind, false))
tableSch, err := schema.SchemaFromCols(colColl)
require.NoError(t, err)
emptyMap, err := types.NewMap(context.Background(), db)
emptyMap, err := types.NewMap(context.Background(), vrw)
require.NoError(t, err)
table, err := doltdb.NewNomsTable(context.Background(), db, tableSch, emptyMap, nil, nil)
table, err := doltdb.NewNomsTable(context.Background(), vrw, tableSch, emptyMap, nil, nil)
require.NoError(t, err)
tableEditor, err := newPkTableEditor(context.Background(), table, tableSch, tableName, opts)
@@ -230,18 +230,18 @@ func TestTableEditorConcurrencyPostInsert(t *testing.T) {
func TestTableEditorWriteAfterFlush(t *testing.T) {
format := types.Format_Default
db, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
opts := TestEditorOptions(db)
opts := TestEditorOptions(vrw)
colColl := schema.NewColCollection(
schema.NewColumn("pk", 0, types.IntKind, true),
schema.NewColumn("v1", 1, types.IntKind, false),
schema.NewColumn("v2", 2, types.IntKind, false))
tableSch, err := schema.SchemaFromCols(colColl)
require.NoError(t, err)
emptyMap, err := types.NewMap(context.Background(), db)
emptyMap, err := types.NewMap(context.Background(), vrw)
require.NoError(t, err)
table, err := doltdb.NewNomsTable(context.Background(), db, tableSch, emptyMap, nil, nil)
table, err := doltdb.NewNomsTable(context.Background(), vrw, tableSch, emptyMap, nil, nil)
require.NoError(t, err)
tableEditor, err := newPkTableEditor(context.Background(), table, tableSch, tableName, opts)
@@ -300,18 +300,18 @@ func TestTableEditorWriteAfterFlush(t *testing.T) {
func TestTableEditorDuplicateKeyHandling(t *testing.T) {
format := types.Format_Default
db, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
_, vrw, err := dbfactory.MemFactory{}.CreateDB(context.Background(), format, nil, nil)
require.NoError(t, err)
opts := TestEditorOptions(db)
opts := TestEditorOptions(vrw)
colColl := schema.NewColCollection(
schema.NewColumn("pk", 0, types.IntKind, true),
schema.NewColumn("v1", 1, types.IntKind, false),
schema.NewColumn("v2", 2, types.IntKind, false))
tableSch, err := schema.SchemaFromCols(colColl)
require.NoError(t, err)
emptyMap, err := types.NewMap(context.Background(), db)
emptyMap, err := types.NewMap(context.Background(), vrw)
require.NoError(t, err)
table, err := doltdb.NewNomsTable(context.Background(), db, tableSch, emptyMap, nil, nil)
table, err := doltdb.NewNomsTable(context.Background(), vrw, tableSch, emptyMap, nil, nil)
require.NoError(t, err)
tableEditor, err := newPkTableEditor(context.Background(), table, tableSch, tableName, opts)
@@ -379,9 +379,9 @@ func TestTableEditorDuplicateKeyHandling(t *testing.T) {
func TestTableEditorMultipleIndexErrorHandling(t *testing.T) {
ctx := context.Background()
format := types.Format_Default
db, err := dbfactory.MemFactory{}.CreateDB(ctx, format, nil, nil)
_, vrw, err := dbfactory.MemFactory{}.CreateDB(ctx, format, nil, nil)
require.NoError(t, err)
opts := TestEditorOptions(db)
opts := TestEditorOptions(vrw)
colColl := schema.NewColCollection(
schema.NewColumn("pk", 0, types.IntKind, true),
schema.NewColumn("v1", 1, types.IntKind, false),
@@ -396,9 +396,9 @@ func TestTableEditorMultipleIndexErrorHandling(t *testing.T) {
IsUnique: true,
})
require.NoError(t, err)
emptyMap, err := types.NewMap(ctx, db)
emptyMap, err := types.NewMap(ctx, vrw)
require.NoError(t, err)
table, err := doltdb.NewNomsTable(ctx, db, tableSch, emptyMap, nil, nil)
table, err := doltdb.NewNomsTable(ctx, vrw, tableSch, emptyMap, nil, nil)
require.NoError(t, err)
table, err = RebuildAllIndexes(ctx, table, opts)
require.NoError(t, err)
@@ -24,7 +24,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/store/chunks"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/types"
)
@@ -107,18 +106,17 @@ func TestRangeReader(t *testing.T) {
sch, err := schema.SchemaFromCols(colColl)
require.NoError(t, err)
var db datas.Database
storage := &chunks.MemoryStorage{}
db = datas.NewDatabase(storage.NewView())
m, err := types.NewMap(ctx, db)
vrw := types.NewValueStore(storage.NewView())
m, err := types.NewMap(ctx, vrw)
assert.NoError(t, err)
me := m.Edit()
for i := 0; i <= 100; i += 2 {
k, err := types.NewTuple(db.Format(), types.Uint(pkTag), types.Int(i))
k, err := types.NewTuple(vrw.Format(), types.Uint(pkTag), types.Int(i))
require.NoError(t, err)
v, err := types.NewTuple(db.Format(), types.Uint(valTag), types.Int(100-i))
v, err := types.NewTuple(vrw.Format(), types.Uint(valTag), types.Int(100-i))
require.NoError(t, err)
me.Set(k, v)
@@ -164,10 +162,9 @@ func TestRangeReaderOnEmptyMap(t *testing.T) {
sch, err := schema.SchemaFromCols(colColl)
require.NoError(t, err)
var db datas.Database
storage := &chunks.MemoryStorage{}
db = datas.NewDatabase(storage.NewView())
m, err := types.NewMap(ctx, db)
vrw := types.NewValueStore(storage.NewView())
m, err := types.NewMap(ctx, vrw)
assert.NoError(t, err)
for _, test := range rangeReaderTests {
@@ -25,7 +25,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/store/chunks"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/types"
)
@@ -43,18 +42,17 @@ func TestReaderForKeys(t *testing.T) {
sch, err := schema.SchemaFromCols(colColl)
require.NoError(t, err)
var db datas.Database
storage := &chunks.MemoryStorage{}
db = datas.NewDatabase(storage.NewView())
m, err := types.NewMap(ctx, db)
vrw := types.NewValueStore(storage.NewView())
m, err := types.NewMap(ctx, vrw)
assert.NoError(t, err)
me := m.Edit()
for i := 0; i <= 100; i += 2 {
k, err := types.NewTuple(db.Format(), types.Uint(pkTag), types.Int(i))
k, err := types.NewTuple(vrw.Format(), types.Uint(pkTag), types.Int(i))
require.NoError(t, err)
v, err := types.NewTuple(db.Format(), types.Uint(valTag), types.Int(100-i))
v, err := types.NewTuple(vrw.Format(), types.Uint(valTag), types.Int(100-i))
require.NoError(t, err)
me.Set(k, v)
@@ -93,7 +91,7 @@ func TestReaderForKeys(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ctx := context.Background()
rd := NewNomsMapReaderForKeys(m, sch, intKeysToTupleKeys(t, db.Format(), test.keys))
rd := NewNomsMapReaderForKeys(m, sch, intKeysToTupleKeys(t, vrw.Format(), test.keys))
var rows []row.Row
for {
+6 -3
View File
@@ -46,8 +46,9 @@ func newNBSProllyStore(dir string) keyValStore {
}
func newProllyStore(ctx context.Context, cs chunks.ChunkStore) keyValStore {
db := datas.NewDatabase(cs)
m, err := types.NewMap(ctx, db)
vrw := types.NewValueStore(cs)
db := datas.NewTypesDatabase(vrw)
m, err := types.NewMap(ctx, vrw)
if err != nil {
panic(err)
}
@@ -55,12 +56,14 @@ func newProllyStore(ctx context.Context, cs chunks.ChunkStore) keyValStore {
store: m,
editor: types.NewMapEditor(m),
db: db,
vrw: vrw,
}
}
type prollyStore struct {
store types.Map
editor *types.MapEditor
vrw types.ValueReadWriter
db datas.Database
mu sync.RWMutex
}
@@ -134,7 +137,7 @@ func (m *prollyStore) flush() {
}
// persist
_, err = m.db.WriteValue(ctx, m.store)
_, err = m.vrw.WriteValue(ctx, m.store)
if err != nil {
panic(err)
}
+5 -5
View File
@@ -32,16 +32,16 @@ import (
)
type CommitIterator struct {
db datas.Database
vr types.ValueReader
branches branchList
}
// NewCommitIterator initializes a new CommitIterator with the first commit to be printed.
func NewCommitIterator(db datas.Database, commit types.Struct) *CommitIterator {
cr, err := types.NewRef(commit, db.Format())
func NewCommitIterator(vr types.ValueReader, commit types.Struct) *CommitIterator {
cr, err := types.NewRef(commit, vr.Format())
d.PanicIfError(err)
return &CommitIterator{db: db, branches: branchList{branch{cr: cr, commit: commit}}}
return &CommitIterator{vr: vr, branches: branchList{branch{cr: cr, commit: commit}}}
}
// Next returns information about the next commit to be printed. LogNode contains enough contextual
@@ -76,7 +76,7 @@ func (iter *CommitIterator) Next(ctx context.Context) (LogNode, bool) {
parents := commitRefsFromSet(ctx, pFld.(types.Set))
for _, p := range parents {
v, err := iter.db.ReadValue(ctx, p.TargetHash())
v, err := iter.vr.ReadValue(ctx, p.TargetHash())
d.PanicIfError(err)
b := branch{cr: p, commit: v.(types.Struct)}
+1 -1
View File
@@ -41,7 +41,7 @@ import (
func nomsBlobGet(ctx context.Context, ds string, filePath string) int {
cfg := config.NewResolver()
var blob types.Blob
if db, val, err := cfg.GetPath(ctx, ds); err != nil {
if db, _, val, err := cfg.GetPath(ctx, ds); err != nil {
util.CheckErrorNoUsage(err)
} else if val == nil {
util.CheckErrorNoUsage(fmt.Errorf("No value at %s", ds))
+6 -4
View File
@@ -48,16 +48,18 @@ func (s *nbeSuite) TestNomsBlobGet() {
sp, err := spec.ForDatabase(s.TempDir)
s.NoError(err)
defer sp.Close()
db := sp.GetDatabase(context.Background())
ctx := context.Background()
db := sp.GetDatabase(ctx)
vrw := sp.GetVRW(ctx)
blobBytes := []byte("hello")
blob, err := types.NewBlob(context.Background(), db, bytes.NewBuffer(blobBytes))
blob, err := types.NewBlob(ctx, vrw, bytes.NewBuffer(blobBytes))
s.NoError(err)
ref, err := db.WriteValue(context.Background(), blob)
ref, err := vrw.WriteValue(ctx, blob)
s.NoError(err)
ref, err = db.WriteValue(context.Background(), blob)
ref, err = vrw.WriteValue(context.Background(), blob)
s.NoError(err)
ds, err := db.GetDataset(context.Background(), "datasetID")
s.NoError(err)
+2 -2
View File
@@ -68,14 +68,14 @@ func nomsBlobPut(ctx context.Context, filePath string, dsPath string, concurrenc
}
cfg := config.NewResolver()
db, ds, err := cfg.GetDataset(ctx, dsPath)
db, vrw, ds, err := cfg.GetDataset(ctx, dsPath)
if err != nil {
fmt.Fprintf(os.Stderr, "Could not create dataset: %s\n", err)
return 1
}
defer db.Close()
blob, err := types.NewBlob(ctx, db, readers...)
blob, err := types.NewBlob(ctx, vrw, readers...)
// TODO: fix panics
d.PanicIfError(err)
+3 -3
View File
@@ -157,7 +157,7 @@ func runCat(ctx context.Context, args []string) int {
//Want a clean db every loop
sp, _ := spec.ForDatabase("mem")
db := sp.GetDatabase(ctx)
vrw := sp.GetVRW(ctx)
fmt.Printf(" chunk[%d].raw.len: %d\n", cidx, len(currCD.compressed))
@@ -175,7 +175,7 @@ func runCat(ctx context.Context, args []string) int {
}
if !catNoShow {
value, err := types.DecodeValue(chunk, db)
value, err := types.DecodeValue(chunk, vrw)
if err != nil {
fmt.Println(" error reading value (Could be a format issue).")
@@ -189,7 +189,7 @@ func runCat(ctx context.Context, args []string) int {
}
refIdx := 0
err = types.WalkRefs(chunk, db.Format(), func(ref types.Ref) error {
err = types.WalkRefs(chunk, vrw.Format(), func(ref types.Ref) error {
if refIdx == 0 {
fmt.Printf(" chunk[%d] references chunks:\n", cidx)
}
+5 -5
View File
@@ -59,7 +59,7 @@ func setupCommitFlags() *flag.FlagSet {
func runCommit(ctx context.Context, args []string) int {
cfg := config.NewResolver()
db, ds, err := cfg.GetDataset(ctx, args[len(args)-1])
db, vrw, ds, err := cfg.GetDataset(ctx, args[len(args)-1])
util.CheckError(err)
defer db.Close()
@@ -74,7 +74,7 @@ func runCommit(ctx context.Context, args []string) int {
absPath, err := spec.NewAbsolutePath(path)
util.CheckError(err)
value := absPath.Resolve(ctx, db)
value := absPath.Resolve(ctx, db, vrw)
if value == nil {
util.CheckErrorNoUsage(errors.New(fmt.Sprintf("Error resolving value: %s", path)))
}
@@ -91,9 +91,9 @@ func runCommit(ctx context.Context, args []string) int {
return 1
}
hh, err := head.Hash(db.Format())
hh, err := head.Hash(vrw.Format())
d.PanicIfError(err)
vh, err := value.Hash(db.Format())
vh, err := value.Hash(vrw.Format())
d.PanicIfError(err)
if hh == vh && !allowDupe {
@@ -102,7 +102,7 @@ func runCommit(ctx context.Context, args []string) int {
}
}
meta, err := spec.CreateCommitMetaStruct(ctx, db, "", "", nil, nil)
meta, err := spec.CreateCommitMetaStruct(ctx, db, vrw, "", "", nil, nil)
util.CheckErrorNoUsage(err)
ds, err = db.Commit(ctx, ds, value, datas.CommitOptions{Meta: meta})
+3 -1
View File
@@ -49,7 +49,9 @@ func (s *nomsCommitTestSuite) setupDataset(name string, doCommit bool) (sp spec.
s.NoError(err)
v := types.String("testcommit")
ref, err = sp.GetDatabase(context.Background()).WriteValue(context.Background(), v)
ctx := context.Background()
vrw := sp.GetVRW(ctx)
ref, err = vrw.WriteValue(context.Background(), v)
s.NoError(err)
if doCommit {
+3 -3
View File
@@ -57,21 +57,21 @@ func setupDiffFlags() *flag.FlagSet {
func runDiff(ctx context.Context, args []string) int {
cfg := config.NewResolver()
db1, value1, err := cfg.GetPath(ctx, args[0])
db1, vrw1, value1, err := cfg.GetPath(ctx, args[0])
util.CheckErrorNoUsage(err)
if value1 == nil {
util.CheckErrorNoUsage(fmt.Errorf("Object not found: %s", args[0]))
}
defer db1.Close()
db2, value2, err := cfg.GetPath(ctx, args[1])
db2, vrw2, value2, err := cfg.GetPath(ctx, args[1])
util.CheckErrorNoUsage(err)
if value2 == nil {
util.CheckErrorNoUsage(fmt.Errorf("Object not found: %s", args[1]))
}
defer db2.Close()
d.PanicIfFalse(db1.Format() == db2.Format())
d.PanicIfFalse(vrw1.Format() == vrw2.Format())
if stat {
diff.Summary(ctx, value1, value2)
+3 -2
View File
@@ -65,6 +65,7 @@ func (s *nomsDiffTestSuite) TestNomsDiffStat() {
defer sp.Close()
db := sp.GetDatabase(context.Background())
vrw := sp.GetVRW(context.Background())
ds, err := addCommit(sp.GetDataset(context.Background()), "first commit")
s.NoError(err)
@@ -82,14 +83,14 @@ func (s *nomsDiffTestSuite) TestNomsDiffStat() {
out, _ = s.MustRun(main, []string{"diff", "--stat", r1 + ".value", r2 + ".value"})
s.NotContains(out, "Comparing commit values")
l, err := types.NewList(context.Background(), db, types.Float(1), types.Float(2), types.Float(3), types.Float(4))
l, err := types.NewList(context.Background(), vrw, types.Float(1), types.Float(2), types.Float(3), types.Float(4))
s.NoError(err)
ds, err = db.CommitValue(context.Background(), ds, l)
s.NoError(err)
r3 := spec.CreateHashSpecString("nbs", s.DBDir, mustHeadRef(ds).TargetHash()) + ".value"
l, err = types.NewList(context.Background(), db, types.Float(1), types.Float(222), types.Float(4))
l, err = types.NewList(context.Background(), vrw, types.Float(1), types.Float(222), types.Float(4))
s.NoError(err)
ds, err = db.CommitValue(context.Background(), ds, l)
s.NoError(err)
+2 -2
View File
@@ -56,7 +56,7 @@ func setupDsFlags() *flag.FlagSet {
func runDs(ctx context.Context, args []string) int {
cfg := config.NewResolver()
if toDelete != "" {
db, set, err := cfg.GetDataset(ctx, toDelete)
db, _, set, err := cfg.GetDataset(ctx, toDelete)
util.CheckError(err)
defer db.Close()
@@ -76,7 +76,7 @@ func runDs(ctx context.Context, args []string) int {
if len(args) >= 1 {
dbSpec = args[0]
}
store, err := cfg.GetDatabase(ctx, dbSpec)
store, _, err := cfg.GetDatabase(ctx, dbSpec)
util.CheckError(err)
defer store.Close()
+3 -2
View File
@@ -76,7 +76,8 @@ func nomsListNew(ctx context.Context, dbStr string, args []string) int {
sp, err := spec.ForDatabase(dbStr)
d.PanicIfError(err)
db := sp.GetDatabase(ctx)
l, err := types.NewList(ctx, db)
vrw := sp.GetVRW(ctx)
l, err := types.NewList(ctx, vrw)
d.PanicIfError(err)
applyListInserts(ctx, db, sp, l, nil, 0, args)
return 0
@@ -130,7 +131,7 @@ func applyListInserts(ctx context.Context, db datas.Database, sp spec.Spec, root
}
patch := diff.Patch{}
for i := 0; i < len(args); i++ {
vv, err := argumentToValue(ctx, args[i], db)
vv, err := argumentToValue(ctx, args[i], db, sp.GetVRW(ctx))
if err != nil {
util.CheckError(fmt.Errorf("Invalid value: %s at position %d: %s", args[i], i, err))
}
+14 -14
View File
@@ -102,7 +102,7 @@ func runLog(ctx context.Context, args []string) int {
return 1
}
defer pinned.Close()
database := pinned.GetDatabase(ctx)
vrw := pinned.GetVRW(ctx)
absPath := pinned.Path
path := absPath.Path
@@ -110,7 +110,7 @@ func runLog(ctx context.Context, args []string) int {
path = types.MustParsePath(".value")
}
origCommitVal, err := database.ReadValue(ctx, absPath.Hash)
origCommitVal, err := vrw.ReadValue(ctx, absPath.Hash)
d.PanicIfError(err)
origCommit, ok := origCommitVal.(types.Struct)
@@ -124,7 +124,7 @@ func runLog(ctx context.Context, args []string) int {
util.CheckError(fmt.Errorf("%s does not reference a Commit object", args[0]))
}
iter := NewCommitIterator(database, origCommit)
iter := NewCommitIterator(vrw, origCommit)
displayed := 0
if maxCommits <= 0 {
maxCommits = math.MaxInt32
@@ -141,7 +141,7 @@ func runLog(ctx context.Context, args []string) int {
go func(ch chan []byte, node LogNode) {
buff := &bytes.Buffer{}
printCommit(ctx, node, path, buff, database, tz)
printCommit(ctx, node, path, buff, vrw, tz)
ch <- buff.Bytes()
}(ch, ln)
@@ -169,7 +169,7 @@ func runLog(ctx context.Context, args []string) int {
// Prints the information for one commit in the log, including ascii graph on left side of commits if
// -graph arg is true.
func printCommit(ctx context.Context, node LogNode, path types.Path, w io.Writer, db datas.Database, tz *time.Location) (err error) {
func printCommit(ctx context.Context, node LogNode, path types.Path, w io.Writer, vr types.ValueReader, tz *time.Location) (err error) {
maxMetaFieldNameLength := func(commit types.Struct) int {
maxLen := 0
if m, ok, err := commit.MaybeGet(datas.CommitMetaField); err != nil {
@@ -185,7 +185,7 @@ func printCommit(ctx context.Context, node LogNode, path types.Path, w io.Writer
return maxLen
}
h, err := node.commit.Hash(db.Format())
h, err := node.commit.Hash(vr.Format())
d.PanicIfError(err)
hashStr := h.String()
if useColor {
@@ -231,9 +231,9 @@ func printCommit(ctx context.Context, node LogNode, path types.Path, w io.Writer
}
if showValue {
_, err = writeCommitLines(ctx, node, path, maxLines, lineno, w, db)
_, err = writeCommitLines(ctx, node, path, maxLines, lineno, w, vr)
} else {
_, err = writeDiffLines(ctx, node, path, db, maxLines, lineno, w)
_, err = writeDiffLines(ctx, node, path, vr, maxLines, lineno, w)
}
}
return
@@ -345,13 +345,13 @@ func writeMetaLines(ctx context.Context, node LogNode, maxLines, lineno, maxLabe
return lineno, nil
}
func writeCommitLines(ctx context.Context, node LogNode, path types.Path, maxLines, lineno int, w io.Writer, db datas.Database) (lineCnt int, err error) {
func writeCommitLines(ctx context.Context, node LogNode, path types.Path, maxLines, lineno int, w io.Writer, vr types.ValueReader) (lineCnt int, err error) {
genPrefix := func(pw *writers.PrefixWriter) []byte {
return []byte(genGraph(node, int(pw.NumLines)+1))
}
mlw := &writers.MaxLineWriter{Dest: w, MaxLines: uint32(maxLines), NumLines: uint32(lineno)}
pw := &writers.PrefixWriter{Dest: mlw, PrefixFunc: genPrefix, NeedsPrefix: true, NumLines: uint32(lineno)}
v, err := path.Resolve(ctx, node.commit, db)
v, err := path.Resolve(ctx, node.commit, vr)
d.PanicIfError(err)
if v == nil {
pw.Write([]byte("<nil>\n"))
@@ -375,7 +375,7 @@ func writeCommitLines(ctx context.Context, node LogNode, path types.Path, maxLin
return int(pw.NumLines), err
}
func writeDiffLines(ctx context.Context, node LogNode, path types.Path, db datas.Database, maxLines, lineno int, w io.Writer) (lineCnt int, err error) {
func writeDiffLines(ctx context.Context, node LogNode, path types.Path, vr types.ValueReader, maxLines, lineno int, w io.Writer) (lineCnt int, err error) {
genPrefix := func(w *writers.PrefixWriter) []byte {
return []byte(genGraph(node, int(w.NumLines)+1))
}
@@ -398,7 +398,7 @@ func writeDiffLines(ctx context.Context, node LogNode, path types.Path, db datas
return 1, err
}
val, err := parent.(types.Ref).TargetValue(ctx, db)
val, err := parent.(types.Ref).TargetValue(ctx, vr)
parentCommit := val.(types.Struct)
d.PanicIfError(err)
@@ -406,12 +406,12 @@ func writeDiffLines(ctx context.Context, node LogNode, path types.Path, db datas
err = functions.All(
func() error {
var err error
old, err = path.Resolve(ctx, parentCommit, db)
old, err = path.Resolve(ctx, parentCommit, vr)
return err
},
func() error {
var err error
neu, err = path.Resolve(ctx, node.commit, db)
neu, err = path.Resolve(ctx, node.commit, vr)
return err
},
)
+12 -7
View File
@@ -50,13 +50,14 @@ func testCommitInResults(s *nomsLogTestSuite, str string, i int) {
defer sp.Close()
db := sp.GetDatabase(context.Background())
vrw := sp.GetVRW(context.Background())
db.CommitValue(context.Background(), sp.GetDataset(context.Background()), types.Float(i))
s.NoError(err)
commit, ok := sp.GetDataset(context.Background()).MaybeHead()
s.True(ok)
res, _ := s.MustRun(main, []string{"log", str})
h, err := commit.Hash(db.Format())
h, err := commit.Hash(vrw.Format())
s.NoError(err)
s.Contains(res, h.String())
}
@@ -79,9 +80,10 @@ func (s *nomsLogTestSuite) TestNomsLogPath() {
defer sp.Close()
db := sp.GetDatabase(context.Background())
vrw := sp.GetVRW(context.Background())
ds := sp.GetDataset(context.Background())
for i := 0; i < 3; i++ {
data, err := types.NewStruct(db.Format(), "", types.StructData{
data, err := types.NewStruct(vrw.Format(), "", types.StructData{
"bar": types.Float(i),
})
s.NoError(err)
@@ -165,20 +167,21 @@ func (s *nomsLogTestSuite) TestNArg() {
defer sp.Close()
db := sp.GetDatabase(context.Background())
vrw := sp.GetVRW(context.Background())
ds, err := db.GetDataset(context.Background(), dsName)
s.NoError(err)
ds, err = addCommit(ds, "1")
s.NoError(err)
h1, err := mustHead(ds).Hash(db.Format())
h1, err := mustHead(ds).Hash(vrw.Format())
s.NoError(err)
ds, err = addCommit(ds, "2")
s.NoError(err)
h2, err := mustHead(ds).Hash(db.Format())
h2, err := mustHead(ds).Hash(vrw.Format())
s.NoError(err)
ds, err = addCommit(ds, "3")
s.NoError(err)
h3, err := mustHead(ds).Hash(db.Format())
h3, err := mustHead(ds).Hash(vrw.Format())
s.NoError(err)
dsSpec := spec.CreateValueSpecString("nbs", s.DBDir, dsName)
@@ -204,11 +207,12 @@ func (s *nomsLogTestSuite) TestEmptyCommit() {
defer sp.Close()
db := sp.GetDatabase(context.Background())
vrw := sp.GetVRW(context.Background())
ds, err := db.GetDataset(context.Background(), "ds1")
s.NoError(err)
meta, err := types.NewStruct(db.Format(), "Meta", map[string]types.Value{
meta, err := types.NewStruct(vrw.Format(), "Meta", map[string]types.Value{
"longNameForTest": types.String("Yoo"),
"test2": types.String("Hoo"),
})
@@ -232,6 +236,7 @@ func (s *nomsLogTestSuite) TestTruncation() {
s.NoError(err)
defer sp.Close()
db := sp.GetDatabase(context.Background())
vrw := sp.GetVRW(context.Background())
toNomsList := func(l []string) types.List {
nv := []types.Value{}
@@ -239,7 +244,7 @@ func (s *nomsLogTestSuite) TestTruncation() {
nv = append(nv, types.String(v))
}
lst, err := types.NewList(context.Background(), db, nv...)
lst, err := types.NewList(context.Background(), vrw, nv...)
s.NoError(err)
return lst
+3 -2
View File
@@ -68,7 +68,8 @@ func nomsMapNew(ctx context.Context, dbStr string, args []string) int {
sp, err := spec.ForDatabase(dbStr)
d.PanicIfError(err)
db := sp.GetDatabase(ctx)
m, err := types.NewMap(ctx, db)
vrw := sp.GetVRW(ctx)
m, err := types.NewMap(ctx, vrw)
d.PanicIfError(err)
applyMapEdits(ctx, db, sp, m, nil, args)
return 0
@@ -113,7 +114,7 @@ func applyMapEdits(ctx context.Context, db datas.Database, sp spec.Spec, rootVal
patch := diff.Patch{}
for i := 0; i < len(args); i += 2 {
kp := parseKeyPart(args, i)
vv, err := argumentToValue(ctx, args[i+1], db)
vv, err := argumentToValue(ctx, args[i+1], db, sp.GetVRW(ctx))
if err != nil {
util.CheckError(fmt.Errorf("Invalid value: %s at position %d: %s", args[i+1], i+1, err))
}
+6 -6
View File
@@ -74,7 +74,7 @@ func runMerge(ctx context.Context, args []string) int {
if len(args) != 4 {
util.CheckErrorNoUsage(fmt.Errorf("incorrect number of arguments"))
}
db, err := cfg.GetDatabase(ctx, args[0])
db, vrw, err := cfg.GetDatabase(ctx, args[0])
util.CheckError(err)
defer db.Close()
@@ -85,7 +85,7 @@ func runMerge(ctx context.Context, args []string) int {
return 1
}
left, right, ancestor, err := getMergeCandidates(ctx, db, leftDS, rightDS)
left, right, ancestor, err := getMergeCandidates(ctx, db, vrw, leftDS, rightDS)
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
@@ -94,7 +94,7 @@ func runMerge(ctx context.Context, args []string) int {
policy := decidePolicy(resolver)
pc, closer := newMergeProgressChan()
merged, err := policy(ctx, left, right, ancestor, db, pc)
merged, err := policy(ctx, left, right, ancestor, vrw, pc)
closer()
util.CheckErrorNoUsage(err)
@@ -114,7 +114,7 @@ func runMerge(ctx context.Context, args []string) int {
return 1
}
p, err := types.NewList(ctx, db, leftHeadRef, rightHeadRef)
p, err := types.NewList(ctx, vrw, leftHeadRef, rightHeadRef)
d.PanicIfError(err)
_, err = db.Commit(ctx, outDS, merged, datas.CommitOptions{
@@ -157,14 +157,14 @@ func resolveDatasets(ctx context.Context, db datas.Database, leftName, rightName
return
}
func getMergeCandidates(ctx context.Context, db datas.Database, leftDS, rightDS datas.Dataset) (left, right, ancestor types.Value, err error) {
func getMergeCandidates(ctx context.Context, db datas.Database, vrw types.ValueReadWriter, leftDS, rightDS datas.Dataset) (left, right, ancestor types.Value, err error) {
leftRef, ok, err := leftDS.MaybeHeadRef()
d.PanicIfError(err)
checkIfTrue(!ok, "Dataset %s has no data", leftDS.ID())
rightRef, ok, err := rightDS.MaybeHeadRef()
d.PanicIfError(err)
checkIfTrue(!ok, "Dataset %s has no data", rightDS.ID())
ancestorCommit, ok := getCommonAncestor(ctx, leftRef, rightRef, db)
ancestorCommit, ok := getCommonAncestor(ctx, leftRef, rightRef, vrw)
checkIfTrue(!ok, "Datasets %s and %s have no common ancestor", leftDS.ID(), rightDS.ID())
leftHead, ok, err := leftDS.MaybeHeadValue()
+29 -27
View File
@@ -67,37 +67,37 @@ func (s *nomsMergeTestSuite) TestNomsMerge_Success() {
types.StructData{
"num": types.Float(42),
"str": types.String("foobar"),
"lst": mustValue(types.NewList(context.Background(), parentSpec.GetDatabase(context.Background()), types.Float(1), types.String("foo"))),
"map": mustValue(types.NewMap(context.Background(), parentSpec.GetDatabase(context.Background()), types.Float(1), types.String("foo"),
"lst": mustValue(types.NewList(context.Background(), parentSpec.GetVRW(context.Background()), types.Float(1), types.String("foo"))),
"map": mustValue(types.NewMap(context.Background(), parentSpec.GetVRW(context.Background()), types.Float(1), types.String("foo"),
types.String("foo"), types.Float(1))),
},
mustList(types.NewList(context.Background(), parentSpec.GetDatabase(context.Background()))))
mustList(types.NewList(context.Background(), parentSpec.GetVRW(context.Background()))))
l := s.setupMergeDataset(
leftSpec,
types.StructData{
"num": types.Float(42),
"str": types.String("foobaz"),
"lst": mustValue(types.NewList(context.Background(), leftSpec.GetDatabase(context.Background()), types.Float(1), types.String("foo"))),
"map": mustValue(types.NewMap(context.Background(), leftSpec.GetDatabase(context.Background()), types.Float(1), types.String("foo"), types.String("foo"), types.Float(1))),
"lst": mustValue(types.NewList(context.Background(), leftSpec.GetVRW(context.Background()), types.Float(1), types.String("foo"))),
"map": mustValue(types.NewMap(context.Background(), leftSpec.GetVRW(context.Background()), types.Float(1), types.String("foo"), types.String("foo"), types.Float(1))),
},
mustList(types.NewList(context.Background(), leftSpec.GetDatabase(context.Background()), p)))
mustList(types.NewList(context.Background(), leftSpec.GetVRW(context.Background()), p)))
r := s.setupMergeDataset(
rightSpec,
types.StructData{
"num": types.Float(42),
"str": types.String("foobar"),
"lst": mustValue(types.NewList(context.Background(), rightSpec.GetDatabase(context.Background()), types.Float(1), types.String("foo"))),
"map": mustValue(types.NewMap(context.Background(), rightSpec.GetDatabase(context.Background()), types.Float(1), types.String("foo"), types.String("foo"), types.Float(1), types.Float(2), types.String("bar"))),
"lst": mustValue(types.NewList(context.Background(), rightSpec.GetVRW(context.Background()), types.Float(1), types.String("foo"))),
"map": mustValue(types.NewMap(context.Background(), rightSpec.GetVRW(context.Background()), types.Float(1), types.String("foo"), types.String("foo"), types.Float(1), types.Float(2), types.String("bar"))),
},
mustList(types.NewList(context.Background(), rightSpec.GetDatabase(context.Background()), p)))
mustList(types.NewList(context.Background(), rightSpec.GetVRW(context.Background()), p)))
expected := mustValue(types.NewStruct(parentSpec.GetDatabase(context.Background()).Format(), "", types.StructData{
expected := mustValue(types.NewStruct(parentSpec.GetVRW(context.Background()).Format(), "", types.StructData{
"num": types.Float(42),
"str": types.String("foobaz"),
"lst": mustValue(types.NewList(context.Background(), parentSpec.GetDatabase(context.Background()), types.Float(1), types.String("foo"))),
"map": mustValue(types.NewMap(context.Background(), parentSpec.GetDatabase(context.Background()), types.Float(1), types.String("foo"), types.String("foo"), types.Float(1), types.Float(2), types.String("bar"))),
"lst": mustValue(types.NewList(context.Background(), parentSpec.GetVRW(context.Background()), types.Float(1), types.String("foo"))),
"map": mustValue(types.NewMap(context.Background(), parentSpec.GetVRW(context.Background()), types.Float(1), types.String("foo"), types.String("foo"), types.Float(1), types.Float(2), types.String("bar"))),
}))
output := "output"
@@ -119,18 +119,19 @@ func (s *nomsMergeTestSuite) spec(name string) spec.Spec {
func (s *nomsMergeTestSuite) setupMergeDataset(sp spec.Spec, data types.StructData, p types.List) types.Ref {
ds := sp.GetDataset(context.Background())
db := sp.GetDatabase(context.Background())
ds, err := db.Commit(context.Background(), ds, mustValue(types.NewStruct(db.Format(), "", data)), datas.CommitOptions{ParentsList: p})
vrw := sp.GetVRW(context.Background())
ds, err := db.Commit(context.Background(), ds, mustValue(types.NewStruct(vrw.Format(), "", data)), datas.CommitOptions{ParentsList: p})
s.NoError(err)
return mustHeadRef(ds)
}
func (s *nomsMergeTestSuite) validateDataset(name string, expected types.Struct, parents ...types.Value) {
sp, err := spec.ForDataset(spec.CreateValueSpecString("nbs", s.DBDir, name))
db := sp.GetDatabase(context.Background())
vrw := sp.GetVRW(context.Background())
if s.NoError(err) {
defer sp.Close()
commit := mustHead(sp.GetDataset(context.Background()))
s.True(mustGetValue(commit.MaybeGet(datas.ParentsField)).Equals(mustSet(types.NewSet(context.Background(), db, parents...))))
s.True(mustGetValue(commit.MaybeGet(datas.ParentsField)).Equals(mustSet(types.NewSet(context.Background(), vrw, parents...))))
merged := mustHeadValue(sp.GetDataset(context.Background()))
s.True(expected.Equals(merged), "%s != %s", mustString(types.EncodedValue(context.Background(), expected)), mustString(types.EncodedValue(context.Background(), merged)))
}
@@ -145,11 +146,11 @@ func (s *nomsMergeTestSuite) TestNomsMerge_Left() {
rightSpec := s.spec(right)
defer rightSpec.Close()
p := s.setupMergeDataset(parentSpec, types.StructData{"num": types.Float(42)}, mustList(types.NewList(context.Background(), parentSpec.GetDatabase(context.Background()))))
l := s.setupMergeDataset(leftSpec, types.StructData{"num": types.Float(43)}, mustList(types.NewList(context.Background(), leftSpec.GetDatabase(context.Background()), p)))
r := s.setupMergeDataset(rightSpec, types.StructData{"num": types.Float(44)}, mustList(types.NewList(context.Background(), rightSpec.GetDatabase(context.Background()), p)))
p := s.setupMergeDataset(parentSpec, types.StructData{"num": types.Float(42)}, mustList(types.NewList(context.Background(), parentSpec.GetVRW(context.Background()))))
l := s.setupMergeDataset(leftSpec, types.StructData{"num": types.Float(43)}, mustList(types.NewList(context.Background(), leftSpec.GetVRW(context.Background()), p)))
r := s.setupMergeDataset(rightSpec, types.StructData{"num": types.Float(44)}, mustList(types.NewList(context.Background(), rightSpec.GetVRW(context.Background()), p)))
expected := mustValue(types.NewStruct(parentSpec.GetDatabase(context.Background()).Format(), "", types.StructData{"num": types.Float(43)}))
expected := mustValue(types.NewStruct(parentSpec.GetVRW(context.Background()).Format(), "", types.StructData{"num": types.Float(43)}))
output := "output"
stdout, stderr, err := s.Run(main, []string{"merge", "--policy=l", s.DBDir, left, right, output})
@@ -170,11 +171,11 @@ func (s *nomsMergeTestSuite) TestNomsMerge_Right() {
rightSpec := s.spec(right)
defer rightSpec.Close()
p := s.setupMergeDataset(parentSpec, types.StructData{"num": types.Float(42)}, mustList(types.NewList(context.Background(), parentSpec.GetDatabase(context.Background()))))
l := s.setupMergeDataset(leftSpec, types.StructData{"num": types.Float(43)}, mustList(types.NewList(context.Background(), leftSpec.GetDatabase(context.Background()), p)))
r := s.setupMergeDataset(rightSpec, types.StructData{"num": types.Float(44)}, mustList(types.NewList(context.Background(), rightSpec.GetDatabase(context.Background()), p)))
p := s.setupMergeDataset(parentSpec, types.StructData{"num": types.Float(42)}, mustList(types.NewList(context.Background(), parentSpec.GetVRW(context.Background()))))
l := s.setupMergeDataset(leftSpec, types.StructData{"num": types.Float(43)}, mustList(types.NewList(context.Background(), leftSpec.GetVRW(context.Background()), p)))
r := s.setupMergeDataset(rightSpec, types.StructData{"num": types.Float(44)}, mustList(types.NewList(context.Background(), rightSpec.GetVRW(context.Background()), p)))
expected := mustValue(types.NewStruct(parentSpec.GetDatabase(context.Background()).Format(), "", types.StructData{"num": types.Float(44)}))
expected := mustValue(types.NewStruct(parentSpec.GetVRW(context.Background()).Format(), "", types.StructData{"num": types.Float(44)}))
output := "output"
stdout, stderr, err := s.Run(main, []string{"merge", "--policy=r", s.DBDir, left, right, output})
@@ -194,9 +195,9 @@ func (s *nomsMergeTestSuite) TestNomsMerge_Conflict() {
defer leftSpec.Close()
rightSpec := s.spec(right)
defer rightSpec.Close()
p := s.setupMergeDataset(parentSpec, types.StructData{"num": types.Float(42)}, mustList(types.NewList(context.Background(), parentSpec.GetDatabase(context.Background()))))
s.setupMergeDataset(leftSpec, types.StructData{"num": types.Float(43)}, mustList(types.NewList(context.Background(), leftSpec.GetDatabase(context.Background()), p)))
s.setupMergeDataset(rightSpec, types.StructData{"num": types.Float(44)}, mustList(types.NewList(context.Background(), rightSpec.GetDatabase(context.Background()), p)))
p := s.setupMergeDataset(parentSpec, types.StructData{"num": types.Float(42)}, mustList(types.NewList(context.Background(), parentSpec.GetVRW(context.Background()))))
s.setupMergeDataset(leftSpec, types.StructData{"num": types.Float(43)}, mustList(types.NewList(context.Background(), leftSpec.GetVRW(context.Background()), p)))
s.setupMergeDataset(rightSpec, types.StructData{"num": types.Float(44)}, mustList(types.NewList(context.Background(), rightSpec.GetVRW(context.Background()), p)))
s.Panics(func() { s.MustRun(main, []string{"merge", s.DBDir, left, right, "output"}) })
}
@@ -219,11 +220,12 @@ func (s *nomsMergeTestSuite) TestBadInput() {
}
db := sp.GetDatabase(context.Background())
vrw := sp.GetVRW(context.Background())
prep := func(dsName string) {
ds, err := db.GetDataset(context.Background(), dsName)
s.NoError(err)
_, err = db.CommitValue(context.Background(), ds, mustValue(types.NewMap(context.Background(), db, types.String("foo"), types.String("bar"))))
_, err = db.CommitValue(context.Background(), ds, mustValue(types.NewMap(context.Background(), vrw, types.String("foo"), types.String("bar"))))
s.NoError(err)
}
prep(l)
+3 -3
View File
@@ -86,12 +86,12 @@ func runRoot(ctx context.Context, args []string) int {
}
// If BUG 3407 is correct, we might be able to just take cs and make a Database directly from that.
db, err := cfg.GetDatabase(ctx, args[0])
db, vrw, err := cfg.GetDatabase(ctx, args[0])
util.CheckErrorNoUsage(err)
defer db.Close()
v, err := db.ReadValue(ctx, h)
v, err := vrw.ReadValue(ctx, h)
util.CheckErrorNoUsage(err)
if !validate(ctx, db.Format(), v) {
if !validate(ctx, vrw.Format(), v) {
return 1
}
+7 -6
View File
@@ -69,8 +69,8 @@ func nomsSet(ctx context.Context, noms *kingpin.Application) (*kingpin.CmdClause
func nomsSetNew(ctx context.Context, dbStr string, args []string) int {
sp, err := spec.ForDatabase(dbStr)
d.PanicIfError(err)
db := sp.GetDatabase(ctx)
s, err := types.NewSet(ctx, db)
vrw := sp.GetVRW(ctx)
s, err := types.NewSet(ctx, vrw)
d.PanicIfError(err)
applySetEdits(ctx, sp, s, nil, types.DiffChangeAdded, args)
return 0
@@ -100,9 +100,10 @@ func applySetEdits(ctx context.Context, sp spec.Spec, rootVal types.Value, baseP
return
}
db := sp.GetDatabase(ctx)
vrw := sp.GetVRW(ctx)
patch := diff.Patch{}
for i := 0; i < len(args); i++ {
vv, err := argumentToValue(ctx, args[i], db)
vv, err := argumentToValue(ctx, args[i], db, vrw)
if err != nil {
util.CheckErrorNoUsage(err)
}
@@ -110,7 +111,7 @@ func applySetEdits(ctx context.Context, sp spec.Spec, rootVal types.Value, baseP
if types.ValueCanBePathIndex(vv) {
pp = types.NewIndexPath(vv)
} else {
h, err := vv.Hash(db.Format())
h, err := vv.Hash(vrw.Format())
d.PanicIfError(err)
pp = types.NewHashIndexPath(h)
}
@@ -127,7 +128,7 @@ func applySetEdits(ctx context.Context, sp spec.Spec, rootVal types.Value, baseP
appplyPatch(ctx, db, sp, rootVal, basePath, patch)
}
func argumentToValue(ctx context.Context, arg string, db datas.Database) (types.Value, error) {
func argumentToValue(ctx context.Context, arg string, db datas.Database, vrw types.ValueReadWriter) (types.Value, error) {
d.PanicIfTrue(arg == "")
if arg == "true" {
@@ -160,7 +161,7 @@ func argumentToValue(ctx context.Context, arg string, db datas.Database) (types.
if arg[0] == '@' {
p, err := spec.NewAbsolutePath(arg[1:])
d.PanicIfError(err)
return p.Resolve(ctx, db), nil
return p.Resolve(ctx, db, vrw), nil
}
if n, err := strconv.ParseFloat(arg, 64); err == nil {
return types.Float(n), nil
+3 -3
View File
@@ -67,7 +67,7 @@ func setupShowFlags() *flag.FlagSet {
func runShow(ctx context.Context, args []string) int {
cfg := config.NewResolver()
database, value, err := cfg.GetPath(ctx, args[0])
database, vrw, value, err := cfg.GetPath(ctx, args[0])
util.CheckErrorNoUsage(err)
defer database.Close()
@@ -82,7 +82,7 @@ func runShow(ctx context.Context, args []string) int {
}
if showRaw {
ch, err := types.EncodeValue(value, database.Format())
ch, err := types.EncodeValue(value, vrw.Format())
util.CheckError(err)
buf := bytes.NewBuffer(ch.Data())
_, err = io.Copy(os.Stdout, buf)
@@ -91,7 +91,7 @@ func runShow(ctx context.Context, args []string) int {
}
if showStats {
types.WriteValueStats(ctx, os.Stdout, value, database)
types.WriteValueStats(ctx, os.Stdout, value, vrw)
return 0
}
+9 -7
View File
@@ -81,7 +81,8 @@ func (s *nomsShowTestSuite) writeTestData(str string, value types.Value) types.R
defer sp.Close()
db := sp.GetDatabase(context.Background())
r1, err := db.WriteValue(context.Background(), value)
vrw := sp.GetVRW(context.Background())
r1, err := vrw.WriteValue(context.Background(), value)
s.NoError(err)
_, err = db.CommitValue(context.Background(), sp.GetDataset(context.Background()), r1)
s.NoError(err)
@@ -104,7 +105,7 @@ func (s *nomsShowTestSuite) TestNomsShow() {
sp := s.spec(str)
defer sp.Close()
list, err := types.NewList(context.Background(), sp.GetDatabase(context.Background()), types.String("elem1"), types.Float(2), types.String("elem3"))
list, err := types.NewList(context.Background(), sp.GetVRW(context.Background()), types.String("elem1"), types.Float(2), types.String("elem3"))
s.NoError(err)
r = s.writeTestData(str, list)
res, _ = s.MustRun(main, []string{"show", str})
@@ -135,17 +136,18 @@ func (s *nomsShowTestSuite) TestNomsShowRaw() {
defer sp.Close()
db := sp.GetDatabase(context.Background())
vrw := sp.GetVRW(context.Background())
// Put a value into the db, get its raw serialization, then deserialize it and ensure it comes
// out to same thing.
test := func(in types.Value) {
r1, err := db.WriteValue(context.Background(), in)
r1, err := vrw.WriteValue(context.Background(), in)
s.NoError(err)
db.CommitValue(context.Background(), sp.GetDataset(context.Background()), r1)
res, _ := s.MustRun(main, []string{"show", "--raw",
spec.CreateValueSpecString("nbs", s.DBDir, "#"+r1.TargetHash().String())})
ch := chunks.NewChunk([]byte(res))
out, err := types.DecodeValue(ch, db)
out, err := types.DecodeValue(ch, vrw)
s.NoError(err)
s.True(out.Equals(in))
}
@@ -154,18 +156,18 @@ func (s *nomsShowTestSuite) TestNomsShowRaw() {
test(types.String("hello"))
// Ref (one child chunk)
test(mustValue(db.WriteValue(context.Background(), types.Float(42))))
test(mustValue(vrw.WriteValue(context.Background(), types.Float(42))))
// Prolly tree with multiple child chunks
items := make([]types.Value, 10000)
for i := 0; i < len(items); i++ {
items[i] = types.Float(i)
}
l, err := types.NewList(context.Background(), db, items...)
l, err := types.NewList(context.Background(), vrw, items...)
s.NoError(err)
numChildChunks := 0
_ = l.WalkRefs(db.Format(), func(r types.Ref) error {
_ = l.WalkRefs(vrw.Format(), func(r types.Ref) error {
numChildChunks++
return nil
})
+1 -1
View File
@@ -37,7 +37,7 @@ func nomsStats(ctx context.Context, noms *kingpin.Application) (*kingpin.CmdClau
return stats, func(input string) int {
cfg := config.NewResolver()
store, err := cfg.GetDatabase(ctx, *database)
store, _, err := cfg.GetDatabase(ctx, *database)
util.CheckError(err)
defer store.Close()
+9 -8
View File
@@ -68,10 +68,10 @@ func nomsStruct(ctx context.Context, noms *kingpin.Application) (*kingpin.CmdCla
func nomsStructNew(ctx context.Context, dbStr string, name string, args []string) int {
sp, err := spec.ForDatabase(dbStr)
d.PanicIfError(err)
db := sp.GetDatabase(ctx)
st, err := types.NewStruct(db.Format(), name, nil)
vrw := sp.GetVRW(ctx)
st, err := types.NewStruct(vrw.Format(), name, nil)
d.PanicIfError(err)
applyStructEdits(ctx, db, sp, st, nil, args)
applyStructEdits(ctx, sp.GetDatabase(ctx), sp, st, nil, args)
return 0
}
@@ -107,7 +107,7 @@ func nomsStructDel(ctx context.Context, specStr string, args []string) int {
func splitPath(ctx context.Context, db datas.Database, sp spec.Spec) (rootVal types.Value, basePath types.Path) {
rootPath := sp.Path
rootPath.Path = types.Path{}
rootVal = rootPath.Resolve(ctx, db)
rootVal = rootPath.Resolve(ctx, db, sp.GetVRW(ctx))
if rootVal == nil {
util.CheckError(fmt.Errorf("Invalid path: %s", sp.String()))
return
@@ -129,7 +129,7 @@ func applyStructEdits(ctx context.Context, db datas.Database, sp spec.Spec, root
if !types.IsValidStructFieldName(args[i]) {
util.CheckError(fmt.Errorf("Invalid field name: %s at position: %d", args[i], i))
}
nv, err := argumentToValue(ctx, args[i+1], db)
nv, err := argumentToValue(ctx, args[i+1], db, sp.GetVRW(ctx))
if err != nil {
util.CheckError(fmt.Errorf("Invalid field value: %s at position %d: %s", args[i+1], i+1, err))
}
@@ -143,16 +143,17 @@ func applyStructEdits(ctx context.Context, db datas.Database, sp spec.Spec, root
}
func appplyPatch(ctx context.Context, db datas.Database, sp spec.Spec, rootVal types.Value, basePath types.Path, patch diff.Patch) {
baseVal, err := basePath.Resolve(ctx, rootVal, db)
vrw := sp.GetVRW(ctx)
baseVal, err := basePath.Resolve(ctx, rootVal, vrw)
util.CheckError(err)
if baseVal == nil {
util.CheckErrorNoUsage(fmt.Errorf("No value at: %s", sp.String()))
}
newRootVal, err := diff.Apply(ctx, db.Format(), rootVal, patch)
newRootVal, err := diff.Apply(ctx, vrw.Format(), rootVal, patch)
util.CheckError(err)
d.Chk.NotNil(newRootVal)
r, err := db.WriteValue(ctx, newRootVal)
r, err := vrw.WriteValue(ctx, newRootVal)
util.CheckError(err)
db.Flush(ctx)
newAbsPath := spec.AbsolutePath{
+3 -3
View File
@@ -63,7 +63,7 @@ func setupSyncFlags() *flag.FlagSet {
func runSync(ctx context.Context, args []string) int {
cfg := config.NewResolver()
sourceStore, sourceObj, err := cfg.GetPath(ctx, args[0])
sourceStore, sourceVRW, sourceObj, err := cfg.GetPath(ctx, args[0])
util.CheckError(err)
defer sourceStore.Close()
@@ -71,7 +71,7 @@ func runSync(ctx context.Context, args []string) int {
util.CheckErrorNoUsage(fmt.Errorf("Object not found: %s", args[0]))
}
sinkDB, sinkDataset, err := cfg.GetDataset(ctx, args[1])
sinkDB, _, sinkDataset, err := cfg.GetDataset(ctx, args[1])
util.CheckError(err)
defer sinkDB.Close()
@@ -97,7 +97,7 @@ func runSync(ctx context.Context, args []string) int {
lastProgressCh <- last
}()
sourceRef, err := types.NewRef(sourceObj, sourceStore.Format())
sourceRef, err := types.NewRef(sourceObj, sourceVRW.Format())
util.CheckError(err)
sinkRef, sinkExists, err := sinkDataset.MaybeHeadRef()
util.CheckError(err)
+9 -9
View File
@@ -130,13 +130,13 @@ func (r *Resolver) ResolvePathSpec(str string) string {
// Resolve string to database spec. If a config is present,
// - resolve a db alias to its db spec
// - resolve "" to the default db spec
func (r *Resolver) GetDatabase(ctx context.Context, str string) (datas.Database, error) {
func (r *Resolver) GetDatabase(ctx context.Context, str string) (datas.Database, types.ValueReadWriter, error) {
dbc := r.DbConfigForDbSpec(str)
sp, err := spec.ForDatabaseOpts(r.verbose(ctx, str, dbc.Url), specOptsForConfig(r.config, dbc))
if err != nil {
return nil, err
return nil, nil, err
}
return sp.GetDatabase(ctx), nil
return sp.GetDatabase(ctx), sp.GetVRW(ctx), nil
}
// Resolve string to a chunkstore. Like ResolveDatabase, but returns the underlying ChunkStore
@@ -152,23 +152,23 @@ func (r *Resolver) GetChunkStore(ctx context.Context, str string) (chunks.ChunkS
// Resolve string to a dataset. If a config is present,
// - if no db prefix is present, assume the default db
// - if the db prefix is an alias, replace it
func (r *Resolver) GetDataset(ctx context.Context, str string) (datas.Database, datas.Dataset, error) {
func (r *Resolver) GetDataset(ctx context.Context, str string) (datas.Database, types.ValueReadWriter, datas.Dataset, error) {
specStr, dbc := r.ResolvePathSpecAndGetDbConfig(str)
sp, err := spec.ForDatasetOpts(r.verbose(ctx, str, specStr), specOptsForConfig(r.config, dbc))
if err != nil {
return nil, datas.Dataset{}, err
return nil, nil, datas.Dataset{}, err
}
return sp.GetDatabase(ctx), sp.GetDataset(ctx), nil
return sp.GetDatabase(ctx), sp.GetVRW(ctx), sp.GetDataset(ctx), nil
}
// Resolve string to a value path. If a config is present,
// - if no db spec is present, assume the default db
// - if the db spec is an alias, replace it
func (r *Resolver) GetPath(ctx context.Context, str string) (datas.Database, types.Value, error) {
func (r *Resolver) GetPath(ctx context.Context, str string) (datas.Database, types.ValueReadWriter, types.Value, error) {
specStr, dbc := r.ResolvePathSpecAndGetDbConfig(str)
sp, err := spec.ForPathOpts(r.verbose(ctx, str, specStr), specOptsForConfig(r.config, dbc))
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
return sp.GetDatabase(ctx), sp.GetValue(ctx), nil
return sp.GetDatabase(ctx), sp.GetVRW(ctx), sp.GetValue(ctx), nil
}
+9 -9
View File
@@ -136,7 +136,7 @@ func TestNewCommit(t *testing.T) {
}
storage := &chunks.TestStorage{}
db := NewDatabase(storage.NewView())
db := NewDatabase(storage.NewView()).(*database)
defer db.Close()
parents := mustList(types.NewList(context.Background(), db))
@@ -255,7 +255,7 @@ func TestCommitWithoutMetaField(t *testing.T) {
assert := assert.New(t)
storage := &chunks.TestStorage{}
db := NewDatabase(storage.NewView())
db := NewDatabase(storage.NewView()).(*database)
defer db.Close()
metaCommit, err := types.NewStruct(types.Format_7_18, "Commit", types.StructData{
@@ -302,7 +302,7 @@ func commonAncWithLazyClosure(ctx context.Context, c1, c2 types.Ref, vr1, vr2 ty
}
// Assert that c is the common ancestor of a and b, using multiple common ancestor methods.
func assertCommonAncestor(t *testing.T, expected, a, b types.Struct, ldb, rdb Database) {
func assertCommonAncestor(t *testing.T, expected, a, b types.Struct, ldb, rdb *database) {
assert := assert.New(t)
type caFinder func(ctx context.Context, c1, c2 types.Ref, vr1, vr2 types.ValueReader) (a types.Ref, ok bool, err error)
@@ -342,7 +342,7 @@ func assertCommonAncestor(t *testing.T, expected, a, b types.Struct, ldb, rdb Da
}
// Add a commit and return it.
func addCommit(t *testing.T, db Database, datasetID string, val string, parents ...types.Struct) (types.Struct, types.Ref) {
func addCommit(t *testing.T, db *database, datasetID string, val string, parents ...types.Struct) (types.Struct, types.Ref) {
ds, err := db.GetDataset(context.Background(), datasetID)
assert.NoError(t, err)
ds, err = db.Commit(context.Background(), ds, types.String(val), CommitOptions{ParentsList: mustList(toRefList(db, parents...))})
@@ -405,7 +405,7 @@ func TestCommitParentsClosure(t *testing.T) {
assert := assert.New(t)
storage := &chunks.TestStorage{}
db := NewDatabase(storage.NewView())
db := NewDatabase(storage.NewView()).(*database)
type expected struct {
height int
@@ -539,7 +539,7 @@ func TestFindCommonAncestor(t *testing.T) {
assert := assert.New(t)
storage := &chunks.TestStorage{}
db := NewDatabase(storage.NewView())
db := NewDatabase(storage.NewView()).(*database)
// Build commit DAG
//
@@ -599,11 +599,11 @@ func TestFindCommonAncestor(t *testing.T) {
assert.NoError(db.Close())
storage = &chunks.TestStorage{}
db = NewDatabase(storage.NewView())
db = NewDatabase(storage.NewView()).(*database)
defer db.Close()
rstorage := &chunks.TestStorage{}
rdb := NewDatabase(rstorage.NewView())
rdb := NewDatabase(rstorage.NewView()).(*database)
defer rdb.Close()
// Rerun the tests when using two different Databases for left and
@@ -661,7 +661,7 @@ func TestFindCommonAncestor(t *testing.T) {
func TestNewCommitRegressionTest(t *testing.T) {
storage := &chunks.TestStorage{}
db := NewDatabase(storage.NewView())
db := NewDatabase(storage.NewView()).(*database)
defer db.Close()
parents := mustList(types.NewList(context.Background(), db))
-7
View File
@@ -43,13 +43,6 @@ import (
// Datasets() occurring after a call to Commit() (et al) will represent the
// result of the Commit().
type Database interface {
// To implement types.ValueWriter, Database implementations provide
// WriteValue(). WriteValue() writes v to this Database, though v is not
// guaranteed to be persistent until after a subsequent Commit(). The
// return value is the Ref of v.
// Written values won't be persisted until a commit-alike
types.ValueReadWriter
// Close must have no side-effects
io.Closer
+7 -7
View File
@@ -944,30 +944,30 @@ func buildNewCommit(ctx context.Context, ds Dataset, v types.Value, opts CommitO
parents := opts.ParentsList
if parents == types.EmptyList || parents.Len() == 0 {
var err error
parents, err = types.NewList(ctx, ds.Database())
parents, err = types.NewList(ctx, ds.db)
if err != nil {
return types.EmptyStruct(ds.Database().Format()), err
return types.EmptyStruct(ds.db.Format()), err
}
if headRef, ok, err := ds.MaybeHeadRef(); err != nil {
return types.EmptyStruct(ds.Database().Format()), err
return types.EmptyStruct(ds.db.Format()), err
} else if ok {
le := parents.Edit().Append(headRef)
parents, err = le.List(ctx)
if err != nil {
return types.EmptyStruct(ds.Database().Format()), err
return types.EmptyStruct(ds.db.Format()), err
}
}
}
meta := opts.Meta
if meta.IsZeroValue() {
meta = types.EmptyStruct(ds.Database().Format())
meta = types.EmptyStruct(ds.db.Format())
}
parentsClosure, includeParentsClosure, err := getParentsClosure(ctx, ds.Database(), parents)
parentsClosure, includeParentsClosure, err := getParentsClosure(ctx, ds.db, parents)
if err != nil {
return types.EmptyStruct(ds.Database().Format()), err
return types.EmptyStruct(ds.db.Format()), err
}
return newCommit(ctx, v, parents, parentsClosure, includeParentsClosure, meta)
+3 -3
View File
@@ -58,7 +58,7 @@ func TestValidateRef(t *testing.T) {
type DatabaseSuite struct {
suite.Suite
storage *chunks.TestStorage
db Database
db *database
makeDb func(chunks.ChunkStore) Database
}
@@ -69,7 +69,7 @@ type LocalDatabaseSuite struct {
func (suite *LocalDatabaseSuite) SetupTest() {
suite.storage = &chunks.TestStorage{}
suite.makeDb = NewDatabase
suite.db = suite.makeDb(suite.storage.NewView())
suite.db = suite.makeDb(suite.storage.NewView()).(*database)
}
type RemoteDatabaseSuite struct {
@@ -81,7 +81,7 @@ func (suite *RemoteDatabaseSuite) SetupTest() {
suite.makeDb = func(cs chunks.ChunkStore) Database {
return NewDatabase(cs)
}
suite.db = suite.makeDb(suite.storage.NewView())
suite.db = suite.makeDb(suite.storage.NewView()).(*database)
}
func (suite *DatabaseSuite) TearDownTest() {
+3 -3
View File
@@ -39,12 +39,12 @@ var DatasetFullRe = regexp.MustCompile("^" + DatasetRe.String() + "$")
// Dataset is a named value within a Database. Different head values may be stored in a dataset. Most commonly, this is
// a commit, but other values are also supported in some cases.
type Dataset struct {
db Database
db *database
id string
head types.Value
}
func newDataset(db Database, id string, head types.Value) (Dataset, error) {
func newDataset(db *database, id string, head types.Value) (Dataset, error) {
check := head == nil
var err error
@@ -105,7 +105,7 @@ func (ds Dataset) MaybeHeadRef() (types.Ref, bool, error) {
if ds.head == nil {
return types.Ref{}, false, nil
}
ref, err := types.NewRef(ds.head, ds.Database().Format())
ref, err := types.NewRef(ds.head, ds.db.Format())
if err != nil {
return types.Ref{}, false, err
+1 -1
View File
@@ -43,7 +43,7 @@ func TestExplicitBranchUsingDatasets(t *testing.T) {
id1 := "testdataset"
id2 := "othertestdataset"
stg := &chunks.MemoryStorage{}
store := NewDatabase(stg.NewView())
store := NewDatabase(stg.NewView()).(*database)
defer store.Close()
ds1, err := store.GetDataset(context.Background(), id1)
+1 -1
View File
@@ -34,7 +34,7 @@ func TestNewTag(t *testing.T) {
}
storage := &chunks.TestStorage{}
db := NewDatabase(storage.NewView())
db := NewDatabase(storage.NewView()).(*database)
defer db.Close()
parents := mustList(types.NewList(context.Background(), db))
+4 -2
View File
@@ -83,7 +83,9 @@ func main() {
log.Fatalf("Must set either --dir or ALL of --table, --bucket and --db\n")
}
db := datas.NewDatabase(store)
vrw := types.NewValueStore(store)
db := datas.NewTypesDatabase(vrw)
defer db.Close()
defer profile.MaybeStartProfile().Stop()
@@ -117,7 +119,7 @@ func main() {
for numNodes := 1; numNodes > 0; numNodes = len(current) {
// Start by reading the values of the current level of the graph
currentValues := make(map[hash.Hash]types.Value, len(current))
readValues, err := db.ReadManyValues(context.Background(), current)
readValues, err := vrw.ReadManyValues(context.Background(), current)
d.PanicIfError(err)
for i, v := range readValues {
h := current[i]
+9 -6
View File
@@ -72,11 +72,12 @@ func main() {
// Build One-Time
storage := &chunks.MemoryStorage{}
db := datas.NewDatabase(storage.NewView())
vrw := types.NewValueStore(storage.NewView())
db := datas.NewTypesDatabase(vrw)
ds, err := db.GetDataset(context.Background(), "test")
d.Chk.NoError(err)
t1 := time.Now()
col := buildFns[i](db, buildCount, valueFn)
col := buildFns[i](vrw, buildCount, valueFn)
ds, err = db.CommitValue(context.Background(), ds, col)
d.Chk.NoError(err)
buildDuration := time.Since(t1)
@@ -92,11 +93,12 @@ func main() {
// Build Incrementally
storage = &chunks.MemoryStorage{}
db = datas.NewDatabase(storage.NewView())
vrw = types.NewValueStore(storage.NewView())
db = datas.NewTypesDatabase(vrw)
ds, err = db.GetDataset(context.Background(), "test")
d.Chk.NoError(err)
t1 = time.Now()
col = buildIncrFns[i](db, insertCount, valueFn)
col = buildIncrFns[i](vrw, insertCount, valueFn)
ds, err = db.CommitValue(context.Background(), ds, col)
d.Chk.NoError(err)
incrDuration := time.Since(t1)
@@ -113,13 +115,14 @@ func main() {
fmt.Printf("Testing Blob: \t\tbuild %d MB\t\t\tscan %d MB\n", *blobSize/1000000, *blobSize/1000000)
storage := &chunks.MemoryStorage{}
db := datas.NewDatabase(storage.NewView())
vrw := types.NewValueStore(storage.NewView())
db := datas.NewTypesDatabase(vrw)
ds, err := db.GetDataset(context.Background(), "test")
d.Chk.NoError(err)
blobBytes := makeBlobBytes(*blobSize)
t1 := time.Now()
blob, err := types.NewBlob(context.Background(), db, bytes.NewReader(blobBytes))
blob, err := types.NewBlob(context.Background(), vrw, bytes.NewReader(blobBytes))
d.Chk.NoError(err)
_, err = db.CommitValue(context.Background(), ds, blob)
d.Chk.NoError(err)
+10 -6
View File
@@ -146,6 +146,8 @@ type PerfSuite struct {
// Database is a Noms database that tests can use for reading and writing. State is persisted across a single Run of a suite.
Database datas.Database
VS *types.ValueStore
// DatabaseSpec is the Noms spec of Database (typically a localhost URL).
DatabaseSpec string
@@ -257,12 +259,13 @@ func Run(datasetID string, t *testing.T, suiteT perfSuiteT) {
defer func() {
db := sp.GetDatabase(context.Background())
vrw := sp.GetVRW(context.Background())
reps := make([]types.Value, *perfRepeatFlag)
for i, rep := range testReps {
timesSlice := types.ValueSlice{}
for name, info := range rep {
st, err := types.NewStruct(db.Format(), "", types.StructData{
st, err := types.NewStruct(vrw.Format(), "", types.StructData{
"elapsed": types.Float(info.elapsed.Nanoseconds()),
"paused": types.Float(info.paused.Nanoseconds()),
"total": types.Float(info.total.Nanoseconds()),
@@ -271,13 +274,13 @@ func Run(datasetID string, t *testing.T, suiteT perfSuiteT) {
require.NoError(t, err)
timesSlice = append(timesSlice, types.String(name), st)
}
reps[i], err = types.NewMap(context.Background(), db, timesSlice...)
reps[i], err = types.NewMap(context.Background(), vrw, timesSlice...)
}
l, err := types.NewList(context.Background(), db, reps...)
l, err := types.NewList(context.Background(), vrw, reps...)
require.NoError(t, err)
record, err := types.NewStruct(db.Format(), "", map[string]types.Value{
"environment": suite.getEnvironment(db),
record, err := types.NewStruct(vrw.Format(), "", map[string]types.Value{
"environment": suite.getEnvironment(vrw),
"nomsRevision": types.String(suite.getGitHead(path.Join(suite.AtticLabs, "noms"))),
"testdataRevision": types.String(suite.getGitHead(suite.Testdata)),
"reps": l,
@@ -300,7 +303,8 @@ func Run(datasetID string, t *testing.T, suiteT perfSuiteT) {
storage := &chunks.MemoryStorage{}
memCS := storage.NewView()
suite.DatabaseSpec = "mem://"
suite.Database = datas.NewDatabase(memCS)
suite.VS = types.NewValueStore(memCS)
suite.Database = datas.NewTypesDatabase(suite.VS)
defer suite.Database.Close()
if t, ok := suiteT.(SetupRepSuite); ok {
+2 -2
View File
@@ -54,9 +54,9 @@ func (s *testSuite) TestNonEmptyPaths() {
func (s *testSuite) TestDatabase() {
assert := s.NewAssert()
val := types.Bool(true)
r, err := s.Database.WriteValue(context.Background(), val)
r, err := s.VS.WriteValue(context.Background(), val)
require.NoError(s.T, err)
v2, err := s.Database.ReadValue(context.Background(), r.TargetHash())
v2, err := s.VS.ReadValue(context.Background(), r.TargetHash())
require.NoError(s.T, err)
assert.True(v2.Equals(val))
}
+5 -5
View File
@@ -103,7 +103,7 @@ func NewAbsolutePath(str string) (AbsolutePath, error) {
}
// Resolve returns the Value reachable by 'p' in 'db'.
func (p AbsolutePath) Resolve(ctx context.Context, db datas.Database) (val types.Value) {
func (p AbsolutePath) Resolve(ctx context.Context, db datas.Database, vrw types.ValueReadWriter) (val types.Value) {
if len(p.Dataset) > 0 {
var ok bool
ds, err := db.GetDataset(ctx, p.Dataset)
@@ -114,7 +114,7 @@ func (p AbsolutePath) Resolve(ctx context.Context, db datas.Database) (val types
}
} else if !p.Hash.IsEmpty() {
var err error
val, err = db.ReadValue(ctx, p.Hash)
val, err = vrw.ReadValue(ctx, p.Hash)
d.PanicIfError(err)
} else {
panic("Unreachable")
@@ -122,7 +122,7 @@ func (p AbsolutePath) Resolve(ctx context.Context, db datas.Database) (val types
if val != nil && p.Path != nil {
var err error
val, err = p.Path.Resolve(ctx, val, db)
val, err = p.Path.Resolve(ctx, val, vrw)
d.PanicIfError(err)
}
return
@@ -151,7 +151,7 @@ func (p AbsolutePath) String() (str string) {
// ReadAbsolutePaths attempts to parse each path in 'paths' and resolve them.
// If any path fails to parse correctly or if any path can be resolved to an
// existing Noms Value, then this function returns (nil, error).
func ReadAbsolutePaths(ctx context.Context, db datas.Database, paths ...string) ([]types.Value, error) {
func ReadAbsolutePaths(ctx context.Context, db datas.Database, vrw types.ValueReadWriter, paths ...string) ([]types.Value, error) {
r := make([]types.Value, 0, len(paths))
for _, ps := range paths {
p, err := NewAbsolutePath(ps)
@@ -159,7 +159,7 @@ func ReadAbsolutePaths(ctx context.Context, db datas.Database, paths ...string)
return nil, fmt.Errorf("invalid input path '%s'", ps)
}
v := p.Resolve(ctx, db)
v := p.Resolve(ctx, db, vrw)
if v == nil {
return nil, fmt.Errorf("input path '%s' does not exist in database", ps)
}
+15 -13
View File
@@ -53,21 +53,22 @@ func TestAbsolutePathToAndFromString(t *testing.T) {
func TestAbsolutePaths(t *testing.T) {
assert := assert.New(t)
storage := &chunks.MemoryStorage{}
db := datas.NewDatabase(storage.NewView())
vs := types.NewValueStore(storage.NewView())
db := datas.NewTypesDatabase(vs)
s0, s1 := types.String("foo"), types.String("bar")
list, err := types.NewList(context.Background(), db, s0, s1)
list, err := types.NewList(context.Background(), vs, s0, s1)
assert.NoError(err)
emptySet, err := types.NewSet(context.Background(), db)
emptySet, err := types.NewSet(context.Background(), vs)
assert.NoError(err)
_, err = db.WriteValue(context.Background(), s0)
_, err = vs.WriteValue(context.Background(), s0)
assert.NoError(err)
_, err = db.WriteValue(context.Background(), s1)
_, err = vs.WriteValue(context.Background(), s1)
assert.NoError(err)
_, err = db.WriteValue(context.Background(), list)
_, err = vs.WriteValue(context.Background(), list)
assert.NoError(err)
_, err = db.WriteValue(context.Background(), emptySet)
_, err = vs.WriteValue(context.Background(), emptySet)
assert.NoError(err)
ds, err := db.GetDataset(context.Background(), "ds")
@@ -80,7 +81,7 @@ func TestAbsolutePaths(t *testing.T) {
resolvesTo := func(exp types.Value, str string) {
p, err := NewAbsolutePath(str)
assert.NoError(err)
act := p.Resolve(context.Background(), db)
act := p.Resolve(context.Background(), db, vs)
if exp == nil {
assert.Nil(act)
} else {
@@ -111,10 +112,11 @@ func TestAbsolutePaths(t *testing.T) {
func TestReadAbsolutePaths(t *testing.T) {
assert := assert.New(t)
storage := &chunks.MemoryStorage{}
db := datas.NewDatabase(storage.NewView())
vs := types.NewValueStore(storage.NewView())
db := datas.NewTypesDatabase(vs)
s0, s1 := types.String("foo"), types.String("bar")
list, err := types.NewList(context.Background(), db, s0, s1)
list, err := types.NewList(context.Background(), vs, s0, s1)
assert.NoError(err)
ds, err := db.GetDataset(context.Background(), "ds")
@@ -122,18 +124,18 @@ func TestReadAbsolutePaths(t *testing.T) {
_, err = db.CommitValue(context.Background(), ds, list)
assert.NoError(err)
vals, err := ReadAbsolutePaths(context.Background(), db, "ds.value[0]", "ds.value[1]")
vals, err := ReadAbsolutePaths(context.Background(), db, vs, "ds.value[0]", "ds.value[1]")
assert.NoError(err)
assert.Equal(2, len(vals))
assert.Equal("foo", string(vals[0].(types.String)))
assert.Equal("bar", string(vals[1].(types.String)))
vals, err = ReadAbsolutePaths(context.Background(), db, "!!#")
vals, err = ReadAbsolutePaths(context.Background(), db, vs, "!!#")
assert.Nil(vals)
assert.Equal("invalid input path '!!#'", err.Error())
vals, err = ReadAbsolutePaths(context.Background(), db, "invalid.monkey")
vals, err = ReadAbsolutePaths(context.Background(), db, vs, "invalid.monkey")
assert.Nil(vals)
assert.Equal("input path 'invalid.monkey' does not exist in database", err.Error())
}
+8 -8
View File
@@ -55,7 +55,7 @@ func RegisterCommitMetaFlags(flags *flag.FlagSet) {
// Database is used only if commitMetaKeyValuePaths are provided on the command line and values need to be resolved.
// Date should be ISO 8601 format (see CommitMetaDateFormat), if empty the current date is used.
// The values passed as command line arguments (if any) are merged with the values provided as function arguments.
func CreateCommitMetaStruct(ctx context.Context, db datas.Database, date, message string, keyValueStrings map[string]string, keyValuePaths map[string]types.Value) (types.Struct, error) {
func CreateCommitMetaStruct(ctx context.Context, db datas.Database, vrw types.ValueReadWriter, date, message string, keyValueStrings map[string]string, keyValuePaths map[string]types.Value) (types.Struct, error) {
metaValues := types.StructData{}
resolvePathFunc := func(path string) (types.Value, error) {
@@ -63,7 +63,7 @@ func CreateCommitMetaStruct(ctx context.Context, db datas.Database, date, messag
if err != nil {
return nil, fmt.Errorf("bad path for meta-p: %s", path)
}
return absPath.Resolve(ctx, db), nil
return absPath.Resolve(ctx, db, vrw), nil
}
parseMetaStrings := func(param string, resolveAsPaths bool) error {
if param == "" {
@@ -92,21 +92,21 @@ func CreateCommitMetaStruct(ctx context.Context, db datas.Database, date, messag
}
if err := parseMetaStrings(commitMetaKeyValueStrings, false); err != nil {
return types.EmptyStruct(db.Format()), err
return types.Struct{}, err
}
if err := parseMetaStrings(commitMetaKeyValuePaths, true); err != nil {
return types.EmptyStruct(db.Format()), err
return types.Struct{}, err
}
for k, v := range keyValueStrings {
if !types.IsValidStructFieldName(k) {
return types.EmptyStruct(db.Format()), fmt.Errorf("invalid meta key: %s", k)
return types.Struct{}, fmt.Errorf("invalid meta key: %s", k)
}
metaValues[k] = types.String(v)
}
for k, v := range keyValuePaths {
if !types.IsValidStructFieldName(k) {
return types.EmptyStruct(db.Format()), fmt.Errorf("invalid meta key: %s", k)
return types.Struct{}, fmt.Errorf("invalid meta key: %s", k)
}
metaValues[k] = v
}
@@ -119,7 +119,7 @@ func CreateCommitMetaStruct(ctx context.Context, db datas.Database, date, messag
} else {
_, err := time.Parse(CommitMetaDateFormat, date)
if err != nil {
return types.EmptyStruct(db.Format()), fmt.Errorf("unable to parse date: %s, error: %s", date, err)
return types.Struct{}, fmt.Errorf("unable to parse date: %s, error: %s", date, err)
}
}
metaValues["date"] = types.String(date)
@@ -129,5 +129,5 @@ func CreateCommitMetaStruct(ctx context.Context, db datas.Database, date, messag
} else if commitMetaMessage != "" {
metaValues["message"] = types.String(commitMetaMessage)
}
return types.NewStruct(db.Format(), "Meta", metaValues)
return types.NewStruct(vrw.Format(), "Meta", metaValues)
}
+19 -14
View File
@@ -39,14 +39,16 @@ func isEmptyStruct(s types.Struct) bool {
return s.Equals(types.EmptyStruct(types.Format_7_18))
}
func newTestDB() datas.Database {
return datas.NewDatabase(chunks.NewMemoryStoreFactory().CreateStore(context.Background(), ""))
func newTestDB() (datas.Database, types.ValueReadWriter) {
vrw := types.NewValueStore(chunks.NewMemoryStoreFactory().CreateStore(context.Background(), ""))
return datas.NewTypesDatabase(vrw), vrw
}
func TestCreateCommitMetaStructBasic(t *testing.T) {
assert := assert.New(t)
meta, err := CreateCommitMetaStruct(context.Background(), newTestDB(), "", "", nil, nil)
db, vrw := newTestDB()
meta, err := CreateCommitMetaStruct(context.Background(), db, vrw, "", "", nil, nil)
assert.NoError(err)
assert.False(isEmptyStruct(meta))
assert.Equal("Struct Meta {\n date: String,\n}", mustString(mustType(types.TypeOf(meta)).Describe(context.Background())))
@@ -58,7 +60,8 @@ func TestCreateCommitMetaStructFromFlags(t *testing.T) {
setCommitMetaFlags(time.Now().UTC().Format(CommitMetaDateFormat), "this is a message", "k1=v1,k2=v2,k3=v3")
defer resetCommitMetaFlags()
meta, err := CreateCommitMetaStruct(context.Background(), newTestDB(), "", "", nil, nil)
db, vrw := newTestDB()
meta, err := CreateCommitMetaStruct(context.Background(), db, vrw, "", "", nil, nil)
assert.NoError(err)
assert.Equal("Struct Meta {\n date: String,\n k1: String,\n k2: String,\n k3: String,\n message: String,\n}",
mustString(mustType(types.TypeOf(meta)).Describe(context.Background())))
@@ -75,7 +78,8 @@ func TestCreateCommitMetaStructFromArgs(t *testing.T) {
dateArg := time.Now().UTC().Format(CommitMetaDateFormat)
messageArg := "this is a message"
keyValueArg := map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}
meta, err := CreateCommitMetaStruct(context.Background(), newTestDB(), dateArg, messageArg, keyValueArg, nil)
db, vrw := newTestDB()
meta, err := CreateCommitMetaStruct(context.Background(), db, vrw, dateArg, messageArg, keyValueArg, nil)
assert.NoError(err)
assert.Equal("Struct Meta {\n date: String,\n k1: String,\n k2: String,\n k3: String,\n message: String,\n}",
mustString(mustType(types.TypeOf(meta)).Describe(context.Background())))
@@ -96,8 +100,9 @@ func TestCreateCommitMetaStructFromFlagsAndArgs(t *testing.T) {
messageArg := "this is a message"
keyValueArg := map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}
db, vrw := newTestDB()
// args passed in should win over the ones in the flags
meta, err := CreateCommitMetaStruct(context.Background(), newTestDB(), dateArg, messageArg, keyValueArg, nil)
meta, err := CreateCommitMetaStruct(context.Background(), db, vrw, dateArg, messageArg, keyValueArg, nil)
assert.NoError(err)
assert.Equal("Struct Meta {\n date: String,\n k1: String,\n k2: String,\n k3: String,\n k4: String,\n message: String,\n}",
mustString(mustType(types.TypeOf(meta)).Describe(context.Background())))
@@ -116,10 +121,10 @@ func TestCreateCommitMetaStructBadDate(t *testing.T) {
setCommitMetaFlags(cliDateString, "", "")
defer resetCommitMetaFlags()
meta, err := CreateCommitMetaStruct(context.Background(), newTestDB(), argDateString, "", nil, nil)
db, vrw := newTestDB()
_, err := CreateCommitMetaStruct(context.Background(), db, vrw, argDateString, "", nil, nil)
assert.Error(err)
assert.True(strings.HasPrefix(err.Error(), "unable to parse date: "))
assert.True(isEmptyStruct(meta))
}
testBadDateMultipleWays := func(dateString string) {
testBadDates(dateString, "")
@@ -139,10 +144,10 @@ func TestCreateCommitMetaStructBadMetaStrings(t *testing.T) {
setCommitMetaFlags("", "", fmt.Sprintf("%s%s%s", k, sep, v))
defer resetCommitMetaFlags()
meta, err := CreateCommitMetaStruct(context.Background(), newTestDB(), "", "", nil, nil)
db, vrw := newTestDB()
_, err := CreateCommitMetaStruct(context.Background(), db, vrw, "", "", nil, nil)
assert.Error(err)
assert.True(strings.HasPrefix(err.Error(), "unable to parse meta value: "))
assert.True(isEmptyStruct(meta))
}
testBadMetaKeys := func(k, v string) {
@@ -151,18 +156,18 @@ func TestCreateCommitMetaStructBadMetaStrings(t *testing.T) {
setCommitMetaFlags("", "", fmt.Sprintf("%s=%s", k, v))
meta, err := CreateCommitMetaStruct(context.Background(), newTestDB(), "", "", nil, nil)
db, vrw := newTestDB()
_, err := CreateCommitMetaStruct(context.Background(), db, vrw, "", "", nil, nil)
assert.Error(err)
assert.True(strings.HasPrefix(err.Error(), "invalid meta key: "))
assert.True(isEmptyStruct(meta))
resetCommitMetaFlags()
metaValues := map[string]string{k: v}
meta, err = CreateCommitMetaStruct(context.Background(), newTestDB(), "", "", metaValues, nil)
db, vrw = newTestDB()
_, err = CreateCommitMetaStruct(context.Background(), db, vrw, "", "", metaValues, nil)
assert.Error(err)
assert.True(strings.HasPrefix(err.Error(), "invalid meta key: "))
assert.True(isEmptyStruct(meta))
}
// Valid names must start with `a-zA-Z` and after that `a-zA-Z0-9_`.
+31 -11
View File
@@ -55,7 +55,6 @@ const (
type ProtocolImpl interface {
NewChunkStore(sp Spec) (chunks.ChunkStore, error)
NewDatabase(sp Spec) (datas.Database, error)
}
var ExternalProtocols = map[string]ProtocolImpl{}
@@ -165,6 +164,7 @@ type Spec struct {
// db is lazily created, so it needs to be a pointer to a Database.
db *datas.Database
vrw *types.ValueReadWriter
}
func newSpec(dbSpec string, opts SpecOptions) (Spec, error) {
@@ -178,6 +178,7 @@ func newSpec(dbSpec string, opts SpecOptions) (Spec, error) {
DatabaseName: dbName,
Options: opts,
db: new(datas.Database),
vrw: new(types.ValueReadWriter),
}, nil
}
@@ -271,11 +272,22 @@ func (sp Spec) String() string {
// is called. If the Spec is closed, it is re-opened with a new Database.
func (sp Spec) GetDatabase(ctx context.Context) datas.Database {
if *sp.db == nil {
*sp.db = sp.createDatabase(ctx)
db, vrw := sp.createDatabase(ctx)
*sp.db = db
*sp.vrw = vrw
}
return *sp.db
}
func (sp Spec) GetVRW(ctx context.Context) types.ValueReadWriter {
if *sp.db == nil {
db, vrw := sp.createDatabase(ctx)
*sp.db = db
*sp.vrw = vrw
}
return *sp.vrw
}
// NewChunkStore returns a new ChunkStore instance that this Spec's
// DatabaseName describes. It's unusual to call this method, GetDatabase is
// more useful. Unlike GetDatabase, a new ChunkStore instance is returned every
@@ -385,7 +397,7 @@ func (sp Spec) GetDataset(ctx context.Context) (ds datas.Dataset) {
// if this isn't a Path Spec or if that path isn't found.
func (sp Spec) GetValue(ctx context.Context) (val types.Value) {
if !sp.Path.IsEmpty() {
val = sp.Path.Resolve(ctx, sp.GetDatabase(ctx))
val = sp.Path.Resolve(ctx, sp.GetDatabase(ctx), sp.GetVRW(ctx))
}
return
}
@@ -428,7 +440,7 @@ func (sp Spec) Pin(ctx context.Context) (Spec, bool) {
return Spec{}, false
}
nbf := sp.GetDatabase(ctx).Format()
nbf := sp.GetVRW(ctx).Format()
r := sp
var err error
@@ -449,28 +461,36 @@ func (sp Spec) Close() error {
return db.Close()
}
func (sp Spec) createDatabase(ctx context.Context) datas.Database {
func (sp Spec) createDatabase(ctx context.Context) (datas.Database, types.ValueReadWriter) {
switch sp.Protocol {
case "aws":
return datas.NewDatabase(parseAWSSpec(ctx, sp.Href(), sp.Options))
cs := parseAWSSpec(ctx, sp.Href(), sp.Options)
vrw := types.NewValueStore(cs)
return datas.NewTypesDatabase(vrw), vrw
case "gs":
return datas.NewDatabase(parseGCSSpec(ctx, sp.Href(), sp.Options))
cs := parseGCSSpec(ctx, sp.Href(), sp.Options)
vrw := types.NewValueStore(cs)
return datas.NewTypesDatabase(vrw), vrw
case "nbs":
os.Mkdir(sp.DatabaseName, 0777)
cs, err := nbs.NewLocalStore(ctx, types.Format_Default.VersionString(), sp.DatabaseName, 1<<28)
d.PanicIfError(err)
return datas.NewDatabase(cs)
vrw := types.NewValueStore(cs)
return datas.NewTypesDatabase(vrw), vrw
case "mem":
storage := &chunks.MemoryStorage{}
return datas.NewDatabase(storage.NewViewWithDefaultFormat())
cs := storage.NewViewWithDefaultFormat()
vrw := types.NewValueStore(cs)
return datas.NewTypesDatabase(vrw), vrw
default:
impl, ok := ExternalProtocols[sp.Protocol]
if !ok {
d.PanicIfError(fmt.Errorf("unknown protocol: %s", sp.Protocol))
}
r, err := impl.NewDatabase(sp)
cs, err := impl.NewChunkStore(sp)
d.PanicIfError(err)
return r
vrw := types.NewValueStore(cs)
return datas.NewTypesDatabase(vrw), vrw
}
}
+16 -13
View File
@@ -82,9 +82,9 @@ func TestMemDatabaseSpec(t *testing.T) {
assert.True(spec.Path.IsEmpty())
s := types.String("hello")
db := spec.GetDatabase(context.Background())
db.WriteValue(context.Background(), s)
assert.Equal(s, mustValue(db.ReadValue(context.Background(), mustHash(s.Hash(types.Format_7_18)))))
vrw := spec.GetVRW(context.Background())
vrw.WriteValue(context.Background(), s)
assert.Equal(s, mustValue(vrw.ReadValue(context.Background(), mustHash(s.Hash(types.Format_7_18)))))
}
func TestMemDatasetSpec(t *testing.T) {
@@ -129,7 +129,7 @@ func TestMemHashPathSpec(t *testing.T) {
// This is a reasonable check but it causes the next GetValue to return nil:
// assert.Nil(spec.GetValue())
spec.GetDatabase(context.Background()).WriteValue(context.Background(), s)
spec.GetVRW(context.Background()).WriteValue(context.Background(), s)
assert.Equal(s, spec.GetValue(context.Background()))
}
@@ -149,7 +149,7 @@ func TestMemDatasetPathSpec(t *testing.T) {
db := spec.GetDatabase(context.Background())
ds, err := db.GetDataset(context.Background(), "test")
assert.NoError(err)
_, err = db.CommitValue(context.Background(), ds, mustList(types.NewList(context.Background(), db, types.Float(42))))
_, err = db.CommitValue(context.Background(), ds, mustList(types.NewList(context.Background(), spec.GetVRW(context.Background()), types.Float(42))))
assert.NoError(err)
assert.Equal(types.Float(42), spec.GetValue(context.Background()))
@@ -171,9 +171,10 @@ func TestNBSDatabaseSpec(t *testing.T) {
func() {
cs, err := nbs.NewLocalStore(context.Background(), types.Format_Default.VersionString(), store1, 8*(1<<20))
assert.NoError(err)
db := datas.NewDatabase(cs)
vrw := types.NewValueStore(cs)
db := datas.NewTypesDatabase(vrw)
defer db.Close()
r, err := db.WriteValue(context.Background(), s)
r, err := vrw.WriteValue(context.Background(), s)
assert.NoError(err)
ds, err := db.GetDataset(context.Background(), "datasetID")
assert.NoError(err)
@@ -188,7 +189,7 @@ func TestNBSDatabaseSpec(t *testing.T) {
assert.Equal("nbs", spec1.Protocol)
assert.Equal(store1, spec1.DatabaseName)
assert.Equal(s, mustValue(spec1.GetDatabase(context.Background()).ReadValue(context.Background(), mustHash(s.Hash(types.Format_7_18)))))
assert.Equal(s, mustValue(spec1.GetVRW(context.Background()).ReadValue(context.Background(), mustHash(s.Hash(types.Format_7_18)))))
// New databases can be created and read/written from.
store2 := filepath.Join(tmpDir, "store2")
@@ -201,14 +202,15 @@ func TestNBSDatabaseSpec(t *testing.T) {
assert.Equal(store2, spec2.DatabaseName)
db := spec2.GetDatabase(context.Background())
db.WriteValue(context.Background(), s)
r, err := db.WriteValue(context.Background(), s)
vrw := spec2.GetVRW(context.Background())
vrw.WriteValue(context.Background(), s)
r, err := vrw.WriteValue(context.Background(), s)
assert.NoError(err)
ds, err := db.GetDataset(context.Background(), "datasetID")
assert.NoError(err)
_, err = db.CommitValue(context.Background(), ds, r)
assert.NoError(err)
assert.Equal(s, mustValue(db.ReadValue(context.Background(), mustHash(s.Hash(types.Format_7_18)))))
assert.Equal(s, mustValue(vrw.ReadValue(context.Background(), mustHash(s.Hash(types.Format_7_18)))))
}
run("")
@@ -509,13 +511,14 @@ func TestMultipleSpecsSameNBS(t *testing.T) {
s := types.String("hello")
db := spec1.GetDatabase(context.Background())
r, err := db.WriteValue(context.Background(), s)
vrw := spec1.GetVRW(context.Background())
r, err := vrw.WriteValue(context.Background(), s)
assert.NoError(err)
ds, err := db.GetDataset(context.Background(), "datasetID")
assert.NoError(err)
_, err = db.CommitValue(context.Background(), ds, r)
assert.NoError(err)
assert.Equal(s, mustValue(spec2.GetDatabase(context.Background()).ReadValue(context.Background(), mustHash(s.Hash(types.Format_7_18)))))
assert.Equal(s, mustValue(spec2.GetVRW(context.Background()).ReadValue(context.Background(), mustHash(s.Hash(types.Format_7_18)))))
}
func TestAcccessingInvalidSpec(t *testing.T) {
+14 -13
View File
@@ -40,11 +40,12 @@ func poe(err error) {
}
}
func getDBAtDir(ctx context.Context, dir string) datas.Database {
func getDBAtDir(ctx context.Context, dir string) (datas.Database, types.ValueReadWriter) {
cs, err := nbs.NewLocalStore(ctx, types.Format_Default.VersionString(), dir, 1<<28)
poe(err)
return datas.NewDatabase(nbs.NewNBSMetricWrapper(cs))
vrw := types.NewValueStore(nbs.NewNBSMetricWrapper(cs))
return datas.NewTypesDatabase(vrw), vrw
}
const (
@@ -56,12 +57,12 @@ const (
var benchmarkTmpDir = os.TempDir()
var genOnce = &sync.Once{}
func getBenchmarkDB(ctx context.Context) datas.Database {
func getBenchmarkDB(ctx context.Context) (datas.Database, types.ValueReadWriter) {
return getDBAtDir(ctx, benchmarkTmpDir)
}
func writeTupleToDB(ctx context.Context, db datas.Database, dsID string, vals ...types.Value) {
root, err := types.NewTuple(db.Format(), vals...)
func writeTupleToDB(ctx context.Context, db datas.Database, vrw types.ValueReadWriter, dsID string, vals ...types.Value) {
root, err := types.NewTuple(vrw.Format(), vals...)
poe(err)
ds, err := db.GetDataset(ctx, dsID)
@@ -72,7 +73,7 @@ func writeTupleToDB(ctx context.Context, db datas.Database, dsID string, vals ..
}
func readTupleFromDB(ctx context.Context, t require.TestingT, dsID string) (*types.NomsBinFormat, []types.Value) {
db := getBenchmarkDB(ctx)
db, vrw := getBenchmarkDB(ctx)
ds, err := db.GetDataset(ctx, dsID)
require.NoError(t, err)
@@ -80,7 +81,7 @@ func readTupleFromDB(ctx context.Context, t require.TestingT, dsID string) (*typ
require.NoError(t, err)
require.True(t, ok)
val, err := ref.TargetValue(ctx, db)
val, err := ref.TargetValue(ctx, vrw)
require.NoError(t, err)
st := val.(types.Struct)
@@ -90,7 +91,7 @@ func readTupleFromDB(ctx context.Context, t require.TestingT, dsID string) (*typ
tup := val.(types.Tuple)
valSlice, err := tup.AsSlice()
require.NoError(t, err)
return db.Format(), valSlice
return vrw.Format(), valSlice
}
var testDataCols = []schema.Column{
@@ -107,13 +108,13 @@ var testDataCols = []schema.Column{
func generateTestData(ctx context.Context) {
genOnce.Do(func() {
db := getBenchmarkDB(ctx)
nbf := db.Format()
db, vrw := getBenchmarkDB(ctx)
nbf := vrw.Format()
m, err := types.NewMap(ctx, db)
m, err := types.NewMap(ctx, vrw)
poe(err)
idx, err := types.NewMap(ctx, db)
idx, err := types.NewMap(ctx, vrw)
poe(err)
me := m.Edit()
@@ -138,7 +139,7 @@ func generateTestData(ctx context.Context) {
idx, err = idxMe.Map(ctx)
poe(err)
writeTupleToDB(ctx, db, simIdxBenchDataset, m, idx)
writeTupleToDB(ctx, db, vrw, simIdxBenchDataset, m, idx)
})
}
+4 -4
View File
@@ -49,7 +49,7 @@ func (s *perfSuite) Test01BuildList10mNumbers() {
assert := s.NewAssert()
in := make(chan types.Value, 16)
ae := atomicerr.New()
out := types.NewStreamingList(context.Background(), s.Database, ae, in)
out := types.NewStreamingList(context.Background(), s.VS, ae, in)
for i := 0; i < 1e7; i++ {
in <- types.Float(s.r.Int63())
@@ -70,7 +70,7 @@ func (s *perfSuite) Test02BuildList10mStructs() {
assert := s.NewAssert()
in := make(chan types.Value, 16)
ae := atomicerr.New()
out := types.NewStreamingList(context.Background(), s.Database, ae, in)
out := types.NewStreamingList(context.Background(), s.VS, ae, in)
for i := 0; i < 1e7; i++ {
st, err := types.NewStruct(types.Format_7_18, "", types.StructData{
@@ -114,7 +114,7 @@ func (s *perfSuite) Test05Concat10mValues2kTimes() {
l1Len, l2Len := l1.Len(), l2.Len()
l1Last, l2Last := last(l1), last(l2)
l3, err := types.NewList(context.Background(), s.Database)
l3, err := types.NewList(context.Background(), s.VS)
assert.NoError(err)
for i := uint64(0); i < 1e3; i++ { // 1k iterations * 2 concat ops = 2k times
// Include some basic sanity checks.
@@ -183,7 +183,7 @@ func (s *perfSuite) testBuild500megBlob(p int) {
}
})
b, err := types.NewBlob(context.Background(), s.Database, readers...)
b, err := types.NewBlob(context.Background(), s.VS, readers...)
assert.NoError(err)
assert.Equal(uint64(size), b.Len())
}
+6 -4
View File
@@ -72,14 +72,15 @@ func WriteValueFile(ctx context.Context, filepath string, store *FileValueStore,
// WriteToWriter writes the values out to the provided writer in the value file format
func WriteToWriter(ctx context.Context, wr io.Writer, store *FileValueStore, values ...types.Value) error {
db := datas.NewDatabase(store)
vrw := types.NewValueStore(store)
db := datas.NewTypesDatabase(vrw)
ds, err := db.GetDataset(ctx, env.DefaultInitBranch)
if err != nil {
return err
}
l, err := types.NewList(ctx, db, values...)
l, err := types.NewList(ctx, vrw, values...)
if err != nil {
return err
@@ -164,8 +165,9 @@ func ReadFromReader(ctx context.Context, rd io.Reader) ([]types.Value, error) {
return nil, err
}
db := datas.NewDatabase(store)
v, err := db.ReadValue(ctx, h)
vrw := types.NewValueStore(store)
v, err := vrw.ReadValue(ctx, h)
if err != nil {
return nil, err