Mirror of https://github.com/dolthub/dolt.git
Merge pull request #1303 from willhite/datas
Replace Commit type with types.Struct().
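At call sites the change looks roughly like this; a minimal sketch assuming the attic-labs/noms packages as imported in the hunks below (readHead and buildCommit are illustrative helper names, not part of the PR):

```go
package example

import (
    "fmt"

    "github.com/attic-labs/noms/datas"
    "github.com/attic-labs/noms/dataset"
    "github.com/attic-labs/noms/types"
)

// readHead: the generated datas.Commit accessors are gone; the head is a
// plain types.Struct whose fields are read by name.
func readHead(ds dataset.Dataset) {
    if commit, ok := ds.MaybeHead(); ok {
        fmt.Println(commit.Get(datas.ValueField)) // was commit.Value()
        parents := commit.Get(datas.ParentsField).(types.Set) // was commit.Parents()
        _ = parents
    }
}

// buildCommit: Set returns a new struct, so calls chain the same way the
// old SetValue/SetParents did.
func buildCommit(v types.Value, parents types.Set) types.Struct {
    return datas.NewCommit().Set(datas.ValueField, v).Set(datas.ParentsField, parents)
}
```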
@@ -5,6 +5,7 @@ import (
     "fmt"

     "github.com/attic-labs/noms/d"
+    "github.com/attic-labs/noms/datas"
     "github.com/attic-labs/noms/dataset"
     "github.com/attic-labs/noms/types"
 )
@@ -25,7 +26,7 @@ func main() {

     lastVal := uint64(0)
     if commit, ok := ds.MaybeHead(); ok {
-        lastVal = uint64(commit.Value().(types.Uint64))
+        lastVal = uint64(commit.Get(datas.ValueField).(types.Uint64))
     }
     newVal := lastVal + 1
     _, err := ds.Commit(types.Uint64(newVal))
@@ -8,6 +8,7 @@ import (

     "github.com/attic-labs/noms/clients/csv"
     "github.com/attic-labs/noms/d"
+    "github.com/attic-labs/noms/datas"
     "github.com/attic-labs/noms/dataset"
 )

@@ -43,7 +44,7 @@ func main() {
     }

     err = d.Try(func() {
-        nomsList, structDesc := csv.ValueToListAndElemDesc(ds.Head().Value(), ds.Store())
+        nomsList, structDesc := csv.ValueToListAndElemDesc(ds.Head().Get(datas.ValueField), ds.Store())
         csv.Write(nomsList, structDesc, comma, os.Stdout)
     })
     if err != nil {
@@ -48,7 +48,7 @@ func (s *testSuite) TestCSVImporter() {
     defer ds.Store().Close()
     defer os.RemoveAll(s.LdbDir)

-    l := ds.Head().Value().(types.List)
+    l := ds.Head().Get(datas.ValueField).(types.List)
     s.Equal(uint64(100), l.Len())

     i := uint64(0)
@@ -110,7 +110,7 @@ func (s *testSuite) TestCSVImporterWithPipe() {
     defer ds.Store().Close()
     defer os.RemoveAll(s.LdbDir)

-    l := ds.Head().Value().(types.List)
+    l := ds.Head().Get(datas.ValueField).(types.List)
     s.Equal(uint64(1), l.Len())
     v := l.Get(0)
     st := v.(types.Struct)
@@ -142,7 +142,7 @@ func (s *testSuite) TestCSVImporterWithExternalHeader() {
     defer ds.Store().Close()
     defer os.RemoveAll(s.LdbDir)

-    l := ds.Head().Value().(types.List)
+    l := ds.Head().Get(datas.ValueField).(types.List)
     s.Equal(uint64(1), l.Len())
     v := l.Get(0)
     st := v.(types.Struct)
@@ -106,7 +106,7 @@ func TestParseDatasetFromHTTP(t *testing.T) {
     setTest, err := ParseDataset(datasetName)

     assert.NoError(err)
-    assert.EqualValues(commit, setTest.Head().Value())
+    assert.EqualValues(commit, setTest.Head().Get(datas.ValueField))

     server.Stop()
 }
@@ -123,7 +123,7 @@ func TestParseDatasetFromMem(t *testing.T) {
     dsTest, err := dsTest.Commit(commit)
     assert.NoError(err)

-    assert.EqualValues(commit, dsTest.Head().Value())
+    assert.EqualValues(commit, dsTest.Head().Get(datas.ValueField))
 }

 func TestParseDatasetFromLDB(t *testing.T) {
@@ -148,7 +148,7 @@ func TestParseDatasetFromLDB(t *testing.T) {
     setTest, errRead := ParseDataset(datasetName)

     assert.NoError(errRead)
-    assert.EqualValues(commit, setTest.Head().Value())
+    assert.EqualValues(commit, setTest.Head().Get(datas.ValueField))
 }

 func TestDatasetBadInput(t *testing.T) {
@@ -201,7 +201,7 @@ func TestParseDatasetObjectFromLdb(t *testing.T) {
     assert.Zero(ref)
     assert.True(isDs)
     assert.NoError(errRead)
-    assert.EqualValues(commit, setTest.Head().Value())
+    assert.EqualValues(commit, setTest.Head().Get(datas.ValueField))
 }

 func TestReadRef(t *testing.T) {
@@ -40,10 +40,10 @@ func main() {
         defer util.StopCPUProfile()
     }

-    var commit datas.Commit
+    var commit types.Struct
     if r, ok := ref.MaybeParse(*sourceObject); ok {
         // sourceObject was sha1
-        commit, ok = sourceStore.ReadValue(r).(datas.Commit)
+        commit, ok = sourceStore.ReadValue(r).(types.Struct)
         d.Exp.True(ok, "Unable to read Commit object with ref: %s", r)
     } else {
         // sourceObject must be a dataset Id
@@ -36,13 +36,13 @@ func (s *testSuite) TestShove() {
     s.Equal("", out)

     dest := dataset.NewDataset(datas.NewDataStore(chunks.NewLevelDBStore(ldb2dir, sn, 1, false)), "bar")
-    s.True(types.Int32(42).Equals(dest.Head().Value()))
+    s.True(types.Int32(42).Equals(dest.Head().Get(datas.ValueField)))
     dest.Store().Close()

     out = s.Run(main, []string{"-source-store", sn, "-source", "foo", "-sink-ldb", ldb2dir, "-sink-ds", "bar"})
     s.Equal("", out)

     dest = dataset.NewDataset(datas.NewDataStore(chunks.NewLevelDBStore(ldb2dir, sn, 1, false)), "bar")
-    s.True(types.Int32(43).Equals(dest.Head().Value()))
+    s.True(types.Int32(43).Equals(dest.Head().Get(datas.ValueField)))
     dest.Store().Close()
 }
datas/commit.go
@@ -1,5 +1,3 @@
-// This file was generated by nomdl/codegen.
-
 package datas

 import (
@@ -7,108 +5,36 @@ import (
     "github.com/attic-labs/noms/types"
 )

-// This function builds up a Noms value that describes the type
-// package implemented by this file and registers it with the global
-// type package definition cache.
-func init() {
-    p := types.NewPackage([]*types.Type{
-        types.MakeStructType("Commit",
-            []types.Field{
-                types.Field{"value", types.ValueType, false},
-                types.Field{"parents", types.MakeSetType(types.MakeRefType(types.MakeType(ref.Ref{}, 0))), false},
-            },
-            []types.Field{},
-        ),
-    }, []ref.Ref{})
-    types.RegisterPackage(&p)
-}
-
-// Commit
-
-type Commit struct {
-    _value   types.Value
-    _parents types.Set
-
-    ref *ref.Ref
-}
-
-func NewCommit() Commit {
-    return Commit{
-        _value:   types.Bool(false),
-        _parents: NewSetOfRefOfCommit(),
-
-        ref: &ref.Ref{},
-    }
-}

 var __typeForCommit *types.Type
+var __typeDef *types.Type

-func (m Commit) Type() *types.Type {
-    return __typeForCommit
-}
+const (
+    ParentsField = "parents"
+    ValueField   = "value"
+)

 func init() {
-    __typeForCommit = types.MakeType(ref.Parse("sha1-d8bf281149d5474072c87be999e961184bd9ec99"), 0)
-    types.RegisterStruct(__typeForCommit, builderForCommit, readerForCommit)
+    structName := "Commit"
+
+    fieldTypes := []types.Field{
+        types.Field{Name: ValueField, T: types.MakePrimitiveType(types.ValueKind)},
+        types.Field{Name: ParentsField, T: types.MakeSetType(types.MakeRefType(types.MakeType(ref.Ref{}, 0)))},
+    }
+
+    typeDef := types.MakeStructType(structName, fieldTypes, []types.Field{})
+    pkg := types.NewPackage([]*types.Type{typeDef}, []ref.Ref{})
+    __typeDef = pkg.Types()[0]
+    pkgRef := types.RegisterPackage(&pkg)
+    __typeForCommit = types.MakeType(pkgRef, 0)
 }

-func builderForCommit(values []types.Value) types.Value {
-    i := 0
-    s := Commit{ref: &ref.Ref{}}
-    s._value = values[i]
-    i++
-    s._parents = values[i].(types.Set)
-    i++
-    return s
-}
+func NewCommit() types.Struct {
+    initialFields := map[string]types.Value{
+        ValueField:   types.NewString(""),
+        ParentsField: NewSetOfRefOfCommit(),
+    }

-func readerForCommit(v types.Value) []types.Value {
-    values := []types.Value{}
-    s := v.(Commit)
-    values = append(values, s._value)
-    values = append(values, s._parents)
-    return values
-}
-
-func (s Commit) Equals(other types.Value) bool {
-    return other != nil && __typeForCommit.Equals(other.Type()) && s.Ref() == other.Ref()
-}
-
-func (s Commit) Ref() ref.Ref {
-    return types.EnsureRef(s.ref, s)
-}
-
-func (s Commit) Chunks() (chunks []types.RefBase) {
-    chunks = append(chunks, __typeForCommit.Chunks()...)
-    chunks = append(chunks, s._value.Chunks()...)
-    chunks = append(chunks, s._parents.Chunks()...)
-    return
-}
-
-func (s Commit) ChildValues() (ret []types.Value) {
-    ret = append(ret, s._value)
-    ret = append(ret, s._parents)
-    return
-}
-
-func (s Commit) Value() types.Value {
-    return s._value
-}
-
-func (s Commit) SetValue(val types.Value) Commit {
-    s._value = val
-    s.ref = &ref.Ref{}
-    return s
-}
-
-func (s Commit) Parents() types.Set {
-    return s._parents
-}
-
-func (s Commit) SetParents(val types.Set) Commit {
-    s._parents = val
-    s.ref = &ref.Ref{}
-    return s
+    return types.NewStruct(__typeForCommit, __typeDef, initialFields)
 }

 func typeForMapOfStringToRefOfCommit() *types.Type {
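The new init() above follows a general recipe for defining a Noms struct type at runtime: build a struct type-def, wrap it in a package, register the package, then reference the struct by package ref and ordinal. Below is a hedged sketch of the same recipe for a hypothetical one-field struct, using only constructors that appear in this file; the "Example" name and "payload" field are made up for illustration.

```go
package example

import (
    "github.com/attic-labs/noms/ref"
    "github.com/attic-labs/noms/types"
)

var (
    exampleType    *types.Type // unresolved type pointing at the registered package
    exampleTypeDef *types.Type // the struct definition itself
)

func init() {
    fields := []types.Field{
        types.Field{Name: "payload", T: types.MakePrimitiveType(types.ValueKind)},
    }
    typeDef := types.MakeStructType("Example", fields, []types.Field{})
    pkg := types.NewPackage([]*types.Type{typeDef}, []ref.Ref{})
    exampleTypeDef = pkg.Types()[0]
    pkgRef := types.RegisterPackage(&pkg)
    exampleType = types.MakeType(pkgRef, 0) // ordinal 0: first (only) type in the package
}

// newExample mirrors the new NewCommit(): instances are plain types.Struct values.
func newExample(payload types.Value) types.Struct {
    return types.NewStruct(exampleType, exampleTypeDef, map[string]types.Value{
        "payload": payload,
    })
}
```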
@@ -16,16 +16,16 @@ type DataStore interface {
     io.Closer

     // MaybeHead returns the current Head Commit of this Datastore, which contains the current root of the DataStore's value tree, if available. If not, it returns a new Commit and 'false'.
-    MaybeHead(datasetID string) (Commit, bool)
+    MaybeHead(datasetID string) (types.Struct, bool)

     // Head returns the current head Commit, which contains the current root of the DataStore's value tree.
-    Head(datasetID string) Commit
+    Head(datasetID string) types.Struct

     // Datasets returns the root of the datastore which is a MapOfStringToRefOfCommit where string is a datasetID.
     Datasets() types.Map

     // Commit updates the Commit that datasetID in this datastore points at. All Values that have been written to this DataStore are guaranteed to be persistent after Commit(). If the update cannot be performed, e.g., because of a conflict, error will non-nil. The newest snapshot of the datastore is always returned.
-    Commit(datasetID string, commit Commit) (DataStore, error)
+    Commit(datasetID string, commit types.Struct) (DataStore, error)

     // Delete removes the Dataset named datasetID from the map at the root of the DataStore. The Dataset data is not necessarily cleaned up at this time, but may be garbage collected in the future. If the update cannot be performed, e.g., because of a conflict, error will non-nil. The newest snapshot of the datastore is always returned.
     Delete(datasetID string) (DataStore, error)
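A small sketch of the Head/MaybeHead contract as it reads after this change: MaybeHead reports absence with a false second return (handing back a fresh NewCommit()), while Head asserts that the dataset exists. The helper name headValue is illustrative only.

```go
package example

import (
    "github.com/attic-labs/noms/datas"
    "github.com/attic-labs/noms/types"
)

// headValue returns the value stored at a dataset's head, or false if the
// dataset has no head yet.
func headValue(store datas.DataStore, datasetID string) (types.Value, bool) {
    head, ok := store.MaybeHead(datasetID)
    if !ok {
        return nil, false
    }
    return head.Get(datas.ValueField), true
}
```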
@@ -26,14 +26,14 @@ func newDataStoreCommon(bs types.BatchStore, rt chunks.RootTracker) dataStoreCom
     return dataStoreCommon{ValueStore: types.NewValueStore(bs), bs: bs, rt: rt, rootRef: rt.Root()}
 }

-func (ds *dataStoreCommon) MaybeHead(datasetID string) (Commit, bool) {
+func (ds *dataStoreCommon) MaybeHead(datasetID string) (types.Struct, bool) {
     if r, ok := ds.Datasets().MaybeGet(types.NewString(datasetID)); ok {
-        return r.(types.Ref).TargetValue(ds).(Commit), true
+        return r.(types.Ref).TargetValue(ds).(types.Struct), true
     }
     return NewCommit(), false
 }

-func (ds *dataStoreCommon) Head(datasetID string) Commit {
+func (ds *dataStoreCommon) Head(datasetID string) types.Struct {
     c, ok := ds.MaybeHead(datasetID)
     d.Chk.True(ok, "DataStore has no Head.")
     return c
@@ -57,12 +57,12 @@ func (ds *dataStoreCommon) datasetsFromRef(datasetsRef ref.Ref) *types.Map {
     return &c
 }

-func (ds *dataStoreCommon) commit(datasetID string, commit Commit) error {
+func (ds *dataStoreCommon) commit(datasetID string, commit types.Struct) error {
     return ds.doCommit(datasetID, commit)
 }

 // doCommit manages concurrent access the single logical piece of mutable state: the current Root. doCommit is optimistic in that it is attempting to update head making the assumption that currentRootRef is the ref of the current head. The call to UpdateRoot below will return an 'ErrOptimisticLockFailed' error if that assumption fails (e.g. because of a race with another writer) and the entire algorithm must be tried again. This method will also fail and return an 'ErrMergeNeeded' error if the |commit| is not a descendent of the current dataset head
-func (ds *dataStoreCommon) doCommit(datasetID string, commit Commit) error {
+func (ds *dataStoreCommon) doCommit(datasetID string, commit types.Struct) error {
     currentRootRef, currentDatasets := ds.getRootAndDatasets()

     // TODO: This Commit will be orphaned if the tryUpdateRoot() below fails
@@ -117,9 +117,9 @@ func (ds *dataStoreCommon) tryUpdateRoot(currentDatasets types.Map, currentRootR
     return
 }

-func descendsFrom(commit Commit, currentHeadRef types.Ref, vr types.ValueReader) bool {
+func descendsFrom(commit types.Struct, currentHeadRef types.Ref, vr types.ValueReader) bool {
     // BFS because the common case is that the ancestor is only a step or two away
-    ancestors := commit.Parents()
+    ancestors := commit.Get(ParentsField).(types.Set)
     for !ancestors.Has(currentHeadRef) {
         if ancestors.Empty() {
             return false
@@ -133,8 +133,8 @@ func getAncestors(commits types.Set, vr types.ValueReader) types.Set {
     ancestors := NewSetOfRefOfCommit()
     commits.IterAll(func(v types.Value) {
         r := v.(types.Ref)
-        c := r.TargetValue(vr).(Commit)
-        ancestors = ancestors.Union(c.Parents())
+        c := r.TargetValue(vr).(types.Struct)
+        ancestors = ancestors.Union(c.Get(ParentsField).(types.Set))
     })
     return ancestors
 }
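The doCommit comment above spells out the optimistic contract: a race with another writer surfaces as an error, and the DataStore (or Dataset) handed back is the newest snapshot, so callers retry against it. That is the pattern TestTwoClientsWithNonEmptyDataset exercises further down. A hedged sketch of such a client-side loop follows; commitWithRetry and maxTries are illustrative names, not part of this PR.

```go
package example

import (
    "github.com/attic-labs/noms/dataset"
    "github.com/attic-labs/noms/types"
)

// commitWithRetry retries an optimistic commit a bounded number of times.
// On conflict, Commit returns the freshest Dataset, so the next attempt is
// made against the new head.
func commitWithRetry(ds dataset.Dataset, v types.Value, maxTries int) (dataset.Dataset, error) {
    var err error
    for i := 0; i < maxTries; i++ {
        ds, err = ds.Commit(v)
        if err == nil {
            return ds, nil
        }
    }
    return ds, err
}
```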
@@ -59,7 +59,8 @@ func (suite *DataStoreSuite) TestReadWriteCache() {
     var v types.Value = types.Bool(true)
-    suite.NotEqual(ref.Ref{}, suite.ds.WriteValue(v))
+    r := suite.ds.WriteValue(v).TargetRef()
-    newDs, err := suite.ds.Commit("foo", NewCommit().SetValue(v))
+    commit := NewCommit()
+    newDs, err := suite.ds.Commit("foo", commit.Set(ValueField, v))
     suite.NoError(err)
     suite.Equal(1, suite.cs.Writes-writesOnCommit)

@@ -91,7 +92,7 @@ func (suite *DataStoreSuite) TestReadValueTypeRefPanics_BUG1121() {
     suite.NotEqual(ref.Ref{}, suite.ds.WriteValue(b))

     datasetID := "ds1"
-    aCommit := NewCommit().SetValue(types.NewRef(b.Ref()))
+    aCommit := NewCommit().Set(ValueField, types.NewRef(b.Ref()))
     ds2, err := suite.ds.Commit(datasetID, aCommit)
     suite.NoError(err)

@@ -113,7 +114,7 @@ func (suite *DataStoreSuite) TestDataStoreCommit() {

     // |a|
     a := types.NewString("a")
-    aCommit := NewCommit().SetValue(a)
+    aCommit := NewCommit().Set(ValueField, a)
     ds2, err := suite.ds.Commit(datasetID, aCommit)
     suite.NoError(err)

@@ -123,37 +124,37 @@ func (suite *DataStoreSuite) TestDataStoreCommit() {

     // The new datastore has |a|.
     aCommit1 := ds2.Head(datasetID)
-    suite.True(aCommit1.Value().Equals(a))
+    suite.True(aCommit1.Get(ValueField).Equals(a))
     suite.ds = ds2

     // |a| <- |b|
     b := types.NewString("b")
-    bCommit := NewCommit().SetValue(b).SetParents(NewSetOfRefOfCommit().Insert(types.NewTypedRefFromValue(aCommit)))
+    bCommit := NewCommit().Set(ValueField, b).Set(ParentsField, NewSetOfRefOfCommit().Insert(types.NewTypedRefFromValue(aCommit)))
     suite.ds, err = suite.ds.Commit(datasetID, bCommit)
     suite.NoError(err)
-    suite.True(suite.ds.Head(datasetID).Value().Equals(b))
+    suite.True(suite.ds.Head(datasetID).Get(ValueField).Equals(b))

     // |a| <- |b|
     //   \----|c|
     // Should be disallowed.
     c := types.NewString("c")
-    cCommit := NewCommit().SetValue(c)
+    cCommit := NewCommit().Set(ValueField, c)
     suite.ds, err = suite.ds.Commit(datasetID, cCommit)
     suite.Error(err)
-    suite.True(suite.ds.Head(datasetID).Value().Equals(b))
+    suite.True(suite.ds.Head(datasetID).Get(ValueField).Equals(b))

     // |a| <- |b| <- |d|
     d := types.NewString("d")
-    dCommit := NewCommit().SetValue(d).SetParents(NewSetOfRefOfCommit().Insert(types.NewTypedRefFromValue(bCommit)))
+    dCommit := NewCommit().Set(ValueField, d).Set(ParentsField, NewSetOfRefOfCommit().Insert(types.NewTypedRefFromValue(bCommit)))
     suite.ds, err = suite.ds.Commit(datasetID, dCommit)
     suite.NoError(err)
-    suite.True(suite.ds.Head(datasetID).Value().Equals(d))
+    suite.True(suite.ds.Head(datasetID).Get(ValueField).Equals(d))

     // Attempt to recommit |b| with |a| as parent.
     // Should be disallowed.
     suite.ds, err = suite.ds.Commit(datasetID, bCommit)
     suite.Error(err)
-    suite.True(suite.ds.Head(datasetID).Value().Equals(d))
+    suite.True(suite.ds.Head(datasetID).Get(ValueField).Equals(d))

     // Add a commit to a different datasetId
     _, err = suite.ds.Commit("otherDs", aCommit)
@@ -174,21 +175,21 @@ func (suite *DataStoreSuite) TestDataStoreDelete() {
     // |a|
     var err error
     a := types.NewString("a")
-    suite.ds, err = suite.ds.Commit(datasetID1, NewCommit().SetValue(a))
+    suite.ds, err = suite.ds.Commit(datasetID1, NewCommit().Set(ValueField, a))
     suite.NoError(err)
-    suite.True(suite.ds.Head(datasetID1).Value().Equals(a))
+    suite.True(suite.ds.Head(datasetID1).Get(ValueField).Equals(a))

     // ds1; |a|, ds2: |b|
     b := types.NewString("b")
-    suite.ds, err = suite.ds.Commit(datasetID2, NewCommit().SetValue(b))
+    suite.ds, err = suite.ds.Commit(datasetID2, NewCommit().Set(ValueField, b))
     suite.NoError(err)
-    suite.True(suite.ds.Head(datasetID2).Value().Equals(b))
+    suite.True(suite.ds.Head(datasetID2).Get(ValueField).Equals(b))

     suite.ds, err = suite.ds.Delete(datasetID1)
     suite.NoError(err)
-    suite.True(suite.ds.Head(datasetID2).Value().Equals(b))
+    suite.True(suite.ds.Head(datasetID2).Get(ValueField).Equals(b))
     h, present := suite.ds.MaybeHead(datasetID1)
-    suite.False(present, "Dataset %s should not be present, but head is %v", datasetID1, h.Value())
+    suite.False(present, "Dataset %s should not be present, but head is %v", datasetID1, h.Get(ValueField))

     // Get a fresh datastore, and verify that only ds1 is present
     newDs := suite.makeDs(suite.cs)
@@ -207,22 +208,22 @@ func (suite *DataStoreSuite) TestDataStoreDeleteConcurrent() {

     // |a|
     a := types.NewString("a")
-    aCommit := NewCommit().SetValue(a)
+    aCommit := NewCommit().Set(ValueField, a)
     suite.ds, err = suite.ds.Commit(datasetID, aCommit)
     suite.NoError(err)

     // |a| <- |b|
     b := types.NewString("b")
-    bCommit := NewCommit().SetValue(b).SetParents(NewSetOfRefOfCommit().Insert(types.NewTypedRefFromValue(aCommit)))
+    bCommit := NewCommit().Set(ValueField, b).Set(ParentsField, NewSetOfRefOfCommit().Insert(types.NewTypedRefFromValue(aCommit)))
     ds2, err := suite.ds.Commit(datasetID, bCommit)
     suite.NoError(err)
-    suite.True(suite.ds.Head(datasetID).Value().Equals(a))
-    suite.True(ds2.Head(datasetID).Value().Equals(b))
+    suite.True(suite.ds.Head(datasetID).Get(ValueField).Equals(a))
+    suite.True(ds2.Head(datasetID).Get(ValueField).Equals(b))

     suite.ds, err = suite.ds.Delete(datasetID)
     suite.NoError(err)
     h, present := suite.ds.MaybeHead(datasetID)
-    suite.False(present, "Dataset %s should not be present, but head is %v", datasetID, h.Value())
+    suite.False(present, "Dataset %s should not be present, but head is %v", datasetID, h.Get(ValueField))
     h, present = ds2.MaybeHead(datasetID)
     suite.True(present, "Dataset %s should be present", datasetID)

@@ -240,13 +241,13 @@ func (suite *DataStoreSuite) TestDataStoreConcurrency() {
     // Setup:
     // |a| <- |b|
     a := types.NewString("a")
-    aCommit := NewCommit().SetValue(a)
+    aCommit := NewCommit().Set(ValueField, a)
     suite.ds, err = suite.ds.Commit(datasetID, aCommit)
     b := types.NewString("b")
-    bCommit := NewCommit().SetValue(b).SetParents(NewSetOfRefOfCommit().Insert(types.NewTypedRefFromValue(aCommit)))
+    bCommit := NewCommit().Set(ValueField, b).Set(ParentsField, NewSetOfRefOfCommit().Insert(types.NewTypedRefFromValue(aCommit)))
     suite.ds, err = suite.ds.Commit(datasetID, bCommit)
     suite.NoError(err)
-    suite.True(suite.ds.Head(datasetID).Value().Equals(b))
+    suite.True(suite.ds.Head(datasetID).Get(ValueField).Equals(b))

     // Important to create this here.
     ds2 := suite.makeDs(suite.cs)
@@ -254,17 +255,17 @@ func (suite *DataStoreSuite) TestDataStoreConcurrency() {
     // Change 1:
     // |a| <- |b| <- |c|
     c := types.NewString("c")
-    cCommit := NewCommit().SetValue(c).SetParents(NewSetOfRefOfCommit().Insert(types.NewTypedRefFromValue(bCommit)))
+    cCommit := NewCommit().Set(ValueField, c).Set(ParentsField, NewSetOfRefOfCommit().Insert(types.NewTypedRefFromValue(bCommit)))
     suite.ds, err = suite.ds.Commit(datasetID, cCommit)
     suite.NoError(err)
-    suite.True(suite.ds.Head(datasetID).Value().Equals(c))
+    suite.True(suite.ds.Head(datasetID).Get(ValueField).Equals(c))

     // Change 2:
     // |a| <- |b| <- |e|
     // Should be disallowed, DataStore returned by Commit() should have |c| as Head.
     e := types.NewString("e")
-    eCommit := NewCommit().SetValue(e).SetParents(NewSetOfRefOfCommit().Insert(types.NewTypedRefFromValue(bCommit)))
+    eCommit := NewCommit().Set(ValueField, e).Set(ParentsField, NewSetOfRefOfCommit().Insert(types.NewTypedRefFromValue(bCommit)))
     ds2, err = ds2.Commit(datasetID, eCommit)
     suite.Error(err)
-    suite.True(ds2.Head(datasetID).Value().Equals(c))
+    suite.True(ds2.Head(datasetID).Get(ValueField).Equals(c))
 }
@@ -21,7 +21,7 @@ func newLocalDataStore(cs chunks.ChunkStore) *LocalDataStore {
     }
 }

-func (lds *LocalDataStore) Commit(datasetID string, commit Commit) (DataStore, error) {
+func (lds *LocalDataStore) Commit(datasetID string, commit types.Struct) (DataStore, error) {
     err := lds.commit(datasetID, commit)
     lds.Flush()
     return newLocalDataStore(lds.cs), err
@@ -26,7 +26,7 @@ func (rds *RemoteDataStoreClient) batchStore() types.BatchStore {
     return rds.bs
 }

-func (rds *RemoteDataStoreClient) Commit(datasetID string, commit Commit) (DataStore, error) {
+func (rds *RemoteDataStoreClient) Commit(datasetID string, commit types.Struct) (DataStore, error) {
     err := rds.commit(datasetID, commit)
     rds.Flush()
     return &RemoteDataStoreClient{newDataStoreCommon(rds.bs, rds.rt)}, err
@@ -28,12 +28,12 @@ func (ds *Dataset) ID() string {
 }

 // MaybeHead returns the current Head Commit of this Dataset, which contains the current root of the Dataset's value tree, if available. If not, it returns a new Commit and 'false'.
-func (ds *Dataset) MaybeHead() (datas.Commit, bool) {
+func (ds *Dataset) MaybeHead() (types.Struct, bool) {
     return ds.Store().MaybeHead(ds.id)
 }

 // Head returns the current head Commit, which contains the current root of the Dataset's value tree.
-func (ds *Dataset) Head() datas.Commit {
+func (ds *Dataset) Head() types.Struct {
     c, ok := ds.MaybeHead()
     d.Chk.True(ok, "Dataset \"%s\" does not exist", ds.id)
     return c
@@ -52,7 +52,7 @@ func (ds *Dataset) Commit(v types.Value) (Dataset, error) {
 // CommitWithParents updates the commit that a dataset points at. The new Commit is constructed using v and p.
 // If the update cannot be performed, e.g., because of a conflict, CommitWithParents returns an 'ErrMergeNeeded' error and the current snapshot of the dataset so that the client can merge the changes and try again.
 func (ds *Dataset) CommitWithParents(v types.Value, p types.Set) (Dataset, error) {
-    newCommit := datas.NewCommit().SetParents(p).SetValue(v)
+    newCommit := datas.NewCommit().Set(datas.ParentsField, p).Set(datas.ValueField, v)
     store, err := ds.Store().Commit(ds.id, newCommit)
     return Dataset{store, ds.id}, err
 }
@@ -87,12 +87,12 @@ func (ds *Dataset) pull(source datas.DataStore, sourceRef types.Ref, concurrency
     return sink, err
 }

-func (ds *Dataset) validateRefAsCommit(r types.Ref) datas.Commit {
+func (ds *Dataset) validateRefAsCommit(r types.Ref) types.Struct {
     v := ds.store.ReadValue(r.TargetRef())

     d.Exp.NotNil(v, "%v cannot be found", r)
     d.Exp.True(v.Type().Equals(datas.NewCommit().Type()), "Not a Commit: %+v", v)
-    return v.(datas.Commit)
+    return v.(types.Struct)
 }

 // setNewHead takes the Ref of the desired new Head of ds, the chunk for which should already exist
@@ -103,7 +103,7 @@ func (ds *Dataset) validateRefAsCommit(r types.Ref) datas.Commit {
 // again using this new Dataset.
 func (ds *Dataset) setNewHead(newHeadRef types.Ref) (Dataset, error) {
     commit := ds.validateRefAsCommit(newHeadRef)
-    return ds.CommitWithParents(commit.Value(), commit.Parents())
+    return ds.CommitWithParents(commit.Get(datas.ValueField), commit.Get(datas.ParentsField).(types.Set))
 }

 type DatasetFlags struct {
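setNewHead above captures the idiom for pointing a dataset at a commit that already exists in a store: read the commit struct, then replay its value and parents through CommitWithParents. A hedged sketch of the same steps from outside the package; adoptHead is an illustrative name, and it assumes the store exposes ReadValue as the code above does.

```go
package example

import (
    "github.com/attic-labs/noms/datas"
    "github.com/attic-labs/noms/dataset"
    "github.com/attic-labs/noms/types"
)

// adoptHead points ds at the commit referenced by newHeadRef by replaying
// its value and parents through CommitWithParents, as setNewHead does.
func adoptHead(ds dataset.Dataset, store datas.DataStore, newHeadRef types.Ref) (dataset.Dataset, error) {
    commit := store.ReadValue(newHeadRef.TargetRef()).(types.Struct)
    return ds.CommitWithParents(
        commit.Get(datas.ValueField),
        commit.Get(datas.ParentsField).(types.Set),
    )
}
```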
@@ -25,10 +25,10 @@ func TestDatasetCommitTracker(t *testing.T) {
     ds2, err = ds2.Commit(ds2Commit)
     assert.NoError(err)

-    assert.EqualValues(ds1Commit, ds1.Head().Value())
-    assert.EqualValues(ds2Commit, ds2.Head().Value())
-    assert.False(ds2.Head().Value().Equals(ds1Commit))
-    assert.False(ds1.Head().Value().Equals(ds2Commit))
+    assert.EqualValues(ds1Commit, ds1.Head().Get(datas.ValueField))
+    assert.EqualValues(ds2Commit, ds2.Head().Get(datas.ValueField))
+    assert.False(ds2.Head().Get(datas.ValueField).Equals(ds1Commit))
+    assert.False(ds1.Head().Get(datas.ValueField).Equals(ds2Commit))

     assert.Equal("sha1-6ddf39e2ccd452d06e610713e0261cd9b31d5681", cs.Root().String())
 }
@@ -50,27 +50,27 @@ func TestExplicitBranchUsingDatasets(t *testing.T) {
     a := types.NewString("a")
     ds1, err := ds1.Commit(a)
     assert.NoError(err)
-    assert.True(ds1.Head().Value().Equals(a))
+    assert.True(ds1.Head().Get(datas.ValueField).Equals(a))

     // ds1: |a|
     //   \ds2
     ds2 := newDS(id2, cs)
-    ds2, err = ds2.Commit(ds1.Head().Value())
+    ds2, err = ds2.Commit(ds1.Head().Get(datas.ValueField))
     assert.NoError(err)
-    assert.True(ds2.Head().Value().Equals(a))
+    assert.True(ds2.Head().Get(datas.ValueField).Equals(a))

     // ds1: |a| <- |b|
     b := types.NewString("b")
     ds1, err = ds1.Commit(b)
     assert.NoError(err)
-    assert.True(ds1.Head().Value().Equals(b))
+    assert.True(ds1.Head().Get(datas.ValueField).Equals(b))

     // ds1: |a| <- |b|
     //   \ds2 <- |c|
     c := types.NewString("c")
     ds2, err = ds2.Commit(c)
     assert.NoError(err)
-    assert.True(ds2.Head().Value().Equals(c))
+    assert.True(ds2.Head().Get(datas.ValueField).Equals(c))

     // ds1: |a| <- |b| <--|d|
     //   \ds2 <- |c| <--/
@@ -80,11 +80,11 @@ func TestExplicitBranchUsingDatasets(t *testing.T) {
     d := types.NewString("d")
     ds2, err = ds2.CommitWithParents(d, mergeParents)
     assert.NoError(err)
-    assert.True(ds2.Head().Value().Equals(d))
+    assert.True(ds2.Head().Get(datas.ValueField).Equals(d))

     ds1, err = ds1.CommitWithParents(d, mergeParents)
     assert.NoError(err)
-    assert.True(ds1.Head().Value().Equals(d))
+    assert.True(ds1.Head().Get(datas.ValueField).Equals(d))
 }

 func TestTwoClientsWithEmptyDataset(t *testing.T) {
@@ -99,7 +99,7 @@ func TestTwoClientsWithEmptyDataset(t *testing.T) {
     a := types.NewString("a")
     dsx, err := dsx.Commit(a)
     assert.NoError(err)
-    assert.True(dsx.Head().Value().Equals(a))
+    assert.True(dsx.Head().Get(datas.ValueField).Equals(a))

     // dsy: || -> |b|
     _, ok := dsy.MaybeHead()
@@ -111,7 +111,7 @@ func TestTwoClientsWithEmptyDataset(t *testing.T) {
     // dsy: |a| -> |b|
     dsy, err = dsy.Commit(b)
     assert.NoError(err)
-    assert.True(dsy.Head().Value().Equals(b))
+    assert.True(dsy.Head().Get(datas.ValueField).Equals(b))
 }

 func TestTwoClientsWithNonEmptyDataset(t *testing.T) {
@@ -125,28 +125,28 @@ func TestTwoClientsWithNonEmptyDataset(t *testing.T) {
     ds1 := newDS(id1, cs)
     ds1, err := ds1.Commit(a)
     assert.NoError(err)
-    assert.True(ds1.Head().Value().Equals(a))
+    assert.True(ds1.Head().Get(datas.ValueField).Equals(a))
     }

     dsx := newDS(id1, cs)
     dsy := newDS(id1, cs)

     // dsx: |a| -> |b|
-    assert.True(dsx.Head().Value().Equals(a))
+    assert.True(dsx.Head().Get(datas.ValueField).Equals(a))
     b := types.NewString("b")
     dsx, err := dsx.Commit(b)
     assert.NoError(err)
-    assert.True(dsx.Head().Value().Equals(b))
+    assert.True(dsx.Head().Get(datas.ValueField).Equals(b))

     // dsy: |a| -> |c|
-    assert.True(dsy.Head().Value().Equals(a))
+    assert.True(dsy.Head().Get(datas.ValueField).Equals(a))
     c := types.NewString("c")
     dsy, err = dsy.Commit(c)
     assert.Error(err)
-    assert.True(dsy.Head().Value().Equals(b))
+    assert.True(dsy.Head().Get(datas.ValueField).Equals(b))
     // Commit failed, but dsy now has latest head, so we should be able to just try again.
     // dsy: |b| -> |c|
     dsy, err = dsy.Commit(c)
     assert.NoError(err)
-    assert.True(dsy.Head().Value().Equals(c))
+    assert.True(dsy.Head().Get(datas.ValueField).Equals(c))
 }
@@ -62,7 +62,7 @@ func main() {
     pkgDS := dataset.NewDataset(ds, *pkgDSFlag)
     // Ensure that, if pkgDS has stuff in it, its head is a SetOfRefOfPackage.
     if h, ok := pkgDS.MaybeHead(); ok {
-        d.Chk.IsType(types.NewSetOfRefOfPackage(), h.Value())
+        d.Chk.IsType(types.NewSetOfRefOfPackage(), h.Get(datas.ValueField))
     }

     localPkgs := refSet{}
@@ -160,7 +160,7 @@ func buildSetOfRefOfPackage(pkg pkg.Parsed, deps depsMap, ds dataset.Dataset) ty
     // Can do better once generated collections implement types.Value.
     s := types.NewSetOfRefOfPackage()
     if h, ok := ds.MaybeHead(); ok {
-        s = h.Value().(types.Set)
+        s = h.Get(datas.ValueField).(types.Set)
     }
     for _, dep := range deps {
         // Writing the deps into ds should be redundant at this point, but do it to be sure.

@@ -111,7 +111,7 @@ func TestCommitNewPackages(t *testing.T) {
     p := parsePackageFile("name", inFile, pkgDS)
     localPkgs := refSet{p.Ref(): true}
     pkgDS = generate("name", inFile, filepath.Join(dir, "out.js"), dir, map[string]bool{}, p, localPkgs, pkgDS)
-    s := pkgDS.Head().Value().(types.Set)
+    s := pkgDS.Head().Get(datas.ValueField).(types.Set)
     assert.EqualValues(1, s.Len())
     tr := s.First().(types.Ref).TargetValue(ds).(types.Package).Types()[0]
     assert.EqualValues(types.StructKind, tr.Kind())
@@ -6,12 +6,18 @@ import (
 )

 // Type defines and describes Noms types, both custom and built-in.
-// StructKind types, and possibly others if we do type aliases, will have a Name(). Named types are 'exported' in that they can be addressed from other type packages.
-// Desc provides more details of the type. It may contain only a types.NomsKind, in the case of primitives, or it may contain additional information -- e.g. element Types for compound type specializations, field descriptions for structs, etc. Either way, checking Kind() allows code to understand how to interpret the rest of the data.
+// StructKind types, and possibly others if we do type aliases, will have a Name(). Named types are
+// 'exported' in that they can be addressed from other type packages.
+// Desc provides more details of the type. It may contain only a types.NomsKind, in the case of
+// primitives, or it may contain additional information -- e.g. element Types for compound type
+// specializations, field descriptions for structs, etc. Either way, checking Kind() allows code
+// to understand how to interpret the rest of the data.
 // If Kind() refers to a primitive, then Desc has no more info.
 // If Kind() refers to List, Map, Set or Ref, then Desc is a list of Types describing the element type(s).
 // If Kind() refers to Struct, then Desc contains a []Field and Choices.
-// If Kind() refers to an UnresolvedKind, then Desc contains a PackageRef, which is the Ref of the package where the type definition is defined. The ordinal, if not -1, is the index into the Types list of the package. If the Name is set then the ordinal needs to be found.
+// If Kind() refers to an UnresolvedKind, then Desc contains a PackageRef, which is the Ref of the
+// package where the type definition is defined. The ordinal, if not -1, is the index into the
+// Types list of the package. If the Name is set then the ordinal needs to be found.

 type Type struct {
     name name
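As a concrete instance of the Kind()/Desc split described in the comment above: a struct type built with MakeStructType reports StructKind, while a primitive type carries nothing beyond its kind. A small hedged sketch (the "Example" type-def is made up):

```go
package example

import "github.com/attic-labs/noms/types"

// kinds shows how code branches on Kind(), per the comment above.
func kinds() (bool, bool) {
    structType := types.MakeStructType("Example", []types.Field{
        types.Field{Name: "value", T: types.MakePrimitiveType(types.ValueKind)},
    }, []types.Field{})
    primType := types.MakePrimitiveType(types.ValueKind)
    return structType.Kind() == types.StructKind, primType.Kind() == types.ValueKind
}
```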