mirror of https://github.com/dolthub/dolt.git (synced 2026-02-10 10:30:57 -06:00)
@@ -4,18 +4,18 @@ import (
 	"bytes"
 
 	"github.com/attic-labs/noms/d"
-	"github.com/attic-labs/noms/ref"
+	"github.com/attic-labs/noms/hash"
 )
 
 // Chunk is a unit of stored data in noms
 type Chunk struct {
-	r    ref.Ref
+	r    hash.Hash
 	data []byte
 }
 
 var EmptyChunk = Chunk{}
 
-func (c Chunk) Ref() ref.Ref {
+func (c Chunk) Hash() hash.Hash {
 	return c.r
 }
 
@@ -29,12 +29,12 @@ func (c Chunk) IsEmpty() bool {
 
 // NewChunk creates a new Chunk backed by data. This means that the returned Chunk has ownership of this slice of memory.
 func NewChunk(data []byte) Chunk {
-	r := ref.FromData(data)
+	r := hash.FromData(data)
 	return Chunk{r, data}
 }
 
-// NewChunkWithRef creates a new chunk with a known ref. The ref is not re-calculated or verified. This should obviously only be used in cases where the caller already knows the specified ref is correct.
-func NewChunkWithRef(r ref.Ref, data []byte) Chunk {
+// NewChunkWithHash creates a new chunk with a known hash. The hash is not re-calculated or verified. This should obviously only be used in cases where the caller already knows the specified hash is correct.
+func NewChunkWithHash(r hash.Hash, data []byte) Chunk {
 	return Chunk{r, data}
 }
 
@@ -52,7 +52,7 @@ func NewChunkWriter() *ChunkWriter {
 }
 
 func (w *ChunkWriter) Write(data []byte) (int, error) {
-	d.Chk.NotNil(w.buffer, "Write() cannot be called after Ref() or Close().")
+	d.Chk.NotNil(w.buffer, "Write() cannot be called after Hash() or Close().")
 	size, err := w.buffer.Write(data)
 	d.Chk.NoError(err)
 	return size, nil
@@ -64,7 +64,7 @@ func (w *ChunkWriter) Chunk() Chunk {
 	return w.c
 }
 
-// Close() closes computes the ref and Puts it into the ChunkSink Note: The Write() method never returns an error. Instead, like other noms interfaces, errors are reported via panic.
+// Close() closes computes the hash and Puts it into the ChunkSink Note: The Write() method never returns an error. Instead, like other noms interfaces, errors are reported via panic.
 func (w *ChunkWriter) Close() error {
 	if w.buffer == nil {
 		return nil
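Aside, for readers of this diff: the Ref to Hash rename is nominal only. A Chunk's identity is still the sha1 digest of its bytes, rendered as "sha1-<hex>". A minimal standalone sketch (ours, not part of the change) that reproduces the test vector used by the suites later in this diff:

```go
package main

import (
	"crypto/sha1"
	"fmt"
)

func main() {
	// hash.FromData boils down to a sha1 over the chunk's bytes.
	digest := sha1.Sum([]byte("abc"))
	fmt.Printf("sha1-%x\n", digest) // sha1-a9993e364706816aba3e25717850c26c9cd0d89d
}
```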
@@ -8,7 +8,7 @@ import (
 	"sync"
 
 	"github.com/attic-labs/noms/d"
-	"github.com/attic-labs/noms/ref"
+	"github.com/attic-labs/noms/hash"
 )
 
 /*
@@ -19,7 +19,7 @@ import (
   Chunk N
 
   Chunk:
-    Ref   // 20-byte sha1 hash
+    Hash  // 20-byte sha1 hash
     Len   // 4-byte int
     Data  // len(Data) == Len
 */
@@ -36,7 +36,7 @@ func NewSerializer(writer io.Writer) ChunkSink {
 		for chunk := range s.chs {
 			d.Chk.NotNil(chunk.Data)
 
-			digest := chunk.Ref().Digest()
+			digest := chunk.Hash().Digest()
 			n, err := io.Copy(s.writer, bytes.NewReader(digest[:]))
 			d.Chk.NoError(err)
 			d.Chk.Equal(int64(sha1.Size), n)
@@ -119,14 +119,14 @@ func DeserializeToChan(reader io.Reader, chunkChan chan<- Chunk) {
 }
 
 func deserializeChunk(reader io.Reader) Chunk {
-	digest := ref.Sha1Digest{}
+	digest := hash.Sha1Digest{}
 	n, err := io.ReadFull(reader, digest[:])
 	if err == io.EOF {
 		return EmptyChunk
 	}
 	d.Chk.NoError(err)
 	d.Chk.Equal(int(sha1.Size), n)
-	r := ref.New(digest)
+	h := hash.New(digest)
 
 	chunkSize := uint32(0)
 	err = binary.Read(reader, binary.BigEndian, &chunkSize)
@@ -137,6 +137,6 @@ func deserializeChunk(reader io.Reader) Chunk {
 	d.Chk.NoError(err)
 	d.Chk.Equal(int64(chunkSize), n2)
 	c := w.Chunk()
-	d.Chk.Equal(r, c.Ref())
+	d.Chk.Equal(h, c.Hash())
 	return c
 }
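The comment block above fixes the wire format as Hash (20-byte sha1), then Len (4-byte big-endian int), then Data. A hedged writer-side sketch (the helper is ours; the library's serializer produces the equivalent bytes):

```go
package main

import (
	"bytes"
	"crypto/sha1"
	"encoding/binary"
	"fmt"
	"io"
)

// writeChunk emits Hash | Len | Data, the layout deserializeChunk reads back.
func writeChunk(w io.Writer, data []byte) error {
	digest := sha1.Sum(data)
	if _, err := w.Write(digest[:]); err != nil { // 20-byte sha1 hash
		return err
	}
	if err := binary.Write(w, binary.BigEndian, uint32(len(data))); err != nil { // 4-byte int
		return err
	}
	_, err := w.Write(data) // len(Data) == Len
	return err
}

func main() {
	buf := &bytes.Buffer{}
	if err := writeChunk(buf, []byte("abc")); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len()) // 20 + 4 + 3 = 27 bytes on the wire
}
```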
@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"io"
 
-	"github.com/attic-labs/noms/ref"
+	"github.com/attic-labs/noms/hash"
 )
 
 // ChunkStore is the core storage abstraction in noms. We can put data anyplace we have a ChunkStore implementation for.
@@ -22,19 +22,19 @@ type Factory interface {
 	Shutter()
 }
 
-// RootTracker allows querying and management of the root of an entire tree of references. The "root" is the single mutable variable in a ChunkStore. It can store any ref, but it is typically used by higher layers (such as Database) to store a ref to a value that represents the current state and entire history of a database.
+// RootTracker allows querying and management of the root of an entire tree of references. The "root" is the single mutable variable in a ChunkStore. It can store any hash, but it is typically used by higher layers (such as Database) to store a hash to a value that represents the current state and entire history of a database.
 type RootTracker interface {
-	Root() ref.Ref
-	UpdateRoot(current, last ref.Ref) bool
+	Root() hash.Hash
+	UpdateRoot(current, last hash.Hash) bool
 }
 
 // ChunkSource is a place to get chunks from.
 type ChunkSource interface {
-	// Get gets a reader for the value of the Ref in the store. If the ref is absent from the store nil is returned.
-	Get(ref ref.Ref) Chunk
+	// Get the Chunk for the value of the hash in the store. If the hash is absent from the store nil is returned.
+	Get(h hash.Hash) Chunk
 
-	// Returns true iff the value at the address |ref| is contained in the source
-	Has(ref ref.Ref) bool
+	// Returns true iff the value at the address |h| is contained in the source
+	Has(h hash.Hash) bool
 }
 
 // ChunkSink is a place to put chunks.
@@ -48,13 +48,13 @@ type ChunkSink interface {
 	io.Closer
 }
 
-// BackpressureError is a slice of ref.Ref that indicates some chunks could not be Put(). Caller is free to try to Put them again later.
-type BackpressureError ref.RefSlice
+// BackpressureError is a slice of hash.Hash that indicates some chunks could not be Put(). Caller is free to try to Put them again later.
+type BackpressureError hash.HashSlice
 
 func (b BackpressureError) Error() string {
 	return fmt.Sprintf("Tried to Put %d too many Chunks", len(b))
 }
 
-func (b BackpressureError) AsHashes() ref.RefSlice {
-	return ref.RefSlice(b)
+func (b BackpressureError) AsHashes() hash.HashSlice {
+	return hash.HashSlice(b)
 }
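BackpressureError's contract is that the returned hashes identify chunks the sink did not accept and that the caller may Put them again later. A caller-side sketch under that contract (the helper and loop are ours, not the library's):

```go
// putAllWithRetry re-Puts only the chunks whose hashes came back in the
// BackpressureError. A real caller would bound the retries or back off.
func putAllWithRetry(sink ChunkSink, chunks []Chunk) {
	pending := chunks
	for len(pending) > 0 {
		bpe := sink.PutMany(pending)
		if len(bpe) == 0 {
			return // everything accepted
		}
		rejected := map[hash.Hash]bool{}
		for _, h := range bpe {
			rejected[h] = true
		}
		next := make([]Chunk, 0, len(bpe))
		for _, c := range pending {
			if rejected[c.Hash()] {
				next = append(next, c)
			}
		}
		pending = next
	}
}
```

This is the same hash-set filtering that ReadThroughStore.PutMany performs further down in this diff.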
@@ -1,8 +1,9 @@
 package chunks
 
 import (
-	"github.com/attic-labs/noms/ref"
 	"github.com/stretchr/testify/suite"
+
+	"github.com/attic-labs/noms/hash"
 )
 
 type ChunkStoreTestSuite struct {
@@ -15,15 +16,15 @@ func (suite *ChunkStoreTestSuite) TestChunkStorePut() {
 	input := "abc"
 	c := NewChunk([]byte(input))
 	suite.Store.Put(c)
-	ref := c.Ref()
+	h := c.Hash()
 
 	// See http://www.di-mgt.com.au/sha_testvectors.html
-	suite.Equal("sha1-a9993e364706816aba3e25717850c26c9cd0d89d", ref.String())
+	suite.Equal("sha1-a9993e364706816aba3e25717850c26c9cd0d89d", h.String())
 
-	suite.Store.UpdateRoot(ref, suite.Store.Root()) // Commit writes
+	suite.Store.UpdateRoot(h, suite.Store.Root()) // Commit writes
 
 	// And reading it via the API should work...
-	assertInputInStore(input, ref, suite.Store, suite.Assert())
+	assertInputInStore(input, h, suite.Store, suite.Assert())
 	if suite.putCountFn != nil {
 		suite.Equal(1, suite.putCountFn())
 	}
@@ -31,9 +32,9 @@ func (suite *ChunkStoreTestSuite) TestChunkStorePut() {
 	// Re-writing the same data should cause a second put
 	c = NewChunk([]byte(input))
 	suite.Store.Put(c)
-	suite.Equal(ref, c.Ref())
-	assertInputInStore(input, ref, suite.Store, suite.Assert())
-	suite.Store.UpdateRoot(ref, suite.Store.Root()) // Commit writes
+	suite.Equal(h, c.Hash())
+	assertInputInStore(input, h, suite.Store, suite.Assert())
+	suite.Store.UpdateRoot(h, suite.Store.Root()) // Commit writes
 
 	if suite.putCountFn != nil {
 		suite.Equal(2, suite.putCountFn())
@@ -45,11 +46,11 @@ func (suite *ChunkStoreTestSuite) TestChunkStorePutMany() {
 	c1, c2 := NewChunk([]byte(input1)), NewChunk([]byte(input2))
 	suite.Store.PutMany([]Chunk{c1, c2})
 
-	suite.Store.UpdateRoot(c1.Ref(), suite.Store.Root()) // Commit writes
+	suite.Store.UpdateRoot(c1.Hash(), suite.Store.Root()) // Commit writes
 
 	// And reading it via the API should work...
-	assertInputInStore(input1, c1.Ref(), suite.Store, suite.Assert())
-	assertInputInStore(input2, c2.Ref(), suite.Store, suite.Assert())
+	assertInputInStore(input1, c1.Hash(), suite.Store, suite.Assert())
+	assertInputInStore(input2, c2.Hash(), suite.Store, suite.Assert())
 	if suite.putCountFn != nil {
 		suite.Equal(2, suite.putCountFn())
 	}
@@ -59,8 +60,8 @@ func (suite *ChunkStoreTestSuite) TestChunkStoreRoot() {
 	oldRoot := suite.Store.Root()
 	suite.True(oldRoot.IsEmpty())
 
-	bogusRoot := ref.Parse("sha1-81c870618113ba29b6f2b396ea3a69c6f1d626c5") // sha1("Bogus, Dude")
-	newRoot := ref.Parse("sha1-907d14fb3af2b0d4f18c2d46abe8aedce17367bd")  // sha1("Hello, World")
+	bogusRoot := hash.Parse("sha1-81c870618113ba29b6f2b396ea3a69c6f1d626c5") // sha1("Bogus, Dude")
+	newRoot := hash.Parse("sha1-907d14fb3af2b0d4f18c2d46abe8aedce17367bd")  // sha1("Hello, World")
 
 	// Try to update root with bogus oldRoot
 	result := suite.Store.UpdateRoot(newRoot, bogusRoot)
@@ -72,7 +73,7 @@ func (suite *ChunkStoreTestSuite) TestChunkStoreRoot() {
 }
 
 func (suite *ChunkStoreTestSuite) TestChunkStoreGetNonExisting() {
-	ref := ref.Parse("sha1-1111111111111111111111111111111111111111")
-	c := suite.Store.Get(ref)
+	h := hash.Parse("sha1-1111111111111111111111111111111111111111")
+	c := suite.Store.Get(h)
 	suite.True(c.IsEmpty())
 }
@@ -10,7 +10,7 @@ import (
 	"time"
 
 	"github.com/attic-labs/noms/d"
-	"github.com/attic-labs/noms/ref"
+	"github.com/attic-labs/noms/hash"
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/credentials"
@@ -103,27 +103,27 @@ func newDynamoStoreFromDDBsvc(table, namespace string, ddb ddbsvc, showStats boo
 	return store
 }
 
-func (s *DynamoStore) Get(r ref.Ref) Chunk {
-	pending := s.unwrittenPuts.Get(r)
+func (s *DynamoStore) Get(h hash.Hash) Chunk {
+	pending := s.unwrittenPuts.Get(h)
 	if !pending.IsEmpty() {
 		return pending
 	}
 
 	ch := make(chan Chunk)
 	s.requestWg.Add(1)
-	s.readQueue <- GetRequest{r, ch}
+	s.readQueue <- GetRequest{h, ch}
 	return <-ch
 }
 
-func (s *DynamoStore) Has(r ref.Ref) bool {
-	pending := s.unwrittenPuts.Get(r)
+func (s *DynamoStore) Has(h hash.Hash) bool {
+	pending := s.unwrittenPuts.Get(h)
 	if !pending.IsEmpty() {
 		return true
 	}
 
 	ch := make(chan bool)
 	s.requestWg.Add(1)
-	s.readQueue <- HasRequest{r, ch}
+	s.readQueue <- HasRequest{h, ch}
 	return <-ch
 }
@@ -140,7 +140,7 @@ func (s *DynamoStore) PutMany(chunks []Chunk) (e BackpressureError) {
 		notPut := chunks[i:]
 		e = make(BackpressureError, len(notPut))
 		for j, np := range notPut {
-			e[j] = np.Ref()
+			e[j] = np.Hash()
 		}
 		return
 	}
@@ -180,10 +180,10 @@ func (s *DynamoStore) sendGetRequests(req ReadRequest) {
 		s.readTime += time.Now().UnixNano() - n
 	}()
 	batch := ReadBatch{}
-	refs := map[ref.Ref]bool{}
+	refs := map[hash.Hash]bool{}
 
 	addReq := func(req ReadRequest) {
-		r := req.Ref()
+		r := req.Hash()
 		batch[r] = append(batch[r], req.Outstanding())
 		refs[r] = true
 		s.requestWg.Done()
@@ -217,10 +217,10 @@ func (s *DynamoStore) sendGetRequests(req ReadRequest) {
 	batch.Close()
 }
 
-func (s *DynamoStore) buildRequestItems(refs map[ref.Ref]bool) map[string]*dynamodb.KeysAndAttributes {
+func (s *DynamoStore) buildRequestItems(hashes map[hash.Hash]bool) map[string]*dynamodb.KeysAndAttributes {
 	makeKeysAndAttrs := func() *dynamodb.KeysAndAttributes {
 		out := &dynamodb.KeysAndAttributes{ConsistentRead: aws.Bool(true)} // This doubles the cost :-(
-		for r := range refs {
+		for r := range hashes {
 			out.Keys = append(out.Keys, map[string]*dynamodb.AttributeValue{refAttr: {B: s.makeNamespacedKey(r)}})
 		}
 		return out
@@ -232,7 +232,7 @@ func (s *DynamoStore) processResponses(responses []map[string]*dynamodb.Attribut
 	for _, item := range responses {
 		p := item[refAttr]
 		d.Chk.NotNil(p)
-		r := ref.FromSlice(s.removeNamespace(p.B))
+		r := hash.FromSlice(s.removeNamespace(p.B))
 		p = item[chunkAttr]
 		d.Chk.NotNil(p)
 		b := p.B
@@ -244,7 +244,7 @@ func (s *DynamoStore) processResponses(responses []map[string]*dynamodb.Attribut
 			d.Chk.NoError(err)
 			b = buf.Bytes()
 		}
-		c := NewChunkWithRef(r, b)
+		c := NewChunkWithHash(r, b)
 		for _, reqChan := range batch[r] {
 			reqChan.Satisfy(c)
 		}
@@ -314,7 +314,7 @@ func (s *DynamoStore) sendWriteRequests(first Chunk) {
 }
 
 func chunkItemSize(c Chunk) int {
-	r := c.Ref()
+	r := c.Hash()
 	return len(refAttr) + len(r.DigestSlice()) + len(chunkAttr) + len(c.Data()) + len(compAttr) + len(noneValue)
 }
@@ -338,7 +338,7 @@ func (s *DynamoStore) buildWriteRequests(chunks []Chunk) map[string][]*dynamodb.
 		s.writeTotal += chunkDataLen
 		s.writeCompTotal += compDataLen
 		return map[string]*dynamodb.AttributeValue{
-			refAttr:   {B: s.makeNamespacedKey(c.Ref())},
+			refAttr:   {B: s.makeNamespacedKey(c.Hash())},
 			chunkAttr: {B: chunkData},
 			compAttr:  {S: aws.String(compression)},
 		}
@@ -380,7 +380,7 @@ func (s *DynamoStore) Close() error {
 	return nil
 }
 
-func (s *DynamoStore) Root() ref.Ref {
+func (s *DynamoStore) Root() hash.Hash {
 	result, err := s.ddbsvc.GetItem(&dynamodb.GetItemInput{
 		TableName: aws.String(s.table),
 		Key: map[string]*dynamodb.AttributeValue{
@@ -390,7 +390,7 @@ func (s *DynamoStore) Root() ref.Ref {
 	d.Exp.NoError(err)
 
 	if len(result.Item) == 0 {
-		return ref.Ref{}
+		return hash.Hash{}
 	}
 
 	itemLen := len(result.Item)
@@ -400,10 +400,10 @@ func (s *DynamoStore) Root() ref.Ref {
 		d.Chk.NotNil(result.Item[compAttr].S)
 		d.Chk.Equal(noneValue, *result.Item[compAttr].S)
 	}
-	return ref.FromSlice(result.Item[chunkAttr].B)
+	return hash.FromSlice(result.Item[chunkAttr].B)
 }
 
-func (s *DynamoStore) UpdateRoot(current, last ref.Ref) bool {
+func (s *DynamoStore) UpdateRoot(current, last hash.Hash) bool {
 	s.requestWg.Wait()
 
 	putArgs := dynamodb.PutItemInput{
@@ -439,12 +439,12 @@ func (s *DynamoStore) UpdateRoot(current, last ref.Ref) bool {
 	return true
 }
 
-func (s *DynamoStore) makeNamespacedKey(r ref.Ref) []byte {
+func (s *DynamoStore) makeNamespacedKey(h hash.Hash) []byte {
 	// This is semantically `return append(s.namespace, r.DigestSlice()...)`, but it seemed like we'd be doing this a LOT, and we know how much space we're going to need anyway. So, pre-allocate a slice and then copy into it.
-	refSlice := r.DigestSlice()
-	key := make([]byte, s.namespaceLen+len(refSlice))
+	hashSlice := h.DigestSlice()
+	key := make([]byte, s.namespaceLen+len(hashSlice))
 	copy(key, s.namespace)
-	copy(key[s.namespaceLen:], refSlice)
+	copy(key[s.namespaceLen:], hashSlice)
 	return key
 }
@@ -35,18 +35,18 @@ func TestGetRetrying(t *testing.T) {
 	c1 := NewChunk([]byte("abc"))
 
 	store.Put(c1)
-	store.UpdateRoot(c1.Ref(), store.Root()) // Commit writes
-	assert.True(store.Has(c1.Ref()))
+	store.UpdateRoot(c1.Hash(), store.Root()) // Commit writes
+	assert.True(store.Has(c1.Hash()))
 	store.Close()
 }
 
 func (suite *DynamoStoreTestSuite) TestChunkCompression() {
 	c1 := NewChunk(make([]byte, dynamoWriteUnitSize+1))
 	suite.Store.Put(c1)
-	suite.Store.UpdateRoot(c1.Ref(), suite.Store.Root()) // Commit writes
-	suite.True(suite.Store.Has(c1.Ref()))
+	suite.Store.UpdateRoot(c1.Hash(), suite.Store.Root()) // Commit writes
+	suite.True(suite.Store.Has(c1.Hash()))
 	suite.Equal(1, suite.ddb.numCompPuts)
 
-	roundTrip := suite.Store.Get(c1.Ref())
+	roundTrip := suite.Store.Get(c1.Hash())
 	suite.Equal(c1.Data(), roundTrip.Data())
 }
@@ -7,7 +7,7 @@ import (
 	"sync"
 
 	"github.com/attic-labs/noms/d"
-	"github.com/attic-labs/noms/ref"
+	"github.com/attic-labs/noms/hash"
 	"github.com/syndtr/goleveldb/leveldb"
 	"github.com/syndtr/goleveldb/leveldb/errors"
 	"github.com/syndtr/goleveldb/leveldb/filter"
@@ -61,29 +61,29 @@ type LevelDBStore struct {
 	closeBackingStore bool
 }
 
-func (l *LevelDBStore) Root() ref.Ref {
+func (l *LevelDBStore) Root() hash.Hash {
 	d.Chk.NotNil(l.internalLevelDBStore, "Cannot use LevelDBStore after Close().")
 	return l.rootByKey(l.rootKey)
 }
 
-func (l *LevelDBStore) UpdateRoot(current, last ref.Ref) bool {
+func (l *LevelDBStore) UpdateRoot(current, last hash.Hash) bool {
 	d.Chk.NotNil(l.internalLevelDBStore, "Cannot use LevelDBStore after Close().")
 	return l.updateRootByKey(l.rootKey, current, last)
 }
 
-func (l *LevelDBStore) Get(ref ref.Ref) Chunk {
+func (l *LevelDBStore) Get(ref hash.Hash) Chunk {
 	d.Chk.NotNil(l.internalLevelDBStore, "Cannot use LevelDBStore after Close().")
 	return l.getByKey(l.toChunkKey(ref), ref)
 }
 
-func (l *LevelDBStore) Has(ref ref.Ref) bool {
+func (l *LevelDBStore) Has(ref hash.Hash) bool {
 	d.Chk.NotNil(l.internalLevelDBStore, "Cannot use LevelDBStore after Close().")
 	return l.hasByKey(l.toChunkKey(ref))
 }
 
 func (l *LevelDBStore) Put(c Chunk) {
 	d.Chk.NotNil(l.internalLevelDBStore, "Cannot use LevelDBStore after Close().")
-	l.putByKey(l.toChunkKey(c.Ref()), c)
+	l.putByKey(l.toChunkKey(c.Hash()), c)
 }
 
 func (l *LevelDBStore) PutMany(chunks []Chunk) (e BackpressureError) {
@@ -92,7 +92,7 @@ func (l *LevelDBStore) PutMany(chunks []Chunk) (e BackpressureError) {
 	for _, c := range chunks {
 		d := c.Data()
 		numBytes += len(d)
-		b.Put(l.toChunkKey(c.Ref()), d)
+		b.Put(l.toChunkKey(c.Hash()), d)
 	}
 	l.putBatch(b, numBytes)
 	return
@@ -106,7 +106,7 @@ func (l *LevelDBStore) Close() error {
 	return nil
 }
 
-func (l *LevelDBStore) toChunkKey(r ref.Ref) []byte {
+func (l *LevelDBStore) toChunkKey(r hash.Hash) []byte {
 	digest := r.DigestSlice()
 	out := make([]byte, len(l.chunkPrefix), len(l.chunkPrefix)+len(digest))
 	copy(out, l.chunkPrefix)
@@ -139,17 +139,17 @@ func newBackingStore(dir string, maxFileHandles int, dumpStats bool) *internalLe
 	}
 }
 
-func (l *internalLevelDBStore) rootByKey(key []byte) ref.Ref {
+func (l *internalLevelDBStore) rootByKey(key []byte) hash.Hash {
 	val, err := l.db.Get(key, nil)
 	if err == errors.ErrNotFound {
-		return ref.Ref{}
+		return hash.Hash{}
 	}
 	d.Chk.NoError(err)
 
-	return ref.Parse(string(val))
+	return hash.Parse(string(val))
 }
 
-func (l *internalLevelDBStore) updateRootByKey(key []byte, current, last ref.Ref) bool {
+func (l *internalLevelDBStore) updateRootByKey(key []byte, current, last hash.Hash) bool {
 	l.mu.Lock()
 	defer l.mu.Unlock()
 	if last != l.rootByKey(key) {
@@ -162,7 +162,7 @@ func (l *internalLevelDBStore) updateRootByKey(key []byte, current, last ref.Ref
 	return true
 }
 
-func (l *internalLevelDBStore) getByKey(key []byte, ref ref.Ref) Chunk {
+func (l *internalLevelDBStore) getByKey(key []byte, ref hash.Hash) Chunk {
 	data, err := l.db.Get(key, nil)
 	l.getCount++
 	if err == errors.ErrNotFound {
@@ -170,7 +170,7 @@ func (l *internalLevelDBStore) getByKey(key []byte, ref ref.Ref) Chunk {
 	}
 	d.Chk.NoError(err)
 
-	return NewChunkWithRef(ref, data)
+	return NewChunkWithHash(ref, data)
 }
 
 func (l *internalLevelDBStore) hasByKey(key []byte) bool {
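Both toChunkKey above and DynamoStore's makeNamespacedKey build keys the same way: pre-size one buffer, copy the prefix, copy the digest. A generic sketch of that pattern (names and the example prefix are illustrative, not the library's):

```go
package main

import "fmt"

// prefixedKey mirrors the pre-allocate-and-copy pattern used by toChunkKey
// and makeNamespacedKey: a single allocation, no append growth.
func prefixedKey(prefix, digest []byte) []byte {
	key := make([]byte, len(prefix)+len(digest))
	copy(key, prefix)
	copy(key[len(prefix):], digest)
	return key
}

func main() {
	fmt.Printf("% x\n", prefixedKey([]byte("/chunk/"), []byte{0xa9, 0x99, 0x3e}))
}
```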
@@ -1,15 +1,15 @@
 package chunks
 
-import "github.com/attic-labs/noms/ref"
+import "github.com/attic-labs/noms/hash"
 
-type memoryRootTracker ref.Ref
+type memoryRootTracker hash.Hash
 
-func (ms *memoryRootTracker) Root() ref.Ref {
-	return ref.Ref(*ms)
+func (ms *memoryRootTracker) Root() hash.Hash {
+	return hash.Hash(*ms)
 }
 
-func (ms *memoryRootTracker) UpdateRoot(current, last ref.Ref) bool {
-	if last != ref.Ref(*ms) {
+func (ms *memoryRootTracker) UpdateRoot(current, last hash.Hash) bool {
+	if last != hash.Hash(*ms) {
 		return false
 	}
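memoryRootTracker.UpdateRoot above is a compare-and-set: it advances the root only if the caller's last still matches the stored value. Callers therefore drive it in a read-compute-retry loop, sketched here (ours, not part of the diff):

```go
// advanceRoot retries until its successor function wins the race.
func advanceRoot(rt RootTracker, next func(hash.Hash) hash.Hash) {
	for {
		last := rt.Root()
		if rt.UpdateRoot(next(last), last) {
			return
		}
		// Another writer advanced the root first; reload and try again.
	}
}
```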
@@ -4,12 +4,12 @@ import (
 	"sync"
 
 	"github.com/attic-labs/noms/d"
-	"github.com/attic-labs/noms/ref"
+	"github.com/attic-labs/noms/hash"
 )
 
 // An in-memory implementation of store.ChunkStore. Useful mainly for tests.
 type MemoryStore struct {
-	data map[ref.Ref]Chunk
+	data map[hash.Hash]Chunk
 	memoryRootTracker
 	mu *sync.Mutex
 }
@@ -20,16 +20,16 @@ func NewMemoryStore() *MemoryStore {
 	}
 }
 
-func (ms *MemoryStore) Get(ref ref.Ref) Chunk {
+func (ms *MemoryStore) Get(h hash.Hash) Chunk {
 	ms.mu.Lock()
 	defer ms.mu.Unlock()
-	if c, ok := ms.data[ref]; ok {
+	if c, ok := ms.data[h]; ok {
 		return c
 	}
 	return EmptyChunk
 }
 
-func (ms *MemoryStore) Has(r ref.Ref) bool {
+func (ms *MemoryStore) Has(r hash.Hash) bool {
 	ms.mu.Lock()
 	defer ms.mu.Unlock()
 	if ms.data == nil {
@@ -43,9 +43,9 @@ func (ms *MemoryStore) Put(c Chunk) {
 	ms.mu.Lock()
 	defer ms.mu.Unlock()
 	if ms.data == nil {
-		ms.data = map[ref.Ref]Chunk{}
+		ms.data = map[hash.Hash]Chunk{}
 	}
-	ms.data[c.Ref()] = c
+	ms.data[c.Hash()] = c
 }
 
 func (ms *MemoryStore) PutMany(chunks []Chunk) (e BackpressureError) {
@@ -3,23 +3,23 @@ package chunks
 import (
 	"sync"
 
-	"github.com/attic-labs/noms/ref"
+	"github.com/attic-labs/noms/hash"
 )
 
 func newUnwrittenPutCache() *unwrittenPutCache {
-	return &unwrittenPutCache{map[ref.Ref]Chunk{}, &sync.Mutex{}}
+	return &unwrittenPutCache{map[hash.Hash]Chunk{}, &sync.Mutex{}}
 }
 
 type unwrittenPutCache struct {
-	unwrittenPuts map[ref.Ref]Chunk
+	unwrittenPuts map[hash.Hash]Chunk
 	mu            *sync.Mutex
 }
 
 func (p *unwrittenPutCache) Add(c Chunk) bool {
 	p.mu.Lock()
 	defer p.mu.Unlock()
-	if _, ok := p.unwrittenPuts[c.Ref()]; !ok {
-		p.unwrittenPuts[c.Ref()] = c
+	if _, ok := p.unwrittenPuts[c.Hash()]; !ok {
+		p.unwrittenPuts[c.Hash()] = c
 		return true
 	}
@@ -29,11 +29,11 @@ func (p *unwrittenPutCache) Add(c Chunk) bool {
 func (p *unwrittenPutCache) Has(c Chunk) (has bool) {
 	p.mu.Lock()
 	defer p.mu.Unlock()
-	_, has = p.unwrittenPuts[c.Ref()]
+	_, has = p.unwrittenPuts[c.Hash()]
 	return
 }
 
-func (p *unwrittenPutCache) Get(r ref.Ref) Chunk {
+func (p *unwrittenPutCache) Get(r hash.Hash) Chunk {
 	p.mu.Lock()
 	defer p.mu.Unlock()
 	if c, ok := p.unwrittenPuts[r]; ok {
@@ -46,6 +46,6 @@ func (p *unwrittenPutCache) Clear(chunks []Chunk) {
 	p.mu.Lock()
 	defer p.mu.Unlock()
 	for _, c := range chunks {
-		delete(p.unwrittenPuts, c.Ref())
+		delete(p.unwrittenPuts, c.Hash())
 	}
 }
@@ -5,11 +5,11 @@ import (
 	"io/ioutil"
 
 	"github.com/attic-labs/noms/d"
-	"github.com/attic-labs/noms/ref"
+	"github.com/attic-labs/noms/hash"
 )
 
 // ReadThroughStore is a store that consists of two other stores. A caching and
-// a backing store. All reads check the caching store first and if the ref is
+// a backing store. All reads check the caching store first and if the h is
 // present there the caching store is used. If not present the backing store is
 // used and the value gets cached in the caching store. All writes go directly
 // to the backing store.
@@ -24,12 +24,12 @@ func NewReadThroughStore(cachingStore ChunkStore, backingStore ChunkStore) ReadT
 	return ReadThroughStore{ioutil.NopCloser(nil), cachingStore, backingStore, 0}
 }
 
-func (rts ReadThroughStore) Get(ref ref.Ref) Chunk {
-	c := rts.cachingStore.Get(ref)
+func (rts ReadThroughStore) Get(h hash.Hash) Chunk {
+	c := rts.cachingStore.Get(h)
 	if !c.IsEmpty() {
 		return c
 	}
-	c = rts.backingStore.Get(ref)
+	c = rts.backingStore.Get(h)
 	if c.IsEmpty() {
 		return c
 	}
@@ -38,8 +38,8 @@ func (rts ReadThroughStore) Get(ref ref.Ref) Chunk {
 	return c
 }
 
-func (rts ReadThroughStore) Has(ref ref.Ref) bool {
-	return rts.cachingStore.Has(ref) || rts.backingStore.Has(ref)
+func (rts ReadThroughStore) Has(h hash.Hash) bool {
+	return rts.cachingStore.Has(h) || rts.backingStore.Has(h)
 }
 
 func (rts ReadThroughStore) Put(c Chunk) {
@@ -49,13 +49,13 @@ func (rts ReadThroughStore) Put(c Chunk) {
 
 func (rts ReadThroughStore) PutMany(chunks []Chunk) BackpressureError {
 	bpe := rts.backingStore.PutMany(chunks)
-	lookup := make(map[ref.Ref]bool, len(bpe))
+	lookup := make(map[hash.Hash]bool, len(bpe))
 	for _, r := range bpe {
 		lookup[r] = true
 	}
 	toPut := make([]Chunk, 0, len(chunks)-len(bpe))
 	for _, c := range chunks {
-		if lookup[c.Ref()] {
+		if lookup[c.Hash()] {
 			toPut = append(toPut, c)
 		}
 	}
@@ -63,10 +63,10 @@ func (rts ReadThroughStore) PutMany(chunks []Chunk) BackpressureError {
 	return bpe
 }
 
-func (rts ReadThroughStore) Root() ref.Ref {
+func (rts ReadThroughStore) Root() hash.Hash {
 	return rts.backingStore.Root()
 }
 
-func (rts ReadThroughStore) UpdateRoot(current, last ref.Ref) bool {
+func (rts ReadThroughStore) UpdateRoot(current, last hash.Hash) bool {
 	return rts.backingStore.UpdateRoot(current, last)
 }
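Usage sketch for the read-through composition above (ours, not part of the diff): any two ChunkStores compose, for example an in-memory cache in front of a persistent backing store. Per the doc comment, writes go straight to the backing store and the first read fills the cache.

```go
func readThroughExample(backing ChunkStore) {
	cache := NewMemoryStore()
	rts := NewReadThroughStore(cache, backing)

	c := NewChunk([]byte("abc"))
	rts.Put(c)            // written directly to backing
	_ = rts.Get(c.Hash()) // cache miss: fetched from backing, then cached
	_ = rts.Get(c.Hash()) // served from cache
}
```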
@@ -29,10 +29,10 @@ func (suite *LevelDBStoreTestSuite) TestReadThroughStoreGet() {
 	input := "abc"
 	c := NewChunk([]byte(input))
 	bs.Put(c)
-	ref := c.Ref()
+	h := c.Hash()
 
 	// See http://www.di-mgt.com.au/sha_testvectors.html
-	suite.Equal("sha1-a9993e364706816aba3e25717850c26c9cd0d89d", ref.String())
+	suite.Equal("sha1-a9993e364706816aba3e25717850c26c9cd0d89d", h.String())
 
 	suite.Equal(1, bs.Len())
 	suite.Equal(1, bs.Writes)
@@ -42,7 +42,7 @@ func (suite *LevelDBStoreTestSuite) TestReadThroughStoreGet() {
 	rts := NewReadThroughStore(cs, bs)
 
 	// Now read "abc". It is not yet in the cache so we hit the backing store.
-	chunk := rts.Get(ref)
+	chunk := rts.Get(h)
 	suite.Equal(input, string(chunk.Data()))
 
 	suite.Equal(1, bs.Len())
@@ -53,7 +53,7 @@ func (suite *LevelDBStoreTestSuite) TestReadThroughStoreGet() {
 	suite.Equal(1, bs.Reads)
 
 	// Reading it again should not hit the backing store.
-	chunk = rts.Get(ref)
+	chunk = rts.Get(h)
 	suite.Equal(input, string(chunk.Data()))
 
 	suite.Equal(1, bs.Len())
@@ -73,12 +73,12 @@ func (suite *LevelDBStoreTestSuite) TestReadThroughStorePut() {
 	input := "abc"
 	c := NewChunk([]byte(input))
 	rts.Put(c)
-	ref := c.Ref()
+	h := c.Hash()
 
 	// See http://www.di-mgt.com.au/sha_testvectors.html
-	suite.Equal("sha1-a9993e364706816aba3e25717850c26c9cd0d89d", ref.String())
+	suite.Equal("sha1-a9993e364706816aba3e25717850c26c9cd0d89d", h.String())
 
-	assertInputInStore("abc", ref, bs, suite.Assert())
-	assertInputInStore("abc", ref, cs, suite.Assert())
-	assertInputInStore("abc", ref, rts, suite.Assert())
+	assertInputInStore("abc", h, bs, suite.Assert())
+	assertInputInStore("abc", h, cs, suite.Assert())
+	assertInputInStore("abc", h, rts, suite.Assert())
 }
@@ -1,31 +1,31 @@
 package chunks
 
-import "github.com/attic-labs/noms/ref"
+import "github.com/attic-labs/noms/hash"
 
 type ReadRequest interface {
-	Ref() ref.Ref
+	Hash() hash.Hash
 	Outstanding() OutstandingRequest
 }
 
-func NewGetRequest(r ref.Ref, ch chan Chunk) GetRequest {
+func NewGetRequest(r hash.Hash, ch chan Chunk) GetRequest {
 	return GetRequest{r, ch}
 }
 
 type GetRequest struct {
-	r  ref.Ref
+	r  hash.Hash
 	ch chan Chunk
 }
 
-func NewHasRequest(r ref.Ref, ch chan bool) HasRequest {
+func NewHasRequest(r hash.Hash, ch chan bool) HasRequest {
 	return HasRequest{r, ch}
 }
 
 type HasRequest struct {
-	r  ref.Ref
+	r  hash.Hash
 	ch chan bool
 }
 
-func (g GetRequest) Ref() ref.Ref {
+func (g GetRequest) Hash() hash.Hash {
 	return g.r
 }
@@ -33,7 +33,7 @@ func (g GetRequest) Outstanding() OutstandingRequest {
 	return OutstandingGet(g.ch)
 }
 
-func (h HasRequest) Ref() ref.Ref {
+func (h HasRequest) Hash() hash.Hash {
 	return h.r
 }
@@ -70,11 +70,11 @@ func (h OutstandingHas) Fail() {
 }
 
 // ReadBatch represents a set of queued Get/Has requests, each of which are blocking on a receive channel for a response.
-type ReadBatch map[ref.Ref][]OutstandingRequest
+type ReadBatch map[hash.Hash][]OutstandingRequest
 
 // GetBatch represents a set of queued Get requests, each of which are blocking on a receive channel for a response.
-type GetBatch map[ref.Ref][]chan Chunk
-type HasBatch map[ref.Ref][]chan bool
+type GetBatch map[hash.Hash][]chan Chunk
+type HasBatch map[hash.Hash][]chan bool
 
 // Close ensures that callers to Get() and Has() are failed correctly if the corresponding chunk wasn't in the response from the server (i.e. it wasn't found).
 func (rb *ReadBatch) Close() error {
@@ -88,11 +88,11 @@ func (rb *ReadBatch) Close() error {
 
 // Put is implemented so that ReadBatch implements the ChunkSink interface.
 func (rb *ReadBatch) Put(c Chunk) {
-	for _, or := range (*rb)[c.Ref()] {
+	for _, or := range (*rb)[c.Hash()] {
 		or.Satisfy(c)
 	}
 
-	delete(*rb, c.Ref())
+	delete(*rb, c.Hash())
 }
 
 // PutMany is implemented so that ReadBatch implements the ChunkSink interface.
@@ -115,11 +115,11 @@ func (gb *GetBatch) Close() error {
 
 // Put is implemented so that GetBatch implements the ChunkSink interface.
 func (gb *GetBatch) Put(c Chunk) {
-	for _, ch := range (*gb)[c.Ref()] {
+	for _, ch := range (*gb)[c.Hash()] {
 		ch <- c
 	}
 
-	delete(*gb, c.Ref())
+	delete(*gb, c.Hash())
 }
 
 // PutMany is implemented so that GetBatch implements the ChunkSink interface.
@@ -3,17 +3,17 @@ package chunks
 import (
 	"testing"
 
-	"github.com/attic-labs/noms/ref"
+	"github.com/attic-labs/noms/hash"
 	"github.com/stretchr/testify/assert"
 )
 
 func TestGetRequestBatch(t *testing.T) {
 	assert := assert.New(t)
-	r0 := ref.Parse("sha1-0000000000000000000000000000000000000000")
+	r0 := hash.Parse("sha1-0000000000000000000000000000000000000000")
 	c1 := NewChunk([]byte("abc"))
-	r1 := c1.Ref()
+	r1 := c1.Hash()
 	c2 := NewChunk([]byte("123"))
-	r2 := c2.Ref()
+	r2 := c2.Hash()
 
 	tally := func(b bool, trueCnt, falseCnt *int) {
 		if b {
@@ -55,7 +55,7 @@ func TestGetRequestBatch(t *testing.T) {
 		tally(b, &r2True, &r2False)
 	}
 	for c := range req4chan {
-		assert.EqualValues(c2.Ref(), c.Ref())
+		assert.EqualValues(c2.Hash(), c.Hash())
 	}
 
 	assert.Equal(1, r1True)
@@ -69,7 +69,7 @@ func TestGetRequestBatch(t *testing.T) {
 		tally(b, &r0True, &r0False)
 	}
 	for c := range req1chan {
-		assert.EqualValues(EmptyChunk.Ref(), c.Ref())
+		assert.EqualValues(EmptyChunk.Hash(), c.Hash())
 	}
 	assert.Equal(0, r0True)
 	assert.Equal(1, r0False)
@@ -3,19 +3,19 @@ package chunks
 import (
 	"sync"
 
-	"github.com/attic-labs/noms/ref"
+	"github.com/attic-labs/noms/hash"
 	"github.com/stretchr/testify/assert"
 )
 
-func assertInputInStore(input string, ref ref.Ref, s ChunkStore, assert *assert.Assertions) {
-	chunk := s.Get(ref)
-	assert.False(chunk.IsEmpty(), "Shouldn't get empty chunk for %s", ref.String())
+func assertInputInStore(input string, h hash.Hash, s ChunkStore, assert *assert.Assertions) {
+	chunk := s.Get(h)
+	assert.False(chunk.IsEmpty(), "Shouldn't get empty chunk for %s", h.String())
 	assert.Equal(input, string(chunk.Data()))
 }
 
-func assertInputNotInStore(input string, ref ref.Ref, s ChunkStore, assert *assert.Assertions) {
-	data := s.Get(ref)
-	assert.Nil(data, "Shouldn't have gotten data for %s", ref.String())
+func assertInputNotInStore(input string, h hash.Hash, s ChunkStore, assert *assert.Assertions) {
	data := s.Get(h)
+	assert.Nil(data, "Shouldn't have gotten data for %s", h.String())
 }
 
 type TestStore struct {
@@ -33,14 +33,14 @@ func NewTestStore() *TestStore {
 	}
 }
 
-func (s *TestStore) Get(ref ref.Ref) Chunk {
+func (s *TestStore) Get(h hash.Hash) Chunk {
 	s.Reads++
-	return s.MemoryStore.Get(ref)
+	return s.MemoryStore.Get(h)
 }
 
-func (s *TestStore) Has(ref ref.Ref) bool {
+func (s *TestStore) Has(h hash.Hash) bool {
 	s.Hases++
-	return s.MemoryStore.Has(ref)
+	return s.MemoryStore.Has(h)
 }
 
 func (s *TestStore) Put(c Chunk) {
@@ -8,7 +8,7 @@ import (
 	"github.com/attic-labs/noms/chunks"
 	"github.com/attic-labs/noms/datas"
 	"github.com/attic-labs/noms/dataset"
-	"github.com/attic-labs/noms/ref"
+	"github.com/attic-labs/noms/hash"
 	"github.com/attic-labs/noms/types"
 )
@@ -30,7 +30,7 @@ type DatasetSpec struct {
 
 type RefSpec struct {
 	StoreSpec DatabaseSpec
-	Ref       ref.Ref
+	Ref       hash.Hash
 }
 
 type PathSpec interface {
@@ -87,7 +87,7 @@ func ParseRefSpec(spec string) (RefSpec, error) {
 		return RefSpec{}, err
 	}
 
-	if r, ok := ref.MaybeParse(dspec.DatasetName); ok {
+	if r, ok := hash.MaybeParse(dspec.DatasetName); ok {
 		return RefSpec{StoreSpec: dspec.StoreSpec, Ref: r}, nil
 	}
@@ -12,7 +12,7 @@ import (
 	"github.com/attic-labs/noms/chunks"
 	"github.com/attic-labs/noms/datas"
 	"github.com/attic-labs/noms/dataset"
-	"github.com/attic-labs/noms/ref"
+	"github.com/attic-labs/noms/hash"
 	"github.com/attic-labs/noms/types"
 	"github.com/stretchr/testify/assert"
 )
@@ -47,7 +47,7 @@ func disabledTestHTTPDatabase(t *testing.T) {
 	assert.NoError(err)
 	store2, err := sp2.Database()
 	assert.NoError(err)
-	assert.Equal(types.NewString(testString), store2.ReadValue(r1.TargetRef()))
+	assert.Equal(types.NewString(testString), store2.ReadValue(r1.TargetHash()))
 
 	server.Stop()
 	wg.Wait()
@@ -74,7 +74,7 @@ func TestLDBDatabase(t *testing.T) {
 	assert.NoError(errRead)
 	store, err := sp.Database()
 	assert.NoError(err)
-	assert.Equal(s1.String(), store.ReadValue(s1.Ref()).(types.String).String())
+	assert.Equal(s1.String(), store.ReadValue(s1.Hash()).(types.String).String())
 	store.Close()
 	os.Remove(dir)
 }
@@ -90,7 +90,7 @@ func TestMemDatabase(t *testing.T) {
 	r := store.WriteValue(types.Bool(true))
 
 	assert.NoError(err)
-	assert.Equal(types.Bool(true), store.ReadValue(r.TargetRef()))
+	assert.Equal(types.Bool(true), store.ReadValue(r.TargetHash()))
 }
 
 // TODO: implement this with mock httpService
@@ -198,7 +198,7 @@ func TestLDBObject(t *testing.T) {
 	assert.Equal(s1.String(), s2.(types.String).String())
 	dataset2.Store().Close()
 
-	spec3 := fmt.Sprintf("ldb:%s:%s", ldbpath, s1.Ref().String())
+	spec3 := fmt.Sprintf("ldb:%s:%s", ldbpath, s1.Hash().String())
 	sp3, err := ParsePathSpec(spec3)
 	database, v3, err := sp3.Value()
 	assert.Equal(s1.String(), v3.(types.String).String())
@@ -219,7 +219,7 @@ func TestReadRef(t *testing.T) {
 	commit := types.NewString("Commit Value")
 	dataset1, err = dataset1.Commit(commit)
 	assert.NoError(err)
-	r1 := dataset1.Head().Ref()
+	r1 := dataset1.Head().Hash()
 	dataset1.Store().Close()
 
 	spec2 := fmt.Sprintf("ldb:%s:%s", ldbPath, r1.String())
@@ -228,7 +228,7 @@ func TestReadRef(t *testing.T) {
 	database, v2, err := sp2.Value()
 	assert.NoError(err)
 
-	assert.EqualValues(r1.String(), v2.Ref().String())
+	assert.EqualValues(r1.String(), v2.Hash().String())
 	database.Close()
 }
@@ -315,7 +315,7 @@ func TestDatasetSpecs(t *testing.T) {
 func TestRefSpec(t *testing.T) {
 	assert := assert.New(t)
 
-	testRef := ref.Parse("sha1-0123456789012345678901234567890123456789")
+	testRef := hash.Parse("sha1-0123456789012345678901234567890123456789")
 
 	refSpec, err := ParseRefSpec("http://local.attic.io/john/doe:sha1-0123456789012345678901234567890123456789")
 	assert.NoError(err)
@@ -333,7 +333,7 @@ func TestRefSpec(t *testing.T) {
 func TestPathSpec(t *testing.T) {
 	assert := assert.New(t)
 
-	testRef := ref.Parse("sha1-0123456789012345678901234567890123456789")
+	testRef := hash.Parse("sha1-0123456789012345678901234567890123456789")
 
 	pathSpec, err := ParsePathSpec("http://local.attic.io/john/doe:sha1-0123456789012345678901234567890123456789")
 	assert.NoError(err)
@@ -369,7 +369,7 @@ func disabledTestRefSpec(t *testing.T) {
 	assert.NoError(err)
 	assert.Equal(DatabaseSpec{Protocol: "ldb", Path: "/path/to/somewhere"}, storeSpec)
 
-	testRef := ref.Parse("sha1-0123456789012345678901234567890123456789")
+	testRef := hash.Parse("sha1-0123456789012345678901234567890123456789")
 
 	refSpec, err := ParseRefSpec("/filesys/john/doe:sha1-0123456789012345678901234567890123456789")
 	assert.NoError(err)
@@ -32,7 +32,7 @@ func main() {
 		set, err := setSpec.Dataset()
 		util.CheckError(err)
 
-		oldCommitRef, errBool := set.MaybeHeadRef()
+		oldCommitRef, errBool := set.MaybeHeadHash()
 		if !errBool {
 			util.CheckError(fmt.Errorf("Dataset %v not found", set.ID()))
 		}
@@ -41,7 +41,7 @@ func main() {
 		util.CheckError(err)
 		defer store.Close()
 
-		fmt.Printf("Deleted dataset %v (was %v)\n\n", set.ID(), oldCommitRef.TargetRef().String())
+		fmt.Printf("Deleted dataset %v (was %v)\n\n", set.ID(), oldCommitRef.TargetHash().String())
 	} else {
 		if flag.NArg() != 1 {
 			flag.Usage()
@@ -46,7 +46,7 @@ func (iter *CommitIterator) Next() (LogNode, bool) {
 	branches := branchList{}
 	parents := commitRefsFromSet(br.commit.Get(datas.ParentsField).(types.Set))
 	for _, p := range parents {
-		b := branch{cr: p, commit: iter.db.ReadValue(p.TargetRef()).(types.Struct)}
+		b := branch{cr: p, commit: iter.db.ReadValue(p.TargetHash()).(types.Struct)}
 		branches = append(branches, b)
 	}
 	iter.branches = iter.branches.Splice(col, 1, branches...)
@@ -84,7 +84,7 @@ type LogNode struct {
 }
 
 func (n LogNode) String() string {
-	return fmt.Sprintf("cr: %s, startingColCount: %d, endingColCount: %d, col: %d, newCols: %v, foldedCols: %v, expanding: %t, shrunk: %t, shrinking: %t", n.cr.TargetRef(), n.startingColCount, n.endingColCount, n.col, n.newCols, n.foldedCols, n.Expanding(), n.Shrunk(), n.Shrinking())
+	return fmt.Sprintf("cr: %s, startingColCount: %d, endingColCount: %d, col: %d, newCols: %v, foldedCols: %v, expanding: %t, shrunk: %t, shrinking: %t", n.cr.TargetHash(), n.startingColCount, n.endingColCount, n.col, n.newCols, n.foldedCols, n.Expanding(), n.Shrunk(), n.Shrinking())
 }
 
 // True if this commit's graph will expand to show an additional branch
@@ -108,7 +108,7 @@ type branch struct {
 }
 
 func (b branch) String() string {
-	return b.cr.TargetRef().String()
+	return b.cr.TargetHash().String()
 }
 
 type branchList []branch
@@ -74,17 +74,17 @@ func printCommit(node LogNode) (err error) {
 		doColor = ansi.ColorFunc("red+h")
 	}
 
-	fmt.Printf("%s%s\n", genGraph(node, lineno), doColor(node.commit.Ref().String()))
+	fmt.Printf("%s%s\n", genGraph(node, lineno), doColor(node.commit.Hash().String()))
 	parents := commitRefsFromSet(node.commit.Get(datas.ParentsField).(types.Set))
 	lineno++
 	if len(parents) > 1 {
 		pstrings := []string{}
 		for _, cr := range parents {
-			pstrings = append(pstrings, cr.TargetRef().String())
+			pstrings = append(pstrings, cr.TargetHash().String())
 		}
 		fmt.Printf("%sMerge: %s\n", genGraph(node, lineno), strings.Join(pstrings, " "))
 	} else if len(parents) == 1 {
-		fmt.Printf("%sParent: %s\n", genGraph(node, lineno), parents[0].TargetRef().String())
+		fmt.Printf("%sParent: %s\n", genGraph(node, lineno), parents[0].TargetHash().String())
 	} else {
 		fmt.Printf("%sParent: None\n", genGraph(node, lineno))
 	}
@@ -28,9 +28,9 @@ func testCommitInResults(s *nomsShowTestSuite, spec string, i int) {
 	ds, err = ds.Commit(types.Number(1))
 	s.NoError(err)
 	commit := ds.Head()
-	fmt.Printf("commit ref: %s, type: %s\n", commit.Ref(), commit.Type().Name())
+	fmt.Printf("commit hash: %s, type: %s\n", commit.Hash(), commit.Type().Name())
 	ds.Store().Close()
-	s.Contains(s.Run(main, []string{spec}), commit.Ref().String())
+	s.Contains(s.Run(main, []string{spec}), commit.Hash().String())
 }
 
 func (s *nomsShowTestSuite) TestNomsLog() {
@@ -57,11 +57,11 @@ func addCommitWithValue(ds dataset.Dataset, v types.Value) (dataset.Dataset, err
 }
 
 func addBranchedDataset(newDs, parentDs dataset.Dataset, v string) (dataset.Dataset, error) {
-	return newDs.CommitWithParents(types.NewString(v), types.NewSet().Insert(parentDs.HeadRef()))
+	return newDs.CommitWithParents(types.NewString(v), types.NewSet().Insert(parentDs.HeadHash()))
 }
 
 func mergeDatasets(ds1, ds2 dataset.Dataset, v string) (dataset.Dataset, error) {
-	return ds1.CommitWithParents(types.NewString(v), types.NewSet(ds1.HeadRef(), ds2.HeadRef()))
+	return ds1.CommitWithParents(types.NewString(v), types.NewSet(ds1.HeadHash(), ds2.HeadHash()))
 }
 
 func (s *nomsShowTestSuite) TestNomsGraph1() {
@@ -50,7 +50,7 @@ func (s *nomsShowTestSuite) TestNomsShow() {
 	r := writeTestData(ds, s1)
 	s.Equal(res1, s.Run(main, []string{spec}))
 
-	spec1 := fmt.Sprintf("ldb:%s:%s", s.LdbDir, r.TargetRef().String())
+	spec1 := fmt.Sprintf("ldb:%s:%s", s.LdbDir, r.TargetHash().String())
 	s.Equal(res2, s.Run(main, []string{spec1}))
 
 	ds, err = sp.Dataset()
@@ -58,7 +58,7 @@ func (s *nomsShowTestSuite) TestNomsShow() {
 	r = writeTestData(ds, list)
 	s.Equal(res3, s.Run(main, []string{spec}))
 
-	spec1 = fmt.Sprintf("ldb:%s:%s", s.LdbDir, r.TargetRef().String())
+	spec1 = fmt.Sprintf("ldb:%s:%s", s.LdbDir, r.TargetHash().String())
 	s.Equal(res4, s.Run(main, []string{spec1}))
 
 	ds, err = sp.Dataset()
@@ -27,7 +27,7 @@ func (s *testSuite) TestSync() {
 	s.NoError(err)
 	source2, err := source1.Commit(types.Number(43))
 	s.NoError(err)
-	source1HeadRef := source1.Head().Ref()
+	source1HeadRef := source1.Head().Hash()
 	source2.Store().Close() // Close Database backing both Datasets
 
 	sourceSpec := fmt.Sprintf("ldb:%s:%s", s.LdbDir, source1HeadRef)
@@ -4,20 +4,20 @@ import (
 	"sync"
 
 	"github.com/attic-labs/noms/chunks"
-	"github.com/attic-labs/noms/ref"
+	"github.com/attic-labs/noms/hash"
 )
 
 type cachingChunkHaver struct {
 	backing  chunks.ChunkSource
-	hasCache map[ref.Ref]bool
+	hasCache map[hash.Hash]bool
 	mu       *sync.Mutex
 }
 
 func newCachingChunkHaver(cs chunks.ChunkSource) *cachingChunkHaver {
-	return &cachingChunkHaver{cs, map[ref.Ref]bool{}, &sync.Mutex{}}
+	return &cachingChunkHaver{cs, map[hash.Hash]bool{}, &sync.Mutex{}}
 }
 
-func (ccs *cachingChunkHaver) Has(r ref.Ref) bool {
+func (ccs *cachingChunkHaver) Has(r hash.Hash) bool {
 	if has, ok := checkCache(ccs, r); ok {
 		return has
 	}
@@ -26,14 +26,14 @@ func (ccs *cachingChunkHaver) Has(r ref.Ref) bool {
 	return has
 }
 
-func checkCache(ccs *cachingChunkHaver, r ref.Ref) (has, ok bool) {
+func checkCache(ccs *cachingChunkHaver, r hash.Hash) (has, ok bool) {
 	ccs.mu.Lock()
 	defer ccs.mu.Unlock()
 	has, ok = ccs.hasCache[r]
 	return
 }
 
-func setCache(ccs *cachingChunkHaver, r ref.Ref, has bool) {
+func setCache(ccs *cachingChunkHaver, r hash.Hash, has bool) {
 	ccs.mu.Lock()
 	defer ccs.mu.Unlock()
 	ccs.hasCache[r] = has
@@ -14,15 +14,15 @@ func TestCachingChunkHaver(t *testing.T) {
 	input := "abc"
 
 	c := chunks.NewChunk([]byte(input))
-	assert.False(ccs.Has(c.Ref()))
+	assert.False(ccs.Has(c.Hash()))
 	assert.Equal(ts.Hases, 1)
-	assert.False(ccs.Has(c.Ref()))
+	assert.False(ccs.Has(c.Hash()))
 	assert.Equal(ts.Hases, 1)
 
 	ts.Put(c)
 	ccs = newCachingChunkHaver(ts)
-	assert.True(ccs.Has(c.Ref()))
+	assert.True(ccs.Has(c.Hash()))
 	assert.Equal(ts.Hases, 2)
-	assert.True(ccs.Has(c.Ref()))
+	assert.True(ccs.Has(c.Hash()))
 	assert.Equal(ts.Hases, 2)
 }
@@ -5,14 +5,14 @@ import (
 
 	"github.com/attic-labs/noms/chunks"
 	"github.com/attic-labs/noms/d"
-	"github.com/attic-labs/noms/ref"
+	"github.com/attic-labs/noms/hash"
 	"github.com/attic-labs/noms/types"
 )
 
 type databaseCommon struct {
 	vs       *types.ValueStore
 	rt       chunks.RootTracker
-	rootRef  ref.Ref
+	rootRef  hash.Hash
 	datasets *types.Map
 }
@@ -64,7 +64,7 @@ func (ds *databaseCommon) Datasets() types.Map {
 	return *ds.datasets
 }
 
-func (ds *databaseCommon) ReadValue(r ref.Ref) types.Value {
+func (ds *databaseCommon) ReadValue(r hash.Hash) types.Value {
 	return ds.vs.ReadValue(r)
 }
@@ -76,7 +76,7 @@ func (ds *databaseCommon) Close() error {
 	return ds.vs.Close()
 }
 
-func (ds *databaseCommon) datasetsFromRef(datasetsRef ref.Ref) *types.Map {
+func (ds *databaseCommon) datasetsFromRef(datasetsRef hash.Hash) *types.Map {
 	c := ds.ReadValue(datasetsRef).(types.Map)
 	return &c
 }
@@ -85,7 +85,7 @@ func (ds *databaseCommon) commit(datasetID string, commit types.Struct) error {
 	return ds.doCommit(datasetID, commit)
 }
 
-// doCommit manages concurrent access the single logical piece of mutable state: the current Root. doCommit is optimistic in that it is attempting to update head making the assumption that currentRootRef is the ref of the current head. The call to UpdateRoot below will return an 'ErrOptimisticLockFailed' error if that assumption fails (e.g. because of a race with another writer) and the entire algorithm must be tried again. This method will also fail and return an 'ErrMergeNeeded' error if the |commit| is not a descendent of the current dataset head
+// doCommit manages concurrent access the single logical piece of mutable state: the current Root. doCommit is optimistic in that it is attempting to update head making the assumption that currentRootRef is the hash of the current head. The call to UpdateRoot below will return an 'ErrOptimisticLockFailed' error if that assumption fails (e.g. because of a race with another writer) and the entire algorithm must be tried again. This method will also fail and return an 'ErrMergeNeeded' error if the |commit| is not a descendent of the current dataset head
 func (ds *databaseCommon) doCommit(datasetID string, commit types.Struct) error {
 	currentRootRef, currentDatasets := ds.getRootAndDatasets()
@@ -112,27 +112,27 @@ func (ds *databaseCommon) doCommit(datasetID string, commit types.Struct) error
 	return ds.tryUpdateRoot(currentDatasets, currentRootRef)
 }
 
-// doDelete manages concurrent access the single logical piece of mutable state: the current Root. doDelete is optimistic in that it is attempting to update head making the assumption that currentRootRef is the ref of the current head. The call to UpdateRoot below will return an 'ErrOptimisticLockFailed' error if that assumption fails (e.g. because of a race with another writer) and the entire algorithm must be tried again.
+// doDelete manages concurrent access the single logical piece of mutable state: the current Root. doDelete is optimistic in that it is attempting to update head making the assumption that currentRootRef is the hash of the current head. The call to UpdateRoot below will return an 'ErrOptimisticLockFailed' error if that assumption fails (e.g. because of a race with another writer) and the entire algorithm must be tried again.
 func (ds *databaseCommon) doDelete(datasetID string) error {
 	currentRootRef, currentDatasets := ds.getRootAndDatasets()
 	currentDatasets = currentDatasets.Remove(types.NewString(datasetID))
 	return ds.tryUpdateRoot(currentDatasets, currentRootRef)
 }
 
-func (ds *databaseCommon) getRootAndDatasets() (currentRootRef ref.Ref, currentDatasets types.Map) {
+func (ds *databaseCommon) getRootAndDatasets() (currentRootRef hash.Hash, currentDatasets types.Map) {
 	currentRootRef = ds.rt.Root()
 	currentDatasets = ds.Datasets()
 
-	if currentRootRef != currentDatasets.Ref() && !currentRootRef.IsEmpty() {
+	if currentRootRef != currentDatasets.Hash() && !currentRootRef.IsEmpty() {
 		// The root has been advanced.
 		currentDatasets = *ds.datasetsFromRef(currentRootRef)
 	}
 	return
 }
 
-func (ds *databaseCommon) tryUpdateRoot(currentDatasets types.Map, currentRootRef ref.Ref) (err error) {
+func (ds *databaseCommon) tryUpdateRoot(currentDatasets types.Map, currentRootRef hash.Hash) (err error) {
 	// TODO: This Commit will be orphaned if the UpdateRoot below fails
-	newRootRef := ds.WriteValue(currentDatasets).TargetRef()
+	newRootRef := ds.WriteValue(currentDatasets).TargetHash()
 	// If the root has been updated by another process in the short window since we read it, this call will fail. See issue #404
 	if !ds.rt.UpdateRoot(newRootRef, currentRootRef) {
 		err = ErrOptimisticLockFailed
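The doCommit/doDelete comments above define the retry contract: on ErrOptimisticLockFailed the whole read-modify-write must be re-run against the freshly advanced root. A caller-side sketch of that loop (ours, not part of this change):

```go
// commitWithRetry re-runs the optimistic commit until it either succeeds or
// fails for a reason other than losing the root race (e.g. ErrMergeNeeded).
func commitWithRetry(ds *databaseCommon, datasetID string, makeCommit func() types.Struct) error {
	for {
		err := ds.doCommit(datasetID, makeCommit())
		if err != ErrOptimisticLockFailed {
			return err // nil on success, or a genuine failure
		}
	}
}
```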
@@ -4,7 +4,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/attic-labs/noms/chunks"
|
||||
"github.com/attic-labs/noms/ref"
|
||||
"github.com/attic-labs/noms/hash"
|
||||
"github.com/attic-labs/noms/types"
|
||||
"github.com/stretchr/testify/suite"
|
||||
)
|
||||
@@ -57,8 +57,8 @@ func (suite *DatabaseSuite) TearDownTest() {
|
||||
|
||||
func (suite *DatabaseSuite) TestReadWriteCache() {
|
||||
var v types.Value = types.Bool(true)
|
||||
suite.NotEqual(ref.Ref{}, suite.ds.WriteValue(v))
|
||||
r := suite.ds.WriteValue(v).TargetRef()
|
||||
suite.NotEqual(hash.Hash{}, suite.ds.WriteValue(v))
|
||||
r := suite.ds.WriteValue(v).TargetHash()
|
||||
commit := NewCommit().Set(ValueField, v)
|
||||
newDs, err := suite.ds.Commit("foo", commit)
|
||||
suite.NoError(err)
|
||||
@@ -71,7 +71,7 @@ func (suite *DatabaseSuite) TestReadWriteCache() {
|
||||
func (suite *DatabaseSuite) TestReadWriteCachePersists() {
|
||||
var err error
|
||||
var v types.Value = types.Bool(true)
|
||||
suite.NotEqual(ref.Ref{}, suite.ds.WriteValue(v))
|
||||
suite.NotEqual(hash.Hash{}, suite.ds.WriteValue(v))
|
||||
r := suite.ds.WriteValue(v)
|
||||
commit := NewCommit().Set(ValueField, v)
|
||||
suite.ds, err = suite.ds.Commit("foo", commit)
|
||||
@@ -88,7 +88,7 @@ func (suite *DatabaseSuite) TestWriteRefToNonexistentValue() {
|
||||
}
|
||||
|
||||
func (suite *DatabaseSuite) TestTolerateUngettableRefs() {
|
||||
suite.Nil(suite.ds.ReadValue(ref.Ref{}))
|
||||
suite.Nil(suite.ds.ReadValue(hash.Hash{}))
|
||||
}
|
||||
|
||||
func (suite *DatabaseSuite) TestDatabaseCommit() {
|
||||
@@ -112,7 +112,7 @@ func (suite *DatabaseSuite) TestDatabaseCommit() {
|
||||
aCommit1 := ds2.Head(datasetID)
|
||||
suite.True(aCommit1.Get(ValueField).Equals(a))
|
||||
aRef1 := ds2.HeadRef(datasetID)
|
||||
suite.Equal(aCommit1.Ref(), aRef1.TargetRef())
|
||||
suite.Equal(aCommit1.Hash(), aRef1.TargetHash())
|
||||
suite.Equal(uint64(1), aRef1.Height())
|
||||
suite.ds = ds2
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@ import (
	"github.com/attic-labs/noms/chunks"
	"github.com/attic-labs/noms/constants"
	"github.com/attic-labs/noms/d"
	"github.com/attic-labs/noms/ref"
	"github.com/attic-labs/noms/hash"
	"github.com/attic-labs/noms/types"
	"github.com/julienschmidt/httprouter"
)
@@ -69,7 +69,7 @@ type httpDoer interface {
}

type writeRequest struct {
	hash ref.Ref
	hash hash.Hash
	hints types.Hints
}

@@ -101,7 +101,7 @@ func (bhcs *httpBatchStore) Close() (e error) {
	return
}

func (bhcs *httpBatchStore) Get(r ref.Ref) chunks.Chunk {
func (bhcs *httpBatchStore) Get(r hash.Hash) chunks.Chunk {
	pending := bhcs.unwrittenPuts.Get(r)
	if !pending.IsEmpty() {
		return pending
@@ -141,7 +141,7 @@ func (bhcs *httpBatchStore) sendGetRequests(req chunks.ReadRequest) {
	hashes := hashSet{}

	addReq := func(req chunks.ReadRequest) {
		hash := req.Ref()
		hash := req.Hash()
		batch[hash] = append(batch[hash], req.Outstanding())
		hashes.Insert(hash)
	}
@@ -202,7 +202,7 @@ func (bhcs *httpBatchStore) SchedulePut(c chunks.Chunk, refHeight uint64, hints
	}

	bhcs.requestWg.Add(1)
	bhcs.writeQueue <- writeRequest{c.Ref(), hints}
	bhcs.writeQueue <- writeRequest{c.Hash(), hints}
}

func (bhcs *httpBatchStore) batchPutRequests() {
@@ -308,18 +308,18 @@ func (bhcs *httpBatchStore) sendWriteRequests(hashes hashSet, hints types.Hints)
	}()
}

func (bhcs *httpBatchStore) Root() ref.Ref {
func (bhcs *httpBatchStore) Root() hash.Hash {
	// GET http://<host>/root. Response will be ref of root.
	res := bhcs.requestRoot("GET", ref.Ref{}, ref.Ref{})
	res := bhcs.requestRoot("GET", hash.Hash{}, hash.Hash{})
	defer closeResponse(res)

	d.Chk.Equal(http.StatusOK, res.StatusCode, "Unexpected response: %s", http.StatusText(res.StatusCode))
	data, err := ioutil.ReadAll(res.Body)
	d.Chk.NoError(err)
	return ref.Parse(string(data))
	return hash.Parse(string(data))
}

func (bhcs *httpBatchStore) UpdateRoot(current, last ref.Ref) bool {
func (bhcs *httpBatchStore) UpdateRoot(current, last hash.Hash) bool {
	// POST http://<host>/root?current=<ref>&last=<ref>. Response will be 200 on success, 409 if current is outdated.
	bhcs.Flush()
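// Illustrative sketch of the request described by the comment above,
// assuming a noms server at localhost:8000; this wiring is not part of the
// patch, and postRoot is a hypothetical helper.
package main

import (
	"net/http"
	"net/url"
)

func postRoot(current, last string) (bool, error) {
	u, err := url.Parse("http://localhost:8000/root")
	if err != nil {
		return false, err
	}
	q := u.Query()
	q.Set("current", current) // e.g. "sha1-" followed by 40 hex digits
	q.Set("last", last)
	u.RawQuery = q.Encode()
	res, err := http.Post(u.String(), "text/plain", nil)
	if err != nil {
		return false, err
	}
	defer res.Body.Close()
	// 200 means the root was swapped; 409 means current was outdated.
	return res.StatusCode == http.StatusOK, nil
}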

@@ -330,7 +330,7 @@ func (bhcs *httpBatchStore) UpdateRoot(current, last ref.Ref) bool {
	return res.StatusCode == http.StatusOK
}

func (bhcs *httpBatchStore) requestRoot(method string, current, last ref.Ref) *http.Response {
func (bhcs *httpBatchStore) requestRoot(method string, current, last hash.Hash) *http.Response {
	u := *bhcs.host
	u.Path = httprouter.CleanPath(bhcs.host.Path + constants.RootPath)
	if method == "POST" {

@@ -8,7 +8,7 @@ import (

	"github.com/attic-labs/noms/chunks"
	"github.com/attic-labs/noms/constants"
	"github.com/attic-labs/noms/ref"
	"github.com/attic-labs/noms/hash"
	"github.com/attic-labs/noms/types"
	"github.com/julienschmidt/httprouter"
	"github.com/stretchr/testify/suite"
@@ -118,8 +118,8 @@ func (suite *HTTPBatchStoreSuite) TestPutChunkWithHints() {
	l := types.NewList(types.NewRef(vals[0]), types.NewRef(vals[1]))

	suite.store.SchedulePut(types.EncodeValue(l, nil), 2, types.Hints{
		chnx[0].Ref(): struct{}{},
		chnx[1].Ref(): struct{}{},
		chnx[0].Hash(): struct{}{},
		chnx[1].Hash(): struct{}{},
	})
	suite.store.Flush()

@@ -146,7 +146,7 @@ func (b *backpressureCS) PutMany(chnx []chunks.Chunk) chunks.BackpressureError {

	bpe := make(chunks.BackpressureError, len(chnx)-b.tries)
	for i, c := range chnx[b.tries:] {
		bpe[i] = c.Ref()
		bpe[i] = c.Hash()
	}
	return bpe
}
@@ -174,19 +174,19 @@ func (suite *HTTPBatchStoreSuite) TestPutChunksBackpressure() {

func (suite *HTTPBatchStoreSuite) TestRoot() {
	c := chunks.NewChunk([]byte("abc"))
	suite.True(suite.cs.UpdateRoot(c.Ref(), ref.Ref{}))
	suite.Equal(c.Ref(), suite.store.Root())
	suite.True(suite.cs.UpdateRoot(c.Hash(), hash.Hash{}))
	suite.Equal(c.Hash(), suite.store.Root())
}

func (suite *HTTPBatchStoreSuite) TestUpdateRoot() {
	c := chunks.NewChunk([]byte("abc"))
	suite.True(suite.store.UpdateRoot(c.Ref(), ref.Ref{}))
	suite.Equal(c.Ref(), suite.cs.Root())
	suite.True(suite.store.UpdateRoot(c.Hash(), hash.Hash{}))
	suite.Equal(c.Hash(), suite.cs.Root())
}

func (suite *HTTPBatchStoreSuite) TestGet() {
	c := chunks.NewChunk([]byte("abc"))
	suite.cs.Put(c)
	got := suite.store.Get(c.Ref())
	suite.Equal(c.Ref(), got.Ref())
	got := suite.store.Get(c.Hash())
	suite.Equal(c.Hash(), got.Hash())
}

@@ -2,7 +2,7 @@ package datas

import (
	"github.com/attic-labs/noms/chunks"
	"github.com/attic-labs/noms/ref"
	"github.com/attic-labs/noms/hash"
	"github.com/attic-labs/noms/types"
)

@@ -37,7 +37,7 @@ func (lds *LocalDatabase) Delete(datasetID string) (Database, error) {
	}, err
}

func (lds *LocalDatabase) has(r ref.Ref) bool {
func (lds *LocalDatabase) has(r hash.Hash) bool {
	return lds.cch.Has(r)
}


@@ -10,7 +10,7 @@ import (
	"github.com/attic-labs/noms/chunks"
	"github.com/attic-labs/noms/constants"
	"github.com/attic-labs/noms/d"
	"github.com/attic-labs/noms/ref"
	"github.com/attic-labs/noms/hash"
	"github.com/attic-labs/noms/types"
	"github.com/julienschmidt/httprouter"
)
@@ -129,7 +129,7 @@ func (bhcs *notABatchSink) sendWriteRequests(chnx []chunks.Chunk) {
	gw := gzip.NewWriter(body)
	sz := chunks.NewSerializer(gw)
	for _, chunk := range chnx {
		hashes.Insert(chunk.Ref())
		hashes.Insert(chunk.Hash())
		sz.Put(chunk)
	}
	sz.Close()
@@ -151,10 +151,10 @@ func (bhcs *notABatchSink) sendWriteRequests(chnx []chunks.Chunk) {
	}()
}

func (bhcs *notABatchSink) Root() ref.Ref {
func (bhcs *notABatchSink) Root() hash.Hash {
	panic("Not Reached")
}

func (bhcs *notABatchSink) UpdateRoot(current, last ref.Ref) bool {
func (bhcs *notABatchSink) UpdateRoot(current, last hash.Hash) bool {
	panic("Not Reached")
}

@@ -4,7 +4,7 @@ import (
	"sync"

	"github.com/attic-labs/noms/chunks"
	"github.com/attic-labs/noms/ref"
	"github.com/attic-labs/noms/hash"
	"github.com/attic-labs/noms/types"
	"github.com/attic-labs/noms/walk"
)
@@ -12,21 +12,21 @@ import (
// CopyMissingChunksP copies to |sink| all chunks in source that are reachable from (and including) |r|, skipping chunks that |sink| already has
func CopyMissingChunksP(source Database, sink *LocalDatabase, sourceRef types.Ref, concurrency int) {
	stopCallback := func(r types.Ref) bool {
		return sink.has(r.TargetRef())
		return sink.has(r.TargetHash())
	}
	copyWorker(source, sink, sourceRef, stopCallback, concurrency)
}

// CopyReachableChunksP copies to |sink| all chunks reachable from (and including) |r|, but that are not in the subtree rooted at |exclude|
func CopyReachableChunksP(source, sink Database, sourceRef, exclude types.Ref, concurrency int) {
	excludeRefs := map[ref.Ref]bool{}
	excludeRefs := map[hash.Hash]bool{}

	if !exclude.TargetRef().IsEmpty() {
	if !exclude.TargetHash().IsEmpty() {
		mu := sync.Mutex{}
		excludeCallback := func(r types.Ref) bool {
			mu.Lock()
			defer mu.Unlock()
			excludeRefs[r.TargetRef()] = true
			excludeRefs[r.TargetHash()] = true
			return false
		}

@@ -34,7 +34,7 @@ func CopyReachableChunksP(source, sink Database, sourceRef, exclude types.Ref, c
	}

	stopCallback := func(r types.Ref) bool {
		return excludeRefs[r.TargetRef()]
		return excludeRefs[r.TargetHash()]
	}
	copyWorker(source, sink, sourceRef, stopCallback, concurrency)
}

@@ -10,7 +10,7 @@ import (

	"github.com/attic-labs/noms/chunks"
	"github.com/attic-labs/noms/d"
	"github.com/attic-labs/noms/ref"
	"github.com/attic-labs/noms/hash"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/opt"
@@ -28,7 +28,7 @@ func newOrderedChunkCache() *orderedChunkCache {
	d.Chk.NoError(err, "opening put cache in %s", dir)
	return &orderedChunkCache{
		orderedChunks: db,
		chunkIndex:    map[ref.Ref][]byte{},
		chunkIndex:    map[hash.Hash][]byte{},
		dbDir:         dir,
		mu:            &sync.RWMutex{},
	}
@@ -37,30 +37,30 @@ func newOrderedChunkCache() *orderedChunkCache {
// orderedChunkCache holds Chunks, allowing them to be retrieved by hash or enumerated in ref-height order.
type orderedChunkCache struct {
	orderedChunks *leveldb.DB
	chunkIndex    map[ref.Ref][]byte
	chunkIndex    map[hash.Hash][]byte
	dbDir         string
	mu            *sync.RWMutex
}

type hashSet map[ref.Ref]struct{}
type hashSet map[hash.Hash]struct{}

func (hs hashSet) Insert(hash ref.Ref) {
func (hs hashSet) Insert(hash hash.Hash) {
	hs[hash] = struct{}{}
}

func (hs hashSet) Has(hash ref.Ref) (has bool) {
func (hs hashSet) Has(hash hash.Hash) (has bool) {
	_, has = hs[hash]
	return
}

// Insert can be called from any goroutine to store c in the cache. If c is successfully added to the cache, Insert returns true. If c was already in the cache, Insert returns false.
func (p *orderedChunkCache) Insert(c chunks.Chunk, refHeight uint64) bool {
	hash := c.Ref()
	hash := c.Hash()
	dbKey, present := func() (dbKey []byte, present bool) {
		p.mu.Lock()
		defer p.mu.Unlock()
		if _, present = p.chunkIndex[hash]; !present {
			dbKey = toDbKey(refHeight, c.Ref())
			dbKey = toDbKey(refHeight, c.Hash())
			p.chunkIndex[hash] = dbKey
		}
		return
@@ -77,7 +77,7 @@ func (p *orderedChunkCache) Insert(c chunks.Chunk, refHeight uint64) bool {
	return false
}

func (p *orderedChunkCache) has(hash ref.Ref) (has bool) {
func (p *orderedChunkCache) has(hash hash.Hash) (has bool) {
	p.mu.RLock()
	defer p.mu.RUnlock()
	_, has = p.chunkIndex[hash]
@@ -85,7 +85,7 @@ func (p *orderedChunkCache) has(hash ref.Ref) (has bool) {
}

// Get can be called from any goroutine to retrieve the chunk referenced by hash. If the chunk is not present, Get returns the empty Chunk.
func (p *orderedChunkCache) Get(hash ref.Ref) chunks.Chunk {
func (p *orderedChunkCache) Get(hash hash.Hash) chunks.Chunk {
	// Don't use defer p.mu.RUnlock() here, because I want reading from orderedChunks NOT to be guarded by the lock. LevelDB handles its own goroutine-safety.
	p.mu.RLock()
	dbKey, ok := p.chunkIndex[hash]
@@ -118,7 +118,7 @@ func (p *orderedChunkCache) Clear(hashes hashSet) {
var uint64Size = binary.Size(uint64(0))

// toDbKey takes a refHeight and a hash and returns a binary key suitable for use with LevelDB. The default sort order used by LevelDB ensures that these keys (and their associated values) will be iterated in ref-height order.
func toDbKey(refHeight uint64, hash ref.Ref) []byte {
func toDbKey(refHeight uint64, hash hash.Hash) []byte {
	digest := hash.DigestSlice()
	buf := bytes.NewBuffer(make([]byte, 0, uint64Size+binary.Size(digest)))
	err := binary.Write(buf, binary.BigEndian, refHeight)
@@ -128,15 +128,15 @@ func toDbKey(refHeight uint64, hash ref.Ref) []byte {
	return buf.Bytes()
}

func fromDbKey(key []byte) (uint64, ref.Ref) {
func fromDbKey(key []byte) (uint64, hash.Hash) {
	refHeight := uint64(0)
	r := bytes.NewReader(key)
	err := binary.Read(r, binary.BigEndian, &refHeight)
	d.Chk.NoError(err)
	digest := ref.Sha1Digest{}
	digest := hash.Sha1Digest{}
	err = binary.Read(r, binary.BigEndian, &digest)
	d.Chk.NoError(err)
	return refHeight, ref.New(digest)
	return refHeight, hash.New(digest)
}
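// Self-contained sketch (not patch code) of why the big-endian prefix in
// toDbKey gives ref-height iteration order: LevelDB compares keys bytewise,
// and big-endian integers sort bytewise in numeric order.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func heightKey(h uint64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, h)
	return b
}

func main() {
	fmt.Println(bytes.Compare(heightKey(2), heightKey(256))) // -1: height 2 sorts first
	le2, le256 := make([]byte, 8), make([]byte, 8)
	binary.LittleEndian.PutUint64(le2, 2)
	binary.LittleEndian.PutUint64(le256, 256)
	fmt.Println(bytes.Compare(le2, le256)) // 1: little-endian would misorder 256 before 2
}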

// ExtractChunks can be called from any goroutine to write Chunks referenced by the given hashes to w. The chunks are ordered by ref-height. Chunks of the same height are written in an unspecified order, relative to one another.

@@ -7,7 +7,7 @@ import (
	"testing"

	"github.com/attic-labs/noms/chunks"
	"github.com/attic-labs/noms/ref"
	"github.com/attic-labs/noms/hash"
	"github.com/attic-labs/noms/types"
	"github.com/stretchr/testify/suite"
)
@@ -20,7 +20,7 @@ type LevelDBPutCacheSuite struct {
	suite.Suite
	cache  *orderedChunkCache
	values []types.Value
	chnx   map[ref.Ref]chunks.Chunk
	chnx   map[hash.Hash]chunks.Chunk
}

func (suite *LevelDBPutCacheSuite) SetupTest() {
@@ -32,9 +32,9 @@ func (suite *LevelDBPutCacheSuite) SetupTest() {
		types.NewString("jkl"),
		types.NewString("mno"),
	}
	suite.chnx = map[ref.Ref]chunks.Chunk{}
	suite.chnx = map[hash.Hash]chunks.Chunk{}
	for _, v := range suite.values {
		suite.chnx[v.Ref()] = types.EncodeValue(v, nil)
		suite.chnx[v.Hash()] = types.EncodeValue(v, nil)
	}
}

@@ -43,17 +43,17 @@ func (suite *LevelDBPutCacheSuite) TearDownTest() {
}

func (suite *LevelDBPutCacheSuite) TestAddTwice() {
	chunk := suite.chnx[suite.values[0].Ref()]
	chunk := suite.chnx[suite.values[0].Hash()]
	suite.True(suite.cache.Insert(chunk, 1))
	suite.False(suite.cache.Insert(chunk, 1))
}

func (suite *LevelDBPutCacheSuite) TestAddParallel() {
	hashes := make(chan ref.Ref)
	hashes := make(chan hash.Hash)
	for _, chunk := range suite.chnx {
		go func(c chunks.Chunk) {
			suite.cache.Insert(c, 1)
			hashes <- c.Ref()
			hashes <- c.Hash()
		}(chunk)
	}

@@ -72,15 +72,15 @@ func (suite *LevelDBPutCacheSuite) TestGetParallel() {
	}

	chunkChan := make(chan chunks.Chunk)
	for hash := range suite.chnx {
		go func(h ref.Ref) {
	for h := range suite.chnx {
		go func(h hash.Hash) {
			chunkChan <- suite.cache.Get(h)
		}(hash)
		}(h)
	}

	for i := 0; i < len(suite.values); i++ {
		c := <-chunkChan
		delete(suite.chnx, c.Ref())
		delete(suite.chnx, c.Hash())
	}
	close(chunkChan)
	suite.Len(suite.chnx, 0)
@@ -92,9 +92,9 @@ func (suite *LevelDBPutCacheSuite) TestClearParallel() {
	for i, v := range suite.values {
		suite.cache.Insert(types.EncodeValue(v, nil), 1)
		if i < keepIdx {
			toClear1.Insert(v.Ref())
			toClear1.Insert(v.Hash())
		} else if i > keepIdx {
			toClear2.Insert(v.Ref())
			toClear2.Insert(v.Hash())
		}
	}

@@ -111,10 +111,10 @@ func (suite *LevelDBPutCacheSuite) TestClearParallel() {
	wg.Wait()
	for i, v := range suite.values {
		if i == keepIdx {
			suite.True(suite.cache.has(v.Ref()))
			suite.True(suite.cache.has(v.Hash()))
			continue
		}
		suite.False(suite.cache.has(v.Ref()))
		suite.False(suite.cache.has(v.Hash()))
	}
}

@@ -131,7 +131,7 @@ func (suite *LevelDBPutCacheSuite) TestReaderSubset() {
	chunkChan := suite.extractChunks(toExtract)
	count := 0
	for c := range chunkChan {
		if suite.Contains(toExtract, c.Ref()) {
		if suite.Contains(toExtract, c.Hash()) {
			count++
		}
	}
@@ -150,14 +150,14 @@ func (suite *LevelDBPutCacheSuite) TestReaderSnapshot() {
	suite.cache.Clear(hashes)

	for c := range chunkChan {
		delete(suite.chnx, c.Ref())
		delete(suite.chnx, c.Hash())
	}
	suite.Len(suite.chnx, 0)
}

func (suite *LevelDBPutCacheSuite) TestExtractChunksOrder() {
	maxHeight := len(suite.chnx)
	orderedHashes := make(ref.RefSlice, maxHeight)
	orderedHashes := make(hash.HashSlice, maxHeight)
	toExtract := hashSet{}
	heights := rand.Perm(maxHeight)
	for hash, c := range suite.chnx {
@@ -169,7 +169,7 @@ func (suite *LevelDBPutCacheSuite) TestExtractChunksOrder() {

	chunkChan := suite.extractChunks(toExtract)
	for c := range chunkChan {
		suite.Equal(orderedHashes[0], c.Ref())
		suite.Equal(orderedHashes[0], c.Hash())
		orderedHashes = orderedHashes[1:]
	}
	suite.Len(orderedHashes, 0)

@@ -11,7 +11,7 @@ import (

	"github.com/attic-labs/noms/chunks"
	"github.com/attic-labs/noms/d"
	"github.com/attic-labs/noms/ref"
	"github.com/attic-labs/noms/hash"
	"github.com/attic-labs/noms/types"
)

@@ -67,7 +67,7 @@ func (wc wc) Close() error {
}

func HandleWriteValue(w http.ResponseWriter, req *http.Request, ps URLParams, cs chunks.ChunkStore) {
	hashes := ref.RefSlice{}
	hashes := hash.HashSlice{}
	err := d.Try(func() {
		d.Exp.Equal("POST", req.Method)

@@ -85,7 +85,7 @@ func HandleWriteValue(w http.ResponseWriter, req *http.Request, ps URLParams, cs
			}
			// If a previous Enqueue() errored, we still need to drain chunkChan
			// TODO: what about having DeserializeToChan take a 'done' channel to stop it?
			hashes = append(hashes, c.Ref())
			hashes = append(hashes, c.Hash())
		}
		if bpe == nil {
			bpe = vbs.Flush()
@@ -108,7 +108,7 @@ func HandleWriteValue(w http.ResponseWriter, req *http.Request, ps URLParams, cs
}

// Contents of the returned io.Reader are gzipped.
func buildWriteValueRequest(serializedChunks io.Reader, hints map[ref.Ref]struct{}) io.Reader {
func buildWriteValueRequest(serializedChunks io.Reader, hints map[hash.Hash]struct{}) io.Reader {
	body := &bytes.Buffer{}
	gw := gzip.NewWriter(body)
	serializeHints(gw, hints)
@@ -125,9 +125,9 @@ func HandleGetRefs(w http.ResponseWriter, req *http.Request, ps URLParams, cs ch
	refStrs := req.PostForm["ref"]
	d.Exp.True(len(refStrs) > 0)

	refs := make([]ref.Ref, len(refStrs))
	refs := make([]hash.Hash, len(refStrs))
	for idx, refStr := range refStrs {
		refs[idx] = ref.Parse(refStr)
		refs[idx] = hash.Parse(refStr)
	}

	w.Header().Add("Content-Type", "application/octet-stream")
@@ -150,7 +150,7 @@ func HandleGetRefs(w http.ResponseWriter, req *http.Request, ps URLParams, cs ch
	}
}

func buildGetRefsRequest(refs map[ref.Ref]struct{}) io.Reader {
func buildGetRefsRequest(refs map[hash.Hash]struct{}) io.Reader {
	values := &url.Values{}
	for r := range refs {
		values.Add("ref", r.String())
@@ -180,10 +180,10 @@ func HandleRootPost(w http.ResponseWriter, req *http.Request, ps URLParams, rt c
	params := req.URL.Query()
	tokens := params["last"]
	d.Exp.Len(tokens, 1)
	last := ref.Parse(tokens[0])
	last := hash.Parse(tokens[0])
	tokens = params["current"]
	d.Exp.Len(tokens, 1)
	current := ref.Parse(tokens[0])
	current := hash.Parse(tokens[0])

	if !rt.UpdateRoot(current, last) {
		w.WriteHeader(http.StatusConflict)

@@ -14,7 +14,7 @@ import (

	"github.com/attic-labs/noms/chunks"
	"github.com/attic-labs/noms/d"
	"github.com/attic-labs/noms/ref"
	"github.com/attic-labs/noms/hash"
	"github.com/attic-labs/noms/types"
	"github.com/stretchr/testify/assert"
)
@@ -30,14 +30,14 @@ func TestHandleWriteValue(t *testing.T) {
	)
	ds.WriteValue(l)

	hint := l.Ref()
	hint := l.Hash()
	newItem := types.NewEmptyBlob()
	itemChunk := types.EncodeValue(newItem, nil)
	l2 := l.Insert(1, types.NewRef(newItem))
	listChunk := types.EncodeValue(l2, nil)

	body := &bytes.Buffer{}
	serializeHints(body, map[ref.Ref]struct{}{hint: struct{}{}})
	serializeHints(body, map[hash.Hash]struct{}{hint: struct{}{}})
	sz := chunks.NewSerializer(body)
	sz.Put(itemChunk)
	sz.Put(listChunk)
@@ -48,7 +48,7 @@ func TestHandleWriteValue(t *testing.T) {

	if assert.Equal(http.StatusCreated, w.Code, "Handler error:\n%s", string(w.Body.Bytes())) {
		ds2 := NewDatabase(cs)
		v := ds2.ReadValue(l2.Ref())
		v := ds2.ReadValue(l2.Hash())
		if assert.NotNil(v) {
			assert.True(v.Equals(l2), "%+v != %+v", v, l2)
		}
@@ -66,14 +66,14 @@ func TestHandleWriteValueBackpressure(t *testing.T) {
	)
	ds.WriteValue(l)

	hint := l.Ref()
	hint := l.Hash()
	newItem := types.NewEmptyBlob()
	itemChunk := types.EncodeValue(newItem, nil)
	l2 := l.Insert(1, types.NewRef(newItem))
	listChunk := types.EncodeValue(l2, nil)

	body := &bytes.Buffer{}
	serializeHints(body, map[ref.Ref]struct{}{hint: struct{}{}})
	serializeHints(body, map[hash.Hash]struct{}{hint: struct{}{}})
	sz := chunks.NewSerializer(body)
	sz.Put(itemChunk)
	sz.Put(listChunk)
@@ -85,7 +85,7 @@ func TestHandleWriteValueBackpressure(t *testing.T) {
	if assert.Equal(httpStatusTooManyRequests, w.Code, "Handler error:\n%s", string(w.Body.Bytes())) {
		hashes := deserializeHashes(w.Body)
		assert.Len(hashes, 1)
		assert.Equal(l2.Ref(), hashes[0])
		assert.Equal(l2.Hash(), hashes[0])
	}
}

@@ -97,9 +97,9 @@ func TestBuildWriteValueRequest(t *testing.T) {
		chunks.NewChunk([]byte(input2)),
	}

	hints := map[ref.Ref]struct{}{
		ref.Parse("sha1-0000000000000000000000000000000000000002"): struct{}{},
		ref.Parse("sha1-0000000000000000000000000000000000000003"): struct{}{},
	hints := map[hash.Hash]struct{}{
		hash.Parse("sha1-0000000000000000000000000000000000000002"): struct{}{},
		hash.Parse("sha1-0000000000000000000000000000000000000003"): struct{}{},
	}
	compressed := buildWriteValueRequest(serializeChunks(chnx, assert), hints)
	gr, err := gzip.NewReader(compressed)
@@ -117,7 +117,7 @@ func TestBuildWriteValueRequest(t *testing.T) {
	chunkChan := make(chan chunks.Chunk, 16)
	go chunks.DeserializeToChan(gr, chunkChan)
	for c := range chunkChan {
		assert.Equal(chnx[0].Ref(), c.Ref())
		assert.Equal(chnx[0].Hash(), c.Hash())
		chnx = chnx[1:]
	}
	assert.Empty(chnx)
@@ -135,11 +135,11 @@ func serializeChunks(chnx []chunks.Chunk, assert *assert.Assertions) io.Reader {

func TestBuildGetRefsRequest(t *testing.T) {
	assert := assert.New(t)
	refs := map[ref.Ref]struct{}{
		ref.Parse("sha1-0000000000000000000000000000000000000002"): struct{}{},
		ref.Parse("sha1-0000000000000000000000000000000000000003"): struct{}{},
	hashes := map[hash.Hash]struct{}{
		hash.Parse("sha1-0000000000000000000000000000000000000002"): struct{}{},
		hash.Parse("sha1-0000000000000000000000000000000000000003"): struct{}{},
	}
	r := buildGetRefsRequest(refs)
	r := buildGetRefsRequest(hashes)
	b, err := ioutil.ReadAll(r)
	assert.NoError(err)

@@ -148,9 +148,9 @@ func TestBuildGetRefsRequest(t *testing.T) {
	assert.NotEmpty(urlValues)

	queryRefs := urlValues["ref"]
	assert.Len(queryRefs, len(refs))
	assert.Len(queryRefs, len(hashes))
	for _, r := range queryRefs {
		_, present := refs[ref.Parse(r)]
		_, present := hashes[hash.Parse(r)]
		assert.True(present, "Query contains %s, which is not in initial refs", r)
	}
}
@@ -166,7 +166,7 @@ func TestHandleGetRefs(t *testing.T) {
	err := cs.PutMany(chnx)
	assert.NoError(err)

	body := strings.NewReader(fmt.Sprintf("ref=%s&ref=%s", chnx[0].Ref(), chnx[1].Ref()))
	body := strings.NewReader(fmt.Sprintf("ref=%s&ref=%s", chnx[0].Hash(), chnx[1].Hash()))

	w := httptest.NewRecorder()
	HandleGetRefs(w,
@@ -181,7 +181,7 @@ func TestHandleGetRefs(t *testing.T) {
	chunkChan := make(chan chunks.Chunk)
	go chunks.DeserializeToChan(w.Body, chunkChan)
	for c := range chunkChan {
		assert.Equal(chnx[0].Ref(), c.Ref())
		assert.Equal(chnx[0].Hash(), c.Hash())
		chnx = chnx[1:]
	}
	assert.Empty(chnx)
@@ -193,14 +193,14 @@ func TestHandleGetRoot(t *testing.T) {
	cs := chunks.NewTestStore()
	c := chunks.NewChunk([]byte("abc"))
	cs.Put(c)
	assert.True(cs.UpdateRoot(c.Ref(), ref.Ref{}))
	assert.True(cs.UpdateRoot(c.Hash(), hash.Hash{}))

	w := httptest.NewRecorder()
	HandleRootGet(w, &http.Request{Method: "GET"}, params{}, cs)

	if assert.Equal(http.StatusOK, w.Code, "Handler error:\n%s", string(w.Body.Bytes())) {
		root := ref.Parse(string(w.Body.Bytes()))
		assert.Equal(c.Ref(), root)
		root := hash.Parse(string(w.Body.Bytes()))
		assert.Equal(c.Hash(), root)
	}
}

@@ -218,8 +218,8 @@ func TestHandlePostRoot(t *testing.T) {
	// First attempt should fail, as 'last' won't match.
	u := &url.URL{}
	queryParams := url.Values{}
	queryParams.Add("last", chnx[0].Ref().String())
	queryParams.Add("current", chnx[1].Ref().String())
	queryParams.Add("last", chnx[0].Hash().String())
	queryParams.Add("current", chnx[1].Hash().String())
	u.RawQuery = queryParams.Encode()

	w := httptest.NewRecorder()
@@ -227,7 +227,7 @@ func TestHandlePostRoot(t *testing.T) {
	assert.Equal(http.StatusConflict, w.Code, "Handler error:\n%s", string(w.Body.Bytes()))

	// Now, update the root manually to 'last' and try again.
	assert.True(cs.UpdateRoot(chnx[0].Ref(), ref.Ref{}))
	assert.True(cs.UpdateRoot(chnx[0].Hash(), hash.Hash{}))
	w = httptest.NewRecorder()
	HandleRootPost(w, &http.Request{URL: u, Method: "POST"}, params{}, cs)
	assert.Equal(http.StatusOK, w.Code, "Handler error:\n%s", string(w.Body.Bytes()))

@@ -7,7 +7,7 @@ import (
	"io"

	"github.com/attic-labs/noms/d"
	"github.com/attic-labs/noms/ref"
	"github.com/attic-labs/noms/hash"
	"github.com/attic-labs/noms/types"
)

@@ -19,7 +19,7 @@ func serializeHints(w io.Writer, hints types.Hints) {
	}
}

func serializeHashes(w io.Writer, hashes ref.RefSlice) {
func serializeHashes(w io.Writer, hashes hash.HashSlice) {
	err := binary.Write(w, binary.BigEndian, uint32(len(hashes))) // 4 billion hashes is probably absurd. Maybe this should be smaller?
	d.Chk.NoError(err)
	for _, r := range hashes {
@@ -27,7 +27,7 @@ func serializeHashes(w io.Writer, hashes ref.RefSlice) {
	}
}

func serializeHash(w io.Writer, hash ref.Ref) {
func serializeHash(w io.Writer, hash hash.Hash) {
	digest := hash.Digest()
	n, err := io.Copy(w, bytes.NewReader(digest[:]))
	d.Chk.NoError(err)
@@ -46,22 +46,22 @@ func deserializeHints(reader io.Reader) types.Hints {
	return hints
}

func deserializeHashes(reader io.Reader) ref.RefSlice {
func deserializeHashes(reader io.Reader) hash.HashSlice {
	numRefs := uint32(0)
	err := binary.Read(reader, binary.BigEndian, &numRefs)
	d.Chk.NoError(err)

	hashes := make(ref.RefSlice, numRefs)
	hashes := make(hash.HashSlice, numRefs)
	for i := range hashes {
		hashes[i] = deserializeHash(reader)
	}
	return hashes
}

func deserializeHash(reader io.Reader) ref.Ref {
	digest := ref.Sha1Digest{}
func deserializeHash(reader io.Reader) hash.Hash {
	digest := hash.Sha1Digest{}
	n, err := io.ReadFull(reader, digest[:])
	d.Chk.NoError(err)
	d.Chk.Equal(int(sha1.Size), n)
	return ref.New(digest)
	return hash.New(digest)
}
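// The hash wire format used by serializeHashes/deserializeHash above, in a
// runnable sketch (illustrative, not patch code): a 4-byte big-endian count,
// then that many raw 20-byte sha1 digests with no separators.
package main

import (
	"bytes"
	"crypto/sha1"
	"encoding/binary"
	"fmt"
)

func main() {
	digests := [][sha1.Size]byte{sha1.Sum([]byte("a")), sha1.Sum([]byte("b"))}
	buf := &bytes.Buffer{}
	binary.Write(buf, binary.BigEndian, uint32(len(digests))) // count prefix
	for _, d := range digests {
		buf.Write(d[:]) // raw digest bytes, back to back
	}
	fmt.Println(buf.Len()) // 4 + 2*20 = 44 bytes on the wire
}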

@@ -4,17 +4,17 @@ import (
	"bytes"
	"testing"

	"github.com/attic-labs/noms/ref"
	"github.com/attic-labs/noms/hash"
	"github.com/stretchr/testify/assert"
)

func TestHintRoundTrip(t *testing.T) {
	b := &bytes.Buffer{}
	input := map[ref.Ref]struct{}{
		ref.Parse("sha1-0000000000000000000000000000000000000000"): struct{}{},
		ref.Parse("sha1-0000000000000000000000000000000000000001"): struct{}{},
		ref.Parse("sha1-0000000000000000000000000000000000000002"): struct{}{},
		ref.Parse("sha1-0000000000000000000000000000000000000003"): struct{}{},
	input := map[hash.Hash]struct{}{
		hash.Parse("sha1-0000000000000000000000000000000000000000"): struct{}{},
		hash.Parse("sha1-0000000000000000000000000000000000000001"): struct{}{},
		hash.Parse("sha1-0000000000000000000000000000000000000002"): struct{}{},
		hash.Parse("sha1-0000000000000000000000000000000000000003"): struct{}{},
	}
	serializeHints(b, input)
	output := deserializeHints(b)

@@ -29,7 +29,7 @@ func (ds *Dataset) MaybeHead() (types.Struct, bool) {
	return ds.Store().MaybeHead(ds.id)
}

func (ds *Dataset) MaybeHeadRef() (types.Ref, bool) {
func (ds *Dataset) MaybeHeadHash() (types.Ref, bool) {
	return ds.Store().MaybeHeadRef(ds.id)
}

@@ -40,8 +40,8 @@ func (ds *Dataset) Head() types.Struct {
	return c
}

func (ds *Dataset) HeadRef() types.Ref {
	r, ok := ds.MaybeHeadRef()
func (ds *Dataset) HeadHash() types.Ref {
	r, ok := ds.MaybeHeadHash()
	d.Chk.True(ok, "Dataset \"%s\" does not exist", ds.id)
	return r
}
@@ -50,7 +50,7 @@ func (ds *Dataset) HeadRef() types.Ref {
// If the update cannot be performed, e.g., because of a conflict, Commit returns an 'ErrMergeNeeded' error and the current snapshot of the dataset so that the client can merge the changes and try again.
func (ds *Dataset) Commit(v types.Value) (Dataset, error) {
	p := types.NewSet()
	if headRef, ok := ds.MaybeHeadRef(); ok {
	if headRef, ok := ds.MaybeHeadHash(); ok {
		headRef.TargetValue(ds.Store()) // TODO: This is a hack to deconfuse the validation code, which doesn't hold onto validation state between commits.
		p = p.Insert(headRef)
	}
@@ -74,7 +74,7 @@ func (ds *Dataset) pull(source datas.Database, sourceRef types.Ref, concurrency
	sink := *ds

	sinkHeadRef := types.Ref{}
	if currentHeadRef, ok := sink.MaybeHeadRef(); ok {
	if currentHeadRef, ok := sink.MaybeHeadHash(); ok {
		sinkHeadRef = currentHeadRef
	}

@@ -96,7 +96,7 @@ func (ds *Dataset) pull(source datas.Database, sourceRef types.Ref, concurrency
}

func (ds *Dataset) validateRefAsCommit(r types.Ref) types.Struct {
	v := ds.store.ReadValue(r.TargetRef())
	v := ds.store.ReadValue(r.TargetHash())

	d.Exp.NotNil(v, "%v cannot be found", r)
	d.Exp.True(v.Type().Equals(datas.NewCommit().Type()), "Not a Commit: %+v", v)

@@ -1,4 +1,4 @@
package ref
package hash

import (
	"crypto/sha1"
@@ -12,51 +12,51 @@ import (
var (
	// In the future we will allow different digest types, so this will get more complicated. For now sha1 is fine.
	pattern   = regexp.MustCompile("^sha1-([0-9a-f]{40})$")
	emptyHash = Ref{}
	emptyHash = Hash{}
)

type Sha1Digest [sha1.Size]byte

type Ref struct {
type Hash struct {
	// In the future, we will also store the algorithm, and digest will thus probably have to be a slice (because it can vary in size)
	digest Sha1Digest
}

// Digest returns a *copy* of the digest that backs Ref.
func (r Ref) Digest() Sha1Digest {
// Digest returns a *copy* of the digest that backs Hash.
func (r Hash) Digest() Sha1Digest {
	return r.digest
}

func (r Ref) IsEmpty() bool {
func (r Hash) IsEmpty() bool {
	return r.digest == emptyHash.digest
}

// DigestSlice returns a slice of the digest that backs A NEW COPY of Ref, because the receiver of this method is not a pointer.
func (r Ref) DigestSlice() []byte {
// DigestSlice returns a slice of the digest that backs A NEW COPY of Hash, because the receiver of this method is not a pointer.
func (r Hash) DigestSlice() []byte {
	return r.digest[:]
}

func (r Ref) String() string {
func (r Hash) String() string {
	return fmt.Sprintf("sha1-%s", hex.EncodeToString(r.digest[:]))
}

func New(digest Sha1Digest) Ref {
	return Ref{digest}
func New(digest Sha1Digest) Hash {
	return Hash{digest}
}

func FromData(data []byte) Ref {
func FromData(data []byte) Hash {
	return New(sha1.Sum(data))
}

// FromSlice creates a new Ref backed by data, ensuring that data is an acceptable length.
func FromSlice(data []byte) Ref {
// FromSlice creates a new Hash backed by data, ensuring that data is an acceptable length.
func FromSlice(data []byte) Hash {
	d.Chk.Len(data, sha1.Size)
	digest := Sha1Digest{}
	copy(digest[:], data)
	return New(digest)
}

func MaybeParse(s string) (r Ref, ok bool) {
func MaybeParse(s string) (r Hash, ok bool) {
	match := pattern.FindStringSubmatch(s)
	if match == nil {
		return
@@ -72,15 +72,15 @@ func MaybeParse(s string) (r Ref, ok bool) {
	return
}

func Parse(s string) Ref {
func Parse(s string) Hash {
	r, ok := MaybeParse(s)
	if !ok {
		d.Exp.Fail(fmt.Sprintf("Could not parse ref: %s", s))
		d.Exp.Fail(fmt.Sprintf("Could not parse Hash: %s", s))
	}
	return r
}
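// Usage sketch for the parsing API above (illustrative, not patch code):
// Parse fails loudly via d.Exp on malformed input, while MaybeParse reports
// failure through its second return value.
package main

import (
	"fmt"

	"github.com/attic-labs/noms/hash"
)

func main() {
	h := hash.Parse("sha1-a9993e364706816aba3e25717850c26c9cd0d89d") // sha1("abc")
	fmt.Println(h.String(), h.IsEmpty())                             // prints the same string, then false
	if _, ok := hash.MaybeParse("not-a-hash"); !ok {
		fmt.Println("MaybeParse rejects malformed strings without panicking")
	}
}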

func (r Ref) Less(other Ref) bool {
func (r Hash) Less(other Hash) bool {
	d1, d2 := r.digest, other.digest
	for k := 0; k < len(d1); k++ {
		b1, b2 := d1[k], d2[k]
@@ -94,6 +94,6 @@ func (r Ref) Less(other Ref) bool {
	return false
}

func (r Ref) Greater(other Ref) bool {
func (r Hash) Greater(other Hash) bool {
	return !r.Less(other) && r != other
}
@@ -1,20 +1,20 @@
package ref
package hash

type RefSlice []Ref
type HashSlice []Hash

func (rs RefSlice) Len() int {
func (rs HashSlice) Len() int {
	return len(rs)
}

func (rs RefSlice) Less(i, j int) bool {
func (rs HashSlice) Less(i, j int) bool {
	return rs[i].Less(rs[j])
}

func (rs RefSlice) Swap(i, j int) {
func (rs HashSlice) Swap(i, j int) {
	rs[i], rs[j] = rs[j], rs[i]
}

func (rs RefSlice) Equals(other RefSlice) bool {
func (rs HashSlice) Equals(other HashSlice) bool {
	if len(rs) != len(other) {
		return false
	}
@@ -1,4 +1,4 @@
package ref
package hash

import (
	"sort"
@@ -7,10 +7,10 @@ import (
	"github.com/stretchr/testify/assert"
)

func TestRefSliceSort(t *testing.T) {
func TestHashSliceSort(t *testing.T) {
	assert := assert.New(t)

	rs := RefSlice{}
	rs := HashSlice{}
	for i := 1; i <= 3; i++ {
		for j := 1; j <= 3; j++ {
			d := Sha1Digest{}
@@ -21,7 +21,7 @@ func TestRefSliceSort(t *testing.T) {
		}
	}

	rs2 := RefSlice(make([]Ref, len(rs)))
	rs2 := HashSlice(make([]Hash, len(rs)))
	copy(rs2, rs)
	sort.Sort(sort.Reverse(rs2))
	assert.False(rs.Equals(rs2))
@@ -1,4 +1,4 @@
package ref
package hash

import (
	"testing"
@@ -95,7 +95,7 @@ func TestFromData(t *testing.T) {
}

func TestIsEmpty(t *testing.T) {
	r1 := Ref{}
	r1 := Hash{}
	assert.True(t, r1.IsEmpty())

	r2 := Parse("sha1-0000000000000000000000000000000000000000")
@@ -116,7 +116,7 @@ func TestLess(t *testing.T) {
	assert.False(r2.Less(r1))
	assert.False(r2.Less(r2))

	r0 := Ref{}
	r0 := Hash{}
	assert.False(r0.Less(r0))
	assert.True(r0.Less(r2))
	assert.False(r2.Less(r0))
@@ -133,7 +133,7 @@ func TestGreater(t *testing.T) {
	assert.True(r2.Greater(r1))
	assert.False(r2.Greater(r2))

	r0 := Ref{}
	r0 := Hash{}
	assert.False(r0.Greater(r0))
	assert.False(r0.Greater(r2))
	assert.True(r2.Greater(r0))
@@ -4,13 +4,13 @@ import (
	"io"

	"github.com/attic-labs/noms/chunks"
	"github.com/attic-labs/noms/ref"
	"github.com/attic-labs/noms/hash"
)

// BatchStore provides an interface similar to chunks.ChunkStore, but batch-oriented. Instead of Put(), it provides SchedulePut(), which enqueues a Chunk to be sent at a possibly later time.
type BatchStore interface {
	// Get returns from the store the Value Chunk by r. If r is absent from the store, chunks.EmptyChunk is returned.
	Get(r ref.Ref) chunks.Chunk
	Get(r hash.Hash) chunks.Chunk

	// SchedulePut enqueues a write for the Chunk c with the given refHeight. Typically, the Value which was encoded to provide c can also be queried for its refHeight. The call may or may not block until c is persisted. The provided hints are used to assist in validation. Validation requires checking that all refs embedded in c are themselves valid, which could naively be done by resolving each one. Instead, hints provides a (smaller) set of refs that point to Chunks that themselves contain many of c's refs. Thus, by checking only the hinted Chunks, c can be validated with fewer read operations.
	// c may or may not be persisted when Put() returns, but is guaranteed to be persistent after a call to Flush() or Close().
@@ -22,7 +22,7 @@ type BatchStore interface {
}

// Hints are a set of hashes that should be used to speed up the validation of one or more Chunks.
type Hints map[ref.Ref]struct{}
type Hints map[hash.Hash]struct{}
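// Illustrative use of Hints with SchedulePut, per the interface comment
// above; putWithHint, the refHeight of 2, and the parent chunk are all
// hypothetical, not code from this patch.
package main

import (
	"github.com/attic-labs/noms/chunks"
	"github.com/attic-labs/noms/types"
)

func putWithHint(bs types.BatchStore, c, parent chunks.Chunk) {
	// parent is an already-validated chunk that embeds many of c's refs,
	// so the server can validate c by reading parent instead of each ref.
	hints := types.Hints{parent.Hash(): struct{}{}}
	bs.SchedulePut(c, 2, hints) // refHeight 2 is an assumed example value
	bs.Flush()                  // c is only guaranteed persistent after Flush or Close
}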

// BatchStoreAdaptor provides a naive implementation of BatchStore should only be used with ChunkStores that can Put relatively quickly. It provides no actual batching or validation. Its intended use is for adapting a ChunkStore for use in something that requires a BatchStore.
type BatchStoreAdaptor struct {
@@ -35,8 +35,8 @@ func NewBatchStoreAdaptor(cs chunks.ChunkStore) BatchStore {
}

// Get simply proxies to the backing ChunkStore
func (lbs *BatchStoreAdaptor) Get(ref ref.Ref) chunks.Chunk {
	return lbs.cs.Get(ref)
func (lbs *BatchStoreAdaptor) Get(h hash.Hash) chunks.Chunk {
	return lbs.cs.Get(h)
}

// SchedulePut simply calls Put on the underlying ChunkStore, and ignores hints.

@@ -6,7 +6,7 @@ import (
	"io"

	"github.com/attic-labs/noms/d"
	"github.com/attic-labs/noms/ref"
	"github.com/attic-labs/noms/hash"
)

const (
@@ -21,15 +21,15 @@ var RefOfBlobType = MakeRefType(BlobType)
// Blob represents a list of Blobs.
type Blob struct {
	seq indexedSequence
	ref *ref.Ref
	h   *hash.Hash
}

func newBlob(seq indexedSequence) Blob {
	return Blob{seq, &ref.Ref{}}
	return Blob{seq, &hash.Hash{}}
}

func NewEmptyBlob() Blob {
	return Blob{newBlobLeafSequence(nil, []byte{}), &ref.Ref{}}
	return Blob{newBlobLeafSequence(nil, []byte{}), &hash.Hash{}}
}

// BUG 155 - Should provide Write... Maybe even have Blob implement ReadWriteSeeker
@@ -53,15 +53,15 @@ func (b Blob) sequence() sequence {

// Value interface
func (b Blob) Equals(other Value) bool {
	return other != nil && b.Ref() == other.Ref()
	return other != nil && b.Hash() == other.Hash()
}

func (b Blob) Less(other Value) bool {
	return valueLess(b, other)
}

func (b Blob) Ref() ref.Ref {
	return EnsureRef(b.ref, b)
func (b Blob) Hash() hash.Hash {
	return EnsureRef(b.h, b)
}

func (b Blob) ChildValues() []Value {

@@ -1,7 +1,7 @@
package types

import (
	"github.com/attic-labs/noms/ref"
	"github.com/attic-labs/noms/hash"
)

type Bool bool
@@ -18,7 +18,7 @@ func (v Bool) Less(other Value) bool {
	return true
}

func (v Bool) Ref() ref.Ref {
func (v Bool) Hash() hash.Hash {
	return getRef(v)
}


@@ -19,8 +19,8 @@ type collectionTestSuite struct {
type validateFn func(v2 Collection) bool
type deltaFn func() Collection

func (suite *collectionTestSuite) TestRef() {
	suite.Equal(suite.expectRef, suite.col.Ref().String())
func (suite *collectionTestSuite) TestHash() {
	suite.Equal(suite.expectRef, suite.col.Hash().String())
}

func (suite *collectionTestSuite) TestType() {
@@ -50,7 +50,7 @@ func (suite *collectionTestSuite) TestChunkCountAndType() {
func (suite *collectionTestSuite) TestRoundTripAndValidate() {
	vs := NewTestValueStore()
	r := vs.WriteValue(suite.col)
	v2 := vs.ReadValue(r.TargetRef()).(Collection)
	v2 := vs.ReadValue(r.TargetHash()).(Collection)
	suite.True(v2.Equals(suite.col))
	suite.True(suite.col.Equals(v2))
	suite.True(suite.validate(v2))

@@ -8,7 +8,7 @@ import (
	"strings"

	"github.com/attic-labs/noms/d"
	"github.com/attic-labs/noms/ref"
	"github.com/attic-labs/noms/hash"
)

func fromTypedEncodeable(i []interface{}, vr ValueReader) Value {
@@ -79,9 +79,9 @@ func (r *jsonArrayReader) readKind() NomsKind {
}

func (r *jsonArrayReader) readRef(t *Type) Ref {
	ref := ref.Parse(r.readString())
	h := hash.Parse(r.readString())
	height := r.readUint()
	return constructRef(t, ref, height)
	return constructRef(t, h, height)
}

func (r *jsonArrayReader) readType(parentStructTypes []*Type) *Type {
@@ -156,10 +156,10 @@ func (r *jsonArrayReader) readMapLeafSequence(t *Type) orderedSequence {
func (r *jsonArrayReader) readMetaSequence() metaSequenceData {
	data := metaSequenceData{}
	for !r.atEnd() {
		ref := r.readValue().(Ref)
		h := r.readValue().(Ref)
		v := r.readValue()
		numLeaves := uint64(r.readUint())
		data = append(data, newMetaTuple(v, nil, ref, numLeaves))
		data = append(data, newMetaTuple(v, nil, h, numLeaves))
	}

	return data

@@ -10,7 +10,7 @@ import (
	"testing"

	"github.com/attic-labs/noms/d"
	"github.com/attic-labs/noms/ref"
	"github.com/attic-labs/noms/hash"
	"github.com/stretchr/testify/assert"
)

@@ -181,7 +181,7 @@ func TestReadCompoundList(t *testing.T) {
		RefKind, ListKind, NumberKind, "%s", "1", NumberKind, "1", "1",
		RefKind, ListKind, NumberKind, "%s", "1", NumberKind, "4", "4"
		]
	]`, list1.Ref(), list2.Ref())
	]`, list1.Hash(), list2.Hash())
	r := newJSONArrayReader(a, cs)
	l := r.readValue()

@@ -204,7 +204,7 @@ func TestReadCompoundSet(t *testing.T) {
		RefKind, SetKind, NumberKind, "%s", "1", NumberKind, "1", "2",
		RefKind, SetKind, NumberKind, "%s", "1", NumberKind, "4", "3"
		]
	]`, set1.Ref(), set2.Ref())
	]`, set1.Hash(), set2.Hash())
	r := newJSONArrayReader(a, cs)
	l := r.readValue()

@@ -240,9 +240,9 @@ func TestReadCompoundBlob(t *testing.T) {
	cs := NewTestValueStore()

	// Arbitrary valid refs.
	r1 := Number(1).Ref()
	r2 := Number(2).Ref()
	r3 := Number(3).Ref()
	r1 := Number(1).Hash()
	r2 := Number(2).Hash()
	r3 := Number(3).Hash()
	a := parseJSON(`[
		BlobKind, true, [
			RefKind, BlobKind, "%s", "1", NumberKind, "20", "20",
@@ -262,7 +262,7 @@ func TestReadCompoundBlob(t *testing.T) {
	}, cs))

	assert.True(m.Type().Equals(m2.Type()))
	assert.Equal(m.Ref().String(), m2.Ref().String())
	assert.Equal(m.Hash().String(), m2.Hash().String())
}

func TestReadStruct(t *testing.T) {
@@ -342,7 +342,7 @@ func TestReadRef(t *testing.T) {
	assert := assert.New(t)
	cs := NewTestValueStore()

	r := ref.Parse("sha1-a9993e364706816aba3e25717850c26c9cd0d89d")
	r := hash.Parse("sha1-a9993e364706816aba3e25717850c26c9cd0d89d")
	a := parseJSON(`[RefKind, NumberKind, "%s", "42"]`, r.String())
	reader := newJSONArrayReader(a, cs)
	v := reader.readValue()

@@ -103,7 +103,7 @@ func (w *hrsWriter) Write(v Value) {
		w.write("}")

	case RefKind:
		w.write(v.(Ref).TargetRef().String())
		w.write(v.(Ref).TargetHash().String())

	case SetKind:
		w.write("{")

@@ -70,7 +70,7 @@ func (w *jsonArrayWriter) toArray() []interface{} {
}

func (w *jsonArrayWriter) writeRef(r Ref) {
	w.write(r.TargetRef().String())
	w.write(r.TargetHash().String())
	w.writeUint(r.Height())
}

@@ -115,7 +115,7 @@ func (w *jsonArrayWriter) maybeWriteMetaSequence(seq sequence, tr *Type) bool {
			// Write unwritten chunked sequences. Chunks are lazily written so that intermediate chunked structures like NewList().Append(x).Append(y) don't cause unnecessary churn.
			w.vw.WriteValue(tuple.child)
		}
		w2.writeValue(tuple.ChildRef())
		w2.writeValue(tuple.ChildHash())
		w2.writeValue(tuple.value)
		w2.writeUint(tuple.numLeaves)
	}

@@ -4,7 +4,7 @@ import (
	"bytes"
	"testing"

	"github.com/attic-labs/noms/ref"
	"github.com/attic-labs/noms/hash"
	"github.com/stretchr/testify/assert"
)

@@ -107,7 +107,7 @@ func TestWriteMapOfMap(t *testing.T) {

	w := newJSONArrayWriter(NewTestValueStore())
	w.writeValue(v)
	// the order of the elements is based on the ref of the value.
	// the order of the elements is based on the hash of the value.
	assert.EqualValues([]interface{}{MapKind, MapKind, StringKind, NumberKind, SetKind, BoolKind, false, []interface{}{
		MapKind, StringKind, NumberKind, false, []interface{}{StringKind, "a", NumberKind, "0"},
		SetKind, BoolKind, false, []interface{}{BoolKind, true}}}, w.toArray())
@@ -116,9 +116,9 @@ func TestWriteMapOfMap(t *testing.T) {
func TestWriteCompoundBlob(t *testing.T) {
	assert := assert.New(t)

	r1 := ref.Parse("sha1-0000000000000000000000000000000000000001")
	r2 := ref.Parse("sha1-0000000000000000000000000000000000000002")
	r3 := ref.Parse("sha1-0000000000000000000000000000000000000003")
	r1 := hash.Parse("sha1-0000000000000000000000000000000000000001")
	r2 := hash.Parse("sha1-0000000000000000000000000000000000000002")
	r3 := hash.Parse("sha1-0000000000000000000000000000000000000003")

	v := newBlob(newBlobMetaSequence([]metaTuple{
		newMetaTuple(Number(20), nil, constructRef(RefOfBlobType, r1, 11), 20),
@@ -128,7 +128,7 @@ func TestWriteCompoundBlob(t *testing.T) {
	w := newJSONArrayWriter(NewTestValueStore())
	w.writeValue(v)

	// the order of the elements is based on the ref of the value.
	// the order of the elements is based on the hash of the value.
	assert.EqualValues([]interface{}{
		BlobKind, true, []interface{}{
			RefKind, BlobKind, r1.String(), "11", NumberKind, "20", "20",
@@ -216,8 +216,8 @@ func TestWriteCompoundList(t *testing.T) {
	w.writeValue(cl)
	assert.EqualValues([]interface{}{
		ListKind, NumberKind, true, []interface{}{
			RefKind, ListKind, NumberKind, list1.Ref().String(), "1", NumberKind, "1", "1",
			RefKind, ListKind, NumberKind, list2.Ref().String(), "1", NumberKind, "4", "4",
			RefKind, ListKind, NumberKind, list1.Hash().String(), "1", NumberKind, "1", "1",
			RefKind, ListKind, NumberKind, list2.Hash().String(), "1", NumberKind, "4", "4",
		},
	}, w.toArray())
}
@@ -236,8 +236,8 @@ func TestWriteCompoundSet(t *testing.T) {
	w.writeValue(cl)
	assert.EqualValues([]interface{}{
		SetKind, NumberKind, true, []interface{}{
			RefKind, SetKind, NumberKind, set1.Ref().String(), "1", NumberKind, "1", "2",
			RefKind, SetKind, NumberKind, set2.Ref().String(), "1", NumberKind, "4", "3",
			RefKind, SetKind, NumberKind, set1.Hash().String(), "1", NumberKind, "1", "2",
			RefKind, SetKind, NumberKind, set2.Hash().String(), "1", NumberKind, "4", "3",
		},
	}, w.toArray())
}
@@ -301,7 +301,7 @@ func TestWriteRef(t *testing.T) {
	assert := assert.New(t)

	typ := MakeRefType(NumberType)
	r := ref.Parse("sha1-0123456789abcdef0123456789abcdef01234567")
	r := hash.Parse("sha1-0123456789abcdef0123456789abcdef01234567")
	v := constructRef(typ, r, 4)

	w := newJSONArrayWriter(NewTestValueStore())

@@ -1,21 +1,21 @@
package types

import "github.com/attic-labs/noms/ref"
import "github.com/attic-labs/noms/hash"

var getRefOverride func(v Value) ref.Ref
var getRefOverride func(v Value) hash.Hash

func getRef(v Value) ref.Ref {
func getRef(v Value) hash.Hash {
	if getRefOverride != nil {
		return getRefOverride(v)
	}
	return getRefNoOverride(v)
}

func getRefNoOverride(v Value) ref.Ref {
	return EncodeValue(v, nil).Ref()
func getRefNoOverride(v Value) hash.Hash {
	return EncodeValue(v, nil).Hash()
}

func EnsureRef(r *ref.Ref, v Value) ref.Ref {
func EnsureRef(r *hash.Hash, v Value) hash.Hash {
	if r.IsEmpty() {
		*r = getRef(v)
	}
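// The lazy-hash memoization pattern implemented by EnsureRef above, in a
// self-contained toy (illustrative, not patch code): each value carries a
// pointer to a cache slot, so the expensive encode-and-digest runs at most
// once per value.
package main

import "fmt"

func main() {
	calls := 0
	compute := func() string { calls++; return "sha1-..." } // stand-in for getRef
	var cached string                                       // stand-in for *hash.Hash
	ensure := func() string {
		if cached == "" { // stand-in for r.IsEmpty()
			cached = compute()
		}
		return cached
	}
	ensure()
	ensure()
	fmt.Println(calls) // 1: the digest was computed once, then reused
}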

@@ -4,14 +4,14 @@ import (
	"fmt"
	"testing"

	"github.com/attic-labs/noms/ref"
	"github.com/attic-labs/noms/hash"
	"github.com/stretchr/testify/assert"
)

func TestGetRef(t *testing.T) {
	assert := assert.New(t)
	input := fmt.Sprintf("t [%d,false]", BoolKind)
	expected := ref.FromData([]byte(input))
	expected := hash.FromData([]byte(input))
	actual := getRef(Bool(false))
	assert.Equal(expected, actual)
}
@@ -20,13 +20,13 @@ func TestEnsureRef(t *testing.T) {
	assert := assert.New(t)
	vs := NewTestValueStore()
	count := byte(1)
	mockGetRef := func(v Value) ref.Ref {
		d := ref.Sha1Digest{}
	mockGetRef := func(v Value) hash.Hash {
		d := hash.Sha1Digest{}
		d[0] = count
		count++
		return ref.New(d)
		return hash.New(d)
	}
	testRef := func(r ref.Ref, expected byte) {
	testRef := func(r hash.Hash, expected byte) {
		d := r.Digest()
		assert.Equal(expected, d[0])
		for i := 1; i < len(d); i++ {
@@ -66,7 +66,7 @@ func TestEnsureRef(t *testing.T) {
	}
	for i := 0; i < 2; i++ {
		for j, v := range values {
			testRef(v.Ref(), byte(j+1))
			testRef(v.Hash(), byte(j+1))
		}
	}

@@ -77,7 +77,7 @@ func TestEnsureRef(t *testing.T) {
	}
	for i := 0; i < 2; i++ {
		for j, v := range values {
			testRef(v.Ref(), byte(i*len(values)+(j+1)))
			testRef(v.Hash(), byte(i*len(values)+(j+1)))
		}
	}
}

@@ -35,7 +35,7 @@ func TestIncrementalLoadList(t *testing.T) {
	vs := newLocalValueStore(cs)

	expected := NewList(testVals...)
	ref := vs.WriteValue(expected).TargetRef()
	ref := vs.WriteValue(expected).TargetHash()

	actualVar := vs.ReadValue(ref)
	actual := actualVar.(List)
@@ -63,7 +63,7 @@ func SkipTestIncrementalLoadSet(t *testing.T) {
	vs := newLocalValueStore(cs)

	expected := NewSet(testVals...)
	ref := vs.WriteValue(expected).TargetRef()
	ref := vs.WriteValue(expected).TargetHash()

	actualVar := vs.ReadValue(ref)
	actual := actualVar.(Set)
@@ -83,7 +83,7 @@ func SkipTestIncrementalLoadMap(t *testing.T) {
	vs := newLocalValueStore(cs)

	expected := NewMap(testVals...)
	ref := vs.WriteValue(expected).TargetRef()
	ref := vs.WriteValue(expected).TargetHash()

	actualVar := vs.ReadValue(ref)
	actual := actualVar.(Map)
@@ -108,7 +108,7 @@ func SkipTestIncrementalAddRef(t *testing.T) {

	expected := NewList(ref)
	ref = vs.WriteValue(expected)
	actualVar := vs.ReadValue(ref.TargetRef())
	actualVar := vs.ReadValue(ref.TargetHash())

	assert.Equal(1, cs.Reads)
	assert.True(expected.Equals(actualVar))

@@ -21,7 +21,7 @@ func newListMetaSequence(tuples metaSequenceData, vr ValueReader) indexedMetaSeq
	ts := make([]*Type, len(tuples))
	for i, mt := range tuples {
		// Ref<List<T>>
		ts[i] = mt.ChildRef().Type().Desc.(CompoundDesc).ElemTypes[0].Desc.(CompoundDesc).ElemTypes[0]
		ts[i] = mt.ChildHash().Type().Desc.(CompoundDesc).ElemTypes[0].Desc.(CompoundDesc).ElemTypes[0]
	}
	t := MakeListType(MakeUnionType(ts...))
	return newIndexedMetaSequence(tuples, t, vr)
@@ -94,7 +94,7 @@ func advanceCursorToOffset(cur *sequenceCursor, idx uint64) uint64 {

func newIndexedMetaSequenceBoundaryChecker() boundaryChecker {
	return newBuzHashBoundaryChecker(objectWindowSize, sha1.Size, objectPattern, func(item sequenceItem) []byte {
		digest := item.(metaTuple).ChildRef().TargetRef().Digest()
		digest := item.(metaTuple).ChildHash().TargetHash().Digest()
		return digest[:]
	})
}
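// A toy version of the content-defined chunk boundary rule used above
// (illustrative; the real code uses a buzhash over a window of item
// digests): feed bytes into a rolling hash and cut a boundary whenever the
// hash matches a fixed bit pattern, so boundaries depend only on nearby
// content and stay stable when distant items change.
package main

import "fmt"

func main() {
	const pattern = 1<<12 - 1 // assumption: ~1/4096 boundary probability
	var h uint32
	boundaries := 0
	for i := 0; i < 100000; i++ {
		b := byte(uint32(i) * 2654435761 >> 24) // stand-in for digest bytes
		h = h<<1 ^ uint32(b)                    // toy rolling update, not real buzhash
		if h&pattern == pattern {
			boundaries++
		}
	}
	fmt.Println(boundaries) // roughly 100000/4096 ≈ 24 for a well-mixed hash
}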

@@ -5,6 +5,6 @@ func valueLess(v1, v2 Value) bool {
	case BoolKind, NumberKind, StringKind:
		return false
	default:
		return v1.Ref().Less(v2.Ref())
		return v1.Hash().Less(v2.Hash())
	}
}

@@ -4,7 +4,7 @@ import (
	"crypto/sha1"

	"github.com/attic-labs/noms/d"
	"github.com/attic-labs/noms/ref"
	"github.com/attic-labs/noms/hash"
)

const (
@@ -15,11 +15,11 @@ const (

type List struct {
	seq indexedSequence
	ref *ref.Ref
	h   *hash.Hash
}

func newList(seq indexedSequence) List {
	return List{seq, &ref.Ref{}}
	return List{seq, &hash.Hash{}}
}

// NewList creates a new List where the type is computed from the elements in the list, populated with values, chunking if and when needed.
@@ -60,15 +60,15 @@ func (l List) sequence() sequence {

// Value interface
func (l List) Equals(other Value) bool {
	return other != nil && l.Ref() == other.Ref()
	return other != nil && l.Hash() == other.Hash()
}

func (l List) Less(other Value) bool {
	return valueLess(l, other)
}

func (l List) Ref() ref.Ref {
	return EnsureRef(l.ref, l)
func (l List) Hash() hash.Hash {
	return EnsureRef(l.h, l)
}

func (l List) ChildValues() (values []Value) {
@@ -182,7 +182,7 @@ func (l List) IterAll(f listIterAllFunc) {

func newListLeafBoundaryChecker() boundaryChecker {
	return newBuzHashBoundaryChecker(listWindowSize, sha1.Size, listPattern, func(item sequenceItem) []byte {
		digest := item.(Value).Ref().Digest()
		digest := item.(Value).Hash().Digest()
		return digest[:]
	})
}

@@ -552,7 +552,7 @@ func TestListFirstNNumbers(t *testing.T) {

	nums := firstNNumbers(testListSize)
	s := NewList(nums...)
	assert.Equal("sha1-aa1605484d993e89dbc0431acb9f2478282f9d94", s.Ref().String())
	assert.Equal("sha1-aa1605484d993e89dbc0431acb9f2478282f9d94", s.Hash().String())
}

func TestListRefOfStructFirstNNumbers(t *testing.T) {
@@ -574,7 +574,7 @@ func TestListRefOfStructFirstNNumbers(t *testing.T) {

	nums := firstNNumbers(testListSize)
	s := NewList(nums...)
	assert.Equal("sha1-2e79d54322aa793d0e8d48380a28927a257a141a", s.Ref().String())
	assert.Equal("sha1-2e79d54322aa793d0e8d48380a28927a257a141a", s.Hash().String())
}

func TestListModifyAfterRead(t *testing.T) {
@@ -583,7 +583,7 @@ func TestListModifyAfterRead(t *testing.T) {

	list := getTestList().toList()
	// Drop chunk values.
	list = vs.ReadValue(vs.WriteValue(list).TargetRef()).(List)
	list = vs.ReadValue(vs.WriteValue(list).TargetHash()).(List)
	// Modify/query. Once upon a time this would crash.
	llen := list.Len()
	z := list.Get(0)

14 types/map.go
@@ -5,7 +5,7 @@ import (
"sort"

"github.com/attic-labs/noms/d"
"github.com/attic-labs/noms/ref"
"github.com/attic-labs/noms/hash"
)

const (
@@ -15,11 +15,11 @@ const (

type Map struct {
seq orderedSequence
ref *ref.Ref
h *hash.Hash
}

func newMap(seq orderedSequence) Map {
return Map{seq, &ref.Ref{}}
return Map{seq, &hash.Hash{}}
}

func NewMap(kv ...Value) Map {
@@ -52,15 +52,15 @@ func (m Map) sequence() sequence {

// Value interface
func (m Map) Equals(other Value) bool {
return other != nil && m.Ref() == other.Ref()
return other != nil && m.Hash() == other.Hash()
}

func (m Map) Less(other Value) bool {
return valueLess(m, other)
}

func (m Map) Ref() ref.Ref {
return EnsureRef(m.ref, m)
func (m Map) Hash() hash.Hash {
return EnsureRef(m.h, m)
}

func (m Map) ChildValues() (values []Value) {
@@ -216,7 +216,7 @@ func buildMapData(values []Value) mapEntrySlice {

func newMapLeafBoundaryChecker() boundaryChecker {
return newBuzHashBoundaryChecker(mapWindowSize, sha1.Size, mapPattern, func(item sequenceItem) []byte {
digest := item.(mapEntry).key.Ref().Digest()
digest := item.(mapEntry).key.Hash().Digest()
return digest[:]
})
}

@@ -1,7 +1,7 @@
package types

type mapLeafSequence struct {
data []mapEntry // sorted by entry.key.Ref()
data []mapEntry // sorted by entry.key.Hash()
t *Type
vr ValueReader
}

@@ -308,7 +308,7 @@ func TestMapHas(t *testing.T) {
vs := NewTestValueStore()
doTest := func(tm testMap) {
m := tm.toMap()
m2 := vs.ReadValue(vs.WriteValue(m).TargetRef()).(Map)
m2 := vs.ReadValue(vs.WriteValue(m).TargetHash()).(Map)
for _, entry := range tm.entries {
k, v := entry.key, entry.value
assert.True(m.Has(k))
@@ -689,7 +689,7 @@ func testMapOrder(assert *assert.Assertions, keyType, valueType *Type, tuples []
m := NewMap(tuples...)
i := 0
m.IterAll(func(key, value Value) {
assert.Equal(expectOrdering[i].Ref().String(), key.Ref().String())
assert.Equal(expectOrdering[i].Hash().String(), key.Hash().String())
i++
})
}
@@ -872,7 +872,7 @@ func TestMapFirstNNumbers(t *testing.T) {
}

m := NewMap(kvs...)
assert.Equal("sha1-2bc451349d04c5f90cfe73d1e6eb3ee626db99a1", m.Ref().String())
assert.Equal("sha1-2bc451349d04c5f90cfe73d1e6eb3ee626db99a1", m.Hash().String())
assert.Equal(deriveCollectionHeight(m), getRefHeightOfCollection(m))
}

@@ -893,7 +893,7 @@ func TestMapRefOfStructFirstNNumbers(t *testing.T) {
}

m := NewMap(kvs...)
assert.Equal("sha1-5c9a17f6da0ebfebc1f82f498ac46992fad85250", m.Ref().String())
assert.Equal("sha1-5c9a17f6da0ebfebc1f82f498ac46992fad85250", m.Hash().String())
// height + 1 because the leaves are Ref values (with height 1).
assert.Equal(deriveCollectionHeight(m)+1, getRefHeightOfCollection(m))
}
@@ -903,7 +903,7 @@ func TestMapModifyAfterRead(t *testing.T) {
vs := NewTestValueStore()
m := getTestNativeOrderMap(2).toMap()
// Drop chunk values.
m = vs.ReadValue(vs.WriteValue(m).TargetRef()).(Map)
m = vs.ReadValue(vs.WriteValue(m).TargetHash()).(Map)
// Modify/query. Once upon a time this would crash.
fst, fstval := m.First()
m = m.Remove(fst)

@@ -27,7 +27,7 @@ type metaTuple struct {
numLeaves uint64
}

func (mt metaTuple) ChildRef() Ref {
func (mt metaTuple) ChildHash() Ref {
return mt.childRef
}

@@ -80,7 +80,7 @@ func (ms metaSequenceObject) valueReader() ValueReader {

func (ms metaSequenceObject) Chunks() (chunks []Ref) {
for _, tuple := range ms.tuples {
chunks = append(chunks, tuple.ChildRef())
chunks = append(chunks, tuple.ChildHash())
}
return
}
@@ -130,7 +130,7 @@ func readMetaTupleValue(item sequenceItem, vr ValueReader) Value {
return mt.child
}

r := mt.childRef.TargetRef()
r := mt.childRef.TargetHash()
d.Chk.False(r.IsEmpty())
return vr.ReadValue(r)
}

@@ -1,7 +1,7 @@
package types

import (
"github.com/attic-labs/noms/ref"
"github.com/attic-labs/noms/hash"
)

type Number float64
@@ -18,7 +18,7 @@ func (v Number) Less(other Value) bool {
return NumberKind < other.Type().Kind()
}

func (v Number) Ref() ref.Ref {
func (v Number) Hash() hash.Hash {
return getRef(v)
}

@@ -5,7 +5,7 @@ import (
"sort"

"github.com/attic-labs/noms/d"
"github.com/attic-labs/noms/ref"
"github.com/attic-labs/noms/hash"
)

type orderedSequence interface {
@@ -23,7 +23,7 @@ func newSetMetaSequence(tuples metaSequenceData, vr ValueReader) orderedMetaSequ
ts := make([]*Type, len(tuples))
for i, mt := range tuples {
// Ref<Set<T>>
ts[i] = mt.ChildRef().Type().Desc.(CompoundDesc).ElemTypes[0].Desc.(CompoundDesc).ElemTypes[0]
ts[i] = mt.ChildHash().Type().Desc.(CompoundDesc).ElemTypes[0].Desc.(CompoundDesc).ElemTypes[0]
}
t := MakeSetType(MakeUnionType(ts...))
return newOrderedMetaSequence(tuples, t, vr)
@@ -34,7 +34,7 @@ func newMapMetaSequence(tuples metaSequenceData, vr ValueReader) orderedMetaSequ
vts := make([]*Type, len(tuples))
for i, mt := range tuples {
// Ref<Map<K, V>>
ets := mt.ChildRef().Type().Desc.(CompoundDesc).ElemTypes[0].Desc.(CompoundDesc).ElemTypes
ets := mt.ChildHash().Type().Desc.(CompoundDesc).ElemTypes[0].Desc.(CompoundDesc).ElemTypes
kts[i] = ets[0]
vts[i] = ets[1]
}
@@ -96,13 +96,13 @@ func seekTo(cur *sequenceCursor, key Value, lastPositionIfNotFound bool) bool {
seq := cur.seq.(orderedSequence)
keyIsOrderedByValue := isKindOrderedByValue(key.Type().Kind())
_, seqIsMeta := seq.(metaSequence)
var keyRef ref.Ref
var keyRef hash.Hash

var searchFn func(i int) bool

if seqIsMeta {
if !keyIsOrderedByValue {
keyRef = key.Ref()
keyRef = key.Hash()
}
// For non-native values, meta sequences will hold types.Ref rather than the value
searchFn = func(i int) bool {
@@ -111,7 +111,7 @@ func seekTo(cur *sequenceCursor, key Value, lastPositionIfNotFound bool) bool {
if keyIsOrderedByValue {
return true // Values > ordered
}
return !sr.TargetRef().Less(keyRef)
return !sr.TargetHash().Less(keyRef)
}
return !sk.Less(key)
}
@@ -140,7 +140,7 @@ func getCurrentKey(cur *sequenceCursor) Value {

func newOrderedMetaSequenceBoundaryChecker() boundaryChecker {
return newBuzHashBoundaryChecker(orderedSequenceWindowSize, sha1.Size, objectPattern, func(item sequenceItem) []byte {
digest := item.(metaTuple).ChildRef().TargetRef().Digest()
digest := item.(metaTuple).ChildHash().TargetHash().Digest()
return digest[:]
})
}

@@ -7,12 +7,12 @@ import (

"github.com/attic-labs/noms/chunks"
"github.com/attic-labs/noms/d"
"github.com/attic-labs/noms/ref"
"github.com/attic-labs/noms/hash"
)

// ValueReader is an interface that knows how to read Noms Values, e.g. datas/Database. Required to avoid import cycle between this package and the package that implements Value reading.
type ValueReader interface {
ReadValue(r ref.Ref) Value
ReadValue(r hash.Hash) Value
}

// ValueReadWriter is an interface that knows how to read and write Noms Values, e.g. datas/Database. Required to avoid import cycle between this package and the package that implements Value read/writing.

20 types/ref.go
@@ -2,25 +2,25 @@ package types

import (
"github.com/attic-labs/noms/d"
"github.com/attic-labs/noms/ref"
"github.com/attic-labs/noms/hash"
)

type Ref struct {
target ref.Ref
target hash.Hash
height uint64
t *Type
ref *ref.Ref
hash *hash.Hash
}

func NewRef(v Value) Ref {
return Ref{v.Ref(), maxChunkHeight(v) + 1, MakeRefType(v.Type()), &ref.Ref{}}
return Ref{v.Hash(), maxChunkHeight(v) + 1, MakeRefType(v.Type()), &hash.Hash{}}
}

// Constructs a Ref directly from struct properties. This should not be used outside decoding and testing within the types package.
func constructRef(t *Type, target ref.Ref, height uint64) Ref {
func constructRef(t *Type, target hash.Hash, height uint64) Ref {
d.Chk.Equal(RefKind, t.Kind(), "Invalid type. Expected: RefKind, found: %s", t.Describe())
d.Chk.NotEqual(ValueType, t.Desc.(CompoundDesc).ElemTypes[0])
return Ref{target, height, t, &ref.Ref{}}
return Ref{target, height, t, &hash.Hash{}}
}

func maxChunkHeight(v Value) (max uint64) {
@@ -34,7 +34,7 @@ func maxChunkHeight(v Value) (max uint64) {
return
}

func (r Ref) TargetRef() ref.Ref {
func (r Ref) TargetHash() hash.Hash {
return r.target
}

@@ -48,15 +48,15 @@ func (r Ref) TargetValue(vr ValueReader) Value {

// Value interface
func (r Ref) Equals(other Value) bool {
return other != nil && r.t.Equals(other.Type()) && r.Ref() == other.Ref()
return other != nil && r.t.Equals(other.Type()) && r.Hash() == other.Hash()
}

func (r Ref) Less(other Value) bool {
return valueLess(r, other)
}

func (r Ref) Ref() ref.Ref {
return EnsureRef(r.ref, r)
func (r Ref) Hash() hash.Hash {
return EnsureRef(r.hash, r)
}

func (r Ref) ChildValues() []Value {

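Two distinct hashes now live on Ref, and the rename makes the split explicit: TargetHash() returns the stored hash of the value the Ref points at, while Hash() is the memoized hash of the Ref value itself. An illustrative sketch, not from the commit:

v := NewString("hi")
r := NewRef(v)
_ = r.TargetHash() // equals v.Hash(): identifies the referenced value
_ = r.Hash()       // hash of the Ref value itself; in general a different hash
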
@@ -3,7 +3,7 @@ package types
import (
"testing"

"github.com/attic-labs/noms/ref"
"github.com/attic-labs/noms/hash"
"github.com/stretchr/testify/assert"
)

@@ -43,7 +43,7 @@ func (ts testSequence) Less(other Value) bool {
panic("not reached")
}

func (ts testSequence) Ref() ref.Ref {
func (ts testSequence) Hash() hash.Hash {
panic("not reached")
}

14 types/set.go
@@ -4,7 +4,7 @@ import (
"crypto/sha1"
"sort"

"github.com/attic-labs/noms/ref"
"github.com/attic-labs/noms/hash"
)

const (
@@ -15,11 +15,11 @@ const (

type Set struct {
seq orderedSequence
ref *ref.Ref
h *hash.Hash
}

func newSet(seq orderedSequence) Set {
return Set{seq, &ref.Ref{}}
return Set{seq, &hash.Hash{}}
}

func NewSet(v ...Value) Set {
@@ -55,15 +55,15 @@ func (s Set) sequence() sequence {

// Value interface
func (s Set) Equals(other Value) bool {
return other != nil && s.Ref() == other.Ref()
return other != nil && s.Hash() == other.Hash()
}

func (s Set) Less(other Value) bool {
return valueLess(s, other)
}

func (s Set) Ref() ref.Ref {
return EnsureRef(s.ref, s)
func (s Set) Hash() hash.Hash {
return EnsureRef(s.h, s)
}

func (s Set) ChildValues() (values []Value) {
@@ -191,7 +191,7 @@ func buildSetData(values ValueSlice) ValueSlice {

func newSetLeafBoundaryChecker() boundaryChecker {
return newBuzHashBoundaryChecker(setWindowSize, sha1.Size, setPattern, func(item sequenceItem) []byte {
digest := item.(Value).Ref().Digest()
digest := item.(Value).Hash().Digest()
return digest[:]
})
}

@@ -1,7 +1,7 @@
package types

type setLeafSequence struct {
data []Value // sorted by Ref()
data []Value // sorted by Hash()
t *Type
vr ValueReader
}

@@ -315,7 +315,7 @@ func TestSetHas2(t *testing.T) {
vs := NewTestValueStore()
doTest := func(ts testSet) {
set := ts.toSet()
set2 := vs.ReadValue(vs.WriteValue(set).TargetRef()).(Set)
set2 := vs.ReadValue(vs.WriteValue(set).TargetHash()).(Set)
for _, v := range ts {
assert.True(set.Has(v))
assert.True(set2.Has(v))
@@ -565,7 +565,7 @@ func testSetOrder(assert *assert.Assertions, valueType *Type, value []Value, exp
m := NewSet(value...)
i := 0
m.IterAll(func(value Value) {
assert.Equal(expectOrdering[i].Ref().String(), value.Ref().String())
assert.Equal(expectOrdering[i].Hash().String(), value.Hash().String())
i++
})
}
@@ -727,7 +727,7 @@ func TestSetChunks2(t *testing.T) {
vs := NewTestValueStore()
doTest := func(ts testSet) {
set := ts.toSet()
set2chunks := vs.ReadValue(vs.WriteValue(set).TargetRef()).Chunks()
set2chunks := vs.ReadValue(vs.WriteValue(set).TargetHash()).Chunks()
for i, r := range set.Chunks() {
assert.True(r.Type().Equals(set2chunks[i].Type()), "%s != %s", r.Type().Describe(), set2chunks[i].Type().Describe())
}
@@ -744,7 +744,7 @@ func TestSetFirstNNumbers(t *testing.T) {

nums := generateNumbersAsValues(testSetSize)
s := NewSet(nums...)
assert.Equal("sha1-8186877fb71711b8e6a516ed5c8ad1ccac8c6c00", s.Ref().String())
assert.Equal("sha1-8186877fb71711b8e6a516ed5c8ad1ccac8c6c00", s.Hash().String())
assert.Equal(deriveCollectionHeight(s), getRefHeightOfCollection(s))
}

@@ -756,7 +756,7 @@ func TestSetRefOfStructFirstNNumbers(t *testing.T) {

nums := generateNumbersAsRefOfStructs(testSetSize)
s := NewSet(nums...)
assert.Equal("sha1-14eeb2d1835011bf3e018121ba3274bc08e634e5", s.Ref().String())
assert.Equal("sha1-14eeb2d1835011bf3e018121ba3274bc08e634e5", s.Hash().String())
// height + 1 because the leaves are Ref values (with height 1).
assert.Equal(deriveCollectionHeight(s)+1, getRefHeightOfCollection(s))
}
@@ -766,7 +766,7 @@ func TestSetModifyAfterRead(t *testing.T) {
vs := NewTestValueStore()
set := getTestNativeOrderSet(2).toSet()
// Drop chunk values.
set = vs.ReadValue(vs.WriteValue(set).TargetRef()).(Set)
set = vs.ReadValue(vs.WriteValue(set).TargetHash()).(Set)
// Modify/query. Once upon a time this would crash.
fst := set.First()
set = set.Remove(fst)

@@ -1,14 +1,14 @@
package types

import "github.com/attic-labs/noms/ref"
import "github.com/attic-labs/noms/hash"

type String struct {
s string
ref *ref.Ref
s string
h *hash.Hash
}

func NewString(s string) String {
return String{s, &ref.Ref{}}
return String{s, &hash.Hash{}}
}

func (fs String) String() string {
@@ -30,8 +30,8 @@ func (s String) Less(other Value) bool {
return StringKind < other.Type().Kind()
}

func (fs String) Ref() ref.Ref {
return EnsureRef(fs.ref, fs)
func (fs String) Hash() hash.Hash {
return EnsureRef(fs.h, fs)
}

func (fs String) ChildValues() []Value {

@@ -2,7 +2,7 @@ package types

import (
"github.com/attic-labs/noms/d"
"github.com/attic-labs/noms/ref"
"github.com/attic-labs/noms/hash"
)

type structData map[string]Value
@@ -10,12 +10,12 @@ type structData map[string]Value
type Struct struct {
data structData
t *Type
ref *ref.Ref
h *hash.Hash
}

func newStructFromData(data structData, t *Type) Struct {
d.Chk.Equal(t.Kind(), StructKind)
return Struct{data, t, &ref.Ref{}}
return Struct{data, t, &hash.Hash{}}
}

func NewStruct(name string, data structData) Struct {
@@ -43,15 +43,15 @@ func NewStructWithType(t *Type, data structData) Struct {

// Value interface
func (s Struct) Equals(other Value) bool {
return other != nil && s.t.Equals(other.Type()) && s.Ref() == other.Ref()
return other != nil && s.t.Equals(other.Type()) && s.Hash() == other.Hash()
}

func (s Struct) Less(other Value) bool {
return valueLess(s, other)
}

func (s Struct) Ref() ref.Ref {
return EnsureRef(s.ref, s)
func (s Struct) Hash() hash.Hash {
return EnsureRef(s.h, s)
}

func (s Struct) ChildValues() (res []Value) {

@@ -36,7 +36,7 @@ func TestGenericStructChunks(t *testing.T) {
s1 := newStructFromData(data1, typ)

assert.Len(s1.Chunks(), 1)
assert.Equal(b.Ref(), s1.Chunks()[0].TargetRef())
assert.Equal(b.Hash(), s1.Chunks()[0].TargetHash())
}

func TestGenericStructNew(t *testing.T) {

@@ -4,7 +4,7 @@ import (
"sort"

"github.com/attic-labs/noms/d"
"github.com/attic-labs/noms/ref"
"github.com/attic-labs/noms/hash"
)

// Type defines and describes Noms types, both custom and built-in.
@@ -18,7 +18,7 @@ import (

type Type struct {
Desc TypeDesc
ref *ref.Ref
h *hash.Hash
}

var typeForType = makePrimitiveType(TypeKind)
@@ -40,15 +40,15 @@ func (t *Type) Name() string {

// Value interface
func (t *Type) Equals(other Value) (res bool) {
return other != nil && t.Ref() == other.Ref()
return other != nil && t.Hash() == other.Hash()
}

func (t *Type) Less(other Value) (res bool) {
return valueLess(t, other)
}

func (t *Type) Ref() ref.Ref {
return EnsureRef(t.ref, t)
func (t *Type) Hash() hash.Hash {
return EnsureRef(t.h, t)
}

func (t *Type) ChildValues() (res []Value) {
@@ -142,12 +142,12 @@ func MakeRefType(elemType *Type) *Type {
type unionTypes []*Type

func (uts unionTypes) Len() int { return len(uts) }
func (uts unionTypes) Less(i, j int) bool { return uts[i].Ref().Less(uts[j].Ref()) }
func (uts unionTypes) Less(i, j int) bool { return uts[i].Hash().Less(uts[j].Hash()) }
func (uts unionTypes) Swap(i, j int) { uts[i], uts[j] = uts[j], uts[i] }

// MakeUnionType creates a new union type unless the elemTypes can be folded into a single non union type.
func MakeUnionType(elemTypes ...*Type) *Type {
seenTypes := map[ref.Ref]bool{}
seenTypes := map[hash.Hash]bool{}
ts := flattenUnionTypes(elemTypes, &seenTypes)
if len(ts) == 1 {
return ts[0]
@@ -156,7 +156,7 @@ func MakeUnionType(elemTypes ...*Type) *Type {
return buildType(CompoundDesc{UnionKind, ts})
}

func flattenUnionTypes(ts []*Type, seenTypes *map[ref.Ref]bool) []*Type {
func flattenUnionTypes(ts []*Type, seenTypes *map[hash.Hash]bool) []*Type {
if len(ts) == 0 {
return ts
}
@@ -166,8 +166,8 @@ func flattenUnionTypes(ts []*Type, seenTypes *map[ref.Ref]bool) []*Type {
if t.Kind() == UnionKind {
ts2 = append(ts2, flattenUnionTypes(t.Desc.(CompoundDesc).ElemTypes, seenTypes)...)
} else {
if !(*seenTypes)[t.Ref()] {
(*seenTypes)[t.Ref()] = true
if !(*seenTypes)[t.Hash()] {
(*seenTypes)[t.Hash()] = true
ts2 = append(ts2, t)
}
}
@@ -176,7 +176,7 @@ func flattenUnionTypes(ts []*Type, seenTypes *map[ref.Ref]bool) []*Type {
}

func buildType(desc TypeDesc) *Type {
return &Type{Desc: desc, ref: &ref.Ref{}}
return &Type{Desc: desc, h: &hash.Hash{}}
}

var NumberType = makePrimitiveType(NumberKind)

@@ -21,10 +21,10 @@ func TestTypes(t *testing.T) {
})
recType.Desc.(StructDesc).Fields["self"] = recType

mRef := vs.WriteValue(mapType).TargetRef()
setRef := vs.WriteValue(setType).TargetRef()
mahRef := vs.WriteValue(mahType).TargetRef()
recRef := vs.WriteValue(recType).TargetRef()
mRef := vs.WriteValue(mapType).TargetHash()
setRef := vs.WriteValue(setType).TargetHash()
mahRef := vs.WriteValue(mahType).TargetHash()
recRef := vs.WriteValue(recType).TargetHash()

assert.True(mapType.Equals(vs.ReadValue(mRef)))
assert.True(setType.Equals(vs.ReadValue(setRef)))

@@ -2,7 +2,7 @@ package types

import (
"github.com/attic-labs/noms/d"
"github.com/attic-labs/noms/ref"
"github.com/attic-labs/noms/hash"
)

var generateNumbersAsValues = func(n int) []Value {
@@ -42,24 +42,24 @@ var generateNumbersAsRefOfStructs = func(n int) []Value {

func chunkDiffCount(c1 []Ref, c2 []Ref) int {
count := 0
refs := make(map[ref.Ref]int)
hashes := make(map[hash.Hash]int)

for _, r := range c1 {
refs[r.TargetRef()]++
hashes[r.TargetHash()]++
}

for _, r := range c2 {
if c, ok := refs[r.TargetRef()]; ok {
if c, ok := hashes[r.TargetHash()]; ok {
if c == 1 {
delete(refs, r.TargetRef())
delete(hashes, r.TargetHash())
} else {
refs[r.TargetRef()] = c - 1
hashes[r.TargetHash()] = c - 1
}
} else {
count++
}
}

count += len(refs)
count += len(hashes)
return count
}

@@ -27,7 +27,7 @@ func (vbs *ValidatingBatchingSink) Prepare(hints Hints) {

// Enequeue adds a Chunk to the queue of Chunks waiting to be Put into vbs' backing ChunkStore. The instance keeps an internal buffer of Chunks, spilling to the ChunkStore when the buffer is full. If an attempt to Put Chunks fails, this method returns the BackpressureError from the underlying ChunkStore.
func (vbs *ValidatingBatchingSink) Enqueue(c chunks.Chunk) chunks.BackpressureError {
r := c.Ref()
r := c.Hash()
if vbs.vs.isPresent(r) {
return nil
}

@@ -1,7 +1,7 @@
package types

import (
"github.com/attic-labs/noms/ref"
"github.com/attic-labs/noms/hash"
)

// Value is implemented by every noms value
@@ -9,7 +9,7 @@ type Value interface {
Equals(other Value) bool
Less(other Value) bool

Ref() ref.Ref
Hash() hash.Hash
// Returns the immediate children of this value in the DAG, if any, not including Type().
ChildValues() []Value
Chunks() []Ref

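Every Value implementation in this diff satisfies the renamed interface method the same way: a zeroed *hash.Hash field plus EnsureRef (which keeps its old name here) that computes the hash once and caches it. A sketch of the idiom for a hypothetical value type — the name myValue and the elided methods are illustrative, not part of the commit:

type myValue struct {
	h *hash.Hash // zero until first use
}

// Hash computes the content hash on first call and caches it in v.h.
func (v myValue) Hash() hash.Hash {
	return EnsureRef(v.h, v)
}

// (Equals, Less, ChildValues, Chunks, Type elided for brevity.)
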
@@ -5,7 +5,7 @@ import (

"github.com/attic-labs/noms/chunks"
"github.com/attic-labs/noms/d"
"github.com/attic-labs/noms/ref"
"github.com/attic-labs/noms/hash"
)

// ValueStore provides methods to read and write Noms Values to a BatchStore. It validates Values as they are written, but does not guarantee that these Values are persisted to the BatchStore until a subsequent Flush. or Close.
@@ -15,13 +15,13 @@ import (
// - all Refs in v point to a Value of the correct Type
type ValueStore struct {
bs BatchStore
cache map[ref.Ref]chunkCacheEntry
cache map[hash.Hash]chunkCacheEntry
mu *sync.Mutex
}

type chunkCacheEntry interface {
Present() bool
Hint() ref.Ref
Hint() hash.Hash
Type() *Type
}

@@ -36,7 +36,7 @@ func newLocalValueStore(cs chunks.ChunkStore) *ValueStore {

// NewValueStore returns a ValueStore instance that owns the provided BatchStore and manages its lifetime. Calling Close on the returned ValueStore will Close bs.
func NewValueStore(bs BatchStore) *ValueStore {
return &ValueStore{bs, map[ref.Ref]chunkCacheEntry{}, &sync.Mutex{}}
return &ValueStore{bs, map[hash.Hash]chunkCacheEntry{}, &sync.Mutex{}}
}

func (lvs *ValueStore) BatchStore() BatchStore {
@@ -44,7 +44,7 @@ func (lvs *ValueStore) BatchStore() BatchStore {
}

// ReadValue reads and decodes a value from lvs. It is not considered an error for the requested chunk to be empty; in this case, the function simply returns nil.
func (lvs *ValueStore) ReadValue(r ref.Ref) Value {
func (lvs *ValueStore) ReadValue(r hash.Hash) Value {
v := DecodeChunk(lvs.bs.Get(r), lvs)

var entry chunkCacheEntry = absentChunk{}
@@ -66,7 +66,7 @@ func (lvs *ValueStore) WriteValue(v Value) Ref {
// Encoding v causes any child chunks, e.g. internal nodes if v is a meta sequence, to get written. That needs to happen before we try to validate v.
c := EncodeValue(v, lvs)
d.Chk.False(c.IsEmpty())
hash := c.Ref()
hash := c.Hash()
height := maxChunkHeight(v) + 1
r := constructRef(MakeRefType(v.Type()), hash, height)
if lvs.isPresent(hash) {
@@ -89,46 +89,46 @@ func (lvs *ValueStore) Close() error {
}

// cacheChunks looks at the Chunks reachable from v and, for each one checks if there's a hint in the cache. If there isn't, or if the hint is a self-reference, the chunk gets r set as its new hint.
func (lvs *ValueStore) cacheChunks(v Value, r ref.Ref) {
func (lvs *ValueStore) cacheChunks(v Value, r hash.Hash) {
for _, reachable := range v.Chunks() {
hash := reachable.TargetRef()
hash := reachable.TargetHash()
if cur := lvs.check(hash); cur == nil || cur.Hint().IsEmpty() || cur.Hint() == hash {
lvs.set(hash, hintedChunk{getTargetType(reachable), r})
}
}
}

func (lvs *ValueStore) isPresent(r ref.Ref) (present bool) {
func (lvs *ValueStore) isPresent(r hash.Hash) (present bool) {
if entry := lvs.check(r); entry != nil && entry.Present() {
present = true
}
return
}

func (lvs *ValueStore) check(r ref.Ref) chunkCacheEntry {
func (lvs *ValueStore) check(r hash.Hash) chunkCacheEntry {
lvs.mu.Lock()
defer lvs.mu.Unlock()
return lvs.cache[r]
}

func (lvs *ValueStore) set(r ref.Ref, entry chunkCacheEntry) {
func (lvs *ValueStore) set(r hash.Hash, entry chunkCacheEntry) {
lvs.mu.Lock()
defer lvs.mu.Unlock()
lvs.cache[r] = entry
}

func (lvs *ValueStore) checkAndSet(r ref.Ref, entry chunkCacheEntry) {
func (lvs *ValueStore) checkAndSet(r hash.Hash, entry chunkCacheEntry) {
if cur := lvs.check(r); cur == nil || cur.Hint().IsEmpty() {
lvs.set(r, entry)
}
}

func (lvs *ValueStore) checkChunksInCache(v Value) map[ref.Ref]struct{} {
hints := map[ref.Ref]struct{}{}
func (lvs *ValueStore) checkChunksInCache(v Value) map[hash.Hash]struct{} {
hints := map[hash.Hash]struct{}{}
for _, reachable := range v.Chunks() {
entry := lvs.check(reachable.TargetRef())
entry := lvs.check(reachable.TargetHash())
if entry == nil || !entry.Present() {
d.Exp.Fail("Attempted to write Value containing Ref to non-existent object.", "%s\n, contains ref %s, which points to a non-existent Value.", EncodedValueWithTags(v), reachable.TargetRef())
d.Exp.Fail("Attempted to write Value containing Ref to non-existent object.", "%s\n, contains ref %s, which points to a non-existent Value.", EncodedValueWithTags(v), reachable.TargetHash())
}
if hint := entry.Hint(); !hint.IsEmpty() {
hints[hint] = struct{}{}
@@ -143,7 +143,7 @@ func (lvs *ValueStore) checkChunksInCache(v Value) map[ref.Ref]struct{} {
if targetType.Equals(ValueType) {
continue
}
d.Exp.True(entry.Type().Equals(targetType), "Value to write contains ref %s, which points to a value of a different type: %+v != %+v", reachable.TargetRef(), entry.Type(), targetType)
d.Exp.True(entry.Type().Equals(targetType), "Value to write contains ref %s, which points to a value of a different type: %+v != %+v", reachable.TargetHash(), entry.Type(), targetType)
}
return hints
}
@@ -156,14 +156,14 @@ func getTargetType(refBase Ref) *Type {

type hintedChunk struct {
t *Type
hint ref.Ref
hint hash.Hash
}

func (h hintedChunk) Present() bool {
return true
}

func (h hintedChunk) Hint() (r ref.Ref) {
func (h hintedChunk) Hint() (r hash.Hash) {
return h.hint
}

@@ -177,7 +177,7 @@ func (a absentChunk) Present() bool {
return false
}

func (a absentChunk) Hint() (r ref.Ref) {
func (a absentChunk) Hint() (r hash.Hash) {
return
}

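The store's hint cache is now keyed by hash.Hash end to end: check, set, and checkAndSet index the cache by a chunk's hash, and a hintedChunk records the hash of some other chunk whose presence vouches for the child. A sketch of how cacheChunks populates hints — illustrative only, using the unexported helpers above; parent is a hypothetical hash.Hash supplied by the caller:

lvs.cacheChunks(v, parent) // every chunk reachable from v gets parent as hint
for _, reachable := range v.Chunks() {
	if e := lvs.check(reachable.TargetHash()); e != nil {
		_ = e.Hint() // parent, unless a non-self hint already existed
	}
}
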
@@ -8,7 +8,7 @@ import (
"testing"

"github.com/attic-labs/noms/chunks"
"github.com/attic-labs/noms/ref"
"github.com/attic-labs/noms/hash"
"github.com/stretchr/testify/assert"
)

@@ -35,11 +35,11 @@ func TestWriteValue(t *testing.T) {
assert := assert.New(t)

vs := NewTestValueStore()
testEncode := func(expected string, v Value) ref.Ref {
r := vs.WriteValue(v).TargetRef()
testEncode := func(expected string, v Value) hash.Hash {
r := vs.WriteValue(v).TargetHash()

// Assuming that MemoryStore works correctly, we don't need to check the actual serialization, only the hash. Neat.
assert.EqualValues(sha1.Sum([]byte(expected)), r.Digest(), "Incorrect ref serializing %+v. Got: %#x", v, r.Digest())
assert.EqualValues(sha1.Sum([]byte(expected)), r.Digest(), "Incorrect hash serializing %+v. Got: %#x", v, r.Digest())
return r
}

@@ -57,13 +57,13 @@ func TestWriteBlobLeaf(t *testing.T) {

buf := bytes.NewBuffer([]byte{})
b1 := NewBlob(buf)
r1 := vs.WriteValue(b1).TargetRef()
r1 := vs.WriteValue(b1).TargetHash()
// echo -n 'b ' | sha1sum
assert.Equal("sha1-e1bc846440ec2fb557a5a271e785cd4c648883fa", r1.String())

buf = bytes.NewBufferString("Hello, World!")
b2 := NewBlob(buf)
r2 := vs.WriteValue(b2).TargetRef()
r2 := vs.WriteValue(b2).TargetHash()
// echo -n 'b Hello, World!' | sha1sum
assert.Equal("sha1-135fe1453330547994b2ce8a1b238adfbd7df87e", r2.String())
}
@@ -75,7 +75,7 @@ func TestCheckChunksInCache(t *testing.T) {

b := NewEmptyBlob()
cs.Put(EncodeValue(b, nil))
cvs.set(b.Ref(), hintedChunk{b.Type(), b.Ref()})
cvs.set(b.Hash(), hintedChunk{b.Type(), b.Hash()})

bref := NewRef(b)
assert.NotPanics(func() { cvs.checkChunksInCache(bref) })
@@ -91,10 +91,10 @@ func TestCacheOnReadValue(t *testing.T) {
r := cvs.WriteValue(bref)

cvs2 := newLocalValueStore(cs)
v := cvs2.ReadValue(r.TargetRef())
v := cvs2.ReadValue(r.TargetHash())
assert.True(bref.Equals(v))
assert.True(cvs2.isPresent(b.Ref()))
assert.True(cvs2.isPresent(bref.Ref()))
assert.True(cvs2.isPresent(b.Hash()))
assert.True(cvs2.isPresent(bref.Hash()))
}

func TestHintsOnCache(t *testing.T) {
@@ -109,7 +109,7 @@ func TestHintsOnCache(t *testing.T) {
}
r := cvs.WriteValue(l)

v := cvs.ReadValue(r.TargetRef())
v := cvs.ReadValue(r.TargetHash())
if assert.True(l.Equals(v)) {
l = v.(List)
bref := cvs.WriteValue(NewBlob(bytes.NewBufferString("g")))
@@ -117,7 +117,7 @@ func TestHintsOnCache(t *testing.T) {

hints := cvs.checkChunksInCache(l)
if assert.Len(hints, 2) {
for _, hash := range []ref.Ref{v.Ref(), bref.TargetRef()} {
for _, hash := range []hash.Hash{v.Hash(), bref.TargetHash()} {
_, present := hints[hash]
assert.True(present)
}

@@ -52,11 +52,11 @@ func newTestSuite() *testSuite {
func (suite *testSuite) roundTripDigestTest(t *testValue) {
vs := NewTestValueStore()
r := vs.WriteValue(t.value)
v2 := vs.ReadValue(r.TargetRef())
v2 := vs.ReadValue(r.TargetHash())

suite.True(v2.Equals(t.value), t.description)
suite.True(t.value.Equals(v2), t.description)
suite.Equal(t.expectedRef, r.TargetRef().String(), t.description)
suite.Equal(t.expectedRef, r.TargetHash().String(), t.description)
}

// Called from testify suite.Run()

12 vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go (generated, vendored)
@@ -378,7 +378,7 @@ func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Han
if n.value == nil {
if setFunc == nil {
n.mu.Unlock()
n.unref()
n.unHash()
return nil
}

@@ -386,7 +386,7 @@ func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Han
if n.value == nil {
n.size = 0
n.mu.Unlock()
n.unref()
n.unHash()
return nil
}
atomic.AddInt32(&r.size, int32(n.size))
@@ -434,7 +434,7 @@ func (r *Cache) Delete(ns, key uint64, onDel func()) bool {
if r.cacher != nil {
r.cacher.Ban(n)
}
n.unref()
n.unHash()
return true
}

@@ -469,7 +469,7 @@ func (r *Cache) Evict(ns, key uint64) bool {
if r.cacher != nil {
r.cacher.Evict(n)
}
n.unref()
n.unHash()
return true
}

@@ -582,7 +582,7 @@ func (n *Node) Value() Value {
}

// Ref returns this 'cache node' ref counter.
func (n *Node) Ref() int32 {
func (n *Node) Hash() int32 {
return atomic.LoadInt32(&n.ref)
}

@@ -594,7 +594,7 @@ func (n *Node) GetHandle() *Handle {
return &Handle{unsafe.Pointer(n)}
}

func (n *Node) unref() {
func (n *Node) unHash() {
if atomic.AddInt32(&n.ref, -1) == 0 {
n.r.delete(n)
}

14 walk/walk.go
@@ -6,7 +6,7 @@ import (

"github.com/attic-labs/noms/chunks"
"github.com/attic-labs/noms/d"
"github.com/attic-labs/noms/ref"
"github.com/attic-labs/noms/hash"
"github.com/attic-labs/noms/types"
)

@@ -33,7 +33,7 @@ func doTreeWalkP(v types.Value, vr types.ValueReader, cb SomeCallback, concurren
rq := newRefQueue()
f := newFailure()

visited := map[ref.Ref]bool{}
visited := map[hash.Hash]bool{}
mu := sync.Mutex{}
wg := sync.WaitGroup{}

@@ -57,15 +57,15 @@ func doTreeWalkP(v types.Value, vr types.ValueReader, cb SomeCallback, concurren
defer wg.Done()

mu.Lock()
skip := visited[r.TargetRef()]
visited[r.TargetRef()] = true
skip := visited[r.TargetHash()]
visited[r.TargetHash()] = true
mu.Unlock()

if skip || f.didFail() {
return
}

target := r.TargetRef()
target := r.TargetHash()
v := vr.ReadValue(target)
if v == nil {
f.fail(fmt.Errorf("Attempt to copy absent ref:%s", target.String()))
@@ -106,12 +106,12 @@ func SomeChunksP(r types.Ref, bs types.BatchStore, stopCb SomeChunksStopCallback
rq := newRefQueue()
wg := sync.WaitGroup{}
mu := sync.Mutex{}
visitedRefs := map[ref.Ref]bool{}
visitedRefs := map[hash.Hash]bool{}

walkChunk := func(r types.Ref) {
defer wg.Done()

tr := r.TargetRef()
tr := r.TargetHash()

mu.Lock()
visited := visitedRefs[tr]
