Minor rework of hash.Hash API (#2888)

Define the hash.Hash type to be a 20-byte array rather than a struct that embeds one. API changes: `hash.FromSlice` -> `hash.New`, `hash.FromData` -> `hash.Of`
Rafael Weinstein
2016-12-02 12:11:00 -08:00
committed by GitHub
parent ca232f0ad7
commit a67bb9bf7b
27 changed files with 102 additions and 146 deletions

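In caller terms, the rename amounts to the following; a minimal sketch against the post-commit API (variable values are illustrative only):

    package main

    import (
        "fmt"

        "github.com/attic-labs/noms/go/hash"
    )

    func main() {
        // New names (this commit): Of replaces FromData, New replaces FromSlice.
        h := hash.Of([]byte("abc")) // was: hash.FromData([]byte("abc"))
        h2 := hash.New(h[:])        // was: hash.FromSlice(...); panics unless len == hash.ByteLen
        // Hash is now a bare [20]byte array, so the Digest()/DigestSlice()
        // accessors disappear in favor of plain indexing and slicing.
        fmt.Println(h == h2, h.String()) // true rmnjb8cjc5tblj21ed4qs821649eduie (see hash_test.go below)
    }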

@@ -34,7 +34,7 @@ func (c Chunk) IsEmpty() bool {
 // NewChunk creates a new Chunk backed by data. This means that the returned Chunk has ownership of this slice of memory.
 func NewChunk(data []byte) Chunk {
-    r := hash.FromData(data)
+    r := hash.Of(data)
     return Chunk{r, data}
 }

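Because NewChunk documents that the Chunk takes ownership of the slice, callers must not mutate the buffer afterwards or the stored hash would no longer match the data; a short usage sketch (import path as used elsewhere in this diff):

    package main

    import (
        "fmt"

        "github.com/attic-labs/noms/go/chunks"
    )

    func main() {
        data := []byte("hello")
        c := chunks.NewChunk(data) // c owns data now; do not write to it afterwards
        fmt.Println(c.Hash().String())
    }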

@@ -31,8 +31,8 @@ import (
 func Serialize(chunk Chunk, writer io.Writer) {
     d.PanicIfFalse(chunk.data != nil)
-    digest := chunk.Hash().Digest()
-    n, err := io.Copy(writer, bytes.NewReader(digest[:]))
+    h := chunk.Hash()
+    n, err := io.Copy(writer, bytes.NewReader(h[:]))
     d.Chk.NoError(err)
     d.PanicIfFalse(int64(hash.ByteLen) == n)
@@ -90,14 +90,13 @@ func DeserializeToChan(reader io.Reader, chunkChan chan<- interface{}) {
 }
 func deserializeChunk(reader io.Reader) (Chunk, bool) {
-    digest := hash.Digest{}
-    n, err := io.ReadFull(reader, digest[:])
+    h := hash.Hash{}
+    n, err := io.ReadFull(reader, h[:])
     if err == io.EOF {
         return EmptyChunk, false
     }
     d.Chk.NoError(err)
     d.PanicIfFalse(int(hash.ByteLen) == n)
-    h := hash.New(digest)
     chunkSize := uint32(0)
     err = binary.Read(reader, binary.BigEndian, &chunkSize)

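Reading Serialize and deserializeChunk together, the wire format is evidently a 20-byte hash, a big-endian uint32 length, then the chunk payload. A writer-side sketch under that reading (standalone, not the package's own code):

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"

        "github.com/attic-labs/noms/go/hash"
    )

    func main() {
        data := []byte("hello")
        h := hash.Of(data)

        var buf bytes.Buffer
        buf.Write(h[:])                                         // 20-byte hash
        _ = binary.Write(&buf, binary.BigEndian, uint32(len(data))) // big-endian length
        buf.Write(data)                                         // payload
        fmt.Println(buf.Len()) // 20 + 4 + 5 = 29
    }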

@@ -238,7 +238,7 @@ func (s *DynamoStore) processResponses(responses []map[string]*dynamodb.Attribut
     for _, item := range responses {
         p := item[refAttr]
         d.PanicIfFalse(p != nil)
-        r := hash.FromSlice(s.removeNamespace(p.B))
+        r := hash.New(s.removeNamespace(p.B))
         p = item[chunkAttr]
         d.PanicIfFalse(p != nil)
         b := p.B
@@ -321,8 +321,7 @@ func (s *DynamoStore) sendWriteRequests(first Chunk) {
 }
 func chunkItemSize(c Chunk) int {
-    r := c.Hash()
-    return len(refAttr) + len(r.DigestSlice()) + len(chunkAttr) + len(c.Data()) + len(compAttr) + len(noneValue)
+    return len(refAttr) + hash.ByteLen + len(chunkAttr) + len(c.Data()) + len(compAttr) + len(noneValue)
 }
 func (s *DynamoStore) buildWriteRequests(chunks []Chunk) map[string][]*dynamodb.WriteRequest {
@@ -450,7 +449,7 @@ func (s *DynamoStore) Root() hash.Hash {
         d.PanicIfFalse(result.Item[compAttr].S != nil)
         d.PanicIfFalse(noneValue == *result.Item[compAttr].S)
     }
-    return hash.FromSlice(result.Item[chunkAttr].B)
+    return hash.New(result.Item[chunkAttr].B)
 }
 func (s *DynamoStore) UpdateRoot(current, last hash.Hash) bool {
@@ -461,7 +460,7 @@ func (s *DynamoStore) UpdateRoot(current, last hash.Hash) bool {
         TableName: aws.String(s.table),
         Item: map[string]*dynamodb.AttributeValue{
             refAttr:   {B: s.rootKey},
-            chunkAttr: {B: current.DigestSlice()},
+            chunkAttr: {B: current[:]},
             compAttr:  {S: aws.String(noneValue)},
         },
     }
@@ -471,7 +470,7 @@ func (s *DynamoStore) UpdateRoot(current, last hash.Hash) bool {
     } else {
         putArgs.ConditionExpression = aws.String(valueEqualsExpression)
         putArgs.ExpressionAttributeValues = map[string]*dynamodb.AttributeValue{
-            ":prev": {B: last.DigestSlice()},
+            ":prev": {B: last[:]},
         }
     }
@@ -491,11 +490,10 @@ func (s *DynamoStore) UpdateRoot(current, last hash.Hash) bool {
 }
 func (s *DynamoStore) makeNamespacedKey(h hash.Hash) []byte {
-    // This is semantically `return append(s.namespace, r.DigestSlice()...)`, but it seemed like we'd be doing this a LOT, and we know how much space we're going to need anyway. So, pre-allocate a slice and then copy into it.
-    hashSlice := h.DigestSlice()
-    key := make([]byte, s.namespaceLen+len(hashSlice))
+    // This is semantically `return append(s.namespace, h[:]...)`, but it seemed like we'd be doing this a LOT, and we know how much space we're going to need anyway. So, pre-allocate a slice and then copy into it.
+    key := make([]byte, s.namespaceLen+hash.ByteLen)
     copy(key, s.namespace)
-    copy(key[s.namespaceLen:], hashSlice)
+    copy(key[s.namespaceLen:], h[:])
     return key
 }

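The pre-allocate-and-copy pattern in makeNamespacedKey sizes the key buffer exactly once, where the append form mentioned in the comment may allocate more than once; a standalone sketch of the same pattern (the namespace value is hypothetical):

    package main

    import (
        "fmt"

        "github.com/attic-labs/noms/go/hash"
    )

    func main() {
        namespace := []byte("ns/") // hypothetical namespace prefix
        h := hash.Of([]byte("some chunk"))

        // One allocation of exactly the right size, then two copies.
        key := make([]byte, len(namespace)+hash.ByteLen)
        copy(key, namespace)
        copy(key[len(namespace):], h[:])
        fmt.Printf("%d-byte key\n", len(key)) // 3 + 20 = 23
    }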

@@ -146,11 +146,10 @@ func (l *LevelDBStore) Close() error {
     return nil
 }
-func (l *LevelDBStore) toChunkKey(r hash.Hash) []byte {
-    digest := r.DigestSlice()
-    out := make([]byte, len(l.chunkPrefix), len(l.chunkPrefix)+len(digest))
+func (l *LevelDBStore) toChunkKey(h hash.Hash) []byte {
+    out := make([]byte, len(l.chunkPrefix), len(l.chunkPrefix)+hash.ByteLen)
     copy(out, l.chunkPrefix)
-    return append(out, digest...)
+    return append(out, h[:]...)
 }
 func (l *LevelDBStore) setVersIfUnset() {
func (l *LevelDBStore) setVersIfUnset() {


@@ -108,12 +108,11 @@ func (p *orderedChunkCache) Clear(hashes hash.HashSet) {
 var uint64Size = binary.Size(uint64(0))
 // toDbKey takes a refHeight and a hash and returns a binary key suitable for use with LevelDB. The default sort order used by LevelDB ensures that these keys (and their associated values) will be iterated in ref-height order.
-func toDbKey(refHeight uint64, hash hash.Hash) []byte {
-    digest := hash.DigestSlice()
-    buf := bytes.NewBuffer(make([]byte, 0, uint64Size+binary.Size(digest)))
+func toDbKey(refHeight uint64, h hash.Hash) []byte {
+    buf := bytes.NewBuffer(make([]byte, 0, uint64Size+hash.ByteLen))
     err := binary.Write(buf, binary.BigEndian, refHeight)
     d.Chk.NoError(err)
-    err = binary.Write(buf, binary.BigEndian, digest)
+    err = binary.Write(buf, binary.BigEndian, h[:])
     d.Chk.NoError(err)
     return buf.Bytes()
 }
@@ -123,10 +122,10 @@ func fromDbKey(key []byte) (uint64, hash.Hash) {
     r := bytes.NewReader(key)
     err := binary.Read(r, binary.BigEndian, &refHeight)
     d.Chk.NoError(err)
-    digest := hash.Digest{}
-    err = binary.Read(r, binary.BigEndian, &digest)
+    h := hash.Hash{}
+    err = binary.Read(r, binary.BigEndian, &h)
     d.Chk.NoError(err)
-    return refHeight, hash.New(digest)
+    return refHeight, h
 }
 // ExtractChunks can be called from any goroutine to write Chunks referenced by the given hashes to w. The chunks are ordered by ref-height. Chunks of the same height are written in an unspecified order, relative to one another.

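toDbKey's comment hinges on the big-endian encoding: LevelDB compares keys byte-wise, and big-endian is the byte order that agrees with numeric order. A quick standalone check:

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
    )

    func key(refHeight uint64) []byte {
        b := make([]byte, 8)
        binary.BigEndian.PutUint64(b, refHeight)
        return b
    }

    func main() {
        // 255 < 256 numerically, and big-endian preserves that byte-wise;
        // little-endian would sort 255 (0xff, 0x00, ...) after 256 (0x00, 0x01, ...).
        fmt.Println(bytes.Compare(key(255), key(256))) // -1
    }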

@@ -31,8 +31,7 @@ func serializeHashes(w io.Writer, hashes hash.HashSlice) {
 }
 func serializeHash(w io.Writer, h hash.Hash) {
-    digest := h.Digest()
-    n, err := io.Copy(w, bytes.NewReader(digest[:]))
+    n, err := io.Copy(w, bytes.NewReader(h[:]))
     d.Chk.NoError(err)
     d.PanicIfFalse(int64(hash.ByteLen) == n)
 }
@@ -62,9 +61,9 @@ func deserializeHashes(reader io.Reader) hash.HashSlice {
 }
 func deserializeHash(reader io.Reader) hash.Hash {
-    digest := hash.Digest{}
-    n, err := io.ReadFull(reader, digest[:])
+    h := hash.Hash{}
+    n, err := io.ReadFull(reader, h[:])
     d.Chk.NoError(err)
     d.PanicIfFalse(int(hash.ByteLen) == n)
-    return hash.New(digest)
+    return h
 }

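With Hash as a bare array, hash (de)serialization is just ByteLen bytes on the wire; a round-trip sketch using the same io primitives (standalone — the package's functions above are unexported):

    package main

    import (
        "bytes"
        "fmt"
        "io"

        "github.com/attic-labs/noms/go/hash"
    )

    func main() {
        h := hash.Of([]byte("abc"))

        var buf bytes.Buffer
        buf.Write(h[:]) // serialize: the 20 array bytes are the wire format

        var out hash.Hash
        if _, err := io.ReadFull(&buf, out[:]); err != nil { // deserialize in place
            panic(err)
        }
        fmt.Println(h == out) // true
    }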

@@ -24,8 +24,9 @@ func (r Patch) Len() int {
 }
 var vals = map[types.DiffChangeType]int{types.DiffChangeRemoved: 0, types.DiffChangeModified: 1, types.DiffChangeAdded: 2}
 func (r Patch) Less(i, j int) bool {
     if r[i].Path.Equals(r[j].Path) {
         return vals[r[i].ChangeType] < vals[r[j].ChangeType]
     }
     return pathIsLess(r[i].Path, r[j].Path)
@@ -98,7 +99,7 @@ func hashIndexPathCompare(pp types.HashIndexPath, o types.PathPart) int {
     case types.IndexPath:
         return 1
     case types.HashIndexPath:
-        switch bytes.Compare(pp.Hash.DigestSlice(), opp.Hash.DigestSlice()) {
+        switch bytes.Compare(pp.Hash[:], opp.Hash[:]) {
         case -1:
             return -1
         case 0:


@@ -52,53 +52,32 @@ var (
 )
 // Hash is used to represent the hash of a Noms Value.
-type Hash struct {
-    digest Digest
-}
-// Digest is used for the underlying data of the Hash.
-type Digest [ByteLen]byte
-// Digest returns a *copy* of the digest that backs Hash.
-func (r Hash) Digest() Digest {
-    return r.digest
-}
+type Hash [ByteLen]byte
 // IsEmpty determines if this Hash is equal to the empty hash (all zeroes).
-func (r Hash) IsEmpty() bool {
-    return r.digest == emptyHash.digest
-}
-// DigestSlice returns a slice of the digest that backs A NEW COPY of Hash, because the receiver of
-// this method is not a pointer.
-func (r Hash) DigestSlice() []byte {
-    return r.digest[:]
+func (h Hash) IsEmpty() bool {
+    return h == emptyHash
 }
 // String returns a string representation of the hash using Base32 encoding.
-func (r Hash) String() string {
-    return encode(r.digest[:])
-}
-// New creates a new Hash from a Digest.
-func New(digest Digest) Hash {
-    return Hash{digest}
+func (h Hash) String() string {
+    return encode(h[:])
 }
-// FromData computes a new Hash from data.
-func FromData(data []byte) Hash {
+// Of computes a new Hash from data.
+func Of(data []byte) Hash {
     r := sha512.Sum512(data)
-    d := Digest{}
-    copy(d[:], r[:ByteLen])
-    return New(d)
+    h := Hash{}
+    copy(h[:], r[:ByteLen])
+    return h
 }
-// FromSlice creates a new Hash backed by data, ensuring that data is an acceptable length.
-func FromSlice(data []byte) Hash {
+// New creates a new Hash backed by data, ensuring that data is an acceptable length.
+func New(data []byte) Hash {
     d.PanicIfFalse(len(data) == ByteLen)
-    digest := Digest{}
-    copy(digest[:], data)
-    return New(digest)
+    h := Hash{}
+    copy(h[:], data)
+    return h
 }
 // MaybeParse parses a string representing a hash as a Base32 encoded byte array.
@@ -108,7 +87,7 @@ func MaybeParse(s string) (Hash, bool) {
     if match == nil {
         return emptyHash, false
     }
-    return FromSlice(decode(s)), true
+    return New(decode(s)), true
 }
 // Parse parses a string representing a hash as a Base32 encoded byte array.
@@ -122,14 +101,14 @@ func Parse(s string) Hash {
 }
 // Less compares two hashes returning whether this Hash is less than other.
-func (r Hash) Less(other Hash) bool {
-    return bytes.Compare(r.digest[:], other.digest[:]) < 0
+func (h Hash) Less(other Hash) bool {
+    return bytes.Compare(h[:], other[:]) < 0
 }
 // Greater compares two hashes returning whether this Hash is greater than other.
-func (r Hash) Greater(other Hash) bool {
+func (h Hash) Greater(other Hash) bool {
     // TODO: Remove this
-    return bytes.Compare(r.digest[:], other.digest[:]) > 0
+    return bytes.Compare(h[:], other[:]) > 0
 }
 // HashSet is a set of Hashes.

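The switch from a struct to a named array type is what enables most of the simplifications above: arrays compare with ==, work as map keys, slice via h[:], and copy by value on assignment. A small demonstration:

    package main

    import (
        "fmt"

        "github.com/attic-labs/noms/go/hash"
    )

    func main() {
        h := hash.Of([]byte("hello"))

        seen := map[hash.Hash]bool{h: true} // arrays are valid map keys

        cp := h // assignment copies all 20 bytes
        cp[0] ^= 0xff
        fmt.Println(seen[h], h == cp) // true false: the copy is independent
    }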

@@ -17,11 +17,11 @@ func TestHashSliceSort(t *testing.T) {
     rs := HashSlice{}
     for i := 1; i <= 3; i++ {
         for j := 1; j <= 3; j++ {
-            d := Digest{}
+            h := Hash{}
             for k := 1; k <= j; k++ {
-                d[k-1] = byte(i)
+                h[k-1] = byte(i)
             }
-            rs = append(rs, New(d))
+            rs = append(rs, h)
         }
     }


@@ -79,26 +79,8 @@ func TestString(t *testing.T) {
     assert.Equal(t, s, r.String())
 }
-func TestDigest(t *testing.T) {
-    r := New(Digest{})
-    d := r.Digest()
-    assert.Equal(t, r.Digest(), d)
-    // Digest() must return a copy otherwise things get weird.
-    d[0] = 0x01
-    assert.NotEqual(t, r.Digest(), d)
-}
-func TestDigestSlice(t *testing.T) {
-    r := New(Digest{})
-    d := r.DigestSlice()
-    assert.Equal(t, r.DigestSlice(), d)
-    // DigestSlice() must return a copy otherwise things get weird.
-    d[0] = 0x01
-    assert.NotEqual(t, r.DigestSlice(), d)
-}
-func TestFromData(t *testing.T) {
-    r := FromData([]byte("abc"))
+func TestOf(t *testing.T) {
+    r := Of([]byte("abc"))
     assert.Equal(t, "rmnjb8cjc5tblj21ed4qs821649eduie", r.String())
 }


@@ -126,7 +126,7 @@ func (src *dataSource) readTuples(tuples chan<- offsetTuple) {
         return
     }
     d.Chk.True(n == gen.OffsetTupleLen)
-    ot.h = hash.FromSlice(otBuf[:20])
+    ot.h = hash.New(otBuf[:20])
     ot.l = uint64(binary.BigEndian.Uint32(otBuf[20:]))
     src.dataRead += ot.l
     tuples <- ot


@@ -33,8 +33,8 @@ func TestFileManifestParseIfExists(t *testing.T) {
     assert.False(exists)
     // Simulate another process writing a manifest (with an old Noms version).
-    newRoot := hash.FromData([]byte("new root"))
-    tableName := hash.FromData([]byte("table1"))
+    newRoot := hash.Of([]byte("new root"))
+    tableName := hash.Of([]byte("table1"))
     b, err := clobberManifest(fm.dir, strings.Join([]string{StorageVersion, "0", newRoot.String(), tableName.String(), "0"}, ":"))
     assert.NoError(err, string(b))
@@ -55,15 +55,15 @@ func TestFileManifestParseIfExistsHoldsLock(t *testing.T) {
     defer os.RemoveAll(fm.dir)
     // Simulate another process writing a manifest.
-    newRoot := hash.FromData([]byte("new root"))
-    tableName := hash.FromData([]byte("table1"))
+    newRoot := hash.Of([]byte("new root"))
+    tableName := hash.Of([]byte("table1"))
     b, err := clobberManifest(fm.dir, strings.Join([]string{StorageVersion, constants.NomsVersion, newRoot.String(), tableName.String(), "0"}, ":"))
     assert.NoError(err, string(b))
     // ParseIfExists should now reflect the manifest written above.
     exists, vers, root, tableSpecs := fm.ParseIfExists(func() {
         // This should fail to get the lock, and therefore _not_ clobber the manifest.
-        badRoot := hash.FromData([]byte("bad root"))
+        badRoot := hash.Of([]byte("bad root"))
         b, err := tryClobberManifest(fm.dir, strings.Join([]string{StorageVersion, "0", badRoot.String(), tableName.String(), "0"}, ":"))
         assert.NoError(err, string(b))
     })
@@ -94,10 +94,10 @@ func TestFileManifestUpdateWinRace(t *testing.T) {
     fm := makeFileManifestTempDir(t)
     defer os.RemoveAll(fm.dir)
-    newRoot2 := hash.FromData([]byte("new root 2"))
+    newRoot2 := hash.Of([]byte("new root 2"))
     actual, tableSpecs := fm.Update(nil, hash.Hash{}, newRoot2, func() {
         // This should fail to get the lock, and therefore _not_ clobber the manifest. So the Update should succeed.
-        newRoot := hash.FromData([]byte("new root"))
+        newRoot := hash.Of([]byte("new root"))
         b, err := tryClobberManifest(fm.dir, strings.Join([]string{StorageVersion, constants.NomsVersion, newRoot.String()}, ":"))
         assert.NoError(err, string(b))
     })
@@ -110,12 +110,12 @@ func TestFileManifestUpdateRootOptimisticLockFail(t *testing.T) {
     fm := makeFileManifestTempDir(t)
     defer os.RemoveAll(fm.dir)
-    tableName := hash.FromData([]byte("table1"))
-    newRoot := hash.FromData([]byte("new root"))
+    tableName := hash.Of([]byte("table1"))
+    newRoot := hash.Of([]byte("new root"))
     b, err := tryClobberManifest(fm.dir, strings.Join([]string{StorageVersion, constants.NomsVersion, newRoot.String(), tableName.String(), "3"}, ":"))
     assert.NoError(err, string(b))
-    newRoot2 := hash.FromData([]byte("new root 2"))
+    newRoot2 := hash.Of([]byte("new root 2"))
     actual, tableSpecs := fm.Update(nil, hash.Hash{}, newRoot2, nil)
     assert.Equal(newRoot, actual)
     if assert.Len(tableSpecs, 1) {


@@ -43,7 +43,7 @@ func TestChunkStoreVersion(t *testing.T) {
     defer store.Close()
     assert.Equal(constants.NomsVersion, store.Version())
-    newRoot := hash.FromData([]byte("new root"))
+    newRoot := hash.Of([]byte("new root"))
     if assert.True(store.UpdateRoot(newRoot, hash.Hash{})) {
         assert.Equal(constants.NomsVersion, store.Version())
     }
@@ -86,7 +86,7 @@ func TestChunkStoreManifestAppearsAfterConstruction(t *testing.T) {
     // Simulate another process writing a manifest (with an old Noms version) after construction.
     chunks := [][]byte{[]byte("hello2"), []byte("goodbye2"), []byte("badbye2")}
-    newRoot := hash.FromData([]byte("new root"))
+    newRoot := hash.Of([]byte("new root"))
     h := createOnDiskTable(dir, chunks)
     b, err := clobberManifest(dir, strings.Join([]string{StorageVersion, "0", newRoot.String(), h.String(), "3"}, ":"))
     assert.NoError(err, string(b))
@@ -121,13 +121,13 @@ func TestChunkStoreManifestFirstWriteByOtherProcess(t *testing.T) {
     // Simulate another process having already written a manifest (with an old Noms version).
     chunks := [][]byte{[]byte("hello2"), []byte("goodbye2"), []byte("badbye2")}
     h := createOnDiskTable(dir, chunks)
-    newRoot := hash.FromData([]byte("new root"))
+    newRoot := hash.Of([]byte("new root"))
     b, err := tryClobberManifest(dir, strings.Join([]string{StorageVersion, "0", newRoot.String(), h.String(), "3"}, ":"))
     assert.NoError(err, string(b))
     store := hookedNewNomsBlockStore(dir, defaultMemTableSize, func() {
         // This should fail to get the lock, and therefore _not_ clobber the manifest.
-        badRoot := hash.FromData([]byte("bad root"))
+        badRoot := hash.Of([]byte("bad root"))
         b, err := tryClobberManifest(dir, strings.Join([]string{StorageVersion, "0", badRoot.String(), h.String(), "3"}, ":"))
         assert.NoError(err, string(b))
     })


@@ -120,7 +120,7 @@ func unionTables(curTables chunkSources, tm tableManager, tableSpecs []tableSpec
 }
 func (nbs *NomsBlockStore) Put(c chunks.Chunk) {
-    a := addr(c.Hash().Digest())
+    a := addr(c.Hash())
     d.PanicIfFalse(nbs.addChunk(a, c.Data()))
     nbs.putCount++
 }
@@ -132,7 +132,7 @@ func (nbs *NomsBlockStore) SchedulePut(c chunks.Chunk, refHeight uint64, hints t
 func (nbs *NomsBlockStore) PutMany(chunx []chunks.Chunk) (err chunks.BackpressureError) {
     for ; len(chunx) > 0; chunx = chunx[1:] {
         c := chunx[0]
-        a := addr(c.Hash().Digest())
+        a := addr(c.Hash())
         if !nbs.addChunk(a, c.Data()) {
             break
         }
@@ -168,7 +168,7 @@ func prependTable(curTables chunkSources, crc chunkSource) chunkSources {
 }
 func (nbs *NomsBlockStore) Get(h hash.Hash) chunks.Chunk {
-    a := addr(h.Digest())
+    a := addr(h)
     data, tables := func() (data []byte, tables chunkSources) {
         nbs.mu.RLock()
         defer nbs.mu.RUnlock()
@@ -189,7 +189,7 @@ func (nbs *NomsBlockStore) Get(h hash.Hash) chunks.Chunk {
 func (nbs *NomsBlockStore) GetMany(hashes []hash.Hash) []chunks.Chunk {
     reqs := make([]getRecord, len(hashes))
     for i, h := range hashes {
-        a := addr(h.Digest())
+        a := addr(h)
         reqs[i] = getRecord{
             a:      &a,
             prefix: a.Prefix(),
@@ -232,7 +232,7 @@ func (nbs *NomsBlockStore) GetMany(hashes []hash.Hash) []chunks.Chunk {
 }
 func (nbs *NomsBlockStore) Has(h hash.Hash) bool {
-    a := addr(h.Digest())
+    a := addr(h)
     has, tables := func() (bool, chunkSources) {
         nbs.mu.RLock()
         defer nbs.mu.RUnlock()

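The addr(h) conversions above work because Go permits direct conversion between array types of identical length and element type, so no Digest() accessor is needed; a tiny illustration (addr here mirrors the package's unexported address type, assumed to be a 20-byte array):

    package main

    import (
        "fmt"

        "github.com/attic-labs/noms/go/hash"
    )

    // addr stands in for nbs's unexported address type (assumption: [20]byte).
    type addr [hash.ByteLen]byte

    func main() {
        h := hash.Of([]byte("abc"))
        a := addr(h) // direct array-to-array conversion; was addr(h.Digest())
        fmt.Println(a == addr(h)) // true
    }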

@@ -118,7 +118,7 @@ func TestHasManySequentialPrefix(t *testing.T) {
     addrs := make([]addr, len(addrStrings))
     for i, s := range addrStrings {
-        addrs[i] = addr(hash.Parse(s).Digest())
+        addrs[i] = addr(hash.Parse(s))
     }
     bogusData := []byte("bogus") // doesn't matter what this is. hasMany() won't check chunkRecords


@@ -170,10 +170,10 @@ func (b *binaryNomsReader) readIdent(tc *TypeCache) uint32 {
 }
 func (b *binaryNomsReader) readHash() hash.Hash {
-    digest := hash.Digest{}
-    copy(digest[:], b.buff[b.offset:b.offset+hash.ByteLen])
+    h := hash.Hash{}
+    copy(h[:], b.buff[b.offset:b.offset+hash.ByteLen])
     b.offset += hash.ByteLen
-    return hash.New(digest)
+    return h
 }
 type binaryNomsWriter struct {
@@ -276,8 +276,7 @@ func (b *binaryNomsWriter) writeString(v string) {
 func (b *binaryNomsWriter) writeHash(h hash.Hash) {
     b.ensureCapacity(hash.ByteLen)
-    digest := h.Digest()
-    copy(b.buff[b.offset:], digest[:])
+    copy(b.buff[b.offset:], h[:])
     b.offset += hash.ByteLen
 }


@@ -16,16 +16,15 @@ func TestEnsureHash(t *testing.T) {
     vs := NewTestValueStore()
     count := byte(1)
     mockGetRef := func(v Value) hash.Hash {
-        d := hash.Digest{}
-        d[0] = count
+        h := hash.Hash{}
+        h[0] = count
         count++
-        return hash.New(d)
+        return h
     }
-    testRef := func(r hash.Hash, expected byte) {
-        d := r.Digest()
-        assert.Equal(expected, d[0])
-        for i := 1; i < len(d); i++ {
-            assert.Equal(byte(0), d[i])
+    testRef := func(h hash.Hash, expected byte) {
+        assert.Equal(expected, h[0])
+        for i := 1; i < hash.ByteLen; i++ {
+            assert.Equal(byte(0), h[i])
         }
     }
@@ -70,7 +69,9 @@ func TestEnsureHash(t *testing.T) {
     for _, v := range values {
         expected := byte(0x42)
-        assignHash(v.(hashCacher), hash.New(hash.Digest{0: expected}))
+        h := hash.Hash{}
+        h[0] = expected
+        assignHash(v.(hashCacher), h)
         testRef(v.Hash(), expected)
     }


@@ -317,7 +317,8 @@ func encodeForGraph(bs []byte, v Value, asValue bool, vrw ValueReadWriter) []byt
     } else {
         // if we're encoding hash values, we know the length, so we can leave that out
         bs = append(bs, uint8(v.Type().Kind()))
-        bs = append(bs, v.Hash().DigestSlice()...)
+        h := v.Hash()
+        bs = append(bs, h[:]...)
     }
     return bs
 }


@@ -9,6 +9,7 @@ import (
     "testing"
+    "bytes"
     "github.com/attic-labs/noms/go/hash"
     "github.com/attic-labs/testify/assert"
 )
@@ -295,7 +296,7 @@ func TestPathParseErrors(t *testing.T) {
     test("@foo", "Unsupported annotation: @foo")
     test("@key", "Cannot use @key annotation at beginning of path")
     test(".foo@key", "Cannot use @key annotation on: .foo")
-    test(fmt.Sprintf(".foo[#%s]@soup", hash.FromData([]byte{42}).String()), "Unsupported annotation: @soup")
+    test(fmt.Sprintf(".foo[#%s]@soup", hash.Of([]byte{42}).String()), "Unsupported annotation: @soup")
 }
 func TestPathEquals(t *testing.T) {
}
func TestPathEquals(t *testing.T) {


@@ -155,8 +155,7 @@ func (rv *rollingValueHasher) writeString(v string) {
 }
 func (rv *rollingValueHasher) writeHash(h hash.Hash) {
-    digest := h.Digest()
-    for _, b := range digest[:] {
+    for _, b := range h[:] {
         rv.HashByte(b)
     }
 }


@@ -313,7 +313,7 @@ func generateOID(t *Type, allowUnresolvedCycles bool) {
     if t.oid == nil {
         buf := newBinaryNomsWriter()
         encodeForOID(t, buf, allowUnresolvedCycles, t, nil)
-        oid := hash.FromData(buf.data())
+        oid := hash.Of(buf.data())
         t.oid = &oid
     }
 }
@@ -356,7 +356,7 @@ func encodeForOID(t *Type, buf nomsWriter, allowUnresolvedCycles bool, root *Typ
     mbuf.reset()
     encodeForOID(elemType, mbuf, allowUnresolvedCycles, root, parentStructTypes)
-    h2 := hash.FromData(mbuf.data())
+    h2 := hash.Of(mbuf.data())
     if _, found := indexOfType(elemType, parentStructTypes); !found {
         elemType.oid = &h2
     }
@@ -369,9 +369,8 @@ func encodeForOID(t *Type, buf nomsWriter, allowUnresolvedCycles bool, root *Typ
     data := make([]byte, hash.ByteLen)
     for o := range oids {
-        digest := o.Digest()
         for i := 0; i < len(data); i++ {
-            data[i] ^= digest[i]
+            data[i] ^= o[i]
         }
     }
     buf.writeBytes(data)

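XOR-folding the member OIDs (the loop above) makes the combined bytes independent of iteration order, which matters because Go map iteration order is unspecified; a standalone demonstration:

    package main

    import (
        "fmt"

        "github.com/attic-labs/noms/go/hash"
    )

    func xorAll(hs []hash.Hash) (data [hash.ByteLen]byte) {
        for _, h := range hs {
            for i := range data {
                data[i] ^= h[i]
            }
        }
        return
    }

    func main() {
        a, b := hash.Of([]byte("a")), hash.Of([]byte("b"))
        // Same result regardless of order, so an unordered set yields a stable OID.
        fmt.Println(xorAll([]hash.Hash{a, b}) == xorAll([]hash.Hash{b, a})) // true
    }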

@@ -14,7 +14,7 @@ import (
 )
 func hashFromString(s string) hash.Hash {
-    return hash.FromData([]byte(s))
+    return hash.Of([]byte(s))
 }
 func TestSizeCache(t *testing.T) {
func TestSizeCache(t *testing.T) {


@@ -1,7 +1,7 @@
 {
   "name": "@attic/noms",
   "license": "Apache-2.0",
-  "version": "64.2.2",
+  "version": "65.0.0",
   "description": "Noms JS SDK",
   "repository": "https://github.com/attic-labs/noms/tree/master/js/noms",
   "main": "dist/commonjs/noms.js",


@@ -17,7 +17,7 @@ export default class Chunk {
   }
   get hash(): Hash {
-    return this._hash || (this._hash = Hash.fromData(this.data));
+    return this._hash || (this._hash = Hash.of(this.data));
   }
   isEmpty(): boolean {


@@ -53,7 +53,7 @@ suite('Hash', () => {
   });
   test('fromData', () => {
-    const r = Hash.fromData(Bytes.fromString('abc'));
+    const r = Hash.of(Bytes.fromString('abc'));
     assert.strictEqual('rmnjb8cjc5tblj21ed4qs821649eduie', r.toString());
   });


@@ -75,7 +75,7 @@ export default class Hash {
   /**
    * Computes the hash from `data`.
    */
-  static fromData(data: Uint8Array): Hash {
+  static of(data: Uint8Array): Hash {
     return new Hash(sha512(data));
   }
 }


@@ -313,7 +313,7 @@ function generateOID(t: Type<any>, allowUnresolvedCycles: boolean) {
   if (!hasOID(t)) {
     const buf = new BinaryWriter();
     encodeForOID(t, buf, allowUnresolvedCycles, t, []);
-    const oid = Hash.fromData(buf.data);
+    const oid = Hash.of(buf.data);
     t.updateOID(oid);
   }
 }
@@ -366,7 +366,7 @@ function encodeForOID(t: Type<any>, buf: BinaryWriter, allowUnresolvedCycles: bo
   if (!h) {
     mbuf.reset();
     encodeForOID(elemType, mbuf, allowUnresolvedCycles, root, parentStructTypes);
-    h = Hash.fromData(mbuf.data);
+    h = Hash.of(mbuf.data);
     if (parentStructTypes.indexOf(elemType) === -1) {
       elemType.updateOID(h);
     }