go/store/nbs: remove unused error from chunkSource.hash()

This commit is contained in:
Andy Arthur
2022-11-23 12:06:24 -08:00
parent 1e7287e69a
commit 0324e0752e
14 changed files with 50 additions and 123 deletions
+2 -12
View File
@@ -494,12 +494,7 @@ func dividePlan(ctx context.Context, plan compactionPlan, minPartSize, maxPartSi
break
}
if sws.dataLen <= maxPartSize {
-h, err := sws.source.hash()
-if err != nil {
-return nil, nil, nil, err
-}
+h := sws.source.hash()
copies = append(copies, copyPart{h.String(), 0, int64(sws.dataLen)})
continue
}
@@ -509,12 +504,7 @@ func dividePlan(ctx context.Context, plan compactionPlan, minPartSize, maxPartSi
var srcStart int64
for _, length := range lens {
-h, err := sws.source.hash()
-if err != nil {
-return nil, nil, nil, err
-}
+h := sws.source.hash()
copies = append(copies, copyPart{h.String(), srcStart, length})
srcStart += length
}
+18 -18
View File
@@ -60,7 +60,7 @@ func TestAWSTablePersisterPersist(t *testing.T) {
require.NoError(t, err)
if assert.True(mustUint32(src.count()) > 0) {
-if r, err := s3svc.readerForTableWithNamespace(ctx, ns, mustAddr(src.hash())); assert.NotNil(r) && assert.NoError(err) {
+if r, err := s3svc.readerForTableWithNamespace(ctx, ns, src.hash()); assert.NotNil(r) && assert.NoError(err) {
assertChunksInReader(testChunks, r, assert)
}
}
@@ -76,7 +76,7 @@ func TestAWSTablePersisterPersist(t *testing.T) {
src, err := s3p.Persist(context.Background(), mt, nil, &Stats{})
require.NoError(t, err)
if assert.True(mustUint32(src.count()) > 0) {
-if r, err := s3svc.readerForTableWithNamespace(ctx, ns, mustAddr(src.hash())); assert.NotNil(r) && assert.NoError(err) {
+if r, err := s3svc.readerForTableWithNamespace(ctx, ns, src.hash()); assert.NotNil(r) && assert.NoError(err) {
assertChunksInReader(testChunks, r, assert)
}
}
@@ -101,7 +101,7 @@ func TestAWSTablePersisterPersist(t *testing.T) {
require.NoError(t, err)
assert.True(mustUint32(src.count()) == 0)
-_, present := s3svc.data[mustAddr(src.hash()).String()]
+_, present := s3svc.data[src.hash().String()]
assert.False(present)
})
@@ -138,7 +138,7 @@ func TestAWSTablePersisterPersist(t *testing.T) {
src, err := s3p.Persist(context.Background(), mt, nil, &Stats{})
require.NoError(t, err)
if assert.True(mustUint32(src.count()) > 0) {
-if r, err := ddb.readerForTable(ctx, mustAddr(src.hash())); assert.NotNil(r) && assert.NoError(err) {
+if r, err := ddb.readerForTable(ctx, src.hash()); assert.NotNil(r) && assert.NoError(err) {
assertChunksInReader(testChunks, r, assert)
}
}
@@ -162,7 +162,7 @@ func TestAWSTablePersisterPersist(t *testing.T) {
src, err := s3p.Open(context.Background(), name, uint32(len(testChunks)), &Stats{})
require.NoError(t, err)
if assert.True(mustUint32(src.count()) > 0) {
-if r, err := ddb.readerForTable(ctx, mustAddr(src.hash())); assert.NotNil(r) && assert.NoError(err) {
+if r, err := ddb.readerForTable(ctx, src.hash()); assert.NotNil(r) && assert.NoError(err) {
assertChunksInReader(testChunks, r, assert)
}
if data, present := tc.Get(name); assert.True(present) {
@@ -183,8 +183,8 @@ func TestAWSTablePersisterPersist(t *testing.T) {
src, err := s3p.Persist(context.Background(), mt, nil, &Stats{})
require.NoError(t, err)
if assert.True(mustUint32(src.count()) > 0) {
-if r, err := ddb.readerForTable(ctx, mustAddr(src.hash())); assert.Nil(r) && assert.NoError(err) {
-if r, err := s3svc.readerForTable(ctx, mustAddr(src.hash())); assert.NotNil(r) && assert.NoError(err) {
+if r, err := ddb.readerForTable(ctx, src.hash()); assert.Nil(r) && assert.NoError(err) {
+if r, err := s3svc.readerForTable(ctx, src.hash()); assert.NotNil(r) && assert.NoError(err) {
assertChunksInReader(testChunks, r, assert)
}
}
@@ -203,8 +203,8 @@ func TestAWSTablePersisterPersist(t *testing.T) {
src, err := s3p.Persist(context.Background(), mt, nil, &Stats{})
require.NoError(t, err)
if assert.True(mustUint32(src.count()) > 0) {
-if r, err := ddb.readerForTable(ctx, mustAddr(src.hash())); assert.Nil(r) && assert.NoError(err) {
-if r, err := s3svc.readerForTable(ctx, mustAddr(src.hash())); assert.NotNil(r) && assert.NoError(err) {
+if r, err := ddb.readerForTable(ctx, src.hash()); assert.Nil(r) && assert.NoError(err) {
+if r, err := s3svc.readerForTable(ctx, src.hash()); assert.NotNil(r) && assert.NoError(err) {
assertChunksInReader(testChunks, r, assert)
}
}
@@ -282,14 +282,14 @@ func TestAWSTablePersisterDividePlan(t *testing.T) {
perTableDataSize[c.name] = totalSize
}
assert.Len(perTableDataSize, 2)
-assert.Contains(perTableDataSize, mustAddr(justRight.hash()).String())
-assert.Contains(perTableDataSize, mustAddr(tooBig.hash()).String())
+assert.Contains(perTableDataSize, justRight.hash().String())
+assert.Contains(perTableDataSize, tooBig.hash().String())
ti, err := justRight.index()
require.NoError(t, err)
-assert.EqualValues(calcChunkDataLen(ti), perTableDataSize[mustAddr(justRight.hash()).String()])
+assert.EqualValues(calcChunkDataLen(ti), perTableDataSize[justRight.hash().String()])
ti, err = tooBig.index()
require.NoError(t, err)
-assert.EqualValues(calcChunkDataLen(ti), perTableDataSize[mustAddr(tooBig.hash()).String()])
+assert.EqualValues(calcChunkDataLen(ti), perTableDataSize[tooBig.hash().String()])
assert.Len(manuals, 1)
ti, err = tooSmall.index()
@@ -374,7 +374,7 @@ func TestAWSTablePersisterConjoinAll(t *testing.T) {
require.NoError(t, err)
if assert.True(mustUint32(src.count()) > 0) {
-if r, err := s3svc.readerForTable(ctx, mustAddr(src.hash())); assert.NotNil(r) && assert.NoError(err) {
+if r, err := s3svc.readerForTable(ctx, src.hash()); assert.NotNil(r) && assert.NoError(err) {
assertChunksInReader(chunks, r, assert)
}
}
@@ -390,7 +390,7 @@ func TestAWSTablePersisterConjoinAll(t *testing.T) {
require.NoError(t, err)
if assert.True(mustUint32(src.count()) > 0) {
-if r, err := s3svc.readerForTable(ctx, mustAddr(src.hash())); assert.NotNil(r) && assert.NoError(err) {
+if r, err := s3svc.readerForTable(ctx, src.hash()); assert.NotNil(r) && assert.NoError(err) {
assertChunksInReader(smallChunks, r, assert)
}
}
@@ -426,7 +426,7 @@ func TestAWSTablePersisterConjoinAll(t *testing.T) {
require.NoError(t, err)
if assert.True(mustUint32(src.count()) > 0) {
-if r, err := s3svc.readerForTable(ctx, mustAddr(src.hash())); assert.NotNil(r) && assert.NoError(err) {
+if r, err := s3svc.readerForTable(ctx, src.hash()); assert.NotNil(r) && assert.NoError(err) {
assertChunksInReader(bigUns1, r, assert)
assertChunksInReader(bigUns2, r, assert)
}
@@ -462,7 +462,7 @@ func TestAWSTablePersisterConjoinAll(t *testing.T) {
require.NoError(t, err)
if assert.True(mustUint32(src.count()) > 0) {
-if r, err := s3svc.readerForTable(ctx, mustAddr(src.hash())); assert.NotNil(r) && assert.NoError(err) {
+if r, err := s3svc.readerForTable(ctx, src.hash()); assert.NotNil(r) && assert.NoError(err) {
assertChunksInReader(bigUns1, r, assert)
assertChunksInReader(medChunks, r, assert)
}
@@ -512,7 +512,7 @@ func TestAWSTablePersisterConjoinAll(t *testing.T) {
require.NoError(t, err)
if assert.True(mustUint32(src.count()) > 0) {
-if r, err := s3svc.readerForTable(ctx, mustAddr(src.hash())); assert.NotNil(r) && assert.NoError(err) {
+if r, err := s3svc.readerForTable(ctx, src.hash()); assert.NotNil(r) && assert.NoError(err) {
assertChunksInReader(smallChunks, r, assert)
assertChunksInReader(bigUns1, r, assert)
assertChunksInReader(medChunks, r, assert)
+1 -1
View File
@@ -469,7 +469,7 @@ func TestBlockStoreConjoinOnCommit(t *testing.T) {
}
conjoined, err := p.ConjoinAll(context.Background(), srcs, stats)
require.NoError(t, err)
-cannedSpecs := []tableSpec{{mustAddr(conjoined.hash()), mustUint32(conjoined.count())}}
+cannedSpecs := []tableSpec{{conjoined.hash(), mustUint32(conjoined.count())}}
return cannedConjoin{true, append(cannedSpecs, keepers...)}
}
+2 -2
View File
@@ -21,8 +21,8 @@ type chunkSourceAdapter struct {
h addr
}
-func (csa chunkSourceAdapter) hash() (addr, error) {
-return csa.h, nil
+func (csa chunkSourceAdapter) hash() addr {
+return csa.h
}
func newReaderFromIndexData(ctx context.Context, q MemoryQuotaProvider, idxData []byte, name addr, tra tableReaderAt, blockSize uint64) (cs chunkSource, err error) {
+3 -19
View File
@@ -221,14 +221,8 @@ func conjoinTables(ctx context.Context, p tablePersister, upstream []tableSpec,
return tableSpec{}, nil, nil, err
}
-h, err := conjoinedSrc.hash()
-if err != nil {
-return tableSpec{}, nil, nil, err
-}
+h := conjoinedSrc.hash()
cnt, err = conjoinedSrc.count()
if err != nil {
return tableSpec{}, nil, nil, err
}
@@ -284,27 +278,17 @@ func toSpecs(srcs chunkSources) ([]tableSpec, error) {
specs := make([]tableSpec, len(srcs))
for i, src := range srcs {
 cnt, err := src.count()
 if err != nil {
 return nil, err
-}
-if cnt <= 0 {
+} else if cnt <= 0 {
 return nil, errors.New("invalid table spec has no sources")
 }
-h, err := src.hash()
-if err != nil {
-return nil, err
-}
+h := src.hash()
-cnt, err = src.count()
-if err != nil {
-return nil, err
-}
 specs[i] = tableSpec{h, cnt}
}
+2 -2
View File
@@ -75,7 +75,7 @@ func TestConjoin(t *testing.T) {
// Makes a tableSet with len(tableSizes) upstream tables containing tableSizes[N] unique chunks
makeTestTableSpecs := func(tableSizes []uint32, p tablePersister) (specs []tableSpec) {
for _, src := range makeTestSrcs(t, tableSizes, p) {
-specs = append(specs, tableSpec{mustAddr(src.hash()), mustUint32(src.count())})
+specs = append(specs, tableSpec{src.hash(), mustUint32(src.count())})
err := src.close()
require.NoError(t, err)
}
@@ -143,7 +143,7 @@ func TestConjoin(t *testing.T) {
mt.addChunk(computeAddr(data), data)
src, err := p.Persist(context.Background(), mt, nil, &Stats{})
require.NoError(t, err)
-return tableSpec{mustAddr(src.hash()), mustUint32(src.count())}
+return tableSpec{src.hash(), mustUint32(src.count())}
}
tc := []struct {
+2 -2
View File
@@ -61,8 +61,8 @@ func (ecs emptyChunkSource) uncompressedLen() (uint64, error) {
return 0, nil
}
-func (ecs emptyChunkSource) hash() (addr, error) {
-return addr{}, nil
+func (ecs emptyChunkSource) hash() addr {
+return addr{}
}
func (ecs emptyChunkSource) index() (tableIndex, error) {
+5 -5
View File
@@ -126,7 +126,7 @@ func TestFSTablePersisterPersist(t *testing.T) {
src, err := persistTableData(fts, testChunks...)
require.NoError(t, err)
if assert.True(mustUint32(src.count()) > 0) {
-buff, err := os.ReadFile(filepath.Join(dir, mustAddr(src.hash()).String()))
+buff, err := os.ReadFile(filepath.Join(dir, src.hash().String()))
require.NoError(t, err)
ti, err := parseTableIndexByCopy(ctx, buff, &UnlimitedQuotaProvider{})
require.NoError(t, err)
@@ -166,7 +166,7 @@ func TestFSTablePersisterPersistNoData(t *testing.T) {
require.NoError(t, err)
assert.True(mustUint32(src.count()) == 0)
-_, err = os.Stat(filepath.Join(dir, mustAddr(src.hash()).String()))
+_, err = os.Stat(filepath.Join(dir, src.hash().String()))
assert.True(os.IsNotExist(err), "%v", err)
}
@@ -182,7 +182,7 @@ func TestFSTablePersisterCacheOnPersist(t *testing.T) {
func() {
src, err := persistTableData(fts, testChunks...)
require.NoError(t, err)
-name = mustAddr(src.hash())
+name = src.hash()
}()
// Table should still be cached
@@ -228,7 +228,7 @@ func TestFSTablePersisterConjoinAll(t *testing.T) {
require.NoError(t, err)
if assert.True(mustUint32(src.count()) > 0) {
-buff, err := os.ReadFile(filepath.Join(dir, mustAddr(src.hash()).String()))
+buff, err := os.ReadFile(filepath.Join(dir, src.hash().String()))
require.NoError(t, err)
ti, err := parseTableIndexByCopy(ctx, buff, &UnlimitedQuotaProvider{})
require.NoError(t, err)
@@ -268,7 +268,7 @@ func TestFSTablePersisterConjoinAllDups(t *testing.T) {
require.NoError(t, err)
if assert.True(mustUint32(src.count()) > 0) {
-buff, err := os.ReadFile(filepath.Join(dir, mustAddr(src.hash()).String()))
+buff, err := os.ReadFile(filepath.Join(dir, src.hash().String()))
require.NoError(t, err)
ti, err := parseTableIndexByCopy(ctx, buff, &UnlimitedQuotaProvider{})
require.NoError(t, err)
+2 -2
View File
@@ -130,8 +130,8 @@ func newFileTableReader(ctx context.Context, dir string, h addr, chunkCount uint
}, nil
}
-func (mmtr *fileTableReader) hash() (addr, error) {
-return mmtr.h, nil
+func (mmtr *fileTableReader) hash() addr {
+return mmtr.h
}
func (mmtr *fileTableReader) close() error {
+2 -2
View File
@@ -399,7 +399,7 @@ func interloperWrite(fm *fakeManifest, p tablePersister, rootChunk []byte, chunk
return hash.Hash{}, nil, err
}
-fm.set(constants.NomsVersion, newLock, newRoot, []tableSpec{{mustAddr(src.hash()), uint32(len(chunks) + 1)}}, nil)
+fm.set(constants.NomsVersion, newLock, newRoot, []tableSpec{{src.hash(), uint32(len(chunks) + 1)}}, nil)
if err = src.close(); err != nil {
return [20]byte{}, nil, err
@@ -576,7 +576,7 @@ func compactSourcesToBuffer(sources chunkSources) (name addr, data []byte, chunk
ch <- rec
})
if err != nil {
-ch <- extractRecord{a: mustAddr(src.hash()), err: err}
+ch <- extractRecord{a: src.hash(), err: err}
}
}()
+2 -10
View File
@@ -1302,18 +1302,10 @@ func (nbs *NomsBlockStore) Size(ctx context.Context) (uint64, error) {
func (nbs *NomsBlockStore) chunkSourcesByAddr() (map[addr]chunkSource, error) {
css := make(map[addr]chunkSource, len(nbs.tables.upstream)+len(nbs.tables.novel))
for _, cs := range nbs.tables.upstream {
-a, err := cs.hash()
-if err != nil {
-return nil, err
-}
-css[a] = cs
+css[cs.hash()] = cs
}
for _, cs := range nbs.tables.novel {
-a, err := cs.hash()
-if err != nil {
-return nil, err
-}
-css[a] = cs
+css[cs.hash()] = cs
}
return css, nil
+1 -1
View File
@@ -241,7 +241,7 @@ type chunkSource interface {
chunkReader
// hash returns the hash address of this chunkSource.
-hash() (addr, error)
+hash() addr
// opens a Reader to the first byte of the chunkData segment of this table.
reader(context.Context) (io.Reader, error)
+4 -28
View File
@@ -80,20 +80,8 @@ func (csbc chunkSourcesByAscendingCount) Less(i, j int) bool {
}
if cntI == cntJ {
-hi, err := srcI.hash()
-if err != nil {
-csbc.err = err
-return false
-}
-hj, err := srcJ.hash()
-if err != nil {
-csbc.err = err
-return false
-}
+hi := srcI.hash()
+hj := srcJ.hash()
return bytes.Compare(hi[:], hj[:]) < 0
}
@@ -113,20 +101,8 @@ func (csbds chunkSourcesByDescendingDataSize) Len() int { return len(csbds.sws)
func (csbds chunkSourcesByDescendingDataSize) Less(i, j int) bool {
swsI, swsJ := csbds.sws[i], csbds.sws[j]
if swsI.dataLen == swsJ.dataLen {
-hi, err := swsI.source.hash()
-if err != nil {
-csbds.err = err
-return false
-}
-hj, err := swsJ.source.hash()
-if err != nil {
-csbds.err = err
-return false
-}
+hi := swsI.source.hash()
+hj := swsJ.source.hash()
return bytes.Compare(hi[:], hj[:]) < 0
}
return swsI.dataLen > swsJ.dataLen
+4 -19
View File
@@ -369,11 +369,7 @@ func (ts tableSet) rebase(ctx context.Context, specs []tableSpec, stats *Stats)
existing := make(map[addr]chunkSource, len(ts.upstream))
for _, cs := range ts.upstream {
-a, err := cs.hash()
-if err != nil {
-return tableSet{}, err
-}
-existing[a] = cs
+existing[cs.hash()] = cs
}
// newly opened tables are unowned, we must
@@ -433,12 +429,7 @@ func (ts tableSet) toSpecs() ([]tableSpec, error) {
}
if cnt > 0 {
-h, err := src.hash()
-if err != nil {
-return nil, err
-}
+h := src.hash()
tableSpecs = append(tableSpecs, tableSpec{h, cnt})
}
}
@@ -453,12 +444,7 @@ func (ts tableSet) toSpecs() ([]tableSpec, error) {
return nil, errors.New("no upstream chunks")
}
-h, err := src.hash()
-if err != nil {
-return nil, err
-}
+h := src.hash()
tableSpecs = append(tableSpecs, tableSpec{h, cnt})
}
return tableSpecs, nil
@@ -469,8 +455,7 @@ func tableSetCalcReads(ts tableSet, reqs []getRecord, blockSize uint64) (reads i
for _, tbl := range all {
rdr, ok := tbl.(*fileTableReader)
if !ok {
-h, _ := tbl.hash()
-err = fmt.Errorf("chunkSource %s is not a fileTableReader", h.String())
+err = fmt.Errorf("chunkSource %s is not a fileTableReader", tbl.hash().String())
return
}