This commit is contained in:
Dhruv Sringari
2022-02-14 14:43:11 -08:00
parent f580a5d03b
commit 5822653cd8
2 changed files with 1 addition and 245 deletions
+1 -4
View File
@@ -3,7 +3,6 @@ package memprof
import (
"context"
"flag"
"log"
"os"
"testing"
@@ -19,9 +18,6 @@ var ddb *doltdb.DoltDB
func TestMain(m *testing.M) {
flag.Parse()
if *loc == "" {
log.Panicf("doltDir must be specified")
}
urlStr = "file://" + *loc + dbfactory.DoltDataDir
@@ -30,6 +26,7 @@ func TestMain(m *testing.M) {
}
func BenchmarkLoadDoltDBMemory(b *testing.B) {
b.SkipNow()
for i := 0; i < b.N; i++ {
ctx := context.Background()
var err error
-241
View File
@@ -48,247 +48,6 @@ type tableIndex interface {
Clone() (tableIndex, error)
}
//var _ tableIndex = mmapTableIndex{}
//
//// parses a valid nbs tableIndex from a byte stream. |buff| must end with an NBS index
//// and footer, though it may contain an unspecified number of bytes before that data.
//// |tableIndex| doesn't keep alive any references to |buff|.
//func parseTableIndex(buff []byte) (onHeapTableIndex, error) {
// return ReadTableIndex(bytes.NewReader(buff))
//}
//
//func ReadTableIndex(rd io.ReadSeeker) (onHeapTableIndex, error) {
// footerSize := int64(magicNumberSize + uint64Size + uint32Size)
// _, err := rd.Seek(-footerSize, io.SeekEnd)
//
// if err != nil {
// return onHeapTableIndex{}, err
// }
//
// footer, err := iohelp.ReadNBytes(rd, int(footerSize))
//
// if err != nil {
// return onHeapTableIndex{}, err
// }
//
// if string(footer[uint32Size+uint64Size:]) != magicNumber {
// return onHeapTableIndex{}, ErrInvalidTableFile
// }
//
// chunkCount := binary.BigEndian.Uint32(footer)
// totalUncompressedData := binary.BigEndian.Uint64(footer[uint32Size:])
//
// // index
// suffixesSize := int64(chunkCount) * addrSuffixSize
// lengthsSize := int64(chunkCount) * lengthSize
// tuplesSize := int64(chunkCount) * prefixTupleSize
// indexSize := suffixesSize + lengthsSize + tuplesSize
//
// _, err = rd.Seek(-(indexSize + footerSize), io.SeekEnd)
// if err != nil {
// return onHeapTableIndex{}, ErrInvalidTableFile
// }
//
// prefixes, ordinals, err := streamComputePrefixes(chunkCount, rd)
// if err != nil {
// return onHeapTableIndex{}, ErrInvalidTableFile
// }
// lengths, offsets, err := streamComputeOffsets(chunkCount, rd)
// if err != nil {
// return onHeapTableIndex{}, ErrInvalidTableFile
// }
// suffixes, err := iohelp.ReadNBytes(rd, int(suffixesSize))
// if err != nil {
// return onHeapTableIndex{}, ErrInvalidTableFile
// }
//
// return onHeapTableIndex{
// chunkCount, totalUncompressedData,
// prefixes, offsets,
// lengths, ordinals,
// suffixes,
// }, nil
//}
//
//type onHeapTableIndex struct {
// chunkCount uint32
// totalUncompressedData uint64
// prefixes, offsets []uint64
// lengths, ordinals []uint32
// suffixes []byte
//}
//
//func (ti onHeapTableIndex) ChunkCount() uint32 {
// return ti.chunkCount
//}
//
//// EntrySuffixMatches returns true IFF the suffix for prefix entry |idx|
//// matches the address |a|.
//func (ti onHeapTableIndex) EntrySuffixMatches(idx uint32, h *addr) bool {
// li := uint64(ti.ordinals[idx]) * addrSuffixSize
// return bytes.Equal(h[addrPrefixSize:], ti.suffixes[li:li+addrSuffixSize])
//}
//
//func (ti onHeapTableIndex) IndexEntry(idx uint32, a *addr) indexEntry {
// ord := ti.ordinals[idx]
// if a != nil {
// binary.BigEndian.PutUint64(a[:], ti.prefixes[idx])
// li := uint64(ord) * addrSuffixSize
// copy(a[addrPrefixSize:], ti.suffixes[li:li+addrSuffixSize])
// }
// return indexResult{ti.offsets[ord], ti.lengths[ord]}
//}
//
//func (ti onHeapTableIndex) Lookup(h *addr) (indexEntry, bool) {
// ord := ti.lookupOrdinal(h)
// if ord == ti.chunkCount {
// return indexResult{}, false
// }
// return indexResult{ti.offsets[ord], ti.lengths[ord]}, true
//}
//
//func (ti onHeapTableIndex) Ordinals() []uint32 {
// return ti.ordinals
//}
//
//func (ti onHeapTableIndex) Prefixes() []uint64 {
// return ti.prefixes
//}
//
//// TableFileSize returns the size of the table file that this index references.
//// This assumes that the index follows immediately after the last chunk in the
//// file and that the last chunk in the file is in the index.
//func (ti onHeapTableIndex) TableFileSize() uint64 {
// if ti.chunkCount == 0 {
// return footerSize
// }
// len, offset := ti.offsets[ti.chunkCount-1], uint64(ti.lengths[ti.chunkCount-1])
// return offset + len + indexSize(ti.chunkCount) + footerSize
//}
//
//func (ti onHeapTableIndex) TotalUncompressedData() uint64 {
// return ti.totalUncompressedData
//}
//
//func (ti onHeapTableIndex) Close() error {
// return nil
//}
//
//func (ti onHeapTableIndex) Clone() tableIndex {
// return ti
//}
//
//func (ti onHeapTableIndex) prefixIdxToOrdinal(idx uint32) uint32 {
// return ti.ordinals[idx]
//}
//
//// prefixIdx returns the first position in |tr.prefixes| whose value ==
//// |prefix|. Returns |tr.chunkCount| if absent
//func (ti onHeapTableIndex) prefixIdx(prefix uint64) (idx uint32) {
// // NOTE: The golang impl of sort.Search is basically inlined here. This method can be called in
// // an extremely tight loop and inlining the code was a significant perf improvement.
// idx, j := 0, ti.chunkCount
// for idx < j {
// h := idx + (j-idx)/2 // avoid overflow when computing h
// // i ≤ h < j
// if ti.prefixes[h] < prefix {
// idx = h + 1 // preserves f(i-1) == false
// } else {
// j = h // preserves f(j) == true
// }
// }
//
// return
//}
//
//// lookupOrdinal returns the ordinal of |h| if present. Returns |ti.chunkCount|
//// if absent.
//func (ti onHeapTableIndex) lookupOrdinal(h *addr) uint32 {
// prefix := h.Prefix()
//
// for idx := ti.prefixIdx(prefix); idx < ti.chunkCount && ti.prefixes[idx] == prefix; idx++ {
// if ti.EntrySuffixMatches(idx, h) {
// return ti.ordinals[idx]
// }
// }
//
// return ti.chunkCount
//}
//
//func computeOffsets(count uint32, buff []byte) (lengths []uint32, offsets []uint64) {
// lengths = make([]uint32, count)
// offsets = make([]uint64, count)
//
// lengths[0] = binary.BigEndian.Uint32(buff)
//
// for i := uint64(1); i < uint64(count); i++ {
// lengths[i] = binary.BigEndian.Uint32(buff[i*lengthSize:])
// offsets[i] = offsets[i-1] + uint64(lengths[i-1])
// }
// return
//}
//
//func streamComputeOffsets(count uint32, rd io.Reader) (lengths []uint32, offsets []uint64, err error) {
// lengths = make([]uint32, count)
// offsets = make([]uint64, count)
// buff := make([]byte, lengthSize)
//
// n, err := rd.Read(buff)
// if err != nil {
// return nil, nil, err
// }
// if n != lengthSize {
// return nil, nil, ErrNotEnoughBytes
// }
// lengths[0] = binary.BigEndian.Uint32(buff)
//
// for i := uint64(1); i < uint64(count); i++ {
// n, err := rd.Read(buff)
// if err != nil {
// return nil, nil, err
// }
// if n != lengthSize {
// return nil, nil, ErrNotEnoughBytes
// }
// lengths[i] = binary.BigEndian.Uint32(buff)
// offsets[i] = offsets[i-1] + uint64(lengths[i-1])
// }
//
// return
//}
//
//func computePrefixes(count uint32, buff []byte) (prefixes []uint64, ordinals []uint32) {
// prefixes = make([]uint64, count)
// ordinals = make([]uint32, count)
//
// for i := uint64(0); i < uint64(count); i++ {
// idx := i * prefixTupleSize
// prefixes[i] = binary.BigEndian.Uint64(buff[idx:])
// ordinals[i] = binary.BigEndian.Uint32(buff[idx+addrPrefixSize:])
// }
// return
//}
//
//func streamComputePrefixes(count uint32, rd io.Reader) (prefixes []uint64, ordinals []uint32, err error) {
// prefixes = make([]uint64, count)
// ordinals = make([]uint32, count)
// buff := make([]byte, prefixTupleSize)
//
// for i := uint64(0); i < uint64(count); i++ {
// n, err := rd.Read(buff)
// if err != nil {
// return nil, nil, err
// }
// if n != prefixTupleSize {
// return nil, nil, ErrNotEnoughBytes
// }
// prefixes[i] = binary.BigEndian.Uint64(buff)
// ordinals[i] = binary.BigEndian.Uint32(buff[addrPrefixSize:])
// }
//
// return
//}
func ReadTableFooter(rd io.ReadSeeker) (chunkCount uint32, totalUncompressedData uint64, err error) {
footerSize := int64(magicNumberSize + uint64Size + uint32Size)
_, err = rd.Seek(-footerSize, io.SeekEnd)