refactor val.SlicedBuffer as message.ItemArray

This commit is contained in:
Andy Arthur
2022-07-28 13:29:15 -07:00
parent 6bda9ba556
commit 8dc1322a72
15 changed files with 71 additions and 73 deletions
+2 -3
View File
@@ -23,7 +23,6 @@ import (
"github.com/dolthub/dolt/go/gen/fb/serial"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/pool"
"github.com/dolthub/dolt/go/store/val"
)
const (
@@ -78,14 +77,14 @@ func (s AddressMapSerializer) Serialize(keys, addrs [][]byte, subtrees []uint64,
return serial.FinishMessage(b, serial.AddressMapEnd(b), addressMapFileID)
}
func getAddressMapKeys(msg serial.Message) (keys val.SlicedBuffer) {
func getAddressMapKeys(msg serial.Message) (keys ItemArray) {
am := serial.GetRootAsAddressMap(msg, serial.MessagePrefixSz)
keys.Buf = am.KeyItemsBytes()
keys.Offs = getAddressMapKeyOffsets(am)
return
}
func getAddressMapValues(msg serial.Message) (values val.SlicedBuffer) {
func getAddressMapValues(msg serial.Message) (values ItemArray) {
am := serial.GetRootAsAddressMap(msg, serial.MessagePrefixSz)
values.Buf = am.AddressArrayBytes()
values.Offs = offsetsForAddressArray(values.Buf)
+5 -6
View File
@@ -21,7 +21,6 @@ import (
"github.com/dolthub/dolt/go/gen/fb/serial"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/pool"
"github.com/dolthub/dolt/go/store/val"
)
var blobFileID = []byte(serial.BlobFileID)
@@ -56,7 +55,7 @@ func (s BlobSerializer) Serialize(keys, values [][]byte, subtrees []uint64, leve
return serial.FinishMessage(b, serial.BlobEnd(b), blobFileID)
}
func getBlobKeys(msg serial.Message) val.SlicedBuffer {
func getBlobKeys(msg serial.Message) ItemArray {
cnt := getBlobCount(msg)
buf := make([]byte, cnt)
for i := range buf {
@@ -67,24 +66,24 @@ func getBlobKeys(msg serial.Message) val.SlicedBuffer {
b := offs[i*2 : (i+1)*2]
binary.LittleEndian.PutUint16(b, uint16(i))
}
return val.SlicedBuffer{
return ItemArray{
Buf: buf,
Offs: offs,
}
}
func getBlobValues(msg serial.Message) val.SlicedBuffer {
func getBlobValues(msg serial.Message) ItemArray {
b := serial.GetRootAsBlob(msg, serial.MessagePrefixSz)
if b.TreeLevel() > 0 {
arr := b.AddressArrayBytes()
off := offsetsForAddressArray(arr)
return val.SlicedBuffer{
return ItemArray{
Buf: arr,
Offs: off,
}
}
return val.SlicedBuffer{
return ItemArray{
Buf: b.PayloadBytes(),
Offs: []byte{},
}
+4 -5
View File
@@ -23,7 +23,6 @@ import (
"github.com/dolthub/dolt/go/gen/fb/serial"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/pool"
"github.com/dolthub/dolt/go/store/val"
)
var commitClosureKeyOffsets []byte
@@ -49,16 +48,16 @@ func offsetsForCommitClosureKeys(buf []byte) []byte {
return commitClosureKeyOffsets[:cnt*uint16Size]
}
func getCommitClosureKeys(msg serial.Message) val.SlicedBuffer {
var ret val.SlicedBuffer
func getCommitClosureKeys(msg serial.Message) ItemArray {
var ret ItemArray
m := serial.GetRootAsCommitClosure(msg, serial.MessagePrefixSz)
ret.Buf = m.KeyItemsBytes()
ret.Offs = offsetsForCommitClosureKeys(ret.Buf)
return ret
}
func getCommitClosureValues(msg serial.Message) val.SlicedBuffer {
var ret val.SlicedBuffer
func getCommitClosureValues(msg serial.Message) ItemArray {
var ret ItemArray
m := serial.GetRootAsCommitClosure(msg, serial.MessagePrefixSz)
if m.AddressArrayLength() == 0 {
ret.Buf = commitClosureEmptyValueBytes
@@ -12,51 +12,33 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package val
package message
type SlicedBuffer struct {
import "github.com/dolthub/dolt/go/store/val"
type ItemArray struct {
Buf []byte
Offs offsets
Offs []byte
}
// GetSlice returns the ith slice of |sb.Buf|.
func (sb SlicedBuffer) GetSlice(i int) []byte {
// GetSlice returns the ith slice of |sb.Buf|.
func (sb ItemArray) GetSlice(i int) []byte {
start := uint16(0)
if i > 0 {
pos := (i - 1) * 2
start = readUint16(sb.Offs[pos : pos+2])
start = val.ReadUint16(sb.Offs[pos : pos+2])
}
stop := uint16(len(sb.Buf))
if i*2 < len(sb.Offs) {
pos := i * 2
stop = readUint16(sb.Offs[pos : pos+2])
stop = val.ReadUint16(sb.Offs[pos : pos+2])
}
return sb.Buf[start:stop]
}
func (sb SlicedBuffer) Len() int {
func (sb ItemArray) Len() int {
// offsets stored as uint16s with first offset omitted
return len(sb.Offs)/2 + 1
}
type offsets []byte
// offsetsSize returns the number of bytes needed to
// store |fieldCount| offsets.
func offsetsSize(count int) ByteSize {
if count == 0 {
return 0
}
return ByteSize((count - 1) * 2)
}
// writeOffset writes offset |pos| at index |i|.
func writeOffset(i int, off ByteSize, arr offsets) {
if i == 0 {
return
}
start := (i - 1) * 2
writeUint16(arr[start:start+2], uint16(off))
}
+1 -1
View File
@@ -95,7 +95,7 @@ func (s MergeArtifactSerializer) Serialize(keys, values [][]byte, subtrees []uin
return serial.FinishMessage(b, serial.MergeArtifactsEnd(b), mergeArtifactFileID)
}
func getArtifactMapKeysAndValues(msg serial.Message) (keys, values val.SlicedBuffer, cnt uint16) {
func getArtifactMapKeysAndValues(msg serial.Message) (keys, values ItemArray, cnt uint16) {
am := serial.GetRootAsMergeArtifacts(msg, serial.MessagePrefixSz)
keys.Buf = am.KeyItemsBytes()
+1 -2
View File
@@ -20,14 +20,13 @@ import (
"github.com/dolthub/dolt/go/gen/fb/serial"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/val"
)
type Serializer interface {
Serialize(keys, values [][]byte, subtrees []uint64, level int) serial.Message
}
func GetKeysAndValues(msg serial.Message) (keys, values val.SlicedBuffer, cnt uint16) {
func GetKeysAndValues(msg serial.Message) (keys, values ItemArray, cnt uint16) {
id := serial.GetFileID(msg)
if id == serial.ProllyTreeNodeFileID {
+1 -1
View File
@@ -97,7 +97,7 @@ func (s ProllyMapSerializer) Serialize(keys, values [][]byte, subtrees []uint64,
return serial.FinishMessage(b, serial.ProllyTreeNodeEnd(b), prollyMapFileID)
}
func getProllyMapKeysAndValues(msg serial.Message) (keys, values val.SlicedBuffer, cnt uint16) {
func getProllyMapKeysAndValues(msg serial.Message) (keys, values ItemArray, cnt uint16) {
pm := serial.GetRootAsProllyTreeNode(msg, serial.MessagePrefixSz)
keys.Buf = pm.KeyItemsBytes()
+1 -1
View File
@@ -33,7 +33,7 @@ type subtreeCounts []uint64
type Node struct {
// keys and values contain sub-slices of |msg|,
// allowing faster lookups by avoiding the vtable
keys, values val.SlicedBuffer
keys, values message.ItemArray
subtrees subtreeCounts
count uint16
msg serial.Message
+4 -4
View File
@@ -250,12 +250,12 @@ func compareInt16(l, r int16) int {
}
}
func readUint16(val []byte) uint16 {
func ReadUint16(val []byte) uint16 {
expectSize(val, uint16Size)
return binary.LittleEndian.Uint16(val)
}
func writeUint16(buf []byte, val uint16) {
func WriteUint16(buf []byte, val uint16) {
expectSize(buf, uint16Size)
binary.LittleEndian.PutUint16(buf, val)
}
@@ -511,11 +511,11 @@ func compareDatetime(l, r time.Time) int {
}
func readEnum(val []byte) uint16 {
return readUint16(val)
return ReadUint16(val)
}
func writeEnum(buf []byte, val uint16) {
writeUint16(buf, val)
WriteUint16(buf, val)
}
func compareEnum(l, r uint16) int {
+2 -2
View File
@@ -426,8 +426,8 @@ func roundTripUints(t *testing.T) {
uintegers = append(uintegers, math.MaxUint16)
for _, value := range uintegers {
exp := uint16(value)
writeUint16(buf, exp)
assert.Equal(t, exp, readUint16(buf))
WriteUint16(buf, exp)
assert.Equal(t, exp, ReadUint16(buf))
zero(buf)
}
+6 -6
View File
@@ -32,8 +32,8 @@ func NewTriple[V ~[]byte](pool pool.BuffPool, one, two, three V) (tri Triple[V])
copy(tri[o2:], three)
// populate offsets
writeUint16(tri[end:end+2], uint16(o1))
writeUint16(tri[end+2:], uint16(o2))
WriteUint16(tri[end:end+2], uint16(o1))
WriteUint16(tri[end+2:], uint16(o2))
return
}
@@ -41,19 +41,19 @@ type Triple[V ~[]byte] []byte
func (t Triple[V]) First() V {
l := len(t)
o1 := readUint16(t[l-4 : l-2])
o1 := ReadUint16(t[l-4 : l-2])
return V(t[:o1])
}
func (t Triple[V]) Second() V {
l := len(t)
o1 := readUint16(t[l-4 : l-2])
o2 := readUint16(t[l-2 : l])
o1 := ReadUint16(t[l-4 : l-2])
o2 := ReadUint16(t[l-2 : l])
return V(t[o1:o2])
}
func (t Triple[V]) Third() V {
l := len(t)
o2 := readUint16(t[l-2 : l])
o2 := ReadUint16(t[l-2 : l])
return V(t[o2 : l-4])
}
+30 -10
View File
@@ -140,11 +140,11 @@ func (tup Tuple) GetOffset(i int) (int, bool) {
start, stop := uint16(0), uint16(split)
if i*2 < len(offs) {
pos := i * 2
stop = readUint16(offs[pos : pos+2])
stop = ReadUint16(offs[pos : pos+2])
}
if i > 0 {
pos := (i - 1) * 2
start = readUint16(offs[pos : pos+2])
start = ReadUint16(offs[pos : pos+2])
}
return int(start), start != stop
@@ -164,11 +164,11 @@ func (tup Tuple) GetField(i int) []byte {
start, stop := uint16(0), uint16(split)
if i*2 < len(offs) {
pos := i * 2
stop = readUint16(offs[pos : pos+2])
stop = ReadUint16(offs[pos : pos+2])
}
if i > 0 {
pos := (i - 1) * 2
start = readUint16(offs[pos : pos+2])
start = ReadUint16(offs[pos : pos+2])
}
if start == stop {
@@ -190,7 +190,7 @@ func (tup Tuple) FieldIsNull(i int) bool {
func (tup Tuple) Count() int {
sl := tup[len(tup)-int(countSize):]
return int(readUint16(sl))
return int(ReadUint16(sl))
}
func isNull(val []byte) bool {
@@ -203,7 +203,7 @@ func sizeOf(val []byte) ByteSize {
func writeFieldCount(tup Tuple, count int) {
sl := tup[len(tup)-int(countSize):]
writeUint16(sl, uint16(count))
WriteUint16(sl, uint16(count))
}
func sliceManyFields(tuple Tuple, indexes []int, slices [][]byte) [][]byte {
@@ -227,7 +227,7 @@ func sliceManyFields(tuple Tuple, indexes []int, slices [][]byte) [][]byte {
// we don't have a "stop" offset for the last field
n := len(slices)
if indexes[n-1] == cnt-1 {
o := readUint16(offs[len(offs)-2:])
o := ReadUint16(offs[len(offs)-2:])
slices[n-1] = data[o:]
indexes = indexes[:n-1]
subset = subset[:n-1]
@@ -235,15 +235,15 @@ func sliceManyFields(tuple Tuple, indexes []int, slices [][]byte) [][]byte {
// we don't have a "start" offset for the first field
if len(indexes) > 0 && indexes[0] == 0 {
o := readUint16(offs[:2])
o := ReadUint16(offs[:2])
slices[0] = data[:o]
indexes = indexes[1:]
subset = subset[1:]
}
for i, k := range indexes {
start := readUint16(offs[(k-1)*2 : k*2])
stop := readUint16(offs[k*2 : (k+1)*2])
start := ReadUint16(offs[(k-1)*2 : k*2])
stop := ReadUint16(offs[k*2 : (k+1)*2])
subset[i] = tuple[start:stop]
}
@@ -255,3 +255,23 @@ func sliceManyFields(tuple Tuple, indexes []int, slices [][]byte) [][]byte {
return slices
}
type offsets []byte
// offsetsSize returns the number of bytes needed to
// store |fieldCount| offsets.
func offsetsSize(count int) ByteSize {
if count == 0 {
return 0
}
return ByteSize((count - 1) * 2)
}
// writeOffset writes offset |pos| at index |i|.
func writeOffset(i int, off ByteSize, arr offsets) {
if i == 0 {
return
}
start := (i - 1) * 2
WriteUint16(arr[start:start+2], uint16(off))
}
+1 -1
View File
@@ -119,7 +119,7 @@ func (tb *TupleBuilder) PutUint16(i int, v uint16) {
tb.Desc.expectEncoding(i, Uint16Enc)
tb.ensureCapacity(uint16Size)
tb.fields[i] = tb.buf[tb.pos : tb.pos+uint16Size]
writeUint16(tb.fields[i], v)
WriteUint16(tb.fields[i], v)
tb.pos += uint16Size
}
+1 -1
View File
@@ -75,7 +75,7 @@ func compare(typ Type, left, right []byte) int {
case Int16Enc:
return compareInt16(readInt16(left), readInt16(right))
case Uint16Enc:
return compareUint16(readUint16(left), readUint16(right))
return compareUint16(ReadUint16(left), ReadUint16(right))
case Int32Enc:
return compareInt32(readInt32(left), readInt32(right))
case Uint32Enc:
+2 -2
View File
@@ -198,7 +198,7 @@ func (td TupleDesc) GetUint16(i int, tup Tuple) (v uint16, ok bool) {
td.expectEncoding(i, Uint16Enc)
b := td.GetField(i, tup)
if b != nil {
v, ok = readUint16(b), true
v, ok = ReadUint16(b), true
}
return
}
@@ -492,7 +492,7 @@ func formatValue(enc Encoding, value []byte) string {
v := readInt16(value)
return strconv.Itoa(int(v))
case Uint16Enc:
v := readUint16(value)
v := ReadUint16(value)
return strconv.Itoa(int(v))
case Int32Enc:
v := readInt32(value)