go/{store,libraries,gen}: Adopt new flatc, throw an error if we encounter unknown fields.

This commit is contained in:
Aaron Son
2022-08-12 10:52:57 -07:00
parent b9f3cfd5c9
commit 7906c9e98f
78 changed files with 1757 additions and 516 deletions

3
.gitmodules vendored
View File

@@ -18,3 +18,6 @@
path = integration-tests/mysql-client-tests/cpp/third_party/mysql-connector-cpp
url = https://github.com/mysql/mysql-connector-cpp
ignore = dirty
[submodule "proto/third_party/flatbuffers"]
path = proto/third_party/flatbuffers
url = git@github.com:dolthub/flatbuffers.git

View File

@@ -152,10 +152,16 @@ func (cmd RootsCmd) processTableFile(ctx context.Context, path string, modified
}
} else if sm, ok := value.(types.SerialMessage); ok {
if serial.GetFileID(sm) == serial.StoreRootFileID {
msg := serial.GetRootAsStoreRoot([]byte(sm), serial.MessagePrefixSz)
msg, err := serial.TryGetRootAsStoreRoot([]byte(sm), serial.MessagePrefixSz)
if err != nil {
return false, err
}
ambytes := msg.AddressMapBytes()
node := tree.NodeFromBytes(ambytes)
err := tree.OutputAddressMapNode(cli.OutStream, node)
node, err := tree.NodeFromBytes(ambytes)
if err != nil {
return false, err
}
err = tree.OutputAddressMapNode(cli.OutStream, node)
if err != nil {
return false, err
}

View File

@@ -24,17 +24,34 @@ type AddressMap struct {
_tab flatbuffers.Table
}
func GetRootAsAddressMap(buf []byte, offset flatbuffers.UOffsetT) *AddressMap {
func InitAddressMapRoot(o *AddressMap, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if AddressMapNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func TryGetRootAsAddressMap(buf []byte, offset flatbuffers.UOffsetT) (*AddressMap, error) {
x := &AddressMap{}
x.Init(buf, n+offset)
return x, InitAddressMapRoot(x, buf, offset)
}
func GetRootAsAddressMap(buf []byte, offset flatbuffers.UOffsetT) *AddressMap {
x := &AddressMap{}
InitAddressMapRoot(x, buf, offset)
return x
}
func GetSizePrefixedRootAsAddressMap(buf []byte, offset flatbuffers.UOffsetT) *AddressMap {
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
func TryGetSizePrefixedRootAsAddressMap(buf []byte, offset flatbuffers.UOffsetT) (*AddressMap, error) {
x := &AddressMap{}
x.Init(buf, n+offset+flatbuffers.SizeUint32)
return x, InitAddressMapRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsAddressMap(buf []byte, offset flatbuffers.UOffsetT) *AddressMap {
x := &AddressMap{}
InitAddressMapRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
@@ -199,8 +216,10 @@ func (rcv *AddressMap) MutateTreeLevel(n byte) bool {
return rcv._tab.MutateByteSlot(14, n)
}
const AddressMapNumFields = 6
func AddressMapStart(builder *flatbuffers.Builder) {
builder.StartObject(6)
builder.StartObject(AddressMapNumFields)
}
func AddressMapAddKeyItems(builder *flatbuffers.Builder, keyItems flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(keyItems), 0)

View File

@@ -24,17 +24,34 @@ type Blob struct {
_tab flatbuffers.Table
}
func GetRootAsBlob(buf []byte, offset flatbuffers.UOffsetT) *Blob {
func InitBlobRoot(o *Blob, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if BlobNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func TryGetRootAsBlob(buf []byte, offset flatbuffers.UOffsetT) (*Blob, error) {
x := &Blob{}
x.Init(buf, n+offset)
return x, InitBlobRoot(x, buf, offset)
}
func GetRootAsBlob(buf []byte, offset flatbuffers.UOffsetT) *Blob {
x := &Blob{}
InitBlobRoot(x, buf, offset)
return x
}
func GetSizePrefixedRootAsBlob(buf []byte, offset flatbuffers.UOffsetT) *Blob {
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
func TryGetSizePrefixedRootAsBlob(buf []byte, offset flatbuffers.UOffsetT) (*Blob, error) {
x := &Blob{}
x.Init(buf, n+offset+flatbuffers.SizeUint32)
return x, InitBlobRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsBlob(buf []byte, offset flatbuffers.UOffsetT) *Blob {
x := &Blob{}
InitBlobRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
@@ -173,8 +190,10 @@ func (rcv *Blob) MutateTreeLevel(n byte) bool {
return rcv._tab.MutateByteSlot(12, n)
}
const BlobNumFields = 5
func BlobStart(builder *flatbuffers.Builder) {
builder.StartObject(5)
builder.StartObject(BlobNumFields)
}
func BlobAddPayload(builder *flatbuffers.Builder, payload flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(payload), 0)

View File

@@ -24,17 +24,34 @@ type Commit struct {
_tab flatbuffers.Table
}
func GetRootAsCommit(buf []byte, offset flatbuffers.UOffsetT) *Commit {
func InitCommitRoot(o *Commit, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if CommitNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func TryGetRootAsCommit(buf []byte, offset flatbuffers.UOffsetT) (*Commit, error) {
x := &Commit{}
x.Init(buf, n+offset)
return x, InitCommitRoot(x, buf, offset)
}
func GetRootAsCommit(buf []byte, offset flatbuffers.UOffsetT) *Commit {
x := &Commit{}
InitCommitRoot(x, buf, offset)
return x
}
func GetSizePrefixedRootAsCommit(buf []byte, offset flatbuffers.UOffsetT) *Commit {
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
func TryGetSizePrefixedRootAsCommit(buf []byte, offset flatbuffers.UOffsetT) (*Commit, error) {
x := &Commit{}
x.Init(buf, n+offset+flatbuffers.SizeUint32)
return x, InitCommitRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsCommit(buf []byte, offset flatbuffers.UOffsetT) *Commit {
x := &Commit{}
InitCommitRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
@@ -209,8 +226,10 @@ func (rcv *Commit) MutateUserTimestampMillis(n int64) bool {
return rcv._tab.MutateInt64Slot(20, n)
}
const CommitNumFields = 9
func CommitStart(builder *flatbuffers.Builder) {
builder.StartObject(9)
builder.StartObject(CommitNumFields)
}
func CommitAddRoot(builder *flatbuffers.Builder, root flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(root), 0)

View File

@@ -24,17 +24,34 @@ type CommitClosure struct {
_tab flatbuffers.Table
}
func GetRootAsCommitClosure(buf []byte, offset flatbuffers.UOffsetT) *CommitClosure {
func InitCommitClosureRoot(o *CommitClosure, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if CommitClosureNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func TryGetRootAsCommitClosure(buf []byte, offset flatbuffers.UOffsetT) (*CommitClosure, error) {
x := &CommitClosure{}
x.Init(buf, n+offset)
return x, InitCommitClosureRoot(x, buf, offset)
}
func GetRootAsCommitClosure(buf []byte, offset flatbuffers.UOffsetT) *CommitClosure {
x := &CommitClosure{}
InitCommitClosureRoot(x, buf, offset)
return x
}
func GetSizePrefixedRootAsCommitClosure(buf []byte, offset flatbuffers.UOffsetT) *CommitClosure {
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
func TryGetSizePrefixedRootAsCommitClosure(buf []byte, offset flatbuffers.UOffsetT) (*CommitClosure, error) {
x := &CommitClosure{}
x.Init(buf, n+offset+flatbuffers.SizeUint32)
return x, InitCommitClosureRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsCommitClosure(buf []byte, offset flatbuffers.UOffsetT) *CommitClosure {
x := &CommitClosure{}
InitCommitClosureRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
@@ -173,8 +190,10 @@ func (rcv *CommitClosure) MutateTreeLevel(n byte) bool {
return rcv._tab.MutateByteSlot(12, n)
}
const CommitClosureNumFields = 5
func CommitClosureStart(builder *flatbuffers.Builder) {
builder.StartObject(5)
builder.StartObject(CommitClosureNumFields)
}
func CommitClosureAddKeyItems(builder *flatbuffers.Builder, keyItems flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(keyItems), 0)

View File

@@ -59,17 +59,34 @@ type ForeignKeyCollection struct {
_tab flatbuffers.Table
}
func GetRootAsForeignKeyCollection(buf []byte, offset flatbuffers.UOffsetT) *ForeignKeyCollection {
func InitForeignKeyCollectionRoot(o *ForeignKeyCollection, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if ForeignKeyCollectionNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func TryGetRootAsForeignKeyCollection(buf []byte, offset flatbuffers.UOffsetT) (*ForeignKeyCollection, error) {
x := &ForeignKeyCollection{}
x.Init(buf, n+offset)
return x, InitForeignKeyCollectionRoot(x, buf, offset)
}
func GetRootAsForeignKeyCollection(buf []byte, offset flatbuffers.UOffsetT) *ForeignKeyCollection {
x := &ForeignKeyCollection{}
InitForeignKeyCollectionRoot(x, buf, offset)
return x
}
func GetSizePrefixedRootAsForeignKeyCollection(buf []byte, offset flatbuffers.UOffsetT) *ForeignKeyCollection {
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
func TryGetSizePrefixedRootAsForeignKeyCollection(buf []byte, offset flatbuffers.UOffsetT) (*ForeignKeyCollection, error) {
x := &ForeignKeyCollection{}
x.Init(buf, n+offset+flatbuffers.SizeUint32)
return x, InitForeignKeyCollectionRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsForeignKeyCollection(buf []byte, offset flatbuffers.UOffsetT) *ForeignKeyCollection {
x := &ForeignKeyCollection{}
InitForeignKeyCollectionRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
@@ -94,6 +111,21 @@ func (rcv *ForeignKeyCollection) ForeignKeys(obj *ForeignKey, j int) bool {
return false
}
func (rcv *ForeignKeyCollection) TryForeignKeys(obj *ForeignKey, j int) (bool, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
if o != 0 {
x := rcv._tab.Vector(o)
x += flatbuffers.UOffsetT(j) * 4
x = rcv._tab.Indirect(x)
obj.Init(rcv._tab.Bytes, x)
if ForeignKeyNumFields < obj.Table().NumFields() {
return false, flatbuffers.ErrTableHasUnknownFields
}
return true, nil
}
return false, nil
}
func (rcv *ForeignKeyCollection) ForeignKeysLength() int {
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
if o != 0 {
@@ -102,8 +134,10 @@ func (rcv *ForeignKeyCollection) ForeignKeysLength() int {
return 0
}
const ForeignKeyCollectionNumFields = 1
func ForeignKeyCollectionStart(builder *flatbuffers.Builder) {
builder.StartObject(1)
builder.StartObject(ForeignKeyCollectionNumFields)
}
func ForeignKeyCollectionAddForeignKeys(builder *flatbuffers.Builder, foreignKeys flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(foreignKeys), 0)
@@ -119,17 +153,34 @@ type ForeignKey struct {
_tab flatbuffers.Table
}
func GetRootAsForeignKey(buf []byte, offset flatbuffers.UOffsetT) *ForeignKey {
func InitForeignKeyRoot(o *ForeignKey, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if ForeignKeyNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func TryGetRootAsForeignKey(buf []byte, offset flatbuffers.UOffsetT) (*ForeignKey, error) {
x := &ForeignKey{}
x.Init(buf, n+offset)
return x, InitForeignKeyRoot(x, buf, offset)
}
func GetRootAsForeignKey(buf []byte, offset flatbuffers.UOffsetT) *ForeignKey {
x := &ForeignKey{}
InitForeignKeyRoot(x, buf, offset)
return x
}
func GetSizePrefixedRootAsForeignKey(buf []byte, offset flatbuffers.UOffsetT) *ForeignKey {
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
func TryGetSizePrefixedRootAsForeignKey(buf []byte, offset flatbuffers.UOffsetT) (*ForeignKey, error) {
x := &ForeignKey{}
x.Init(buf, n+offset+flatbuffers.SizeUint32)
return x, InitForeignKeyRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsForeignKey(buf []byte, offset flatbuffers.UOffsetT) *ForeignKey {
x := &ForeignKey{}
InitForeignKeyRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
@@ -292,8 +343,10 @@ func (rcv *ForeignKey) UnresolvedParentColumnsLength() int {
return 0
}
const ForeignKeyNumFields = 11
func ForeignKeyStart(builder *flatbuffers.Builder) {
builder.StartObject(11)
builder.StartObject(ForeignKeyNumFields)
}
func ForeignKeyAddName(builder *flatbuffers.Builder, name flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(name), 0)

View File

@@ -24,17 +24,34 @@ type MergeArtifacts struct {
_tab flatbuffers.Table
}
func GetRootAsMergeArtifacts(buf []byte, offset flatbuffers.UOffsetT) *MergeArtifacts {
func InitMergeArtifactsRoot(o *MergeArtifacts, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if MergeArtifactsNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func TryGetRootAsMergeArtifacts(buf []byte, offset flatbuffers.UOffsetT) (*MergeArtifacts, error) {
x := &MergeArtifacts{}
x.Init(buf, n+offset)
return x, InitMergeArtifactsRoot(x, buf, offset)
}
func GetRootAsMergeArtifacts(buf []byte, offset flatbuffers.UOffsetT) *MergeArtifacts {
x := &MergeArtifacts{}
InitMergeArtifactsRoot(x, buf, offset)
return x
}
func GetSizePrefixedRootAsMergeArtifacts(buf []byte, offset flatbuffers.UOffsetT) *MergeArtifacts {
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
func TryGetSizePrefixedRootAsMergeArtifacts(buf []byte, offset flatbuffers.UOffsetT) (*MergeArtifacts, error) {
x := &MergeArtifacts{}
x.Init(buf, n+offset+flatbuffers.SizeUint32)
return x, InitMergeArtifactsRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsMergeArtifacts(buf []byte, offset flatbuffers.UOffsetT) *MergeArtifacts {
x := &MergeArtifacts{}
InitMergeArtifactsRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
@@ -285,8 +302,10 @@ func (rcv *MergeArtifacts) MutateTreeLevel(n byte) bool {
return rcv._tab.MutateByteSlot(20, n)
}
const MergeArtifactsNumFields = 9
func MergeArtifactsStart(builder *flatbuffers.Builder) {
builder.StartObject(9)
builder.StartObject(MergeArtifactsNumFields)
}
func MergeArtifactsAddKeyItems(builder *flatbuffers.Builder, keyItems flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(keyItems), 0)

View File

@@ -50,17 +50,34 @@ type ProllyTreeNode struct {
_tab flatbuffers.Table
}
func GetRootAsProllyTreeNode(buf []byte, offset flatbuffers.UOffsetT) *ProllyTreeNode {
func InitProllyTreeNodeRoot(o *ProllyTreeNode, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if ProllyTreeNodeNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func TryGetRootAsProllyTreeNode(buf []byte, offset flatbuffers.UOffsetT) (*ProllyTreeNode, error) {
x := &ProllyTreeNode{}
x.Init(buf, n+offset)
return x, InitProllyTreeNodeRoot(x, buf, offset)
}
func GetRootAsProllyTreeNode(buf []byte, offset flatbuffers.UOffsetT) *ProllyTreeNode {
x := &ProllyTreeNode{}
InitProllyTreeNodeRoot(x, buf, offset)
return x
}
func GetSizePrefixedRootAsProllyTreeNode(buf []byte, offset flatbuffers.UOffsetT) *ProllyTreeNode {
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
func TryGetSizePrefixedRootAsProllyTreeNode(buf []byte, offset flatbuffers.UOffsetT) (*ProllyTreeNode, error) {
x := &ProllyTreeNode{}
x.Init(buf, n+offset+flatbuffers.SizeUint32)
return x, InitProllyTreeNodeRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsProllyTreeNode(buf []byte, offset flatbuffers.UOffsetT) *ProllyTreeNode {
x := &ProllyTreeNode{}
InitProllyTreeNodeRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
@@ -335,8 +352,10 @@ func (rcv *ProllyTreeNode) MutateTreeLevel(n byte) bool {
return rcv._tab.MutateByteSlot(24, n)
}
const ProllyTreeNodeNumFields = 11
func ProllyTreeNodeStart(builder *flatbuffers.Builder) {
builder.StartObject(11)
builder.StartObject(ProllyTreeNodeNumFields)
}
func ProllyTreeNodeAddKeyItems(builder *flatbuffers.Builder, keyItems flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(keyItems), 0)

View File

@@ -24,17 +24,34 @@ type RootValue struct {
_tab flatbuffers.Table
}
func GetRootAsRootValue(buf []byte, offset flatbuffers.UOffsetT) *RootValue {
func InitRootValueRoot(o *RootValue, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if RootValueNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func TryGetRootAsRootValue(buf []byte, offset flatbuffers.UOffsetT) (*RootValue, error) {
x := &RootValue{}
x.Init(buf, n+offset)
return x, InitRootValueRoot(x, buf, offset)
}
func GetRootAsRootValue(buf []byte, offset flatbuffers.UOffsetT) *RootValue {
x := &RootValue{}
InitRootValueRoot(x, buf, offset)
return x
}
func GetSizePrefixedRootAsRootValue(buf []byte, offset flatbuffers.UOffsetT) *RootValue {
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
func TryGetSizePrefixedRootAsRootValue(buf []byte, offset flatbuffers.UOffsetT) (*RootValue, error) {
x := &RootValue{}
x.Init(buf, n+offset+flatbuffers.SizeUint32)
return x, InitRootValueRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsRootValue(buf []byte, offset flatbuffers.UOffsetT) *RootValue {
x := &RootValue{}
InitRootValueRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
@@ -127,8 +144,10 @@ func (rcv *RootValue) MutateForeignKeyAddr(j int, n byte) bool {
return false
}
const RootValueNumFields = 3
func RootValueStart(builder *flatbuffers.Builder) {
builder.StartObject(3)
builder.StartObject(RootValueNumFields)
}
func RootValueAddFeatureVersion(builder *flatbuffers.Builder, featureVersion int64) {
builder.PrependInt64Slot(0, featureVersion, 0)

View File

@@ -24,17 +24,34 @@ type TableSchema struct {
_tab flatbuffers.Table
}
func GetRootAsTableSchema(buf []byte, offset flatbuffers.UOffsetT) *TableSchema {
func InitTableSchemaRoot(o *TableSchema, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if TableSchemaNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func TryGetRootAsTableSchema(buf []byte, offset flatbuffers.UOffsetT) (*TableSchema, error) {
x := &TableSchema{}
x.Init(buf, n+offset)
return x, InitTableSchemaRoot(x, buf, offset)
}
func GetRootAsTableSchema(buf []byte, offset flatbuffers.UOffsetT) *TableSchema {
x := &TableSchema{}
InitTableSchemaRoot(x, buf, offset)
return x
}
func GetSizePrefixedRootAsTableSchema(buf []byte, offset flatbuffers.UOffsetT) *TableSchema {
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
func TryGetSizePrefixedRootAsTableSchema(buf []byte, offset flatbuffers.UOffsetT) (*TableSchema, error) {
x := &TableSchema{}
x.Init(buf, n+offset+flatbuffers.SizeUint32)
return x, InitTableSchemaRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsTableSchema(buf []byte, offset flatbuffers.UOffsetT) *TableSchema {
x := &TableSchema{}
InitTableSchemaRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
@@ -59,6 +76,21 @@ func (rcv *TableSchema) Columns(obj *Column, j int) bool {
return false
}
func (rcv *TableSchema) TryColumns(obj *Column, j int) (bool, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
if o != 0 {
x := rcv._tab.Vector(o)
x += flatbuffers.UOffsetT(j) * 4
x = rcv._tab.Indirect(x)
obj.Init(rcv._tab.Bytes, x)
if ColumnNumFields < obj.Table().NumFields() {
return false, flatbuffers.ErrTableHasUnknownFields
}
return true, nil
}
return false, nil
}
func (rcv *TableSchema) ColumnsLength() int {
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
if o != 0 {
@@ -80,6 +112,22 @@ func (rcv *TableSchema) ClusteredIndex(obj *Index) *Index {
return nil
}
func (rcv *TableSchema) TryClusteredIndex(obj *Index) (*Index, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
if o != 0 {
x := rcv._tab.Indirect(o + rcv._tab.Pos)
if obj == nil {
obj = new(Index)
}
obj.Init(rcv._tab.Bytes, x)
if IndexNumFields < obj.Table().NumFields() {
return nil, flatbuffers.ErrTableHasUnknownFields
}
return obj, nil
}
return nil, nil
}
func (rcv *TableSchema) SecondaryIndexes(obj *Index, j int) bool {
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
if o != 0 {
@@ -92,6 +140,21 @@ func (rcv *TableSchema) SecondaryIndexes(obj *Index, j int) bool {
return false
}
func (rcv *TableSchema) TrySecondaryIndexes(obj *Index, j int) (bool, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
if o != 0 {
x := rcv._tab.Vector(o)
x += flatbuffers.UOffsetT(j) * 4
x = rcv._tab.Indirect(x)
obj.Init(rcv._tab.Bytes, x)
if IndexNumFields < obj.Table().NumFields() {
return false, flatbuffers.ErrTableHasUnknownFields
}
return true, nil
}
return false, nil
}
func (rcv *TableSchema) SecondaryIndexesLength() int {
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
if o != 0 {
@@ -112,6 +175,21 @@ func (rcv *TableSchema) Checks(obj *CheckConstraint, j int) bool {
return false
}
func (rcv *TableSchema) TryChecks(obj *CheckConstraint, j int) (bool, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
x := rcv._tab.Vector(o)
x += flatbuffers.UOffsetT(j) * 4
x = rcv._tab.Indirect(x)
obj.Init(rcv._tab.Bytes, x)
if CheckConstraintNumFields < obj.Table().NumFields() {
return false, flatbuffers.ErrTableHasUnknownFields
}
return true, nil
}
return false, nil
}
func (rcv *TableSchema) ChecksLength() int {
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
@@ -120,8 +198,10 @@ func (rcv *TableSchema) ChecksLength() int {
return 0
}
const TableSchemaNumFields = 4
func TableSchemaStart(builder *flatbuffers.Builder) {
builder.StartObject(4)
builder.StartObject(TableSchemaNumFields)
}
func TableSchemaAddColumns(builder *flatbuffers.Builder, columns flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(columns), 0)
@@ -152,17 +232,34 @@ type Column struct {
_tab flatbuffers.Table
}
func GetRootAsColumn(buf []byte, offset flatbuffers.UOffsetT) *Column {
func InitColumnRoot(o *Column, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if ColumnNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func TryGetRootAsColumn(buf []byte, offset flatbuffers.UOffsetT) (*Column, error) {
x := &Column{}
x.Init(buf, n+offset)
return x, InitColumnRoot(x, buf, offset)
}
func GetRootAsColumn(buf []byte, offset flatbuffers.UOffsetT) *Column {
x := &Column{}
InitColumnRoot(x, buf, offset)
return x
}
func GetSizePrefixedRootAsColumn(buf []byte, offset flatbuffers.UOffsetT) *Column {
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
func TryGetSizePrefixedRootAsColumn(buf []byte, offset flatbuffers.UOffsetT) (*Column, error) {
x := &Column{}
x.Init(buf, n+offset+flatbuffers.SizeUint32)
return x, InitColumnRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsColumn(buf []byte, offset flatbuffers.UOffsetT) *Column {
x := &Column{}
InitColumnRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
@@ -315,8 +412,10 @@ func (rcv *Column) MutateVirtual(n bool) bool {
return rcv._tab.MutateBoolSlot(28, n)
}
const ColumnNumFields = 13
func ColumnStart(builder *flatbuffers.Builder) {
builder.StartObject(13)
builder.StartObject(ColumnNumFields)
}
func ColumnAddName(builder *flatbuffers.Builder, name flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(name), 0)
@@ -365,17 +464,34 @@ type Index struct {
_tab flatbuffers.Table
}
func GetRootAsIndex(buf []byte, offset flatbuffers.UOffsetT) *Index {
func InitIndexRoot(o *Index, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if IndexNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func TryGetRootAsIndex(buf []byte, offset flatbuffers.UOffsetT) (*Index, error) {
x := &Index{}
x.Init(buf, n+offset)
return x, InitIndexRoot(x, buf, offset)
}
func GetRootAsIndex(buf []byte, offset flatbuffers.UOffsetT) *Index {
x := &Index{}
InitIndexRoot(x, buf, offset)
return x
}
func GetSizePrefixedRootAsIndex(buf []byte, offset flatbuffers.UOffsetT) *Index {
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
func TryGetSizePrefixedRootAsIndex(buf []byte, offset flatbuffers.UOffsetT) (*Index, error) {
x := &Index{}
x.Init(buf, n+offset+flatbuffers.SizeUint32)
return x, InitIndexRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsIndex(buf []byte, offset flatbuffers.UOffsetT) *Index {
x := &Index{}
InitIndexRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
@@ -518,8 +634,10 @@ func (rcv *Index) MutateSystemDefined(n bool) bool {
return rcv._tab.MutateBoolSlot(18, n)
}
const IndexNumFields = 8
func IndexStart(builder *flatbuffers.Builder) {
builder.StartObject(8)
builder.StartObject(IndexNumFields)
}
func IndexAddName(builder *flatbuffers.Builder, name flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(name), 0)
@@ -562,17 +680,34 @@ type CheckConstraint struct {
_tab flatbuffers.Table
}
func GetRootAsCheckConstraint(buf []byte, offset flatbuffers.UOffsetT) *CheckConstraint {
func InitCheckConstraintRoot(o *CheckConstraint, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if CheckConstraintNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func TryGetRootAsCheckConstraint(buf []byte, offset flatbuffers.UOffsetT) (*CheckConstraint, error) {
x := &CheckConstraint{}
x.Init(buf, n+offset)
return x, InitCheckConstraintRoot(x, buf, offset)
}
func GetRootAsCheckConstraint(buf []byte, offset flatbuffers.UOffsetT) *CheckConstraint {
x := &CheckConstraint{}
InitCheckConstraintRoot(x, buf, offset)
return x
}
func GetSizePrefixedRootAsCheckConstraint(buf []byte, offset flatbuffers.UOffsetT) *CheckConstraint {
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
func TryGetSizePrefixedRootAsCheckConstraint(buf []byte, offset flatbuffers.UOffsetT) (*CheckConstraint, error) {
x := &CheckConstraint{}
x.Init(buf, n+offset+flatbuffers.SizeUint32)
return x, InitCheckConstraintRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsCheckConstraint(buf []byte, offset flatbuffers.UOffsetT) *CheckConstraint {
x := &CheckConstraint{}
InitCheckConstraintRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
@@ -613,8 +748,10 @@ func (rcv *CheckConstraint) MutateEnforced(n bool) bool {
return rcv._tab.MutateBoolSlot(8, n)
}
const CheckConstraintNumFields = 3
func CheckConstraintStart(builder *flatbuffers.Builder) {
builder.StartObject(3)
builder.StartObject(CheckConstraintNumFields)
}
func CheckConstraintAddName(builder *flatbuffers.Builder, name flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(name), 0)

View File

@@ -24,17 +24,34 @@ type StoreRoot struct {
_tab flatbuffers.Table
}
func GetRootAsStoreRoot(buf []byte, offset flatbuffers.UOffsetT) *StoreRoot {
func InitStoreRootRoot(o *StoreRoot, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if StoreRootNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func TryGetRootAsStoreRoot(buf []byte, offset flatbuffers.UOffsetT) (*StoreRoot, error) {
x := &StoreRoot{}
x.Init(buf, n+offset)
return x, InitStoreRootRoot(x, buf, offset)
}
func GetRootAsStoreRoot(buf []byte, offset flatbuffers.UOffsetT) *StoreRoot {
x := &StoreRoot{}
InitStoreRootRoot(x, buf, offset)
return x
}
func GetSizePrefixedRootAsStoreRoot(buf []byte, offset flatbuffers.UOffsetT) *StoreRoot {
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
func TryGetSizePrefixedRootAsStoreRoot(buf []byte, offset flatbuffers.UOffsetT) (*StoreRoot, error) {
x := &StoreRoot{}
x.Init(buf, n+offset+flatbuffers.SizeUint32)
return x, InitStoreRootRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsStoreRoot(buf []byte, offset flatbuffers.UOffsetT) *StoreRoot {
x := &StoreRoot{}
InitStoreRootRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
@@ -81,8 +98,10 @@ func (rcv *StoreRoot) MutateAddressMap(j int, n byte) bool {
return false
}
const StoreRootNumFields = 1
func StoreRootStart(builder *flatbuffers.Builder) {
builder.StartObject(1)
builder.StartObject(StoreRootNumFields)
}
func StoreRootAddAddressMap(builder *flatbuffers.Builder, addressMap flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(addressMap), 0)

View File

@@ -24,17 +24,34 @@ type Table struct {
_tab flatbuffers.Table
}
func GetRootAsTable(buf []byte, offset flatbuffers.UOffsetT) *Table {
func InitTableRoot(o *Table, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if TableNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func TryGetRootAsTable(buf []byte, offset flatbuffers.UOffsetT) (*Table, error) {
x := &Table{}
x.Init(buf, n+offset)
return x, InitTableRoot(x, buf, offset)
}
func GetRootAsTable(buf []byte, offset flatbuffers.UOffsetT) *Table {
x := &Table{}
InitTableRoot(x, buf, offset)
return x
}
func GetSizePrefixedRootAsTable(buf []byte, offset flatbuffers.UOffsetT) *Table {
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
func TryGetSizePrefixedRootAsTable(buf []byte, offset flatbuffers.UOffsetT) (*Table, error) {
x := &Table{}
x.Init(buf, n+offset+flatbuffers.SizeUint32)
return x, InitTableRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsTable(buf []byte, offset flatbuffers.UOffsetT) *Table {
x := &Table{}
InitTableRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
@@ -174,6 +191,22 @@ func (rcv *Table) Conflicts(obj *Conflicts) *Conflicts {
return nil
}
func (rcv *Table) TryConflicts(obj *Conflicts) (*Conflicts, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
if o != 0 {
x := rcv._tab.Indirect(o + rcv._tab.Pos)
if obj == nil {
obj = new(Conflicts)
}
obj.Init(rcv._tab.Bytes, x)
if ConflictsNumFields < obj.Table().NumFields() {
return nil, flatbuffers.ErrTableHasUnknownFields
}
return obj, nil
}
return nil, nil
}
func (rcv *Table) Violations(j int) byte {
o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
if o != 0 {
@@ -242,8 +275,10 @@ func (rcv *Table) MutateArtifacts(j int, n byte) bool {
return false
}
const TableNumFields = 7
func TableStart(builder *flatbuffers.Builder) {
builder.StartObject(7)
builder.StartObject(TableNumFields)
}
func TableAddSchema(builder *flatbuffers.Builder, schema flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(schema), 0)
@@ -289,17 +324,34 @@ type Conflicts struct {
_tab flatbuffers.Table
}
func GetRootAsConflicts(buf []byte, offset flatbuffers.UOffsetT) *Conflicts {
func InitConflictsRoot(o *Conflicts, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if ConflictsNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func TryGetRootAsConflicts(buf []byte, offset flatbuffers.UOffsetT) (*Conflicts, error) {
x := &Conflicts{}
x.Init(buf, n+offset)
return x, InitConflictsRoot(x, buf, offset)
}
func GetRootAsConflicts(buf []byte, offset flatbuffers.UOffsetT) *Conflicts {
x := &Conflicts{}
InitConflictsRoot(x, buf, offset)
return x
}
func GetSizePrefixedRootAsConflicts(buf []byte, offset flatbuffers.UOffsetT) *Conflicts {
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
func TryGetSizePrefixedRootAsConflicts(buf []byte, offset flatbuffers.UOffsetT) (*Conflicts, error) {
x := &Conflicts{}
x.Init(buf, n+offset+flatbuffers.SizeUint32)
return x, InitConflictsRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsConflicts(buf []byte, offset flatbuffers.UOffsetT) *Conflicts {
x := &Conflicts{}
InitConflictsRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
@@ -448,8 +500,10 @@ func (rcv *Conflicts) MutateAncestorSchema(j int, n byte) bool {
return false
}
const ConflictsNumFields = 4
func ConflictsStart(builder *flatbuffers.Builder) {
builder.StartObject(4)
builder.StartObject(ConflictsNumFields)
}
func ConflictsAddData(builder *flatbuffers.Builder, data flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(data), 0)

View File

@@ -24,17 +24,34 @@ type Tag struct {
_tab flatbuffers.Table
}
func GetRootAsTag(buf []byte, offset flatbuffers.UOffsetT) *Tag {
func InitTagRoot(o *Tag, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if TagNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func TryGetRootAsTag(buf []byte, offset flatbuffers.UOffsetT) (*Tag, error) {
x := &Tag{}
x.Init(buf, n+offset)
return x, InitTagRoot(x, buf, offset)
}
func GetRootAsTag(buf []byte, offset flatbuffers.UOffsetT) *Tag {
x := &Tag{}
InitTagRoot(x, buf, offset)
return x
}
func GetSizePrefixedRootAsTag(buf []byte, offset flatbuffers.UOffsetT) *Tag {
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
func TryGetSizePrefixedRootAsTag(buf []byte, offset flatbuffers.UOffsetT) (*Tag, error) {
x := &Tag{}
x.Init(buf, n+offset+flatbuffers.SizeUint32)
return x, InitTagRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsTag(buf []byte, offset flatbuffers.UOffsetT) *Tag {
x := &Tag{}
InitTagRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
@@ -129,8 +146,10 @@ func (rcv *Tag) MutateUserTimestampMillis(n int64) bool {
return rcv._tab.MutateInt64Slot(14, n)
}
const TagNumFields = 6
func TagStart(builder *flatbuffers.Builder) {
builder.StartObject(6)
builder.StartObject(TagNumFields)
}
func TagAddCommitAddr(builder *flatbuffers.Builder, commitAddr flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(commitAddr), 0)

View File

@@ -24,17 +24,34 @@ type WorkingSet struct {
_tab flatbuffers.Table
}
func GetRootAsWorkingSet(buf []byte, offset flatbuffers.UOffsetT) *WorkingSet {
func InitWorkingSetRoot(o *WorkingSet, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if WorkingSetNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func TryGetRootAsWorkingSet(buf []byte, offset flatbuffers.UOffsetT) (*WorkingSet, error) {
x := &WorkingSet{}
x.Init(buf, n+offset)
return x, InitWorkingSetRoot(x, buf, offset)
}
func GetRootAsWorkingSet(buf []byte, offset flatbuffers.UOffsetT) *WorkingSet {
x := &WorkingSet{}
InitWorkingSetRoot(x, buf, offset)
return x
}
func GetSizePrefixedRootAsWorkingSet(buf []byte, offset flatbuffers.UOffsetT) *WorkingSet {
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
func TryGetSizePrefixedRootAsWorkingSet(buf []byte, offset flatbuffers.UOffsetT) (*WorkingSet, error) {
x := &WorkingSet{}
x.Init(buf, n+offset+flatbuffers.SizeUint32)
return x, InitWorkingSetRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsWorkingSet(buf []byte, offset flatbuffers.UOffsetT) *WorkingSet {
x := &WorkingSet{}
InitWorkingSetRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
@@ -164,8 +181,26 @@ func (rcv *WorkingSet) MergeState(obj *MergeState) *MergeState {
return nil
}
func (rcv *WorkingSet) TryMergeState(obj *MergeState) (*MergeState, error) {
o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
if o != 0 {
x := rcv._tab.Indirect(o + rcv._tab.Pos)
if obj == nil {
obj = new(MergeState)
}
obj.Init(rcv._tab.Bytes, x)
if MergeStateNumFields < obj.Table().NumFields() {
return nil, flatbuffers.ErrTableHasUnknownFields
}
return obj, nil
}
return nil, nil
}
const WorkingSetNumFields = 7
func WorkingSetStart(builder *flatbuffers.Builder) {
builder.StartObject(7)
builder.StartObject(WorkingSetNumFields)
}
func WorkingSetAddWorkingRootAddr(builder *flatbuffers.Builder, workingRootAddr flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(workingRootAddr), 0)
@@ -202,17 +237,34 @@ type MergeState struct {
_tab flatbuffers.Table
}
func GetRootAsMergeState(buf []byte, offset flatbuffers.UOffsetT) *MergeState {
func InitMergeStateRoot(o *MergeState, buf []byte, offset flatbuffers.UOffsetT) error {
n := flatbuffers.GetUOffsetT(buf[offset:])
o.Init(buf, n+offset)
if MergeStateNumFields < o.Table().NumFields() {
return flatbuffers.ErrTableHasUnknownFields
}
return nil
}
func TryGetRootAsMergeState(buf []byte, offset flatbuffers.UOffsetT) (*MergeState, error) {
x := &MergeState{}
x.Init(buf, n+offset)
return x, InitMergeStateRoot(x, buf, offset)
}
func GetRootAsMergeState(buf []byte, offset flatbuffers.UOffsetT) *MergeState {
x := &MergeState{}
InitMergeStateRoot(x, buf, offset)
return x
}
func GetSizePrefixedRootAsMergeState(buf []byte, offset flatbuffers.UOffsetT) *MergeState {
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
func TryGetSizePrefixedRootAsMergeState(buf []byte, offset flatbuffers.UOffsetT) (*MergeState, error) {
x := &MergeState{}
x.Init(buf, n+offset+flatbuffers.SizeUint32)
return x, InitMergeStateRoot(x, buf, offset+flatbuffers.SizeUint32)
}
func GetSizePrefixedRootAsMergeState(buf []byte, offset flatbuffers.UOffsetT) *MergeState {
x := &MergeState{}
InitMergeStateRoot(x, buf, offset+flatbuffers.SizeUint32)
return x
}
@@ -293,8 +345,10 @@ func (rcv *MergeState) MutateFromCommitAddr(j int, n byte) bool {
return false
}
const MergeStateNumFields = 2
func MergeStateStart(builder *flatbuffers.Builder) {
builder.StartObject(2)
builder.StartObject(MergeStateNumFields)
}
func MergeStateAddPreWorkingRootAddr(builder *flatbuffers.Builder, preWorkingRootAddr flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(preWorkingRootAddr), 0)

View File

@@ -138,7 +138,9 @@ require (
replace (
github.com/dolthub/dolt/go/gen/proto/dolt/services/eventsapi => ./gen/proto/dolt/services/eventsapi
github.com/google/flatbuffers => github.com/dolthub/flatbuffers v1.13.0-dh.1
github.com/oliveagle/jsonpath => github.com/dolthub/jsonpath v0.0.0-20210609232853-d49537a30474
)
go 1.19

View File

@@ -171,6 +171,8 @@ github.com/denisenkom/go-mssqldb v0.10.0 h1:QykgLZBorFE95+gO3u9esLd0BmbvpWp0/waN
github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dolthub/flatbuffers v1.13.0-dh.1 h1:OWJdaPep22N52O/0xsUevxJ6Qfw1M2txCjZPOdjXybE=
github.com/dolthub/flatbuffers v1.13.0-dh.1/go.mod h1:CorYGaDmXjHz1Z7i50PYXG1Ricn31GcA2wNOTFIQAKE=
github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
github.com/dolthub/go-mysql-server v0.12.1-0.20220805234254-84c6aaf02af6 h1:Hd2BxoA8j6XBjAWmZDLkNLYTNeUL2hR2XDpdQgCua4s=
@@ -326,8 +328,6 @@ github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunE
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/flatbuffers v2.0.6+incompatible h1:XHFReMv7nFFusa+CEokzWbzaYocKXI6C7hdU5Kgh9Lw=
github.com/google/flatbuffers v2.0.6+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=

View File

@@ -42,7 +42,15 @@ type nomsReporter func(ctx context.Context, change *diff.Difference, ch chan<- D
// Summary reports a summary of diff changes between two values
// todo: make package private once dolthub is migrated
func Summary(ctx context.Context, ch chan DiffSummaryProgress, from, to durable.Index, fromSch, toSch schema.Schema) (err error) {
ch <- DiffSummaryProgress{OldSize: from.Count(), NewSize: to.Count()}
fc, err := from.Count()
if err != nil {
return err
}
tc, err := to.Count()
if err != nil {
return err
}
ch <- DiffSummaryProgress{OldSize: fc, NewSize: tc}
fk, tk := schema.IsKeyless(fromSch), schema.IsKeyless(toSch)
var keyless bool
@@ -102,10 +110,18 @@ func diffProllyTrees(ctx context.Context, ch chan DiffSummaryProgress, keyless b
if keyless {
rpr = reportKeylessChanges
} else {
fc, err := from.Count()
if err != nil {
return err
}
tc, err := to.Count()
if err != nil {
return err
}
rpr = reportPkChanges
ch <- DiffSummaryProgress{
OldSize: from.Count(),
NewSize: to.Count(),
OldSize: fc,
NewSize: tc,
}
}
@@ -123,10 +139,18 @@ func diffNomsMaps(ctx context.Context, ch chan DiffSummaryProgress, keyless bool
if keyless {
rpr = reportNomsKeylessChanges
} else {
fc, err := fromRows.Count()
if err != nil {
return err
}
tc, err := toRows.Count()
if err != nil {
return err
}
rpr = reportNomsPkChanges
ch <- DiffSummaryProgress{
OldSize: fromRows.Count(),
NewSize: toRows.Count(),
OldSize: fc,
NewSize: tc,
}
}

View File

@@ -27,7 +27,7 @@ import (
type ArtifactIndex interface {
HashOf() (hash.Hash, error)
Count() uint64
Count() (uint64, error)
Format() *types.NomsBinFormat
HasConflicts(ctx context.Context) (bool, error)
// ConflictCount returns the number of conflicts
@@ -98,7 +98,10 @@ func artifactIndexFromAddr(ctx context.Context, vrw types.ValueReadWriter, ns tr
panic("TODO")
case types.Format_DOLT_1:
root := shim.NodeFromValue(v)
root, err := shim.NodeFromValue(v)
if err != nil {
return nil, err
}
kd := shim.KeyDescriptorFromSchema(tableSch)
m := prolly.NewArtifactMap(root, ns, kd)
return ArtifactIndexFromProllyMap(m), nil
@@ -116,8 +119,9 @@ func (i prollyArtifactIndex) HashOf() (hash.Hash, error) {
return i.index.HashOf(), nil
}
func (i prollyArtifactIndex) Count() uint64 {
return uint64(i.index.Count())
func (i prollyArtifactIndex) Count() (uint64, error) {
c, err := i.index.Count()
return uint64(c), err
}
func (i prollyArtifactIndex) Format() *types.NomsBinFormat {

View File

@@ -35,10 +35,10 @@ type Index interface {
HashOf() (hash.Hash, error)
// Count returns the cardinality of the index.
Count() uint64
Count() (uint64, error)
// Empty returns true if the index is empty.
Empty() bool
Empty() (bool, error)
// Format returns the types.NomsBinFormat for this index.
Format() *types.NomsBinFormat
@@ -105,7 +105,10 @@ func indexFromAddr(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeS
return IndexFromNomsMap(v.(types.Map), vrw, ns), nil
case types.Format_DOLT_1:
pm := shim.MapFromValue(v, sch, ns)
pm, err := shim.MapFromValue(v, sch, ns)
if err != nil {
return nil, err
}
return IndexFromProllyMap(pm), nil
default:
@@ -184,13 +187,13 @@ func (i nomsIndex) HashOf() (hash.Hash, error) {
}
// Count implements Index.
func (i nomsIndex) Count() uint64 {
return i.index.Len()
func (i nomsIndex) Count() (uint64, error) {
return i.index.Len(), nil
}
// Empty implements Index.
func (i nomsIndex) Empty() bool {
return i.index.Len() == 0
func (i nomsIndex) Empty() (bool, error) {
return i.index.Len() == 0, nil
}
// Format implements Index.
@@ -234,13 +237,18 @@ func (i prollyIndex) HashOf() (hash.Hash, error) {
}
// Count implements Index.
func (i prollyIndex) Count() uint64 {
return uint64(i.index.Count())
func (i prollyIndex) Count() (uint64, error) {
c, err := i.index.Count()
return uint64(c), err
}
// Empty implements Index.
func (i prollyIndex) Empty() bool {
return i.index.Count() == 0
func (i prollyIndex) Empty() (bool, error) {
c, err := i.index.Count()
if err != nil {
return false, err
}
return c == 0, nil
}
// Format implements Index.
@@ -319,21 +327,30 @@ func (i prollyIndex) AddColumnToRows(ctx context.Context, newCol string, newSche
}
// NewIndexSet returns an empty IndexSet.
func NewIndexSet(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore) IndexSet {
func NewIndexSet(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore) (IndexSet, error) {
if vrw.Format().UsesFlatbuffers() {
emptyam := prolly.NewEmptyAddressMap(ns)
return doltDevIndexSet{vrw, ns, emptyam}
emptyam, err := prolly.NewEmptyAddressMap(ns)
if err != nil {
return nil, err
}
return doltDevIndexSet{vrw, ns, emptyam}, nil
}
empty, _ := types.NewMap(ctx, vrw)
empty, err := types.NewMap(ctx, vrw)
if err != nil {
return nil, err
}
return nomsIndexSet{
indexes: empty,
vrw: vrw,
}
}, nil
}
func NewIndexSetWithEmptyIndexes(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, sch schema.Schema) (IndexSet, error) {
s := NewIndexSet(ctx, vrw, ns)
s, err := NewIndexSet(ctx, vrw, ns)
if err != nil {
return nil, err
}
for _, index := range sch.Indexes().AllIndexes() {
empty, err := NewEmptyIndex(ctx, vrw, ns, index.Schema())
if err != nil {

View File

@@ -147,7 +147,10 @@ func NewTable(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore,
}
if indexes == nil {
indexes = NewIndexSet(ctx, vrw, ns)
indexes, err = NewIndexSet(ctx, vrw, ns)
if err != nil {
return nil, err
}
}
indexesRef, err := refFromNomsValue(ctx, vrw, mapFromIndexSet(indexes))
@@ -199,7 +202,11 @@ func TableFromAddr(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeS
err = errors.New("table ref is unexpected noms value; GetFileID == " + id)
return nil, err
}
return doltDevTable{vrw, ns, serial.GetRootAsTable([]byte(sm), serial.MessagePrefixSz)}, nil
st, err := serial.TryGetRootAsTable([]byte(sm), serial.MessagePrefixSz)
if err != nil {
return nil, err
}
return doltDevTable{vrw, ns, st}, nil
}
}
@@ -328,7 +335,7 @@ func (t nomsTable) GetIndexes(ctx context.Context) (IndexSet, error) {
return nil, err
}
if !ok {
return NewIndexSet(ctx, t.vrw, t.ns), nil
return NewIndexSet(ctx, t.vrw, t.ns)
}
im, err := iv.(types.Ref).TargetValue(ctx, t.vrw)
@@ -346,7 +353,11 @@ func (t nomsTable) GetIndexes(ctx context.Context) (IndexSet, error) {
// SetIndexes implements Table.
func (t nomsTable) SetIndexes(ctx context.Context, indexes IndexSet) (Table, error) {
if indexes == nil {
indexes = NewIndexSet(ctx, t.vrw, t.ns)
var err error
indexes, err = NewIndexSet(ctx, t.vrw, t.ns)
if err != nil {
return nil, err
}
}
indexesRef, err := refFromNomsValue(ctx, t.vrw, mapFromIndexSet(indexes))
@@ -741,7 +752,7 @@ type serialTableFields struct {
autoincval uint64
}
func (fields serialTableFields) write() *serial.Table {
func (fields serialTableFields) write() (*serial.Table, error) {
// TODO: Chance for a pool.
builder := flatbuffers.NewBuilder(1024)
@@ -774,7 +785,7 @@ func (fields serialTableFields) write() *serial.Table {
serial.TableAddViolations(builder, violationsoff)
serial.TableAddArtifacts(builder, artifactsoff)
bs := serial.FinishMessage(builder, serial.TableEnd(builder), []byte(serial.TableFileID))
return serial.GetRootAsTable(bs, serial.MessagePrefixSz)
return serial.TryGetRootAsTable(bs, serial.MessagePrefixSz)
}
func newDoltDevTable(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, sch schema.Schema, rows Index, indexes IndexSet, autoIncVal types.Value) (Table, error) {
@@ -795,7 +806,10 @@ func newDoltDevTable(ctx context.Context, vrw types.ValueReadWriter, ns tree.Nod
}
if indexes == nil {
indexes = NewIndexSet(ctx, vrw, ns)
indexes, err = NewIndexSet(ctx, vrw, ns)
if err != nil {
return nil, err
}
}
var autoInc uint64
@@ -804,7 +818,7 @@ func newDoltDevTable(ctx context.Context, vrw types.ValueReadWriter, ns tree.Nod
}
var emptyhash hash.Hash
msg := serialTableFields{
msg, err := serialTableFields{
schema: schemaAddr[:],
rows: rowsbytes,
indexes: indexes.(doltDevIndexSet).am,
@@ -816,6 +830,9 @@ func newDoltDevTable(ctx context.Context, vrw types.ValueReadWriter, ns tree.Nod
artifacts: emptyhash[:],
autoincval: autoInc,
}.write()
if err != nil {
return nil, err
}
return doltDevTable{vrw, ns, msg}, nil
}
@@ -872,7 +889,10 @@ func (t doltDevTable) GetTableRows(ctx context.Context) (Index, error) {
if err != nil {
return nil, err
}
m := shim.MapFromValue(types.SerialMessage(rowbytes), sch, t.ns)
m, err := shim.MapFromValue(types.SerialMessage(rowbytes), sch, t.ns)
if err != nil {
return nil, err
}
return IndexFromProllyMap(m), nil
}
}
@@ -883,24 +903,43 @@ func (t doltDevTable) SetTableRows(ctx context.Context, rows Index) (Table, erro
return nil, err
}
fields := t.fields()
fields, err := t.fields()
if err != nil {
return nil, err
}
fields.rows = rowsbytes
msg := fields.write()
msg, err := fields.write()
if err != nil {
return nil, err
}
return doltDevTable{t.vrw, t.ns, msg}, nil
}
func (t doltDevTable) GetIndexes(ctx context.Context) (IndexSet, error) {
ambytes := t.msg.SecondaryIndexesBytes()
node := tree.NodeFromBytes(ambytes)
node, err := tree.NodeFromBytes(ambytes)
if err != nil {
return nil, err
}
ns := t.ns
return doltDevIndexSet{t.vrw, t.ns, prolly.NewAddressMap(node, ns)}, nil
am, err := prolly.NewAddressMap(node, ns)
if err != nil {
return nil, err
}
return doltDevIndexSet{t.vrw, t.ns, am}, nil
}
func (t doltDevTable) SetIndexes(ctx context.Context, indexes IndexSet) (Table, error) {
fields := t.fields()
fields, err := t.fields()
if err != nil {
return nil, err
}
fields.indexes = indexes.(doltDevIndexSet).am
msg := fields.write()
msg, err := fields.write()
if err != nil {
return nil, err
}
return doltDevTable{t.vrw, t.ns, msg}, nil
}
@@ -985,12 +1024,18 @@ func (t doltDevTable) SetArtifacts(ctx context.Context, artifacts ArtifactIndex)
}
var addr hash.Hash
if artifacts != nil && artifacts.Count() != 0 {
ref, err := RefFromArtifactIndex(ctx, t.vrw, artifacts)
if artifacts != nil {
c, err := artifacts.Count()
if err != nil {
return nil, err
}
addr = ref.TargetHash()
if c != 0 {
ref, err := RefFromArtifactIndex(ctx, t.vrw, artifacts)
if err != nil {
return nil, err
}
addr = ref.TargetHash()
}
}
msg := t.clone()
copy(msg.ArtifactsBytes(), addr[:])
@@ -1083,9 +1128,15 @@ func (t doltDevTable) SetAutoIncrement(ctx context.Context, val uint64) (Table,
// TODO: This clones before checking if the mutate will work.
msg := t.clone()
if !msg.MutateAutoIncrementValue(val) {
fields := t.fields()
fields, err := t.fields()
if err != nil {
return nil, err
}
fields.autoincval = val
msg = fields.write()
msg, err = fields.write()
if err != nil {
return nil, err
}
}
return doltDevTable{t.vrw, t.ns, msg}, nil
}
@@ -1098,16 +1149,23 @@ func (t doltDevTable) clone() *serial.Table {
return &ret
}
func (t doltDevTable) fields() serialTableFields {
func (t doltDevTable) fields() (serialTableFields, error) {
ambytes := t.msg.SecondaryIndexesBytes()
node := tree.NodeFromBytes(ambytes)
node, err := tree.NodeFromBytes(ambytes)
if err != nil {
return serialTableFields{}, err
}
ns := t.ns
conflicts := t.msg.Conflicts(nil)
am, err := prolly.NewAddressMap(node, ns)
if err != nil {
return serialTableFields{}, err
}
return serialTableFields{
schema: t.msg.SchemaBytes(),
rows: t.msg.PrimaryIndexBytes(),
indexes: prolly.NewAddressMap(node, ns),
indexes: am,
conflictsdata: conflicts.DataBytes(),
conflictsours: conflicts.OurSchemaBytes(),
conflictstheirs: conflicts.TheirSchemaBytes(),
@@ -1115,7 +1173,7 @@ func (t doltDevTable) fields() serialTableFields {
violations: t.msg.ViolationsBytes(),
artifacts: t.msg.ArtifactsBytes(),
autoincval: t.msg.AutoIncrementValue(),
}
}, nil
}
func getSchemaAtAddr(ctx context.Context, vrw types.ValueReadWriter, addr hash.Hash) (schema.Schema, error) {

View File

@@ -88,7 +88,11 @@ func deserializeFlatbufferForeignKeys(msg types.SerialMessage) (*ForeignKeyColle
return nil, fmt.Errorf("expect Serial Message with ForeignKeyCollectionFileID")
}
c := serial.GetRootAsForeignKeyCollection(msg, serial.MessagePrefixSz)
var c serial.ForeignKeyCollection
err := serial.InitForeignKeyCollectionRoot(&c, msg, serial.MessagePrefixSz)
if err != nil {
return nil, err
}
collection := &ForeignKeyCollection{
foreignKeys: make(map[string]ForeignKey, c.ForeignKeysLength()),
}
@@ -226,10 +230,14 @@ func serializeUint64Vector(b *fb.Builder, u []uint64) fb.UOffsetT {
return b.EndVector(len(u))
}
func emptyForeignKeyCollection(msg types.SerialMessage) bool {
func emptyForeignKeyCollection(msg types.SerialMessage) (bool, error) {
if serial.GetFileID(msg) != serial.ForeignKeyCollectionFileID {
return false
return false, nil
}
c := serial.GetRootAsForeignKeyCollection(msg, serial.MessagePrefixSz)
return c.ForeignKeysLength() == 0
var c serial.ForeignKeyCollection
err := serial.InitForeignKeyCollectionRoot(&c, msg, serial.MessagePrefixSz)
if err != nil {
return false, err
}
return c.ForeignKeysLength() == 0, nil
}

View File

@@ -253,7 +253,10 @@ func newRootValue(vrw types.ValueReadWriter, ns tree.NodeStore, v types.Value) (
var storage rvStorage
if vrw.Format().UsesFlatbuffers() {
srv := serial.GetRootAsRootValue([]byte(v.(types.SerialMessage)), serial.MessagePrefixSz)
srv, err := serial.TryGetRootAsRootValue([]byte(v.(types.SerialMessage)), serial.MessagePrefixSz)
if err != nil {
return nil, err
}
storage = fbRvStorage{srv}
} else {
st, ok := v.(types.Struct)
@@ -318,7 +321,10 @@ func EmptyRootValue(ctx context.Context, vrw types.ValueReadWriter, ns tree.Node
if vrw.Format().UsesFlatbuffers() {
builder := flatbuffers.NewBuilder(80)
emptyam := prolly.NewEmptyAddressMap(ns)
emptyam, err := prolly.NewEmptyAddressMap(ns)
if err != nil {
return nil, err
}
ambytes := []byte(tree.ValueFromNode(emptyam.Node()).(types.SerialMessage))
tablesoff := builder.CreateByteVector(ambytes)
@@ -798,7 +804,10 @@ func (root *RootValue) CreateEmptyTable(ctx context.Context, tName string, sch s
return nil, err
}
indexes := durable.NewIndexSet(ctx, root.VRW(), root.ns)
indexes, err := durable.NewIndexSet(ctx, root.VRW(), root.ns)
if err != nil {
return nil, err
}
err = sch.Indexes().Iter(func(index schema.Index) (stop bool, err error) {
// create an empty map for every index
indexes, err = indexes.PutIndex(ctx, index.Name(), empty)
@@ -1106,14 +1115,20 @@ func (r fbRvStorage) GetFeatureVersion() (FeatureVersion, bool, error) {
return FeatureVersion(r.srv.FeatureVersion()), true, nil
}
func (r fbRvStorage) getAddressMap(vrw types.ValueReadWriter, ns tree.NodeStore) prolly.AddressMap {
func (r fbRvStorage) getAddressMap(vrw types.ValueReadWriter, ns tree.NodeStore) (prolly.AddressMap, error) {
tbytes := r.srv.TablesBytes()
node := shim.NodeFromValue(types.SerialMessage(tbytes))
node, err := shim.NodeFromValue(types.SerialMessage(tbytes))
if err != nil {
return prolly.AddressMap{}, err
}
return prolly.NewAddressMap(node, ns)
}
func (r fbRvStorage) GetTablesMap(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore) (tableMap, error) {
am := r.getAddressMap(vrw, ns)
am, err := r.getAddressMap(vrw, ns)
if err != nil {
return nil, err
}
return fbTableMap{am}, nil
}
@@ -1152,7 +1167,10 @@ func (r fbRvStorage) GetForeignKeys(ctx context.Context, vr types.ValueReader) (
func (r fbRvStorage) EditTablesMap(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, edits []tableEdit) (rvStorage, error) {
builder := flatbuffers.NewBuilder(80)
am := r.getAddressMap(vrw, ns)
am, err := r.getAddressMap(vrw, ns)
if err != nil {
return nil, err
}
ae := am.Editor()
for _, e := range edits {
if e.old_name != "" {
@@ -1192,7 +1210,7 @@ func (r fbRvStorage) EditTablesMap(ctx context.Context, vrw types.ValueReadWrite
}
}
}
am, err := ae.Flush(ctx)
am, err = ae.Flush(ctx)
if err != nil {
return nil, err
}
@@ -1207,12 +1225,20 @@ func (r fbRvStorage) EditTablesMap(ctx context.Context, vrw types.ValueReadWrite
serial.RootValueAddForeignKeyAddr(builder, fkoff)
bs := serial.FinishMessage(builder, serial.RootValueEnd(builder), []byte(serial.RootValueFileID))
return fbRvStorage{serial.GetRootAsRootValue(bs, serial.MessagePrefixSz)}, nil
msg, err := serial.TryGetRootAsRootValue(bs, serial.MessagePrefixSz)
if err != nil {
return nil, err
}
return fbRvStorage{msg}, nil
}
func (r fbRvStorage) SetForeignKeyMap(ctx context.Context, vrw types.ValueReadWriter, v types.Value) (rvStorage, error) {
var h hash.Hash
if !emptyForeignKeyCollection(v.(types.SerialMessage)) {
isempty, err := emptyForeignKeyCollection(v.(types.SerialMessage))
if err != nil {
return nil, err
}
if !isempty {
ref, err := vrw.WriteValue(ctx, v)
if err != nil {
return nil, err

View File

@@ -115,7 +115,10 @@ func mergeProllySecondaryIndexes(
if err != nil {
return nil, err
}
mergedIndexSet := durable.NewIndexSet(ctx, tm.vrw, tm.ns)
mergedIndexSet, err := durable.NewIndexSet(ctx, tm.vrw, tm.ns)
if err != nil {
return nil, err
}
mergedM := durable.ProllyMapFromIndex(finalRows)

View File

@@ -415,7 +415,10 @@ func migrateIndexSet(
oldParentSet, oldSet, newParentSet durable.IndexSet,
vrw types.ValueReadWriter, ns tree.NodeStore,
) (durable.IndexSet, error) {
newSet := durable.NewIndexSet(ctx, vrw, ns)
newSet, err := durable.NewIndexSet(ctx, vrw, ns)
if err != nil {
return nil, err
}
for _, def := range sch.Indexes().AllIndexes() {
idx, err := oldParentSet.GetIndex(ctx, sch, def.Name())
if err != nil {

View File

@@ -261,15 +261,24 @@ func partitionTable(ctx context.Context, tbl *doltdb.Table) ([][2]uint64, error)
idx, err := tbl.GetRowData(ctx)
if err != nil {
return nil, err
} else if idx.Count() == 0 {
}
c, err := idx.Count()
if err != nil {
return nil, err
}
if c == 0 {
return nil, nil
}
n := (idx.Count() + fixedSize - 1) / fixedSize
n := (c + fixedSize - 1) / fixedSize
parts := make([][2]uint64, n)
parts[0][0] = 0
parts[n-1][1] = idx.Count()
parts[n-1][1], err = idx.Count()
if err != nil {
return nil, err
}
for i := 1; i < len(parts); i++ {
parts[i-1][1] = uint64(i) * fixedSize
parts[i][0] = uint64(i) * fixedSize

View File

@@ -76,7 +76,10 @@ func DeserializeSchema(ctx context.Context, nbf *types.NomsBinFormat, v types.Va
func deserializeSchemaFromFlatbuffer(ctx context.Context, buf []byte) (schema.Schema, error) {
assertTrue(serial.GetFileID(buf) == serial.TableSchemaFileID)
s := serial.GetRootAsTableSchema(buf, serial.MessagePrefixSz)
s, err := serial.TryGetRootAsTableSchema(buf, serial.MessagePrefixSz)
if err != nil {
return nil, err
}
cols, err := deserializeColumns(ctx, s)
if err != nil {

View File

@@ -392,5 +392,5 @@ func isTableDataEmpty(ctx *sql.Context, table *doltdb.Table) (bool, error) {
return false, err
}
return rowData.Empty(), nil
return rowData.Empty()
}

View File

@@ -1229,8 +1229,12 @@ func TestAddDropPrimaryKeys(t *testing.T) {
// Assert the new index map is not empty
newRows, err := table.GetIndexRowData(ctx, "c1_idx")
require.NoError(t, err)
assert.False(t, newRows.Empty())
assert.Equal(t, newRows.Count(), uint64(2))
empty, err := newRows.Empty()
require.NoError(t, err)
assert.False(t, empty)
count, err := newRows.Count()
require.NoError(t, err)
assert.Equal(t, count, uint64(2))
})
t.Run("Add primary key when one more cells contain NULL", func(t *testing.T) {
@@ -1299,8 +1303,12 @@ func TestAddDropPrimaryKeys(t *testing.T) {
// Assert the index map is not empty
newIdx, err := table.GetIndexRowData(ctx, "c1_idx")
assert.NoError(t, err)
assert.False(t, newIdx.Empty())
assert.Equal(t, newIdx.Count(), uint64(2))
empty, err := newIdx.Empty()
require.NoError(t, err)
assert.False(t, empty)
count, err := newIdx.Count()
require.NoError(t, err)
assert.Equal(t, count, uint64(2))
})
}

View File

@@ -135,7 +135,11 @@ func newKeyedRowIter(ctx context.Context, tbl *doltdb.Table, projectedCols []uin
func iterForPartition(ctx context.Context, partition doltTablePartition) (types.MapTupleIterator, error) {
if partition.end == NoUpperBound {
partition.end = partition.rowData.Count()
c, err := partition.rowData.Count()
if err != nil {
return nil, err
}
partition.end = c
}
return partition.IteratorForPartition(ctx, partition.rowData)
}
@@ -184,8 +188,12 @@ func ProllyRowIterFromPartition(
partition doltTablePartition,
) (sql.RowIter, error) {
rows := durable.ProllyMapFromIndex(partition.rowData)
if partition.end > uint64(rows.Count()) {
partition.end = uint64(rows.Count())
c, err := rows.Count()
if err != nil {
return nil, err
}
if partition.end > uint64(c) {
partition.end = uint64(c)
}
iter, err := rows.IterOrdinalRange(ctx, partition.start, partition.end)

View File

@@ -220,7 +220,11 @@ func nextSchemasTableIndex(ctx *sql.Context, root *doltdb.RootValue) (int64, err
return 0, err
}
if rows.Empty() {
empty, err := rows.Empty()
if err != nil {
return 0, err
}
if empty {
return 1, nil
}

View File

@@ -357,7 +357,7 @@ func (t *DoltTable) numRows(ctx *sql.Context) (uint64, error) {
return 0, err
}
return m.Count(), nil
return m.Count()
}
// Format returns the NomsBinFormat for the underlying table
@@ -399,7 +399,10 @@ func (t *DoltTable) Partitions(ctx *sql.Context) (sql.PartitionIter, error) {
if err != nil {
return nil, err
}
partitions := partitionsFromRows(ctx, rows)
partitions, err := partitionsFromRows(ctx, rows)
if err != nil {
return nil, err
}
return newDoltTablePartitionIter(rows, partitions...), nil
}
@@ -458,8 +461,12 @@ func (t *DoltTable) AnalyzeTable(ctx *sql.Context) error {
return err
}
mc, err := m.Count()
if err != nil {
return err
}
t.doltStats = &DoltTableStatistics{
rowCount: m.Count(),
rowCount: mc,
createdAt: time.Now(),
}
@@ -674,7 +681,11 @@ func (t *WritableDoltTable) Truncate(ctx *sql.Context) (int, error) {
if err != nil {
return 0, err
}
numOfRows := int(rowData.Count())
c, err := rowData.Count()
if err != nil {
return 0, err
}
numOfRows := int(c)
newTable, err := truncate(ctx, table, sch)
if err != nil {
@@ -1006,18 +1017,25 @@ type doltTablePartition struct {
rowData durable.Index
}
func partitionsFromRows(ctx context.Context, rows durable.Index) []doltTablePartition {
if rows.Empty() {
func partitionsFromRows(ctx context.Context, rows durable.Index) ([]doltTablePartition, error) {
empty, err := rows.Empty()
if err != nil {
return nil, err
}
if empty {
return []doltTablePartition{
{start: 0, end: 0, rowData: rows},
}
}, nil
}
return partitionsFromTableRows(rows)
}
func partitionsFromTableRows(rows durable.Index) []doltTablePartition {
numElements := rows.Count()
func partitionsFromTableRows(rows durable.Index) ([]doltTablePartition, error) {
numElements, err := rows.Count()
if err != nil {
return nil, err
}
itemsPerPartition := MaxRowsPerPartition
numPartitions := (numElements / itemsPerPartition) + 1
@@ -1046,7 +1064,7 @@ func partitionsFromTableRows(rows durable.Index) []doltTablePartition {
rowData: rows,
}
return partitions
return partitions, nil
}
// Key returns the key for this partition, which must uniquely identity the partition.

View File

@@ -90,7 +90,10 @@ func NewTempTable(
if err != nil {
return nil, err
}
set := durable.NewIndexSet(ctx, vrw, ns)
set, err := durable.NewIndexSet(ctx, vrw, ns)
if err != nil {
return nil, err
}
tbl, err := doltdb.NewTable(ctx, vrw, ns, sch, idx, set, nil)
if err != nil {
@@ -197,7 +200,11 @@ func (t *TempTable) Partitions(ctx *sql.Context) (sql.PartitionIter, error) {
if err != nil {
return nil, err
}
return newDoltTablePartitionIter(rows, partitionsFromRows(ctx, rows)...), nil
parts, err := partitionsFromRows(ctx, rows)
if err != nil {
return nil, err
}
return newDoltTablePartitionIter(rows, parts...), nil
}
func (t *TempTable) IsTemporary() bool {
@@ -210,7 +217,7 @@ func (t *TempTable) DataLength(ctx *sql.Context) (uint64, error) {
if err != nil {
return 0, err
}
return idx.Count(), nil
return idx.Count()
}
// AnalyzeTable implements the sql.StatisticsTable interface.

View File

@@ -64,7 +64,11 @@ func NewTableIterator(ctx context.Context, sch schema.Schema, idx durable.Index,
var rowItr sql.RowIter
if types.IsFormat_DOLT_1(idx.Format()) {
m := durable.ProllyMapFromIndex(idx)
itr, err := m.IterOrdinalRange(ctx, offset, uint64(m.Count()))
c, err := m.Count()
if err != nil {
return nil, err
}
itr, err := m.IterOrdinalRange(ctx, offset, uint64(c))
if err != nil {
return nil, err
}

View File

@@ -93,7 +93,8 @@ func TestEndToEnd(t *testing.T) {
empty, err := types.NewMap(ctx, root.VRW())
require.NoError(t, err)
indexes := durable.NewIndexSet(ctx, root.VRW(), root.NodeStore())
indexes, err := durable.NewIndexSet(ctx, root.VRW(), root.NodeStore())
require.NoError(t, err)
indexes, err = indexes.PutNomsIndex(ctx, dtestutils.IndexName, empty)
require.NoError(t, err)

View File

@@ -10,8 +10,15 @@ if [ ! -z "$(ls $GEN_DIR)" ]; then
rm $GEN_DIR/*.go
fi
FLATC=${FLATC:-$SRC/../../proto/third_party/flatbuffers/bazel-bin/flatc}
if [ ! -x "$FLATC" ]; then
echo "$FLATC is not an executable. Did you remember to run 'bazel build //:flatc' in $(dirname $(dirname $FLATC))?"
exit 1
fi
# generate golang (de)serialization package
flatc -o $GEN_DIR --gen-onefile --filename-suffix "" --gen-mutable --go-namespace "serial" --go \
"$FLATC" -o $GEN_DIR --gen-onefile --filename-suffix "" --gen-mutable --go-namespace "serial" --go \
addressmap.fbs \
blob.fbs \
commit.fbs \

View File

@@ -163,7 +163,10 @@ func outputEncodedValue(ctx context.Context, w io.Writer, value types.Value) err
case types.SerialMessage:
switch serial.GetFileID(value) {
case serial.TableFileID:
msg := serial.GetRootAsTable(value, serial.MessagePrefixSz)
msg, err := serial.TryGetRootAsTable(value, serial.MessagePrefixSz)
if err != nil {
return err
}
fmt.Fprintf(w, " {\n")
fmt.Fprintf(w, "\tSchema: #%s\n", hash.New(msg.SchemaBytes()).String())
@@ -174,17 +177,39 @@ func outputEncodedValue(ctx context.Context, w io.Writer, value types.Value) err
fmt.Fprintf(w, "\tAutoinc: %d\n", msg.AutoIncrementValue())
// clustered index
node := tree.NodeFromBytes(msg.PrimaryIndexBytes())
node, err := tree.NodeFromBytes(msg.PrimaryIndexBytes())
if err != nil {
return err
}
c, err := node.TreeCount()
if err != nil {
return err
}
l, err := node.Level()
if err != nil {
return err
}
fmt.Fprintf(w, "\tPrimary Index (rows %d, depth %d) %s {",
node.TreeCount(), node.Level()+1, node.HashOf().String()[:8])
c, l+1, node.HashOf().String()[:8])
tree.OutputProllyNode(w, node)
fmt.Fprintf(w, "\t}\n")
// secondary indexes
node = tree.NodeFromBytes(msg.SecondaryIndexesBytes())
node, err = tree.NodeFromBytes(msg.SecondaryIndexesBytes())
if err != nil {
return err
}
c, err = node.TreeCount()
if err != nil {
return err
}
l, err = node.Level()
if err != nil {
return err
}
fmt.Fprintf(w, "\tSecondary Indexes (indexes %d, depth %d) %s {",
node.TreeCount(), node.Level()+1, node.HashOf().String()[:8])
err := tree.OutputAddressMapNode(w, node)
c, l+1, node.HashOf().String()[:8])
err = tree.OutputAddressMapNode(w, node)
if err != nil {
return err
}
@@ -193,16 +218,25 @@ func outputEncodedValue(ctx context.Context, w io.Writer, value types.Value) err
return nil
case serial.StoreRootFileID:
msg := serial.GetRootAsStoreRoot(value, serial.MessagePrefixSz)
msg, err := serial.TryGetRootAsStoreRoot(value, serial.MessagePrefixSz)
if err != nil {
return err
}
ambytes := msg.AddressMapBytes()
node := tree.NodeFromBytes(ambytes)
node, err := tree.NodeFromBytes(ambytes)
if err != nil {
return err
}
return tree.OutputAddressMapNode(w, node)
case serial.ProllyTreeNodeFileID:
fallthrough
case serial.AddressMapFileID:
fallthrough
case serial.CommitClosureFileID:
node := shim.NodeFromValue(value)
node, err := shim.NodeFromValue(value)
if err != nil {
return err
}
return tree.OutputProllyNode(w, node)
default:
return types.WriteEncodedValue(ctx, w, value)

View File

@@ -204,7 +204,10 @@ func newCommitForValue(ctx context.Context, cs chunks.ChunkStore, vrw types.Valu
return nil, err
}
for i := range heights {
parents[i] = serial.GetRootAsCommit([]byte(parentValues[i].(types.SerialMessage)), serial.MessagePrefixSz)
parents[i], err = serial.TryGetRootAsCommit([]byte(parentValues[i].(types.SerialMessage)), serial.MessagePrefixSz)
if err != nil {
return nil, err
}
heights[i] = parents[i].Height()
}
parentClosureAddr, err := writeFbCommitParentClosure(ctx, cs, vrw, ns, parents, opts.Parents)
@@ -264,12 +267,15 @@ func newCommitForValue(ctx context.Context, cs chunks.ChunkStore, vrw types.Valu
func commitPtr(nbf *types.NomsBinFormat, v types.Value, r *types.Ref) (*Commit, error) {
if nbf.UsesFlatbuffers() {
bs := []byte(v.(types.SerialMessage))
height := serial.GetRootAsCommit(bs, serial.MessagePrefixSz).Height()
var cm serial.Commit
err := serial.InitCommitRoot(&cm, bs, serial.MessagePrefixSz)
if err != nil {
return nil, err
}
var addr hash.Hash
if r != nil {
addr = r.TargetHash()
} else {
var err error
addr, err = v.Hash(nbf)
if err != nil {
return nil, err
@@ -277,7 +283,7 @@ func commitPtr(nbf *types.NomsBinFormat, v types.Value, r *types.Ref) (*Commit,
}
return &Commit{
val: v,
height: height,
height: cm.Height(),
addr: addr,
}, nil
}
@@ -446,7 +452,11 @@ func GetCommitParents(ctx context.Context, vr types.ValueReader, cv types.Value)
if v == nil {
return nil, fmt.Errorf("GetCommitParents: Did not find parent Commit in ValueReader: %s", addrs[i].String())
}
csm := serial.GetRootAsCommit([]byte(v.(types.SerialMessage)), serial.MessagePrefixSz)
var csm serial.Commit
err := serial.InitCommitRoot(&csm, []byte(v.(types.SerialMessage)), serial.MessagePrefixSz)
if err != nil {
return nil, err
}
res[i] = &Commit{
val: v,
height: csm.Height(),
@@ -516,7 +526,11 @@ func GetCommitMeta(ctx context.Context, cv types.Value) (*CommitMeta, error) {
if serial.GetFileID(data) != serial.CommitFileID {
return nil, errors.New("GetCommitMeta: provided value is not a commit.")
}
cmsg := serial.GetRootAsCommit(data, serial.MessagePrefixSz)
var cmsg serial.Commit
err := serial.InitCommitRoot(&cmsg, data, serial.MessagePrefixSz)
if err != nil {
return nil, err
}
ret := &CommitMeta{}
ret.Name = string(cmsg.Name())
ret.Email = string(cmsg.Email())
@@ -552,7 +566,11 @@ func GetCommittedValue(ctx context.Context, vr types.ValueReader, cv types.Value
if serial.GetFileID(data) != serial.CommitFileID {
return nil, errors.New("GetCommittedValue: provided value is not a commit.")
}
cmsg := serial.GetRootAsCommit(data, serial.MessagePrefixSz)
var cmsg serial.Commit
err := serial.InitCommitRoot(&cmsg, data, serial.MessagePrefixSz)
if err != nil {
return nil, err
}
var roothash hash.Hash
copy(roothash[:], cmsg.RootBytes())
return vr.ReadValue(ctx, roothash)

View File

@@ -32,7 +32,11 @@ func newParentsClosureIterator(ctx context.Context, c *Commit, vr types.ValueRea
sv := c.NomsValue()
if _, ok := sv.(types.SerialMessage); ok {
msg := serial.GetRootAsCommit(sv.(types.SerialMessage), serial.MessagePrefixSz)
var msg serial.Commit
err := serial.InitCommitRoot(&msg, sv.(types.SerialMessage), serial.MessagePrefixSz)
if err != nil {
return nil, err
}
addr := hash.New(msg.ParentClosureBytes())
if addr.IsEmpty() {
return nil, nil
@@ -44,8 +48,14 @@ func newParentsClosureIterator(ctx context.Context, c *Commit, vr types.ValueRea
if types.IsNull(v) {
return nil, fmt.Errorf("internal error or data loss: dangling commit parent closure for addr %s or commit %s", addr.String(), c.Addr().String())
}
node := tree.NodeFromBytes(v.(types.SerialMessage))
cc := prolly.NewCommitClosure(node, ns)
node, err := tree.NodeFromBytes(v.(types.SerialMessage))
if err != nil {
return nil, err
}
cc, err := prolly.NewCommitClosure(node, ns)
if err != nil {
return nil, err
}
ci, err := cc.IterAllReverse(ctx)
if err != nil {
return nil, err
@@ -391,10 +401,19 @@ func writeFbCommitParentClosure(ctx context.Context, cs chunks.ChunkStore, vrw t
closures := make([]prolly.CommitClosure, len(parents))
for i := range addrs {
if !types.IsNull(vs[i]) {
node := tree.NodeFromBytes(vs[i].(types.SerialMessage))
closures[i] = prolly.NewCommitClosure(node, ns)
node, err := tree.NodeFromBytes(vs[i].(types.SerialMessage))
if err != nil {
return hash.Hash{}, err
}
closures[i], err = prolly.NewCommitClosure(node, ns)
if err != nil {
return hash.Hash{}, err
}
} else {
closures[i] = prolly.NewEmptyCommitClosure(ns)
closures[i], err = prolly.NewEmptyCommitClosure(ns)
if err != nil {
return hash.Hash{}, err
}
}
}
// Add all the missing entries from [1, ...) maps to the 0th map.

View File

@@ -36,7 +36,7 @@ import (
type DatasetsMap interface {
// How many datasets are in the map
Len() uint64
Len() (uint64, error)
IterAll(ctx context.Context, cb func(id string, addr hash.Hash) error) error
}

View File

@@ -101,7 +101,7 @@ func (db *database) loadDatasetsNomsMap(ctx context.Context, rootHash hash.Hash)
func (db *database) loadDatasetsRefmap(ctx context.Context, rootHash hash.Hash) (prolly.AddressMap, error) {
if rootHash == (hash.Hash{}) {
return prolly.NewEmptyAddressMap(db.ns), nil
return prolly.NewEmptyAddressMap(db.ns)
}
val, err := db.ReadValue(ctx, rootHash)
@@ -113,15 +113,16 @@ func (db *database) loadDatasetsRefmap(ctx context.Context, rootHash hash.Hash)
return prolly.AddressMap{}, errors.New("Root hash doesn't exist")
}
return parse_storeroot([]byte(val.(types.SerialMessage)), db.nodeStore()), nil
return parse_storeroot([]byte(val.(types.SerialMessage)), db.nodeStore())
}
type refmapDatasetsMap struct {
am prolly.AddressMap
}
func (m refmapDatasetsMap) Len() uint64 {
return uint64(m.am.Count())
func (m refmapDatasetsMap) Len() (uint64, error) {
c, err := m.am.Count()
return uint64(c), err
}
func (m refmapDatasetsMap) IterAll(ctx context.Context, cb func(string, hash.Hash) error) error {
@@ -132,8 +133,8 @@ type nomsDatasetsMap struct {
m types.Map
}
func (m nomsDatasetsMap) Len() uint64 {
return m.m.Len()
func (m nomsDatasetsMap) Len() (uint64, error) {
return m.m.Len(), nil
}
func (m nomsDatasetsMap) IterAll(ctx context.Context, cb func(string, hash.Hash) error) error {

View File

@@ -268,7 +268,9 @@ func (suite *DatabaseSuite) TestDatabaseCommit() {
defer newDB.Close()
datasets2, err := newDB.Datasets(context.Background())
suite.NoError(err)
suite.Equal(uint64(2), datasets2.Len())
l, err := datasets2.Len()
suite.NoError(err)
suite.Equal(uint64(2), l)
}
func mustNomsMap(t *testing.T, dsm DatasetsMap) types.Map {
@@ -399,7 +401,9 @@ func (suite *DatabaseSuite) TestDatabaseDelete() {
defer newDB.Close()
datasets, err = newDB.Datasets(context.Background())
suite.NoError(err)
suite.Equal(uint64(1), datasets.Len())
l, err := datasets.Len()
suite.NoError(err)
suite.Equal(uint64(1), l)
newDS, err := newDB.GetDataset(context.Background(), datasetID2)
suite.NoError(err)
present = newDS.HasHead()

View File

@@ -147,8 +147,12 @@ type serialTagHead struct {
addr hash.Hash
}
func newSerialTagHead(bs []byte, addr hash.Hash) serialTagHead {
return serialTagHead{serial.GetRootAsTag(bs, serial.MessagePrefixSz), addr}
func newSerialTagHead(bs []byte, addr hash.Hash) (serialTagHead, error) {
tm, err := serial.TryGetRootAsTag(bs, serial.MessagePrefixSz)
if err != nil {
return serialTagHead{}, err
}
return serialTagHead{tm, addr}, nil
}
func (h serialTagHead) TypeName() string {
@@ -306,7 +310,7 @@ func newHead(head types.Value, addr hash.Hash) (dsHead, error) {
data := []byte(sm)
fid := serial.GetFileID(data)
if fid == serial.TagFileID {
return newSerialTagHead(data, addr), nil
return newSerialTagHead(data, addr)
}
if fid == serial.WorkingSetFileID {
return newSerialWorkingSetHead(data, addr), nil

View File

@@ -32,12 +32,18 @@ func storeroot_flatbuffer(am prolly.AddressMap) serial.Message {
return serial.FinishMessage(builder, serial.StoreRootEnd(builder), []byte(serial.StoreRootFileID))
}
func parse_storeroot(bs []byte, ns tree.NodeStore) prolly.AddressMap {
func parse_storeroot(bs []byte, ns tree.NodeStore) (prolly.AddressMap, error) {
if serial.GetFileID(bs) != serial.StoreRootFileID {
panic("expected store root file id, got: " + serial.GetFileID(bs))
}
sr := serial.GetRootAsStoreRoot(bs, serial.MessagePrefixSz)
sr, err := serial.TryGetRootAsStoreRoot(bs, serial.MessagePrefixSz)
if err != nil {
return prolly.AddressMap{}, err
}
mapbytes := sr.AddressMapBytes()
node := tree.NodeFromBytes(mapbytes)
node, err := tree.NodeFromBytes(mapbytes)
if err != nil {
return prolly.AddressMap{}, err
}
return prolly.NewAddressMap(node, ns)
}

View File

@@ -29,20 +29,24 @@ type AddressMap struct {
addresses orderedTree[stringSlice, address, lexicographic]
}
func NewEmptyAddressMap(ns tree.NodeStore) AddressMap {
func NewEmptyAddressMap(ns tree.NodeStore) (AddressMap, error) {
serializer := message.NewAddressMapSerializer(ns.Pool())
msg := serializer.Serialize(nil, nil, nil, 0)
return NewAddressMap(tree.NodeFromBytes(msg), ns)
n, err := tree.NodeFromBytes(msg)
if err != nil {
return AddressMap{}, err
}
return NewAddressMap(n, ns)
}
func NewAddressMap(node tree.Node, ns tree.NodeStore) AddressMap {
func NewAddressMap(node tree.Node, ns tree.NodeStore) (AddressMap, error) {
return AddressMap{
addresses: orderedTree[stringSlice, address, lexicographic]{
root: node,
ns: ns,
order: lexicographic{},
},
}
}, nil
}
type stringSlice []byte
@@ -57,11 +61,11 @@ func (l lexicographic) Compare(left, right stringSlice) int {
return bytes.Compare(left, right)
}
func (c AddressMap) Count() int {
func (c AddressMap) Count() (int, error) {
return c.addresses.count()
}
func (c AddressMap) Height() int {
func (c AddressMap) Height() (int, error) {
return c.addresses.height()
}

View File

@@ -20,6 +20,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/prolly/tree"
@@ -31,10 +32,10 @@ func TestAddressMap(t *testing.T) {
ns := tree.NewTestNodeStore()
pairs := randomAddressPairs(10_000)
empty := NewEmptyAddressMap(ns)
empty, err := NewEmptyAddressMap(ns)
require.NoError(t, err)
editor := empty.Editor()
var err error
for _, p := range pairs {
err = editor.Add(ctx, p.name(), p.addr())
assert.NoError(t, err)

View File

@@ -107,11 +107,11 @@ func NewArtifactMapFromTuples(ctx context.Context, ns tree.NodeStore, srcKeyDesc
}, nil
}
func (m ArtifactMap) Count() int {
func (m ArtifactMap) Count() (int, error) {
return m.tuples.count()
}
func (m ArtifactMap) Height() int {
func (m ArtifactMap) Height() (int, error) {
return m.tuples.height()
}
@@ -600,7 +600,10 @@ func ArtifactDebugFormat(ctx context.Context, m ArtifactMap) (string, error) {
if err != nil {
return "", err
}
c := m.Count()
c, err := m.Count()
if err != nil {
return "", err
}
var sb strings.Builder
sb.WriteString(fmt.Sprintf("Artifact Map (count: %d) {\n", c))

View File

@@ -46,28 +46,31 @@ func (o commitClosureKeyOrdering) Compare(left, right CommitClosureKey) int {
return 1
}
func NewEmptyCommitClosure(ns tree.NodeStore) CommitClosure {
func NewEmptyCommitClosure(ns tree.NodeStore) (CommitClosure, error) {
serializer := message.NewCommitClosureSerializer(ns.Pool())
msg := serializer.Serialize(nil, nil, nil, 0)
node := tree.NodeFromBytes(msg)
node, err := tree.NodeFromBytes(msg)
if err != nil {
return CommitClosure{}, err
}
return NewCommitClosure(node, ns)
}
func NewCommitClosure(node tree.Node, ns tree.NodeStore) CommitClosure {
func NewCommitClosure(node tree.Node, ns tree.NodeStore) (CommitClosure, error) {
return CommitClosure{
closure: orderedTree[CommitClosureKey, CommitClosureValue, commitClosureKeyOrdering]{
root: node,
ns: ns,
order: commitClosureKeyOrdering{},
},
}
}, nil
}
func (c CommitClosure) Count() int {
func (c CommitClosure) Count() (int, error) {
return c.closure.count()
}
func (c CommitClosure) Height() int {
func (c CommitClosure) Height() (int, error) {
return c.closure.height()
}

View File

@@ -52,11 +52,16 @@ func TestCommitClosure(t *testing.T) {
})
t.Run("Empty", func(t *testing.T) {
cc := NewEmptyCommitClosure(ns)
cc, err := NewEmptyCommitClosure(ns)
require.NoError(t, err)
assert.NotNil(t, cc)
assert.Equal(t, 0, cc.Count())
c, err := cc.Count()
require.NoError(t, err)
assert.Equal(t, 0, c)
assert.Equal(t, 0, cc.closure.root.Count())
assert.Equal(t, 1, cc.Height())
c, err = cc.Height()
require.NoError(t, err)
assert.Equal(t, 1, c)
i, err := cc.IterAllReverse(ctx)
_, _, err = i.Next(ctx)
@@ -65,15 +70,18 @@ func TestCommitClosure(t *testing.T) {
})
t.Run("Insert", func(t *testing.T) {
cc := NewEmptyCommitClosure(ns)
cc, err := NewEmptyCommitClosure(ns)
require.NoError(t, err)
e := cc.Editor()
err := e.Add(ctx, NewCommitClosureKey(ns.Pool(), 0, hash.Parse("00000000000000000000000000000000")))
err = e.Add(ctx, NewCommitClosureKey(ns.Pool(), 0, hash.Parse("00000000000000000000000000000000")))
assert.NoError(t, err)
err = e.Add(ctx, NewCommitClosureKey(ns.Pool(), 1, hash.Parse("00000000000000000000000000000000")))
assert.NoError(t, err)
cc, err = e.Flush(ctx)
assert.NoError(t, err)
assert.Equal(t, 2, cc.Count())
ccc, err := cc.Count()
require.NoError(t, err)
assert.Equal(t, 2, ccc)
i, err := cc.IterAllReverse(ctx)
assert.NoError(t, err)
@@ -94,21 +102,27 @@ func TestCommitClosure(t *testing.T) {
assert.NoError(t, err)
cc, err = e.Flush(ctx)
assert.NoError(t, err)
assert.Equal(t, 2, cc.Count())
ccc, err = cc.Count()
require.NoError(t, err)
assert.Equal(t, 2, ccc)
})
t.Run("Diff", func(t *testing.T) {
ccl := NewEmptyCommitClosure(ns)
ccl, err := NewEmptyCommitClosure(ns)
require.NoError(t, err)
e := ccl.Editor()
err := e.Add(ctx, NewCommitClosureKey(ns.Pool(), 0, hash.Parse("00000000000000000000000000000000")))
err = e.Add(ctx, NewCommitClosureKey(ns.Pool(), 0, hash.Parse("00000000000000000000000000000000")))
assert.NoError(t, err)
err = e.Add(ctx, NewCommitClosureKey(ns.Pool(), 1, hash.Parse("00000000000000000000000000000000")))
assert.NoError(t, err)
ccl, err = e.Flush(ctx)
assert.NoError(t, err)
assert.Equal(t, 2, ccl.Count())
cclc, err := ccl.Count()
require.NoError(t, err)
assert.Equal(t, 2, cclc)
ccr := NewEmptyCommitClosure(ns)
ccr, err := NewEmptyCommitClosure(ns)
require.NoError(t, err)
e = ccr.Editor()
err = e.Add(ctx, NewCommitClosureKey(ns.Pool(), 0, hash.Parse("00000000000000000000000000000000")))
assert.NoError(t, err)
@@ -120,7 +134,9 @@ func TestCommitClosure(t *testing.T) {
assert.NoError(t, err)
ccr, err = e.Flush(ctx)
assert.NoError(t, err)
assert.Equal(t, 4, ccr.Count())
ccrc, err := ccr.Count()
require.NoError(t, err)
assert.Equal(t, 4, ccrc)
var numadds, numdels int
err = DiffCommitClosures(ctx, ccl, ccr, func(ctx context.Context, d tree.Diff) error {
@@ -138,15 +154,18 @@ func TestCommitClosure(t *testing.T) {
})
t.Run("WalkAddresses", func(t *testing.T) {
cc := NewEmptyCommitClosure(ns)
cc, err := NewEmptyCommitClosure(ns)
require.NoError(t, err)
e := cc.Editor()
for i := 0; i < 4096; i++ {
err := e.Add(ctx, NewCommitClosureKey(ns.Pool(), uint64(i), hash.Parse(fmt.Sprintf("%0.32d", i))))
require.NoError(t, err)
}
cc, err := e.Flush(ctx)
cc, err = e.Flush(ctx)
require.NoError(t, err)
assert.Equal(t, 4096, cc.Count())
ccc, err := cc.Count()
require.NoError(t, err)
assert.Equal(t, 4096, ccc)
// Walk the addresses in the root.
msg := serial.Message(tree.ValueFromNode(cc.closure.root).(types.SerialMessage))
@@ -169,15 +188,18 @@ func TestCommitClosure(t *testing.T) {
})
t.Run("WalkNodes", func(t *testing.T) {
cc := NewEmptyCommitClosure(ns)
cc, err := NewEmptyCommitClosure(ns)
require.NoError(t, err)
e := cc.Editor()
for i := 0; i < 4096; i++ {
err := e.Add(ctx, NewCommitClosureKey(ns.Pool(), uint64(i), hash.Parse(fmt.Sprintf("%0.32d", i))))
require.NoError(t, err)
}
cc, err := e.Flush(ctx)
cc, err = e.Flush(ctx)
require.NoError(t, err)
assert.Equal(t, 4096, cc.Count())
ccc, err := cc.Count()
require.NoError(t, err)
assert.Equal(t, 4096, ccc)
numnodes := 0
totalentries := 0

View File

@@ -149,11 +149,11 @@ func (m Map) Mutate() MutableMap {
}
// Count returns the number of key-value pairs in the Map.
func (m Map) Count() int {
func (m Map) Count() (int, error) {
return m.tuples.count()
}
func (m Map) Height() int {
func (m Map) Height() (int, error) {
return m.tuples.height()
}
@@ -308,7 +308,10 @@ func DebugFormat(ctx context.Context, m Map) (string, error) {
if err != nil {
return "", err
}
c := m.Count()
c, err := m.Count()
if err != nil {
return "", err
}
var sb strings.Builder
sb.WriteString(fmt.Sprintf("Prolly Map (count: %d) {\n", c))

View File

@@ -238,12 +238,19 @@ func TestMutateMapWithTupleIter(t *testing.T) {
func TestNewEmptyNode(t *testing.T) {
s := message.NewProllyMapSerializer(val.TupleDesc{}, sharedPool)
msg := s.Serialize(nil, nil, nil, 0)
empty := tree.NodeFromBytes(msg)
assert.Equal(t, 0, empty.Level())
empty, err := tree.NodeFromBytes(msg)
require.NoError(t, err)
l, err := empty.Level()
require.NoError(t, err)
assert.Equal(t, 0, l)
assert.Equal(t, 0, empty.Count())
assert.Equal(t, 0, empty.TreeCount())
tc, err := empty.TreeCount()
require.NoError(t, err)
assert.Equal(t, 0, tc)
assert.Equal(t, 76, empty.Size())
assert.True(t, empty.IsLeaf())
leaf, err := empty.IsLeaf()
require.NoError(t, err)
assert.True(t, leaf)
}
// credit: https://github.com/tailscale/tailscale/commit/88586ec4a43542b758d6f4e15990573970fb4e8a

View File

@@ -81,22 +81,34 @@ func (s AddressMapSerializer) Serialize(keys, addrs [][]byte, subtrees []uint64,
return serial.FinishMessage(b, serial.AddressMapEnd(b), addressMapFileID)
}
func getAddressMapKeys(msg serial.Message) (keys ItemArray) {
am := serial.GetRootAsAddressMap(msg, serial.MessagePrefixSz)
func getAddressMapKeys(msg serial.Message) (keys ItemArray, err error) {
var am serial.AddressMap
err = serial.InitAddressMapRoot(&am, msg, serial.MessagePrefixSz)
if err != nil {
return keys, err
}
keys.Items = am.KeyItemsBytes()
keys.Offs = getAddressMapKeyOffsets(am)
keys.Offs = getAddressMapKeyOffsets(&am)
return
}
func getAddressMapValues(msg serial.Message) (values ItemArray) {
am := serial.GetRootAsAddressMap(msg, serial.MessagePrefixSz)
func getAddressMapValues(msg serial.Message) (values ItemArray, err error) {
var am serial.AddressMap
err = serial.InitAddressMapRoot(&am, msg, serial.MessagePrefixSz)
if err != nil {
return values, err
}
values.Items = am.AddressArrayBytes()
values.Offs = offsetsForAddressArray(values.Items)
return
}
func walkAddressMapAddresses(ctx context.Context, msg serial.Message, cb func(ctx context.Context, addr hash.Hash) error) error {
am := serial.GetRootAsAddressMap(msg, serial.MessagePrefixSz)
var am serial.AddressMap
err := serial.InitAddressMapRoot(&am, msg, serial.MessagePrefixSz)
if err != nil {
return err
}
arr := am.AddressArrayBytes()
for i := 0; i < len(arr)/hash.ByteLen; i++ {
addr := hash.New(arr[i*addrSize : (i+1)*addrSize])
@@ -107,25 +119,45 @@ func walkAddressMapAddresses(ctx context.Context, msg serial.Message, cb func(ct
return nil
}
func getAddressMapCount(msg serial.Message) uint16 {
am := serial.GetRootAsAddressMap(msg, serial.MessagePrefixSz)
return uint16(am.KeyOffsetsLength() - 1)
func getAddressMapCount(msg serial.Message) (uint16, error) {
var am serial.AddressMap
err := serial.InitAddressMapRoot(&am, msg, serial.MessagePrefixSz)
if err != nil {
return 0, err
}
return uint16(am.KeyOffsetsLength() - 1), nil
}
func getAddressMapTreeLevel(msg serial.Message) int {
am := serial.GetRootAsAddressMap(msg, serial.MessagePrefixSz)
return int(am.TreeLevel())
func getAddressMapTreeLevel(msg serial.Message) (int, error) {
var am serial.AddressMap
err := serial.InitAddressMapRoot(&am, msg, serial.MessagePrefixSz)
if err != nil {
return 0, err
}
return int(am.TreeLevel()), nil
}
func getAddressMapTreeCount(msg serial.Message) int {
am := serial.GetRootAsAddressMap(msg, serial.MessagePrefixSz)
return int(am.TreeCount())
func getAddressMapTreeCount(msg serial.Message) (int, error) {
var am serial.AddressMap
err := serial.InitAddressMapRoot(&am, msg, serial.MessagePrefixSz)
if err != nil {
return 0, err
}
return int(am.TreeCount()), nil
}
func getAddressMapSubtrees(msg serial.Message) []uint64 {
counts := make([]uint64, getAddressMapCount(msg))
am := serial.GetRootAsAddressMap(msg, serial.MessagePrefixSz)
return decodeVarints(am.SubtreeCountsBytes(), counts)
func getAddressMapSubtrees(msg serial.Message) ([]uint64, error) {
sz, err := getAddressMapCount(msg)
if err != nil {
return nil, err
}
counts := make([]uint64, sz)
var am serial.AddressMap
err = serial.InitAddressMapRoot(&am, msg, serial.MessagePrefixSz)
if err != nil {
return nil, err
}
return decodeVarints(am.SubtreeCountsBytes(), counts), nil
}
func getAddressMapKeyOffsets(pm *serial.AddressMap) []byte {

View File

@@ -59,8 +59,11 @@ func (s BlobSerializer) Serialize(keys, values [][]byte, subtrees []uint64, leve
return serial.FinishMessage(b, serial.BlobEnd(b), blobFileID)
}
func getBlobKeys(msg serial.Message) ItemArray {
cnt := getBlobCount(msg)
func getBlobKeys(msg serial.Message) (ItemArray, error) {
cnt, err := getBlobCount(msg)
if err != nil {
return ItemArray{}, err
}
buf := make([]byte, cnt)
for i := range buf {
buf[i] = 0
@@ -73,37 +76,49 @@ func getBlobKeys(msg serial.Message) ItemArray {
return ItemArray{
Items: buf,
Offs: offs,
}
}, nil
}
func getBlobValues(msg serial.Message) ItemArray {
b := serial.GetRootAsBlob(msg, serial.MessagePrefixSz)
func getBlobValues(msg serial.Message) (ItemArray, error) {
var b serial.Blob
err := serial.InitBlobRoot(&b, msg, serial.MessagePrefixSz)
if err != nil {
return ItemArray{}, err
}
if b.TreeLevel() > 0 {
arr := b.AddressArrayBytes()
off := offsetsForAddressArray(arr)
return ItemArray{
Items: arr,
Offs: off,
}
}, nil
}
buf := b.PayloadBytes()
offs := make([]byte, 4)
binary.LittleEndian.PutUint16(offs[2:], uint16(len(buf)))
return ItemArray{Items: buf, Offs: offs}
return ItemArray{Items: buf, Offs: offs}, nil
}
func getBlobCount(msg serial.Message) uint16 {
b := serial.GetRootAsBlob(msg, serial.MessagePrefixSz)
if b.TreeLevel() == 0 {
return 1
func getBlobCount(msg serial.Message) (uint16, error) {
var b serial.Blob
err := serial.InitBlobRoot(&b, msg, serial.MessagePrefixSz)
if err != nil {
return 0, err
}
return uint16(b.AddressArrayLength() / hash.ByteLen)
if b.TreeLevel() == 0 {
return 1, nil
}
return uint16(b.AddressArrayLength() / hash.ByteLen), nil
}
func walkBlobAddresses(ctx context.Context, msg serial.Message, cb func(ctx context.Context, addr hash.Hash) error) error {
b := serial.GetRootAsBlob(msg, serial.MessagePrefixSz)
var b serial.Blob
err := serial.InitBlobRoot(&b, msg, serial.MessagePrefixSz)
if err != nil {
return err
}
arr := b.AddressArrayBytes()
for i := 0; i < len(arr)/hash.ByteLen; i++ {
addr := hash.New(arr[i*addrSize : (i+1)*addrSize])
@@ -114,23 +129,35 @@ func walkBlobAddresses(ctx context.Context, msg serial.Message, cb func(ctx cont
return nil
}
func getBlobTreeLevel(msg serial.Message) int {
b := serial.GetRootAsBlob(msg, serial.MessagePrefixSz)
return int(b.TreeLevel())
func getBlobTreeLevel(msg serial.Message) (int, error) {
var b serial.Blob
err := serial.InitBlobRoot(&b, msg, serial.MessagePrefixSz)
if err != nil {
return 0, err
}
return int(b.TreeLevel()), nil
}
func getBlobTreeCount(msg serial.Message) int {
b := serial.GetRootAsBlob(msg, serial.MessagePrefixSz)
return int(b.TreeSize())
func getBlobTreeCount(msg serial.Message) (int, error) {
var b serial.Blob
err := serial.InitBlobRoot(&b, msg, serial.MessagePrefixSz)
if err != nil {
return 0, err
}
return int(b.TreeSize()), nil
}
func getBlobSubtrees(msg serial.Message) []uint64 {
b := serial.GetRootAsBlob(msg, serial.MessagePrefixSz)
func getBlobSubtrees(msg serial.Message) ([]uint64, error) {
var b serial.Blob
err := serial.InitBlobRoot(&b, msg, serial.MessagePrefixSz)
if err != nil {
return nil, err
}
if b.TreeLevel() == 0 {
return nil
return nil, nil
}
counts := make([]uint64, b.AddressArrayLength()/hash.ByteLen)
return decodeVarints(b.SubtreeSizesBytes(), counts)
return decodeVarints(b.SubtreeSizesBytes(), counts), nil
}
func estimateBlobSize(values [][]byte, subtrees []uint64) (bufSz int) {

View File

@@ -50,49 +50,77 @@ func offsetsForCommitClosureKeys(buf []byte) []byte {
return commitClosureKeyOffsets[:cnt*uint16Size]
}
func getCommitClosureKeys(msg serial.Message) ItemArray {
func getCommitClosureKeys(msg serial.Message) (ItemArray, error) {
var ret ItemArray
m := serial.GetRootAsCommitClosure(msg, serial.MessagePrefixSz)
var m serial.CommitClosure
err := serial.InitCommitClosureRoot(&m, msg, serial.MessagePrefixSz)
if err != nil {
return ret, err
}
ret.Items = m.KeyItemsBytes()
ret.Offs = offsetsForCommitClosureKeys(ret.Items)
return ret
return ret, nil
}
func getCommitClosureValues(msg serial.Message) ItemArray {
func getCommitClosureValues(msg serial.Message) (ItemArray, error) {
var ret ItemArray
m := serial.GetRootAsCommitClosure(msg, serial.MessagePrefixSz)
var m serial.CommitClosure
err := serial.InitCommitClosureRoot(&m, msg, serial.MessagePrefixSz)
if err != nil {
return ret, err
}
if m.AddressArrayLength() == 0 {
ret.Items = commitClosureEmptyValueBytes
ret.Offs = commitClosureValueOffsets[:getCommitClosureCount(msg)*uint16Size]
return ret
cnt, err := getCommitClosureCount(msg)
if err != nil {
return ret, err
}
ret.Offs = commitClosureValueOffsets[:cnt*uint16Size]
return ret, nil
}
ret.Items = m.AddressArrayBytes()
ret.Offs = offsetsForAddressArray(ret.Items)
return ret
return ret, nil
}
// uint64 + hash.
const commitClosureKeyLength = 8 + 20
func getCommitClosureCount(msg serial.Message) uint16 {
m := serial.GetRootAsCommitClosure(msg, serial.MessagePrefixSz)
return uint16(m.KeyItemsLength() / commitClosureKeyLength)
func getCommitClosureCount(msg serial.Message) (uint16, error) {
var m serial.CommitClosure
err := serial.InitCommitClosureRoot(&m, msg, serial.MessagePrefixSz)
if err != nil {
return 0, err
}
return uint16(m.KeyItemsLength() / commitClosureKeyLength), nil
}
func getCommitClosureTreeLevel(msg serial.Message) int {
m := serial.GetRootAsCommitClosure(msg, serial.MessagePrefixSz)
return int(m.TreeLevel())
func getCommitClosureTreeLevel(msg serial.Message) (int, error) {
var m serial.CommitClosure
err := serial.InitCommitClosureRoot(&m, msg, serial.MessagePrefixSz)
if err != nil {
return 0, err
}
return int(m.TreeLevel()), nil
}
func getCommitClosureTreeCount(msg serial.Message) int {
m := serial.GetRootAsCommitClosure(msg, serial.MessagePrefixSz)
return int(m.TreeCount())
func getCommitClosureTreeCount(msg serial.Message) (int, error) {
var m serial.CommitClosure
err := serial.InitCommitClosureRoot(&m, msg, serial.MessagePrefixSz)
if err != nil {
return 0, err
}
return int(m.TreeCount()), nil
}
func getCommitClosureSubtrees(msg serial.Message) []uint64 {
counts := make([]uint64, getCommitClosureCount(msg))
func getCommitClosureSubtrees(msg serial.Message) ([]uint64, error) {
cnt, err := getCommitClosureCount(msg)
if err != nil {
return nil, err
}
counts := make([]uint64, cnt)
m := serial.GetRootAsCommitClosure(msg, serial.MessagePrefixSz)
return decodeVarints(m.SubtreeCountsBytes(), counts)
return decodeVarints(m.SubtreeCountsBytes(), counts), nil
}
func walkCommitClosureAddresses(ctx context.Context, msg serial.Message, cb func(ctx context.Context, addr hash.Hash) error) error {

View File

@@ -102,17 +102,21 @@ func (s MergeArtifactSerializer) Serialize(keys, values [][]byte, subtrees []uin
return serial.FinishMessage(b, serial.MergeArtifactsEnd(b), mergeArtifactFileID)
}
func getArtifactMapKeysAndValues(msg serial.Message) (keys, values ItemArray, cnt uint16) {
am := serial.GetRootAsMergeArtifacts(msg, serial.MessagePrefixSz)
func getArtifactMapKeysAndValues(msg serial.Message) (keys, values ItemArray, cnt uint16, err error) {
var am serial.MergeArtifacts
err = serial.InitMergeArtifactsRoot(&am, msg, serial.MessagePrefixSz)
if err != nil {
return
}
keys.Items = am.KeyItemsBytes()
keys.Offs = getMergeArtifactKeyOffsets(am)
keys.Offs = getMergeArtifactKeyOffsets(&am)
cnt = uint16(keys.Len())
vv := am.ValueItemsBytes()
if vv != nil {
values.Items = vv
values.Offs = getMergeArtifactValueOffsets(am)
values.Offs = getMergeArtifactValueOffsets(&am)
} else {
values.Items = am.AddressArrayBytes()
values.Offs = offsetsForAddressArray(values.Items)
@@ -122,7 +126,11 @@ func getArtifactMapKeysAndValues(msg serial.Message) (keys, values ItemArray, cn
}
func walkMergeArtifactAddresses(ctx context.Context, msg serial.Message, cb func(ctx context.Context, addr hash.Hash) error) error {
ma := serial.GetRootAsMergeArtifacts(msg, serial.MessagePrefixSz)
var ma serial.MergeArtifacts
err := serial.InitMergeArtifactsRoot(&ma, msg, serial.MessagePrefixSz)
if err != nil {
return err
}
arr := ma.AddressArrayBytes()
for i := 0; i < len(arr)/hash.ByteLen; i++ {
addr := hash.New(arr[i*addrSize : (i+1)*addrSize])
@@ -144,29 +152,49 @@ func walkMergeArtifactAddresses(ctx context.Context, msg serial.Message, cb func
return nil
}
func getMergeArtifactCount(msg serial.Message) uint16 {
ma := serial.GetRootAsMergeArtifacts(msg, serial.MessagePrefixSz)
func getMergeArtifactCount(msg serial.Message) (uint16, error) {
var ma serial.MergeArtifacts
err := serial.InitMergeArtifactsRoot(&ma, msg, serial.MessagePrefixSz)
if err != nil {
return 0, err
}
if ma.KeyItemsLength() == 0 {
return 0
return 0, nil
}
// zeroth offset ommitted from array
return uint16(ma.KeyOffsetsLength() + 1)
return uint16(ma.KeyOffsetsLength() + 1), nil
}
func getMergeArtifactTreeLevel(msg serial.Message) int {
ma := serial.GetRootAsMergeArtifacts(msg, serial.MessagePrefixSz)
return int(ma.TreeLevel())
func getMergeArtifactTreeLevel(msg serial.Message) (int, error) {
var ma serial.MergeArtifacts
err := serial.InitMergeArtifactsRoot(&ma, msg, serial.MessagePrefixSz)
if err != nil {
return 0, err
}
return int(ma.TreeLevel()), nil
}
func getMergeArtifactTreeCount(msg serial.Message) int {
ma := serial.GetRootAsMergeArtifacts(msg, serial.MessagePrefixSz)
return int(ma.TreeCount())
func getMergeArtifactTreeCount(msg serial.Message) (int, error) {
var ma serial.MergeArtifacts
err := serial.InitMergeArtifactsRoot(&ma, msg, serial.MessagePrefixSz)
if err != nil {
return 0, err
}
return int(ma.TreeCount()), nil
}
func getMergeArtifactSubtrees(msg serial.Message) []uint64 {
counts := make([]uint64, getMergeArtifactCount(msg))
ma := serial.GetRootAsMergeArtifacts(msg, serial.MessagePrefixSz)
return decodeVarints(ma.SubtreeCountsBytes(), counts)
func getMergeArtifactSubtrees(msg serial.Message) ([]uint64, error) {
sz, err := getMergeArtifactCount(msg)
if err != nil {
return nil, err
}
counts := make([]uint64, sz)
var ma serial.MergeArtifacts
err = serial.InitMergeArtifactsRoot(&ma, msg, serial.MessagePrefixSz)
if err != nil {
return nil, err
}
return decodeVarints(ma.SubtreeCountsBytes(), counts), nil
}
func getMergeArtifactKeyOffsets(ma *serial.MergeArtifacts) []byte {

View File

@@ -26,31 +26,49 @@ type Serializer interface {
Serialize(keys, values [][]byte, subtrees []uint64, level int) serial.Message
}
func GetKeysAndValues(msg serial.Message) (keys, values ItemArray, cnt uint16) {
func GetKeysAndValues(msg serial.Message) (keys, values ItemArray, cnt uint16, err error) {
id := serial.GetFileID(msg)
if id == serial.ProllyTreeNodeFileID {
return getProllyMapKeysAndValues(msg)
}
if id == serial.AddressMapFileID {
keys = getAddressMapKeys(msg)
values = getAddressMapValues(msg)
cnt = getAddressMapCount(msg)
keys, err = getAddressMapKeys(msg)
if err != nil {
return
}
values, err = getAddressMapValues(msg)
if err != nil {
return
}
cnt, err = getAddressMapCount(msg)
return
}
if id == serial.MergeArtifactsFileID {
return getArtifactMapKeysAndValues(msg)
}
if id == serial.CommitClosureFileID {
keys = getCommitClosureKeys(msg)
values = getCommitClosureValues(msg)
cnt = getCommitClosureCount(msg)
keys, err = getCommitClosureKeys(msg)
if err != nil {
return
}
values, err = getCommitClosureValues(msg)
if err != nil {
return
}
cnt, err = getCommitClosureCount(msg)
return
}
if id == serial.BlobFileID {
keys = getBlobKeys(msg)
values = getBlobValues(msg)
cnt = getBlobCount(msg)
keys, err = getBlobKeys(msg)
if err != nil {
return
}
values, err = getBlobValues(msg)
if err != nil {
return
}
cnt, err = getBlobCount(msg)
return
}
@@ -75,7 +93,7 @@ func WalkAddresses(ctx context.Context, msg serial.Message, cb func(ctx context.
}
}
func GetTreeLevel(msg serial.Message) int {
func GetTreeLevel(msg serial.Message) (int, error) {
id := serial.GetFileID(msg)
switch id {
case serial.ProllyTreeNodeFileID:
@@ -93,7 +111,7 @@ func GetTreeLevel(msg serial.Message) int {
}
}
func GetTreeCount(msg serial.Message) int {
func GetTreeCount(msg serial.Message) (int, error) {
id := serial.GetFileID(msg)
switch id {
case serial.ProllyTreeNodeFileID:
@@ -111,7 +129,7 @@ func GetTreeCount(msg serial.Message) int {
}
}
func GetSubtrees(msg serial.Message) []uint64 {
func GetSubtrees(msg serial.Message) ([]uint64, error) {
id := serial.GetFileID(msg)
switch id {
case serial.ProllyTreeNodeFileID:

View File

@@ -101,17 +101,21 @@ func (s ProllyMapSerializer) Serialize(keys, values [][]byte, subtrees []uint64,
return serial.FinishMessage(b, serial.ProllyTreeNodeEnd(b), prollyMapFileID)
}
func getProllyMapKeysAndValues(msg serial.Message) (keys, values ItemArray, cnt uint16) {
pm := serial.GetRootAsProllyTreeNode(msg, serial.MessagePrefixSz)
func getProllyMapKeysAndValues(msg serial.Message) (keys, values ItemArray, cnt uint16, err error) {
var pm serial.ProllyTreeNode
err = serial.InitProllyTreeNodeRoot(&pm, msg, serial.MessagePrefixSz)
if err != nil {
return
}
keys.Items = pm.KeyItemsBytes()
keys.Offs = getProllyMapKeyOffsets(pm)
keys.Offs = getProllyMapKeyOffsets(&pm)
cnt = uint16(keys.Len())
vv := pm.ValueItemsBytes()
if vv != nil {
values.Items = vv
values.Offs = getProllyMapValueOffsets(pm)
values.Offs = getProllyMapValueOffsets(&pm)
} else {
values.Items = pm.AddressArrayBytes()
values.Offs = offsetsForAddressArray(values.Items)
@@ -121,7 +125,11 @@ func getProllyMapKeysAndValues(msg serial.Message) (keys, values ItemArray, cnt
}
func walkProllyMapAddresses(ctx context.Context, msg serial.Message, cb func(ctx context.Context, addr hash.Hash) error) error {
pm := serial.GetRootAsProllyTreeNode(msg, serial.MessagePrefixSz)
var pm serial.ProllyTreeNode
err := serial.InitProllyTreeNodeRoot(&pm, msg, serial.MessagePrefixSz)
if err != nil {
return err
}
arr := pm.AddressArrayBytes()
for i := 0; i < len(arr)/hash.ByteLen; i++ {
addr := hash.New(arr[i*addrSize : (i+1)*addrSize])
@@ -143,25 +151,49 @@ func walkProllyMapAddresses(ctx context.Context, msg serial.Message, cb func(ctx
return nil
}
func getProllyMapCount(msg serial.Message) uint16 {
pm := serial.GetRootAsProllyTreeNode(msg, serial.MessagePrefixSz)
return uint16(pm.KeyOffsetsLength() - 1)
func getProllyMapCount(msg serial.Message) (uint16, error) {
var pm serial.ProllyTreeNode
err := serial.InitProllyTreeNodeRoot(&pm, msg, serial.MessagePrefixSz)
if err != nil {
return 0, err
}
return uint16(pm.KeyOffsetsLength() - 1), nil
}
func getProllyMapTreeLevel(msg serial.Message) int {
pm := serial.GetRootAsProllyTreeNode(msg, serial.MessagePrefixSz)
return int(pm.TreeLevel())
// getProllyMapTreeLevel returns the tree level recorded in a ProllyTreeNode
// message (0 for leaf nodes). It returns an error if the message fails the
// unknown-fields check performed by the generated initializer.
func getProllyMapTreeLevel(msg serial.Message) (int, error) {
	var pm serial.ProllyTreeNode
	err := serial.InitProllyTreeNodeRoot(&pm, msg, serial.MessagePrefixSz)
	if err != nil {
		// Propagate the actual init error; returning a hard-coded
		// ErrTableHasUnknownFields here would mask other failures.
		return 0, err
	}
	return int(pm.TreeLevel()), nil
}
func getProllyMapTreeCount(msg serial.Message) int {
pm := serial.GetRootAsProllyTreeNode(msg, serial.MessagePrefixSz)
return int(pm.TreeCount())
// getProllyMapTreeCount returns the total tuple count of the subtree rooted
// at this ProllyTreeNode message. It returns an error if the message fails
// the unknown-fields check performed by the generated initializer.
func getProllyMapTreeCount(msg serial.Message) (int, error) {
	var pm serial.ProllyTreeNode
	err := serial.InitProllyTreeNodeRoot(&pm, msg, serial.MessagePrefixSz)
	if err != nil {
		// Propagate the actual init error; returning a hard-coded
		// ErrTableHasUnknownFields here would mask other failures.
		return 0, err
	}
	return int(pm.TreeCount()), nil
}
func getProllyMapSubtrees(msg serial.Message) []uint64 {
counts := make([]uint64, getProllyMapCount(msg))
pm := serial.GetRootAsProllyTreeNode(msg, serial.MessagePrefixSz)
return decodeVarints(pm.SubtreeCountsBytes(), counts)
// getProllyMapSubtrees decodes the varint-encoded subtree cardinalities
// stored in a ProllyTreeNode message, one per child. It returns an error if
// the message fails the unknown-fields check.
func getProllyMapSubtrees(msg serial.Message) ([]uint64, error) {
	sz, err := getProllyMapCount(msg)
	if err != nil {
		return nil, err
	}
	// Use the generated root initializer instead of hand-rolling the
	// GetUOffsetT/Init/NumFields sequence; it performs the same
	// unknown-fields check and keeps this accessor consistent with the
	// others in this file.
	var pm serial.ProllyTreeNode
	err = serial.InitProllyTreeNodeRoot(&pm, msg, serial.MessagePrefixSz)
	if err != nil {
		return nil, err
	}
	counts := make([]uint64, sz)
	return decodeVarints(pm.SubtreeCountsBytes(), counts), nil
}
func getProllyMapKeyOffsets(pm *serial.ProllyTreeNode) []byte {

View File

@@ -34,7 +34,7 @@ func TestGetKeyValueOffsetsVectors(t *testing.T) {
msg := s.Serialize(keys, values, nil, 0)
// uses hard-coded vtable slot
keyBuf, valBuf, _ := getProllyMapKeysAndValues(msg)
keyBuf, valBuf, _, _ := getProllyMapKeysAndValues(msg)
for i := range keys {
assert.Equal(t, keys[i], keyBuf.GetItem(i))

View File

@@ -170,7 +170,12 @@ func debugFormat(ctx context.Context, m MutableMap) (string, error) {
}
sb.WriteString("\t},\n")
c = strconv.Itoa(m.tuples.tree.count())
ci, err := m.tuples.tree.count()
if err != nil {
return "", err
}
c = strconv.Itoa(ci)
sb.WriteString("\ttree (count: " + c + ") {\n")
for {
k, v, err := tupleIter.Next(ctx)

View File

@@ -76,7 +76,9 @@ func TestMutableMapWrites(t *testing.T) {
func testPointUpdates(t *testing.T, mapCount int) {
orig := ascendingIntMap(t, mapCount)
assert.Equal(t, mapCount, orig.Count())
c, err := orig.Count()
require.NoError(t, err)
assert.Equal(t, mapCount, c)
updates := make([][2]val.Tuple, mapCount)
for i := range updates {
@@ -93,7 +95,9 @@ func testPointUpdates(t *testing.T, mapCount int) {
require.NoError(t, err)
m := materializeMap(t, mut)
assert.Equal(t, mapCount, m.Count())
c, err := m.Count()
require.NoError(t, err)
assert.Equal(t, mapCount, c)
err = m.Get(ctx, up[0], func(k, v val.Tuple) error {
assert.Equal(t, up[0], k)
@@ -106,7 +110,9 @@ func testPointUpdates(t *testing.T, mapCount int) {
func testMultiplePointUpdates(t *testing.T, batch int, mapCount int) {
orig := ascendingIntMap(t, mapCount)
assert.Equal(t, mapCount, orig.Count())
c, err := orig.Count()
require.NoError(t, err)
assert.Equal(t, mapCount, c)
updates := make([][2]val.Tuple, mapCount)
for i := range updates {
@@ -126,7 +132,9 @@ func testMultiplePointUpdates(t *testing.T, batch int, mapCount int) {
require.NoError(t, err)
}
m := materializeMap(t, mut)
assert.Equal(t, mapCount, m.Count())
c, err := m.Count()
require.NoError(t, err)
assert.Equal(t, mapCount, c)
for _, up := range b {
err := m.Get(ctx, up[0], func(k, v val.Tuple) error {
@@ -142,7 +150,9 @@ func testMultiplePointUpdates(t *testing.T, batch int, mapCount int) {
func testPointInserts(t *testing.T, mapCount int) {
// create map of even numbers
orig := ascendingIntMapWithStep(t, mapCount, 2)
assert.Equal(t, mapCount, orig.Count())
c, err := orig.Count()
require.NoError(t, err)
assert.Equal(t, mapCount, c)
inserts := make([][2]val.Tuple, mapCount)
for i := range inserts {
@@ -161,7 +171,9 @@ func testPointInserts(t *testing.T, mapCount int) {
require.NoError(t, err)
m := materializeMap(t, mut)
assert.Equal(t, mapCount+1, m.Count())
c, err := m.Count()
require.NoError(t, err)
assert.Equal(t, mapCount+1, c)
ok, err := m.Has(ctx, in[0])
assert.NoError(t, err)
@@ -179,7 +191,9 @@ func testPointInserts(t *testing.T, mapCount int) {
func testMultiplePointInserts(t *testing.T, batch int, mapCount int) {
// create map of even numbers
orig := ascendingIntMapWithStep(t, mapCount, 2)
assert.Equal(t, mapCount, orig.Count())
c, err := orig.Count()
require.NoError(t, err)
assert.Equal(t, mapCount, c)
inserts := make([][2]val.Tuple, mapCount)
for i := range inserts {
@@ -201,7 +215,9 @@ func testMultiplePointInserts(t *testing.T, batch int, mapCount int) {
require.NoError(t, err)
}
m := materializeMap(t, mut)
assert.Equal(t, mapCount+batch, m.Count())
c, err := m.Count()
require.NoError(t, err)
assert.Equal(t, mapCount+batch, c)
for _, up := range b {
ok, err := m.Has(ctx, up[0])
@@ -220,7 +236,9 @@ func testMultiplePointInserts(t *testing.T, batch int, mapCount int) {
func testPointDeletes(t *testing.T, mapCount int) {
orig := ascendingIntMap(t, mapCount)
assert.Equal(t, mapCount, orig.Count())
c, err := orig.Count()
require.NoError(t, err)
assert.Equal(t, mapCount, c)
deletes := make([]val.Tuple, mapCount)
for i := range deletes {
@@ -237,7 +255,9 @@ func testPointDeletes(t *testing.T, mapCount int) {
assert.NoError(t, err)
m := materializeMap(t, mut)
assert.Equal(t, mapCount-1, m.Count())
c, err := m.Count()
require.NoError(t, err)
assert.Equal(t, mapCount-1, c)
ok, err := m.Has(ctx, del)
assert.NoError(t, err)
@@ -254,7 +274,9 @@ func testPointDeletes(t *testing.T, mapCount int) {
func testMultiplePointDeletes(t *testing.T, batch int, mapCount int) {
orig := ascendingIntMap(t, mapCount)
assert.Equal(t, mapCount, orig.Count())
c, err := orig.Count()
require.NoError(t, err)
assert.Equal(t, mapCount, c)
deletes := make([]val.Tuple, mapCount)
for i := range deletes {
@@ -274,7 +296,9 @@ func testMultiplePointDeletes(t *testing.T, batch int, mapCount int) {
require.NoError(t, err)
}
m := materializeMap(t, mut)
assert.Equal(t, mapCount-batch, m.Count())
c, err := m.Count()
require.NoError(t, err)
assert.Equal(t, mapCount-batch, c)
for _, del := range b {
ok, err := m.Has(ctx, del)
@@ -294,7 +318,9 @@ func testMultiplePointDeletes(t *testing.T, batch int, mapCount int) {
func testMixedMutations(t *testing.T, batch int, mapCount int) {
// create map of first |mapCount| *even* numbers
orig := ascendingIntMapWithStep(t, mapCount, 2)
assert.Equal(t, mapCount, orig.Count())
c, err := orig.Count()
require.NoError(t, err)
assert.Equal(t, mapCount, c)
mutations := make([][2]val.Tuple, mapCount*2)
for i := 0; i < len(mutations); i += 2 {
@@ -317,7 +343,6 @@ func testMixedMutations(t *testing.T, batch int, mapCount int) {
mutations[i], mutations[j] = mutations[j], mutations[i]
})
var err error
ctx := context.Background()
for x := 0; x < len(mutations); x += batch {
b := mutations[x : x+batch]
@@ -355,7 +380,9 @@ func testMixedMutations(t *testing.T, batch int, mapCount int) {
func testInsertsOutsideExistingRange(t *testing.T, mapCount int) {
orig := ascendingIntMapWithStep(t, mapCount, 1)
assert.Equal(t, mapCount, orig.Count())
c, err := orig.Count()
require.NoError(t, err)
assert.Equal(t, mapCount, c)
inserts := make([][2]val.Tuple, 2)
// insert before beginning
@@ -372,7 +399,9 @@ func testInsertsOutsideExistingRange(t *testing.T, mapCount int) {
require.NoError(t, err)
m := materializeMap(t, mut)
assert.Equal(t, mapCount+1, m.Count())
c, err := m.Count()
require.NoError(t, err)
assert.Equal(t, mapCount+1, c)
ok, err := m.Has(ctx, in[0])
assert.NoError(t, err)
@@ -390,7 +419,9 @@ func testInsertsOutsideExistingRange(t *testing.T, mapCount int) {
func testBulkInserts(t *testing.T, size int) {
// create sparse map
orig := ascendingIntMapWithStep(t, size, size)
assert.Equal(t, size, orig.Count())
c, err := orig.Count()
require.NoError(t, err)
assert.Equal(t, size, c)
// make 10x as many inserts as the size of the map
inserts := make([][2]val.Tuple, size*10)
@@ -410,7 +441,9 @@ func testBulkInserts(t *testing.T, size int) {
}
m := materializeMap(t, mut)
assert.Equal(t, size*11, m.Count())
c, err = m.Count()
require.NoError(t, err)
assert.Equal(t, size*11, c)
for _, in := range inserts {
ok, err := m.Has(ctx, in[0])
@@ -462,7 +495,9 @@ func testInternalNodeSplits(t *testing.T) {
}
pm, err = mut.Map(ctx)
assert.NoError(t, err)
assert.Equal(t, n+k, pm.Count())
c, err := pm.Count()
require.NoError(t, err)
assert.Equal(t, n+k, c)
}
}

View File

@@ -88,12 +88,13 @@ func mergeOrderedTrees[K, V ~[]byte, O ordering[K], S message.Serializer](
}, nil
}
func (t orderedTree[K, V, O]) count() int {
func (t orderedTree[K, V, O]) count() (int, error) {
return t.root.TreeCount()
}
func (t orderedTree[K, V, O]) height() int {
return t.root.Level() + 1
func (t orderedTree[K, V, O]) height() (int, error) {
l, err := t.root.Level()
return l + 1, err
}
func (t orderedTree[K, V, O]) hashOf() hash.Hash {
@@ -218,8 +219,14 @@ func (t orderedTree[K, V, O]) iterOrdinalRange(ctx context.Context, start, stop
}
if stop < start {
return nil, fmt.Errorf("invalid ordinal bounds (%d, %d)", start, stop)
} else if stop > uint64(t.count()) {
return nil, fmt.Errorf("stop index (%d) out of bounds", stop)
} else {
c, err := t.count()
if err != nil {
return nil, err
}
if stop > uint64(c) {
return nil, fmt.Errorf("stop index (%d) out of bounds", stop)
}
}
lo, err := tree.NewCursorAtOrdinal(ctx, t.ns, t.root, start)

View File

@@ -24,7 +24,7 @@ import (
"github.com/dolthub/dolt/go/store/val"
)
func NodeFromValue(v types.Value) tree.Node {
func NodeFromValue(v types.Value) (tree.Node, error) {
return tree.NodeFromBytes(v.(types.SerialMessage))
}
@@ -36,11 +36,14 @@ func ValueFromArtifactMap(m prolly.ArtifactMap) types.Value {
return tree.ValueFromNode(m.Node())
}
func MapFromValue(v types.Value, sch schema.Schema, ns tree.NodeStore) prolly.Map {
root := NodeFromValue(v)
func MapFromValue(v types.Value, sch schema.Schema, ns tree.NodeStore) (prolly.Map, error) {
root, err := NodeFromValue(v)
if err != nil {
return prolly.Map{}, err
}
kd := KeyDescriptorFromSchema(sch)
vd := ValueDescriptorFromSchema(sch)
return prolly.NewMap(root, ns, kd, vd)
return prolly.NewMap(root, ns, kd, vd), nil
}
func MapDescriptorsFromSchema(sch schema.Schema) (kd, vd val.TupleDesc) {

View File

@@ -92,10 +92,15 @@ func (tc *chunker[S]) processPrefix(ctx context.Context) (err error) {
tc.cur.skipToNodeStart()
for tc.cur.idx < idx {
var sz uint64
sz, err = tc.cur.currentSubtreeSize()
if err != nil {
return err
}
_, err = tc.append(ctx,
tc.cur.CurrentKey(),
tc.cur.CurrentValue(),
tc.cur.currentSubtreeSize())
sz)
// todo(andy): seek to correct chunk
// currently when inserting tuples between chunks
@@ -178,7 +183,11 @@ func (tc *chunker[S]) AdvanceTo(ctx context.Context, next *Cursor) error {
return nil
}
split, err := tc.append(ctx, tc.cur.CurrentKey(), tc.cur.CurrentValue(), tc.cur.currentSubtreeSize())
sz, err := tc.cur.currentSubtreeSize()
if err != nil {
return err
}
split, err := tc.append(ctx, tc.cur.CurrentKey(), tc.cur.CurrentValue(), sz)
if err != nil {
return err
}
@@ -192,7 +201,11 @@ func (tc *chunker[S]) AdvanceTo(ctx context.Context, next *Cursor) error {
// we caught up before synchronizing
return nil
}
split, err = tc.append(ctx, tc.cur.CurrentKey(), tc.cur.CurrentValue(), tc.cur.currentSubtreeSize())
sz, err := tc.cur.currentSubtreeSize()
if err != nil {
return err
}
split, err = tc.append(ctx, tc.cur.CurrentKey(), tc.cur.CurrentValue(), sz)
if err != nil {
return err
}
@@ -404,11 +417,16 @@ func (tc *chunker[S]) Done(ctx context.Context) (Node, error) {
// boundary or the end of the Node.
func (tc *chunker[S]) finalizeCursor(ctx context.Context) (err error) {
for tc.cur.Valid() {
var sz uint64
sz, err = tc.cur.currentSubtreeSize()
if err != nil {
return
}
var ok bool
ok, err = tc.append(ctx,
tc.cur.CurrentKey(),
tc.cur.CurrentValue(),
tc.cur.currentSubtreeSize())
sz)
if err != nil {
return err
}
@@ -457,7 +475,10 @@ func getCanonicalRoot[S message.Serializer](ctx context.Context, ns NodeStore, b
cnt := builder.count()
assertTrue(cnt == 1)
nd := builder.build()
nd, err := builder.build()
if err != nil {
return Node{}, err
}
mt := nd.getAddress(0)
for {
@@ -466,7 +487,11 @@ func getCanonicalRoot[S message.Serializer](ctx context.Context, ns NodeStore, b
return Node{}, err
}
if child.IsLeaf() || child.count > 1 {
leaf, err := child.IsLeaf()
if err != nil {
return Node{}, err
}
if leaf || child.count > 1 {
return child, nil
}

View File

@@ -32,28 +32,40 @@ func roundTripTreeItems(t *testing.T) {
root, items, ns := randomTree(t, 1000)
assert.NotNil(t, root)
assert.True(t, root.count > 0)
assert.True(t, root.Level() > 0)
level, err := root.Level()
require.NoError(t, err)
assert.True(t, level > 0)
//assert.Equal(t, uint64(1000), root.cumulativeCount())
assert.Equal(t, countTree(t, ns, root), 1000)
assert.Equal(t, root.TreeCount()*2, 1000)
tc, err := root.TreeCount()
require.NoError(t, err)
assert.Equal(t, tc*2, 1000)
validateTreeItems(t, ns, root, items)
root, items, ns = randomTree(t, 10_000)
assert.NotNil(t, root)
assert.True(t, root.count > 0)
assert.True(t, root.Level() > 0)
level, err = root.Level()
require.NoError(t, err)
assert.True(t, level > 0)
//assert.Equal(t, uint64(10_000), root.cumulativeCount())
assert.Equal(t, countTree(t, ns, root), 10_000)
assert.Equal(t, root.TreeCount()*2, 10_000)
tc, err = root.TreeCount()
require.NoError(t, err)
assert.Equal(t, tc*2, 10_000)
validateTreeItems(t, ns, root, items)
root, items, ns = randomTree(t, 100_000)
assert.NotNil(t, root)
assert.True(t, root.count > 0)
assert.True(t, root.Level() > 0)
level, err = root.Level()
require.NoError(t, err)
assert.True(t, level > 0)
//assert.Equal(t, uint64(100_000), root.cumulativeCount())
assert.Equal(t, countTree(t, ns, root), 100_000)
assert.Equal(t, root.TreeCount()*2, 100_000)
tc, err = root.TreeCount()
require.NoError(t, err)
assert.Equal(t, tc*2, 100_000)
validateTreeItems(t, ns, root, items)
}

View File

@@ -39,7 +39,9 @@ func TestContentAddress(t *testing.T) {
m := makeTree(t, tups)
require.NotNil(t, m)
require.Equal(t, goldenHash, m.HashOf())
assert.Equal(t, 12345, m.TreeCount())
tc, err := m.TreeCount()
require.NoError(t, err)
assert.Equal(t, 12345, tc)
}
func makeTree(t *testing.T, tuples [][2]val.Tuple) Node {

View File

@@ -128,7 +128,10 @@ func _newInternal(ctx context.Context, ns NodeStore, s message.Serializer, nodes
treeCnt += nodes[i].treeCount
}
msg := s.Serialize(keys, vals, subtrees, level)
node := NodeFromBytes(msg)
node, err := NodeFromBytes(msg)
if err != nil {
return novelNode{}, err
}
addr, err := ns.Write(ctx, node)
if err != nil {
return novelNode{}, err
@@ -143,7 +146,10 @@ func _newInternal(ctx context.Context, ns NodeStore, s message.Serializer, nodes
func _newLeaf(ctx context.Context, ns NodeStore, s message.Serializer, buf []byte) (novelNode, error) {
msg := s.Serialize([][]byte{{0}}, [][]byte{buf}, []uint64{1}, 0)
node := NodeFromBytes(msg)
node, err := NodeFromBytes(msg)
if err != nil {
return novelNode{}, err
}
addr, err := ns.Write(ctx, node)
if err != nil {
return novelNode{}, err
@@ -263,7 +269,11 @@ func (t *ImmutableTree) load(ctx context.Context) error {
}
WalkNodes(ctx, n, t.ns, func(ctx context.Context, n Node) error {
if n.IsLeaf() {
leaf, err := n.IsLeaf()
if err != nil {
return err
}
if leaf {
t.buf = append(t.buf, n.getValue(0)...)
}
return nil

View File

@@ -158,7 +158,11 @@ func TestWriteImmutableTree(t *testing.T) {
byteCnt := 0
WalkNodes(ctx, root, ns, func(ctx context.Context, n Node) error {
var keyCnt int
if n.IsLeaf() {
leaf, err := n.IsLeaf()
if err != nil {
return err
}
if leaf {
byteCnt += len(n.values.Items)
for _, i := range n.getValue(0) {
sum += int(i)
@@ -176,16 +180,21 @@ func TestWriteImmutableTree(t *testing.T) {
return nil
})
assert.Equal(t, expLevel, root.Level())
level, err := root.Level()
require.NoError(t, err)
assert.Equal(t, expLevel, level)
if tt.checkSum {
assert.Equal(t, expSum, sum)
}
assert.Equal(t, tt.inputSize, byteCnt)
assert.Equal(t, expUnfilled, unfilledCnt)
if expLevel > 0 {
root = root.loadSubtrees()
root, err = root.loadSubtrees()
require.NoError(t, err)
for i := range expSubtrees {
assert.Equal(t, expSubtrees[i], root.getSubtreeCount(i))
sc, err := root.getSubtreeCount(i)
require.NoError(t, err)
assert.Equal(t, expSubtrees[i], sc)
}
}
})

View File

@@ -47,7 +47,11 @@ func WalkAddresses(ctx context.Context, nd Node, ns NodeStore, cb AddressCb) err
return err
}
if nd.IsLeaf() {
leaf, err := nd.IsLeaf()
if err != nil {
return err
}
if leaf {
return nil
}
@@ -69,7 +73,11 @@ func WalkNodes(ctx context.Context, nd Node, ns NodeStore, cb NodeCb) error {
return err
}
if nd.IsLeaf() {
leaf, err := nd.IsLeaf()
if err != nil {
return err
}
if leaf {
return nil
}
@@ -98,14 +106,14 @@ func walkOpaqueNodes(ctx context.Context, nd Node, ns NodeStore, cb NodeCb) erro
})
}
func NodeFromBytes(msg []byte) Node {
keys, values, count := message.GetKeysAndValues(msg)
func NodeFromBytes(msg []byte) (Node, error) {
keys, values, count, err := message.GetKeysAndValues(msg)
return Node{
keys: keys,
values: values,
count: count,
msg: msg,
}
}, err
}
func (nd Node) HashOf() hash.Hash {
@@ -116,7 +124,7 @@ func (nd Node) Count() int {
return int(nd.count)
}
func (nd Node) TreeCount() int {
func (nd Node) TreeCount() (int, error) {
return message.GetTreeCount(nd.msg)
}
@@ -125,13 +133,14 @@ func (nd Node) Size() int {
}
// Level returns the tree Level for this node
func (nd Node) Level() int {
func (nd Node) Level() (int, error) {
return message.GetTreeLevel(nd.msg)
}
// IsLeaf returns whether this node is a leaf
func (nd Node) IsLeaf() bool {
return nd.Level() == 0
func (nd Node) IsLeaf() (bool, error) {
l, err := nd.Level()
return l == 0, err
}
// GetKey returns the |ith| key of this node
@@ -144,21 +153,26 @@ func (nd Node) getValue(i int) Item {
return nd.values.GetItem(i)
}
func (nd Node) loadSubtrees() Node {
func (nd Node) loadSubtrees() (Node, error) {
var err error
if nd.subtrees == nil {
// deserializing subtree counts requires a malloc,
// we don't load them unless explicitly requested
nd.subtrees = message.GetSubtrees(nd.msg)
nd.subtrees, err = message.GetSubtrees(nd.msg)
}
return nd
return nd, err
}
func (nd Node) getSubtreeCount(i int) uint64 {
if nd.IsLeaf() {
return 1
func (nd Node) getSubtreeCount(i int) (uint64, error) {
leaf, err := nd.IsLeaf()
if err != nil {
return 0, err
}
if leaf {
return 1, nil
}
// this will panic unless subtrees were loaded.
return nd.subtrees[i]
return nd.subtrees[i], nil
}
// getAddress returns the |ith| address of this node.
@@ -196,7 +210,11 @@ func OutputProllyNode(w io.Writer, node Node) error {
w.Write([]byte(hex.EncodeToString(kt.GetField(j))))
}
if node.IsLeaf() {
leaf, err := node.IsLeaf()
if err != nil {
return err
}
if leaf {
v := node.getValue(i)
vt := val.Tuple(v)

View File

@@ -31,7 +31,10 @@ type novelNode struct {
}
func writeNewNode[S message.Serializer](ctx context.Context, ns NodeStore, bld *nodeBuilder[S]) (novelNode, error) {
node := bld.build()
node, err := bld.build()
if err != nil {
return novelNode{}, err
}
addr, err := ns.Write(ctx, node)
if err != nil {
@@ -45,13 +48,16 @@ func writeNewNode[S message.Serializer](ctx context.Context, ns NodeStore, bld *
copy(lastKey, k)
}
treeCount := uint64(node.TreeCount())
cnt, err := node.TreeCount()
if err != nil {
return novelNode{}, err
}
return novelNode{
addr: addr,
node: node,
lastKey: lastKey,
treeCount: treeCount,
treeCount: uint64(cnt),
}, nil
}
@@ -91,7 +97,7 @@ func (nb *nodeBuilder[S]) count() int {
return len(nb.keys)
}
func (nb *nodeBuilder[S]) build() (node Node) {
func (nb *nodeBuilder[S]) build() (node Node, err error) {
msg := nb.serializer.Serialize(nb.keys, nb.values, nb.subtrees, nb.level)
nb.recycleBuffers()
nb.size = 0

View File

@@ -44,7 +44,12 @@ type ItemSearchFn func(item Item, nd Node) (idx int)
func NewCursorAtStart(ctx context.Context, ns NodeStore, nd Node) (cur *Cursor, err error) {
cur = &Cursor{nd: nd, nrw: ns}
for !cur.isLeaf() {
var leaf bool
leaf, err = cur.isLeaf()
if err != nil {
return nil, err
}
for !leaf {
nd, err = fetchChild(ctx, ns, cur.CurrentRef())
if err != nil {
return nil, err
@@ -52,6 +57,10 @@ func NewCursorAtStart(ctx context.Context, ns NodeStore, nd Node) (cur *Cursor,
parent := cur
cur = &Cursor{nd: nd, parent: parent, nrw: ns}
leaf, err = cur.isLeaf()
if err != nil {
return nil, err
}
}
return
}
@@ -60,7 +69,12 @@ func NewCursorAtEnd(ctx context.Context, ns NodeStore, nd Node) (cur *Cursor, er
cur = &Cursor{nd: nd, nrw: ns}
cur.skipToNodeEnd()
for !cur.isLeaf() {
var leaf bool
leaf, err = cur.isLeaf()
if err != nil {
return nil, err
}
for !leaf {
nd, err = fetchChild(ctx, ns, cur.CurrentRef())
if err != nil {
return nil, err
@@ -69,6 +83,10 @@ func NewCursorAtEnd(ctx context.Context, ns NodeStore, nd Node) (cur *Cursor, er
parent := cur
cur = &Cursor{nd: nd, parent: parent, nrw: ns}
cur.skipToNodeEnd()
leaf, err = cur.isLeaf()
if err != nil {
return nil, err
}
}
return
}
@@ -92,19 +110,25 @@ func NewCursorPastEnd(ctx context.Context, ns NodeStore, nd Node) (cur *Cursor,
}
func NewCursorAtOrdinal(ctx context.Context, ns NodeStore, nd Node, ord uint64) (cur *Cursor, err error) {
if ord >= uint64(nd.TreeCount()) {
cnt, err := nd.TreeCount()
if err != nil {
return nil, err
}
if ord >= uint64(cnt) {
return NewCursorPastEnd(ctx, ns, nd)
}
distance := int64(ord)
return NewCursorFromSearchFn(ctx, ns, nd, func(nd Node) (idx int) {
if nd.IsLeaf() {
leaf, _ := nd.IsLeaf()
if leaf {
return int(distance)
}
nd = nd.loadSubtrees()
nd, _ = nd.loadSubtrees()
for idx = 0; idx < nd.Count(); idx++ {
card := int64(nd.getSubtreeCount(idx))
cnt, _ := nd.getSubtreeCount(idx)
card := int64(cnt)
if (distance - card) < 0 {
break
}
@@ -118,7 +142,12 @@ func NewCursorFromSearchFn(ctx context.Context, ns NodeStore, nd Node, search Se
cur = &Cursor{nd: nd, nrw: ns}
cur.idx = search(cur.nd)
for !cur.isLeaf() {
var leaf bool
leaf, err = cur.isLeaf()
if err != nil {
return nil, err
}
for !leaf {
// stay in bounds for internal nodes
cur.keepInBounds()
@@ -132,6 +161,10 @@ func NewCursorFromSearchFn(ctx context.Context, ns NodeStore, nd Node, search Se
cur = &Cursor{nd: nd, parent: parent, nrw: ns}
cur.idx = search(cur.nd)
leaf, err = cur.isLeaf()
if err != nil {
return nil, err
}
}
return
@@ -149,7 +182,12 @@ func NewCursorAtItem(ctx context.Context, ns NodeStore, nd Node, item Item, sear
cur = &Cursor{nd: nd, nrw: ns}
cur.idx = search(item, cur.nd)
for !cur.isLeaf() {
var leaf bool
leaf, err = cur.isLeaf()
if err != nil {
return nil, err
}
for !leaf {
// stay in bounds for internal nodes
cur.keepInBounds()
@@ -163,6 +201,10 @@ func NewCursorAtItem(ctx context.Context, ns NodeStore, nd Node, item Item, sear
cur = &Cursor{nd: nd, parent: parent, nrw: ns}
cur.idx = search(item, cur.nd)
leaf, err = cur.isLeaf()
if err != nil {
return nil, err
}
}
return
@@ -172,7 +214,12 @@ func NewLeafCursorAtItem(ctx context.Context, ns NodeStore, nd Node, item Item,
cur = Cursor{nd: nd, parent: nil, nrw: ns}
cur.idx = search(item, cur.nd)
for !cur.isLeaf() {
var leaf bool
leaf, err = cur.isLeaf()
if err != nil {
return cur, err
}
for !leaf {
// stay in bounds for internal nodes
cur.keepInBounds()
@@ -184,6 +231,10 @@ func NewLeafCursorAtItem(ctx context.Context, ns NodeStore, nd Node, item Item,
}
cur.idx = search(item, cur.nd)
leaf, err = cur.isLeaf()
if err != nil {
return cur, err
}
}
return cur, nil
@@ -214,11 +265,18 @@ func (cur *Cursor) CurrentRef() hash.Hash {
return cur.nd.getAddress(cur.idx)
}
func (cur *Cursor) currentSubtreeSize() uint64 {
if cur.isLeaf() {
return 1
func (cur *Cursor) currentSubtreeSize() (uint64, error) {
leaf, err := cur.isLeaf()
if err != nil {
return 0, err
}
if leaf {
return 1, nil
}
cur.nd, err = cur.nd.loadSubtrees()
if err != nil {
return 0, err
}
cur.nd = cur.nd.loadSubtrees()
return cur.nd.getSubtreeCount(cur.idx)
}
@@ -261,13 +319,21 @@ func (cur *Cursor) atNodeEnd() bool {
return cur.idx == lastKeyIdx
}
func (cur *Cursor) isLeaf() bool {
func (cur *Cursor) isLeaf() (bool, error) {
// todo(andy): cache Level
return cur.level() == 0
lvl, err := cur.level()
if err != nil {
return false, err
}
return lvl == 0, nil
}
func (cur *Cursor) level() uint64 {
return uint64(cur.nd.Level())
func (cur *Cursor) level() (uint64, error) {
lvl, err := cur.nd.Level()
if err != nil {
return 0, err
}
return uint64(lvl), nil
}
// seek updates the cursor's node to one whose range spans the key's value, or the last

View File

@@ -67,7 +67,7 @@ func NewNodeStore(cs chunks.ChunkStore) NodeStore {
func (ns nodeStore) Read(ctx context.Context, ref hash.Hash) (Node, error) {
c, ok := ns.cache.get(ref)
if ok {
return NodeFromBytes(c.Data()), nil
return NodeFromBytes(c.Data())
}
c, err := ns.store.Get(ctx, ref)
@@ -78,7 +78,7 @@ func (ns nodeStore) Read(ctx context.Context, ref hash.Hash) (Node, error) {
ns.cache.insert(c)
return NodeFromBytes(c.Data()), err
return NodeFromBytes(c.Data())
}
// Write implements NodeStore.

View File

@@ -42,7 +42,9 @@ func TestRoundTripInts(t *testing.T) {
require.True(t, sumTupleSize(keys)+sumTupleSize(values) < message.MaxVectorOffset)
nd := NewTupleLeafNode(keys, values)
assert.True(t, nd.IsLeaf())
leaf, err := nd.IsLeaf()
require.NoError(t, err)
assert.True(t, leaf)
assert.Equal(t, len(keys), int(nd.count))
for i := range keys {
assert.Equal(t, keys[i], val.Tuple(nd.GetKey(i)))
@@ -56,7 +58,9 @@ func TestRoundTripNodeItems(t *testing.T) {
require.True(t, sumSize(keys)+sumSize(values) < message.MaxVectorOffset)
nd := newLeafNode(keys, values)
assert.True(t, nd.IsLeaf())
leaf, err := nd.IsLeaf()
require.NoError(t, err)
assert.True(t, leaf)
assert.Equal(t, len(keys), int(nd.count))
for i := range keys {
assert.Equal(t, keys[i], nd.GetKey(i))
@@ -85,7 +89,8 @@ func TestNodeHashValueCompatibility(t *testing.T) {
[][]byte{h1[:], h2[:]},
[]uint64{},
0)
nd = NodeFromBytes(msg)
nd, err = NodeFromBytes(msg)
require.NoError(t, err)
th, err = ValueFromNode(nd).Hash(nbf)
require.NoError(t, err)
assert.Equal(t, nd.HashOf(), th)

View File

@@ -74,10 +74,16 @@ func (s Samples) percentiles() (p50, p90, p99, p999, p100 int) {
func PrintTreeSummaryByLevel(t *testing.T, nd Node, ns NodeStore) {
ctx := context.Background()
sizeByLevel := make([]Samples, nd.Level()+1)
cardByLevel := make([]Samples, nd.Level()+1)
err := WalkNodes(ctx, nd, ns, func(ctx context.Context, nd Node) error {
lvl := nd.Level()
level, err := nd.Level()
require.NoError(t, err)
sizeByLevel := make([]Samples, level+1)
cardByLevel := make([]Samples, level+1)
err = WalkNodes(ctx, nd, ns, func(ctx context.Context, nd Node) error {
lvl, err := nd.Level()
if err != nil {
return err
}
sizeByLevel[lvl] = append(sizeByLevel[lvl], nd.Size())
cardByLevel[lvl] = append(cardByLevel[lvl], int(nd.count))
return nil
@@ -86,7 +92,9 @@ func PrintTreeSummaryByLevel(t *testing.T, nd Node, ns NodeStore) {
fmt.Println("pre-edit map Summary: ")
fmt.Println("| Level | count | avg Size \t p50 \t p90 \t p100 | avg card \t p50 \t p90 \t p100 |")
for i := nd.Level(); i >= 0; i-- {
level, err = nd.Level()
require.NoError(t, err)
for i := level; i >= 0; i-- {
sizes, cards := sizeByLevel[i], cardByLevel[i]
sp50, _, sp90, _, sp100 := sizes.percentiles()
cp50, _, cp90, _, cp100 := cards.percentiles()

View File

@@ -165,7 +165,11 @@ func newLeafNode(keys, values []Item) Node {
s := message.NewProllyMapSerializer(val.TupleDesc{}, sharedPool)
msg := s.Serialize(kk, vv, nil, 0)
return NodeFromBytes(msg)
n, err := NodeFromBytes(msg)
if err != nil {
panic(err)
}
return n
}
// assumes a sorted list

View File

@@ -150,7 +150,11 @@ func testWriteAmplification(t *testing.T, before Map, method mutationProvider) {
func collectMutations(t *testing.T, before Map, method mutationProvider) (muts []mutation) {
ctx := context.Background()
err := before.WalkNodes(ctx, func(ctx context.Context, nd tree.Node) error {
if nd.IsLeaf() {
leaf, err := nd.IsLeaf()
if err != nil {
return err
}
if leaf {
mm, err := method.makeMutations(ctx, nd)
require.NoError(t, err)
muts = append(muts, mm...)

View File

@@ -236,7 +236,11 @@ func (fp FieldPath) Resolve(ctx context.Context, v Value, vr ValueReader) (Value
}
case SerialMessage:
if serial.GetFileID(v) == serial.CommitFileID && fp.Name == "value" {
msg := serial.GetRootAsCommit(v, serial.MessagePrefixSz)
var msg serial.Commit
err := serial.InitCommitRoot(&msg, v, serial.MessagePrefixSz)
if err != nil {
return nil, err
}
addr := hash.New(msg.RootBytes())
return vr.ReadValue(ctx, addr)
}

View File

@@ -141,7 +141,10 @@ func (sm SerialMessage) HumanReadableString() string {
fmt.Fprintf(ret, "}")
return ret.String()
case serial.AddressMapFileID:
keys, values, cnt := message.GetKeysAndValues(serial.Message(sm))
keys, values, cnt, err := message.GetKeysAndValues(serial.Message(sm))
if err != nil {
return fmt.Sprintf("error in HumanReadString(): %s", err)
}
var b strings.Builder
b.Write([]byte("AddressMap{\n"))
for i := uint16(0); i < cnt; i++ {
@@ -174,13 +177,21 @@ const SerialMessageRefHeight = 1024
func (sm SerialMessage) walkRefs(nbf *NomsBinFormat, cb RefCallback) error {
switch serial.GetFileID(sm) {
case serial.StoreRootFileID:
msg := serial.GetRootAsStoreRoot([]byte(sm), serial.MessagePrefixSz)
var msg serial.StoreRoot
err := serial.InitStoreRootRoot(&msg, []byte(sm), serial.MessagePrefixSz)
if err != nil {
return err
}
if msg.AddressMapLength() > 0 {
mapbytes := msg.AddressMapBytes()
return SerialMessage(mapbytes).walkRefs(nbf, cb)
}
case serial.TagFileID:
msg := serial.GetRootAsTag([]byte(sm), serial.MessagePrefixSz)
var msg serial.Tag
err := serial.InitTagRoot(&msg, []byte(sm), serial.MessagePrefixSz)
if err != nil {
return err
}
addr := hash.New(msg.CommitAddrBytes())
r, err := constructRef(nbf, addr, PrimitiveTypeMap[ValueKind], SerialMessageRefHeight)
if err != nil {
@@ -188,7 +199,11 @@ func (sm SerialMessage) walkRefs(nbf *NomsBinFormat, cb RefCallback) error {
}
return cb(r)
case serial.WorkingSetFileID:
msg := serial.GetRootAsWorkingSet([]byte(sm), serial.MessagePrefixSz)
var msg serial.WorkingSet
err := serial.InitWorkingSetRoot(&msg, []byte(sm), serial.MessagePrefixSz)
if err != nil {
return err
}
addr := hash.New(msg.WorkingRootAddrBytes())
r, err := constructRef(nbf, addr, PrimitiveTypeMap[ValueKind], SerialMessageRefHeight)
if err != nil {
@@ -228,8 +243,12 @@ func (sm SerialMessage) walkRefs(nbf *NomsBinFormat, cb RefCallback) error {
}
}
case serial.RootValueFileID:
msg := serial.GetRootAsRootValue([]byte(sm), serial.MessagePrefixSz)
err := SerialMessage(msg.TablesBytes()).walkRefs(nbf, cb)
var msg serial.RootValue
err := serial.InitRootValueRoot(&msg, []byte(sm), serial.MessagePrefixSz)
if err != nil {
return err
}
err = SerialMessage(msg.TablesBytes()).walkRefs(nbf, cb)
if err != nil {
return err
}
@@ -244,7 +263,11 @@ func (sm SerialMessage) walkRefs(nbf *NomsBinFormat, cb RefCallback) error {
}
}
case serial.TableFileID:
msg := serial.GetRootAsTable([]byte(sm), serial.MessagePrefixSz)
var msg serial.Table
err := serial.InitTableRoot(&msg, []byte(sm), serial.MessagePrefixSz)
if err != nil {
return err
}
addr := hash.New(msg.SchemaBytes())
r, err := constructRef(nbf, addr, PrimitiveTypeMap[ValueKind], SerialMessageRefHeight)
if err != nil {
@@ -342,7 +365,11 @@ func (sm SerialMessage) walkRefs(nbf *NomsBinFormat, cb RefCallback) error {
return err
}
}
msg := serial.GetRootAsCommit([]byte(sm), serial.MessagePrefixSz)
var msg serial.Commit
err = serial.InitCommitRoot(&msg, []byte(sm), serial.MessagePrefixSz)
if err != nil {
return err
}
addr := hash.New(msg.RootBytes())
r, err := constructRef(nbf, addr, PrimitiveTypeMap[ValueKind], SerialMessageRefHeight)
if err != nil {
@@ -389,7 +416,11 @@ func (sm SerialMessage) walkRefs(nbf *NomsBinFormat, cb RefCallback) error {
}
func SerialCommitParentAddrs(nbf *NomsBinFormat, sm SerialMessage) ([]hash.Hash, error) {
msg := serial.GetRootAsCommit([]byte(sm), serial.MessagePrefixSz)
var msg serial.Commit
err := serial.InitCommitRoot(&msg, []byte(sm), serial.MessagePrefixSz)
if err != nil {
return nil, err
}
addrs := msg.ParentAddrsBytes()
n := len(addrs) / 20
ret := make([]hash.Hash, n)