go/store/datas: Add flatbuffers serialization for workingset.

This commit is contained in:
Aaron Son
2022-03-02 16:32:24 -08:00
parent 88229d027a
commit 6307f96e6f
11 changed files with 391 additions and 271 deletions
-208
View File
@@ -188,115 +188,6 @@ func DatabaseRootEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
}
type WorkingSet struct {
_tab flatbuffers.Table
}
func GetRootAsWorkingSet(buf []byte, offset flatbuffers.UOffsetT) *WorkingSet {
n := flatbuffers.GetUOffsetT(buf[offset:])
x := &WorkingSet{}
x.Init(buf, n+offset)
return x
}
func GetSizePrefixedRootAsWorkingSet(buf []byte, offset flatbuffers.UOffsetT) *WorkingSet {
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
x := &WorkingSet{}
x.Init(buf, n+offset+flatbuffers.SizeUint32)
return x
}
func (rcv *WorkingSet) Init(buf []byte, i flatbuffers.UOffsetT) {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
}
func (rcv *WorkingSet) Table() flatbuffers.Table {
return rcv._tab
}
func (rcv *WorkingSet) Name() []byte {
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
if o != 0 {
return rcv._tab.ByteVector(o + rcv._tab.Pos)
}
return nil
}
func (rcv *WorkingSet) WorkingRoot(obj *Ref) *Ref {
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
if o != 0 {
x := rcv._tab.Indirect(o + rcv._tab.Pos)
if obj == nil {
obj = new(Ref)
}
obj.Init(rcv._tab.Bytes, x)
return obj
}
return nil
}
func (rcv *WorkingSet) StagedRoot(obj *Ref) *Ref {
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
if o != 0 {
x := rcv._tab.Indirect(o + rcv._tab.Pos)
if obj == nil {
obj = new(Ref)
}
obj.Init(rcv._tab.Bytes, x)
return obj
}
return nil
}
func (rcv *WorkingSet) MergeState(obj *MergeState) *MergeState {
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
x := rcv._tab.Indirect(o + rcv._tab.Pos)
if obj == nil {
obj = new(MergeState)
}
obj.Init(rcv._tab.Bytes, x)
return obj
}
return nil
}
func (rcv *WorkingSet) Meta(obj *WorkingSetMeta) *WorkingSetMeta {
o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
if o != 0 {
x := rcv._tab.Indirect(o + rcv._tab.Pos)
if obj == nil {
obj = new(WorkingSetMeta)
}
obj.Init(rcv._tab.Bytes, x)
return obj
}
return nil
}
func WorkingSetStart(builder *flatbuffers.Builder) {
builder.StartObject(5)
}
func WorkingSetAddName(builder *flatbuffers.Builder, name flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(name), 0)
}
func WorkingSetAddWorkingRoot(builder *flatbuffers.Builder, workingRoot flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(workingRoot), 0)
}
func WorkingSetAddStagedRoot(builder *flatbuffers.Builder, stagedRoot flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(stagedRoot), 0)
}
func WorkingSetAddMergeState(builder *flatbuffers.Builder, mergeState flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(mergeState), 0)
}
func WorkingSetAddMeta(builder *flatbuffers.Builder, meta flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(meta), 0)
}
func WorkingSetEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
}
type MergeState struct {
_tab flatbuffers.Table
}
@@ -363,105 +254,6 @@ func MergeStateEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
}
type WorkingSetMeta struct {
_tab flatbuffers.Table
}
func GetRootAsWorkingSetMeta(buf []byte, offset flatbuffers.UOffsetT) *WorkingSetMeta {
n := flatbuffers.GetUOffsetT(buf[offset:])
x := &WorkingSetMeta{}
x.Init(buf, n+offset)
return x
}
func GetSizePrefixedRootAsWorkingSetMeta(buf []byte, offset flatbuffers.UOffsetT) *WorkingSetMeta {
n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
x := &WorkingSetMeta{}
x.Init(buf, n+offset+flatbuffers.SizeUint32)
return x
}
func (rcv *WorkingSetMeta) Init(buf []byte, i flatbuffers.UOffsetT) {
rcv._tab.Bytes = buf
rcv._tab.Pos = i
}
func (rcv *WorkingSetMeta) Table() flatbuffers.Table {
return rcv._tab
}
func (rcv *WorkingSetMeta) Name() []byte {
o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
if o != 0 {
return rcv._tab.ByteVector(o + rcv._tab.Pos)
}
return nil
}
func (rcv *WorkingSetMeta) Email() []byte {
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
if o != 0 {
return rcv._tab.ByteVector(o + rcv._tab.Pos)
}
return nil
}
func (rcv *WorkingSetMeta) Desc() []byte {
o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
if o != 0 {
return rcv._tab.ByteVector(o + rcv._tab.Pos)
}
return nil
}
func (rcv *WorkingSetMeta) Timestamp(obj *Timestamp) *Timestamp {
o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
x := o + rcv._tab.Pos
if obj == nil {
obj = new(Timestamp)
}
obj.Init(rcv._tab.Bytes, x)
return obj
}
return nil
}
func (rcv *WorkingSetMeta) UserTimestamp(obj *Timestamp) *Timestamp {
o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
if o != 0 {
x := o + rcv._tab.Pos
if obj == nil {
obj = new(Timestamp)
}
obj.Init(rcv._tab.Bytes, x)
return obj
}
return nil
}
func WorkingSetMetaStart(builder *flatbuffers.Builder) {
builder.StartObject(5)
}
func WorkingSetMetaAddName(builder *flatbuffers.Builder, name flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(name), 0)
}
func WorkingSetMetaAddEmail(builder *flatbuffers.Builder, email flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(email), 0)
}
func WorkingSetMetaAddDesc(builder *flatbuffers.Builder, desc flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(desc), 0)
}
func WorkingSetMetaAddTimestamp(builder *flatbuffers.Builder, timestamp flatbuffers.UOffsetT) {
builder.PrependStructSlot(3, flatbuffers.UOffsetT(timestamp), 0)
}
func WorkingSetMetaAddUserTimestamp(builder *flatbuffers.Builder, userTimestamp flatbuffers.UOffsetT) {
builder.PrependStructSlot(4, flatbuffers.UOffsetT(userTimestamp), 0)
}
func WorkingSetMetaEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
}
type Commit struct {
_tab flatbuffers.Table
}
+1
View File
@@ -18,6 +18,7 @@ package serial
const StoreRootFileID = "STRT"
const TagFileID = "DTAG"

// WorkingSetFileID is the 4-byte flatbuffers file identifier for serialized
// working set messages; keep in sync with file_identifier in workingset.fbs.
const WorkingSetFileID = "WRST"
func GetFileID(bs []byte) string {
if len(bs) < 8 {
+223
View File
@@ -0,0 +1,223 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by the FlatBuffers compiler. DO NOT EDIT.
package serial
import (
flatbuffers "github.com/google/flatbuffers/go"
)
// WorkingSet is the generated flatbuffers accessor for the WorkingSet table
// defined in workingset.fbs. It wraps a flatbuffers.Table positioned at the
// table's root within the serialized buffer.
type WorkingSet struct {
	_tab flatbuffers.Table
}

// GetRootAsWorkingSet returns a WorkingSet accessor over the root table of
// the flatbuffers message stored in buf at the given offset.
func GetRootAsWorkingSet(buf []byte, offset flatbuffers.UOffsetT) *WorkingSet {
	n := flatbuffers.GetUOffsetT(buf[offset:])
	x := &WorkingSet{}
	x.Init(buf, n+offset)
	return x
}

// GetSizePrefixedRootAsWorkingSet is like GetRootAsWorkingSet, but for
// buffers that begin with a uint32 size prefix.
func GetSizePrefixedRootAsWorkingSet(buf []byte, offset flatbuffers.UOffsetT) *WorkingSet {
	n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:])
	x := &WorkingSet{}
	x.Init(buf, n+offset+flatbuffers.SizeUint32)
	return x
}

// Init repositions the accessor over buf with the table at position i.
func (rcv *WorkingSet) Init(buf []byte, i flatbuffers.UOffsetT) {
	rcv._tab.Bytes = buf
	rcv._tab.Pos = i
}

// Table returns the underlying flatbuffers.Table.
func (rcv *WorkingSet) Table() flatbuffers.Table {
	return rcv._tab
}
// WorkingRootAddr returns byte j of the working_root_addr vector, the
// address of the working root value. Returns 0 if the field is absent.
func (rcv *WorkingSet) WorkingRootAddr(j int) byte {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
	if o != 0 {
		a := rcv._tab.Vector(o)
		return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1))
	}
	return 0
}

// WorkingRootAddrLength returns the length of the working_root_addr vector,
// or 0 if the field is absent.
func (rcv *WorkingSet) WorkingRootAddrLength() int {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
	if o != 0 {
		return rcv._tab.VectorLen(o)
	}
	return 0
}

// WorkingRootAddrBytes returns the working_root_addr vector as a []byte
// aliasing the underlying buffer, or nil if the field is absent.
func (rcv *WorkingSet) WorkingRootAddrBytes() []byte {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
	if o != 0 {
		return rcv._tab.ByteVector(o + rcv._tab.Pos)
	}
	return nil
}

// MutateWorkingRootAddr overwrites byte j of the working_root_addr vector in
// place, returning false if the field is absent.
func (rcv *WorkingSet) MutateWorkingRootAddr(j int, n byte) bool {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
	if o != 0 {
		a := rcv._tab.Vector(o)
		return rcv._tab.MutateByte(a+flatbuffers.UOffsetT(j*1), n)
	}
	return false
}
// StagedRootAddr returns byte j of the staged_root_addr vector, the address
// of the staged root value. Returns 0 if the field is absent (the field is
// optional in the schema).
func (rcv *WorkingSet) StagedRootAddr(j int) byte {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
	if o != 0 {
		a := rcv._tab.Vector(o)
		return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1))
	}
	return 0
}

// StagedRootAddrLength returns the length of the staged_root_addr vector, or
// 0 if the field is absent.
func (rcv *WorkingSet) StagedRootAddrLength() int {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
	if o != 0 {
		return rcv._tab.VectorLen(o)
	}
	return 0
}

// StagedRootAddrBytes returns the staged_root_addr vector as a []byte
// aliasing the underlying buffer, or nil if the field is absent.
func (rcv *WorkingSet) StagedRootAddrBytes() []byte {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
	if o != 0 {
		return rcv._tab.ByteVector(o + rcv._tab.Pos)
	}
	return nil
}

// MutateStagedRootAddr overwrites byte j of the staged_root_addr vector in
// place, returning false if the field is absent.
func (rcv *WorkingSet) MutateStagedRootAddr(j int, n byte) bool {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
	if o != 0 {
		a := rcv._tab.Vector(o)
		return rcv._tab.MutateByte(a+flatbuffers.UOffsetT(j*1), n)
	}
	return false
}
// MergeStateAddr returns byte j of the merge_state_addr vector, the address
// of the merge state. Returns 0 if the field is absent (the field is
// optional in the schema).
func (rcv *WorkingSet) MergeStateAddr(j int) byte {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
	if o != 0 {
		a := rcv._tab.Vector(o)
		return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1))
	}
	return 0
}

// MergeStateAddrLength returns the length of the merge_state_addr vector, or
// 0 if the field is absent.
func (rcv *WorkingSet) MergeStateAddrLength() int {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
	if o != 0 {
		return rcv._tab.VectorLen(o)
	}
	return 0
}

// MergeStateAddrBytes returns the merge_state_addr vector as a []byte
// aliasing the underlying buffer, or nil if the field is absent.
func (rcv *WorkingSet) MergeStateAddrBytes() []byte {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
	if o != 0 {
		return rcv._tab.ByteVector(o + rcv._tab.Pos)
	}
	return nil
}

// MutateMergeStateAddr overwrites byte j of the merge_state_addr vector in
// place, returning false if the field is absent.
func (rcv *WorkingSet) MutateMergeStateAddr(j int, n byte) bool {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
	if o != 0 {
		a := rcv._tab.Vector(o)
		return rcv._tab.MutateByte(a+flatbuffers.UOffsetT(j*1), n)
	}
	return false
}
// Name returns the name field as a []byte aliasing the underlying buffer,
// or nil if the field is absent.
func (rcv *WorkingSet) Name() []byte {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
	if o != 0 {
		return rcv._tab.ByteVector(o + rcv._tab.Pos)
	}
	return nil
}

// Email returns the email field as a []byte aliasing the underlying buffer,
// or nil if the field is absent.
func (rcv *WorkingSet) Email() []byte {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
	if o != 0 {
		return rcv._tab.ByteVector(o + rcv._tab.Pos)
	}
	return nil
}

// Desc returns the desc field as a []byte aliasing the underlying buffer,
// or nil if the field is absent.
func (rcv *WorkingSet) Desc() []byte {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
	if o != 0 {
		return rcv._tab.ByteVector(o + rcv._tab.Pos)
	}
	return nil
}

// TimestampMillis returns the timestamp_millis field, or 0 if the field is
// absent (0 is the schema default).
func (rcv *WorkingSet) TimestampMillis() uint64 {
	o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
	if o != 0 {
		return rcv._tab.GetUint64(o + rcv._tab.Pos)
	}
	return 0
}

// MutateTimestampMillis overwrites the timestamp_millis field in place,
// returning false if the field is absent from the buffer.
func (rcv *WorkingSet) MutateTimestampMillis(n uint64) bool {
	return rcv._tab.MutateUint64Slot(16, n)
}
// WorkingSetStart begins construction of a WorkingSet table with 7 fields.
// Vectors and strings must already have been created on the builder.
func WorkingSetStart(builder *flatbuffers.Builder) {
	builder.StartObject(7)
}

// WorkingSetAddWorkingRootAddr writes the working_root_addr vector offset
// into field slot 0.
func WorkingSetAddWorkingRootAddr(builder *flatbuffers.Builder, workingRootAddr flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(workingRootAddr), 0)
}

// WorkingSetStartWorkingRootAddrVector starts a byte vector of numElems
// elements for the working_root_addr field.
func WorkingSetStartWorkingRootAddrVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(1, numElems, 1)
}

// WorkingSetAddStagedRootAddr writes the staged_root_addr vector offset into
// field slot 1.
func WorkingSetAddStagedRootAddr(builder *flatbuffers.Builder, stagedRootAddr flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(stagedRootAddr), 0)
}

// WorkingSetStartStagedRootAddrVector starts a byte vector of numElems
// elements for the staged_root_addr field.
func WorkingSetStartStagedRootAddrVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(1, numElems, 1)
}

// WorkingSetAddMergeStateAddr writes the merge_state_addr vector offset into
// field slot 2.
func WorkingSetAddMergeStateAddr(builder *flatbuffers.Builder, mergeStateAddr flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(mergeStateAddr), 0)
}

// WorkingSetStartMergeStateAddrVector starts a byte vector of numElems
// elements for the merge_state_addr field.
func WorkingSetStartMergeStateAddrVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
	return builder.StartVector(1, numElems, 1)
}

// WorkingSetAddName writes the name string offset into field slot 3.
func WorkingSetAddName(builder *flatbuffers.Builder, name flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(name), 0)
}

// WorkingSetAddEmail writes the email string offset into field slot 4.
func WorkingSetAddEmail(builder *flatbuffers.Builder, email flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(email), 0)
}

// WorkingSetAddDesc writes the desc string offset into field slot 5.
func WorkingSetAddDesc(builder *flatbuffers.Builder, desc flatbuffers.UOffsetT) {
	builder.PrependUOffsetTSlot(5, flatbuffers.UOffsetT(desc), 0)
}

// WorkingSetAddTimestampMillis writes the timestamp_millis value into field
// slot 6 (omitted from the buffer when equal to the default 0).
func WorkingSetAddTimestampMillis(builder *flatbuffers.Builder, timestampMillis uint64) {
	builder.PrependUint64Slot(6, timestampMillis, 0)
}

// WorkingSetEnd finishes the table and returns its offset.
func WorkingSetEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
	return builder.EndObject()
}
-18
View File
@@ -36,29 +36,11 @@ table DatabaseRoot {
foreign_keys:[ForeignKey] (required);
}
table WorkingSet {
// todo(andy): should |name| be included?
name:string (required);
working_root:Ref (required);
staged_root:Ref (required);
merge_state:MergeState;
meta:WorkingSetMeta (required);
}
table MergeState {
pre_merge_root:Ref (required);
candidate_merge_commit:Ref (required);
}
table WorkingSetMeta {
name:string (required);
email:string (required);
// todo(andy): is description required?
desc:string;
timestamp:Timestamp (required);
user_timestamp:Timestamp (required);
}
table Commit {
root:Ref (required);
parent_list:[Ref] (required);
+1
View File
@@ -18,6 +18,7 @@ package serial
const StoreRootFileID = "STRT"
const TagFileID = "DTAG"

// WorkingSetFileID is the 4-byte flatbuffers file identifier for serialized
// working set messages; keep in sync with file_identifier in workingset.fbs.
const WorkingSetFileID = "WRST"
func GetFileID(bs []byte) string {
if len(bs) < 8 {
+2 -1
View File
@@ -12,7 +12,8 @@ flatc -o $GEN_DIR --gen-onefile --filename-suffix "" --gen-mutable --go-namespac
schema.fbs \
storeroot.fbs \
tag.fbs \
table.fbs
table.fbs \
workingset.fbs
# prefix files with copyright header
for FILE in $GEN_DIR/*.go;
+32
View File
@@ -0,0 +1,32 @@
// Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// WorkingSet is the root table of a serialized working set message.
table WorkingSet {
  // 20-byte hashes of root values.
  // Address of the current working root value.
  working_root_addr:[ubyte] (required);
  // Address of the staged root value; optional — readers treat a missing or
  // empty vector as "no staged root".
  staged_root_addr:[ubyte];
  // 20-byte hash of the merge state.
  // Optional — absent when no merge is in progress.
  merge_state_addr:[ubyte];
  // Meta
  name:string (required);
  email:string (required);
  desc:string (required);
  // Milliseconds; 0 (the default) when unset.
  timestamp_millis:uint64;
}

// KEEP THIS IN SYNC WITH fileidentifiers.go
file_identifier "WRST";
root_type WorkingSet;
+1
View File
@@ -50,6 +50,7 @@ func (ms *MemoryStorage) NewView() ChunkStore {
if version == "" {
version = constants.Format718String
}
version = "__DOLT_1__"
return &MemoryStoreView{storage: ms, rootHash: ms.rootHash, version: version}
}
+7 -38
View File
@@ -611,12 +611,11 @@ func (db *database) UpdateWorkingSet(ctx context.Context, ds Dataset, workingSet
ctx,
ds,
func(ds Dataset) error {
workspace, err := newWorkingSet(ctx, workingSet.Meta, workingSet.WorkingRoot, workingSet.StagedRoot, workingSet.MergeState)
addr, ref, err := newWorkingSet(ctx, db, workingSet.Meta, workingSet.WorkingRoot, workingSet.StagedRoot, workingSet.MergeState)
if err != nil {
return err
}
return db.doUpdateWorkingSet(ctx, ds.ID(), workspace, prevHash)
return db.doUpdateWorkingSet(ctx, ds.ID(), addr, ref, prevHash)
},
)
}
@@ -626,22 +625,7 @@ func (db *database) UpdateWorkingSet(ctx context.Context, ds Dataset, workingSet
// compare-and-set for the current target hash of the datasets entry, and will
// return an error if the application is working with a stale value for the
// workingset.
func (db *database) doUpdateWorkingSet(ctx context.Context, datasetID string, workingSet types.Struct, currHash hash.Hash) error {
err := db.validateWorkingSet(workingSet)
if err != nil {
return err
}
workingSetRef, err := db.WriteValue(ctx, workingSet)
if err != nil {
return err
}
wsValRef, err := types.ToRefOfValue(workingSetRef, db.Format())
if err != nil {
return err
}
func (db *database) doUpdateWorkingSet(ctx context.Context, datasetID string, addr hash.Hash, ref types.Ref, currHash hash.Hash) error {
return db.update(ctx, func(ctx context.Context, datasets types.Map) (types.Map, error) {
success, err := assertDatasetHash(ctx, datasets, datasetID, currHash)
if err != nil {
@@ -651,13 +635,13 @@ func (db *database) doUpdateWorkingSet(ctx context.Context, datasetID string, wo
return types.Map{}, ErrOptimisticLockFailed
}
return datasets.Edit().Set(types.String(datasetID), wsValRef).Map(ctx)
return datasets.Edit().Set(types.String(datasetID), ref).Map(ctx)
}, func(ctx context.Context, rm refmap) (refmap, error) {
curr := rm.lookup(datasetID)
if curr != currHash {
return refmap{}, ErrOptimisticLockFailed
}
rm.set(datasetID, wsValRef.TargetHash())
rm.set(datasetID, addr)
return rm, nil
})
}
@@ -689,22 +673,7 @@ func (db *database) CommitWithWorkingSet(
val types.Value, workingSetSpec WorkingSetSpec,
prevWsHash hash.Hash, opts CommitOptions,
) (Dataset, Dataset, error) {
workingSet, err := newWorkingSet(ctx, workingSetSpec.Meta, workingSetSpec.WorkingRoot, workingSetSpec.StagedRoot, workingSetSpec.MergeState)
if err != nil {
return Dataset{}, Dataset{}, err
}
err = db.validateWorkingSet(workingSet)
if err != nil {
return Dataset{}, Dataset{}, err
}
workingSetRef, err := db.WriteValue(ctx, workingSet)
if err != nil {
return Dataset{}, Dataset{}, err
}
wsValRef, err := types.ToRefOfValue(workingSetRef, db.Format())
wsAddr, wsValRef, err := newWorkingSet(ctx, db, workingSetSpec.Meta, workingSetSpec.WorkingRoot, workingSetSpec.StagedRoot, workingSetSpec.MergeState)
if err != nil {
return Dataset{}, Dataset{}, err
}
@@ -774,7 +743,7 @@ func (db *database) CommitWithWorkingSet(
return refmap{}, ErrMergeNeeded
}
rm.set(commitDS.ID(), commitValRef.TargetHash())
rm.set(workingSetDS.ID(), wsValRef.TargetHash())
rm.set(workingSetDS.ID(), wsAddr)
return rm, nil
})
+44
View File
@@ -100,6 +100,47 @@ func (h serialTagHead) HeadWorkingSet() (*WorkingSetHead, error) {
return nil, errors.New("HeadWorkingSet called on tag")
}
// serialWorkingSetHead is a dataset head backed by a flatbuffers
// serial.WorkingSet message rather than a noms types.Struct.
type serialWorkingSetHead struct {
	// msg is the decoded flatbuffers accessor over the chunk bytes.
	msg *serial.WorkingSet
	// addr is the hash of the chunk the message was read from.
	addr hash.Hash
}

// newSerialWorkingSetHead wraps raw chunk bytes |bs|, which must be a
// flatbuffers WorkingSet message, along with the chunk address.
func newSerialWorkingSetHead(bs []byte, addr hash.Hash) serialWorkingSetHead {
	return serialWorkingSetHead{serial.GetRootAsWorkingSet(bs, 0), addr}
}

// TypeName identifies this head as a working set.
func (h serialWorkingSetHead) TypeName() string {
	return WorkingSetName
}

// Addr returns the hash of the chunk this head was loaded from.
func (h serialWorkingSetHead) Addr() hash.Hash {
	return h.addr
}

// HeadTag is not applicable to working set heads.
func (h serialWorkingSetHead) HeadTag() (*TagMeta, hash.Hash, error) {
	return nil, hash.Hash{}, errors.New("HeadTag called on working set")
}

// HeadWorkingSet decodes the flatbuffers message into a WorkingSetHead. The
// staged root and merge state addresses are optional in the message; when a
// vector is absent or empty the corresponding pointer is left nil.
// NOTE(review): hash.New presumably requires the address vectors to be
// exactly hash-length (20 bytes) — confirm against the writer.
func (h serialWorkingSetHead) HeadWorkingSet() (*WorkingSetHead, error) {
	var ret WorkingSetHead
	ret.Meta = &WorkingSetMeta{
		Name:        string(h.msg.Name()),
		Email:       string(h.msg.Email()),
		Timestamp:   h.msg.TimestampMillis(),
		Description: string(h.msg.Desc()),
	}
	ret.WorkingAddr = hash.New(h.msg.WorkingRootAddrBytes())
	if h.msg.StagedRootAddrLength() != 0 {
		ret.StagedAddr = new(hash.Hash)
		*ret.StagedAddr = hash.New(h.msg.StagedRootAddrBytes())
	}
	if h.msg.MergeStateAddrLength() != 0 {
		ret.MergeStateAddr = new(hash.Hash)
		*ret.MergeStateAddr = hash.New(h.msg.MergeStateAddrBytes())
	}
	return &ret, nil
}
// Dataset is a named value within a Database. Different head values may be stored in a dataset. Most commonly, this is
// a commit, but other values are also supported in some cases.
type Dataset struct {
@@ -116,6 +157,9 @@ func newHead(db *database, c chunks.Chunk) (dsHead, error) {
if serial.GetFileID(c.Data()) == serial.TagFileID {
return newSerialTagHead(c.Data(), c.Hash()), nil
}
if serial.GetFileID(c.Data()) == serial.WorkingSetFileID {
return newSerialWorkingSetHead(c.Data(), c.Hash()), nil
}
head, err := types.DecodeValue(c, db)
if err != nil {
+80 -6
View File
@@ -17,6 +17,11 @@ package datas
import (
"context"
"github.com/google/flatbuffers/go"
"github.com/dolthub/dolt/go/gen/fb/serial"
"github.com/dolthub/dolt/go/store/chunks"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/types"
)
@@ -46,10 +51,10 @@ const (
const workingSetMetaVersion = "1.0"
type WorkingSetMeta struct {
Name string
Email string
Name string
Email string
Description string
Timestamp uint64
Timestamp uint64
}
func (m *WorkingSetMeta) toNomsStruct(format *types.NomsBinFormat) (types.Struct, error) {
@@ -94,10 +99,27 @@ type WorkingSetSpec struct {
// }
// ```
// where M is a struct type and R is a ref type.
func newWorkingSet(_ context.Context, meta *WorkingSetMeta, workingRef, stagedRef types.Ref, mergeStateRef *types.Ref) (types.Struct, error) {
func newWorkingSet(ctx context.Context, db *database, meta *WorkingSetMeta, workingRef, stagedRef types.Ref, mergeStateRef *types.Ref) (hash.Hash, types.Ref, error) {
if db.Format() == types.Format_DOLT_1 {
stagedAddr := stagedRef.TargetHash()
var mergeStateAddr *hash.Hash
if mergeStateRef != nil {
mergeStateAddr = new(hash.Hash)
*mergeStateAddr = mergeStateRef.TargetHash()
}
data := workingset_flatbuffer(workingRef.TargetHash(), &stagedAddr, mergeStateAddr, meta)
chunk := chunks.NewChunk(data)
err := db.chunkStore().Put(ctx, chunk)
if err != nil {
return hash.Hash{}, types.Ref{}, err
}
return chunk.Hash(), types.Ref{}, nil
}
metaSt, err := meta.toNomsStruct(workingRef.Format())
if err != nil {
return types.Struct{}, err
return hash.Hash{}, types.Ref{}, err
}
fields := make(types.StructData)
@@ -109,7 +131,59 @@ func newWorkingSet(_ context.Context, meta *WorkingSetMeta, workingRef, stagedRe
fields[MergeStateField] = mergeStateRef
}
return types.NewStruct(workingRef.Format(), WorkingSetName, fields)
st, err := types.NewStruct(workingRef.Format(), WorkingSetName, fields)
if err != nil {
return hash.Hash{}, types.Ref{}, err
}
wsRef, err := db.WriteValue(ctx, st)
if err != nil {
return hash.Hash{}, types.Ref{}, err
}
ref, err := types.ToRefOfValue(wsRef, db.Format())
if err != nil {
return hash.Hash{}, types.Ref{}, err
}
return ref.TargetHash(), ref, nil
}
// workingset_flatbuffer serializes a working set into a flatbuffers message
// finished with the WRST file identifier. |working| is required; |staged|,
// |mergeState| and |meta| are optional, and their fields are simply omitted
// from the message when nil.
func workingset_flatbuffer(working hash.Hash, staged, mergeState *hash.Hash, meta *WorkingSetMeta) []byte {
	b := flatbuffers.NewBuilder(1024)

	// All vectors and strings must be created before WorkingSetStart.
	workingAddrOff := b.CreateByteVector(working[:])

	var stagedAddrOff, mergeAddrOff flatbuffers.UOffsetT
	if staged != nil {
		stagedAddrOff = b.CreateByteVector(staged[:])
	}
	if mergeState != nil {
		mergeAddrOff = b.CreateByteVector(mergeState[:])
	}

	var nameOff, emailOff, descOff flatbuffers.UOffsetT
	if meta != nil {
		nameOff = b.CreateString(meta.Name)
		emailOff = b.CreateString(meta.Email)
		descOff = b.CreateString(meta.Description)
	}

	serial.WorkingSetStart(b)
	serial.WorkingSetAddWorkingRootAddr(b, workingAddrOff)
	if stagedAddrOff != 0 {
		serial.WorkingSetAddStagedRootAddr(b, stagedAddrOff)
	}
	if mergeAddrOff != 0 {
		serial.WorkingSetAddMergeStateAddr(b, mergeAddrOff)
	}
	if meta != nil {
		serial.WorkingSetAddName(b, nameOff)
		serial.WorkingSetAddEmail(b, emailOff)
		serial.WorkingSetAddDesc(b, descOff)
		serial.WorkingSetAddTimestampMillis(b, meta.Timestamp)
	}
	b.FinishWithFileIdentifier(serial.WorkingSetEnd(b), []byte(serial.WorkingSetFileID))
	return b.FinishedBytes()
}
func NewMergeState(_ context.Context, preMergeWorking types.Ref, commit types.Struct) (types.Struct, error) {