Merge pull request #1497 from dolthub/db/fix-leveling

[no-release-notes] /go: add manifest appendix to support level-ordered tablefiles
This commit is contained in:
Dustin Brown
2021-04-02 13:05:06 -07:00
committed by GitHub
27 changed files with 1267 additions and 286 deletions

View File

@@ -23,14 +23,13 @@
package eventsapi
import (
reflect "reflect"
sync "sync"
proto "github.com/golang/protobuf/proto"
duration "github.com/golang/protobuf/ptypes/duration"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (

View File

@@ -4,7 +4,6 @@ package eventsapi
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"

View File

@@ -23,12 +23,11 @@
package eventsapi
import (
reflect "reflect"
sync "sync"
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (

View File

@@ -21,12 +21,11 @@
package remotesapi
import (
reflect "reflect"
sync "sync"
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
@@ -40,6 +39,55 @@ const (
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// ManifestAppendixOption selects how the table files supplied in an
// AddTableFilesRequest interact with the manifest appendix (see the
// appendix_option field comment on AddTableFilesRequest).
type ManifestAppendixOption int32
const (
// No appendix behavior requested (proto3 zero value).
ManifestAppendixOption_MANIFEST_APPENDIX_OPTION_UNSPECIFIED ManifestAppendixOption = 0
// SET: the appendix becomes exactly the supplied list of table files;
// prior appendix specs are removed from the manifest specs.
ManifestAppendixOption_MANIFEST_APPENDIX_OPTION_SET ManifestAppendixOption = 1
// APPEND: the supplied table files are added to the existing appendix
// and to the manifest specs.
ManifestAppendixOption_MANIFEST_APPENDIX_OPTION_APPEND ManifestAppendixOption = 2
)
// Enum value maps for ManifestAppendixOption.
var (
// ManifestAppendixOption_name maps enum numbers to their proto identifiers.
ManifestAppendixOption_name = map[int32]string{
0: "MANIFEST_APPENDIX_OPTION_UNSPECIFIED",
1: "MANIFEST_APPENDIX_OPTION_SET",
2: "MANIFEST_APPENDIX_OPTION_APPEND",
}
// ManifestAppendixOption_value is the inverse mapping, proto identifier
// to enum number.
ManifestAppendixOption_value = map[string]int32{
"MANIFEST_APPENDIX_OPTION_UNSPECIFIED": 0,
"MANIFEST_APPENDIX_OPTION_SET": 1,
"MANIFEST_APPENDIX_OPTION_APPEND": 2,
}
)
// Enum returns a pointer to a new copy of x. Generated for use where an
// optional (pointer-typed) enum value is needed.
func (x ManifestAppendixOption) Enum() *ManifestAppendixOption {
p := new(ManifestAppendixOption)
*p = x
return p
}
// String returns the proto name of the enum value (e.g.
// "MANIFEST_APPENDIX_OPTION_SET"), delegating to the protoimpl runtime.
func (x ManifestAppendixOption) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
// Descriptor returns the protoreflect descriptor for this enum, taken from
// the file's generated enum-type table (index 0).
func (ManifestAppendixOption) Descriptor() protoreflect.EnumDescriptor {
return file_dolt_services_remotesapi_v1alpha1_chunkstore_proto_enumTypes[0].Descriptor()
}
// Type returns the protoreflect.EnumType for this enum from the file's
// generated enum-type table (index 0).
func (ManifestAppendixOption) Type() protoreflect.EnumType {
return &file_dolt_services_remotesapi_v1alpha1_chunkstore_proto_enumTypes[0]
}
// Number returns x as a protoreflect.EnumNumber (its wire value).
func (x ManifestAppendixOption) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// EnumDescriptor returns the gzip-compressed raw file descriptor and the
// path ([]int{0}) locating this enum within it. Kept for legacy callers.
//
// Deprecated: Use ManifestAppendixOption.Descriptor instead.
func (ManifestAppendixOption) EnumDescriptor() ([]byte, []int) {
return file_dolt_services_remotesapi_v1alpha1_chunkstore_proto_rawDescGZIP(), []int{0}
}
type RepoId struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -1392,7 +1440,8 @@ type ListTableFilesRequest struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
RepoId *RepoId `protobuf:"bytes,1,opt,name=repo_id,json=repoId,proto3" json:"repo_id,omitempty"`
RepoId *RepoId `protobuf:"bytes,1,opt,name=repo_id,json=repoId,proto3" json:"repo_id,omitempty"`
AppendixOnly bool `protobuf:"varint,2,opt,name=appendix_only,json=appendixOnly,proto3" json:"appendix_only,omitempty"`
}
func (x *ListTableFilesRequest) Reset() {
@@ -1434,6 +1483,13 @@ func (x *ListTableFilesRequest) GetRepoId() *RepoId {
return nil
}
// GetAppendixOnly returns the appendix_only field, or false when the
// receiver is nil (standard nil-safe generated getter).
func (x *ListTableFilesRequest) GetAppendixOnly() bool {
if x != nil {
return x.AppendixOnly
}
return false
}
type TableFileInfo struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -1560,6 +1616,13 @@ type AddTableFilesRequest struct {
RepoId *RepoId `protobuf:"bytes,1,opt,name=repo_id,json=repoId,proto3" json:"repo_id,omitempty"`
ClientRepoFormat *ClientRepoFormat `protobuf:"bytes,2,opt,name=client_repo_format,json=clientRepoFormat,proto3" json:"client_repo_format,omitempty"`
ChunkTableInfo []*ChunkTableInfo `protobuf:"bytes,3,rep,name=chunk_table_info,json=chunkTableInfo,proto3" json:"chunk_table_info,omitempty"`
// If set, this is a write for the manifest appendix, not just the manifest table file specs.
// The table files appearing in `chunk_table_info` are added to `specs` and are also set
// in the manifest appendix. If `appendix_option` is `SET`, then the value of the appendix
// becomes the full list provided in `chunk_table_info` and any prior specs in the appendix
are removed from the manifest specs. If `appendix_option` is `APPEND`, then the
// supplied table files are added to the appendix and to specs.
AppendixOption ManifestAppendixOption `protobuf:"varint,4,opt,name=appendix_option,json=appendixOption,proto3,enum=dolt.services.remotesapi.v1alpha1.ManifestAppendixOption" json:"appendix_option,omitempty"`
}
func (x *AddTableFilesRequest) Reset() {
@@ -1615,6 +1678,13 @@ func (x *AddTableFilesRequest) GetChunkTableInfo() []*ChunkTableInfo {
return nil
}
// GetAppendixOption returns the appendix_option field, or
// MANIFEST_APPENDIX_OPTION_UNSPECIFIED (the zero value) when the receiver
// is nil (standard nil-safe generated getter).
func (x *AddTableFilesRequest) GetAppendixOption() ManifestAppendixOption {
if x != nil {
return x.AppendixOption
}
return ManifestAppendixOption_MANIFEST_APPENDIX_OPTION_UNSPECIFIED
}
type AddTableFilesResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -1832,136 +1902,153 @@ var file_dolt_services_remotesapi_v1alpha1_chunkstore_proto_rawDesc = []byte{
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6e, 0x62, 0x66, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x62, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6e, 0x62, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69,
0x6f, 0x6e, 0x22, 0x5b, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46,
0x6f, 0x6e, 0x22, 0x80, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65,
0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x07,
0x72, 0x65, 0x70, 0x6f, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e,
0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65,
0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
0x31, 0x2e, 0x52, 0x65, 0x70, 0x6f, 0x49, 0x64, 0x52, 0x06, 0x72, 0x65, 0x70, 0x6f, 0x49, 0x64,
0x12, 0x23, 0x0a, 0x0d, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x78, 0x5f, 0x6f, 0x6e, 0x6c,
0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x69,
0x78, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x59, 0x0a, 0x0d, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69,
0x6c, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69,
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12,
0x1d, 0x0a, 0x0a, 0x6e, 0x75, 0x6d, 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x02, 0x20,
0x01, 0x28, 0x0d, 0x52, 0x09, 0x6e, 0x75, 0x6d, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x12, 0x10,
0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c,
0x22, 0x8f, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69,
0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x72,
0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08,
0x72, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x58, 0x0a, 0x0f, 0x74, 0x61, 0x62, 0x6c,
0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x03, 0x28,
0x0b, 0x32, 0x30, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61,
0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x49,
0x6e, 0x66, 0x6f, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x6e,
0x66, 0x6f, 0x22, 0xfe, 0x02, 0x0a, 0x14, 0x41, 0x64, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46,
0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x07, 0x72,
0x65, 0x70, 0x6f, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x64,
0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d,
0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
0x2e, 0x52, 0x65, 0x70, 0x6f, 0x49, 0x64, 0x52, 0x06, 0x72, 0x65, 0x70, 0x6f, 0x49, 0x64, 0x22,
0x59, 0x0a, 0x0d, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x6e, 0x66, 0x6f,
0x12, 0x17, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x75, 0x6d,
0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6e,
0x75, 0x6d, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18,
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x8f, 0x01, 0x0a, 0x16, 0x4c,
0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61,
0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x6f, 0x6f, 0x74, 0x48, 0x61,
0x73, 0x68, 0x12, 0x58, 0x0a, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65,
0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x64, 0x6f,
0x2e, 0x52, 0x65, 0x70, 0x6f, 0x49, 0x64, 0x52, 0x06, 0x72, 0x65, 0x70, 0x6f, 0x49, 0x64, 0x12,
0x61, 0x0a, 0x12, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x5f, 0x66,
0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x64, 0x6f,
0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f,
0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0d, 0x74,
0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x9a, 0x02, 0x0a,
0x14, 0x41, 0x64, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x07, 0x72, 0x65, 0x70, 0x6f, 0x5f, 0x69, 0x64,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70,
0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6f, 0x49,
0x64, 0x52, 0x06, 0x72, 0x65, 0x70, 0x6f, 0x49, 0x64, 0x12, 0x61, 0x0a, 0x12, 0x63, 0x6c, 0x69,
0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72,
0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74,
0x52, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x46, 0x6f, 0x72, 0x6d,
0x61, 0x74, 0x12, 0x5b, 0x0a, 0x10, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x74, 0x61, 0x62, 0x6c,
0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x64,
0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d,
0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
0x2e, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52,
0x0e, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12,
0x62, 0x0a, 0x0f, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x78, 0x5f, 0x6f, 0x70, 0x74, 0x69,
0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e,
0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73,
0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4d, 0x61, 0x6e,
0x69, 0x66, 0x65, 0x73, 0x74, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x78, 0x4f, 0x70, 0x74,
0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x78, 0x4f, 0x70, 0x74,
0x69, 0x6f, 0x6e, 0x22, 0x31, 0x0a, 0x15, 0x41, 0x64, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46,
0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07,
0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73,
0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x2a, 0x89, 0x01, 0x0a, 0x16, 0x4d, 0x61, 0x6e, 0x69, 0x66,
0x65, 0x73, 0x74, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x78, 0x4f, 0x70, 0x74, 0x69, 0x6f,
0x6e, 0x12, 0x28, 0x0a, 0x24, 0x4d, 0x41, 0x4e, 0x49, 0x46, 0x45, 0x53, 0x54, 0x5f, 0x41, 0x50,
0x50, 0x45, 0x4e, 0x44, 0x49, 0x58, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e,
0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x20, 0x0a, 0x1c, 0x4d,
0x41, 0x4e, 0x49, 0x46, 0x45, 0x53, 0x54, 0x5f, 0x41, 0x50, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x58,
0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x23, 0x0a,
0x1f, 0x4d, 0x41, 0x4e, 0x49, 0x46, 0x45, 0x53, 0x54, 0x5f, 0x41, 0x50, 0x50, 0x45, 0x4e, 0x44,
0x49, 0x58, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x50, 0x50, 0x45, 0x4e, 0x44,
0x10, 0x02, 0x32, 0x9b, 0x0a, 0x0a, 0x11, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x53, 0x74, 0x6f, 0x72,
0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x88, 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74,
0x52, 0x65, 0x70, 0x6f, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x39, 0x2e, 0x64,
0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d,
0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73,
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61,
0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52,
0x65, 0x70, 0x6f, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x12, 0x76, 0x0a, 0x09, 0x48, 0x61, 0x73, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x73,
0x12, 0x33, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c,
0x70, 0x68, 0x61, 0x31, 0x2e, 0x48, 0x61, 0x73, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69,
0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x52, 0x65, 0x70, 0x6f, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x10, 0x63, 0x6c, 0x69, 0x65,
0x6e, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x5b, 0x0a, 0x10,
0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f,
0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70,
0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x68, 0x75, 0x6e, 0x6b,
0x54, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x63, 0x68, 0x75, 0x6e, 0x6b,
0x54, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x31, 0x0a, 0x15, 0x41, 0x64, 0x64,
0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20,
0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x32, 0x9b, 0x0a, 0x0a,
0x11, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x12, 0x88, 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x4d, 0x65,
0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x39, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70,
0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65,
0x70, 0x6f, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x3a, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61,
0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x4d, 0x65, 0x74,
0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x76, 0x0a,
0x09, 0x48, 0x61, 0x73, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x12, 0x33, 0x2e, 0x64, 0x6f, 0x6c,
0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74,
0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x48,
0x61, 0x73, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x34, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e,
0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x48, 0x61, 0x73, 0x43, 0x68, 0x75,
0x6e, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8d, 0x01, 0x0a, 0x14,
0x47, 0x65, 0x74, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x73, 0x12, 0x39, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e,
0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x6f, 0x77, 0x6e,
0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x6f, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x3a, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e,
0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70,
0x68, 0x61, 0x31, 0x2e, 0x48, 0x61, 0x73, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8d, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x44, 0x6f, 0x77,
0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x39,
0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72,
0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x6f,
0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x64, 0x6f, 0x6c, 0x74,
0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x4c,
0x6f, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x94, 0x01, 0x0a, 0x17,
0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x6f,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x39, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73,
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61,
0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44,
0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x6f, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31,
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f,
0x61, 0x64, 0x4c, 0x6f, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01,
0x30, 0x01, 0x12, 0x87, 0x01, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64,
0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x2e, 0x64, 0x6f, 0x6c, 0x74,
0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65,
0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65,
0x74, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x6f, 0x63, 0x73, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x94, 0x01, 0x0a, 0x17, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x73, 0x12, 0x39, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61,
0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61,
0x64, 0x4c, 0x6f, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x64,
0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d,
0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
0x2e, 0x47, 0x65, 0x74, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x6f, 0x63, 0x73,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x12, 0x87, 0x01, 0x0a,
0x12, 0x47, 0x65, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x73, 0x12, 0x37, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76,
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61,
0x64, 0x4c, 0x6f, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x64,
0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d,
0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
0x2e, 0x47, 0x65, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x6f, 0x63, 0x73, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6d, 0x0a, 0x06, 0x52, 0x65, 0x62, 0x61, 0x73, 0x65,
0x12, 0x30, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c,
0x70, 0x68, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x1a, 0x31, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x6f, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x1a, 0x38, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31,
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x67, 0x0a, 0x04, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2e, 0x2e,
0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65,
0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
0x31, 0x2e, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e,
0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65,
0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
0x31, 0x2e, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6d,
0x0a, 0x06, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x30, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e,
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64,
0x4c, 0x6f, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6d, 0x0a, 0x06,
0x52, 0x65, 0x62, 0x61, 0x73, 0x65, 0x12, 0x30, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70,
0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x62, 0x61, 0x73,
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e,
0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73,
0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6d,
0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x64, 0x6f, 0x6c,
0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74,
0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43,
0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x85, 0x01,
0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73,
0x12, 0x38, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x62,
0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x67, 0x0a, 0x04, 0x52,
0x6f, 0x6f, 0x74, 0x12, 0x2e, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76,
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76,
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x52, 0x6f, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6d, 0x0a, 0x06, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x30,
0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72,
0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x31, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c,
0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x12, 0x85, 0x01, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c,
0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x38, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70,
0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54,
0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x39, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c,
0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69,
0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x64, 0x6f, 0x6c,
0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74,
0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c,
0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x82, 0x01, 0x0a, 0x0d, 0x41, 0x64, 0x64, 0x54, 0x61, 0x62,
0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x37, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73,
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61,
0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x41, 0x64, 0x64, 0x54,
0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x38, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c,
0x70, 0x68, 0x61, 0x31, 0x2e, 0x41, 0x64, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c,
0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x53, 0x5a, 0x51, 0x67, 0x69,
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x6f, 0x6c, 0x74, 0x68, 0x75, 0x62,
0x2f, 0x64, 0x6f, 0x6c, 0x74, 0x2f, 0x67, 0x6f, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x2f, 0x64, 0x6f, 0x6c, 0x74, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x61, 0x6c,
0x70, 0x68, 0x61, 0x31, 0x3b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x82, 0x01, 0x0a, 0x0d,
0x41, 0x64, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x37, 0x2e,
0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65,
0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
0x31, 0x2e, 0x41, 0x64, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x64, 0x6f, 0x6c, 0x74, 0x2e, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70,
0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x41, 0x64, 0x64, 0x54, 0x61,
0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x42, 0x53, 0x5a, 0x51, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64,
0x6f, 0x6c, 0x74, 0x68, 0x75, 0x62, 0x2f, 0x64, 0x6f, 0x6c, 0x74, 0x2f, 0x67, 0x6f, 0x2f, 0x67,
0x65, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x64, 0x6f, 0x6c, 0x74, 0x2f, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x73, 0x61, 0x70,
0x69, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x72, 0x65, 0x6d, 0x6f, 0x74,
0x65, 0x73, 0x61, 0x70, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -1976,86 +2063,89 @@ func file_dolt_services_remotesapi_v1alpha1_chunkstore_proto_rawDescGZIP() []byt
return file_dolt_services_remotesapi_v1alpha1_chunkstore_proto_rawDescData
}
var file_dolt_services_remotesapi_v1alpha1_chunkstore_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_dolt_services_remotesapi_v1alpha1_chunkstore_proto_msgTypes = make([]protoimpl.MessageInfo, 29)
var file_dolt_services_remotesapi_v1alpha1_chunkstore_proto_goTypes = []interface{}{
(*RepoId)(nil), // 0: dolt.services.remotesapi.v1alpha1.RepoId
(*HasChunksRequest)(nil), // 1: dolt.services.remotesapi.v1alpha1.HasChunksRequest
(*HasChunksResponse)(nil), // 2: dolt.services.remotesapi.v1alpha1.HasChunksResponse
(*HttpGetChunk)(nil), // 3: dolt.services.remotesapi.v1alpha1.HttpGetChunk
(*RangeChunk)(nil), // 4: dolt.services.remotesapi.v1alpha1.RangeChunk
(*HttpGetRange)(nil), // 5: dolt.services.remotesapi.v1alpha1.HttpGetRange
(*DownloadLoc)(nil), // 6: dolt.services.remotesapi.v1alpha1.DownloadLoc
(*HttpPostTableFile)(nil), // 7: dolt.services.remotesapi.v1alpha1.HttpPostTableFile
(*UploadLoc)(nil), // 8: dolt.services.remotesapi.v1alpha1.UploadLoc
(*GetDownloadLocsRequest)(nil), // 9: dolt.services.remotesapi.v1alpha1.GetDownloadLocsRequest
(*GetDownloadLocsResponse)(nil), // 10: dolt.services.remotesapi.v1alpha1.GetDownloadLocsResponse
(*TableFileDetails)(nil), // 11: dolt.services.remotesapi.v1alpha1.TableFileDetails
(*GetUploadLocsRequest)(nil), // 12: dolt.services.remotesapi.v1alpha1.GetUploadLocsRequest
(*GetUploadLocsResponse)(nil), // 13: dolt.services.remotesapi.v1alpha1.GetUploadLocsResponse
(*RebaseRequest)(nil), // 14: dolt.services.remotesapi.v1alpha1.RebaseRequest
(*RebaseResponse)(nil), // 15: dolt.services.remotesapi.v1alpha1.RebaseResponse
(*RootRequest)(nil), // 16: dolt.services.remotesapi.v1alpha1.RootRequest
(*RootResponse)(nil), // 17: dolt.services.remotesapi.v1alpha1.RootResponse
(*ChunkTableInfo)(nil), // 18: dolt.services.remotesapi.v1alpha1.ChunkTableInfo
(*CommitRequest)(nil), // 19: dolt.services.remotesapi.v1alpha1.CommitRequest
(*CommitResponse)(nil), // 20: dolt.services.remotesapi.v1alpha1.CommitResponse
(*GetRepoMetadataRequest)(nil), // 21: dolt.services.remotesapi.v1alpha1.GetRepoMetadataRequest
(*GetRepoMetadataResponse)(nil), // 22: dolt.services.remotesapi.v1alpha1.GetRepoMetadataResponse
(*ClientRepoFormat)(nil), // 23: dolt.services.remotesapi.v1alpha1.ClientRepoFormat
(*ListTableFilesRequest)(nil), // 24: dolt.services.remotesapi.v1alpha1.ListTableFilesRequest
(*TableFileInfo)(nil), // 25: dolt.services.remotesapi.v1alpha1.TableFileInfo
(*ListTableFilesResponse)(nil), // 26: dolt.services.remotesapi.v1alpha1.ListTableFilesResponse
(*AddTableFilesRequest)(nil), // 27: dolt.services.remotesapi.v1alpha1.AddTableFilesRequest
(*AddTableFilesResponse)(nil), // 28: dolt.services.remotesapi.v1alpha1.AddTableFilesResponse
(ManifestAppendixOption)(0), // 0: dolt.services.remotesapi.v1alpha1.ManifestAppendixOption
(*RepoId)(nil), // 1: dolt.services.remotesapi.v1alpha1.RepoId
(*HasChunksRequest)(nil), // 2: dolt.services.remotesapi.v1alpha1.HasChunksRequest
(*HasChunksResponse)(nil), // 3: dolt.services.remotesapi.v1alpha1.HasChunksResponse
(*HttpGetChunk)(nil), // 4: dolt.services.remotesapi.v1alpha1.HttpGetChunk
(*RangeChunk)(nil), // 5: dolt.services.remotesapi.v1alpha1.RangeChunk
(*HttpGetRange)(nil), // 6: dolt.services.remotesapi.v1alpha1.HttpGetRange
(*DownloadLoc)(nil), // 7: dolt.services.remotesapi.v1alpha1.DownloadLoc
(*HttpPostTableFile)(nil), // 8: dolt.services.remotesapi.v1alpha1.HttpPostTableFile
(*UploadLoc)(nil), // 9: dolt.services.remotesapi.v1alpha1.UploadLoc
(*GetDownloadLocsRequest)(nil), // 10: dolt.services.remotesapi.v1alpha1.GetDownloadLocsRequest
(*GetDownloadLocsResponse)(nil), // 11: dolt.services.remotesapi.v1alpha1.GetDownloadLocsResponse
(*TableFileDetails)(nil), // 12: dolt.services.remotesapi.v1alpha1.TableFileDetails
(*GetUploadLocsRequest)(nil), // 13: dolt.services.remotesapi.v1alpha1.GetUploadLocsRequest
(*GetUploadLocsResponse)(nil), // 14: dolt.services.remotesapi.v1alpha1.GetUploadLocsResponse
(*RebaseRequest)(nil), // 15: dolt.services.remotesapi.v1alpha1.RebaseRequest
(*RebaseResponse)(nil), // 16: dolt.services.remotesapi.v1alpha1.RebaseResponse
(*RootRequest)(nil), // 17: dolt.services.remotesapi.v1alpha1.RootRequest
(*RootResponse)(nil), // 18: dolt.services.remotesapi.v1alpha1.RootResponse
(*ChunkTableInfo)(nil), // 19: dolt.services.remotesapi.v1alpha1.ChunkTableInfo
(*CommitRequest)(nil), // 20: dolt.services.remotesapi.v1alpha1.CommitRequest
(*CommitResponse)(nil), // 21: dolt.services.remotesapi.v1alpha1.CommitResponse
(*GetRepoMetadataRequest)(nil), // 22: dolt.services.remotesapi.v1alpha1.GetRepoMetadataRequest
(*GetRepoMetadataResponse)(nil), // 23: dolt.services.remotesapi.v1alpha1.GetRepoMetadataResponse
(*ClientRepoFormat)(nil), // 24: dolt.services.remotesapi.v1alpha1.ClientRepoFormat
(*ListTableFilesRequest)(nil), // 25: dolt.services.remotesapi.v1alpha1.ListTableFilesRequest
(*TableFileInfo)(nil), // 26: dolt.services.remotesapi.v1alpha1.TableFileInfo
(*ListTableFilesResponse)(nil), // 27: dolt.services.remotesapi.v1alpha1.ListTableFilesResponse
(*AddTableFilesRequest)(nil), // 28: dolt.services.remotesapi.v1alpha1.AddTableFilesRequest
(*AddTableFilesResponse)(nil), // 29: dolt.services.remotesapi.v1alpha1.AddTableFilesResponse
}
var file_dolt_services_remotesapi_v1alpha1_chunkstore_proto_depIdxs = []int32{
0, // 0: dolt.services.remotesapi.v1alpha1.HasChunksRequest.repo_id:type_name -> dolt.services.remotesapi.v1alpha1.RepoId
4, // 1: dolt.services.remotesapi.v1alpha1.HttpGetRange.ranges:type_name -> dolt.services.remotesapi.v1alpha1.RangeChunk
3, // 2: dolt.services.remotesapi.v1alpha1.DownloadLoc.http_get:type_name -> dolt.services.remotesapi.v1alpha1.HttpGetChunk
5, // 3: dolt.services.remotesapi.v1alpha1.DownloadLoc.http_get_range:type_name -> dolt.services.remotesapi.v1alpha1.HttpGetRange
7, // 4: dolt.services.remotesapi.v1alpha1.UploadLoc.http_post:type_name -> dolt.services.remotesapi.v1alpha1.HttpPostTableFile
0, // 5: dolt.services.remotesapi.v1alpha1.GetDownloadLocsRequest.repo_id:type_name -> dolt.services.remotesapi.v1alpha1.RepoId
6, // 6: dolt.services.remotesapi.v1alpha1.GetDownloadLocsResponse.locs:type_name -> dolt.services.remotesapi.v1alpha1.DownloadLoc
0, // 7: dolt.services.remotesapi.v1alpha1.GetUploadLocsRequest.repo_id:type_name -> dolt.services.remotesapi.v1alpha1.RepoId
11, // 8: dolt.services.remotesapi.v1alpha1.GetUploadLocsRequest.table_file_details:type_name -> dolt.services.remotesapi.v1alpha1.TableFileDetails
8, // 9: dolt.services.remotesapi.v1alpha1.GetUploadLocsResponse.locs:type_name -> dolt.services.remotesapi.v1alpha1.UploadLoc
0, // 10: dolt.services.remotesapi.v1alpha1.RebaseRequest.repo_id:type_name -> dolt.services.remotesapi.v1alpha1.RepoId
0, // 11: dolt.services.remotesapi.v1alpha1.RootRequest.repo_id:type_name -> dolt.services.remotesapi.v1alpha1.RepoId
0, // 12: dolt.services.remotesapi.v1alpha1.CommitRequest.repo_id:type_name -> dolt.services.remotesapi.v1alpha1.RepoId
18, // 13: dolt.services.remotesapi.v1alpha1.CommitRequest.chunk_table_info:type_name -> dolt.services.remotesapi.v1alpha1.ChunkTableInfo
23, // 14: dolt.services.remotesapi.v1alpha1.CommitRequest.client_repo_format:type_name -> dolt.services.remotesapi.v1alpha1.ClientRepoFormat
0, // 15: dolt.services.remotesapi.v1alpha1.GetRepoMetadataRequest.repo_id:type_name -> dolt.services.remotesapi.v1alpha1.RepoId
23, // 16: dolt.services.remotesapi.v1alpha1.GetRepoMetadataRequest.client_repo_format:type_name -> dolt.services.remotesapi.v1alpha1.ClientRepoFormat
0, // 17: dolt.services.remotesapi.v1alpha1.ListTableFilesRequest.repo_id:type_name -> dolt.services.remotesapi.v1alpha1.RepoId
25, // 18: dolt.services.remotesapi.v1alpha1.ListTableFilesResponse.table_file_info:type_name -> dolt.services.remotesapi.v1alpha1.TableFileInfo
0, // 19: dolt.services.remotesapi.v1alpha1.AddTableFilesRequest.repo_id:type_name -> dolt.services.remotesapi.v1alpha1.RepoId
23, // 20: dolt.services.remotesapi.v1alpha1.AddTableFilesRequest.client_repo_format:type_name -> dolt.services.remotesapi.v1alpha1.ClientRepoFormat
18, // 21: dolt.services.remotesapi.v1alpha1.AddTableFilesRequest.chunk_table_info:type_name -> dolt.services.remotesapi.v1alpha1.ChunkTableInfo
21, // 22: dolt.services.remotesapi.v1alpha1.ChunkStoreService.GetRepoMetadata:input_type -> dolt.services.remotesapi.v1alpha1.GetRepoMetadataRequest
1, // 23: dolt.services.remotesapi.v1alpha1.ChunkStoreService.HasChunks:input_type -> dolt.services.remotesapi.v1alpha1.HasChunksRequest
9, // 24: dolt.services.remotesapi.v1alpha1.ChunkStoreService.GetDownloadLocations:input_type -> dolt.services.remotesapi.v1alpha1.GetDownloadLocsRequest
9, // 25: dolt.services.remotesapi.v1alpha1.ChunkStoreService.StreamDownloadLocations:input_type -> dolt.services.remotesapi.v1alpha1.GetDownloadLocsRequest
12, // 26: dolt.services.remotesapi.v1alpha1.ChunkStoreService.GetUploadLocations:input_type -> dolt.services.remotesapi.v1alpha1.GetUploadLocsRequest
14, // 27: dolt.services.remotesapi.v1alpha1.ChunkStoreService.Rebase:input_type -> dolt.services.remotesapi.v1alpha1.RebaseRequest
16, // 28: dolt.services.remotesapi.v1alpha1.ChunkStoreService.Root:input_type -> dolt.services.remotesapi.v1alpha1.RootRequest
19, // 29: dolt.services.remotesapi.v1alpha1.ChunkStoreService.Commit:input_type -> dolt.services.remotesapi.v1alpha1.CommitRequest
24, // 30: dolt.services.remotesapi.v1alpha1.ChunkStoreService.ListTableFiles:input_type -> dolt.services.remotesapi.v1alpha1.ListTableFilesRequest
27, // 31: dolt.services.remotesapi.v1alpha1.ChunkStoreService.AddTableFiles:input_type -> dolt.services.remotesapi.v1alpha1.AddTableFilesRequest
22, // 32: dolt.services.remotesapi.v1alpha1.ChunkStoreService.GetRepoMetadata:output_type -> dolt.services.remotesapi.v1alpha1.GetRepoMetadataResponse
2, // 33: dolt.services.remotesapi.v1alpha1.ChunkStoreService.HasChunks:output_type -> dolt.services.remotesapi.v1alpha1.HasChunksResponse
10, // 34: dolt.services.remotesapi.v1alpha1.ChunkStoreService.GetDownloadLocations:output_type -> dolt.services.remotesapi.v1alpha1.GetDownloadLocsResponse
10, // 35: dolt.services.remotesapi.v1alpha1.ChunkStoreService.StreamDownloadLocations:output_type -> dolt.services.remotesapi.v1alpha1.GetDownloadLocsResponse
13, // 36: dolt.services.remotesapi.v1alpha1.ChunkStoreService.GetUploadLocations:output_type -> dolt.services.remotesapi.v1alpha1.GetUploadLocsResponse
15, // 37: dolt.services.remotesapi.v1alpha1.ChunkStoreService.Rebase:output_type -> dolt.services.remotesapi.v1alpha1.RebaseResponse
17, // 38: dolt.services.remotesapi.v1alpha1.ChunkStoreService.Root:output_type -> dolt.services.remotesapi.v1alpha1.RootResponse
20, // 39: dolt.services.remotesapi.v1alpha1.ChunkStoreService.Commit:output_type -> dolt.services.remotesapi.v1alpha1.CommitResponse
26, // 40: dolt.services.remotesapi.v1alpha1.ChunkStoreService.ListTableFiles:output_type -> dolt.services.remotesapi.v1alpha1.ListTableFilesResponse
28, // 41: dolt.services.remotesapi.v1alpha1.ChunkStoreService.AddTableFiles:output_type -> dolt.services.remotesapi.v1alpha1.AddTableFilesResponse
32, // [32:42] is the sub-list for method output_type
22, // [22:32] is the sub-list for method input_type
22, // [22:22] is the sub-list for extension type_name
22, // [22:22] is the sub-list for extension extendee
0, // [0:22] is the sub-list for field type_name
1, // 0: dolt.services.remotesapi.v1alpha1.HasChunksRequest.repo_id:type_name -> dolt.services.remotesapi.v1alpha1.RepoId
5, // 1: dolt.services.remotesapi.v1alpha1.HttpGetRange.ranges:type_name -> dolt.services.remotesapi.v1alpha1.RangeChunk
4, // 2: dolt.services.remotesapi.v1alpha1.DownloadLoc.http_get:type_name -> dolt.services.remotesapi.v1alpha1.HttpGetChunk
6, // 3: dolt.services.remotesapi.v1alpha1.DownloadLoc.http_get_range:type_name -> dolt.services.remotesapi.v1alpha1.HttpGetRange
8, // 4: dolt.services.remotesapi.v1alpha1.UploadLoc.http_post:type_name -> dolt.services.remotesapi.v1alpha1.HttpPostTableFile
1, // 5: dolt.services.remotesapi.v1alpha1.GetDownloadLocsRequest.repo_id:type_name -> dolt.services.remotesapi.v1alpha1.RepoId
7, // 6: dolt.services.remotesapi.v1alpha1.GetDownloadLocsResponse.locs:type_name -> dolt.services.remotesapi.v1alpha1.DownloadLoc
1, // 7: dolt.services.remotesapi.v1alpha1.GetUploadLocsRequest.repo_id:type_name -> dolt.services.remotesapi.v1alpha1.RepoId
12, // 8: dolt.services.remotesapi.v1alpha1.GetUploadLocsRequest.table_file_details:type_name -> dolt.services.remotesapi.v1alpha1.TableFileDetails
9, // 9: dolt.services.remotesapi.v1alpha1.GetUploadLocsResponse.locs:type_name -> dolt.services.remotesapi.v1alpha1.UploadLoc
1, // 10: dolt.services.remotesapi.v1alpha1.RebaseRequest.repo_id:type_name -> dolt.services.remotesapi.v1alpha1.RepoId
1, // 11: dolt.services.remotesapi.v1alpha1.RootRequest.repo_id:type_name -> dolt.services.remotesapi.v1alpha1.RepoId
1, // 12: dolt.services.remotesapi.v1alpha1.CommitRequest.repo_id:type_name -> dolt.services.remotesapi.v1alpha1.RepoId
19, // 13: dolt.services.remotesapi.v1alpha1.CommitRequest.chunk_table_info:type_name -> dolt.services.remotesapi.v1alpha1.ChunkTableInfo
24, // 14: dolt.services.remotesapi.v1alpha1.CommitRequest.client_repo_format:type_name -> dolt.services.remotesapi.v1alpha1.ClientRepoFormat
1, // 15: dolt.services.remotesapi.v1alpha1.GetRepoMetadataRequest.repo_id:type_name -> dolt.services.remotesapi.v1alpha1.RepoId
24, // 16: dolt.services.remotesapi.v1alpha1.GetRepoMetadataRequest.client_repo_format:type_name -> dolt.services.remotesapi.v1alpha1.ClientRepoFormat
1, // 17: dolt.services.remotesapi.v1alpha1.ListTableFilesRequest.repo_id:type_name -> dolt.services.remotesapi.v1alpha1.RepoId
26, // 18: dolt.services.remotesapi.v1alpha1.ListTableFilesResponse.table_file_info:type_name -> dolt.services.remotesapi.v1alpha1.TableFileInfo
1, // 19: dolt.services.remotesapi.v1alpha1.AddTableFilesRequest.repo_id:type_name -> dolt.services.remotesapi.v1alpha1.RepoId
24, // 20: dolt.services.remotesapi.v1alpha1.AddTableFilesRequest.client_repo_format:type_name -> dolt.services.remotesapi.v1alpha1.ClientRepoFormat
19, // 21: dolt.services.remotesapi.v1alpha1.AddTableFilesRequest.chunk_table_info:type_name -> dolt.services.remotesapi.v1alpha1.ChunkTableInfo
0, // 22: dolt.services.remotesapi.v1alpha1.AddTableFilesRequest.appendix_option:type_name -> dolt.services.remotesapi.v1alpha1.ManifestAppendixOption
22, // 23: dolt.services.remotesapi.v1alpha1.ChunkStoreService.GetRepoMetadata:input_type -> dolt.services.remotesapi.v1alpha1.GetRepoMetadataRequest
2, // 24: dolt.services.remotesapi.v1alpha1.ChunkStoreService.HasChunks:input_type -> dolt.services.remotesapi.v1alpha1.HasChunksRequest
10, // 25: dolt.services.remotesapi.v1alpha1.ChunkStoreService.GetDownloadLocations:input_type -> dolt.services.remotesapi.v1alpha1.GetDownloadLocsRequest
10, // 26: dolt.services.remotesapi.v1alpha1.ChunkStoreService.StreamDownloadLocations:input_type -> dolt.services.remotesapi.v1alpha1.GetDownloadLocsRequest
13, // 27: dolt.services.remotesapi.v1alpha1.ChunkStoreService.GetUploadLocations:input_type -> dolt.services.remotesapi.v1alpha1.GetUploadLocsRequest
15, // 28: dolt.services.remotesapi.v1alpha1.ChunkStoreService.Rebase:input_type -> dolt.services.remotesapi.v1alpha1.RebaseRequest
17, // 29: dolt.services.remotesapi.v1alpha1.ChunkStoreService.Root:input_type -> dolt.services.remotesapi.v1alpha1.RootRequest
20, // 30: dolt.services.remotesapi.v1alpha1.ChunkStoreService.Commit:input_type -> dolt.services.remotesapi.v1alpha1.CommitRequest
25, // 31: dolt.services.remotesapi.v1alpha1.ChunkStoreService.ListTableFiles:input_type -> dolt.services.remotesapi.v1alpha1.ListTableFilesRequest
28, // 32: dolt.services.remotesapi.v1alpha1.ChunkStoreService.AddTableFiles:input_type -> dolt.services.remotesapi.v1alpha1.AddTableFilesRequest
23, // 33: dolt.services.remotesapi.v1alpha1.ChunkStoreService.GetRepoMetadata:output_type -> dolt.services.remotesapi.v1alpha1.GetRepoMetadataResponse
3, // 34: dolt.services.remotesapi.v1alpha1.ChunkStoreService.HasChunks:output_type -> dolt.services.remotesapi.v1alpha1.HasChunksResponse
11, // 35: dolt.services.remotesapi.v1alpha1.ChunkStoreService.GetDownloadLocations:output_type -> dolt.services.remotesapi.v1alpha1.GetDownloadLocsResponse
11, // 36: dolt.services.remotesapi.v1alpha1.ChunkStoreService.StreamDownloadLocations:output_type -> dolt.services.remotesapi.v1alpha1.GetDownloadLocsResponse
14, // 37: dolt.services.remotesapi.v1alpha1.ChunkStoreService.GetUploadLocations:output_type -> dolt.services.remotesapi.v1alpha1.GetUploadLocsResponse
16, // 38: dolt.services.remotesapi.v1alpha1.ChunkStoreService.Rebase:output_type -> dolt.services.remotesapi.v1alpha1.RebaseResponse
18, // 39: dolt.services.remotesapi.v1alpha1.ChunkStoreService.Root:output_type -> dolt.services.remotesapi.v1alpha1.RootResponse
21, // 40: dolt.services.remotesapi.v1alpha1.ChunkStoreService.Commit:output_type -> dolt.services.remotesapi.v1alpha1.CommitResponse
27, // 41: dolt.services.remotesapi.v1alpha1.ChunkStoreService.ListTableFiles:output_type -> dolt.services.remotesapi.v1alpha1.ListTableFilesResponse
29, // 42: dolt.services.remotesapi.v1alpha1.ChunkStoreService.AddTableFiles:output_type -> dolt.services.remotesapi.v1alpha1.AddTableFilesResponse
33, // [33:43] is the sub-list for method output_type
23, // [23:33] is the sub-list for method input_type
23, // [23:23] is the sub-list for extension type_name
23, // [23:23] is the sub-list for extension extendee
0, // [0:23] is the sub-list for field type_name
}
func init() { file_dolt_services_remotesapi_v1alpha1_chunkstore_proto_init() }
@@ -2425,13 +2515,14 @@ func file_dolt_services_remotesapi_v1alpha1_chunkstore_proto_init() {
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_dolt_services_remotesapi_v1alpha1_chunkstore_proto_rawDesc,
NumEnums: 0,
NumEnums: 1,
NumMessages: 29,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_dolt_services_remotesapi_v1alpha1_chunkstore_proto_goTypes,
DependencyIndexes: file_dolt_services_remotesapi_v1alpha1_chunkstore_proto_depIdxs,
EnumInfos: file_dolt_services_remotesapi_v1alpha1_chunkstore_proto_enumTypes,
MessageInfos: file_dolt_services_remotesapi_v1alpha1_chunkstore_proto_msgTypes,
}.Build()
File_dolt_services_remotesapi_v1alpha1_chunkstore_proto = out.File

View File

@@ -4,7 +4,6 @@ package remotesapi
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"

View File

@@ -21,12 +21,11 @@
package remotesapi
import (
reflect "reflect"
sync "sync"
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (

View File

@@ -4,7 +4,6 @@ package remotesapi
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"

View File

@@ -41,6 +41,7 @@ require (
github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d
github.com/kch42/buzhash v0.0.0-20160816060738-9bdec3dec7c6
github.com/lestrrat-go/strftime v1.0.3 // indirect
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2
github.com/mattn/go-isatty v0.0.12
github.com/mattn/go-runewidth v0.0.9
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b

View File

@@ -432,6 +432,7 @@ github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0Q
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU=
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2 h1:JgVTCPf0uBVcUSWpyXmGpgOc62nK5HWUBKAGc3Qqa5k=
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=

View File

@@ -1039,3 +1039,7 @@ func (ddb *DoltDB) PullChunks(ctx context.Context, tempDir string, srcDB *DoltDB
func (ddb *DoltDB) Clone(ctx context.Context, destDB *DoltDB, eventCh chan<- datas.TableFileEvent) error {
return datas.Clone(ctx, ddb.db, destDB.db, eventCh)
}
func (ddb *DoltDB) GetStorageVersion(ctx context.Context) (string, error) {
return datas.GetManifestStorageVersion(ctx, ddb.db)
}

View File

@@ -756,6 +756,10 @@ type Sizer interface {
}
func (dcs *DoltChunkStore) httpPostUpload(ctx context.Context, hashBytes []byte, post *remotesapi.HttpPostTableFile, rd io.Reader, contentHash []byte) error {
return HttpPostUpload(ctx, dcs.httpFetcher, post, rd, contentHash)
}
func HttpPostUpload(ctx context.Context, httpFetcher HTTPFetcher, post *remotesapi.HttpPostTableFile, rd io.Reader, contentHash []byte) error {
req, err := http.NewRequest(http.MethodPut, post.Url, rd)
if err != nil {
return err
@@ -770,10 +774,15 @@ func (dcs *DoltChunkStore) httpPostUpload(ctx context.Context, hashBytes []byte,
req.Header.Set("Content-MD5", md5s)
}
fetcher := globalHttpFetcher
if httpFetcher != nil {
fetcher = httpFetcher
}
var resp *http.Response
op := func() error {
var err error
resp, err = dcs.httpFetcher.Do(req.WithContext(ctx))
resp, err = fetcher.Do(req.WithContext(ctx))
if err == nil {
defer func() {

View File

@@ -100,6 +100,15 @@ type ChunkStoreGarbageCollector interface {
MarkAndSweepChunks(ctx context.Context, last hash.Hash, keepChunks <-chan []hash.Hash) error
}
// ChunkStoreVersionGetter is a ChunkStore that supports getting the manifest's
// storage version
type ChunkStoreVersionGetter interface {
ChunkStore
// GetManifestStorageVersion returns the storage version of the Chunkstore's manifest
GetManifestStorageVersion(ctx context.Context) (string, error)
}
var ErrUnsupportedOperation = errors.New("operation not supported")
var ErrGCGenerationExpired = errors.New("garbage collection generation expired")

29
go/store/datas/version.go Normal file
View File

@@ -0,0 +1,29 @@
// Copyright 2020 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datas
import (
"context"
"github.com/dolthub/dolt/go/store/chunks"
)
func GetManifestStorageVersion(ctx context.Context, db Database) (string, error) {
store, ok := db.chunkStore().(chunks.ChunkStoreVersionGetter)
if !ok {
return "", chunks.ErrUnsupportedOperation
}
return store.GetManifestStorageVersion(ctx)
}

View File

@@ -474,7 +474,7 @@ func TestBlockStoreConjoinOnCommit(t *testing.T) {
srcs := makeTestSrcs(t, []uint32{1, 1, 3, 7}, p)
upstream, err := toSpecs(srcs)
require.NoError(t, err)
fm.set(constants.NomsVersion, computeAddr([]byte{0xbe}), hash.Of([]byte{0xef}), upstream)
fm.set(constants.NomsVersion, computeAddr([]byte{0xbe}), hash.Of([]byte{0xef}), upstream, nil)
c := &fakeConjoiner{
[]cannedConjoin{makeCanned(upstream[:2], upstream[2:], p)},
}
@@ -506,7 +506,7 @@ func TestBlockStoreConjoinOnCommit(t *testing.T) {
srcs := makeTestSrcs(t, []uint32{1, 1, 3, 7, 13}, p)
upstream, err := toSpecs(srcs)
require.NoError(t, err)
fm.set(constants.NomsVersion, computeAddr([]byte{0xbe}), hash.Of([]byte{0xef}), upstream)
fm.set(constants.NomsVersion, computeAddr([]byte{0xbe}), hash.Of([]byte{0xef}), upstream, nil)
c := &fakeConjoiner{
[]cannedConjoin{
makeCanned(upstream[:2], upstream[2:], p),

View File

@@ -72,10 +72,17 @@ func (c noopConjoiner) Conjoin(ctx context.Context, upstream manifestContents, m
func conjoin(ctx context.Context, upstream manifestContents, mm manifestUpdater, p tablePersister, stats *Stats) (manifestContents, error) {
var conjoined tableSpec
var conjoinees, keepers []tableSpec
var conjoinees, keepers, appendixSpecs []tableSpec
for {
if conjoinees == nil {
// Appendix table files should never be conjoined
// so we remove them before conjoining and add them
// back after
if upstream.NumAppendixSpecs() != 0 {
upstream, appendixSpecs = upstream.removeAppendixSpecs()
}
var err error
conjoined, conjoinees, keepers, err = conjoinTables(ctx, p, upstream.specs, stats)
@@ -85,19 +92,24 @@ func conjoin(ctx context.Context, upstream manifestContents, mm manifestUpdater,
}
specs := append(make([]tableSpec, 0, len(keepers)+1), conjoined)
if len(appendixSpecs) > 0 {
specs = append(make([]tableSpec, 0, len(specs)+len(appendixSpecs)), appendixSpecs...)
specs = append(specs, conjoined)
}
specs = append(specs, keepers...)
newContents := manifestContents{
vers: upstream.vers,
root: upstream.root,
lock: generateLockHash(upstream.root, specs),
gcGen: upstream.gcGen,
specs: specs,
vers: upstream.vers,
root: upstream.root,
lock: generateLockHash(upstream.root, specs),
gcGen: upstream.gcGen,
specs: specs,
appendix: appendixSpecs,
}
var err error
upstream, err = mm.Update(ctx, upstream.lock, newContents, stats, nil)
if err != nil {
return manifestContents{}, err
}
@@ -105,8 +117,28 @@ func conjoin(ctx context.Context, upstream manifestContents, mm manifestUpdater,
if newContents.lock == upstream.lock {
return upstream, nil
}
// Optimistic lock failure. Someone else moved to the root, the set of tables, or both out from under us.
// If we can re-use the conjoin we already performed, we want to try again. Currently, we will only do so if ALL conjoinees are still present upstream. If we can't re-use...then someone else almost certainly landed a conjoin upstream. In this case, bail and let clients ask again if they think they still can't proceed.
// If the appendix has changed we simply bail
// and let the client retry
if len(appendixSpecs) > 0 {
if len(upstream.appendix) != len(appendixSpecs) {
return upstream, nil
}
for i := range upstream.appendix {
if upstream.appendix[i].name != appendixSpecs[i].name {
return upstream, nil
}
}
// No appendix change occured, so we remove the appendix
// on the "latest" upstream which will be added back
// before the conjoin completes
upstream, appendixSpecs = upstream.removeAppendixSpecs()
}
conjoineeSet := map[addr]struct{}{}
upstreamNames := map[addr]struct{}{}
for _, spec := range upstream.specs {

View File

@@ -119,7 +119,7 @@ func TestConjoin(t *testing.T) {
setup := func(lock addr, root hash.Hash, sizes []uint32) (fm *fakeManifest, p tablePersister, upstream manifestContents) {
p = newFakeTablePersister()
fm = &fakeManifest{}
fm.set(constants.NomsVersion, lock, root, makeTestTableSpecs(sizes, p))
fm.set(constants.NomsVersion, lock, root, makeTestTableSpecs(sizes, p), nil)
var err error
_, upstream, err = fm.ParseIfExists(context.Background(), nil, nil)
@@ -128,6 +128,16 @@ func TestConjoin(t *testing.T) {
return
}
// Compact some tables, interloper slips in a new table
makeExtra := func(p tablePersister) tableSpec {
mt := newMemTable(testMemTableSize)
data := []byte{0xde, 0xad}
mt.addChunk(computeAddr(data), data)
src, err := p.Persist(context.Background(), mt, nil, &Stats{})
require.NoError(t, err)
return tableSpec{mustAddr(src.hash()), mustUint32(src.count())}
}
tc := []struct {
name string
precompact []uint32
@@ -161,15 +171,6 @@ func TestConjoin(t *testing.T) {
})
t.Run("Retry", func(t *testing.T) {
// Compact some tables, interloper slips in a new table
makeExtra := func(p tablePersister) tableSpec {
mt := newMemTable(testMemTableSize)
data := []byte{0xde, 0xad}
mt.addChunk(computeAddr(data), data)
src, err := p.Persist(context.Background(), mt, nil, &Stats{})
require.NoError(t, err)
return tableSpec{mustAddr(src.hash()), mustUint32(src.count())}
}
for _, c := range tc {
t.Run(c.name, func(t *testing.T) {
fm, p, upstream := setup(startLock, startRoot, c.precompact)
@@ -177,7 +178,7 @@ func TestConjoin(t *testing.T) {
newTable := makeExtra(p)
u := updatePreemptManifest{fm, func() {
specs := append([]tableSpec{}, upstream.specs...)
fm.set(constants.NomsVersion, computeAddr([]byte("lock2")), startRoot, append(specs, newTable))
fm.set(constants.NomsVersion, computeAddr([]byte("lock2")), startRoot, append(specs, newTable), nil)
}}
_, err := conjoin(context.Background(), upstream, u, p, stats)
require.NoError(t, err)
@@ -197,7 +198,7 @@ func TestConjoin(t *testing.T) {
fm, p, upstream := setup(startLock, startRoot, c.precompact)
u := updatePreemptManifest{fm, func() {
fm.set(constants.NomsVersion, computeAddr([]byte("lock2")), startRoot, upstream.specs[1:])
fm.set(constants.NomsVersion, computeAddr([]byte("lock2")), startRoot, upstream.specs[1:], nil)
}}
_, err := conjoin(context.Background(), upstream, u, p, stats)
require.NoError(t, err)
@@ -208,6 +209,146 @@ func TestConjoin(t *testing.T) {
})
}
})
setupAppendix := func(lock addr, root hash.Hash, specSizes, appendixSizes []uint32) (fm *fakeManifest, p tablePersister, upstream manifestContents) {
p = newFakeTablePersister()
fm = &fakeManifest{}
fm.set(constants.NomsVersion, lock, root, makeTestTableSpecs(specSizes, p), makeTestTableSpecs(appendixSizes, p))
var err error
_, upstream, err = fm.ParseIfExists(context.Background(), nil, nil)
require.NoError(t, err)
return
}
tca := []struct {
name string
appendix []uint32
precompact []uint32
postcompact []uint32
}{
{"uniform", []uint32{1}, []uint32{1, 1, 1, 1, 1}, []uint32{1, 4}},
{"all but last", []uint32{2}, []uint32{2, 1, 1, 1, 1, 5}, []uint32{2, 4, 5}},
{"all", []uint32{1, 2, 3}, []uint32{1, 2, 3, 5, 5, 5}, []uint32{1, 2, 3, 15}},
{"first four", []uint32{8, 9, 10}, []uint32{8, 9, 10, 5, 6, 10, 11, 35, 64}, []uint32{8, 9, 10, 32, 35, 64}},
{"log, first two", nil, []uint32{1, 2, 4, 8, 16, 32, 64}, []uint32{3, 4, 8, 16, 32, 64}},
{"log, all", []uint32{9, 10, 11, 12}, []uint32{9, 10, 11, 12, 2, 3, 4, 8, 16, 32, 64}, []uint32{9, 10, 11, 12, 129}},
}
t.Run("SuccessAppendix", func(t *testing.T) {
// Compact some tables, no one interrupts
for _, c := range tca {
t.Run(c.name, func(t *testing.T) {
fm, p, upstream := setupAppendix(startLock, startRoot, c.precompact, c.appendix)
_, err := conjoin(context.Background(), upstream, fm, p, stats)
require.NoError(t, err)
exists, newUpstream, err := fm.ParseIfExists(context.Background(), stats, nil)
require.NoError(t, err)
assert.True(t, exists)
assert.Equal(t, c.postcompact, getSortedSizes(newUpstream.specs))
assert.Equal(t, c.appendix, getSortedSizes(newUpstream.appendix))
assertContainAll(t, p, upstream.specs, newUpstream.specs)
assertContainAll(t, p, upstream.appendix, newUpstream.appendix)
})
}
})
t.Run("RetryAppendixSpecsChange", func(t *testing.T) {
for _, c := range tca {
t.Run(c.name, func(t *testing.T) {
fm, p, upstream := setupAppendix(startLock, startRoot, c.precompact, c.appendix)
newTable := makeExtra(p)
u := updatePreemptManifest{fm, func() {
specs := append([]tableSpec{}, upstream.specs...)
fm.set(constants.NomsVersion, computeAddr([]byte("lock2")), startRoot, append(specs, newTable), upstream.appendix)
}}
_, err := conjoin(context.Background(), upstream, u, p, stats)
require.NoError(t, err)
exists, newUpstream, err := fm.ParseIfExists(context.Background(), stats, nil)
require.NoError(t, err)
assert.True(t, exists)
assert.Equal(t, append([]uint32{1}, c.postcompact...), getSortedSizes(newUpstream.specs))
assert.Equal(t, c.appendix, getSortedSizes(newUpstream.appendix))
assertContainAll(t, p, append(upstream.specs, newTable), newUpstream.specs)
assertContainAll(t, p, upstream.appendix, newUpstream.appendix)
})
}
})
t.Run("RetryAppendixAppendixChange", func(t *testing.T) {
for _, c := range tca {
t.Run(c.name, func(t *testing.T) {
fm, p, upstream := setupAppendix(startLock, startRoot, c.precompact, c.appendix)
newTable := makeExtra(p)
u := updatePreemptManifest{fm, func() {
app := append([]tableSpec{}, upstream.appendix...)
specs := append([]tableSpec{}, newTable)
fm.set(constants.NomsVersion, computeAddr([]byte("lock2")), startRoot, append(specs, upstream.specs...), append(app, newTable))
}}
_, err := conjoin(context.Background(), upstream, u, p, stats)
require.NoError(t, err)
exists, newUpstream, err := fm.ParseIfExists(context.Background(), stats, nil)
require.NoError(t, err)
assert.True(t, exists)
if newUpstream.appendix != nil {
assert.Equal(t, append([]uint32{1}, c.appendix...), getSortedSizes(newUpstream.appendix))
assertContainAll(t, p, append(upstream.appendix, newTable), newUpstream.appendix)
} else {
assert.Equal(t, upstream.appendix, newUpstream.appendix)
}
})
}
})
t.Run("TablesDroppedUpstreamAppendixSpecChanges", func(t *testing.T) {
// Interloper drops some compactees
for _, c := range tca {
t.Run(c.name, func(t *testing.T) {
fm, p, upstream := setupAppendix(startLock, startRoot, c.precompact, c.appendix)
u := updatePreemptManifest{fm, func() {
fm.set(constants.NomsVersion, computeAddr([]byte("lock2")), startRoot, upstream.specs[len(c.appendix)+1:], upstream.appendix[:])
}}
_, err := conjoin(context.Background(), upstream, u, p, stats)
require.NoError(t, err)
exists, newUpstream, err := fm.ParseIfExists(context.Background(), stats, nil)
require.NoError(t, err)
assert.True(t, exists)
assert.Equal(t, c.precompact[len(c.appendix)+1:], getSortedSizes(newUpstream.specs))
assert.Equal(t, c.appendix, getSortedSizes(newUpstream.appendix))
})
}
})
t.Run("TablesDroppedUpstreamAppendixAppendixChanges", func(t *testing.T) {
// Interloper drops some compactees
for _, c := range tca {
t.Run(c.name, func(t *testing.T) {
fm, p, upstream := setupAppendix(startLock, startRoot, c.precompact, c.appendix)
newTable := makeExtra(p)
u := updatePreemptManifest{fm, func() {
specs := append([]tableSpec{}, newTable)
specs = append(specs, upstream.specs[len(c.appendix)+1:]...)
fm.set(constants.NomsVersion, computeAddr([]byte("lock2")), startRoot, specs, append([]tableSpec{}, newTable))
}}
_, err := conjoin(context.Background(), upstream, u, p, stats)
require.NoError(t, err)
exists, newUpstream, err := fm.ParseIfExists(context.Background(), stats, nil)
require.NoError(t, err)
assert.True(t, exists)
assert.Equal(t, append([]uint32{1}, c.precompact[len(c.appendix)+1:]...), getSortedSizes(newUpstream.specs))
assert.Equal(t, []uint32{1}, getSortedSizes(newUpstream.appendix))
})
}
})
}
type updatePreemptManifest struct {

View File

@@ -41,8 +41,8 @@ type fakeDDB struct {
}
type record struct {
lock, root []byte
vers, specs string
lock, root []byte
vers, specs, appendix string
}
func makeFakeDDB(t *testing.T) *fakeDDB {
@@ -83,6 +83,9 @@ func (m *fakeDDB) GetItemWithContext(ctx aws.Context, input *dynamodb.GetItemInp
if e.specs != "" {
item[tableSpecsAttr] = &dynamodb.AttributeValue{S: aws.String(e.specs)}
}
if e.appendix != "" {
item[appendixAttr] = &dynamodb.AttributeValue{S: aws.String(e.appendix)}
}
case []byte:
item[dataAttr] = &dynamodb.AttributeValue{B: e}
}
@@ -91,8 +94,8 @@ func (m *fakeDDB) GetItemWithContext(ctx aws.Context, input *dynamodb.GetItemInp
return &dynamodb.GetItemOutput{Item: item}, nil
}
func (m *fakeDDB) putRecord(k string, l, r []byte, v string, s string) {
m.data[k] = record{l, r, v, s}
func (m *fakeDDB) putRecord(k string, l, r []byte, v string, s string, a string) {
m.data[k] = record{l, r, v, s, a}
}
func (m *fakeDDB) putData(k string, d []byte) {
@@ -132,6 +135,12 @@ func (m *fakeDDB) PutItemWithContext(ctx aws.Context, input *dynamodb.PutItemInp
specs = *attr.S
}
apps := ""
if attr, present := input.Item[appendixAttr]; present {
assert.NotNil(m.t, attr.S, "appendix specs should have been a String: %+v", input.Item[appendixAttr])
apps = *attr.S
}
mustNotExist := *(input.ConditionExpression) == valueNotExistsOrEqualsExpression
current, present := m.data[key]
@@ -141,14 +150,15 @@ func (m *fakeDDB) PutItemWithContext(ctx aws.Context, input *dynamodb.PutItemInp
return nil, mockAWSError("ConditionalCheckFailedException")
}
m.putRecord(key, lock, root, constants.NomsVersion, specs)
atomic.AddInt64(&m.numPuts, 1)
m.putRecord(key, lock, root, constants.NomsVersion, specs, apps)
atomic.AddInt64(&m.numPuts, 1)
return &dynamodb.PutItemOutput{}, nil
}
func checkCondition(current record, expressionAttrVals map[string]*dynamodb.AttributeValue) bool {
return current.vers == *expressionAttrVals[":vers"].S && bytes.Equal(current.lock, expressionAttrVals[":prev"].B)
return current.vers == *expressionAttrVals[versExpressionValuesKey].S && bytes.Equal(current.lock, expressionAttrVals[prevLockExpressionValuesKey].B)
}
func (m *fakeDDB) NumGets() int64 {

View File

@@ -41,16 +41,19 @@ const (
// DynamoManifest does not yet include GC Generation
AWSStorageVersion = "4"
dbAttr = "db"
lockAttr = "lck" // 'lock' is a reserved word in dynamo
rootAttr = "root"
versAttr = "vers"
nbsVersAttr = "nbsVers"
tableSpecsAttr = "specs"
dbAttr = "db"
lockAttr = "lck" // 'lock' is a reserved word in dynamo
rootAttr = "root"
versAttr = "vers"
nbsVersAttr = "nbsVers"
tableSpecsAttr = "specs"
appendixAttr = "appendix"
prevLockExpressionValuesKey = ":prev"
versExpressionValuesKey = ":vers"
)
var (
valueEqualsExpression = fmt.Sprintf("(%s = :prev) and (%s = :vers)", lockAttr, versAttr)
valueEqualsExpression = fmt.Sprintf("(%s = %s) and (%s = %s)", lockAttr, prevLockExpressionValuesKey, versAttr, versExpressionValuesKey)
valueNotExistsOrEqualsExpression = fmt.Sprintf("attribute_not_exists("+lockAttr+") or %s", valueEqualsExpression)
)
@@ -96,17 +99,24 @@ func (dm dynamoManifest) ParseIfExists(ctx context.Context, stats *Stats, readHo
// !exists(dbAttr) => unitialized store
if len(result.Item) > 0 {
valid, hasSpecs := validateManifest(result.Item)
valid, hasSpecs, hasAppendix := validateManifest(result.Item)
if !valid {
return false, contents, ErrCorruptManifest
}
exists = true
contents.vers = *result.Item[versAttr].S
contents.root = hash.New(result.Item[rootAttr].B)
copy(contents.lock[:], result.Item[lockAttr].B)
if hasSpecs {
contents.specs, err = parseSpecs(strings.Split(*result.Item[tableSpecsAttr].S, ":"))
if err != nil {
return false, manifestContents{}, ErrCorruptManifest
}
}
if hasAppendix {
contents.appendix, err = parseSpecs(strings.Split(*result.Item[appendixAttr].S, ":"))
if err != nil {
return false, manifestContents{}, ErrCorruptManifest
}
@@ -116,18 +126,24 @@ func (dm dynamoManifest) ParseIfExists(ctx context.Context, stats *Stats, readHo
return exists, contents, nil
}
func validateManifest(item map[string]*dynamodb.AttributeValue) (valid, hasSpecs bool) {
func validateManifest(item map[string]*dynamodb.AttributeValue) (valid, hasSpecs, hasAppendix bool) {
if item[nbsVersAttr] != nil && item[nbsVersAttr].S != nil &&
AWSStorageVersion == *item[nbsVersAttr].S &&
item[versAttr] != nil && item[versAttr].S != nil &&
item[lockAttr] != nil && item[lockAttr].B != nil &&
item[rootAttr] != nil && item[rootAttr].B != nil {
if len(item) == 6 && item[tableSpecsAttr] != nil && item[tableSpecsAttr].S != nil {
return true, true
if len(item) == 6 || len(item) == 7 {
if item[tableSpecsAttr] != nil && item[tableSpecsAttr].S != nil {
hasSpecs = true
}
if item[appendixAttr] != nil && item[appendixAttr].S != nil {
hasAppendix = true
}
return true, hasSpecs, hasAppendix
}
return len(item) == 5, false
return len(item) == 5, false, false
}
return false, false
return false, false, false
}
func (dm dynamoManifest) Update(ctx context.Context, lastLock addr, newContents manifestContents, stats *Stats, writeHook func() error) (manifestContents, error) {
@@ -144,12 +160,19 @@ func (dm dynamoManifest) Update(ctx context.Context, lastLock addr, newContents
lockAttr: {B: newContents.lock[:]},
},
}
if len(newContents.specs) > 0 {
tableInfo := make([]string, 2*len(newContents.specs))
formatSpecs(newContents.specs, tableInfo)
putArgs.Item[tableSpecsAttr] = &dynamodb.AttributeValue{S: aws.String(strings.Join(tableInfo, ":"))}
}
if len(newContents.appendix) > 0 {
tableInfo := make([]string, 2*len(newContents.appendix))
formatSpecs(newContents.appendix, tableInfo)
putArgs.Item[appendixAttr] = &dynamodb.AttributeValue{S: aws.String(strings.Join(tableInfo, ":"))}
}
expr := valueEqualsExpression
if lastLock == (addr{}) {
expr = valueNotExistsOrEqualsExpression
@@ -157,8 +180,8 @@ func (dm dynamoManifest) Update(ctx context.Context, lastLock addr, newContents
putArgs.ConditionExpression = aws.String(expr)
putArgs.ExpressionAttributeValues = map[string]*dynamodb.AttributeValue{
":prev": {B: lastLock[:]},
":vers": {S: aws.String(newContents.vers)},
prevLockExpressionValuesKey: {B: lastLock[:]},
versExpressionValuesKey: {S: aws.String(newContents.vers)},
}
_, ddberr := dm.ddbsvc.PutItemWithContext(ctx, &putArgs)

View File

@@ -52,11 +52,13 @@ func TestDynamoManifestParseIfExists(t *testing.T) {
require.NoError(t, err)
assert.False(exists)
// Simulate another process writing a manifest (with an old Noms version).
// Simulate another process writing a manifest and appendix (with an old Noms version).
newLock := computeAddr([]byte("locker"))
newRoot := hash.Of([]byte("new root"))
tableName := hash.Of([]byte("table1"))
ddb.putRecord(db, newLock[:], newRoot[:], "0", tableName.String()+":"+"0")
app := tableName.String() + ":" + "0"
specsWithAppendix := app + ":" + tableName.String() + ":" + "0"
ddb.putRecord(db, newLock[:], newRoot[:], "0", specsWithAppendix, app)
// ParseIfExists should now reflect the manifest written above.
exists, contents, err := mm.ParseIfExists(context.Background(), stats, nil)
@@ -65,18 +67,25 @@ func TestDynamoManifestParseIfExists(t *testing.T) {
assert.Equal("0", contents.vers)
assert.Equal(newLock, contents.lock)
assert.Equal(newRoot, contents.root)
if assert.Len(contents.specs, 1) {
if assert.Len(contents.appendix, 1) {
assert.Equal(tableName.String(), contents.specs[0].name.String())
assert.Equal(uint32(0), contents.specs[0].chunkCount)
assert.Equal(tableName.String(), contents.appendix[0].name.String())
assert.Equal(uint32(0), contents.appendix[0].chunkCount)
}
if assert.Len(contents.specs, 2) {
assert.Equal(tableName.String(), contents.specs[1].name.String())
assert.Equal(uint32(0), contents.specs[1].chunkCount)
}
}
func makeContents(lock, root string, specs []tableSpec) manifestContents {
func makeContents(lock, root string, specs, appendix []tableSpec) manifestContents {
return manifestContents{
vers: constants.NomsVersion,
lock: computeAddr([]byte(lock)),
root: hash.Of([]byte(root)),
specs: specs,
vers: constants.NomsVersion,
lock: computeAddr([]byte(lock)),
root: hash.Of([]byte(root)),
specs: specs,
appendix: appendix,
}
}
@@ -88,7 +97,7 @@ func TestDynamoManifestUpdateWontClobberOldVersion(t *testing.T) {
// Simulate another process having already put old Noms data in dir/.
lock := computeAddr([]byte("locker"))
badRoot := hash.Of([]byte("bad root"))
ddb.putRecord(db, lock[:], badRoot[:], "0", "")
ddb.putRecord(db, lock[:], badRoot[:], "0", "", "")
_, err := mm.Update(context.Background(), lock, manifestContents{vers: constants.NomsVersion}, stats, nil)
assert.Error(err)
@@ -100,12 +109,12 @@ func TestDynamoManifestUpdate(t *testing.T) {
stats := &Stats{}
// First, test winning the race against another process.
contents := makeContents("locker", "nuroot", []tableSpec{{computeAddr([]byte("a")), 3}})
contents := makeContents("locker", "nuroot", []tableSpec{{computeAddr([]byte("a")), 3}}, nil)
upstream, err := mm.Update(context.Background(), addr{}, contents, stats, func() error {
// This should fail to get the lock, and therefore _not_ clobber the manifest. So the Update should succeed.
lock := computeAddr([]byte("nolock"))
newRoot2 := hash.Of([]byte("noroot"))
ddb.putRecord(db, lock[:], newRoot2[:], constants.NomsVersion, "")
ddb.putRecord(db, lock[:], newRoot2[:], constants.NomsVersion, "", "")
return nil
})
require.NoError(t, err)
@@ -114,7 +123,7 @@ func TestDynamoManifestUpdate(t *testing.T) {
assert.Equal(contents.specs, upstream.specs)
// Now, test the case where the optimistic lock fails, and someone else updated the root since last we checked.
rejected := makeContents("locker 2", "new root 2", nil)
rejected := makeContents("locker 2", "new root 2", nil, nil)
upstream, err = mm.Update(context.Background(), addr{}, rejected, stats, nil)
require.NoError(t, err)
assert.Equal(contents.lock, upstream.lock)
@@ -129,9 +138,9 @@ func TestDynamoManifestUpdate(t *testing.T) {
// Now, test the case where the optimistic lock fails because someone else updated only the tables since last we checked
jerkLock := computeAddr([]byte("jerk"))
tableName := computeAddr([]byte("table1"))
ddb.putRecord(db, jerkLock[:], upstream.root[:], constants.NomsVersion, tableName.String()+":1")
ddb.putRecord(db, jerkLock[:], upstream.root[:], constants.NomsVersion, tableName.String()+":1", "")
newContents3 := makeContents("locker 3", "new root 3", nil)
newContents3 := makeContents("locker 3", "new root 3", nil, nil)
upstream, err = mm.Update(context.Background(), upstream.lock, newContents3, stats, nil)
require.NoError(t, err)
assert.Equal(jerkLock, upstream.lock)
@@ -139,6 +148,66 @@ func TestDynamoManifestUpdate(t *testing.T) {
assert.Equal([]tableSpec{{tableName, 1}}, upstream.specs)
}
// TestDynamoManifestUpdateAppendix exercises dynamo manifest updates when the
// manifest carries an appendix: winning an update race, losing the optimistic
// lock after a root change, and losing it after a specs-only change.
func TestDynamoManifestUpdateAppendix(t *testing.T) {
	assert := assert.New(t)
	mm, ddb := makeDynamoManifestFake(t)
	stats := &Stats{}

	// First, test winning the race against another process.
	// The appendix spec ("app-a") also appears first in specs, matching the
	// convention that appendix specs are prepended to manifest specs.
	specs := []tableSpec{
		{computeAddr([]byte("app-a")), 3},
		{computeAddr([]byte("a")), 3},
	}
	app := []tableSpec{{computeAddr([]byte("app-a")), 3}}
	contents := makeContents("locker", "nuroot", specs, app)

	upstream, err := mm.Update(context.Background(), addr{}, contents, stats, func() error {
		// This should fail to get the lock, and therefore _not_ clobber the manifest. So the Update should succeed.
		lock := computeAddr([]byte("nolock"))
		newRoot2 := hash.Of([]byte("noroot"))
		ddb.putRecord(db, lock[:], newRoot2[:], constants.NomsVersion, "", "")
		return nil
	})
	require.NoError(t, err)
	assert.Equal(contents.lock, upstream.lock)
	assert.Equal(contents.root, upstream.root)
	assert.Equal(contents.specs, upstream.specs)
	assert.Equal(contents.appendix, upstream.appendix)

	// Now, test the case where the optimistic lock fails, and someone else updated the root since last we checked.
	// A rejected update must hand back the existing upstream contents unchanged.
	rejected := makeContents("locker 2", "new root 2", nil, nil)

	upstream, err = mm.Update(context.Background(), addr{}, rejected, stats, nil)
	require.NoError(t, err)
	assert.Equal(contents.lock, upstream.lock)
	assert.Equal(contents.root, upstream.root)
	assert.Equal(contents.specs, upstream.specs)
	assert.Equal(contents.appendix, upstream.appendix)

	// Retrying with the correct lock succeeds and clears specs and appendix.
	upstream, err = mm.Update(context.Background(), upstream.lock, rejected, stats, nil)
	require.NoError(t, err)
	assert.Equal(rejected.lock, upstream.lock)
	assert.Equal(rejected.root, upstream.root)
	assert.Empty(upstream.specs)
	assert.Empty(upstream.appendix)

	// Now, test the case where the optimistic lock fails because someone else updated only the tables since last we checked
	jerkLock := computeAddr([]byte("jerk"))
	tableName := computeAddr([]byte("table1"))
	appTableName := computeAddr([]byte("table1-appendix"))
	// Record format is colon-joined "<name>:<chunkCount>" pairs; the appendix
	// entry leads the specs string.
	appStr := appTableName.String() + ":1"
	specsStr := appStr + ":" + tableName.String() + ":1"
	ddb.putRecord(db, jerkLock[:], upstream.root[:], constants.NomsVersion, specsStr, appStr)

	newContents3 := makeContents("locker 3", "new root 3", nil, nil)
	upstream, err = mm.Update(context.Background(), upstream.lock, newContents3, stats, nil)
	require.NoError(t, err)
	assert.Equal(jerkLock, upstream.lock)
	assert.Equal(rejected.root, upstream.root)
	assert.Equal([]tableSpec{{appTableName, 1}, {tableName, 1}}, upstream.specs)
	assert.Equal([]tableSpec{{appTableName, 1}}, upstream.appendix)
}
func TestDynamoManifestCaching(t *testing.T) {
assert := assert.New(t)
mm, ddb := makeDynamoManifestFake(t)
@@ -152,7 +221,7 @@ func TestDynamoManifestCaching(t *testing.T) {
assert.Equal(reads+1, ddb.NumGets())
lock, root := computeAddr([]byte("lock")), hash.Of([]byte("root"))
ddb.putRecord(db, lock[:], root[:], constants.NomsVersion, "")
ddb.putRecord(db, lock[:], root[:], constants.NomsVersion, "", "")
reads = ddb.NumGets()
exists, _, err = mm.ParseIfExists(context.Background(), stats, nil)
@@ -162,7 +231,7 @@ func TestDynamoManifestCaching(t *testing.T) {
// When failing the optimistic lock, we should hit persistent storage.
reads = ddb.NumGets()
contents := makeContents("lock2", "nuroot", []tableSpec{{computeAddr([]byte("a")), 3}})
contents := makeContents("lock2", "nuroot", []tableSpec{{computeAddr([]byte("a")), 3}}, nil)
upstream, err := mm.Update(context.Background(), addr{}, contents, stats, nil)
require.NoError(t, err)
assert.NotEqual(contents.lock, upstream.lock)
@@ -187,4 +256,5 @@ func TestDynamoManifestUpdateEmpty(t *testing.T) {
assert.Equal(contents.lock, upstream.lock)
assert.True(upstream.root.IsEmpty())
assert.Empty(upstream.specs)
assert.Empty(upstream.appendix)
}

View File

@@ -367,18 +367,16 @@ func parseIfExistsWithParser(_ context.Context, dir string, parse manifestParser
// !exists(lockFileName) => unitialized store
if locked {
var f io.ReadCloser
var f *os.File
err = func() (ferr error) {
lck := newLock(dir)
ferr = lck.Lock()
if ferr != nil {
return ferr
}
defer func() {
unlockErr := lck.Unlock()
if ferr == nil {
ferr = unlockErr
}
@@ -396,7 +394,6 @@ func parseIfExistsWithParser(_ context.Context, dir string, parse manifestParser
if ferr != nil {
return ferr
}
return nil
}()
@@ -422,7 +419,6 @@ func parseIfExistsWithParser(_ context.Context, dir string, parse manifestParser
}
}
}
return exists, contents, nil
}

View File

@@ -34,6 +34,7 @@ import (
)
var ErrCorruptManifest = errors.New("corrupt manifest")
var ErrUnsupportedManifestAppendixOption = errors.New("unsupported manifest appendix option")
type manifest interface {
// Name returns a stable, unique identifier for the store this manifest describes.
@@ -96,15 +97,32 @@ type ManifestInfo interface {
GetGCGen() string
GetRoot() hash.Hash
NumTableSpecs() int
NumAppendixSpecs() int
GetTableSpecInfo(i int) TableSpecInfo
GetAppendixTableSpecInfo(i int) TableSpecInfo
}
// ManifestAppendixOption controls how UpdateManifestWithAppendix merges new
// appendix table specs into a manifest's existing appendix.
type ManifestAppendixOption int

const (
	// ManifestAppendixOption_Unspecified is the zero value; appendix updates
	// reject it with ErrUnsupportedManifestAppendixOption.
	ManifestAppendixOption_Unspecified ManifestAppendixOption = iota
	// ManifestAppendixOption_Set replaces the entire appendix with the new specs.
	ManifestAppendixOption_Set
	// ManifestAppendixOption_Append adds the new specs to the existing appendix.
	ManifestAppendixOption_Append
)
// manifestContents is the parsed, in-memory representation of a store manifest.
// NOTE: field order matters — positional composite literals of this type exist
// elsewhere in the package.
type manifestContents struct {
	vers  string    // Noms format version string
	lock  addr      // optimistic-concurrency lock value compared on Update
	root  hash.Hash // current root hash of the store
	gcGen addr      // GC generation marker (semantics defined elsewhere in this package)
	specs []tableSpec

	// An appendix is a list of |tableSpecs| that track an auxiliary collection of
	// table files used _only_ for query performance optimizations. These appendix |tableSpecs| can be safely
	// managed with nbs.UpdateManifestWithAppendix, however generation and removal of the actual table files
	// the appendix |tableSpecs| reference is done manually. All appendix |tableSpecs| will be prepended to the
	// manifest.specs across manifest updates.
	appendix []tableSpec
}
func (mc manifestContents) GetVersion() string {
@@ -127,17 +145,62 @@ func (mc manifestContents) NumTableSpecs() int {
return len(mc.specs)
}
// NumAppendixSpecs returns the number of table specs in the manifest appendix.
func (mc manifestContents) NumAppendixSpecs() int {
	return len(mc.appendix)
}
// GetTableSpecInfo returns the i'th table spec as a TableSpecInfo.
func (mc manifestContents) GetTableSpecInfo(i int) TableSpecInfo {
	return mc.specs[i]
}
// GetAppendixTableSpecInfo returns the i'th appendix table spec as a TableSpecInfo.
func (mc manifestContents) GetAppendixTableSpecInfo(i int) TableSpecInfo {
	return mc.appendix[i]
}
// getSpec returns the i'th table spec.
func (mc manifestContents) getSpec(i int) tableSpec {
	return mc.specs[i]
}
// getAppendixSpec returns the i'th appendix table spec.
func (mc manifestContents) getAppendixSpec(i int) tableSpec {
	return mc.appendix[i]
}
// removeAppendixSpecs returns a copy of |mc| whose specs no longer include any
// spec that appears in the appendix (the appendix field itself is cleared),
// together with the list of specs that were removed. vers, lock, root and
// gcGen are carried over unchanged.
//
// If the appendix is empty, |mc| is returned as-is with a nil removed list.
func (mc manifestContents) removeAppendixSpecs() (manifestContents, []tableSpec) {
	// len() on a nil slice is 0, so a separate nil check is redundant.
	if len(mc.appendix) == 0 {
		return mc, nil
	}

	appendixSet := mc.getAppendixSet()
	// Pre-size both partitions: |filtered| can hold at most all of mc.specs,
	// |removed| at most one entry per appendix spec.
	filtered := make([]tableSpec, 0, len(mc.specs))
	removed := make([]tableSpec, 0, len(mc.appendix))
	for _, s := range mc.specs {
		if _, ok := appendixSet[s.name]; ok {
			removed = append(removed, s)
		} else {
			filtered = append(filtered, s)
		}
	}

	return manifestContents{
		vers:  mc.vers,
		lock:  mc.lock,
		root:  mc.root,
		gcGen: mc.gcGen,
		specs: filtered,
	}, removed
}
func (mc manifestContents) getSpecSet() (ss map[addr]struct{}) {
ss = make(map[addr]struct{}, len(mc.specs))
for _, ts := range mc.specs {
return toSpecSet(mc.specs)
}
func (mc manifestContents) getAppendixSet() (ss map[addr]struct{}) {
return toSpecSet(mc.appendix)
}
func toSpecSet(specs []tableSpec) (ss map[addr]struct{}) {
ss = make(map[addr]struct{}, len(specs))
for _, ts := range specs {
ss[ts.name] = struct{}{}
}
return ss

View File

@@ -81,3 +81,8 @@ func (nbsMW *NBSMetricWrapper) GetManyCompressed(ctx context.Context, hashes has
atomic.AddInt32(&nbsMW.TotalChunkGets, int32(len(hashes)))
return nbsMW.nbs.GetManyCompressed(ctx, hashes, found)
}
// GetManifestStorageVersion returns the storage version of the manifest.
// It delegates directly to the wrapped NomsBlockStore.
func (nbsMW *NBSMetricWrapper) GetManifestStorageVersion(ctx context.Context) (string, error) {
	return nbsMW.nbs.GetManifestStorageVersion(ctx)
}

View File

@@ -356,7 +356,7 @@ func interloperWrite(fm *fakeManifest, p tablePersister, rootChunk []byte, chunk
return hash.Hash{}, nil, err
}
fm.set(constants.NomsVersion, newLock, newRoot, []tableSpec{{mustAddr(src.hash()), uint32(len(chunks))}})
fm.set(constants.NomsVersion, newLock, newRoot, []tableSpec{{mustAddr(src.hash()), uint32(len(chunks))}}, nil)
return
}
@@ -406,15 +406,19 @@ func (fm *fakeManifest) Update(ctx context.Context, lastLock addr, newContents m
fm.mu.Lock()
defer fm.mu.Unlock()
if fm.contents.lock == lastLock {
fm.contents = manifestContents{newContents.vers, newContents.lock, newContents.root, addr(hash.Hash{}), nil}
fm.contents = manifestContents{newContents.vers, newContents.lock, newContents.root, addr(hash.Hash{}), nil, nil}
fm.contents.specs = make([]tableSpec, len(newContents.specs))
copy(fm.contents.specs, newContents.specs)
if newContents.appendix != nil && len(newContents.appendix) > 0 {
fm.contents.appendix = make([]tableSpec, len(newContents.appendix))
copy(fm.contents.appendix, newContents.appendix)
}
}
return fm.contents, nil
}
func (fm *fakeManifest) set(version string, lock addr, root hash.Hash, specs []tableSpec) {
fm.contents = manifestContents{version, lock, root, addr(hash.Hash{}), specs}
func (fm *fakeManifest) set(version string, lock addr, root hash.Hash, specs, appendix []tableSpec) {
fm.contents = manifestContents{version, lock, root, addr(hash.Hash{}), specs, appendix}
}
func newFakeTableSet() tableSet {

View File

@@ -224,6 +224,14 @@ func (nbs *NomsBlockStore) UpdateManifest(ctx context.Context, updates map[hash.
return contents, nil
}
// ensure we dont drop existing appendices
if contents.appendix != nil && len(contents.appendix) > 0 {
contents, err = fromManifestAppendixOptionNewContents(contents, contents.appendix, ManifestAppendixOption_Set)
if err != nil {
return manifestContents{}, err
}
}
var updatedContents manifestContents
updatedContents, err = nbs.mm.Update(ctx, contents.lock, contents, &stats, nil)
@@ -248,6 +256,129 @@ func (nbs *NomsBlockStore) UpdateManifest(ctx context.Context, updates map[hash.
return updatedContents, nil
}
// UpdateManifestWithAppendix merges the given table-file specs (addr -> chunk
// count) into the manifest appendix according to |option|, commits the new
// contents upstream, and rebases the store's table set onto the result. The
// returned ManifestInfo reflects the post-update upstream contents.
func (nbs *NomsBlockStore) UpdateManifestWithAppendix(ctx context.Context, updates map[hash.Hash]uint32, option ManifestAppendixOption) (mi ManifestInfo, err error) {
	nbs.mm.LockForUpdate()
	defer func() {
		// Never clobber an earlier error with the unlock result.
		unlockErr := nbs.mm.UnlockForUpdate()

		if err == nil {
			err = unlockErr
		}
	}()

	nbs.mu.Lock()
	defer nbs.mu.Unlock()

	var stats Stats
	var ok bool
	var contents manifestContents
	// Fetch the current upstream manifest; if none exists yet, start from
	// empty contents carrying only the store's version string.
	ok, contents, err = nbs.mm.Fetch(ctx, &stats)
	if err != nil {
		return manifestContents{}, err
	} else if !ok {
		contents = manifestContents{vers: nbs.upstream.vers}
	}

	currAppendixSpecs := contents.getAppendixSet()
	appendixSpecs := make([]tableSpec, 0)
	var addCount int
	// NOTE(review): |updates| is a map, so the order of |appendixSpecs| is
	// nondeterministic across calls — confirm nothing depends on spec order.
	for h, count := range updates {
		a := addr(h)
		if option == ManifestAppendixOption_Set {
			appendixSpecs = append(appendixSpecs, tableSpec{a, count})
		} else {
			// For non-Set options, only collect specs not already present in
			// the appendix; |addCount| tracks whether anything is new.
			if _, ok := currAppendixSpecs[a]; !ok {
				addCount++
				appendixSpecs = append(appendixSpecs, tableSpec{a, count})
			}
		}
	}

	// Appending nothing new is a no-op. Set always proceeds so that it can
	// replace — or clear — the existing appendix.
	if addCount == 0 && option != ManifestAppendixOption_Set {
		return contents, nil
	}

	// Unsupported options surface here as ErrUnsupportedManifestAppendixOption.
	contents, err = fromManifestAppendixOptionNewContents(contents, appendixSpecs, option)
	if err != nil {
		return manifestContents{}, err
	}

	var updatedContents manifestContents
	updatedContents, err = nbs.mm.Update(ctx, contents.lock, contents, &stats, nil)
	if err != nil {
		return manifestContents{}, err
	}

	newTables, err := nbs.tables.Rebase(ctx, contents.specs, nbs.stats)
	if err != nil {
		return manifestContents{}, err
	}

	// Swap in the rebased table set and close the one it replaces.
	nbs.upstream = updatedContents
	oldTables := nbs.tables
	nbs.tables = newTables
	err = oldTables.Close()
	if err != nil {
		return manifestContents{}, err
	}

	return updatedContents, nil
}
// fromManifestAppendixOptionNewContents derives new manifest contents from
// |upstream| plus |appendixSpecs|, per |option|. Appendix specs always end up
// prepended to contents.specs; unknown options yield
// ErrUnsupportedManifestAppendixOption.
func fromManifestAppendixOptionNewContents(upstream manifestContents, appendixSpecs []tableSpec, option ManifestAppendixOption) (manifestContents, error) {
	contents, upstreamAppendixSpecs := upstream.removeAppendixSpecs()

	switch option {
	case ManifestAppendixOption_Append:
		// specs: new appendix specs, then the upstream appendix specs, then
		// the remaining (non-appendix) upstream specs.
		merged := make([]tableSpec, 0, len(appendixSpecs)+len(upstreamAppendixSpecs)+len(contents.specs))
		merged = append(merged, appendixSpecs...)
		merged = append(merged, upstreamAppendixSpecs...)
		merged = append(merged, contents.specs...)
		contents.specs = merged

		// appendix: upstream entries first, then the new ones.
		appendix := make([]tableSpec, 0, len(upstreamAppendixSpecs)+len(appendixSpecs))
		appendix = append(appendix, upstreamAppendixSpecs...)
		appendix = append(appendix, appendixSpecs...)
		contents.appendix = appendix
		return contents, nil

	case ManifestAppendixOption_Set:
		if len(appendixSpecs) < 1 {
			// Setting an empty appendix: the upstream appendix specs were
			// already stripped out of contents.specs above.
			return contents, nil
		}

		// specs: the new appendix specs ahead of the non-appendix upstream
		// specs; all upstream appendix specs are dropped.
		merged := make([]tableSpec, 0, len(appendixSpecs)+len(contents.specs))
		merged = append(merged, appendixSpecs...)
		merged = append(merged, contents.specs...)
		contents.specs = merged

		// appendix: exactly the new specs.
		contents.appendix = append([]tableSpec{}, appendixSpecs...)
		return contents, nil

	default:
		return manifestContents{}, ErrUnsupportedManifestAppendixOption
	}
}
// GetManifestStorageVersion returns the manifest storage version or an error if the operation
// is not supported (i.e. when the underlying manifest does not implement
// ManifestInfo, chunks.ErrUnsupportedOperation is returned).
func (nbs *NomsBlockStore) GetManifestStorageVersion(ctx context.Context) (version string, err error) {
	info, ok := nbs.mm.m.(ManifestInfo)
	if !ok {
		return "", chunks.ErrUnsupportedOperation
	}

	// possibly unnecessary
	nbs.mm.LockForUpdate()
	defer func() {
		// Match the unlock convention used elsewhere in this file: keep any
		// earlier error instead of unconditionally overwriting it with the
		// unlock result.
		unlockErr := nbs.mm.UnlockForUpdate()
		if err == nil {
			err = unlockErr
		}
	}()
	nbs.mu.Lock()
	defer nbs.mu.Unlock()

	return info.GetVersion(), nil
}
func NewAWSStoreWithMMapIndex(ctx context.Context, nbfVerStr string, table, ns, bucket string, s3 s3svc, ddb ddbsvc, memTableSize uint64) (*NomsBlockStore, error) {
cacheOnce.Do(makeGlobalCaches)
readRateLimiter := make(chan struct{}, 32)
@@ -890,6 +1021,7 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has
if nbs.c.ConjoinRequired(nbs.tables) {
var err error
newUpstream, err := nbs.c.Conjoin(ctx, nbs.upstream, nbs.mm, nbs.p, nbs.stats)
if err != nil {
@@ -914,17 +1046,34 @@ func (nbs *NomsBlockStore) updateManifest(ctx context.Context, current, last has
}
specs, err := nbs.tables.ToSpecs()
if err != nil {
return err
}
// ensure we dont drop appendices on commit
var appendixSpecs []tableSpec
if nbs.upstream.appendix != nil && len(nbs.upstream.appendix) > 0 {
appendixSet := nbs.upstream.getAppendixSet()
filtered := make([]tableSpec, 0, len(specs))
for _, s := range specs {
if _, present := appendixSet[s.name]; !present {
filtered = append(filtered, s)
}
}
_, appendixSpecs = nbs.upstream.removeAppendixSpecs()
prepended := append([]tableSpec{}, appendixSpecs...)
specs = append(prepended, filtered...)
}
newContents := manifestContents{
vers: nbs.upstream.vers,
root: current,
lock: generateLockHash(current, specs),
gcGen: nbs.upstream.gcGen,
specs: specs,
vers: nbs.upstream.vers,
root: current,
lock: generateLockHash(current, specs),
gcGen: nbs.upstream.gcGen,
specs: specs,
appendix: appendixSpecs,
}
upstream, err := nbs.mm.Update(ctx, nbs.upstream.lock, newContents, nbs.stats, nil)
@@ -1037,6 +1186,53 @@ func (nbs *NomsBlockStore) Sources(ctx context.Context) (hash.Hash, []TableFile,
return contents.GetRoot(), tableFiles, nil
}
// AppendixSources retrieves the current root hash, and a list of all the table files in the manifest appendix
func (nbs *NomsBlockStore) AppendixSources(ctx context.Context) (hash.Hash, []TableFile, error) {
	nbs.mu.Lock()
	defer nbs.mu.Unlock()

	parseStats := &Stats{}

	exists, contents, err := nbs.mm.m.ParseIfExists(ctx, parseStats, nil)
	if err != nil {
		return hash.Hash{}, nil, err
	}
	if !exists {
		// No manifest yet: empty root, no table files, no error.
		return hash.Hash{}, nil, nil
	}

	sourcesByAddr, err := nbs.chunkSourcesByAddr()
	if err != nil {
		return hash.Hash{}, nil, err
	}

	// Build one TableFile per appendix spec, resolving each spec against the
	// store's open chunk sources.
	var appendixFiles []TableFile
	for i, n := 0, contents.NumAppendixSpecs(); i < n; i++ {
		spec := contents.getAppendixSpec(i)
		src, ok := sourcesByAddr[spec.name]
		if !ok {
			return hash.Hash{}, nil, errors.New("manifest referenced table file for which there is no chunkSource.")
		}
		// |src| and |spec| are fresh per iteration, so the closure below
		// captures the right source for this file.
		appendixFiles = append(appendixFiles, tableFile{
			info: spec,
			open: func(ctx context.Context) (io.ReadCloser, error) {
				rdr, err := src.reader(ctx)
				if err != nil {
					return nil, err
				}
				return ioutil.NopCloser(rdr), nil
			},
		})
	}
	return contents.GetRoot(), appendixFiles, nil
}
func (nbs *NomsBlockStore) Size(ctx context.Context) (uint64, error) {
nbs.mu.Lock()
defer nbs.mu.Unlock()

View File

@@ -280,3 +280,244 @@ func TestNBSCopyGC(t *testing.T) {
assert.Equal(t, chunks.EmptyChunk, out)
}
}
// persistTableFileSources builds |numTableFiles| in-memory tables (table i
// holds i+1 chunks), persists each through |p|, and returns the chunk count
// keyed by file id plus the file ids in creation order.
func persistTableFileSources(t *testing.T, p tablePersister, numTableFiles int) (map[hash.Hash]uint32, []hash.Hash) {
	countsByID := make(map[hash.Hash]uint32, numTableFiles)
	ids := make([]hash.Hash, numTableFiles)

	for i := 0; i < numTableFiles; i++ {
		// Chunk payloads are "<file>:<chunk>" so every chunk is distinct.
		chunkData := make([][]byte, 0, i+1)
		for j := 0; j <= i; j++ {
			chunkData = append(chunkData, []byte(fmt.Sprintf("%d:%d", i, j)))
		}

		_, addr, err := buildTable(chunkData)
		require.NoError(t, err)

		fileIDHash, ok := hash.MaybeParse(addr.String())
		require.True(t, ok)

		countsByID[fileIDHash] = uint32(i + 1)
		ids[i] = fileIDHash

		_, err = p.Persist(context.Background(), createMemTable(chunkData), nil, &Stats{})
		require.NoError(t, err)
	}
	return countsByID, ids
}
// prepStore creates a store backed by a fake manifest and persister, commits a
// single "root" chunk, and verifies the initial manifest state: exactly one
// table spec and no appendix specs. It returns the fakes, the store, a Stats,
// and the committed root chunk. Callers are responsible for closing the store.
func prepStore(ctx context.Context, t *testing.T, assert *assert.Assertions) (*fakeManifest, tablePersister, *NomsBlockStore, *Stats, chunks.Chunk) {
	fm, p, store := makeStoreWithFakes(t)
	// A fresh store has an empty root.
	h, err := store.Root(ctx)
	require.NoError(t, err)
	assert.Equal(hash.Hash{}, h)

	rootChunk := chunks.NewChunk([]byte("root"))
	rootHash := rootChunk.Hash()
	err = store.Put(ctx, rootChunk)
	require.NoError(t, err)

	success, err := store.Commit(ctx, rootHash, hash.Hash{})
	require.NoError(t, err)
	if assert.True(success) {
		has, err := store.Has(ctx, rootHash)
		require.NoError(t, err)
		assert.True(has)
		h, err := store.Root(ctx)
		require.NoError(t, err)
		assert.Equal(rootHash, h)
	}

	stats := &Stats{}
	_, upstream, err := fm.ParseIfExists(ctx, stats, nil)
	require.NoError(t, err)

	// expect single spec for initial commit
	assert.Equal(1, upstream.NumTableSpecs())

	// Start with no appendixes
	assert.Equal(0, upstream.NumAppendixSpecs())
	return fm, p, store, stats, rootChunk
}
// TestNBSUpdateManifestWithAppendixOptions drives UpdateManifestWithAppendix
// through each ManifestAppendixOption. NOTE: the subtests share one store and
// run in order, so the expected spec counts are cumulative across cases
// (prepStore leaves one spec committed before the table runs).
func TestNBSUpdateManifestWithAppendixOptions(t *testing.T) {
	assert := assert.New(t)
	ctx := context.Background()
	_, p, store, _, _ := prepStore(ctx, t, assert)
	defer store.Close()

	// persist tablefiles to tablePersister
	appendixUpdates, appendixIds := persistTableFileSources(t, p, 4)

	tests := []struct {
		description                   string
		option                        ManifestAppendixOption
		appendixSpecIds               []hash.Hash
		expectedNumberOfSpecs         int
		expectedNumberOfAppendixSpecs int
		expectedError                 error
	}{
		{
			// Zero-valued option (Unspecified) is rejected.
			description:     "should error on unsupported appendix option",
			appendixSpecIds: appendixIds[:1],
			expectedError:   ErrUnsupportedManifestAppendixOption,
		},
		{
			// 1 spec from prepStore + 2 appended appendix specs.
			description:                   "should append to appendix",
			option:                        ManifestAppendixOption_Append,
			appendixSpecIds:               appendixIds[:2],
			expectedNumberOfSpecs:         3,
			expectedNumberOfAppendixSpecs: 2,
		},
		{
			// Set replaces the 2-entry appendix with a single spec.
			description:                   "should replace appendix",
			option:                        ManifestAppendixOption_Set,
			appendixSpecIds:               appendixIds[3:],
			expectedNumberOfSpecs:         2,
			expectedNumberOfAppendixSpecs: 1,
		},
		{
			// Set with no specs clears the appendix entirely.
			description:                   "should set appendix to nil",
			option:                        ManifestAppendixOption_Set,
			appendixSpecIds:               []hash.Hash{},
			expectedNumberOfSpecs:         1,
			expectedNumberOfAppendixSpecs: 0,
		},
	}

	for _, test := range tests {
		t.Run(test.description, func(t *testing.T) {
			updates := make(map[hash.Hash]uint32)
			for _, id := range test.appendixSpecIds {
				updates[id] = appendixUpdates[id]
			}
			if test.expectedError == nil {
				info, err := store.UpdateManifestWithAppendix(ctx, updates, test.option)
				require.NoError(t, err)
				assert.Equal(test.expectedNumberOfSpecs, info.NumTableSpecs())
				assert.Equal(test.expectedNumberOfAppendixSpecs, info.NumAppendixSpecs())
			} else {
				_, err := store.UpdateManifestWithAppendix(ctx, updates, test.option)
				assert.Equal(test.expectedError, err)
			}
		})
	}
}
// TestNBSUpdateManifestWithAppendix verifies that appending a table file to
// the appendix both grows the appendix and prepends the same spec to the
// manifest's specs.
func TestNBSUpdateManifestWithAppendix(t *testing.T) {
	assert := assert.New(t)
	ctx := context.Background()
	fm, p, store, stats, _ := prepStore(ctx, t, assert)
	defer store.Close()

	_, upstream, err := fm.ParseIfExists(ctx, stats, nil)
	require.NoError(t, err)

	// persist tablefile to tablePersister
	appendixUpdates, appendixIds := persistTableFileSources(t, p, 1)

	// Ensure appendix (and specs) are updated
	appendixFileId := appendixIds[0]
	updates := map[hash.Hash]uint32{appendixFileId: appendixUpdates[appendixFileId]}
	newContents, err := store.UpdateManifestWithAppendix(ctx, updates, ManifestAppendixOption_Append)
	require.NoError(t, err)
	assert.Equal(upstream.NumTableSpecs()+1, newContents.NumTableSpecs())
	assert.Equal(1, newContents.NumAppendixSpecs())

	// The appendix spec must also be the first manifest spec.
	assert.Equal(newContents.GetTableSpecInfo(0), newContents.GetAppendixTableSpecInfo(0))
}
// TestNBSUpdateManifestRetainsAppendix verifies that plain UpdateManifest
// calls do not disturb the manifest appendix, and that appendix specs remain
// prepended to the manifest's spec list across successive updates.
func TestNBSUpdateManifestRetainsAppendix(t *testing.T) {
	assert := assert.New(t)
	ctx := context.Background()

	fm, p, store, stats, _ := prepStore(ctx, t, assert)
	defer store.Close()

	_, upstream, err := fm.ParseIfExists(ctx, stats, nil)
	require.NoError(t, err)

	// Persist three table file sources to draw from below.
	chunkCounts, ids := persistTableFileSources(t, p, 3)

	// A plain manifest update adds a spec; the appendix is still empty.
	first := ids[0]
	contents, err := store.UpdateManifest(ctx, map[hash.Hash]uint32{first: chunkCounts[first]})
	require.NoError(t, err)
	assert.Equal(1+upstream.NumTableSpecs(), contents.NumTableSpecs())
	assert.Equal(0, upstream.NumAppendixSpecs())

	_, upstream, err = fm.ParseIfExists(ctx, stats, nil)
	require.NoError(t, err)

	// Appending to the appendix grows both specs and appendix, with the
	// appendix spec occupying the first spec slot.
	second := ids[1]
	appendixSpecs := map[hash.Hash]uint32{second: chunkCounts[second]}
	contents, err = store.UpdateManifestWithAppendix(ctx, appendixSpecs, ManifestAppendixOption_Append)
	require.NoError(t, err)
	assert.Equal(1+upstream.NumTableSpecs(), contents.NumTableSpecs())
	assert.Equal(1+upstream.NumAppendixSpecs(), contents.NumAppendixSpecs())
	assert.Equal(contents.GetAppendixTableSpecInfo(0), contents.GetTableSpecInfo(0))

	_, upstream, err = fm.ParseIfExists(ctx, stats, nil)
	require.NoError(t, err)

	// A further plain manifest update must retain the appendix and keep
	// its specs prepended to |manifestContents.specs|.
	third := ids[2]
	contents, err = store.UpdateManifest(ctx, map[hash.Hash]uint32{third: chunkCounts[third]})
	require.NoError(t, err)
	assert.Equal(1+upstream.NumTableSpecs(), contents.NumTableSpecs())
	assert.Equal(upstream.NumAppendixSpecs(), contents.NumAppendixSpecs())
	assert.Equal(contents.GetAppendixTableSpecInfo(0), contents.GetTableSpecInfo(0))
}
// TestNBSCommitRetainsAppendix verifies that committing a new root chunk
// preserves a manifest appendix established beforehand.
func TestNBSCommitRetainsAppendix(t *testing.T) {
	assert := assert.New(t)
	ctx := context.Background()

	fm, p, store, stats, rootChunk := prepStore(ctx, t, assert)
	defer store.Close()

	_, upstream, err := fm.ParseIfExists(ctx, stats, nil)
	require.NoError(t, err)

	// Persist one table file and add it to the appendix.
	chunkCounts, ids := persistTableFileSources(t, p, 1)
	fileId := ids[0]
	appendixSpecs := map[hash.Hash]uint32{fileId: chunkCounts[fileId]}
	contents, err := store.UpdateManifestWithAppendix(ctx, appendixSpecs, ManifestAppendixOption_Append)
	require.NoError(t, err)
	assert.Equal(1+upstream.NumTableSpecs(), contents.NumTableSpecs())
	assert.Equal(1, contents.NumAppendixSpecs())

	_, upstream, err = fm.ParseIfExists(ctx, stats, nil)
	require.NoError(t, err)

	// Commit a second root on top of the original.
	newRootChunk := chunks.NewChunk([]byte("newer root"))
	newRoot := newRootChunk.Hash()
	err = store.Put(ctx, newRootChunk)
	require.NoError(t, err)

	success, err := store.Commit(ctx, newRoot, rootChunk.Hash())
	require.NoError(t, err)
	if assert.True(success) {
		root, err := store.Root(ctx)
		require.NoError(t, err)
		assert.Equal(newRoot, root)

		// Both the old and new roots remain addressable.
		has, err := store.Has(context.Background(), rootChunk.Hash())
		require.NoError(t, err)
		assert.True(has)

		has, err = store.Has(context.Background(), newRoot)
		require.NoError(t, err)
		assert.True(has)
	}

	// The commit must not have blown away the appendix.
	_, postCommit, err := fm.ParseIfExists(ctx, stats, nil)
	require.NoError(t, err)
	assert.Equal(1+upstream.NumTableSpecs(), postCommit.NumTableSpecs())
	assert.Equal(upstream.NumAppendixSpecs(), postCommit.NumAppendixSpecs())
	assert.Equal(upstream.GetAppendixTableSpecInfo(0), postCommit.GetTableSpecInfo(0))
	assert.Equal(postCommit.GetTableSpecInfo(0), postCommit.GetAppendixTableSpecInfo(0))
}

View File

@@ -25,6 +25,7 @@ import (
"context"
"errors"
"fmt"
"io"
"golang.org/x/sync/errgroup"
@@ -617,3 +618,50 @@ func (m Map) String() string {
// HumanReadableString is not implemented for Map; calling it is a
// programmer error (the panic marks the path as unreachable).
func (m Map) HumanReadableString() string {
panic("unreachable")
}
// VisitMapLevelOrder walks m's prolly-tree breadth-first and writes the hash
// of each internal (meta) node's child chunk to w, one hash per line, each
// terminated by a newline. It returns the total number of bytes written to
// w. On error it returns the byte count written so far along with the error,
// following the io.Writer convention (the original returned 0 on error,
// discarding the count of bytes already written).
func VisitMapLevelOrder(w io.Writer, m Map) (total int, err error) {
	level := []Map{m}
	for len(level) > 0 {
		var next []Map
		for _, cur := range level {
			metaSeq, ok := cur.orderedSequence.(metaSequence)
			if !ok {
				// Leaf sequences have no child chunks; nothing to emit.
				continue
			}
			ts, err := metaSeq.tuples()
			if err != nil {
				return total, err
			}
			for _, t := range ts {
				r, err := t.ref()
				if err != nil {
					return total, err
				}
				// Count whatever was written even if the write failed partway.
				n, err := w.Write([]byte(r.TargetHash().String() + "\n"))
				total += n
				if err != nil {
					return total, err
				}
				// Load the child map for the next level of the traversal.
				v, err := r.TargetValue(context.Background(), cur.valueReadWriter())
				if err != nil {
					return total, err
				}
				next = append(next, v.(Map))
			}
		}
		level = next
	}
	return total, nil
}

View File

@@ -176,6 +176,7 @@ message ClientRepoFormat {
// ListTableFilesRequest asks for the table files of a repository.
message ListTableFilesRequest {
RepoId repo_id = 1;
// If true, presumably restricts the listing to table files referenced by
// the manifest appendix — confirm against the server implementation.
bool appendix_only = 2;
}
message TableFileInfo {
@@ -189,10 +190,23 @@ message ListTableFilesResponse {
repeated TableFileInfo table_file_info = 2;
}
// ManifestAppendixOption selects how the table files supplied in an
// AddTableFilesRequest are applied to the manifest appendix (see the
// comment on AddTableFilesRequest.appendix_option).
enum ManifestAppendixOption {
// Default; no appendix handling requested.
MANIFEST_APPENDIX_OPTION_UNSPECIFIED = 0;
// The appendix becomes exactly the supplied list of table files.
MANIFEST_APPENDIX_OPTION_SET = 1;
// The supplied table files are added to the existing appendix.
MANIFEST_APPENDIX_OPTION_APPEND = 2;
}
// AddTableFilesRequest registers chunk table files with a repository's
// manifest, optionally writing them to the manifest appendix as well.
message AddTableFilesRequest {
RepoId repo_id = 1;
ClientRepoFormat client_repo_format = 2;
repeated ChunkTableInfo chunk_table_info = 3;
// If set, this is a write for the manifest appendix, not just the manifest table file specs.
// The table files appearing in `chunk_table_info` are added to `specs` and are also set
// in the manifest appendix. If `appendix_option` is `SET`, then the value of the appendix
// becomes the full list provided in `chunk_table_info` and any prior specs in the appendix
// are removed from the manifest specs. If `appendix_option` is `APPEND`, then the
// supplied table files are added to the appendix and to specs.
ManifestAppendixOption appendix_option = 4;
}
message AddTableFilesResponse {