Merge pull request #10596 from dolthub/zachmu/kill-ld1

[no-release-notes] removed ld_1 binary format constant, which means manifests containing it will no longer parse
This commit is contained in:
Zach Musgrave
2026-02-28 11:41:08 -08:00
committed by GitHub
64 changed files with 240 additions and 605 deletions
+1 -16
View File
@@ -31,7 +31,6 @@ import (
"github.com/dolthub/dolt/go/libraries/utils/argparser"
config "github.com/dolthub/dolt/go/libraries/utils/config"
"github.com/dolthub/dolt/go/store/nbs"
"github.com/dolthub/dolt/go/store/types"
eventsapi "github.com/dolthub/eventsapi_schema/dolt/services/eventsapi/v1alpha1"
)
@@ -120,7 +119,7 @@ type CommandDocumentationContent struct {
Synopsis []string
}
//type CommandDocumentation
// type CommandDocumentation
// RepoNotRequiredCommand is an optional interface that commands can implement if the command can be run without
// the current directory being a valid Dolt data repository. Any commands not implementing this interface are
@@ -144,12 +143,6 @@ type HiddenCommand interface {
Hidden() bool
}
type FormatGatedCommand interface {
Command
GatedForNBF(nbf *types.NomsBinFormat) bool
}
// SubCommandHandler is a command implementation which holds subcommands which can be called
type SubCommandHandler struct {
name string
@@ -259,14 +252,6 @@ func (hc SubCommandHandler) handleCommand(ctx context.Context, commandStr string
defer stop()
}
fgc, ok := cmd.(FormatGatedCommand)
if ok && dEnv.DoltDB(ctx) != nil && fgc.GatedForNBF(dEnv.DoltDB(ctx).Format()) {
vs := dEnv.DoltDB(ctx).Format().VersionString()
err := fmt.Sprintf("Dolt command '%s' is not supported in format %s", cmd.Name(), vs)
PrintErrln(color.YellowString(err))
return 1
}
ret := cmd.Exec(ctx, commandStr, args, dEnv, cliCtx)
if evt != nil {
@@ -31,7 +31,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/merge"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
"github.com/dolthub/dolt/go/libraries/utils/argparser"
"github.com/dolthub/dolt/go/store/types"
)
var verifyConstraintsDocs = cli.CommandDocumentationContent{
@@ -54,10 +53,6 @@ func (cmd VerifyConstraintsCmd) Description() string {
return "Command to verify that the constraints on the given table(s) are satisfied."
}
func (cmd VerifyConstraintsCmd) GatedForNBF(nbf *types.NomsBinFormat) bool {
return false
}
func (cmd VerifyConstraintsCmd) Docs() *cli.CommandDocumentation {
ap := cmd.ArgParser()
return cli.NewCommandDocumentation(verifyConstraintsDocs, ap)
+3 -9
View File
@@ -21,8 +21,6 @@ import (
"math"
"path/filepath"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/cmd/dolt/cli"
"github.com/dolthub/dolt/go/cmd/dolt/errhand"
"github.com/dolthub/dolt/go/libraries/doltcore/dbfactory"
@@ -58,10 +56,6 @@ func (cmd InspectCmd) Description() string {
return "Inspects a Dolt Database and collects stats."
}
func (cmd InspectCmd) GatedForNBF(nbf *types.NomsBinFormat) bool {
return types.IsFormat_DOLT(nbf)
}
func (cmd InspectCmd) Docs() *cli.CommandDocumentation {
return nil
}
@@ -144,7 +138,7 @@ func (cmd InspectCmd) processTableFile(ctx context.Context, path string, fs file
sum = &chunkIndexSummary{
file: path,
count: uint32(len(prefixes)),
//errs: make([]float64, 0, len(prefixes)),
// errs: make([]float64, 0, len(prefixes)),
}
for i, prefix := range prefixes {
@@ -156,7 +150,7 @@ func (cmd InspectCmd) processTableFile(ctx context.Context, path string, fs file
type chunkIndexSummary struct {
file string
count uint32
//errs []float64
// errs []float64
sumErr float64
maxErr float64
}
@@ -170,7 +164,7 @@ func (s *chunkIndexSummary) addPrefix(i int, prefix uint64) {
g := nbs.GuessPrefixOrdinal(prefix, s.count)
guessErr := math.Abs(float64(i - g))
//s.errs = append(s.errs, guessErr)
// s.errs = append(s.errs, guessErr)
s.sumErr += guessErr
if guessErr > s.maxErr {
s.maxErr = guessErr
-4
View File
@@ -57,10 +57,6 @@ func (cmd ReadTablesCmd) Description() string {
return readTablesDocs.ShortDesc
}
func (cmd ReadTablesCmd) GatedForNBF(nbf *types.NomsBinFormat) bool {
return types.IsFormat_DOLT(nbf)
}
func (cmd ReadTablesCmd) Docs() *cli.CommandDocumentation {
ap := cmd.ArgParser()
return cli.NewCommandDocumentation(readTablesDocs, ap)
-4
View File
@@ -57,10 +57,6 @@ func (cmd RootsCmd) Description() string {
return "Displays store root values (or potential store root values) that we find in the current database."
}
func (cmd RootsCmd) GatedForNBF(nbf *types.NomsBinFormat) bool {
return false
}
func (cmd RootsCmd) Docs() *cli.CommandDocumentation {
return nil
}
@@ -33,7 +33,6 @@ import (
"github.com/dolthub/dolt/go/libraries/utils/argparser"
"github.com/dolthub/dolt/go/libraries/utils/config"
"github.com/dolthub/dolt/go/store/datas"
"github.com/dolthub/dolt/go/store/types"
)
var copyTagsDocs = cli.CommandDocumentationContent{
@@ -196,9 +195,6 @@ func (cmd CopyTagsCmd) validateArgs(ctx context.Context, commandStr string, args
apr := cli.ParseArgsOrDie(ap, args, help)
doltDB := dEnv.DoltDB(ctx)
if !types.IsFormat_DOLT(doltDB.Format()) {
return "", nil, nil, nil, fmt.Errorf("copy-tags is only available for modern database storage formats")
}
if len(apr.Args) != 1 {
return "", nil, nil, nil, fmt.Errorf("must provide <from-branch>")
@@ -26,7 +26,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/env"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/utils/argparser"
"github.com/dolthub/dolt/go/store/types"
)
// TODO: Update tag should be migrated to call the new dolt_update_column_tag() stored procedure
@@ -69,11 +68,6 @@ func (cmd UpdateTagCmd) Exec(ctx context.Context, commandStr string, args []stri
help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, updateTagDocs, ap))
apr := cli.ParseArgsOrDie(ap, args, help)
if !types.IsFormat_DOLT(dEnv.DoltDB(ctx).Format()) {
verr := errhand.BuildDError("update-tag is only available in storage format __DOLT__").Build()
return commands.HandleVErrAndExitCode(verr, usage)
}
if len(apr.Args) != 3 {
verr := errhand.BuildDError("must provide <table> <column> <tag>").Build()
return commands.HandleVErrAndExitCode(verr, usage)
-2
View File
@@ -25,7 +25,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/store/prolly"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/store/val"
)
@@ -58,7 +57,6 @@ func Stat(ctx context.Context, ch chan DiffStatProgress, from, to durable.Index,
return fmt.Errorf("cannot perform a diff between keyless and keyed schema")
}
types.AssertFormat_DOLT(from.Format())
return diffProllyTrees(ctx, ch, keyless, from, to, fromSch, toSch)
}
+4 -4
View File
@@ -1357,7 +1357,7 @@ func (ddb *DoltDB) GetTuple(ctx context.Context, key string) ([]byte, bool, erro
return nil, false, nil
}
tup, err := datas.LoadTuple(ctx, ddb.Format(), ddb.NodeStore(), ddb.ValueReadWriter(), ds)
tup, err := datas.LoadTuple(ctx, ddb.NodeStore(), ddb.ValueReadWriter(), ds)
if err != nil {
return nil, false, err
}
@@ -2256,7 +2256,7 @@ func (ddb *DoltDB) AddStash(ctx context.Context, head *Commit, stash RootValue,
}
// this either creates new stash list dataset or loads current stash list dataset if exists.
stashList, err := datas.LoadStashList(ctx, nbf, ddb.NodeStore(), vrw, stashesDS)
stashList, err := datas.LoadStashList(ctx, ddb.NodeStore(), vrw, stashesDS)
if err != nil {
return err
}
@@ -2302,7 +2302,7 @@ func (ddb *DoltDB) GetStatistics(ctx context.Context) (prolly.Map, error) {
return prolly.Map{}, ErrNoStatistics
}
stats, err := datas.LoadStatistics(ctx, ddb.Format(), ddb.NodeStore(), ddb.ValueReadWriter(), ds)
stats, err := datas.LoadStatistics(ctx, ddb.NodeStore(), ddb.ValueReadWriter(), ds)
if err != nil {
return prolly.Map{}, err
}
@@ -2326,7 +2326,7 @@ func (ddb *DoltDB) RemoveStashAtIdx(ctx context.Context, idx int, stashName stri
}
vrw := ddb.ValueReadWriter()
stashList, err := datas.LoadStashList(ctx, ddb.Format(), ddb.NodeStore(), vrw, stashesDS)
stashList, err := datas.LoadStashList(ctx, ddb.NodeStore(), vrw, stashesDS)
if err != nil {
return err
}
@@ -48,21 +48,12 @@ func RefFromArtifactIndex(ctx context.Context, vrw types.ValueReadWriter, idx Ar
// NewEmptyArtifactIndex returns an ArtifactIndex with no artifacts.
func NewEmptyArtifactIndex(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, tableSch schema.Schema) (ArtifactIndex, error) {
switch vrw.Format() {
case types.Format_LD_1:
panic("Unsupported format " + vrw.Format().VersionString())
case types.Format_DOLT:
kd := tableSch.GetKeyDescriptor(ns)
m, err := prolly.NewArtifactMapFromTuples(ctx, ns, kd)
if err != nil {
return nil, err
}
return ArtifactIndexFromProllyMap(m), nil
default:
return nil, errNbfUnknown
kd := tableSch.GetKeyDescriptor(ns)
m, err := prolly.NewArtifactMapFromTuples(ctx, ns, kd)
if err != nil {
return nil, err
}
return ArtifactIndexFromProllyMap(m), nil
}
func ArtifactIndexFromProllyMap(m prolly.ArtifactMap) ArtifactIndex {
@@ -81,25 +72,16 @@ func artifactIndexFromAddr(ctx context.Context, vrw types.ValueReadWriter, ns tr
return nil, err
}
switch vrw.Format() {
case types.Format_LD_1:
panic("Unsupported format " + vrw.Format().VersionString())
case types.Format_DOLT:
root, fileId, err := shim.NodeFromValue(v)
if err != nil {
return nil, err
}
if fileId != serial.MergeArtifactsFileID {
return nil, fmt.Errorf("unexpected file ID for artifact node, expected %s, found %s", serial.MergeArtifactsFileID, fileId)
}
kd := tableSch.GetKeyDescriptor(ns)
m := prolly.NewArtifactMap(root, ns, kd)
return ArtifactIndexFromProllyMap(m), nil
default:
return nil, errNbfUnknown
root, fileId, err := shim.NodeFromValue(v)
if err != nil {
return nil, err
}
if fileId != serial.MergeArtifactsFileID {
return nil, fmt.Errorf("unexpected file ID for artifact node, expected %s, found %s", serial.MergeArtifactsFileID, fileId)
}
kd := tableSch.GetKeyDescriptor(ns)
m := prolly.NewArtifactMap(root, ns, kd)
return ArtifactIndexFromProllyMap(m), nil
}
type prollyArtifactIndex struct {
+14 -42
View File
@@ -80,16 +80,8 @@ type IndexSet interface {
// RefFromIndex persists the Index and returns a types.Ref to it.
func RefFromIndex(ctx context.Context, vrw types.ValueReadWriter, idx Index) (types.Ref, error) {
switch idx.Format() {
case types.Format_LD_1:
panic("Unsupported format " + idx.Format().VersionString())
case types.Format_DOLT:
b := shim.ValueFromMap(MapFromIndex(idx))
return vrw.WriteValue(ctx, b)
default:
return types.Ref{}, errNbfUnknown
}
b := shim.ValueFromMap(MapFromIndex(idx))
return vrw.WriteValue(ctx, b)
}
// indexFromRef reads the types.Ref from storage and returns the Index it points to.
@@ -104,20 +96,11 @@ func indexFromAddr(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeS
return nil, err
}
switch vrw.Format() {
case types.Format_LD_1:
panic("Unsupported format " + vrw.Format().VersionString())
case types.Format_DOLT:
m, err := shim.MapInterfaceFromValue(ctx, v, sch, ns, isKeylessTable)
if err != nil {
return nil, err
}
return IndexFromMapInterface(m), nil
default:
return nil, errNbfUnknown
m, err := shim.MapInterfaceFromValue(ctx, v, sch, ns, isKeylessTable)
if err != nil {
return nil, err
}
return IndexFromMapInterface(m), nil
}
// NewEmptyPrimaryIndex creates a new empty Index for use as the primary index in a table.
@@ -139,23 +122,14 @@ func NewEmptyIndexFromTableSchema(ctx context.Context, vrw types.ValueReadWriter
// newEmptyIndex returns an index with no rows.
func newEmptyIndex(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, sch schema.Schema, isVector bool, isKeylessSecondary bool) (Index, error) {
switch vrw.Format() {
case types.Format_LD_1:
panic("Unsupported format " + vrw.Format().VersionString())
case types.Format_DOLT:
kd, vd := sch.GetMapDescriptors(ns)
if isKeylessSecondary {
kd = prolly.AddHashToSchema(kd)
}
if isVector {
return NewEmptyProximityIndex(ctx, ns, kd, vd)
} else {
return NewEmptyProllyIndex(ctx, ns, kd, vd)
}
default:
return nil, errNbfUnknown
kd, vd := sch.GetMapDescriptors(ns)
if isKeylessSecondary {
kd = prolly.AddHashToSchema(kd)
}
if isVector {
return NewEmptyProximityIndex(ctx, ns, kd, vd)
} else {
return NewEmptyProllyIndex(ctx, ns, kd, vd)
}
}
@@ -353,8 +327,6 @@ func (i prollyIndex) DebugString(ctx context.Context, ns tree.NodeStore, schema
// NewIndexSet returns an empty IndexSet.
func NewIndexSet(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore) (IndexSet, error) {
types.AssertFormat_DOLT(vrw.Format())
emptyam, err := prolly.NewEmptyAddressMap(ns)
if err != nil {
return nil, err
+32 -44
View File
@@ -79,15 +79,11 @@ var sharePool = pool.NewBuffPool()
// NewTable returns a new Table.
func NewTable(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, sch schema.Schema, rows Index, indexes IndexSet, autoIncVal types.Value) (Table, error) {
types.AssertFormat_DOLT(vrw.Format())
return newDoltDevTable(ctx, vrw, ns, sch, rows, indexes, autoIncVal)
return newDoltTable(ctx, vrw, ns, sch, rows, indexes, autoIncVal)
}
// TableFromAddr deserializes the table in the chunk at |addr|.
func TableFromAddr(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, addr hash.Hash) (Table, error) {
types.AssertFormat_DOLT(vrw.Format())
val, err := vrw.MustReadValue(ctx, addr)
if err != nil {
return nil, err
@@ -107,17 +103,17 @@ func TableFromAddr(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeS
if err != nil {
return nil, err
}
return doltDevTable{vrw, ns, st}, nil
return doltTable{vrw, ns, st}, nil
}
// VrwFromTable returns the types.ValueReadWriter used by |t|.
func VrwFromTable(t Table) types.ValueReadWriter {
ddt := t.(doltDevTable)
ddt := t.(doltTable)
return ddt.vrw
}
func NodeStoreFromTable(t Table) tree.NodeStore {
ddt := t.(doltDevTable)
ddt := t.(doltTable)
return ddt.ns
}
@@ -125,13 +121,13 @@ func schemaFromAddr(ctx context.Context, vrw types.ValueReadWriter, addr hash.Ha
return encoding.UnmarshalSchemaAtAddr(ctx, vrw, addr)
}
type doltDevTable struct {
type doltTable struct {
vrw types.ValueReadWriter
ns tree.NodeStore
msg *serial.Table
}
func (t doltDevTable) DebugString(ctx context.Context, ns tree.NodeStore) string {
func (t doltTable) DebugString(ctx context.Context, ns tree.NodeStore) string {
rows, err := t.GetTableRows(ctx)
if err != nil {
panic(err)
@@ -145,7 +141,7 @@ func (t doltDevTable) DebugString(ctx context.Context, ns tree.NodeStore) string
return rows.DebugString(ctx, ns, schema)
}
var _ Table = doltDevTable{}
var _ Table = doltTable{}
type serialTableFields struct {
schema []byte
@@ -196,7 +192,7 @@ func (fields serialTableFields) write() (*serial.Table, error) {
return serial.TryGetRootAsTable(bs, serial.MessagePrefixSz)
}
func newDoltDevTable(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, sch schema.Schema, rows Index, indexes IndexSet, autoIncVal types.Value) (Table, error) {
func newDoltTable(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, sch schema.Schema, rows Index, indexes IndexSet, autoIncVal types.Value) (Table, error) {
schVal, err := encoding.MarshalSchema(ctx, vrw, sch)
if err != nil {
return nil, err
@@ -242,31 +238,31 @@ func newDoltDevTable(ctx context.Context, vrw types.ValueReadWriter, ns tree.Nod
return nil, err
}
return doltDevTable{vrw, ns, msg}, nil
return doltTable{vrw, ns, msg}, nil
}
func (t doltDevTable) nomsValue() types.Value {
func (t doltTable) nomsValue() types.Value {
return types.SerialMessage(t.msg.Table().Bytes)
}
func (t doltDevTable) HashOf() (hash.Hash, error) {
func (t doltTable) HashOf() (hash.Hash, error) {
return t.nomsValue().Hash(t.Format())
}
func (t doltDevTable) Format() *types.NomsBinFormat {
func (t doltTable) Format() *types.NomsBinFormat {
return t.vrw.Format()
}
func (t doltDevTable) GetSchemaHash(ctx context.Context) (hash.Hash, error) {
func (t doltTable) GetSchemaHash(ctx context.Context) (hash.Hash, error) {
return hash.New(t.msg.SchemaBytes()), nil
}
func (t doltDevTable) GetSchema(ctx context.Context) (schema.Schema, error) {
func (t doltTable) GetSchema(ctx context.Context) (schema.Schema, error) {
addr := hash.New(t.msg.SchemaBytes())
return schemaFromAddr(ctx, t.vrw, addr)
}
func (t doltDevTable) SetSchema(ctx context.Context, sch schema.Schema) (Table, error) {
func (t doltTable) SetSchema(ctx context.Context, sch schema.Schema) (Table, error) {
newSchemaVal, err := encoding.MarshalSchema(ctx, t.vrw, sch)
if err != nil {
return nil, err
@@ -280,10 +276,10 @@ func (t doltDevTable) SetSchema(ctx context.Context, sch schema.Schema) (Table,
addr := schRef.TargetHash()
msg := t.clone()
copy(msg.SchemaBytes(), addr[:])
return doltDevTable{t.vrw, t.ns, msg}, nil
return doltTable{t.vrw, t.ns, msg}, nil
}
func (t doltDevTable) GetTableRows(ctx context.Context) (Index, error) {
func (t doltTable) GetTableRows(ctx context.Context) (Index, error) {
rowbytes := t.msg.PrimaryIndexBytes()
sch, err := t.GetSchema(ctx)
if err != nil {
@@ -296,7 +292,7 @@ func (t doltDevTable) GetTableRows(ctx context.Context) (Index, error) {
return IndexFromMapInterface(m), nil
}
func (t doltDevTable) GetTableRowsWithDescriptors(ctx context.Context, kd, vd *val.TupleDesc) (Index, error) {
func (t doltTable) GetTableRowsWithDescriptors(ctx context.Context, kd, vd *val.TupleDesc) (Index, error) {
rowbytes := t.msg.PrimaryIndexBytes()
m, err := shim.MapFromValueWithDescriptors(types.SerialMessage(rowbytes), kd, vd, t.ns)
if err != nil {
@@ -305,7 +301,7 @@ func (t doltDevTable) GetTableRowsWithDescriptors(ctx context.Context, kd, vd *v
return IndexFromMapInterface(m), nil
}
func (t doltDevTable) SetTableRows(ctx context.Context, rows Index) (Table, error) {
func (t doltTable) SetTableRows(ctx context.Context, rows Index) (Table, error) {
rowsbytes, err := rows.bytes()
if err != nil {
return nil, err
@@ -321,10 +317,10 @@ func (t doltDevTable) SetTableRows(ctx context.Context, rows Index) (Table, erro
return nil, err
}
return doltDevTable{t.vrw, t.ns, msg}, nil
return doltTable{t.vrw, t.ns, msg}, nil
}
func (t doltDevTable) GetIndexes(ctx context.Context) (IndexSet, error) {
func (t doltTable) GetIndexes(ctx context.Context) (IndexSet, error) {
ambytes := t.msg.SecondaryIndexesBytes()
node, fileId, err := tree.NodeFromBytes(ambytes)
if err != nil {
@@ -341,7 +337,7 @@ func (t doltDevTable) GetIndexes(ctx context.Context) (IndexSet, error) {
return doltDevIndexSet{t.vrw, t.ns, am}, nil
}
func (t doltDevTable) SetIndexes(ctx context.Context, indexes IndexSet) (Table, error) {
func (t doltTable) SetIndexes(ctx context.Context, indexes IndexSet) (Table, error) {
fields, err := t.fields()
if err != nil {
return nil, err
@@ -351,15 +347,11 @@ func (t doltDevTable) SetIndexes(ctx context.Context, indexes IndexSet) (Table,
if err != nil {
return nil, err
}
return doltDevTable{t.vrw, t.ns, msg}, nil
return doltTable{t.vrw, t.ns, msg}, nil
}
// GetArtifacts implements Table.
func (t doltDevTable) GetArtifacts(ctx context.Context) (ArtifactIndex, error) {
if t.Format() != types.Format_DOLT {
panic("artifacts only implemented for DOLT")
}
func (t doltTable) GetArtifacts(ctx context.Context) (ArtifactIndex, error) {
sch, err := t.GetSchema(ctx)
if err != nil {
return nil, err
@@ -374,11 +366,7 @@ func (t doltDevTable) GetArtifacts(ctx context.Context) (ArtifactIndex, error) {
}
// SetArtifacts implements Table.
func (t doltDevTable) SetArtifacts(ctx context.Context, artifacts ArtifactIndex) (Table, error) {
if t.Format() != types.Format_DOLT {
panic("artifacts only implemented for DOLT")
}
func (t doltTable) SetArtifacts(ctx context.Context, artifacts ArtifactIndex) (Table, error) {
var addr hash.Hash
if artifacts != nil {
c, err := artifacts.Count()
@@ -395,11 +383,11 @@ func (t doltDevTable) SetArtifacts(ctx context.Context, artifacts ArtifactIndex)
}
msg := t.clone()
copy(msg.ArtifactsBytes(), addr[:])
return doltDevTable{t.vrw, t.ns, msg}, nil
return doltTable{t.vrw, t.ns, msg}, nil
}
// GetAutoIncrement returns the next value to be used for the AUTO_INCREMENT column.
func (t doltDevTable) GetAutoIncrement(ctx context.Context) (uint64, error) {
func (t doltTable) GetAutoIncrement(ctx context.Context) (uint64, error) {
res := t.msg.AutoIncrementValue()
if res == 0 {
return 1, nil
@@ -409,7 +397,7 @@ func (t doltDevTable) GetAutoIncrement(ctx context.Context) (uint64, error) {
// SetAutoIncrement sets the next value to be used for the AUTO_INCREMENT column.
// Since AUTO_INCREMENT starts at 1, setting this to either 0 or 1 will result in the field being unset.
func (t doltDevTable) SetAutoIncrement(ctx context.Context, val uint64) (Table, error) {
func (t doltTable) SetAutoIncrement(ctx context.Context, val uint64) (Table, error) {
// AUTO_INCREMENT starts at 1, so a value of 1 is the same as being unset.
// Normalizing both values to 0 ensures that they both result in the same hash as the field being unset.
if val == 1 {
@@ -428,10 +416,10 @@ func (t doltDevTable) SetAutoIncrement(ctx context.Context, val uint64) (Table,
return nil, err
}
}
return doltDevTable{t.vrw, t.ns, msg}, nil
return doltTable{t.vrw, t.ns, msg}, nil
}
func (t doltDevTable) clone() *serial.Table {
func (t doltTable) clone() *serial.Table {
bs := make([]byte, len(t.msg.Table().Bytes))
copy(bs, t.msg.Table().Bytes)
var ret serial.Table
@@ -439,7 +427,7 @@ func (t doltDevTable) clone() *serial.Table {
return &ret
}
func (t doltDevTable) fields() (serialTableFields, error) {
func (t doltTable) fields() (serialTableFields, error) {
ambytes := t.msg.SecondaryIndexesBytes()
node, fileId, err := tree.NodeFromBytes(ambytes)
if err != nil {
@@ -473,6 +461,6 @@ func (t doltDevTable) fields() (serialTableFields, error) {
}
func RefFromNomsTable(ctx context.Context, table Table) (types.Ref, error) {
ddt := table.(doltDevTable)
ddt := table.(doltTable)
return ddt.vrw.WriteValue(ctx, ddt.nomsValue())
}
-3
View File
@@ -156,8 +156,6 @@ type tableEdit struct {
// NewRootValue returns a new RootValue. This is a variable as it's changed in Doltgres.
var NewRootValue = func(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeStore, v types.Value) (RootValue, error) {
types.AssertFormat_DOLT(vrw.Format())
srv, err := serial.TryGetRootAsRootValue([]byte(v.(types.SerialMessage)), serial.MessagePrefixSz)
if err != nil {
return nil, err
@@ -247,7 +245,6 @@ func decodeRootNomsValue(ctx context.Context, vrw types.ValueReadWriter, ns tree
// isRootValue returns whether the value is a RootValue. This is a variable as it's changed in Doltgres.
func isRootValue(nbf *types.NomsBinFormat, val types.Value) bool {
types.AssertFormat_DOLT(nbf)
if sm, ok := val.(types.SerialMessage); ok {
fileID := serial.GetFileID(sm)
return fileID == serial.RootValueFileID || fileID == serial.DoltgresRootValueFileID
+14 -34
View File
@@ -112,16 +112,12 @@ func (t *Table) GetOverriddenSchema() schema.Schema {
// HasConflicts returns true if this table contains merge conflicts.
func (t *Table) HasConflicts(ctx context.Context) (bool, error) {
if t.Format() == types.Format_DOLT {
art, err := t.GetArtifacts(ctx)
if err != nil {
return false, err
}
return art.HasConflicts(ctx)
art, err := t.GetArtifacts(ctx)
if err != nil {
return false, err
}
panic("Unsupported format: " + t.Format().VersionString())
return art.HasConflicts(ctx)
}
// GetArtifacts returns the merge artifacts for this table.
@@ -140,37 +136,25 @@ func (t *Table) SetArtifacts(ctx context.Context, artifacts durable.ArtifactInde
// NumRowsInConflict returns the number of rows with merge conflicts for this table.
func (t *Table) NumRowsInConflict(ctx context.Context) (uint64, error) {
if t.Format() == types.Format_DOLT {
artIdx, err := t.table.GetArtifacts(ctx)
if err != nil {
return 0, err
}
return artIdx.ConflictCount(ctx)
artIdx, err := t.table.GetArtifacts(ctx)
if err != nil {
return 0, err
}
panic("Unsupported format: " + t.Format().VersionString())
return artIdx.ConflictCount(ctx)
}
// NumConstraintViolations returns the number of constraint violations for this table.
func (t *Table) NumConstraintViolations(ctx context.Context) (uint64, error) {
if t.Format() == types.Format_DOLT {
artIdx, err := t.table.GetArtifacts(ctx)
if err != nil {
return 0, err
}
return artIdx.ConstraintViolationCount(ctx)
artIdx, err := t.table.GetArtifacts(ctx)
if err != nil {
return 0, err
}
panic("Unsupported format: " + t.Format().VersionString())
return artIdx.ConstraintViolationCount(ctx)
}
// ClearConflicts deletes all merge conflicts for this table.
func (t *Table) ClearConflicts(ctx context.Context) (*Table, error) {
if t.Format() == types.Format_DOLT {
return t.clearArtifactConflicts(ctx)
}
panic("Unsupported format: " + t.Format().VersionString())
return t.clearArtifactConflicts(ctx)
}
func (t *Table) clearArtifactConflicts(ctx context.Context) (*Table, error) {
@@ -191,11 +175,7 @@ func (t *Table) clearArtifactConflicts(ctx context.Context) (*Table, error) {
// GetConflictSchemas returns the merge conflict schemas for this table.
func (t *Table) GetConflictSchemas(ctx context.Context, tblName TableName) (base, sch, mergeSch schema.Schema, err error) {
if t.Format() == types.Format_DOLT {
return t.getProllyConflictSchemas(ctx, tblName)
}
panic("Unsupported format: " + t.Format().VersionString())
return t.getProllyConflictSchemas(ctx, tblName)
}
// The conflict schema is implicitly determined based on the first conflict in the artifacts table.
+4 -4
View File
@@ -476,7 +476,7 @@ func newWorkingSet(ctx context.Context, name string, vrw types.ValueReadWriter,
if err != nil {
return nil, err
}
commitSpec, err := dsws.MergeState.FromCommitSpec(ctx, vrw)
commitSpec, err := dsws.MergeState.FromCommitSpec()
if err != nil {
return nil, err
}
@@ -500,12 +500,12 @@ func newWorkingSet(ctx context.Context, name string, vrw types.ValueReadWriter,
return nil, err
}
unmergableTables, err := dsws.MergeState.UnmergableTables(ctx, vrw)
unmergableTables, err := dsws.MergeState.UnmergableTables()
if err != nil {
return nil, err
}
isCherryPick, err := dsws.MergeState.IsCherryPick(ctx, vrw)
isCherryPick, err := dsws.MergeState.IsCherryPick()
if err != nil {
return nil, err
}
@@ -629,7 +629,7 @@ func (ws *WorkingSet) writeValues(ctx context.Context, db *DoltDB, meta *datas.W
}
// TODO: Serialize the full TableName
mergeState, err = datas.NewMergeState(ctx, db.vrw, preMergeWorking, dCommit, ws.mergeState.commitSpecStr, FlattenTableNames(ws.mergeState.unmergableTables), ws.mergeState.isCherryPick)
mergeState, err = datas.NewMergeState(preMergeWorking, dCommit, ws.mergeState.commitSpecStr, FlattenTableNames(ws.mergeState.unmergableTables), ws.mergeState.isCherryPick)
if err != nil {
return nil, err
}
@@ -325,11 +325,6 @@ func TestKeylessMergeConflicts(t *testing.T) {
}
func assertConflicts(t *testing.T, ctx context.Context, tbl *doltdb.Table, expected conflictEntries) {
types.AssertFormat_DOLT(tbl.Format())
assertProllyConflicts(t, ctx, tbl, expected)
}
func assertProllyConflicts(t *testing.T, ctx context.Context, tbl *doltdb.Table, expected conflictEntries) {
artIdx, err := tbl.GetArtifacts(ctx)
require.NoError(t, err)
artM := durable.ProllyMapFromArtifactIndex(artIdx)
@@ -375,7 +370,6 @@ func assertProllyConflicts(t *testing.T, ctx context.Context, tbl *doltdb.Table,
}
require.Equal(t, len(expected), c)
}
func mustGetRowValueFromTable(t *testing.T, ctx context.Context, tbl *doltdb.Table, key val.Tuple) val.Tuple {
@@ -405,11 +399,6 @@ func mustGetRowValueFromRootIsh(t *testing.T, ctx context.Context, vrw types.Val
// |expected| is a tupleSet to compensate for random storage order
func assertKeylessRows(t *testing.T, ctx context.Context, tbl *doltdb.Table, expected keylessEntries) {
types.AssertFormat_DOLT(tbl.Format())
assertKeylessProllyRows(t, ctx, tbl, expected)
}
func assertKeylessProllyRows(t *testing.T, ctx context.Context, tbl *doltdb.Table, expected []keylessEntry) {
idx, err := tbl.GetRowData(ctx)
require.NoError(t, err)
m, _ := durable.ProllyMapFromIndex(idx)
@@ -443,9 +432,6 @@ var keylessSch = dtu.MustSchema(
schema.NewColumn("c1", 1, types.IntKind, false),
schema.NewColumn("c2", 2, types.IntKind, false),
)
var c1Tag = types.Uint(1)
var c2Tag = types.Uint(2)
var cardTag = types.Uint(schema.KeylessRowCardinalityTag)
var valDesc = val.NewTupleDescriptor(val.Type{Enc: val.Uint64Enc}, val.Type{Enc: val.Int64Enc, Nullable: true}, val.Type{Enc: val.Int64Enc, Nullable: true})
var valBld = val.NewTupleBuilder(valDesc, nil)
+1 -8
View File
@@ -26,7 +26,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/types"
)
var ErrFastForward = errors.New("fast forward")
@@ -174,13 +173,7 @@ func MergeRoots(
opts editor.Options,
mergeOpts MergeOpts,
) (*Result, error) {
var (
nbf *types.NomsBinFormat
err error
)
nbf = ourRoot.VRW().Format()
types.AssertFormat_DOLT(nbf)
var err error
// merge collations
oColl, err := ourRoot.GetCollation(ctx)
@@ -212,8 +212,6 @@ func (rm *RootMerger) MergeTable(
return nil, nil, err
}
types.AssertFormat_DOLT(tm.vrw.Format())
// short-circuit here if we can
finished, finishedRootObj, stats, err := rm.MaybeShortCircuit(ctx, tm, mergeOpts)
if finished != nil || finishedRootObj != nil || stats != nil || err != nil {
+1 -1
View File
@@ -420,7 +420,7 @@ func mergeColumns(tblName string, format *storetypes.NomsBinFormat, ourCC, their
return nil, nil, mergeInfo, diffInfo, err
}
compatChecker := typecompatibility.NewTypeCompatabilityCheckerForStorageFormat(format)
compatChecker := typecompatibility.NewTypeCompatabilityChecker()
// After we've checked for schema conflicts, merge the columns together
// TODO: We don't currently preserve all column position changes; the returned merged columns are always based on
@@ -155,10 +155,6 @@ var testCases = []testCase{
}
func TestRowMerge(t *testing.T) {
if types.Format_Default != types.Format_DOLT {
t.Skip()
}
ctx := sql.NewEmptyContext()
tests := make([]rowMergeTest, len(testCases))
@@ -1872,7 +1872,7 @@ func makeEmptyRoot(t *testing.T, ddb *doltdb.DoltDB, eo editor.Options) doltdb.R
gst, err := dsess.NewAutoIncrementTracker(ctx, "dolt", ws)
require.NoError(t, err)
sess := writer.NewWriteSession(ddb.Format(), ws, gst, eo)
sess := writer.NewWriteSession(ws, gst, eo)
ws, err = sess.Flush(sql.NewContext(ctx))
require.NoError(t, err)
@@ -1894,7 +1894,7 @@ func makeRootWithTable(t *testing.T, ddb *doltdb.DoltDB, eo editor.Options, tbl
gst, err := dsess.NewAutoIncrementTracker(ctx, "dolt", ws)
require.NoError(t, err)
noop := func(ctx *sql.Context, dbName string, root doltdb.RootValue) (err error) { return }
sess := writer.NewWriteSession(ddb.Format(), ws, gst, eo)
sess := writer.NewWriteSession(ws, gst, eo)
wr, err := sess.GetTableWriter(sql.NewContext(ctx), doltdb.TableName{Name: tbl.ns.name}, "test", noop, false)
require.NoError(t, err)
+2 -14
View File
@@ -131,12 +131,12 @@ func RegisterForeignKeyViolations(
return err
}
err = childFkConstraintViolations(ctx, baseRoot.VRW(), foreignKey, postParent, postChild, postChild, emptyIdx, receiver)
err = childFkConstraintViolations(ctx, foreignKey, postParent, postChild, postChild, emptyIdx, receiver)
if err != nil {
return err
}
} else {
err = childFkConstraintViolations(ctx, baseRoot.VRW(), foreignKey, postParent, postChild, preChild, preChild.RowData, receiver)
err = childFkConstraintViolations(ctx, foreignKey, postParent, postChild, preChild, preChild.RowData, receiver)
if err != nil {
return err
}
@@ -252,7 +252,6 @@ func (f *foreignKeyViolationWriter) StartFK(ctx *sql.Context, fk doltdb.ForeignK
return err
}
types.AssertFormat_DOLT(tbl.Format())
arts, err := tbl.GetArtifacts(ctx)
if err != nil {
return err
@@ -266,8 +265,6 @@ func (f *foreignKeyViolationWriter) StartFK(ctx *sql.Context, fk doltdb.ForeignK
}
func (f *foreignKeyViolationWriter) EndCurrFK(ctx context.Context) error {
types.AssertFormat_DOLT(f.currTbl.Format())
artMap, err := f.artEditor.Flush(ctx)
if err != nil {
return err
@@ -307,10 +304,6 @@ func parentFkConstraintViolations(
preParentRowData durable.Index,
receiver FKViolationReceiver,
) error {
if preParentRowData.Format() != types.Format_DOLT {
panic("unsupported format: " + preParentRowData.Format().VersionString())
}
if preParent.IndexData == nil || postParent.Schema.GetPKCols().Size() == 0 || preParent.Schema.GetPKCols().Size() == 0 {
m, err := durable.ProllyMapFromIndex(preParentRowData)
if err != nil {
@@ -342,16 +335,11 @@ func parentFkConstraintViolations(
// necessary.
func childFkConstraintViolations(
ctx context.Context,
vr types.ValueReader,
foreignKey doltdb.ForeignKey,
postParent, postChild, preChild *constraintViolationsLoadedTable,
preChildRowData durable.Index,
receiver FKViolationReceiver,
) error {
if preChildRowData.Format() != types.Format_DOLT {
panic("unsupported format: " + preChildRowData.Format().VersionString())
}
if preChild.IndexData == nil || postChild.Schema.GetPKCols().Size() == 0 || preChild.Schema.GetPKCols().Size() == 0 {
m, err := durable.ProllyMapFromIndex(preChildRowData)
if err != nil {
@@ -88,6 +88,13 @@ func TestValidateGetRepoMetadataRequest(t *testing.T) {
NbsVersion: "5",
},
},
{
RepoPath: GoodRepoPath,
ClientRepoFormat: &remotesapi.ClientRepoFormat{
NbfVersion: "__LD_1__",
NbsVersion: "5",
},
},
} {
t.Run(fmt.Sprintf("Error #%02d", i), func(t *testing.T) {
assert.Error(t, ValidateGetRepoMetadataRequest(errMsg), "%v should not validate", errMsg)
@@ -115,13 +122,6 @@ func TestValidateGetRepoMetadataRequest(t *testing.T) {
NbsVersion: "4",
},
},
{
RepoPath: GoodRepoPath,
ClientRepoFormat: &remotesapi.ClientRepoFormat{
NbfVersion: "__LD_1__",
NbsVersion: "5",
},
},
} {
t.Run(fmt.Sprintf("NoError #%02d", i), func(t *testing.T) {
assert.NoError(t, ValidateGetRepoMetadataRequest(msg), "%v should validate", msg)
@@ -26,8 +26,6 @@ import (
// MarshalSchema takes a Schema and converts it to a types.Value
func MarshalSchema(ctx context.Context, vrw types.ValueReadWriter, sch schema.Schema) (types.Value, error) {
types.AssertFormat_DOLT(vrw.Format())
// Anyone calling this is going to serialize this to disk, so it's our last line of defense against defective schemas.
// Business logic should catch errors before this point, but this is a failsafe.
err := schema.ValidateColumnConstraints(sch.GetAllCols())
@@ -52,8 +50,6 @@ var unmarshalledSchemaCache = map[hash.Hash]schCacheData{}
// UnmarshalSchema takes a types.Value representing a Schema and Unmarshalls it into a schema.Schema.
func UnmarshalSchema(ctx context.Context, nbf *types.NomsBinFormat, schemaVal types.Value) (schema.Schema, error) {
types.AssertFormat_DOLT(nbf)
return DeserializeSchema(ctx, nbf, schemaVal)
}
@@ -87,8 +87,6 @@ func serializeSchemaAsFlatbuffer(sch schema.Schema) ([]byte, error) {
// DeserializeSchema deserializes a schema.Schema from a serial.Message.
func DeserializeSchema(ctx context.Context, nbf *types.NomsBinFormat, v types.Value) (schema.Schema, error) {
types.AssertFormat_DOLT(nbf)
sm, ok := v.(types.SerialMessage)
assertTrue(ok, "must pass types.SerialMessage value to DeserializeSchema")
return deserializeSchemaFromFlatbuffer(ctx, sm)
@@ -21,7 +21,6 @@ import (
"github.com/dolthub/vitess/go/vt/proto/query"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
storetypes "github.com/dolthub/dolt/go/store/types"
)
// TypeCompatibilityChecker checks if type changes are compatible at the storage layer and is used to
@@ -41,15 +40,10 @@ type TypeCompatibilityChecker interface {
IsTypeChangeCompatible(from, to typeinfo.TypeInfo) TypeChangeInfo
}
// NewTypeCompatabilityCheckerForStorageFormat returns a new TypeCompatibilityChecker
// NewTypeCompatabilityChecker returns a new TypeCompatibilityChecker
// instance for the given storage format.
func NewTypeCompatabilityCheckerForStorageFormat(format *storetypes.NomsBinFormat) TypeCompatibilityChecker {
switch format {
case storetypes.Format_DOLT:
return newDoltTypeCompatibilityChecker()
default:
panic("unsupported storage format: " + format.VersionString())
}
func NewTypeCompatabilityChecker() TypeCompatibilityChecker {
return newDoltTypeCompatibilityChecker()
}
// doltTypeCompatibilityChecker implements TypeCompatibilityChecker for the DOLT storage
@@ -26,7 +26,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
storetypes "github.com/dolthub/dolt/go/store/types"
)
type typeChangeCompatibilityTest struct {
@@ -79,7 +78,7 @@ var extendedTypeInfo = typeinfo.CreateExtendedTypeFromSqlType(extendedType{})
// TestDoltIsTypeChangeCompatible tests that the DOLT TypeCompatibilityChecker implementation
// correctly computes compatibility between types.
func TestDoltIsTypeChangeCompatible(t *testing.T) {
compatChecker := NewTypeCompatabilityCheckerForStorageFormat(storetypes.Format_DOLT)
compatChecker := NewTypeCompatabilityChecker()
runTypeCompatibilityTests(t, compatChecker, []typeChangeCompatibilityTest{
{
name: "equivalent types are compatible",
@@ -829,8 +829,6 @@ func getTableWriter(ctx *sql.Context, engine *gms.Engine, tableName, databaseNam
return nil, nil, fmt.Errorf("unexpected database type: %T", database)
}
binFormat := sqlDatabase.DbData().Ddb.Format()
ws, err := env.WorkingSet(ctx, sqlDatabase.GetDoltDB(), sqlDatabase.DbData().Rsr)
if err != nil {
return nil, nil, err
@@ -841,7 +839,7 @@ func getTableWriter(ctx *sql.Context, engine *gms.Engine, tableName, databaseNam
return nil, nil, err
}
writeSession := writer.NewWriteSession(binFormat, ws, tracker, sqlDatabase.EditOptions())
writeSession := writer.NewWriteSession(ws, tracker, sqlDatabase.EditOptions())
ds := dsess.DSessFromSess(ctx.Session)
setter := ds.SetWorkingRoot
@@ -374,14 +374,9 @@ func createTestSchema(t *testing.T) schema.Schema {
}
func createTestRowData(t *testing.T, vrw types.ValueReadWriter, ns tree.NodeStore, sch schema.Schema) durable.Index {
if types.Format_Default == types.Format_DOLT {
idx, err := durable.NewEmptyPrimaryIndex(context.Background(), vrw, ns, sch)
require.NoError(t, err)
return idx
}
t.Fatal("unsupported format")
return nil
idx, err := durable.NewEmptyPrimaryIndex(context.Background(), vrw, ns, sch)
require.NoError(t, err)
return idx
}
func createHooksTestTable(vrw types.ValueReadWriter, ns tree.NodeStore, tSchema schema.Schema, rowData durable.Index) (*doltdb.Table, error) {
@@ -329,11 +329,7 @@ func ResolveDataConflictsForTable(ctx *sql.Context, dbName string, root doltdb.R
}
if !ours {
if tbl.Format() == types.Format_DOLT {
tbl, err = resolveProllyConflicts(ctx, tbl, tblName, ourSch, sch)
} else {
panic("only __DOLT__ format is supported")
}
tbl, err = resolveProllyConflicts(ctx, tbl, tblName, ourSch, sch)
if err != nil {
return nil, false, err
}
+2 -6
View File
@@ -1372,10 +1372,6 @@ func (d *DoltSession) addDB(ctx *sql.Context, db SqlDatabase) error {
// TODO: figure out how to cast this to dsqle.SqlDatabase without creating import cycles
// Or better yet, get rid of EditOptions from the database, it's a session setting
nbf := types.Format_Default
if branchState.dbData.Ddb != nil {
nbf = branchState.dbData.Ddb.Format()
}
editOpts := db.(interface{ EditOptions() editor.Options }).EditOptions()
if dbState.Err != nil {
@@ -1405,7 +1401,7 @@ func (d *DoltSession) addDB(ctx *sql.Context, db SqlDatabase) error {
if err != nil {
return err
}
branchState.writeSession = d.writeSessProv(nbf, branchState.WorkingSet(), tracker, editOpts)
branchState.writeSession = d.writeSessProv(branchState.WorkingSet(), tracker, editOpts)
}
}
@@ -2069,4 +2065,4 @@ func DefaultHead(ctx *sql.Context, baseName string, db SqlDatabase) (string, err
// WriteSessFunc is a constructor that session builders use to
// create fresh table editors.
// The indirection avoids a writer/dsess package import cycle.
type WriteSessFunc func(nbf *types.NomsBinFormat, ws *doltdb.WorkingSet, aiTracker globalstate.AutoIncrementTracker, opts editor.Options) WriteSession
type WriteSessFunc func(ws *doltdb.WorkingSet, aiTracker globalstate.AutoIncrementTracker, opts editor.Options) WriteSession
@@ -648,15 +648,9 @@ func (dtf *DiffTableFunction) generateSchema(ctx *sql.Context, fromCommitVal, to
dtf.overriddenSchema = overriddenSchema
}
fromTable, fromTableExists := delta.FromTable, delta.FromTable != nil
toTable, toTableExists := delta.ToTable, delta.ToTable != nil
var format *types.NomsBinFormat
if toTableExists {
format = toTable.Format()
} else if fromTableExists {
format = fromTable.Format()
} else {
fromTableExists := delta.FromTable != nil
toTableExists := delta.ToTable != nil
if !toTableExists && !fromTableExists {
return sql.ErrTableNotFound.New(tableName)
}
@@ -667,7 +661,7 @@ func (dtf *DiffTableFunction) generateSchema(ctx *sql.Context, fromCommitVal, to
toSchForJoiner = dtf.overriddenSchema
}
diffTableSch, err := dtables.GetDiffTableSchemaAndJoiner(format, fromSchForJoiner, toSchForJoiner)
diffTableSch, err := dtables.GetDiffTableSchemaAndJoiner(fromSchForJoiner, toSchForJoiner)
if err != nil {
return err
}
@@ -625,7 +625,7 @@ func getDataSqlPatchResults(ctx *sql.Context, diffQuerySch, targetSch sql.Schema
// fmt.Sprintf("select %s, %s from dolt_diff('%s', '%s', '%s')", columnsWithDiff, "diff_type", fromRef, toRef, tableName)
// on sql engine, which returns the schema and rowIter of the final data diff result.
func getDiffQuery(ctx *sql.Context, dbData env.DbData[*sql.Context], td diff.TableDelta, fromRefDetails, toRefDetails *refDetails) (sql.Schema, []sql.Expression, sql.RowIter, error) {
diffTableSchema, err := dtables.GetDiffTableSchemaAndJoiner(td.ToTable.Format(), td.FromSch, td.ToSch)
diffTableSchema, err := dtables.GetDiffTableSchemaAndJoiner(td.FromSch, td.ToSch)
if err != nil {
return nil, nil, nil, err
}
@@ -475,12 +475,8 @@ func (pm *PreviewMergeConflictsTableFunction) RowIter(ctx *sql.Context, row sql.
return nil, fmt.Errorf("schema conflicts found: %d", schConflicts.Count())
}
if !tm.InvolvesRootObjects() {
if !dtypes.IsFormat_DOLT(pm.rootInfo.leftRoot.VRW().Format()) {
return nil, fmt.Errorf("preview_merge_conflicts table function only supports dolt format")
}
} else {
return nil, fmt.Errorf("Dolt does not operate on root objects")
if tm.InvolvesRootObjects() {
return nil, fmt.Errorf("dolt_preview_merge_conflict does not operate on root objects")
}
keyless := schema.IsKeyless(mergeSch)
@@ -519,7 +519,7 @@ func processTableColDelta(ctx *sql.Context, ddb *doltdb.DoltDB, delta diff.Table
// cells to compile a list of modified columns
func calculateColDelta(ctx *sql.Context, ddb *doltdb.DoltDB, delta *diff.TableDelta, colSchDiff *colSchemaDiff) ([]string, []string, error) {
// initialize row iterator
diffTableSchema, err := GetDiffTableSchemaAndJoiner(delta.ToTable.Format(), delta.FromSch, delta.ToSch)
diffTableSchema, err := GetDiffTableSchemaAndJoiner(delta.FromSch, delta.ToSch)
if err != nil {
return nil, nil, err
}
@@ -74,7 +74,7 @@ func NewCommitDiffTable(ctx *sql.Context, dbName string, tblName doltdb.TableNam
return nil, err
}
diffTableSchema, err := GetDiffTableSchemaAndJoiner(ddb.Format(), sch, sch)
diffTableSchema, err := GetDiffTableSchemaAndJoiner(sch, sch)
if err != nil {
return nil, err
}
@@ -20,7 +20,6 @@ import (
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/store/types"
)
// NewConflictsTable returns a new ConflictsTable instance
@@ -32,7 +31,6 @@ func NewConflictsTable(ctx *sql.Context, tblName doltdb.TableName, srcTable sql.
return nil, err
}
types.AssertFormat_DOLT(tbl.Format())
upd, ok := srcTable.(sql.UpdatableTable)
if !ok {
return nil, fmt.Errorf("%s can not have conflicts because it is not updateable", tblName)
@@ -18,14 +18,9 @@ import (
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
"github.com/dolthub/dolt/go/store/types"
)
// NewConstraintViolationsTable returns a sql.Table that lists constraint violations.
func NewConstraintViolationsTable(ctx *sql.Context, tblName doltdb.TableName, root doltdb.RootValue, rs RootSetter) (sql.Table, error) {
if root.VRW().Format() == types.Format_DOLT {
return newProllyCVTable(ctx, tblName, root, rs)
}
panic("Unsupported format: " + root.VRW().Format().VersionString())
return newProllyCVTable(ctx, tblName, root, rs)
}
@@ -102,7 +102,7 @@ func NewDiffTable(ctx *sql.Context, dbName string, tblName doltdb.TableName, ddb
return nil, err
}
diffTableSchema, err := GetDiffTableSchemaAndJoiner(ddb.Format(), sch, sch)
diffTableSchema, err := GetDiffTableSchemaAndJoiner(sch, sch)
if err != nil {
return nil, err
}
@@ -860,12 +860,8 @@ func (dps *DiffPartitions) Close(*sql.Context) error {
// GetDiffTableSchemaAndJoiner returns the schema for the diff table given a
// target schema for a row |sch|. In the old storage format, it also returns the
// associated joiner.
func GetDiffTableSchemaAndJoiner(format *types.NomsBinFormat, fromSch, toSch schema.Schema) (diffTableSchema schema.Schema, err error) {
if format == types.Format_DOLT {
return CalculateDiffSchema(fromSch, toSch)
} else {
panic("Unsupported format for diff table schema calculation: " + format.VersionString())
}
func GetDiffTableSchemaAndJoiner(fromSch, toSch schema.Schema) (diffTableSchema schema.Schema, err error) {
return CalculateDiffSchema(fromSch, toSch)
}
// expandFromToSchemas converts input schemas to schemas appropriate for diffs. One argument must be
@@ -35,7 +35,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
"github.com/dolthub/dolt/go/store/prolly"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/store/val"
)
@@ -272,7 +271,7 @@ func (wtm *WorkspaceTableModifier) getWorkspaceTableWriter(ctx *sql.Context, tar
return nil, nil, err
}
writeSession := writer.NewWriteSession(types.Format_DOLT, wtm.ws, gst, editor.Options{TargetStaging: targetStaging})
writeSession := writer.NewWriteSession(wtm.ws, gst, editor.Options{TargetStaging: targetStaging})
tableWriter, err := writeSession.GetTableWriter(ctx, wtm.tableName, ctx.GetCurrentDatabase(), setter, targetStaging)
if err != nil {
@@ -619,11 +619,7 @@ func TestConvertPrepared(t *testing.T) {
}
func TestScripts(t *testing.T) {
var skipped []string
if types.IsFormat_DOLT(types.Format_Default) {
skipped = append(skipped, newFormatSkippedScripts...)
}
h := newDoltHarness(t).WithSkippedQueries(skipped).WithConfigureStats(true)
h := newDoltHarness(t).WithConfigureStats(true)
defer h.Close()
enginetest.TestScripts(t, h)
}
@@ -1850,12 +1846,8 @@ func TestDeleteQueriesPrepared(t *testing.T) {
}
func TestScriptsPrepared(t *testing.T) {
var skipped []string
if types.IsFormat_DOLT(types.Format_Default) {
skipped = append(skipped, newFormatSkippedScripts...)
}
skipPreparedTests(t)
h := newDoltHarness(t).WithSkippedQueries(skipped).WithConfigureStats(true)
h := newDoltHarness(t).WithConfigureStats(true)
defer h.Close()
enginetest.TestScriptsPrepared(t, h)
}
@@ -2087,18 +2087,6 @@ func runMergeScriptTestsInBothDirections(t *testing.T, tests []MergeScriptTest,
})
}
func SkipByDefaultInCI(t *testing.T) {
if os.Getenv("CI") != "" && os.Getenv("DOLT_TEST_RUN_NON_RACE_TESTS") == "" {
t.Skip()
}
}
var newFormatSkippedScripts = []string{
// Different query plans
"Partial indexes are used and return the expected result",
"Multiple indexes on the same columns in a different order",
}
func skipPreparedTests(t *testing.T) {
if skipPrepared {
t.Skip("skip prepared")
+44 -68
View File
@@ -142,7 +142,7 @@ func DoltDiffIndexesFromTable(ctx context.Context, db, tbl string, t *doltdb.Tab
if err != nil {
return nil, err
}
keyBld := maybeGetKeyBuilder(tableRows)
keyBld := keyBuilderForIndex(tableRows)
cols := sch.GetPKCols().GetColumns()
@@ -174,13 +174,29 @@ func DoltDiffIndexesFromTable(ctx context.Context, db, tbl string, t *doltdb.Tab
indexes = append(indexes, &keyIndex)
}
if types.IsFormat_DOLT(t.Format()) {
indexes = append(indexes, NewCommitIndex(&doltIndex{
id: ToCommitIndexId,
indexes = append(indexes, NewCommitIndex(&doltIndex{
id: ToCommitIndexId,
tblName: doltdb.DoltDiffTablePrefix + tbl,
dbName: db,
columns: []schema.Column{
schema.NewColumn(ToCommitIndexId, schema.DiffCommitTag, types.StringKind, false),
},
indexSch: sch,
tableSch: sch,
unique: true,
comment: "",
vrw: t.ValueReadWriter(),
ns: t.NodeStore(),
order: sql.IndexOrderNone,
constrainedToLookupExpression: false,
}),
NewCommitIndex(&doltIndex{
id: FromCommitIndexId,
tblName: doltdb.DoltDiffTablePrefix + tbl,
dbName: db,
columns: []schema.Column{
schema.NewColumn(ToCommitIndexId, schema.DiffCommitTag, types.StringKind, false),
schema.NewColumn(FromCommitIndexId, schema.DiffCommitTag, types.StringKind, false),
},
indexSch: sch,
tableSch: sch,
@@ -191,24 +207,7 @@ func DoltDiffIndexesFromTable(ctx context.Context, db, tbl string, t *doltdb.Tab
order: sql.IndexOrderNone,
constrainedToLookupExpression: false,
}),
NewCommitIndex(&doltIndex{
id: FromCommitIndexId,
tblName: doltdb.DoltDiffTablePrefix + tbl,
dbName: db,
columns: []schema.Column{
schema.NewColumn(FromCommitIndexId, schema.DiffCommitTag, types.StringKind, false),
},
indexSch: sch,
tableSch: sch,
unique: true,
comment: "",
vrw: t.ValueReadWriter(),
ns: t.NodeStore(),
order: sql.IndexOrderNone,
constrainedToLookupExpression: false,
}),
)
}
)
return indexes, nil
}
@@ -262,10 +261,6 @@ func MockIndex(indexId, dbName, tableName, columnName string, columnType types.N
}
func DoltCommitIndexes(dbName, tab string, db *doltdb.DoltDB, unique bool) (indexes []sql.Index, err error) {
if !types.IsFormat_DOLT(db.Format()) {
return nil, nil
}
return []sql.Index{
NewCommitIndex(MockIndex(CommitHashIndexId, dbName, tab, CommitHashIndexId, types.StringKind, unique)),
}, nil
@@ -376,7 +371,7 @@ func getPrimaryKeyIndex(ctx context.Context, db, tbl string, t *doltdb.Table, sc
if err != nil {
return nil, err
}
keyBld := maybeGetKeyBuilder(tableRows)
keyBld := keyBuilderForIndex(tableRows)
cols := sch.GetPKCols().GetColumns()
@@ -397,7 +392,6 @@ func getPrimaryKeyIndex(ctx context.Context, db, tbl string, t *doltdb.Table, sc
keyBld: keyBld,
order: sql.IndexOrderAsc,
constrainedToLookupExpression: true,
doltBinFormat: types.IsFormat_DOLT(vrw.Format()),
}, nil
}
@@ -406,7 +400,7 @@ func getSecondaryIndex(ctx context.Context, db, tbl string, t *doltdb.Table, sch
if err != nil {
return nil, err
}
keyBld := maybeGetKeyBuilder(indexRows)
keyBld := keyBuilderForIndex(indexRows)
cols := make([]schema.Column, idx.Count())
for i, tag := range idx.IndexedColumnTags() {
@@ -432,7 +426,6 @@ func getSecondaryIndex(ctx context.Context, db, tbl string, t *doltdb.Table, sch
keyBld: keyBld,
order: sql.IndexOrderAsc,
constrainedToLookupExpression: true,
doltBinFormat: types.IsFormat_DOLT(vrw.Format()),
prefixLengths: idx.PrefixLengths(),
fullTextProps: idx.FullTextProperties(),
vectorProps: idx.VectorProperties(),
@@ -465,7 +458,6 @@ func ConvertFullTextToSql(ctx context.Context, db, tbl string, sch schema.Schema
keyBld: nil,
order: sql.IndexOrderAsc,
constrainedToLookupExpression: true,
doltBinFormat: true,
prefixLengths: idx.PrefixLengths(),
fullTextProps: idx.FullTextProperties(),
vectorProps: idx.VectorProperties(),
@@ -489,17 +481,11 @@ func (s *durableIndexState) coversAllColumns(i *doltIndex) bool {
}
cols := i.Schema().GetAllCols()
var idxCols *schema.ColCollection
if types.IsFormat_DOLT(i.Format()) {
// prolly indexes can cover an index lookup using
// both the key and value fields of the index,
// this allows using covering index machinery for
// primary key index lookups.
idxCols = i.IndexSchema().GetAllCols()
} else {
// to cover an index lookup, noms indexes must
// contain all fields in the index's key.
idxCols = i.IndexSchema().GetPKCols()
}
// prolly indexes can cover an index lookup using
// both the key and value fields of the index,
// this allows using covering index machinery for
// primary key index lookups.
idxCols = i.IndexSchema().GetAllCols()
covers := true
for i := 0; i < cols.Size(); i++ {
col := cols.GetByIndex(i)
@@ -556,12 +542,11 @@ type doltIndex struct {
order sql.IndexOrder
constrainedToLookupExpression bool
vector bool
isPk bool
doltBinFormat bool
unique bool
spatial bool
fulltext bool
vector bool
isPk bool
unique bool
spatial bool
fulltext bool
}
type LookupMeta struct {
@@ -734,17 +719,11 @@ func (di *doltIndex) coversColumns(s *durableIndexState, cols []uint64) bool {
}
var idxCols *schema.ColCollection
if types.IsFormat_DOLT(di.Format()) {
// prolly indexes can cover an index lookup using
// both the key and value fields of the index,
// this allows using covering index machinery for
// primary key index lookups.
idxCols = di.IndexSchema().GetAllCols()
} else {
// to cover an index lookup, noms indexes must
// contain all fields in the index's key.
idxCols = di.IndexSchema().GetPKCols()
}
// prolly indexes can cover an index lookup using
// both the key and value fields of the index,
// this allows using covering index machinery for
// primary key index lookups.
idxCols = di.IndexSchema().GetAllCols()
if len(cols) > len(idxCols.Tags) {
return false
@@ -830,7 +809,7 @@ func (di *doltIndex) Reversible() bool {
return false
}
return di.doltBinFormat
return true
}
// Database implement sql.Index
@@ -959,13 +938,10 @@ func (di *doltIndex) FullTextKeyColumns(ctx *sql.Context) (fulltext.KeyColumns,
var sharePool = pool.NewBuffPool()
func maybeGetKeyBuilder(idx durable.Index) *val.TupleBuilder {
if types.IsFormat_DOLT(idx.Format()) {
m := durable.MapFromIndex(idx)
kd, _ := m.Descriptors()
return val.NewTupleBuilder(kd, m.NodeStore())
}
return nil
func keyBuilderForIndex(idx durable.Index) *val.TupleBuilder {
m := durable.MapFromIndex(idx)
kd, _ := m.Descriptors()
return val.NewTupleBuilder(kd, m.NodeStore())
}
func pruneEmptyRanges(sqlRanges []sql.MySQLRange) (pruned []sql.MySQLRange, err error) {
@@ -27,7 +27,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/store/prolly"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/store/val"
)
@@ -47,18 +46,14 @@ func RowIterForIndexLookup(ctx *sql.Context, t DoltTableable, lookup sql.IndexLo
return nil, err
}
if types.IsFormat_DOLT(idx.Format()) {
prollyRanges, err := idx.prollyRanges(ctx, idx.ns, mysqlRanges...)
if len(prollyRanges) > 1 {
return nil, fmt.Errorf("expected a single index range")
}
if err != nil {
return nil, err
}
return RowIterForProllyRange(ctx, idx, prollyRanges[0], pkSch, columns, durableState, lookup.IsReverse)
prollyRanges, err := idx.prollyRanges(ctx, idx.ns, mysqlRanges...)
if len(prollyRanges) > 1 {
return nil, fmt.Errorf("expected a single index range")
}
panic("Unsupported format for RowIterForIndexLookup")
if err != nil {
return nil, err
}
return RowIterForProllyRange(ctx, idx, prollyRanges[0], pkSch, columns, durableState, lookup.IsReverse)
}
func RowIterForProllyRange(ctx *sql.Context, idx DoltIndex, r prolly.Range, pkSch sql.PrimaryKeySchema, projections []uint64, durableState *durableIndexState, reverse bool) (sql.RowIter, error) {
+1 -8
View File
@@ -30,7 +30,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
"github.com/dolthub/dolt/go/store/prolly"
"github.com/dolthub/dolt/go/store/prolly/tree"
"github.com/dolthub/dolt/go/store/types"
"github.com/dolthub/dolt/go/store/val"
)
@@ -396,9 +395,7 @@ func getSourceKv(ctx *sql.Context, n sql.Node, isSrc bool) (prolly.Map, prolly.M
if err != nil {
return prolly.Map{}, nil, nil, nil, nil, nil, err
}
if rowData.Format() != types.Format_DOLT {
panic("Unsupported index format in lookup join: " + rowData.Format().VersionString())
}
priMap, err = durable.ProllyMapFromIndex(rowData)
if err != nil {
return prolly.Map{}, nil, nil, nil, nil, nil, err
@@ -567,10 +564,6 @@ func getMergeKv(ctx *sql.Context, n sql.Node) (mergeState, error) {
return ms, fmt.Errorf("non-standard indexed table not supported")
}
if idx.Format() != types.Format_DOLT {
panic("Unsupported index format in merge join: " + idx.Format().VersionString())
}
secIdx, err := index.GetDurableIndex(ctx, doltTable, idx)
if err != nil {
return ms, err
-3
View File
@@ -23,18 +23,15 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
"github.com/dolthub/dolt/go/store/types"
)
// Returns a new row iterator for the table given
func newRowIterator(ctx context.Context, tbl *doltdb.Table, projCols []uint64, partition doltTablePartition) (sql.RowIter, error) {
sch, err := tbl.GetSchema(ctx)
if err != nil {
return nil, err
}
types.AssertFormat_DOLT(tbl.Format())
return ProllyRowIterFromPartition(ctx, sch, projCols, partition)
}
+3 -6
View File
@@ -1090,7 +1090,6 @@ func (t *WritableDoltTable) truncate(
}
func copyConstraintViolationsAndConflicts(ctx context.Context, from, to *doltdb.Table) (*doltdb.Table, error) {
types.AssertFormat_DOLT(to.Format())
arts, err := from.GetArtifacts(ctx)
if err != nil {
return nil, err
@@ -1612,7 +1611,7 @@ func (t *AlterableDoltTable) columnChangeRequiresRewrite(oldColumn *sql.Column,
return true
} else {
// This is overly broad, we could narrow this down a bit
compatibilityChecker := typecompatibility.NewTypeCompatabilityCheckerForStorageFormat(t.Format())
compatibilityChecker := typecompatibility.NewTypeCompatabilityChecker()
typeChangeInfo := compatibilityChecker.IsTypeChangeCompatible(existingCol.TypeInfo, newCol.TypeInfo)
return !typeChangeInfo.Compatible || typeChangeInfo.RewriteRows || typeChangeInfo.InvalidateSecondaryIndexes
}
@@ -1798,7 +1797,7 @@ func (t *AlterableDoltTable) RewriteInserter(
return nil, fmt.Errorf("cannot rebuild index on a headless branch")
}
writeSession := writer.NewWriteSession(dt.Format(), newWs, ait, dbState.WriteSession().GetOptions())
writeSession := writer.NewWriteSession(newWs, ait, dbState.WriteSession().GetOptions())
ed, err := writeSession.GetTableWriter(ctx, t.TableName(), t.db.RevisionQualifiedName(), sess.SetWorkingRoot, false)
if err != nil {
@@ -1845,7 +1844,7 @@ func fullTextRewriteEditor(
return nil, fmt.Errorf("cannot rebuild index on read only database %s", t.Name())
}
writeSession := writer.NewWriteSession(dt.Format(), newWs, ait, dbState.WriteSession().GetOptions())
writeSession := writer.NewWriteSession(newWs, ait, dbState.WriteSession().GetOptions())
parentEditor, err := writeSession.GetTableWriter(ctx, t.TableName(), t.db.RevisionQualifiedName(), sess.SetWorkingRoot, false)
if err != nil {
@@ -2412,8 +2411,6 @@ func (t *AlterableDoltTable) RenameIndex(ctx *sql.Context, fromIndexName string,
// CreateFulltextIndex implements fulltext.IndexAlterableTable
func (t *AlterableDoltTable) CreateFulltextIndex(ctx *sql.Context, idx sql.IndexDef, keyCols fulltext.KeyColumns, tableNames fulltext.IndexTableNames) error {
types.AssertFormat_DOLT(t.Format())
if err := dsess.CheckAccessForDb(ctx, t.db, branch_control.Permissions_Write); err != nil {
return err
}
+2 -2
View File
@@ -136,7 +136,7 @@ func NewTempTable(
return nil, err
}
writeSession := writer.NewWriteSession(tbl.Format(), newWs, ait, opts)
writeSession := writer.NewWriteSession(newWs, ait, opts)
tempTable := &TempTable{
tableName: name,
@@ -186,7 +186,7 @@ func setTempTableRoot(t *TempTable) func(ctx *sql.Context, dbName string, newRoo
return err
}
writeSession := writer.NewWriteSession(newTable.Format(), newWs, ait, t.opts)
writeSession := writer.NewWriteSession(newWs, ait, t.opts)
t.ed, err = writeSession.GetTableWriter(ctx, doltdb.TableName{Name: t.tableName}, t.dbName, setTempTableRoot(t), false)
if err != nil {
return err
+23 -27
View File
@@ -275,34 +275,30 @@ func CreateTestEnvWithName(envName string) *env.DoltEnv {
func SqlRowsFromDurableIndex(idx durable.Index, sch schema.Schema) ([]sql.Row, error) {
ctx := context.Background()
var sqlRows []sql.Row
if types.Format_Default == types.Format_DOLT {
rowData, err := durable.ProllyMapFromIndex(idx)
if err != nil {
return nil, err
}
kd, vd := rowData.Descriptors()
iter, err := rowData.IterAll(ctx)
if err != nil {
return nil, err
}
for {
var k, v val.Tuple
k, v, err = iter.Next(ctx)
if err == io.EOF {
break
} else if err != nil {
return nil, err
}
sqlRow, err := sqlRowFromTuples(sch, kd, vd, k, v)
if err != nil {
return nil, err
}
sqlRows = append(sqlRows, sqlRow)
}
} else {
panic("Unsupported format: " + idx.Format().VersionString())
rowData, err := durable.ProllyMapFromIndex(idx)
if err != nil {
return nil, err
}
kd, vd := rowData.Descriptors()
iter, err := rowData.IterAll(ctx)
if err != nil {
return nil, err
}
for {
var k, v val.Tuple
k, v, err = iter.Next(ctx)
if err == io.EOF {
break
} else if err != nil {
return nil, err
}
sqlRow, err := sqlRowFromTuples(sch, kd, vd, k, v)
if err != nil {
return nil, err
}
sqlRows = append(sqlRows, sqlRow)
}
return sqlRows, nil
}
@@ -27,24 +27,19 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/globalstate"
"github.com/dolthub/dolt/go/libraries/doltcore/table/editor"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/types"
)
// NewWriteSession creates and returns a WriteSession. Inserting a nil root is not an error, as there are
// locations that do not have a root at the time of this call. However, a root must be set through SetWorkingRoot before any
// table editors are returned.
func NewWriteSession(nbf *types.NomsBinFormat, ws *doltdb.WorkingSet, aiTracker globalstate.AutoIncrementTracker, opts editor.Options) dsess.WriteSession {
if types.IsFormat_DOLT(nbf) {
return &prollyWriteSession{
workingSet: ws,
tables: make(map[doltdb.TableName]*prollyTableWriter),
aiTracker: aiTracker,
mut: &sync.RWMutex{},
targetStaging: opts.TargetStaging,
}
func NewWriteSession(ws *doltdb.WorkingSet, aiTracker globalstate.AutoIncrementTracker, opts editor.Options) dsess.WriteSession {
return &prollyWriteSession{
workingSet: ws,
tables: make(map[doltdb.TableName]*prollyTableWriter),
aiTracker: aiTracker,
mut: &sync.RWMutex{},
targetStaging: opts.TargetStaging,
}
panic("only __DOLT__ format is supported")
}
// prollyWriteSession handles all edit operations on a table that may also update other tables.
@@ -133,29 +133,20 @@ func CreateIndex(
}
func BuildSecondaryIndex(ctx *sql.Context, tbl *doltdb.Table, idx schema.Index, tableName string, opts editor.Options) (durable.Index, error) {
switch tbl.Format() {
case types.Format_LD_1:
panic("LD_1 format is not supported in this version of Dolt")
case types.Format_DOLT:
sch, err := tbl.GetSchema(ctx)
if err != nil {
return nil, err
}
m, err := tbl.GetRowData(ctx)
if err != nil {
return nil, err
}
primary, err := durable.ProllyMapFromIndex(m)
if err != nil {
return nil, err
}
return BuildSecondaryProllyIndex(ctx, tbl.ValueReadWriter(), tbl.NodeStore(), sch, tableName, idx, primary)
default:
return nil, fmt.Errorf("unknown NomsBinFormat")
sch, err := tbl.GetSchema(ctx)
if err != nil {
return nil, err
}
m, err := tbl.GetRowData(ctx)
if err != nil {
return nil, err
}
primary, err := durable.ProllyMapFromIndex(m)
if err != nil {
return nil, err
}
return BuildSecondaryProllyIndex(ctx, tbl.ValueReadWriter(), tbl.NodeStore(), sch, tableName, idx, primary)
}
// BuildSecondaryProllyIndex builds secondary index data for the given primary
@@ -22,7 +22,6 @@ import (
"github.com/dolthub/dolt/go/libraries/doltcore/doltdb/durable"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"
"github.com/dolthub/dolt/go/store/types"
)
// RowIter wraps a sql.RowIter and abstracts away sql.Context for a
@@ -60,13 +59,10 @@ func (i rowIterImpl) Close(ctx context.Context) error {
// NewTableIterator creates a RowIter that iterates sql.Row's from |idx|.
// |offset| can be supplied to read at some start point in |idx|.
func NewTableIterator(ctx context.Context, sch schema.Schema, idx durable.Index) (RowIter, error) {
if types.IsFormat_DOLT(idx.Format()) {
m := durable.MapFromIndex(idx)
itr, err := m.IterAll(ctx)
if err != nil {
return nil, err
}
return NewRowIter(index.NewProllyRowIterForMap(sch, m, itr, nil)), nil
m := durable.MapFromIndex(idx)
itr, err := m.IterAll(ctx)
if err != nil {
return nil, err
}
panic("Unsupported format: " + idx.Format().VersionString())
return NewRowIter(index.NewProllyRowIterForMap(sch, m, itr, nil)), nil
}
-4
View File
@@ -147,8 +147,6 @@ func newCommitForValue(ctx context.Context, cs chunks.ChunkStore, vrw types.Valu
opts.Meta = &CommitMeta{}
}
types.AssertFormat_DOLT(vrw.Format())
r, err := vrw.WriteValue(ctx, v)
if err != nil {
return nil, err
@@ -181,8 +179,6 @@ func newCommitForValue(ctx context.Context, cs chunks.ChunkStore, vrw types.Valu
}
func commitPtr(nbf *types.NomsBinFormat, v types.Value, r *types.Ref) (*Commit, error) {
types.AssertFormat_DOLT(nbf)
bs := []byte(v.(types.SerialMessage))
var cm serial.Commit
err := serial.InitCommitRoot(&cm, bs, serial.MessagePrefixSz)
-5
View File
@@ -135,8 +135,6 @@ func (m refmapDatasetsMap) IterAll(ctx context.Context, cb func(string, hash.Has
// then you should fetch the current root, then call DatasetsInRoot with that hash. Otherwise, another writer could
// change the root value between when you get the root hash and call this method.
func (db *database) Datasets(ctx context.Context) (DatasetsMap, error) {
types.AssertFormat_DOLT(db.Format())
rootHash, err := db.rt.Root(ctx)
if err != nil {
return nil, err
@@ -180,8 +178,6 @@ func (db *database) GetDatasetByRootHash(ctx context.Context, datasetID string,
}
func (db *database) DatasetsByRootHash(ctx context.Context, rootHash hash.Hash) (DatasetsMap, error) {
types.AssertFormat_DOLT(db.Format())
rm, err := db.loadDatasetsRefmap(ctx, rootHash)
if err != nil {
return nil, err
@@ -827,7 +823,6 @@ func (db *database) update(
err error
root hash.Hash
)
types.AssertFormat_DOLT(db.Format())
for {
root, err = db.rt.Root(ctx)
+3 -9
View File
@@ -282,21 +282,15 @@ func (ms *MergeState) FromCommit(ctx context.Context, vr types.ValueReader) (*Co
return CommitFromValue(vr.Format(), commitV)
}
func (ms *MergeState) FromCommitSpec(ctx context.Context, vr types.ValueReader) (string, error) {
types.AssertFormat_DOLT(vr.Format())
func (ms *MergeState) FromCommitSpec() (string, error) {
return ms.fromCommitSpec, nil
}
func (ms *MergeState) IsCherryPick(_ context.Context, vr types.ValueReader) (bool, error) {
types.AssertFormat_DOLT(vr.Format())
func (ms *MergeState) IsCherryPick() (bool, error) {
return ms.isCherryPick, nil
}
func (ms *MergeState) UnmergableTables(ctx context.Context, vr types.ValueReader) ([]string, error) {
types.AssertFormat_DOLT(vr.Format())
func (ms *MergeState) UnmergableTables() ([]string, error) {
return ms.unmergableTables, nil
}
-2
View File
@@ -32,8 +32,6 @@ const (
// NewStash creates a new stash object.
func NewStash(ctx context.Context, nbf *types.NomsBinFormat, vrw types.ValueReadWriter, stashRef types.Ref, headAddr hash.Hash, meta *StashMeta) (hash.Hash, types.Ref, error) {
types.AssertFormat_DOLT(nbf)
headCommit, err := vrw.MustReadValue(ctx, headAddr)
if err != nil {
return hash.Hash{}, types.Ref{}, err
+1 -3
View File
@@ -175,9 +175,7 @@ func GetHashListFromStashList(ctx context.Context, ns tree.NodeStore, val types.
// LoadStashList returns StashList object that contains the AddressMap that contains all stashes. This method creates
// new StashList address map, if there is none exists yet (dataset head is null). Otherwise, it returns the address map
// that corresponds to given root hash value.
func LoadStashList(ctx context.Context, nbf *types.NomsBinFormat, ns tree.NodeStore, vr types.ValueReader, ds Dataset) (*StashList, error) {
types.AssertFormat_DOLT(nbf)
func LoadStashList(ctx context.Context, ns tree.NodeStore, vr types.ValueReader, ds Dataset) (*StashList, error) {
rootHash, hasHead := ds.MaybeHeadAddr()
if !hasHead {
nam, err := prolly.NewEmptyAddressMap(ns)
+1 -3
View File
@@ -50,9 +50,7 @@ func (s *Statistics) Count() (int, error) {
}
// LoadStatistics attempts to dereference a database's statistics Dataset into a typed Statistics object.
func LoadStatistics(ctx context.Context, nbf *types.NomsBinFormat, ns tree.NodeStore, vr types.ValueReader, ds Dataset) (*Statistics, error) {
types.AssertFormat_DOLT(nbf)
func LoadStatistics(ctx context.Context, ns tree.NodeStore, vr types.ValueReader, ds Dataset) (*Statistics, error) {
rootHash, hasHead := ds.MaybeHeadAddr()
if !hasHead {
return &Statistics{m: prolly.Map{}, addr: hash.Hash{}}, nil
-2
View File
@@ -40,8 +40,6 @@ type TagOptions struct {
// newTag serializes a tag pointing to |commitAddr| with the given |meta|,
// persists it, and returns its addr.
func newTag(ctx context.Context, db *database, commitAddr hash.Hash, meta *TagMeta) (hash.Hash, error) {
types.AssertFormat_DOLT(db.Format())
data := tagSerialMessage(commitAddr, meta)
r, err := db.WriteValue(ctx, types.SerialMessage(data))
if err != nil {
+1 -3
View File
@@ -35,9 +35,7 @@ func (t Tuple) Bytes() []byte {
}
// LoadTuple attempts to dereference a database's Tuple Dataset into a typed Tuple object.
func LoadTuple(ctx context.Context, nbf *types.NomsBinFormat, ns tree.NodeStore, vr types.ValueReader, ds Dataset) (*Tuple, error) {
types.AssertFormat_DOLT(nbf)
func LoadTuple(ctx context.Context, ns tree.NodeStore, vr types.ValueReader, ds Dataset) (*Tuple, error) {
rootHash, hasHead := ds.MaybeHeadAddr()
if !hasHead {
return &Tuple{}, nil
-6
View File
@@ -107,8 +107,6 @@ type WorkingSetSpec struct {
// ```
// where M is a struct type and R is a ref type.
func newWorkingSet(ctx context.Context, db *database, workingSetSpec WorkingSetSpec) (hash.Hash, error) {
types.AssertFormat_DOLT(db.Format())
meta := workingSetSpec.Meta
workingRef := workingSetSpec.WorkingRoot
stagedRef := workingSetSpec.StagedRoot
@@ -198,16 +196,12 @@ func workingset_flatbuffer(working hash.Hash, staged *hash.Hash, mergeState *Mer
}
func NewMergeState(
ctx context.Context,
vrw types.ValueReadWriter,
preMergeWorking types.Ref,
commit *Commit,
commitSpecStr string,
unmergableTables []string,
isCherryPick bool,
) (*MergeState, error) {
types.AssertFormat_DOLT(vrw.Format())
ms := &MergeState{
preMergeWorkingAddr: new(hash.Hash),
fromCommitAddr: new(hash.Hash),
-13
View File
@@ -25,7 +25,6 @@ import (
"bytes"
"context"
"math"
"os"
"strconv"
"strings"
"testing"
@@ -33,8 +32,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/libraries/doltcore/dconfig"
"github.com/dolthub/dolt/go/store/constants"
"github.com/dolthub/dolt/go/store/hash"
)
@@ -615,13 +612,3 @@ func TestWriteEmptyUnionList(t *testing.T) {
mustValue(NewList(context.Background(), vrw)),
)
}
// TestNomsBinFormat checks that Format_Default agrees with the binary format
// selected via the dconfig.EnvDefaultBinFormat environment variable, when set.
func TestNomsBinFormat(t *testing.T) {
	switch v, _ := os.LookupEnv(dconfig.EnvDefaultBinFormat); v {
	case constants.FormatLD1String:
		assert.Equal(t, Format_LD_1, Format_Default)
	case constants.FormatDoltString:
		assert.Equal(t, Format_DOLT, Format_Default)
	}
}
+2 -23
View File
@@ -16,7 +16,6 @@ package types
import (
"errors"
"fmt"
"sync"
"github.com/dolthub/dolt/go/store/constants"
@@ -40,36 +39,18 @@ type formatTag struct {
furp byte
}
var formatTag_LD_1 = &formatTag{}
var formatTag_DOLT = &formatTag{}
var Format_LD_1 = &NomsBinFormat{formatTag_LD_1}
var Format_DOLT = &NomsBinFormat{formatTag_DOLT}
var nbfLock = &sync.Mutex{}
var Format_Default *NomsBinFormat
var emptyTuples = make(map[*NomsBinFormat]Tuple)
// Pre-populate the empty-tuple cache for the LD_1 format; any construction
// error is deliberately ignored, matching best-effort cache initialization.
func init() {
	tup, _ := NewTuple(Format_LD_1)
	emptyTuples[Format_LD_1] = tup
}
// IsFormat_DOLT reports whether |nbf| is the DOLT binary format.
func IsFormat_DOLT(nbf *NomsBinFormat) bool {
	isDolt := nbf.tag == formatTag_DOLT
	return isDolt
}
// AssertFormat_DOLT panics if the provided NomsBinFormat is not compatible
// with the DOLT format.
func AssertFormat_DOLT(nbf *NomsBinFormat) {
	if IsFormat_DOLT(nbf) {
		return
	}
	panic(fmt.Sprintf("Unsupported binary format %s, please migrate database to newer format", nbf.VersionString()))
}
func GetFormatForVersionString(s string) (*NomsBinFormat, error) {
if s == constants.FormatLD1String {
return Format_LD_1, nil
} else if s == constants.FormatDoltString {
if s == constants.FormatDoltString {
return Format_DOLT, nil
} else {
return nil, errors.New("unsupported ChunkStore version " + s)
@@ -77,9 +58,7 @@ func GetFormatForVersionString(s string) (*NomsBinFormat, error) {
}
func (nbf *NomsBinFormat) VersionString() string {
if nbf.tag == formatTag_LD_1 {
return constants.FormatLD1String
} else if nbf.tag == formatTag_DOLT {
if nbf.tag == formatTag_DOLT {
return constants.FormatDoltString
} else {
panic("unrecognized NomsBinFormat tag value")
+1 -1
View File
@@ -93,7 +93,7 @@ func (tvs TupleValueSlice) Value(ctx context.Context) (Value, error) {
}
// EmptyTuple returns an empty Tuple. The |nbf| parameter is retained for
// interface compatibility; the zero-value Tuple serves every format now,
// replacing the old per-format cache lookup.
func EmptyTuple(nbf *NomsBinFormat) Tuple {
	return Tuple{}
}
func newTupleIterator() interface{} {