Merge pull request #6919 from dolthub/zachmu/virtual

Partial support for virtual columns
This commit is contained in:
Zach Musgrave
2023-11-02 16:20:10 -07:00
committed by GitHub
27 changed files with 548 additions and 252 deletions

View File

@@ -864,6 +864,8 @@ func getTableSchemaAtRef(queryist cli.Queryist, sqlCtx *sql.Context, tableName s
return sch, createStmt, nil
}
// schemaFromCreateTableStmt returns a schema for the given CREATE TABLE statement
// TODO: this is substantially incorrect, doesn't handle primary key ordering, probably other things too
func schemaFromCreateTableStmt(createTableStmt string) (schema.Schema, error) {
parsed, err := ast.Parse(createTableStmt)
if err != nil {
@@ -900,15 +902,17 @@ func schemaFromCreateTableStmt(createTableStmt string) (schema.Schema, error) {
if col.Type.Comment != nil {
comment = col.Type.Comment.String()
}
sCol, err := schema.NewColumnWithTypeInfo(
col.Name.String(),
0,
typeInfo,
primaryCols[col.Name.Lowered()],
defBuf.String(),
col.Type.Autoincrement == true,
comment,
)
sCol := schema.Column{
Name: col.Name.String(),
Kind: typeInfo.NomsKind(),
IsPartOfPK: primaryCols[col.Name.Lowered()],
TypeInfo: typeInfo,
Default: defBuf.String(),
Generated: "", // TODO
Virtual: false, // TODO
AutoIncrement: col.Type.Autoincrement == true,
Comment: comment,
}
cols = append(cols, sCol)
}

View File

@@ -50,7 +50,7 @@ func RefFromArtifactIndex(ctx context.Context, vrw types.ValueReadWriter, idx Ar
return refFromNomsValue(ctx, vrw, b)
default:
return types.Ref{}, errNbfUnkown
return types.Ref{}, errNbfUnknown
}
}
@@ -69,7 +69,7 @@ func NewEmptyArtifactIndex(ctx context.Context, vrw types.ValueReadWriter, ns tr
return ArtifactIndexFromProllyMap(m), nil
default:
return nil, errNbfUnkown
return nil, errNbfUnknown
}
}
@@ -107,7 +107,7 @@ func artifactIndexFromAddr(ctx context.Context, vrw types.ValueReadWriter, ns tr
return ArtifactIndexFromProllyMap(m), nil
default:
return nil, errNbfUnkown
return nil, errNbfUnknown
}
}

View File

@@ -40,7 +40,7 @@ func RefFromConflictIndex(ctx context.Context, vrw types.ValueReadWriter, idx Co
return types.Ref{}, fmt.Errorf("__DOLT__ conflicts should be stored in ArtifactIndex")
default:
return types.Ref{}, errNbfUnkown
return types.Ref{}, errNbfUnknown
}
}
@@ -58,7 +58,7 @@ func NewEmptyConflictIndex(ctx context.Context, vrw types.ValueReadWriter, ns tr
return nil, fmt.Errorf("__DOLT__ conflicts should be stored in ArtifactIndex")
default:
return nil, errNbfUnkown
return nil, errNbfUnknown
}
}
@@ -91,7 +91,7 @@ func conflictIndexFromAddr(ctx context.Context, vrw types.ValueReadWriter, ns tr
return nil, fmt.Errorf("__DOLT__ conflicts should be stored in ArtifactIndex")
default:
return nil, errNbfUnkown
return nil, errNbfUnknown
}
}

View File

@@ -88,7 +88,7 @@ func RefFromIndex(ctx context.Context, vrw types.ValueReadWriter, idx Index) (ty
return refFromNomsValue(ctx, vrw, b)
default:
return types.Ref{}, errNbfUnkown
return types.Ref{}, errNbfUnknown
}
}
@@ -115,7 +115,7 @@ func indexFromAddr(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeS
return IndexFromProllyMap(pm), nil
default:
return nil, errNbfUnkown
return nil, errNbfUnknown
}
}
@@ -138,7 +138,7 @@ func NewEmptyIndex(ctx context.Context, vrw types.ValueReadWriter, ns tree.NodeS
return IndexFromProllyMap(m), nil
default:
return nil, errNbfUnkown
return nil, errNbfUnknown
}
}

View File

@@ -52,7 +52,7 @@ var (
)
var (
errNbfUnkown = fmt.Errorf("unknown NomsBinFormat")
errNbfUnknown = fmt.Errorf("unknown NomsBinFormat")
errNbfUnsupported = fmt.Errorf("operation unsupported for NomsBinFormat")
)

View File

@@ -50,7 +50,7 @@ type FeatureVersion int64
// DoltFeatureVersion is described in feature_version.md.
// only variable for testing.
var DoltFeatureVersion FeatureVersion = 4 // last bumped when adding sql_mode column to dolt_schemas
var DoltFeatureVersion FeatureVersion = 5 // last bumped when adding virtual columns to schema storage
// RootValue is the value of the Database and is the committed value in every Dolt commit.
type RootValue struct {

View File

@@ -484,15 +484,11 @@ func migrateSchema(ctx context.Context, tableName string, existing schema.Schema
case query.Type_TEXT:
patched = true
info := typeinfo.StringDefaultType
cols[i], err = schema.NewColumnWithTypeInfo(
c.Name, c.Tag, info, c.IsPartOfPK, c.Default,
c.AutoIncrement, c.Comment, c.Constraints...)
cols[i], err = schema.NewColumnWithTypeInfo(c.Name, c.Tag, info, c.IsPartOfPK, c.Default, c.AutoIncrement, c.Comment, c.Constraints...)
case query.Type_BLOB:
patched = true
info := typeinfo.VarbinaryDefaultType
cols[i], err = schema.NewColumnWithTypeInfo(
c.Name, c.Tag, info, c.IsPartOfPK, c.Default,
c.AutoIncrement, c.Comment, c.Constraints...)
cols[i], err = schema.NewColumnWithTypeInfo(c.Name, c.Tag, info, c.IsPartOfPK, c.Default, c.AutoIncrement, c.Comment, c.Constraints...)
}
if err != nil {
return nil, err
@@ -525,9 +521,7 @@ func migrateSchema(ctx context.Context, tableName string, existing schema.Schema
return nil, err
}
cols[i], err = schema.NewColumnWithTypeInfo(
c.Name, c.Tag, info, c.IsPartOfPK, c.Default,
c.AutoIncrement, c.Comment, c.Constraints...)
cols[i], err = schema.NewColumnWithTypeInfo(c.Name, c.Tag, info, c.IsPartOfPK, c.Default, c.AutoIncrement, c.Comment, c.Constraints...)
if err != nil {
return nil, err
}

View File

@@ -37,13 +37,13 @@ var ErrNoPrimaryKeyColumns = errors.New("no primary key columns")
var ErrNonAutoIncType = errors.New("column type cannot be auto incremented")
var EmptyColColl = &ColCollection{
[]Column{},
[]uint64{},
[]uint64{},
map[uint64]Column{},
map[string]Column{},
map[string]Column{},
map[uint64]int{},
cols: []Column{},
Tags: []uint64{},
SortedTags: []uint64{},
TagToCol: map[uint64]Column{},
NameToCol: map[string]Column{},
LowerNameToCol: map[string]Column{},
TagToIdx: map[uint64]int{},
}
// ColCollection is a collection of columns. As a stand-alone collection, all columns in the collection must have unique
@@ -51,6 +51,10 @@ var EmptyColColl = &ColCollection{
// See schema.ValidateForInsert for details.
type ColCollection struct {
cols []Column
// virtualColumns stores the indexes of any virtual columns in the collection
virtualColumns []int
// storedIndexes stores the indexes of the stored columns in the collection
storedIndexes []int
// Tags is a list of all the tags in the ColCollection in their original order.
Tags []uint64
// SortedTags is a list of all the tags in the ColCollection in sorted order.
@@ -63,6 +67,8 @@ type ColCollection struct {
LowerNameToCol map[string]Column
// TagToIdx is a map from a tag to the column index
TagToIdx map[uint64]int
// tagToStorageIndex is a map from a tag to the physical storage column index
tagToStorageIndex map[uint64]int
}
// NewColCollection creates a new collection from a list of columns. If any columns have the same tag, by-tag lookups in
@@ -78,8 +84,12 @@ func NewColCollection(cols ...Column) *ColCollection {
nameToCol := make(map[string]Column, len(cols))
lowerNameToCol := make(map[string]Column, len(cols))
tagToIdx := make(map[uint64]int, len(cols))
tagToStorageIndex := make(map[uint64]int, len(cols))
var virtualColumns []int
var columns []Column
var storedIndexes []int
storageIdx := 0
for i, col := range cols {
// If multiple columns have the same tag, the last one is used for tag lookups.
// Columns must have unique tags to pass schema.ValidateForInsert.
@@ -96,18 +106,29 @@ func NewColCollection(cols ...Column) *ColCollection {
if _, ok := lowerNameToCol[lowerCaseName]; !ok {
lowerNameToCol[lowerCaseName] = cols[i]
}
if col.Virtual {
virtualColumns = append(virtualColumns, i)
} else {
storedIndexes = append(storedIndexes, i)
tagToStorageIndex[col.Tag] = storageIdx
storageIdx++
}
}
sort.Slice(sortedTags, func(i, j int) bool { return sortedTags[i] < sortedTags[j] })
return &ColCollection{
cols: columns,
Tags: tags,
SortedTags: sortedTags,
TagToCol: tagToCol,
NameToCol: nameToCol,
LowerNameToCol: lowerNameToCol,
TagToIdx: tagToIdx,
cols: columns,
virtualColumns: virtualColumns,
storedIndexes: storedIndexes,
tagToStorageIndex: tagToStorageIndex,
Tags: tags,
SortedTags: sortedTags,
TagToCol: tagToCol,
NameToCol: nameToCol,
LowerNameToCol: lowerNameToCol,
TagToIdx: tagToIdx,
}
}
@@ -220,16 +241,32 @@ func (cc *ColCollection) GetByTag(tag uint64) (Column, bool) {
return InvalidCol, false
}
// GetByIndex returns a column with a given index
// GetByIndex returns the Nth column in the collection
func (cc *ColCollection) GetByIndex(idx int) Column {
return cc.cols[idx]
}
// GetByStoredIndex returns the Nth stored column (omitting virtual columns from index calculation)
func (cc *ColCollection) GetByStoredIndex(idx int) Column {
	// storedIndexes maps a storage position to the column's position in cols.
	physicalIdx := cc.storedIndexes[idx]
	return cc.cols[physicalIdx]
}
// StoredIndexByTag returns the storage index of the column with the given tag, ignoring virtual
// columns. The boolean result is false if no stored column has the given tag.
func (cc *ColCollection) StoredIndexByTag(tag uint64) (int, bool) {
	if storageIdx, found := cc.tagToStorageIndex[tag]; found {
		return storageIdx, true
	}
	return 0, false
}
// Size returns the number of columns in the collection, virtual columns included.
// See StoredSize for the count of physically stored columns only.
func (cc *ColCollection) Size() int {
return len(cc.cols)
}
// StoredSize returns the number of non-virtual columns in the collection, i.e. the number
// of columns that are physically present in row storage.
func (cc *ColCollection) StoredSize() int {
return len(cc.storedIndexes)
}
// Contains returns whether this column collection contains a column with the name given, case insensitive
func (cc *ColCollection) Contains(name string) bool {
_, ok := cc.GetByNameCaseInsensitive(name)

View File

@@ -21,14 +21,14 @@ import (
"github.com/stretchr/testify/assert"
"github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
typeinfo "github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
"github.com/dolthub/dolt/go/store/types"
)
var firstNameCol = Column{"first", 0, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil}
var lastNameCol = Column{"last", 1, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil}
var firstNameCapsCol = Column{"FiRsT", 2, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil}
var lastNameCapsCol = Column{"LAST", 3, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil}
var firstNameCol = Column{Name: "first", Kind: types.StringKind, TypeInfo: typeinfo.StringDefaultType}
var lastNameCol = Column{Name: "last", Tag: 1, Kind: types.StringKind, TypeInfo: typeinfo.StringDefaultType}
var firstNameCapsCol = Column{Name: "FiRsT", Tag: 2, Kind: types.StringKind, TypeInfo: typeinfo.StringDefaultType}
var lastNameCapsCol = Column{Name: "LAST", Tag: 3, Kind: types.StringKind, TypeInfo: typeinfo.StringDefaultType}
func TestGetByNameAndTag(t *testing.T) {
cols := []Column{firstNameCol, lastNameCol, firstNameCapsCol, lastNameCapsCol}
@@ -102,18 +102,18 @@ func TestGetByNameCaseInsensitive(t *testing.T) {
func TestAppendAndItrInSortOrder(t *testing.T) {
cols := []Column{
{"0", 0, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil},
{"2", 2, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil},
{"4", 4, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil},
{"3", 3, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil},
{"1", 1, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil},
{Name: "0", Kind: types.StringKind, TypeInfo: typeinfo.StringDefaultType},
{Name: "2", Tag: 2, Kind: types.StringKind, TypeInfo: typeinfo.StringDefaultType},
{Name: "4", Tag: 4, Kind: types.StringKind, TypeInfo: typeinfo.StringDefaultType},
{Name: "3", Tag: 3, Kind: types.StringKind, TypeInfo: typeinfo.StringDefaultType},
{Name: "1", Tag: 1, Kind: types.StringKind, TypeInfo: typeinfo.StringDefaultType},
}
cols2 := []Column{
{"7", 7, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil},
{"9", 9, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil},
{"5", 5, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil},
{"8", 8, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil},
{"6", 6, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil},
{Name: "7", Tag: 7, Kind: types.StringKind, TypeInfo: typeinfo.StringDefaultType},
{Name: "9", Tag: 9, Kind: types.StringKind, TypeInfo: typeinfo.StringDefaultType},
{Name: "5", Tag: 5, Kind: types.StringKind, TypeInfo: typeinfo.StringDefaultType},
{Name: "8", Tag: 8, Kind: types.StringKind, TypeInfo: typeinfo.StringDefaultType},
{Name: "6", Tag: 6, Kind: types.StringKind, TypeInfo: typeinfo.StringDefaultType},
}
colColl := NewColCollection(cols...)

View File

@@ -37,15 +37,10 @@ var (
var (
// InvalidCol is a Column instance that is returned when there is nothing to return and can be tested against.
InvalidCol = Column{
"invalid",
InvalidTag,
types.NullKind,
false,
typeinfo.UnknownType,
"",
false,
"",
nil,
Name: "invalid",
Tag: InvalidTag,
Kind: types.NullKind,
TypeInfo: typeinfo.UnknownType,
}
)
@@ -76,6 +71,12 @@ type Column struct {
// Default is the default value of this column. This is the string representation of a sql.Expression.
Default string
// Generated is the generated value of this column. This is the string representation of a sql.Expression.
Generated string
// Virtual is true if this is a virtual column.
Virtual bool
// AutoIncrement says whether this column auto increments.
AutoIncrement bool
@@ -97,28 +98,46 @@ func NewColumn(name string, tag uint64, kind types.NomsKind, partOfPK bool, cons
}
// NewColumnWithTypeInfo creates a Column instance with the given type info.
// Callers are encouraged to construct schema.Column structs directly instead of using this method, then call
// ValidateColumn.
func NewColumnWithTypeInfo(name string, tag uint64, typeInfo typeinfo.TypeInfo, partOfPK bool, defaultVal string, autoIncrement bool, comment string, constraints ...ColConstraint) (Column, error) {
for _, c := range constraints {
c := Column{
Name: name,
Tag: tag,
Kind: typeInfo.NomsKind(),
IsPartOfPK: partOfPK,
TypeInfo: typeInfo,
Default: defaultVal,
AutoIncrement: autoIncrement,
Comment: comment,
Constraints: constraints,
}
err := ValidateColumn(c)
if err != nil {
return InvalidCol, err
}
return c, nil
}
// ValidateColumn validates the given column.
func ValidateColumn(c Column) error {
for _, c := range c.Constraints {
if c == nil {
return Column{}, errors.New("nil passed as a constraint")
return errors.New("nil passed as a constraint")
}
}
if typeInfo == nil {
return Column{}, errors.New("cannot instantiate column with nil type info")
if c.TypeInfo == nil {
return errors.New("cannot instantiate column with nil type info")
}
return Column{
name,
tag,
typeInfo.NomsKind(),
partOfPK,
typeInfo,
defaultVal,
autoIncrement,
comment,
constraints,
}, nil
if c.TypeInfo.NomsKind() != c.Kind {
return errors.New("type info and kind do not match")
}
return nil
}
// IsNullable returns whether the column can be set to a null value.

View File

@@ -297,8 +297,7 @@ func getColumns(t *testing.T) (cols []schema.Column) {
for i := range cols {
name := "col" + strconv.Itoa(i)
tag := uint64(i)
cols[i], err = schema.NewColumnWithTypeInfo(
name, tag, ti[i], false, "", false, "")
cols[i], err = schema.NewColumnWithTypeInfo(name, tag, ti[i], false, "", false, "")
require.NoError(t, err)
}
return

View File

@@ -204,8 +204,15 @@ func serializeSchemaColumns(b *fb.Builder, sch schema.Schema) fb.UOffsetT {
// serialize columns in |cols|
for i := len(cols) - 1; i >= 0; i-- {
col := cols[i]
defVal := ""
if col.Default != "" {
defVal = col.Default
} else {
defVal = col.Generated
}
co := b.CreateString(col.Comment)
do := b.CreateString(col.Default)
do := b.CreateString(defVal)
typeString := sqlTypeString(col.TypeInfo)
to := b.CreateString(typeString)
no := b.CreateString(col.Name)
@@ -222,8 +229,8 @@ func serializeSchemaColumns(b *fb.Builder, sch schema.Schema) fb.UOffsetT {
serial.ColumnAddPrimaryKey(b, col.IsPartOfPK)
serial.ColumnAddAutoIncrement(b, col.AutoIncrement)
serial.ColumnAddNullable(b, col.IsNullable())
serial.ColumnAddGenerated(b, false)
serial.ColumnAddVirtual(b, false)
serial.ColumnAddGenerated(b, col.Generated != "")
serial.ColumnAddVirtual(b, col.Virtual)
serial.ColumnAddHidden(b, false)
offs[i] = serial.ColumnEnd(b)
}
@@ -294,17 +301,28 @@ func deserializeColumns(ctx context.Context, s *serial.TableSchema) ([]schema.Co
return nil, err
}
cols[i], err = schema.NewColumnWithTypeInfo(
string(c.Name()),
c.Tag(),
sqlType,
c.PrimaryKey(),
string(c.DefaultValue()),
c.AutoIncrement(),
string(c.Comment()),
constraintsFromSerialColumn(&c)...)
if err != nil {
return nil, err
defVal := ""
generatedVal := ""
if c.DefaultValue() != nil {
if c.Generated() {
generatedVal = string(c.DefaultValue())
} else {
defVal = string(c.DefaultValue())
}
}
cols[i] = schema.Column{
Name: string(c.Name()),
Tag: c.Tag(),
Kind: sqlType.NomsKind(),
IsPartOfPK: c.PrimaryKey(),
TypeInfo: sqlType,
Default: defVal,
Generated: generatedVal,
Virtual: c.Virtual(),
AutoIncrement: c.AutoIncrement(),
Comment: string(c.Comment()),
Constraints: constraintsFromSerialColumn(&c),
}
}
return cols, nil

View File

@@ -129,6 +129,10 @@ func IsKeyless(sch Schema) bool {
sch.GetAllCols().Size() != 0
}
// IsVirtual returns whether the schema contains any virtual columns. A nil schema
// is treated as having none.
func IsVirtual(sch Schema) bool {
	if sch == nil {
		return false
	}
	return len(sch.GetAllCols().virtualColumns) > 0
}
func HasAutoIncrement(sch Schema) (ok bool) {
_ = sch.GetAllCols().Iter(func(tag uint64, col Column) (stop bool, err error) {
if col.AutoIncrement {

View File

@@ -473,6 +473,10 @@ func (si *schemaImpl) GetValueDescriptor() val.TupleDesc {
useCollations := false // We only use collations if a string exists
_ = si.GetNonPKCols().Iter(func(tag uint64, col Column) (stop bool, err error) {
if col.Virtual {
return
}
sqlType := col.TypeInfo.ToSqlType()
queryType := sqlType.Type()
tt = append(tt, val.Type{

View File

@@ -17,7 +17,7 @@ package schema
import (
"fmt"
"reflect"
"strings"
strings "strings"
"testing"
"github.com/stretchr/testify/assert"
@@ -42,21 +42,15 @@ const (
reservedColTag = 50
)
var lnVal = types.String("astley")
var fnVal = types.String("rick")
var addrVal = types.String("123 Fake St")
var ageVal = types.Uint(53)
var titleVal = types.NullValue
var pkCols = []Column{
{lnColName, lnColTag, types.StringKind, true, typeinfo.StringDefaultType, "", false, "", nil},
{fnColName, fnColTag, types.StringKind, true, typeinfo.StringDefaultType, "", false, "", nil},
{Name: lnColName, Tag: lnColTag, Kind: types.StringKind, IsPartOfPK: true, TypeInfo: typeinfo.StringDefaultType},
{Name: fnColName, Tag: fnColTag, Kind: types.StringKind, IsPartOfPK: true, TypeInfo: typeinfo.StringDefaultType},
}
var nonPkCols = []Column{
{addrColName, addrColTag, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil},
{ageColName, ageColTag, types.UintKind, false, typeinfo.FromKind(types.UintKind), "", false, "", nil},
{titleColName, titleColTag, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil},
{reservedColName, reservedColTag, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil},
{Name: addrColName, Tag: addrColTag, Kind: types.StringKind, TypeInfo: typeinfo.StringDefaultType},
{Name: ageColName, Tag: ageColTag, Kind: types.UintKind, TypeInfo: typeinfo.FromKind(types.UintKind)},
{Name: titleColName, Tag: titleColTag, Kind: types.StringKind, TypeInfo: typeinfo.StringDefaultType},
{Name: reservedColName, Tag: reservedColTag, Kind: types.StringKind, TypeInfo: typeinfo.StringDefaultType},
}
var allCols = append(append([]Column(nil), pkCols...), nonPkCols...)
@@ -204,7 +198,7 @@ func TestValidateForInsert(t *testing.T) {
})
t.Run("Name collision", func(t *testing.T) {
cols := append(allCols, Column{titleColName, 100, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil})
cols := append(allCols, Column{Name: titleColName, Tag: 100, Kind: types.StringKind, TypeInfo: typeinfo.StringDefaultType})
colColl := NewColCollection(cols...)
err := ValidateForInsert(colColl)
@@ -213,7 +207,7 @@ func TestValidateForInsert(t *testing.T) {
})
t.Run("Case insensitive collision", func(t *testing.T) {
cols := append(allCols, Column{strings.ToUpper(titleColName), 100, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil})
cols := append(allCols, Column{Name: strings.ToUpper(titleColName), Tag: 100, Kind: types.StringKind, TypeInfo: typeinfo.StringDefaultType})
colColl := NewColCollection(cols...)
err := ValidateForInsert(colColl)
@@ -222,7 +216,7 @@ func TestValidateForInsert(t *testing.T) {
})
t.Run("Tag collision", func(t *testing.T) {
cols := append(allCols, Column{"newCol", lnColTag, types.StringKind, false, typeinfo.StringDefaultType, "", false, "", nil})
cols := append(allCols, Column{Name: "newCol", Tag: lnColTag, Kind: types.StringKind, TypeInfo: typeinfo.StringDefaultType})
colColl := NewColCollection(cols...)
err := ValidateForInsert(colColl)

View File

@@ -29,6 +29,7 @@ import (
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/go-mysql-server/sql/memo"
"github.com/dolthub/go-mysql-server/sql/mysql_db"
"github.com/dolthub/go-mysql-server/sql/plan"
gmstypes "github.com/dolthub/go-mysql-server/sql/types"
"github.com/dolthub/vitess/go/mysql"
"github.com/stretchr/testify/assert"
@@ -91,8 +92,8 @@ func TestSingleQuery(t *testing.T) {
enginetest.RunQuery(t, engine, harness, q)
}
engine.EngineAnalyzer().Debug = true
engine.EngineAnalyzer().Verbose = true
// engine.EngineAnalyzer().Debug = true
// engine.EngineAnalyzer().Verbose = true
var test queries.QueryTest
test = queries.QueryTest{
@@ -117,43 +118,7 @@ func TestSingleQuery(t *testing.T) {
func TestSingleScript(t *testing.T) {
t.Skip()
var scripts = []queries.ScriptTest{
{
Name: "failed statements data validation for DELETE, REPLACE",
SetUpScript: []string{
"CREATE TABLE test (pk BIGINT PRIMARY KEY, v1 BIGINT, INDEX (v1));",
"INSERT INTO test VALUES (1,1), (4,4), (5,5);",
"CREATE TABLE test2 (pk BIGINT PRIMARY KEY, CONSTRAINT fk_test FOREIGN KEY (pk) REFERENCES test (v1));",
"INSERT INTO test2 VALUES (4);",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "DELETE FROM test WHERE pk > 0;",
ExpectedErr: sql.ErrForeignKeyParentViolation,
},
{
Query: "SELECT * FROM test;",
Expected: []sql.Row{{1, 1}, {4, 4}, {5, 5}},
},
{
Query: "SELECT * FROM test2;",
Expected: []sql.Row{{4}},
},
{
Query: "REPLACE INTO test VALUES (1,7), (4,8), (5,9);",
ExpectedErr: sql.ErrForeignKeyParentViolation,
},
{
Query: "SELECT * FROM test;",
Expected: []sql.Row{{1, 1}, {4, 4}, {5, 5}},
},
{
Query: "SELECT * FROM test2;",
Expected: []sql.Row{{4}},
},
},
},
}
var scripts = []queries.ScriptTest{}
tcc := &testCommitClock{}
cleanup := installTestCommitClock(tcc)
@@ -177,6 +142,13 @@ func TestSingleScript(t *testing.T) {
}
}
// newUpdateResult builds the OkResult expected from an UPDATE statement that
// matched |matched| rows and changed |updated| of them.
func newUpdateResult(matched, updated int) gmstypes.OkResult {
	info := plan.UpdateInfo{Matched: matched, Updated: updated}
	return gmstypes.OkResult{
		RowsAffected: uint64(updated),
		Info:         info,
	}
}
// Convenience test for debugging a single query. Unskip and set to the desired query.
func TestSingleMergeScript(t *testing.T) {
t.Skip()
@@ -347,7 +319,7 @@ func TestQueryPlans(t *testing.T) {
}
// Parallelism introduces Exchange nodes into the query plans, so disable.
// TODO: exchange nodes should really only be part of the explain plan under certain debug settings
harness := newDoltHarness(t).WithParallelism(1).WithSkippedQueries(skipped)
harness := newDoltHarness(t).WithSkippedQueries(skipped)
if !types.IsFormat_DOLT(types.Format_Default) {
// only new format supports reverse IndexTableAccess
reverseIndexSkip := []string{
@@ -372,7 +344,7 @@ func TestQueryPlans(t *testing.T) {
}
func TestIntegrationQueryPlans(t *testing.T) {
harness := newDoltHarness(t).WithParallelism(1)
harness := newDoltHarness(t)
defer harness.Close()
enginetest.TestIntegrationPlans(t, harness)
@@ -398,7 +370,7 @@ func TestDoltDiffQueryPlans(t *testing.T) {
func TestBranchPlans(t *testing.T) {
for _, script := range BranchPlanTests {
t.Run(script.Name, func(t *testing.T) {
harness := newDoltHarness(t).WithParallelism(1)
harness := newDoltHarness(t)
defer harness.Close()
e := mustNewEngine(t, harness)
@@ -507,6 +479,22 @@ func TestInsertIntoErrors(t *testing.T) {
enginetest.TestInsertIntoErrors(t, h)
}
// TestGeneratedColumns runs the engine test suite for generated columns, then runs each
// dolt-specific generated-column merge script against a fresh harness (the harness cannot
// be reused across scripts that create commits/branches).
func TestGeneratedColumns(t *testing.T) {
	enginetest.TestGeneratedColumns(t, newDoltHarness(t))
	for _, script := range GeneratedColumnMergeTestScripts {
		func() {
			harness := newDoltHarness(t)
			defer harness.Close()
			enginetest.TestScript(t, harness, script)
		}()
	}
}
// TestGeneratedColumnPlans runs the engine test suite's query plan tests for generated columns.
func TestGeneratedColumnPlans(t *testing.T) {
enginetest.TestGeneratedColumnPlans(t, newDoltHarness(t))
}
func TestSpatialQueries(t *testing.T) {
h := newDoltHarness(t)
defer h.Close()
@@ -615,7 +603,7 @@ func TestScripts(t *testing.T) {
if types.IsFormat_DOLT(types.Format_Default) {
skipped = append(skipped, newFormatSkippedScripts...)
}
h := newDoltHarness(t).WithSkippedQueries(skipped).WithParallelism(1)
h := newDoltHarness(t).WithSkippedQueries(skipped)
defer h.Close()
enginetest.TestScripts(t, h)
}
@@ -699,7 +687,7 @@ func TestJoinPlanning(t *testing.T) {
if types.IsFormat_LD(types.Format_Default) {
t.Skip("DOLT_LD keyless indexes are not sorted")
}
h := newDoltHarness(t).WithParallelism(1)
h := newDoltHarness(t)
defer h.Close()
enginetest.TestJoinPlanning(t, h)
}
@@ -1546,7 +1534,7 @@ func TestDoltMerge(t *testing.T) {
// harness can't reset effectively when there are new commits / branches created, so use a new harness for
// each script
func() {
h := newDoltHarness(t).WithParallelism(1)
h := newDoltHarness(t)
defer h.Close()
h.Setup(setup.MydbData)
enginetest.TestScript(t, h, script)
@@ -1559,7 +1547,7 @@ func TestDoltMergePrepared(t *testing.T) {
// harness can't reset effectively when there are new commits / branches created, so use a new harness for
// each script
func() {
h := newDoltHarness(t).WithParallelism(1)
h := newDoltHarness(t)
defer h.Close()
enginetest.TestScriptPrepared(t, h, script)
}()
@@ -1570,7 +1558,7 @@ func TestDoltRevert(t *testing.T) {
for _, script := range RevertScripts {
// harness can't reset effectively. Use a new harness for each script
func() {
h := newDoltHarness(t).WithParallelism(1)
h := newDoltHarness(t)
defer h.Close()
enginetest.TestScript(t, h, script)
}()
@@ -1581,7 +1569,7 @@ func TestDoltRevertPrepared(t *testing.T) {
for _, script := range RevertScripts {
// harness can't reset effectively. Use a new harness for each script
func() {
h := newDoltHarness(t).WithParallelism(1)
h := newDoltHarness(t)
defer h.Close()
enginetest.TestScriptPrepared(t, h, script)
}()
@@ -2455,7 +2443,7 @@ func TestScriptsPrepared(t *testing.T) {
skipped = append(skipped, newFormatSkippedScripts...)
}
skipPreparedTests(t)
h := newDoltHarness(t).WithSkippedQueries(skipped).WithParallelism(1)
h := newDoltHarness(t).WithSkippedQueries(skipped)
defer h.Close()
enginetest.TestScriptsPrepared(t, h)
}

View File

@@ -71,6 +71,7 @@ func newDoltHarness(t *testing.T) *DoltHarness {
dh := &DoltHarness{
t: t,
skippedQueries: defaultSkippedQueries,
parallelism: 1,
}
return dh

View File

@@ -4273,6 +4273,217 @@ var DoltVerifyConstraintsTestScripts = []queries.ScriptTest{
},
}
var GeneratedColumnMergeTestScripts = []queries.ScriptTest{
{
Name: "merge a generated stored column",
SetUpScript: []string{
"create table t1 (id bigint primary key, v1 bigint, v2 bigint, v3 bigint as (v1 + v2) stored, index (v3))",
"insert into t1 (id, v1, v2) values (1, 1, 1), (2, 2, 2)",
"call dolt_commit('-Am', 'first commit')",
"call dolt_checkout('-b', 'branch1')",
"insert into t1 (id, v1, v2) values (3, 3, 3)",
"call dolt_commit('-Am', 'branch1 commit')",
"call dolt_checkout('main')",
"call dolt_checkout('-b', 'branch2')",
"insert into t1 (id, v1, v2) values (4, 4, 4)",
"call dolt_commit('-Am', 'branch2 commit')",
"call dolt_checkout('main')",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('branch1')",
SkipResultsCheck: true,
},
{
Query: "select * from t1 order by id",
Expected: []sql.Row{
{1, 1, 1, 2},
{2, 2, 2, 4},
{3, 3, 3, 6},
},
},
{
Query: "select id from t1 where v3 = 6",
Expected: []sql.Row{{3}},
},
{
Query: "call dolt_merge('branch2')",
SkipResultsCheck: true,
},
{
Query: "select * from t1 order by id",
Expected: []sql.Row{
{1, 1, 1, 2},
{2, 2, 2, 4},
{3, 3, 3, 6},
{4, 4, 4, 8},
},
},
{
Query: "select id from t1 where v3 = 8",
Expected: []sql.Row{{4}},
},
},
},
{
Name: "merge a generated column created on another branch",
SetUpScript: []string{
"create table t1 (id bigint primary key, v1 bigint, v2 bigint)",
"insert into t1 (id, v1, v2) values (1, 1, 1), (2, 2, 2)",
"call dolt_commit('-Am', 'first commit')",
"call dolt_branch('branch1')",
"insert into t1 (id, v1, v2) values (3, 3, 3)",
"call dolt_commit('-Am', 'main commit')",
"call dolt_checkout('branch1')",
"alter table t1 add column v3 bigint as (v1 + v2) stored",
"alter table t1 add key idx_v3 (v3)",
"insert into t1 (id, v1, v2) values (4, 4, 4)",
"call dolt_commit('-Am', 'branch1 commit')",
"call dolt_checkout('main')",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('branch1')",
SkipResultsCheck: true,
},
{
Query: "select * from t1 order by id",
Expected: []sql.Row{
{1, 1, 1, 2},
{2, 2, 2, 4},
{3, 3, 3, 6},
{4, 4, 4, 8},
},
Skip: true,
},
{
Query: "select id from t1 where v3 = 6",
Expected: []sql.Row{{3}},
Skip: true,
},
{
Query: "select id from t1 where v3 = 8",
Expected: []sql.Row{{4}},
},
},
},
{
Name: "merge a virtual column",
SetUpScript: []string{
"create table t1 (id bigint primary key, v1 bigint, v2 bigint, v3 bigint as (v1 + v2), index (v3))",
"insert into t1 (id, v1, v2) values (1, 1, 1), (2, 2, 2)",
"call dolt_commit('-Am', 'first commit')",
"call dolt_checkout('-b', 'branch1')",
"insert into t1 (id, v1, v2) values (3, 3, 3)",
"call dolt_commit('-Am', 'branch1 commit')",
"call dolt_checkout('main')",
"call dolt_checkout('-b', 'branch2')",
"insert into t1 (id, v1, v2) values (4, 4, 4)",
"call dolt_commit('-Am', 'branch2 commit')",
"call dolt_checkout('main')",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('branch1')",
SkipResultsCheck: true,
Skip: true,
},
{
Query: "select * from t1 order by id",
Expected: []sql.Row{
{1, 1, 1, 2},
{2, 2, 2, 4},
{3, 3, 3, 6},
},
Skip: true,
},
{
Query: "select id from t1 where v3 = 6",
Expected: []sql.Row{{3}},
Skip: true,
},
{
Query: "call dolt_merge('branch2')",
SkipResultsCheck: true,
Skip: true,
},
{
Query: "select * from t1 order by id",
Expected: []sql.Row{
{1, 1, 1, 2},
{2, 2, 2, 4},
{3, 3, 3, 6},
{4, 4, 4, 8},
},
Skip: true,
},
{
Query: "select id from t1 where v3 = 8",
Expected: []sql.Row{{4}},
Skip: true,
},
},
},
{
Name: "merge a virtual column created on another branch",
SetUpScript: []string{
"create table t1 (id bigint primary key, v1 bigint, v2 bigint)",
"insert into t1 (id, v1, v2) values (1, 1, 1), (2, 2, 2)",
"call dolt_commit('-Am', 'first commit')",
"call dolt_branch('branch1')",
"insert into t1 (id, v1, v2) values (3, 3, 3)",
"call dolt_commit('-Am', 'main commit')",
"call dolt_checkout('branch1')",
"alter table t1 add column v3 bigint as (v1 + v2)",
"alter table t1 add key idx_v3 (v3)",
"insert into t1 (id, v1, v2) values (4, 4, 4)",
"call dolt_commit('-Am', 'branch1 commit')",
"call dolt_checkout('main')",
},
Assertions: []queries.ScriptTestAssertion{
{
Query: "call dolt_merge('branch1')",
SkipResultsCheck: true,
Skip: true,
},
{
Query: "select * from t1 order by id",
Expected: []sql.Row{
{1, 1, 1, 2},
{2, 2, 2, 4},
{3, 3, 3, 6},
},
Skip: true,
},
{
Query: "select id from t1 where v3 = 6",
Expected: []sql.Row{{3}},
Skip: true,
},
{
Query: "call dolt_merge('branch2')",
SkipResultsCheck: true,
Skip: true,
},
{
Query: "select * from t1 order by id",
Expected: []sql.Row{
{1, 1, 1, 2},
{2, 2, 2, 4},
{3, 3, 3, 6},
{4, 4, 4, 8},
},
Skip: true,
},
{
Query: "select id from t1 where v3 = 8",
Expected: []sql.Row{{4}},
Skip: true,
},
},
},
}
// convertMergeScriptTest converts a MergeScriptTest into a standard ScriptTest. If flipSides is true, then the
// left and right setup is swapped (i.e. left setup is done on right branch and right setup is done on main branch).
// This enables us to test merges in both directions, since the merge code is asymmetric and some code paths currently

View File

@@ -170,6 +170,11 @@ func validateKeylessIndex(ctx context.Context, sch schema.Schema, def schema.Ind
return nil
}
// Indexes on virtual columns cannot be rebuilt via the method below
if isVirtualIndex(def, sch) {
return nil
}
secondary = prolly.ConvertToSecondaryKeylessIndex(secondary)
idxDesc, _ := secondary.Descriptors()
builder := val.NewTupleBuilder(idxDesc)
@@ -237,6 +242,11 @@ func validatePkIndex(ctx context.Context, sch schema.Schema, def schema.Index, p
return nil
}
// Indexes on virtual columns cannot be rebuilt via the method below
if isVirtualIndex(def, sch) {
return nil
}
// secondary indexes have empty values
idxDesc, _ := secondary.Descriptors()
builder := val.NewTupleBuilder(idxDesc)
@@ -317,6 +327,19 @@ func validatePkIndex(ctx context.Context, sch schema.Schema, def schema.Index, p
}
}
// isVirtualIndex reports whether the index |def| covers any column that is
// marked virtual in the schema |sch|. Panics if an index column name is not
// present in the schema, since that indicates a corrupt index definition.
func isVirtualIndex(def schema.Index, sch schema.Schema) bool {
	allCols := sch.GetAllCols()
	for _, name := range def.ColumnNames() {
		column, found := allCols.GetByName(name)
		if !found {
			panic(fmt.Sprintf("column not found: %s", name))
		}
		if column.Virtual {
			return true
		}
	}
	return false
}
// shouldDereferenceContent returns true if address encoded content should be dereferenced when
// building a key for a secondary index. This is determined by looking at the encoding of the field
// in the main table (|tablePos| and |tableValueDescriptor|) and the encoding of the field in the index

View File

@@ -17,7 +17,6 @@ package index
import (
"context"
"io"
"strings"
"github.com/dolthub/go-mysql-server/sql"
"golang.org/x/sync/errgroup"
@@ -263,38 +262,7 @@ func coveringIndexMapping(d DoltIndex, projections []uint64) (keyMap, ordMap val
}
func primaryIndexMapping(idx DoltIndex, sqlSch sql.PrimaryKeySchema, projections []uint64) (keyProj, valProj, ordProj val.OrdinalMapping) {
pks := idx.Schema().GetPKCols()
nonPks := idx.Schema().GetNonPKCols()
allMap := make([]int, len(projections)*2)
i := 0
j := len(projections) - 1
for k, p := range projections {
if idx, ok := pks.TagToIdx[p]; ok {
allMap[i] = idx
allMap[len(projections)+i] = k
i++
}
if idx, ok := nonPks.TagToIdx[p]; ok {
allMap[j] = idx
allMap[len(projections)+j] = k
j--
}
}
keyProj = allMap[:i]
valProj = allMap[i:len(projections)]
ordProj = allMap[len(projections):]
return
}
func contains(slice []string, str string) (ok bool) {
for _, x := range slice {
if strings.ToLower(x) == strings.ToLower(str) {
ok = true
}
}
return
return projectionMappingsForIndex(idx.Schema(), projections)
}
type prollyKeylessIndexIter struct {

View File

@@ -32,7 +32,7 @@ type prollyRowIter struct {
keyProj []int
valProj []int
// orjProj is a concatenated list of output ordinals for |keyProj| and |valProj|
// ordProj is a concatenated list of output ordinals for |keyProj| and |valProj|
ordProj []int
rowLen int
}
@@ -73,26 +73,12 @@ func NewProllyRowIter(sch schema.Schema, rows prolly.Map, iter prolly.MapIter, p
// projectionMappings returns data structures that specify 1) which fields we read
// from key and value tuples, and 2) the position of those fields in the output row.
func projectionMappings(sch schema.Schema, projections []uint64) (keyMap, valMap, ordMap val.OrdinalMapping) {
pks := sch.GetPKCols()
nonPks := sch.GetNonPKCols()
keyMap, valMap, ordMap = projectionMappingsForIndex(sch, projections)
adjustOffsetsForKeylessTable(sch, keyMap, valMap)
return keyMap, valMap, ordMap
}
allMap := make([]int, 2*len(projections))
i := 0
j := len(projections) - 1
for k, t := range projections {
if idx, ok := pks.TagToIdx[t]; ok {
allMap[len(projections)+i] = k
allMap[i] = idx
i++
} else if idx, ok := nonPks.TagToIdx[t]; ok {
allMap[j] = idx
allMap[len(projections)+j] = k
j--
}
}
keyMap = allMap[:i]
valMap = allMap[i:len(projections)]
ordMap = allMap[len(projections):]
func adjustOffsetsForKeylessTable(sch schema.Schema, keyMap val.OrdinalMapping, valMap val.OrdinalMapping) {
if schema.IsKeyless(sch) {
// skip the cardinality value, increment every index
for i := range keyMap {
@@ -102,7 +88,47 @@ func projectionMappings(sch schema.Schema, projections []uint64) (keyMap, valMap
valMap[i]++
}
}
return
}
// projectionMappingsForIndex computes the tuple-field mappings needed to read
// the projected columns (identified by tag in |projections|) out of the key and
// value tuples of a table with schema |sch|.
//
// Returns:
//   keyMap: for each projected primary-key column, its stored index in the key tuple
//   valMap: for each projected non-key column, its stored index in the value tuple
//          (filled backward, so entries appear in reverse projection order)
//   ordMap: for each mapped column, the ordinal of that projection in the output row
//
// Virtual columns have no stored tuple field, so they are excluded from all
// three mappings.
func projectionMappingsForIndex(sch schema.Schema, projections []uint64) (keyMap, valMap, ordMap val.OrdinalMapping) {
	pks := sch.GetPKCols()
	nonPks := sch.GetNonPKCols()

	// Size the mappings by the number of physical (non-virtual) projected
	// columns. Only count individually when the schema has virtual columns;
	// otherwise every projection is physical.
	numPhysicalColumns := len(projections)
	if schema.IsVirtual(sch) {
		numPhysicalColumns = 0
		for _, t := range projections {
			if idx, ok := sch.GetAllCols().TagToIdx[t]; ok && !sch.GetAllCols().GetByIndex(idx).Virtual {
				numPhysicalColumns++
			}
		}
	}

	// Build a slice of positional values. For a set of P projections, for K key columns and N=P-K non-key columns,
	// we'll generate a slice 2P long structured as follows:
	// [K key projections,     // list of tuple indexes to read for key columns
	//  N non-key projections, // list of tuple indexes to read for non-key columns, ordered backward from end
	//  P output ordinals]     // list of output column ordinals for each projection
	// Afterward we slice this into three separate mappings to return.
	allMap := make([]int, 2*numPhysicalColumns)
	keyIdx := 0
	nonKeyIdx := numPhysicalColumns - 1
	for projNum, tag := range projections {
		if idx, ok := pks.StoredIndexByTag(tag); ok && !pks.GetByStoredIndex(idx).Virtual {
			// Key columns fill forward from the front of each half.
			allMap[keyIdx] = idx
			allMap[numPhysicalColumns+keyIdx] = projNum
			keyIdx++
		} else if idx, ok := nonPks.StoredIndexByTag(tag); ok && !nonPks.GetByStoredIndex(idx).Virtual {
			// Non-key columns fill backward from the end of each half; after
			// the loop they occupy [keyIdx, numPhysicalColumns).
			allMap[nonKeyIdx] = idx
			allMap[numPhysicalColumns+nonKeyIdx] = projNum
			nonKeyIdx--
		}
		// Tags not found in the schema (or found but virtual) are skipped:
		// they have no physical tuple field to map.
	}
	keyMap = allMap[:keyIdx]
	valMap = allMap[keyIdx:numPhysicalColumns]
	ordMap = allMap[numPhysicalColumns:]
	return keyMap, valMap, ordMap
}
func (it prollyRowIter) Next(ctx *sql.Context) (sql.Row, error) {

View File

@@ -91,7 +91,7 @@ func BasicSelectTests() []SelectTest {
var headCommitHash string
switch types.Format_Default {
case types.Format_DOLT:
headCommitHash = "m1gkfp9ii4hiqhpmgcfet5sojvopo4da"
headCommitHash = "li3mp6hml1bctgon5hptfh9b8rqc1i6a"
case types.Format_LD_1:
headCommitHash = "73hc2robs4v0kt9taoe3m5hd49dmrgun"
}

View File

@@ -40,21 +40,26 @@ func FromDoltSchema(dbName, tableName string, sch schema.Schema) (sql.PrimaryKey
extra = "auto_increment"
}
var deflt *sql.ColumnDefaultValue
var deflt, generated *sql.ColumnDefaultValue
if col.Default != "" {
deflt = sql.NewUnresolvedColumnDefaultValue(col.Default)
}
if col.Generated != "" {
generated = sql.NewUnresolvedColumnDefaultValue(col.Generated)
}
cols[i] = &sql.Column{
Name: col.Name,
Type: sqlType,
Default: deflt,
Generated: generated,
Nullable: col.IsNullable(),
DatabaseSource: dbName,
Source: tableName,
PrimaryKey: col.IsPartOfPK,
AutoIncrement: col.AutoIncrement,
Comment: col.Comment,
Virtual: col.Virtual,
Extra: extra,
}
i++
@@ -65,7 +70,6 @@ func FromDoltSchema(dbName, tableName string, sch schema.Schema) (sql.PrimaryKey
}
// ToDoltSchema returns a dolt Schema from the sql schema given, suitable for use in creating a table.
// For result set schemas, see ToDoltResultSchema.
func ToDoltSchema(
ctx context.Context,
root *doltdb.RootValue,
@@ -137,20 +141,32 @@ func ToDoltCol(tag uint64, col *sql.Column) (schema.Column, error) {
return schema.Column{}, err
}
return schema.NewColumnWithTypeInfo(col.Name, tag, typeInfo, col.PrimaryKey, col.Default.String(), col.AutoIncrement, col.Comment, constraints...)
}
// ToDoltResultSchema returns a dolt Schema from the sql schema given, suitable for use as a result set
func ToDoltResultSchema(sqlSchema sql.Schema) (schema.Schema, error) {
var cols []schema.Column
for i, col := range sqlSchema {
convertedCol, err := ToDoltCol(uint64(i), col)
if err != nil {
return nil, err
}
cols = append(cols, convertedCol)
defaultVal := ""
generatedVal := ""
if col.Default != nil {
defaultVal = col.Default.String()
} else {
generatedVal = col.Generated.String()
}
colColl := schema.NewColCollection(cols...)
return schema.UnkeyedSchemaFromCols(colColl), nil
c := schema.Column{
Name: col.Name,
Tag: tag,
Kind: typeInfo.NomsKind(),
IsPartOfPK: col.PrimaryKey,
TypeInfo: typeInfo,
Default: defaultVal,
Generated: generatedVal,
Virtual: col.Virtual,
AutoIncrement: col.AutoIncrement,
Comment: col.Comment,
Constraints: constraints,
}
err = schema.ValidateColumn(c)
if err != nil {
return schema.Column{}, err
}
return c, nil
}

View File

@@ -377,14 +377,12 @@ func ordinalMappingsFromSchema(from sql.Schema, to schema.Schema) (km, vm val.Or
}
func makeOrdinalMapping(from sql.Schema, to *schema.ColCollection) (m val.OrdinalMapping) {
m = make(val.OrdinalMapping, len(to.GetColumns()))
m = make(val.OrdinalMapping, to.StoredSize())
for i := range m {
name := to.GetByIndex(i).Name
for j, col := range from {
if col.Name == name {
m[i] = j
}
}
col := to.GetByStoredIndex(i)
name := col.Name
colIdx := from.IndexOfColName(name)
m[i] = colIdx
}
return
}

View File

@@ -33,7 +33,7 @@ table Column {
// sql column type
sql_type:string;
// sql default value
// sql default value. For generated columns, this is the generated expression rather than the default.
default_value:string;
// sql comment

View File

@@ -81,7 +81,7 @@ assert_feature_version() {
# Tests that don't end in a valid dolt dir will fail the above
# command, don't check its output in that case
if [ "$status" -eq 0 ]; then
[[ "$output" =~ "feature version: 4" ]] || exit 1
[[ "$output" =~ "feature version: 5" ]] || exit 1
else
# Clear status to avoid BATS failing if this is the last run command
status=0

View File

@@ -14,14 +14,6 @@ get_head_commit() {
dolt log -n 1 | grep -m 1 commit | cut -c 13-44
}
@test "status: dolt version --feature" {
# bump this test with feature version bumps
run dolt version --feature
[ "$status" -eq 0 ]
[[ "$output" =~ "dolt version" ]] || false
[[ "$output" =~ "feature version: 4" ]] || false
}
@test "status: no changes" {
run dolt status
[ "$status" -eq 0 ]